summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig6
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/cppc_acpi.c25
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/device_sysfs.c4
-rw-r--r--drivers/acpi/ec.c69
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/nfit/core.c274
-rw-r--r--drivers/acpi/nfit/mce.c2
-rw-r--r--drivers/acpi/nfit/nfit.h37
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c137
-rw-r--r--drivers/acpi/scan.c21
-rw-r--r--drivers/acpi/utils.c1
-rw-r--r--drivers/android/binder.c42
-rw-r--r--drivers/android/binder_alloc.c18
-rw-r--r--drivers/ata/ahci_mtk.c6
-rw-r--r--drivers/ata/ahci_qoriq.c12
-rw-r--r--drivers/ata/libata-core.c12
-rw-r--r--drivers/ata/pata_pdc2027x.c16
-rw-r--r--drivers/atm/ambassador.c11
-rw-r--r--drivers/atm/firestream.c8
-rw-r--r--drivers/atm/fore200e.c4
-rw-r--r--drivers/atm/horizon.c8
-rw-r--r--drivers/atm/idt77105.c8
-rw-r--r--drivers/atm/idt77252.c6
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/atm/lanai.c16
-rw-r--r--drivers/atm/nicstar.c8
-rw-r--r--drivers/atm/suni.c2
-rw-r--r--drivers/auxdisplay/Kconfig1
-rw-r--r--drivers/base/Kconfig25
-rw-r--r--drivers/base/arch_topology.c2
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/dd.c16
-rw-r--r--drivers/base/isa.c10
-rw-r--r--drivers/base/power/main.c15
-rw-r--r--drivers/base/power/runtime.c34
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/base/test/test_async_driver_probe.c6
-rw-r--r--drivers/block/DAC960.c9
-rw-r--r--drivers/block/DAC960.h2
-rw-r--r--drivers/block/Kconfig12
-rw-r--r--drivers/block/amiflop.c57
-rw-r--r--drivers/block/aoe/aoecmd.c4
-rw-r--r--drivers/block/aoe/aoedev.c9
-rw-r--r--drivers/block/ataflop.c16
-rw-r--r--drivers/block/brd.c67
-rw-r--r--drivers/block/floppy.c10
-rw-r--r--drivers/block/null_blk.c5
-rw-r--r--drivers/block/rbd.c65
-rw-r--r--drivers/block/rsxx/cregs.c7
-rw-r--r--drivers/block/rsxx/dma.c7
-rw-r--r--drivers/block/skd_main.c6
-rw-r--r--drivers/block/sunvdc.c9
-rw-r--r--drivers/block/swim3.c29
-rw-r--r--drivers/block/umem.c5
-rw-r--r--drivers/block/xsysace.c6
-rw-r--r--drivers/block/zram/zcomp.c6
-rw-r--r--drivers/block/zram/zram_drv.c18
-rw-r--r--drivers/bus/Kconfig15
-rw-r--r--drivers/bus/Makefile2
-rw-r--r--drivers/bus/arm-cci.c7
-rw-r--r--drivers/bus/arm-ccn.c25
-rw-r--r--drivers/bus/ti-sysc.c583
-rw-r--r--drivers/bus/ts-nbus.c375
-rw-r--r--drivers/char/dtlk.c4
-rw-r--r--drivers/char/hangcheck-timer.c4
-rw-r--r--drivers/char/ipmi/bt-bmc.c13
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c50
-rw-r--r--drivers/char/ipmi/ipmi_si_parisc.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c7
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c7
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/nwbutton.c4
-rw-r--r--drivers/char/nwbutton.h2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c6
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c7
-rw-r--r--drivers/char/pcmcia/synclink_cs.c8
-rw-r--r--drivers/char/random.c1
-rw-r--r--drivers/char/rtc.c4
-rw-r--r--drivers/char/tpm/tpm-dev-common.c7
-rw-r--r--drivers/clk/at91/clk-utmi.c95
-rw-r--r--drivers/clk/bcm/Kconfig9
-rw-r--r--drivers/clk/bcm/Makefile1
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c30
-rw-r--r--drivers/clk/bcm/clk-hr2.c27
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c7
-rw-r--r--drivers/clk/clk-cdce925.c2
-rw-r--r--drivers/clk/clk-gpio.c84
-rw-r--r--drivers/clk/clk-hsdk-pll.c4
-rw-r--r--drivers/clk/clk-mux.c6
-rw-r--r--drivers/clk/clk-stm32h7.c4
-rw-r--r--drivers/clk/clk-twl6040.c2
-rw-r--r--drivers/clk/clk-u300.c84
-rw-r--r--drivers/clk/clk-wm831x.c6
-rw-r--r--drivers/clk/clk-xgene.c20
-rw-r--r--drivers/clk/clk.c178
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c2
-rw-r--r--drivers/clk/hisilicon/clk-hix5hd2.c4
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c6
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c12
-rw-r--r--drivers/clk/imx/clk-busy.c4
-rw-r--r--drivers/clk/imx/clk-gate2.c2
-rw-r--r--drivers/clk/imx/clk-imx6q.c2
-rw-r--r--drivers/clk/imx/clk-imx6ul.c2
-rw-r--r--drivers/clk/imx/clk-imx7d.c11
-rw-r--r--drivers/clk/imx/clk-pllv1.c2
-rw-r--r--drivers/clk/imx/clk-pllv2.c2
-rw-r--r--drivers/clk/mediatek/Kconfig80
-rw-r--r--drivers/clk/mediatek/Makefile12
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c2
-rw-r--r--drivers/clk/mediatek/clk-mt2712-bdp.c102
-rw-r--r--drivers/clk/mediatek/clk-mt2712-img.c80
-rw-r--r--drivers/clk/mediatek/clk-mt2712-jpgdec.c76
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mfg.c75
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mm.c170
-rw-r--r--drivers/clk/mediatek/clk-mt2712-vdec.c94
-rw-r--r--drivers/clk/mediatek/clk-mt2712-venc.c77
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c1435
-rw-r--r--drivers/clk/mediatek/clk-mt7622-aud.c195
-rw-r--r--drivers/clk/mediatek/clk-mt7622-eth.c156
-rw-r--r--drivers/clk/mediatek/clk-mt7622-hif.c169
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c780
-rw-r--r--drivers/clk/mediatek/clk-mtk.h3
-rw-r--r--drivers/clk/mediatek/clk-pll.c18
-rw-r--r--drivers/clk/meson/gxbb.c292
-rw-r--r--drivers/clk/meson/gxbb.h6
-rw-r--r--drivers/clk/mmp/clk-apbc.c2
-rw-r--r--drivers/clk/mmp/clk-apmu.c2
-rw-r--r--drivers/clk/mmp/clk-frac.c6
-rw-r--r--drivers/clk/mmp/clk-gate.c4
-rw-r--r--drivers/clk/mmp/clk-mix.c27
-rw-r--r--drivers/clk/mmp/clk-mmp2.c6
-rw-r--r--drivers/clk/mmp/clk-pxa168.c6
-rw-r--r--drivers/clk/mmp/clk-pxa910.c8
-rw-r--r--drivers/clk/mxs/clk-div.c2
-rw-r--r--drivers/clk/mxs/clk-frac.c2
-rw-r--r--drivers/clk/pxa/clk-pxa.c4
-rw-r--r--drivers/clk/qcom/clk-rcg.h3
-rw-r--r--drivers/clk/qcom/clk-rcg2.c79
-rw-r--r--drivers/clk/qcom/clk-rpm.c93
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c82
-rw-r--r--drivers/clk/qcom/common.c32
-rw-r--r--drivers/clk/renesas/Kconfig5
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/clk-div6.c38
-rw-r--r--drivers/clk/renesas/clk-div6.h3
-rw-r--r--drivers/clk/renesas/clk-mstp.c5
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c1
-rw-r--r--drivers/clk/renesas/clk-rz.c2
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c199
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c7
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h6
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c79
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h3
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c105
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h4
-rw-r--r--drivers/clk/rockchip/clk-cpu.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c12
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c2
-rw-r--r--drivers/clk/samsung/Makefile1
-rw-r--r--drivers/clk/samsung/clk-cpu.c2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c76
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c111
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c179
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c18
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c5
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c409
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c12
-rw-r--r--drivers/clk/samsung/clk-pll.c11
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c16
-rw-r--r--drivers/clk/samsung/clk.c45
-rw-r--r--drivers/clk/samsung/clk.h80
-rw-r--r--drivers/clk/sirf/clk-atlas6.c2
-rw-r--r--drivers/clk/sirf/clk-atlas7.c18
-rw-r--r--drivers/clk/sirf/clk-common.c92
-rw-r--r--drivers/clk/sirf/clk-prima2.c2
-rw-r--r--drivers/clk/spear/clk-aux-synth.c10
-rw-r--r--drivers/clk/spear/clk-frac-synth.c6
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c6
-rw-r--r--drivers/clk/spear/clk-vco-pll.c12
-rw-r--r--drivers/clk/spear/clk.h4
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c28
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c27
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.h8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c38
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c21
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c56
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c25
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.h25
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c158
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.h80
-rw-r--r--drivers/clk/sunxi/clk-factors.c2
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/clk/tegra/clk-bpmp.c15
-rw-r--r--drivers/clk/tegra/clk-dfll.c10
-rw-r--r--drivers/clk/tegra/clk-dfll.h2
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-periph.c8
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c24
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c2
-rw-r--r--drivers/clk/tegra/clk-tegra114.c4
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c12
-rw-r--r--drivers/clk/tegra/clk-tegra20.c13
-rw-r--r--drivers/clk/tegra/clk-tegra210.c51
-rw-r--r--drivers/clk/tegra/clk-tegra30.c23
-rw-r--r--drivers/clk/tegra/clk.h3
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c3
-rw-r--r--drivers/clk/ti/divider.c4
-rw-r--r--drivers/clk/ti/mux.c4
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mio.c7
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c2
-rw-r--r--drivers/clk/ux500/clk-prcc.c6
-rw-r--r--drivers/clk/ux500/clk-prcmu.c6
-rw-r--r--drivers/clk/ux500/clk-sysctrl.c6
-rw-r--r--drivers/clk/versatile/clk-icst.c7
-rw-r--r--drivers/clocksource/arm_arch_timer.c35
-rw-r--r--drivers/clocksource/timer-of.c9
-rw-r--r--drivers/clocksource/timer-of.h2
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/cpufreq_governor.c19
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c11
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c4
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c167
-rw-r--r--drivers/crypto/nx/nx-842.c2
-rw-r--r--drivers/dax/device.c15
-rw-r--r--drivers/dax/super.c14
-rw-r--r--drivers/dma-buf/dma-buf.c2
-rw-r--r--drivers/dma-buf/dma-fence.c1
-rw-r--r--drivers/dma-buf/reservation.c56
-rw-r--r--drivers/dma-buf/sw_sync.c10
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/dma-jz4740.c4
-rw-r--r--drivers/dma/dmatest.c55
-rw-r--r--drivers/dma/fsl-edma.c28
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/firmware/Kconfig11
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/firmware/efi/esrt.c17
-rw-r--r--drivers/firmware/efi/runtime-map.c10
-rw-r--r--drivers/firmware/google/vpd.c48
-rw-r--r--drivers/firmware/psci_checker.c5
-rw-r--r--drivers/firmware/qcom_scm-32.c31
-rw-r--r--drivers/firmware/qcom_scm-64.c71
-rw-r--r--drivers/firmware/qcom_scm.c182
-rw-r--r--drivers/firmware/qcom_scm.h13
-rw-r--r--drivers/firmware/qemu_fw_cfg.c11
-rw-r--r--drivers/firmware/tegra/Makefile4
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c444
-rw-r--r--drivers/firmware/tegra/bpmp.c31
-rw-r--r--drivers/firmware/ti_sci.c2
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c2
-rw-r--r--drivers/fsi/fsi-core.c6
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-aspeed.c4
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-em.c4
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pxa.c14
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-tz1090.c4
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpu/drm/Kconfig7
-rw-r--r--drivers/gpu/drm/Makefile8
-rw-r--r--drivers/gpu/drm/amd/acp/Makefile21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c159
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c212
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c399
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c410
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h171
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c337
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h)18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c500
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c688
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c358
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c400
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c119
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c499
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_int.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c115
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c459
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c50
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c615
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.h18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c83
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c38
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c90
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h128
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c175
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c91
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig45
-rw-r--r--drivers/gpu/drm/amd/display/Makefile43
-rw-r--r--drivers/gpu/drm/amd/display/TODO107
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile38
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4934
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h259
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c498
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c755
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h102
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c446
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c379
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile54
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile32
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h)36
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c161
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c397
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c307
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile48
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c3871
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c1934
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2424
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h102
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c812
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c290
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c265
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c364
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h (renamed from drivers/gpu/drm/radeon/radeon_kfd.h)33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile39
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c191
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/custom_float.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c3257
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c1899
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c120
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c1626
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1684
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c2435
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c775
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c2601
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c331
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2807
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c397
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c193
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h467
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c193
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h706
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h652
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h228
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c945
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c1383
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c827
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h137
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c620
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h631
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h238
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1379
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c700
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h347
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h310
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c1119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c1620
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h733
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c1463
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c152
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c933
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c522
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c2991
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c1052
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c738
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c555
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1329
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c1966
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h273
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c711
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c716
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c854
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c1283
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c1004
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c1174
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c834
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c1257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c481
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h1386
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c816
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c702
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c960
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h683
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2958
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h167
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c351
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h186
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c1468
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c1200
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h374
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h387
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h282
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dc_features.h559
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h557
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c6124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c1763
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c392
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c1905
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c192
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h150
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c272
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c591
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c232
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c162
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile99
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c571
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h119
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c470
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c570
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h210
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c160
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c128
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c875
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c97
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h113
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c244
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h80
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c601
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h122
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h182
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h136
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/psm.h)34
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h283
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/custom_float.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h481
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h635
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h156
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h175
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h289
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h152
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h183
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h304
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h197
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h392
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h172
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile69
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c430
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c289
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c303
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c356
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c170
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h193
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h96
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile30
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h39
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h106
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_interface.h44
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h310
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h143
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h49
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h154
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h149
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h466
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed32_32.h129
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h105
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_types.h332
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h445
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h140
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h294
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/irq_service_interface.h51
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h170
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h188
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h166
-rw-r--r--drivers/gpu/drm/amd/display/include/set_mode_types.h107
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h95
-rw-r--r--drivers/gpu/drm/amd/display/include/vector.h150
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c1483
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h167
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h100
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h15
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h1
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h19
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h32
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h6
-rw-r--r--drivers/gpu/drm/amd/include/linux/chash.h366
-rw-r--r--drivers/gpu/drm/amd/include/v9_structs.h12
-rw-r--r--drivers/gpu/drm/amd/include/vi_structs.h259
-rw-r--r--drivers/gpu/drm/amd/lib/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/lib/Makefile32
-rw-r--r--drivers/gpu/drm/amd/lib/chash.c638
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile27
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c528
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/Makefile11
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c291
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c215
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h59
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c104
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c410
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h100
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c445
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h89
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/psm.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile27
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c46
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c640
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c161
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c151
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c417
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c2513
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c250
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h40
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c88
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c157
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c510
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c475
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c110
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c183
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c520
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c129
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c35
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c281
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h152
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmanager.h109
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmgr.h124
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h10299
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h200
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h11792
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_instance.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72.h24
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h24
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h211
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile28
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c)2198
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h78
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c308
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c2498
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h53
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c2537
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c2567
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c2364
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c2380
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c130
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c263
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h40
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c261
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c3275
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c3181
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h20
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c194
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h16
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h24
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c66
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h19
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c3
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c3
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c12
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c16
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c37
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c46
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h65
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c21
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/armada/armada_510.c1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c22
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c5
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c9
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c7
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c49
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h4
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c4
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h1
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig10
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h54
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c349
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c133
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c38
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c2
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c48
-rw-r--r--drivers/gpu/drm/bridge/panel.c10
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c994
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c96
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c25
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c108
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c73
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c2
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c45
-rw-r--r--drivers/gpu/drm/drm_atomic.c36
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c327
-rw-r--r--drivers/gpu/drm/drm_auth.c32
-rw-r--r--drivers/gpu/drm/drm_bridge.c7
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c108
-rw-r--r--drivers/gpu/drm/drm_crtc.c15
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h2
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c2
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c8
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c16
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c7
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c74
-rw-r--r--drivers/gpu/drm/drm_drv.c54
-rw-r--r--drivers/gpu/drm/drm_edid.c86
-rw-r--r--drivers/gpu/drm/drm_edid_load.c16
-rw-r--r--drivers/gpu/drm/drm_encoder.c7
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c77
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c18
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c17
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c86
-rw-r--r--drivers/gpu/drm/drm_internal.h7
-rw-r--r--drivers/gpu/drm/drm_ioctl.c8
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c28
-rw-r--r--drivers/gpu/drm/drm_lease.c767
-rw-r--r--drivers/gpu/drm/drm_mm.c8
-rw-r--r--drivers/gpu/drm/drm_mode_config.c28
-rw-r--r--drivers/gpu/drm/drm_mode_object.c37
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c98
-rw-r--r--drivers/gpu/drm/drm_of.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/drm_plane.c90
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c2
-rw-r--r--drivers/gpu/drm/drm_prime.c4
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c21
-rw-r--r--drivers/gpu/drm/drm_property.c6
-rw-r--r--drivers/gpu/drm/drm_scdc_helper.c12
-rw-r--r--drivers/gpu/drm/drm_syncobj.c93
-rw-r--r--drivers/gpu/drm/drm_trace.h2
-rw-r--r--drivers/gpu/drm/drm_vblank.c468
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig2
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c15
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c43
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c25
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c69
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c217
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h13
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c197
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.h7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c120
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c106
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c495
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.h49
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c46
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c310
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c460
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h8
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c5
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c7
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c9
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile10
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c51
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c39
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c133
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h14
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c92
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c44
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c47
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c164
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c638
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c96
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h368
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c676
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c123
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c47
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c91
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c728
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h66
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h42
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c52
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c131
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c21
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c152
-rw-r--r--drivers/gpu/drm/i915/i915_gemfs.c75
-rw-r--r--drivers/gpu/drm/i915/i915_gemfs.h34
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c43
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c476
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.h80
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1101
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.c109
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.h34
-rw-r--r--drivers/gpu/drm/i915/i915_params.c207
-rw-r--r--drivers/gpu/drm/i915/i915_params.h85
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c341
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h86
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c45
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c76
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h118
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h18
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h6
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c144
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h49
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c49
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c392
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c21
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c318
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c136
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c39
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1040
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c41
-rw-r--r--drivers/gpu/drm/i915/intel_display.c798
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c293
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c76
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h159
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c76
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c45
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c534
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c41
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c17
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c14
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c369
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h120
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c (renamed from drivers/gpu/drm/i915/intel_guc_loader.c)252
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.h33
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h62
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c32
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.h59
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c12
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c186
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c172
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h41
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c707
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h37
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c51
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c80
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1231
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c450
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c194
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h184
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c128
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c40
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c39
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c16
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c363
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h247
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c318
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h121
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c281
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h19
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h339
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c1734
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c162
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_request.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c42
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c50
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c78
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.h42
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c15
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c14
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c3
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c6
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/Makefile4
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c261
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h109
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c304
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c78
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c297
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h66
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c8
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c30
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c22
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c160
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c71
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c232
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c41
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c83
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h37
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c2
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c37
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c242
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h51
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c142
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c36
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h33
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c152
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c5
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl906f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0008.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000a.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000b.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000c.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000d.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if500b.h25
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if500d.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if900b.h23
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if900d.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ifb00d.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ifc00d.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mem.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mmu.h56
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/vmm.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h36
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h140
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c391
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c198
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c280
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c135
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.h31
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mem.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/mm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/oproxy.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ramht.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c266
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c61
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c329
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c696
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c228
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h)49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. (renamed from drivers/net/virtio_net.)0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c242
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c122
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c104
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c231
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c192
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c352
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c1513
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h310
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c403
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c347
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c140
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c230
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c385
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c56
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c8
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c62
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c381
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c30
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig33
-rw-r--r--drivers/gpu/drm/panel/Makefile4
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c491
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c514
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c532
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c372
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c51
-rw-r--r--drivers/gpu/drm/pl111/Kconfig3
-rw-r--r--drivers/gpu/drm/pl111/Makefile4
-rw-r--r--drivers/gpu/drm/pl111/pl111_connector.c126
-rw-r--r--drivers/gpu/drm/pl111/pl111_debugfs.c6
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c79
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h37
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c149
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c270
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.h9
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c22
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c49
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h28
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c4
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h4
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c99
-rw-r--r--drivers/gpu/drm/r128/r128_state.c6
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c46
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c22
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c21
-rw-r--r--drivers/gpu/drm/radeon/cik.c38
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c870
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c35
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig9
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c40
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c9
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c89
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c19
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c18
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c32
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c586
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h114
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c3
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c6
-rw-r--r--drivers/gpu/drm/stm/drv.c3
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c8
-rw-r--r--drivers/gpu/drm/stm/ltdc.c16
-rw-r--r--drivers/gpu/drm/stm/ltdc.h2
-rw-r--r--drivers/gpu/drm/sun4i/Makefile33
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c113
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c32
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c76
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h112
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c39
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c277
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c227
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c69
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c29
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c466
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h30
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c12
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c84
-rw-r--r--drivers/gpu/drm/tegra/dc.h120
-rw-r--r--drivers/gpu/drm/tegra/drm.c30
-rw-r--r--drivers/gpu/drm/tegra/drm.h106
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c2
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c17
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/sor.c162
-rw-r--r--drivers/gpu/drm/tegra/vic.c22
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig11
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c57
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c266
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts72
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h25
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c4
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c3
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c6
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c22
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c7
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c45
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c25
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c43
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c497
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c256
-rw-r--r--drivers/gpu/drm/tve200/Kconfig16
-rw-r--r--drivers/gpu/drm/tve200/Makefile4
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c338
-rw-r--r--drivers/gpu/drm/tve200/tve200_drm.h126
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c303
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c156
-rw-r--r--drivers/gpu/drm/udl/udl_connector.h13
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c4
-rw-r--r--drivers/gpu/drm/udl/udl_main.c5
-rw-r--r--drivers/gpu/drm/vc4/Makefile2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c297
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c10
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h30
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c144
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c170
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c40
-rw-r--r--drivers/gpu/drm/vc4/vc4_trace.h2
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c6
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c13
-rw-r--r--drivers/gpu/drm/via/via_verifier.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c4
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c3
-rw-r--r--drivers/gpu/host1x/Makefile3
-rw-r--r--drivers/gpu/host1x/bus.c3
-rw-r--r--drivers/gpu/host1x/channel.c3
-rw-r--r--drivers/gpu/host1x/debug.c14
-rw-r--r--drivers/gpu/host1x/debug.h14
-rw-r--r--drivers/gpu/host1x/dev.c69
-rw-r--r--drivers/gpu/host1x/dev.h19
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c49
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c24
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c240
-rw-r--r--drivers/gpu/host1x/hw/debug_hw_1x01.c154
-rw-r--r--drivers/gpu/host1x/hw/debug_hw_1x06.c135
-rw-r--r--drivers/gpu/host1x/hw/host1x01.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x02.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x04.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x05.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x06.c44
-rw-r--r--drivers/gpu/host1x/hw/host1x06.h26
-rw-r--r--drivers/gpu/host1x/hw/host1x06_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_channel.h12
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x05_channel.h12
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h32
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_uclass.h181
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_vm.h47
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c29
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c46
-rw-r--r--drivers/gpu/host1x/syncpt.c24
-rw-r--r--drivers/gpu/host1x/syncpt.h2
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c3
-rw-r--r--drivers/gpu/vga/vgaarb.c72
-rw-r--r--drivers/hid/hid-appleir.c7
-rw-r--r--drivers/hid/hid-prodikeys.c7
-rw-r--r--drivers/hid/hid-wiimote-core.c6
-rw-r--r--drivers/hv/Makefile4
-rw-r--r--drivers/hv/channel.c33
-rw-r--r--drivers/hv/channel_mgmt.c43
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv_trace.c4
-rw-r--r--drivers/hv/hv_trace.h327
-rw-r--r--drivers/hv/hyperv_vmbus.h4
-rw-r--r--drivers/hv/vmbus_drv.c209
-rw-r--r--drivers/hwmon/jc42.c21
-rw-r--r--drivers/hwmon/k10temp.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c21
-rw-r--r--drivers/hwmon/w83781d.c12
-rw-r--r--drivers/hwmon/w83791d.c15
-rw-r--r--drivers/hwmon/w83792d.c15
-rw-r--r--drivers/hwmon/w83793.c15
-rw-r--r--drivers/hwspinlock/Kconfig2
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c24
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c4
-rw-r--r--drivers/hwtracing/stm/ftrace.c6
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c3
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32.h3
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c3
-rw-r--r--drivers/i2c/i2c-boardinfo.c2
-rw-r--r--drivers/i2c/i2c-dev.c268
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/ide/ide-pnp.c2
-rw-r--r--drivers/iio/adc/cpcap-adc.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c52
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c6
-rw-r--r--drivers/iio/health/max30102.c2
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/iio/proximity/sx9500.c9
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/cma.c3
-rw-r--r--drivers/infiniband/core/device.c4
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/nldev.c2
-rw-r--r--drivers/infiniband/core/security.c57
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c22
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c20
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c23
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c25
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c31
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c43
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h6
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c26
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c1
-rw-r--r--drivers/input/gameport/gameport.c7
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/db9.c6
-rw-r--r--drivers/input/joystick/gamecon.c6
-rw-r--r--drivers/input/joystick/turbografx.c6
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c2
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/iommu/iova.c8
-rw-r--r--drivers/irqchip/Kconfig7
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-bcm2836.c79
-rw-r--r--drivers/irqchip/irq-gic-v3.c19
-rw-r--r--drivers/irqchip/irq-gic-v4.c7
-rw-r--r--drivers/irqchip/irq-gic.c6
-rw-r--r--drivers/irqchip/irq-imgpdc.c2
-rw-r--r--drivers/irqchip/irq-s3c24xx.c4
-rw-r--r--drivers/irqchip/irq-sni-exiu.c4
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c2
-rw-r--r--drivers/isdn/capi/capidrv.c6
-rw-r--r--drivers/isdn/divert/isdn_divert.c9
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c9
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c10
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c10
-rw-r--r--drivers/isdn/hisax/asuscom.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c2
-rw-r--r--drivers/isdn/hisax/diva.c2
-rw-r--r--drivers/isdn/hisax/elsa.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/hfcscard.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c2
-rw-r--r--drivers/isdn/hisax/isurf.c2
-rw-r--r--drivers/isdn/hisax/ix1_micro.c2
-rw-r--r--drivers/isdn/hisax/niccy.c2
-rw-r--r--drivers/isdn/hisax/sedlbauer.c2
-rw-r--r--drivers/isdn/hisax/teles3.c2
-rw-r--r--drivers/isdn/i4l/isdn_common.c5
-rw-r--r--drivers/isdn/i4l/isdn_net.c9
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c9
-rw-r--r--drivers/isdn/i4l/isdn_tty.c7
-rw-r--r--drivers/lightnvm/pblk-core.c4
-rw-r--r--drivers/lightnvm/pblk-gc.c6
-rw-r--r--drivers/lightnvm/pblk-init.c2
-rw-r--r--drivers/lightnvm/pblk-rl.c6
-rw-r--r--drivers/lightnvm/pblk.h2
-rw-r--r--drivers/lightnvm/rrpc.c6
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/btree.c5
-rw-r--r--drivers/md/bcache/extents.c2
-rw-r--r--drivers/md/bcache/journal.c7
-rw-r--r--drivers/md/bcache/request.c13
-rw-r--r--drivers/md/bcache/stats.c8
-rw-r--r--drivers/md/dm-bufio.c23
-rw-r--r--drivers/md/dm-cache-target.c12
-rw-r--r--drivers/md/dm-delay.c6
-rw-r--r--drivers/md/dm-era-target.c1
-rw-r--r--drivers/md/dm-integrity.c6
-rw-r--r--drivers/md/dm-mpath.c69
-rw-r--r--drivers/md/dm-raid.c6
-rw-r--r--drivers/md/dm-raid1.c8
-rw-r--r--drivers/md/dm-snap.c48
-rw-r--r--drivers/md/dm-table.c48
-rw-r--r--drivers/md/dm-thin.c22
-rw-r--r--drivers/md/md.c13
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/md/raid5-cache.c22
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/cec/cec-adap.c18
-rw-r--r--drivers/media/cec/cec-api.c19
-rw-r--r--drivers/media/cec/cec-core.c9
-rw-r--r--drivers/media/cec/cec-pin-priv.h133
-rw-r--r--drivers/media/cec/cec-pin.c40
-rw-r--r--drivers/media/common/cypress_firmware.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c12
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c5
-rw-r--r--drivers/media/common/siano/smscoreapi.c105
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c12
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dmxdev.h90
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c68
-rw-r--r--drivers/media/dvb-core/dvb_demux.c17
-rw-r--r--drivers/media/dvb-core/dvb_demux.h248
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c531
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h117
-rw-r--r--drivers/media/dvb-core/dvb_net.c15
-rw-r--r--drivers/media/dvb-core/dvb_net.h34
-rw-r--r--drivers/media/dvb-core/dvbdev.c32
-rw-r--r--drivers/media/dvb-core/dvbdev.h137
-rw-r--r--drivers/media/dvb-frontends/Kconfig6
-rw-r--r--drivers/media/dvb-frontends/af9013.h24
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c7
-rw-r--r--drivers/media/dvb-frontends/ascot2e.h9
-rw-r--r--drivers/media/dvb-frontends/cx24113.c10
-rw-r--r--drivers/media/dvb-frontends/cx24116.c22
-rw-r--r--drivers/media/dvb-frontends/cxd2820r.h24
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h12
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h878
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c248
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.h220
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb-frontends/drxk.h13
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c32
-rw-r--r--drivers/media/dvb-frontends/ds3000.c22
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.h13
-rw-r--r--drivers/media/dvb-frontends/helene.h30
-rw-r--r--drivers/media/dvb-frontends/horus3a.h9
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c6
-rw-r--r--drivers/media/dvb-frontends/ix2505v.h28
-rw-r--r--drivers/media/dvb-frontends/l64781.c2
-rw-r--r--drivers/media/dvb-frontends/lg2160.c14
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c3
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.h155
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c23
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.h17
-rw-r--r--drivers/media/dvb-frontends/mn88472.h16
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2830.h1
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h1
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.h6
-rw-r--r--drivers/media/dvb-frontends/si2168.c1
-rw-r--r--drivers/media/dvb-frontends/sp2.c9
-rw-r--r--drivers/media/dvb-frontends/sp887x.c6
-rw-r--r--drivers/media/dvb-frontends/stb6000.h11
-rw-r--r--drivers/media/dvb-frontends/stv0288.c7
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110.c9
-rw-r--r--drivers/media/dvb-frontends/tda10071.h1
-rw-r--r--drivers/media/dvb-frontends/tda826x.h11
-rw-r--r--drivers/media/dvb-frontends/tua6100.c2
-rw-r--r--drivers/media/dvb-frontends/tua6100.h2
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.h13
-rw-r--r--drivers/media/dvb-frontends/zl10036.c8
-rw-r--r--drivers/media/dvb-frontends/zl10036.h16
-rw-r--r--drivers/media/i2c/Kconfig17
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c7
-rw-r--r--drivers/media/i2c/adv7604.c10
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/dw9714.c7
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig1
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c2
-rw-r--r--drivers/media/i2c/imx274.c1810
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/lm3560.c1
-rw-r--r--drivers/media/i2c/m5mols/m5mols_capture.c5
-rw-r--r--drivers/media/i2c/m5mols/m5mols_controls.c1
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c20
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/mt9m111.c2
-rw-r--r--drivers/media/i2c/ov13858.c61
-rw-r--r--drivers/media/i2c/ov2640.c17
-rw-r--r--drivers/media/i2c/ov5640.c2
-rw-r--r--drivers/media/i2c/ov5647.c55
-rw-r--r--drivers/media/i2c/ov5670.c37
-rw-r--r--drivers/media/i2c/ov6650.c5
-rw-r--r--drivers/media/i2c/ov7670.c129
-rw-r--r--drivers/media/i2c/ov9650.c1
-rw-r--r--drivers/media/i2c/s5k6a3.c3
-rw-r--r--drivers/media/i2c/s5k6aa.c5
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c149
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c3
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h1
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c11
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c6
-rw-r--r--drivers/media/i2c/tc358743.c220
-rw-r--r--drivers/media/i2c/tc358743_regs.h94
-rw-r--r--drivers/media/i2c/tvaudio.c8
-rw-r--r--drivers/media/i2c/tvp514x.c12
-rw-r--r--drivers/media/media-entity.c13
-rw-r--r--drivers/media/pci/b2c2/Kconfig4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c19
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c2
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h3
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c5
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c28
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-io.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c9
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c8
-rw-r--r--drivers/media/pci/meye/meye.c20
-rw-r--r--drivers/media/pci/netup_unidvb/Kconfig12
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c15
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134.h4
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c7
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c10
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-enc.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c11
-rw-r--r--drivers/media/pci/ttpci/av7110.c8
-rw-r--r--drivers/media/pci/ttpci/budget-core.c2
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c33
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c7
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/platform/Kconfig36
-rw-r--r--drivers/media/platform/Makefile6
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c8
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h1
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c652
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c24
-rw-r--r--drivers/media/platform/blackfin/ppi.c1
-rw-r--r--drivers/media/platform/cec-gpio/Makefile1
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c239
-rw-r--r--drivers/media/platform/coda/coda-bit.c4
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h4
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/isif.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c37
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c6
-rw-r--r--drivers/media/platform/davinci/vpif.c3
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c35
-rw-r--r--drivers/media/platform/davinci/vpif_display.c24
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c127
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c3
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c19
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c2
-rw-r--r--drivers/media/platform/fsl-viu.c7
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c2
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c3
-rw-r--r--drivers/media/platform/omap/omap_vout.c3
-rw-r--r--drivers/media/platform/omap3isp/isp.c133
-rw-r--r--drivers/media/platform/omap3isp/isp.h5
-rw-r--r--drivers/media/platform/pxa_camera.c17
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c3
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.c1
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.c8
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c9
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c12
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c34
-rw-r--r--drivers/media/platform/qcom/venus/venc.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c117
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c10
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c14
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h4
-rw-r--r--drivers/media/platform/rcar_drif.c12
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/rockchip/rga/Makefile3
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c154
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c421
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h437
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c1010
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h125
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c26
-rw-r--r--drivers/media/platform/sh_veu.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c14
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c21
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c7
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c23
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c10
-rw-r--r--drivers/media/platform/tegra-cec/Makefile1
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c495
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.h127
-rw-r--r--drivers/media/platform/ti-vpe/cal.c8
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/vim2m.c8
-rw-r--r--drivers/media/platform/vimc/vimc-core.c5
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c16
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c1
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c8
-rw-r--r--drivers/media/radio/radio-cadet.c7
-rw-r--r--drivers/media/radio/radio-raremono.c2
-rw-r--r--drivers/media/radio/radio-si476x.c18
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h2
-rw-r--r--drivers/media/radio/wl128x/Kconfig10
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c7
-rw-r--r--drivers/media/rc/Kconfig16
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c7
-rw-r--r--drivers/media/rc/gpio-ir-recv.c192
-rw-r--r--drivers/media/rc/igorplugusb.c8
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c5
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c15
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c6
-rw-r--r--drivers/media/rc/imon.c70
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c2
-rw-r--r--drivers/media/rc/ir-lirc-codec.c69
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c7
-rw-r--r--drivers/media/rc/ir-nec-decoder.c32
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c2
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c2
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c2
-rw-r--r--drivers/media/rc/keymaps/Makefile4
-rw-r--r--drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c70
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c3
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-poplar.c69
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-tv-demo.c81
-rw-r--r--drivers/media/rc/keymaps/rc-tango.c92
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c2
-rw-r--r--drivers/media/rc/lirc_dev.c515
-rw-r--r--drivers/media/rc/mceusb.c20
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-ir-raw.c10
-rw-r--r--drivers/media/rc/rc-main.c157
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/serial_ir.c5
-rw-r--r--drivers/media/rc/sir_ir.c44
-rw-r--r--drivers/media/rc/st_rc.c6
-rw-r--r--drivers/media/rc/streamzap.c8
-rw-r--r--drivers/media/rc/tango-ir.c281
-rw-r--r--drivers/media/tuners/mt2063.c6
-rw-r--r--drivers/media/usb/as102/as102_fw.c28
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c8
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c2
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c18
-rw-r--r--drivers/media/usb/au0828/au0828.h2
-rw-r--r--drivers/media/usb/b2c2/Kconfig6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c65
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c32
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c16
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h1
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c26
-rw-r--r--drivers/media/usb/dvb-usb/friio.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c88
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h2
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/gspca/Kconfig16
-rw-r--r--drivers/media/usb/gspca/gspca.c1
-rw-r--r--drivers/media/usb/gspca/ov519.c24
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig1
-rw-r--r--drivers/media/usb/pwc/pwc-dec23.c7
-rw-r--r--drivers/media/usb/pwc/pwc-if.c3
-rw-r--r--drivers/media/usb/s2255/s2255drv.c7
-rw-r--r--drivers/media/usb/siano/smsusb.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c27
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c15
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c21
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c6
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c3
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c4
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c32
-rw-r--r--drivers/media/v4l2-core/tuner-core.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c515
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c22
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c702
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c56
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c10
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/brcmstb_dpfe.c722
-rw-r--r--drivers/memory/omap-gpmc.c54
-rw-r--r--drivers/memstick/core/ms_block.c7
-rw-r--r--drivers/mfd/Kconfig36
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/axp20x.c2
-rw-r--r--drivers/mfd/cros_ec_spi.c53
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c17
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c184
-rw-r--r--drivers/mfd/lpc_ich.c1
-rw-r--r--drivers/mfd/max77693.c5
-rw-r--r--drivers/mfd/mxs-lradc.c6
-rw-r--r--drivers/mfd/rts5249.c155
-rw-r--r--drivers/mfd/rtsx_pcr.c142
-rw-r--r--drivers/mfd/rtsx_pcr.h14
-rw-r--r--drivers/mfd/rtsx_usb.c6
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c259
-rw-r--r--drivers/mfd/ssbi.c2
-rw-r--r--drivers/mfd/stw481x.c10
-rw-r--r--drivers/mfd/tps65217.c28
-rw-r--r--drivers/mfd/tps65218.c8
-rw-r--r--drivers/mfd/twl4030-audio.c9
-rw-r--r--drivers/mfd/twl6040.c12
-rw-r--r--drivers/misc/altera-stapl/Kconfig3
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cxl/api.c16
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h22
-rw-r--r--drivers/misc/cxl/debugfs.c29
-rw-r--r--drivers/misc/cxl/fault.c15
-rw-r--r--drivers/misc/cxl/file.c24
-rw-r--r--drivers/misc/cxl/native.c27
-rw-r--r--drivers/misc/cxl/pci.c100
-rw-r--r--drivers/misc/eeprom/at24.c41
-rw-r--r--drivers/misc/genwqe/card_base.h7
-rw-r--r--drivers/misc/genwqe/card_dev.c6
-rw-r--r--drivers/misc/genwqe/card_utils.c43
-rw-r--r--drivers/misc/lkdtm_bugs.c4
-rw-r--r--drivers/misc/lkdtm_core.c18
-rw-r--r--drivers/misc/mei/mei-trace.c1
-rw-r--r--drivers/misc/mei/mei-trace.h19
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/pti.c2
-rw-r--r--drivers/mmc/core/block.c67
-rw-r--r--drivers/mmc/core/bus.c3
-rw-r--r--drivers/mmc/core/card.h2
-rw-r--r--drivers/mmc/core/debugfs.c1
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/quirks.h8
-rw-r--r--drivers/mmc/core/sd.c2
-rw-r--r--drivers/mmc/host/sdhci-msm.c14
-rw-r--r--drivers/mmc/host/sdhci.c28
-rw-r--r--drivers/mtd/Kconfig1
-rw-r--r--drivers/mtd/chips/map_ram.c34
-rw-r--r--drivers/mtd/chips/map_rom.c34
-rw-r--r--drivers/mtd/devices/docg3.c7
-rw-r--r--drivers/mtd/devices/lart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/mtdram.c36
-rw-r--r--drivers/mtd/devices/powernv_flash.c83
-rw-r--r--drivers/mtd/devices/slram.c9
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/netsc520.c2
-rw-r--r--drivers/mtd/maps/nettel.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c38
-rw-r--r--drivers/mtd/maps/sbc_gxx.c2
-rw-r--r--drivers/mtd/maps/ts5500_flash.c2
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/mtdchar.c24
-rw-r--r--drivers/mtd/mtdconcat.c27
-rw-r--r--drivers/mtd/mtdcore.c61
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/mtdsuper.c6
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/ams-delta.c2
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c7
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c17
-rw-r--r--drivers/mtd/nand/atmel/pmecc.h1
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/denali.c291
-rw-r--r--drivers/mtd/nand/denali.h44
-rw-r--r--drivers/mtd/nand/denali_dt.c4
-rw-r--r--drivers/mtd/nand/denali_pci.c5
-rw-r--r--drivers/mtd/nand/diskonchip.c3
-rw-r--r--drivers/mtd/nand/gpio.c112
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c6
-rw-r--r--drivers/mtd/nand/hisi504_nand.c3
-rw-r--r--drivers/mtd/nand/mtk_ecc.c13
-rw-r--r--drivers/mtd/nand/mxc_nand.c19
-rw-r--r--drivers/mtd/nand/nand_base.c34
-rw-r--r--drivers/mtd/nand/nandsim.c13
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c377
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c41
-rw-r--r--drivers/mtd/nand/qcom_nandc.c127
-rw-r--r--drivers/mtd/nand/sh_flctl.c9
-rw-r--r--drivers/mtd/parsers/Kconfig8
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/sharpslpart.c398
-rw-r--r--drivers/mtd/sm_ftl.c6
-rw-r--r--drivers/mtd/spi-nor/Kconfig6
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c55
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c3
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c209
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c70
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c105
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c35
-rw-r--r--drivers/net/bonding/bond_netlink.c2
-rw-r--r--drivers/net/caif/caif_hsi.c21
-rw-r--r--drivers/net/can/flexcan.c9
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c9
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c5
-rw-r--r--drivers/net/can/sja1000/peak_pci.c5
-rw-r--r--drivers/net/can/ti_hecc.c3
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c13
-rw-r--r--drivers/net/can/usb/mcba_usb.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c2
-rw-r--r--drivers/net/cris/eth_v10.c36
-rw-r--r--drivers/net/dsa/bcm_sf2.c3
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c34
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c7
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c1
-rw-r--r--drivers/net/eql.c6
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c9
-rw-r--r--drivers/net/ethernet/agere/et131x.c7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c20
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c96
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c80
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h6
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c13
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c15
-rw-r--r--drivers/net/ethernet/broadcom/b44.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c55
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c7
-rw-r--r--drivers/net/ethernet/fealnx.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c11
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c9
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c166
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.c38
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c13
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c3
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c50
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/skge.c7
-rw-r--r--drivers/net/ethernet/marvell/sky2.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c109
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c8
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c3
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c55
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c27
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c41
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c7
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c16
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c6
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c7
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c16
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/hamradio/scc.c8
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c2
-rw-r--r--drivers/net/hyperv/rndis_filter.c136
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c37
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c104
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/cortina.c4
-rw-r--r--drivers/net/phy/marvell.c4
-rw-r--r--drivers/net/phy/marvell10g.c5
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/meson-gxl.c74
-rw-r--r--drivers/net/phy/micrel.c6
-rw-r--r--drivers/net/phy/phy.c9
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c1
-rw-r--r--drivers/net/phy/sfp.c41
-rw-r--r--drivers/net/slip/slip.c16
-rw-r--r--drivers/net/tap.c16
-rw-r--r--drivers/net/thunderbolt.c57
-rw-r--r--drivers/net/tun.c37
-rw-r--r--drivers/net/usb/hso.c3
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/qmi_wwan.c5
-rw-r--r--drivers/net/usb/usbnet.c5
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wan/hdlc_ppp.c6
-rw-r--r--drivers/net/wan/lmc/lmc_main.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c2
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c134
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c7
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c17
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c2
-rw-r--r--drivers/net/wireless/ray_cs.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c12
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c7
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netfront.c25
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c7
-rw-r--r--drivers/nfc/pn533/pn533.c8
-rw-r--r--drivers/nfc/st-nci/ndlc.c17
-rw-r--r--drivers/nfc/st-nci/se.c19
-rw-r--r--drivers/nfc/st21nfca/se.c19
-rw-r--r--drivers/ntb/hw/Kconfig1
-rw-r--r--drivers/ntb/hw/Makefile1
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c16
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c75
-rw-r--r--drivers/ntb/hw/mscc/Kconfig9
-rw-r--r--drivers/ntb/hw/mscc/Makefile1
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c1216
-rw-r--r--drivers/ntb/ntb_transport.c20
-rw-r--r--drivers/ntb/test/ntb_perf.c18
-rw-r--r--drivers/ntb/test/ntb_pingpong.c8
-rw-r--r--drivers/ntb/test/ntb_tool.c6
-rw-r--r--drivers/nvdimm/Makefile1
-rw-r--r--drivers/nvdimm/badrange.c293
-rw-r--r--drivers/nvdimm/btt.c3
-rw-r--r--drivers/nvdimm/bus.c24
-rw-r--r--drivers/nvdimm/core.c260
-rw-r--r--drivers/nvdimm/dimm.c3
-rw-r--r--drivers/nvdimm/dimm_devs.c19
-rw-r--r--drivers/nvdimm/label.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c6
-rw-r--r--drivers/nvdimm/nd-core.h3
-rw-r--r--drivers/nvdimm/nd.h7
-rw-r--r--drivers/nvdimm/pfn_devs.c8
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvdimm/region_devs.c8
-rw-r--r--drivers/nvme/host/core.c19
-rw-r--r--drivers/nvme/host/fabrics.h30
-rw-r--r--drivers/nvme/host/fc.c21
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/host/pci.c17
-rw-r--r--drivers/nvme/host/rdma.c266
-rw-r--r--drivers/nvme/target/fc.c15
-rw-r--r--drivers/nvme/target/loop.c25
-rw-r--r--drivers/nvmem/Kconfig35
-rw-r--r--drivers/nvmem/Makefile6
-rw-r--r--drivers/nvmem/bcm-ocotp.c1
-rw-r--r--drivers/nvmem/core.c13
-rw-r--r--drivers/nvmem/imx-iim.c24
-rw-r--r--drivers/nvmem/imx-ocotp.c193
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c1
-rw-r--r--drivers/nvmem/lpc18xx_otp.c1
-rw-r--r--drivers/nvmem/meson-efuse.c5
-rw-r--r--drivers/nvmem/meson-mx-efuse.c265
-rw-r--r--drivers/nvmem/mtk-efuse.c47
-rw-r--r--drivers/nvmem/mxs-ocotp.c1
-rw-r--r--drivers/nvmem/qfprom.c27
-rw-r--r--drivers/nvmem/rockchip-efuse.c5
-rw-r--r--drivers/nvmem/snvs_lpgpr.c156
-rw-r--r--drivers/nvmem/sunxi_sid.c7
-rw-r--r--drivers/nvmem/uniphier-efuse.c97
-rw-r--r--drivers/nvmem/vf610-ocotp.c1
-rw-r--r--drivers/of/base.c6
-rw-r--r--drivers/of/dynamic.c4
-rw-r--r--drivers/of/of_mdio.c3
-rw-r--r--drivers/of/of_pci.c2
-rw-r--r--drivers/of/of_reserved_mem.c26
-rw-r--r--drivers/of/overlay.c84
-rw-r--r--drivers/of/platform.c19
-rw-r--r--drivers/of/unittest-data/Makefile1
-rw-r--r--drivers/of/unittest-data/testcases.dts65
-rw-r--r--drivers/of/unittest.c1
-rw-r--r--drivers/parisc/lba_pci.c33
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/pci/Kconfig9
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/host/pcie-rcar.c8
-rw-r--r--drivers/pci/htirq.c135
-rw-r--r--drivers/pci/pci-driver.c9
-rw-r--r--drivers/pci/switch/switchtec.c316
-rw-r--r--drivers/pcmcia/cistpl.c2
-rw-r--r--drivers/pcmcia/cs_internal.h2
-rw-r--r--drivers/pcmcia/m32r_cfc.c7
-rw-r--r--drivers/pcmcia/m32r_pcc.c7
-rw-r--r--drivers/pcmcia/sa1111_badge4.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c33
-rw-r--r--drivers/pinctrl/Kconfig32
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c21
-rw-r--r--drivers/pinctrl/core.c12
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/intel/Kconfig11
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c375
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c20
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h3
-rw-r--r--drivers/pinctrl/meson/Kconfig41
-rw-r--r--drivers/pinctrl/meson/Makefile9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c892
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c852
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c200
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h47
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c108
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.h48
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c992
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c808
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c77
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c4
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c333
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c2
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c21
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c20
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c43
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c134
-rw-r--r--drivers/pinctrl/pinctrl-single.c4
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c6
-rw-r--r--drivers/pinctrl/samsung/Kconfig2
-rw-r--r--drivers/pinctrl/sh-pfc/core.c131
-rw-r--r--drivers/pinctrl/sh-pfc/core.h11
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c403
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c1
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c542
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c1904
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c573
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c394
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c12
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h24
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c4
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c6
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c13
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h1
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c14
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c4
-rw-r--r--drivers/platform/x86/Kconfig61
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/asus-wireless.c1
-rw-r--r--drivers/platform/x86/asus-wmi.c63
-rw-r--r--drivers/platform/x86/dell-laptop.c303
-rw-r--r--drivers/platform/x86/dell-smbios-smm.c196
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c285
-rw-r--r--drivers/platform/x86/dell-smbios.c512
-rw-r--r--drivers/platform/x86/dell-smbios.h49
-rw-r--r--drivers/platform/x86/dell-smo8800.c3
-rw-r--r--drivers/platform/x86/dell-wmi-descriptor.c213
-rw-r--r--drivers/platform/x86/dell-wmi-descriptor.h28
-rw-r--r--drivers/platform/x86/dell-wmi.c99
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c14
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/hp_accel.c1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel-hid.c18
-rw-r--r--drivers/platform/x86/intel-wmi-thunderbolt.c98
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c114
-rw-r--r--drivers/platform/x86/intel_ips.c160
-rw-r--r--drivers/platform/x86/intel_ips.h4
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c8
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c3
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c24
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c25
-rw-r--r--drivers/platform/x86/intel_turbo_max_3.c1
-rw-r--r--drivers/platform/x86/mlx-platform.c4
-rw-r--r--drivers/platform/x86/peaq-wmi.c19
-rw-r--r--drivers/platform/x86/silead_dmi.c52
-rw-r--r--drivers/platform/x86/sony-laptop.c20
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c132
-rw-r--r--drivers/platform/x86/wmi.c254
-rw-r--r--drivers/pps/clients/pps-ktimer.c4
-rw-r--r--drivers/ptp/ptp_kvm.c5
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c63
-rw-r--r--drivers/pwm/pwm-img.c160
-rw-r--r--drivers/pwm/pwm-mediatek.c53
-rw-r--r--drivers/pwm/pwm-stm32-lp.c3
-rw-r--r--drivers/pwm/pwm-sun4i.c8
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c15
-rw-r--r--drivers/rapidio/switches/idt_gen2.c2
-rw-r--r--drivers/rapidio/switches/idt_gen3.c2
-rw-r--r--drivers/rapidio/switches/idtcps.c2
-rw-r--r--drivers/rapidio/switches/tsi568.c2
-rw-r--r--drivers/rapidio/switches/tsi57x.c2
-rw-r--r--drivers/regulator/tps65217-regulator.c5
-rw-r--r--drivers/remoteproc/Kconfig4
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c291
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c130
-rw-r--r--drivers/reset/Kconfig30
-rw-r--r--drivers/reset/Makefile5
-rw-r--r--drivers/reset/reset-axs10x.c83
-rw-r--r--drivers/reset/reset-meson.c65
-rw-r--r--drivers/reset/reset-simple.c186
-rw-r--r--drivers/reset/reset-simple.h45
-rw-r--r--drivers/reset/reset-socfpga.c157
-rw-r--r--drivers/reset/reset-stm32.c108
-rw-r--r--drivers/reset/reset-sunxi.c104
-rw-r--r--drivers/reset/reset-uniphier.c30
-rw-r--r--drivers/reset/reset-zx2967.c99
-rw-r--r--drivers/rpmsg/Kconfig3
-rw-r--r--drivers/rpmsg/qcom_glink_native.c51
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/interface.c6
-rw-r--r--drivers/rtc/rtc-abx80x.c12
-rw-r--r--drivers/rtc/rtc-armada38x.c101
-rw-r--r--drivers/rtc/rtc-at91rm9200.c19
-rw-r--r--drivers/rtc/rtc-dev.c6
-rw-r--r--drivers/rtc/rtc-ds1305.c70
-rw-r--r--drivers/rtc/rtc-ds1307.c57
-rw-r--r--drivers/rtc/rtc-ds1390.c7
-rw-r--r--drivers/rtc/rtc-ds1511.c75
-rw-r--r--drivers/rtc/rtc-jz4740.c6
-rw-r--r--drivers/rtc/rtc-m41t80.c84
-rw-r--r--drivers/rtc/rtc-m48t86.c58
-rw-r--r--drivers/rtc/rtc-mt7622.c422
-rw-r--r--drivers/rtc/rtc-omap.c57
-rw-r--r--drivers/rtc/rtc-pcf8523.c40
-rw-r--r--drivers/rtc/rtc-pcf85363.c220
-rw-r--r--drivers/rtc/rtc-pcf8563.c4
-rw-r--r--drivers/rtc/rtc-pl031.c48
-rw-r--r--drivers/rtc/rtc-rv3029c2.c18
-rw-r--r--drivers/rtc/rtc-rx8010.c7
-rw-r--r--drivers/rtc/rtc-sc27xx.c662
-rw-r--r--drivers/rtc/rtc-sysfs.c25
-rw-r--r--drivers/rtc/rtc-xgene.c47
-rw-r--r--drivers/s390/Makefile1
-rw-r--r--drivers/s390/block/Kconfig1
-rw-r--r--drivers/s390/block/dasd.c31
-rw-r--r--drivers/s390/block/dasd_devmap.c1
-rw-r--r--drivers/s390/block/dasd_diag.c1
-rw-r--r--drivers/s390/block/dasd_eckd.c17
-rw-r--r--drivers/s390/block/dasd_fba.c1
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c1
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/block/xpram.c1
-rw-r--r--drivers/s390/char/Kconfig1
-rw-r--r--drivers/s390/char/con3215.c6
-rw-r--r--drivers/s390/char/con3270.c10
-rw-r--r--drivers/s390/char/defkeymap.map1
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/hmcdrv_mod.c1
-rw-r--r--drivers/s390/char/monreader.c1
-rw-r--r--drivers/s390/char/monwriter.c1
-rw-r--r--drivers/s390/char/raw3270.c1
-rw-r--r--drivers/s390/char/sclp.c45
-rw-r--r--drivers/s390/char/sclp_async.c1
-rw-r--r--drivers/s390/char/sclp_con.c5
-rw-r--r--drivers/s390/char/sclp_tty.c5
-rw-r--r--drivers/s390/char/sclp_vt220.c6
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c1
-rw-r--r--drivers/s390/char/tape_class.c1
-rw-r--r--drivers/s390/char/tape_core.c15
-rw-r--r--drivers/s390/char/tty3270.c9
-rw-r--r--drivers/s390/char/vmlogrdr.c1
-rw-r--r--drivers/s390/char/vmur.c1
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/blacklist.h1
-rw-r--r--drivers/s390/cio/ccwgroup.c1
-rw-r--r--drivers/s390/cio/chp.c1
-rw-r--r--drivers/s390/cio/chsc.c1
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/cio.c1
-rw-r--r--drivers/s390/cio/cmf.c15
-rw-r--r--drivers/s390/cio/css.c3
-rw-r--r--drivers/s390/cio/device.c11
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/device_fsm.c11
-rw-r--r--drivers/s390/cio/device_ops.c3
-rw-r--r--drivers/s390/cio/eadm_sch.c10
-rw-r--r--drivers/s390/cio/isc.c1
-rw-r--r--drivers/s390/cio/qdio.h2
-rw-r--r--drivers/s390/cio/qdio_main.c12
-rw-r--r--drivers/s390/cio/qdio_setup.c4
-rw-r--r--drivers/s390/cio/scm.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c1
-rw-r--r--drivers/s390/crypto/ap_bus.c25
-rw-r--r--drivers/s390/crypto/ap_bus.h17
-rw-r--r--drivers/s390/crypto/ap_queue.c2
-rw-r--r--drivers/s390/crypto/pkey_api.c6
-rw-r--r--drivers/s390/crypto/zcrypt_api.c19
-rw-r--r--drivers/s390/crypto/zcrypt_api.h15
-rw-r--r--drivers/s390/crypto/zcrypt_card.c11
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h15
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c15
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.h15
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c1
-rw-r--r--drivers/s390/crypto/zcrypt_error.h15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h15
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c15
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h15
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c11
-rw-r--r--drivers/s390/net/Kconfig1
-rw-r--r--drivers/s390/net/ctcm_main.c1
-rw-r--r--drivers/s390/net/fsm.c12
-rw-r--r--drivers/s390/net/lcs.c15
-rw-r--r--drivers/s390/net/netiucv.c16
-rw-r--r--drivers/s390/net/qeth_core.h9
-rw-r--r--drivers/s390/net/qeth_core_main.c38
-rw-r--r--drivers/s390/net/qeth_core_sys.c1
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c44
-rw-r--r--drivers/s390/net/qeth_l3_sys.c75
-rw-r--r--drivers/s390/net/smsgiucv.c15
-rw-r--r--drivers/s390/net/smsgiucv_app.c1
-rw-r--r--drivers/s390/scsi/Makefile1
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_erp.c15
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c10
-rw-r--r--drivers/s390/virtio/Makefile5
-rw-r--r--drivers/s390/virtio/virtio_ccw.c5
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c14
-rw-r--r--drivers/scsi/arm/fas216.c8
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c16
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c4
-rw-r--r--drivers/scsi/cxlflash/main.c6
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c10
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c8
-rw-r--r--drivers/scsi/fnic/fnic_main.c14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/ipr.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/ncr53c8xx.c8
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pmcraid.c10
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c8
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/Kconfig21
-rw-r--r--drivers/soc/amlogic/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c243
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c4
-rw-r--r--drivers/soc/amlogic/meson-mx-socinfo.c175
-rw-r--r--drivers/soc/atmel/soc.c8
-rw-r--r--drivers/soc/atmel/soc.h4
-rw-r--r--drivers/soc/bcm/Kconfig2
-rw-r--r--drivers/soc/bcm/brcmstb/Kconfig10
-rw-r--r--drivers/soc/bcm/brcmstb/Makefile1
-rw-r--r--drivers/soc/bcm/brcmstb/pm/Makefile3
-rw-r--r--drivers/soc/bcm/brcmstb/pm/aon_defs.h113
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c822
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-mips.c461
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm.h89
-rw-r--r--drivers/soc/bcm/brcmstb/pm/s2-arm.S76
-rw-r--r--drivers/soc/bcm/brcmstb/pm/s2-mips.S200
-rw-r--r--drivers/soc/bcm/brcmstb/pm/s3-mips.S146
-rw-r--r--drivers/soc/fsl/guts.c1
-rw-r--r--drivers/soc/fsl/qbman/Kconfig2
-rw-r--r--drivers/soc/fsl/qbman/Makefile2
-rw-r--r--drivers/soc/fsl/qbman/bman.c42
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c15
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c23
-rw-r--r--drivers/soc/fsl/qbman/bman_priv.h8
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c78
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h25
-rw-r--r--drivers/soc/fsl/qbman/qman.c83
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c95
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c23
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h11
-rw-r--r--drivers/soc/fsl/qbman/qman_test.h2
-rw-r--r--drivers/soc/mediatek/Kconfig8
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c511
-rw-r--r--drivers/soc/qcom/Kconfig11
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c269
-rw-r--r--drivers/soc/qcom/smem.c335
-rw-r--r--drivers/soc/renesas/Kconfig8
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a77970-sysc.c39
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/rcar-sysc.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/samsung/exynos-pmu.c9
-rw-r--r--drivers/soc/samsung/exynos-pmu.h2
-rw-r--r--drivers/soc/samsung/exynos4-pmu.c13
-rw-r--r--drivers/soc/tegra/powergate-bpmp.c15
-rw-r--r--drivers/spi/spi-armada-3700.c8
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-rspi.c4
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-xilinx.c11
-rw-r--r--drivers/staging/ccree/ssi_hash.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c5
-rw-r--r--drivers/staging/greybus/operation.c7
-rw-r--r--drivers/staging/irda/include/net/irda/timer.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c4
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h9
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c157
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c99
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c14
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c9
-rw-r--r--drivers/staging/media/atomisp/Kconfig11
-rw-r--r--drivers/staging/media/atomisp/TODO24
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig100
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile19
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.c1255
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.h198
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c (renamed from drivers/staging/media/atomisp/i2c/gc0310.c)53
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c (renamed from drivers/staging/media/atomisp/i2c/gc2235.c)54
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c (renamed from drivers/staging/media/atomisp/i2c/libmsrlisthelper.c)4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c (renamed from drivers/staging/media/atomisp/i2c/lm3554.c)47
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c (renamed from drivers/staging/media/atomisp/i2c/mt9m114.c)51
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c (renamed from drivers/staging/media/atomisp/i2c/ov2680.c)51
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c (renamed from drivers/staging/media/atomisp/i2c/ov2722.c)54
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h7
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Kconfig9
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Makefile14
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.c217
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.h50
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/common.h66
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.c210
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.h39
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.c224
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.h64
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.c233
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.h64
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.c198
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.h58
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.c2480
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h737
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx132.h566
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx134.h2465
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx135.h3374
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx175.h1960
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx208.h550
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx219.h228
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx227.h727
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp.c39
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c80
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c89
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_imx.c191
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/vcm.c45
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h9
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h14
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ad5823.h4
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c (renamed from drivers/staging/media/atomisp/i2c/ov5693/ov5693.c)59
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.c65
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h5
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h5
-rw-r--r--drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h38
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h38
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h3
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h25
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h4
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h5
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3642.h153
-rw-r--r--drivers/staging/media/atomisp/pci/Kconfig17
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c67
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c80
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h64
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h107
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h87
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h153
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h216
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h68
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h62
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h120
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h112
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h128
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h90
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h148
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h82
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h98
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h104
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c201
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c4
-rw-r--r--drivers/staging/media/atomisp/platform/Makefile1
-rw-r--r--drivers/staging/media/atomisp/platform/clock/Makefile6
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c40
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h27
-rw-r--r--drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c247
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/Makefile1
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c141
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c298
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c5
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c7
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c7
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c8
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c231
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c7
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c6
-rw-r--r--drivers/staging/pi433/pi433_if.c90
-rw-r--r--drivers/staging/pi433/rf69.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c14
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c16
-rw-r--r--drivers/staging/rtl8712/recv_linux.c9
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c9
-rw-r--r--drivers/staging/speakup/main.c4
-rw-r--r--drivers/staging/speakup/synth.c2
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c8
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c20
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c45
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c80
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c39
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_alua.c51
-rw-r--r--drivers/target/target_core_alua.h9
-rw-r--r--drivers/target/target_core_configfs.c14
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_file.c4
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_pr.c41
-rw-r--r--drivers/target/target_core_tmr.c12
-rw-r--r--drivers/target/target_core_transport.c84
-rw-r--r--drivers/target/target_core_user.c215
-rw-r--r--drivers/tee/optee/core.c1
-rw-r--r--drivers/thermal/Kconfig3
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/armada_thermal.c2
-rw-r--r--drivers/thermal/broadcom/Kconfig7
-rw-r--r--drivers/thermal/broadcom/Makefile1
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c387
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/hisi_thermal.c612
-rw-r--r--drivers/thermal/imx_thermal.c104
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c6
-rw-r--r--drivers/thermal/intel_bxt_pmic_thermal.c3
-rw-r--r--drivers/thermal/intel_pch_thermal.c11
-rw-r--r--drivers/thermal/intel_powerclamp.c4
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c43
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c34
-rw-r--r--drivers/thermal/rockchip_thermal.c67
-rw-r--r--drivers/thermal/step_wise.c11
-rw-r--r--drivers/thermal/tegra/Kconfig7
-rw-r--r--drivers/thermal/tegra/Makefile3
-rw-r--r--drivers/thermal/tegra/soctherm.c2
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c263
-rw-r--r--drivers/thermal/thermal-generic-adc.c24
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c3
-rw-r--r--drivers/thunderbolt/tb.c1
-rw-r--r--drivers/tty/cyclades.c4
-rw-r--r--drivers/tty/ipwireless/hardware.c11
-rw-r--r--drivers/tty/isicom.c4
-rw-r--r--drivers/tty/moxa.c4
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/n_r3964.c8
-rw-r--r--drivers/tty/rocket.c4
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c26
-rw-r--r--drivers/tty/serial/8250/8250_core.c4
-rw-r--r--drivers/tty/serial/8250/8250_early.c14
-rw-r--r--drivers/tty/serial/8250/8250_pci.c3
-rw-r--r--drivers/tty/serial/crisv10.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/ifx6x60.c7
-rw-r--r--drivers/tty/serial/imx.c6
-rw-r--r--drivers/tty/serial/kgdb_nmi.c6
-rw-r--r--drivers/tty/serial/max3100.c7
-rw-r--r--drivers/tty/serial/mux.c4
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c7
-rw-r--r--drivers/tty/serial/sa1100.c7
-rw-r--r--drivers/tty/serial/sh-sci.c16
-rw-r--r--drivers/tty/serial/sn_console.c6
-rw-r--r--drivers/tty/synclink.c8
-rw-r--r--drivers/tty/synclink_gt.c16
-rw-r--r--drivers/tty/synclinkmp.c17
-rw-r--r--drivers/tty/vt/keyboard.c2
-rw-r--r--drivers/tty/vt/selection.c50
-rw-r--r--drivers/tty/vt/vt.c4
-rw-r--r--drivers/tty/vt/vt_ioctl.c68
-rw-r--r--drivers/usb/atm/cxacru.c23
-rw-r--r--drivers/usb/atm/speedtch.c16
-rw-r--r--drivers/usb/atm/usbatm.c10
-rw-r--r--drivers/usb/common/ulpi.c4
-rw-r--r--drivers/usb/core/config.c32
-rw-r--r--drivers/usb/core/devio.c14
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/core/hub.c9
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/gadget.c42
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/hcd_queue.c7
-rw-r--r--drivers/usb/dwc2/params.c29
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c5
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/composite.c7
-rw-r--r--drivers/usb/gadget/function/f_fs.c15
-rw-r--r--drivers/usb/gadget/legacy/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c1
-rw-r--r--drivers/usb/gadget/udc/core.c8
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c8
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c8
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/host/ehci-dbg.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c9
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c7
-rw-r--r--drivers/usb/host/sl811-hcd.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c3
-rw-r--r--drivers/usb/host/uhci-q.c4
-rw-r--r--drivers/usb/host/xhci-mem.c22
-rw-r--r--drivers/usb/host/xhci-ring.c18
-rw-r--r--drivers/usb/host/xhci.c8
-rw-r--r--drivers/usb/musb/da8xx.c10
-rw-r--r--drivers/usb/serial/mos7840.c15
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/usb_debug.c2
-rw-r--r--drivers/usb/storage/realtek_cr.c7
-rw-r--r--drivers/usb/storage/uas-detect.h4
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/Kconfig54
-rw-r--r--drivers/usb/typec/ucsi/Kconfig1
-rw-r--r--drivers/usb/usbip/stub_rx.c51
-rw-r--r--drivers/usb/usbip/stub_tx.c7
-rw-r--r--drivers/usb/usbip/usbip_common.h1
-rw-r--r--drivers/usb/usbip/vhci_hcd.c1
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c25
-rw-r--r--drivers/uwb/drp.c6
-rw-r--r--drivers/uwb/neh.c8
-rw-r--r--drivers/uwb/rsv.c15
-rw-r--r--drivers/uwb/uwb-internal.h2
-rw-r--r--drivers/vhost/net.c20
-rw-r--r--drivers/vhost/scsi.c77
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/backlight/ili922x.c3
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/backlight/tps65217_bl.c17
-rw-r--r--drivers/video/fbdev/Kconfig10
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c4
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c10
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c3
-rw-r--r--drivers/video/fbdev/au1200fb.c43
-rw-r--r--drivers/video/fbdev/cirrusfb.c6
-rw-r--r--drivers/video/fbdev/controlfb.h2
-rw-r--r--drivers/video/fbdev/core/fbcon.c11
-rw-r--r--drivers/video/fbdev/core/fbcon.h1
-rw-r--r--drivers/video/fbdev/dnfb.c15
-rw-r--r--drivers/video/fbdev/goldfishfb.c8
-rw-r--r--drivers/video/fbdev/igafb.c579
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.c9
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/mxsfb.c13
-rw-r--r--drivers/video/fbdev/omap/hwa742.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c24
-rw-r--r--drivers/video/fbdev/sa1100fb.c75
-rw-r--r--drivers/video/fbdev/sa1100fb.h2
-rw-r--r--drivers/video/fbdev/sis/init301.c2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c4
-rw-r--r--drivers/video/fbdev/sm501fb.c22
-rw-r--r--drivers/video/fbdev/udlfb.c10
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/virtio/virtio.c2
-rw-r--r--drivers/virtio/virtio_balloon.c23
-rw-r--r--drivers/virtio/virtio_mmio.c22
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c73
-rw-r--r--drivers/vme/bridges/vme_fake.c35
-rw-r--r--drivers/vme/bridges/vme_tsi148.c83
-rw-r--r--drivers/vme/vme.c214
-rw-r--r--drivers/w1/slaves/Kconfig15
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c771
-rw-r--r--drivers/w1/slaves/w1_therm.c59
-rw-r--r--drivers/w1/w1_io.c3
-rw-r--r--drivers/watchdog/alim7101_wdt.c4
-rw-r--r--drivers/watchdog/at91sam9_wdt.c6
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c9
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c4
-rw-r--r--drivers/watchdog/cpu5wdt.c4
-rw-r--r--drivers/watchdog/machzwd.c4
-rw-r--r--drivers/watchdog/mixcomwd.c4
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c7
-rw-r--r--drivers/watchdog/mtx-1_wdt.c4
-rw-r--r--drivers/watchdog/nuc900_wdt.c4
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pika_wdt.c4
-rw-r--r--drivers/watchdog/rdc321x_wdt.c4
-rw-r--r--drivers/watchdog/sbc60xxwdt.c4
-rw-r--r--drivers/watchdog/sc520_wdt.c4
-rw-r--r--drivers/watchdog/shwdt.c6
-rw-r--r--drivers/watchdog/via_wdt.c4
-rw-r--r--drivers/watchdog/w83877f_wdt.c4
-rw-r--r--drivers/watchdog/watchdog_core.c35
-rw-r--r--drivers/watchdog/watchdog_dev.c32
-rw-r--r--drivers/xen/Kconfig13
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/grant-table.c248
-rw-r--r--drivers/xen/manage.c7
-rw-r--r--drivers/xen/privcmd.c3
-rw-r--r--drivers/xen/pvcalls-back.c20
-rw-r--r--drivers/xen/pvcalls-front.c1280
-rw-r--r--drivers/xen/pvcalls-front.h28
-rw-r--r--drivers/xen/time.c72
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
3446 files changed, 251459 insertions, 95258 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 1d034b680431..e06f7f633f73 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_PHY) += usb/
obj-$(CONFIG_USB) += usb/
+obj-$(CONFIG_USB_SUPPORT) += usb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/
obj-$(CONFIG_OF) += usb/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 91477d5ab422..46505396869e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -527,6 +527,12 @@ config CHT_WC_PMIC_OPREGION
help
This config adds ACPI operation region support for CHT Whiskey Cove PMIC.
+config CHT_DC_TI_PMIC_OPREGION
+ bool "ACPI operation region support for Dollar Cove TI PMIC"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ help
+ This config adds ACPI operation region support for Dollar Cove TI PMIC.
+
endif
config ACPI_CONFIGFS
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 31c15d84a8d0..41954a601989 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o
+obj-$(CONFIG_CHT_DC_TI_PMIC_OPREGION) += pmic/intel_pmic_chtdc_ti.o
obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6742f6c68034..9bff853e85f3 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1007,7 +1007,7 @@ skip:
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < sizeof(*rcd)) {
+ else if (len < 0 || len < sizeof(*rcd)) {
rc = -EIO;
goto out;
}
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 21c28433c590..06ea4749ebd9 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -949,7 +949,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
- if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
@@ -988,7 +988,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
- if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
@@ -1035,14 +1035,15 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
*lowest_non_linear_reg, *nominal_reg;
u64 high, low, nom, min_nonlinear;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct cppc_pcc_data *pcc_ss_data;
int ret = 0, regs_in_pcc = 0;
- if (!cpc_desc) {
+ if (!cpc_desc || pcc_ss_id < 0) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
return -ENODEV;
}
+ pcc_ss_data = pcc_data[pcc_ss_id];
highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
@@ -1095,15 +1096,16 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
struct cpc_register_resource *delivered_reg, *reference_reg,
*ref_perf_reg, *ctr_wrap_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct cppc_pcc_data *pcc_ss_data;
u64 delivered, reference, ref_perf, ctr_wrap_time;
int ret = 0, regs_in_pcc = 0;
- if (!cpc_desc) {
+ if (!cpc_desc || pcc_ss_id < 0) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
return -ENODEV;
}
+ pcc_ss_data = pcc_data[pcc_ss_id];
delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
@@ -1169,14 +1171,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct cppc_pcc_data *pcc_ss_data;
int ret = 0;
- if (!cpc_desc) {
+ if (!cpc_desc || pcc_ss_id < 0) {
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
+ pcc_ss_data = pcc_data[pcc_ss_id];
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
/*
@@ -1301,7 +1304,7 @@ unsigned int cppc_get_transition_latency(int cpu_num)
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
- struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct cppc_pcc_data *pcc_ss_data;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
@@ -1311,6 +1314,10 @@ unsigned int cppc_get_transition_latency(int cpu_num)
if (!CPC_IN_PCC(desired_reg))
return CPUFREQ_ETERNAL;
+ if (pcc_ss_id < 0)
+ return CPUFREQ_ETERNAL;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
if (pcc_ss_data->pcc_mpar)
latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e4ffaeec9ec2..a4c8ad98560d 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1138,7 +1138,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
* skip all of the subsequent "thaw" callbacks for the device.
*/
if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.direct_complete = true;
+ dev_pm_skip_next_resume_phases(dev);
return 0;
}
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 24418932612e..a041689e5701 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
int count;
struct acpi_hardware_id *id;
+ /* Avoid unnecessarily loading modules for non present devices. */
+ if (!acpi_device_is_present(acpi_dev))
+ return 0;
+
/*
* Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
* be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index da176c95aa2c..0252c9b9af3d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device)
{
struct acpi_ec *ec = NULL;
int ret;
+ bool is_ecdt = false;
+ acpi_status status;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
- ec = acpi_ec_alloc();
- if (!ec)
- return -ENOMEM;
- if (ec_parse_device(device->handle, 0, ec, NULL) !=
- AE_CTRL_TERMINATE) {
+ if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
+ is_ecdt = true;
+ ec = boot_ec;
+ } else {
+ ec = acpi_ec_alloc();
+ if (!ec)
+ return -ENOMEM;
+ status = ec_parse_device(device->handle, 0, ec, NULL);
+ if (status != AE_CTRL_TERMINATE) {
ret = -EINVAL;
goto err_alloc;
+ }
}
if (acpi_is_boot_ec(ec)) {
- boot_ec_is_ecdt = false;
- /*
- * Trust PNP0C09 namespace location rather than ECDT ID.
- *
- * But trust ECDT GPE rather than _GPE because of ASUS quirks,
- * so do not change boot_ec->gpe to ec->gpe.
- */
- boot_ec->handle = ec->handle;
- acpi_handle_debug(ec->handle, "duplicated.\n");
- acpi_ec_free(ec);
- ec = boot_ec;
- ret = acpi_config_boot_ec(ec, ec->handle, true, false);
+ boot_ec_is_ecdt = is_ecdt;
+ if (!is_ecdt) {
+ /*
+ * Trust PNP0C09 namespace location rather than
+ * ECDT ID. But trust ECDT GPE rather than _GPE
+ * because of ASUS quirks, so do not change
+ * boot_ec->gpe to ec->gpe.
+ */
+ boot_ec->handle = ec->handle;
+ acpi_handle_debug(ec->handle, "duplicated.\n");
+ acpi_ec_free(ec);
+ ec = boot_ec;
+ }
+ ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
} else
ret = acpi_ec_setup(ec, true);
if (ret)
@@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device)
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
- /* Reprobe devices depending on the EC */
- acpi_walk_dep_device_list(ec->handle);
+ if (!is_ecdt) {
+ /* Reprobe devices depending on the EC */
+ acpi_walk_dep_device_list(ec->handle);
+ }
acpi_handle_debug(ec->handle, "enumerated.\n");
return 0;
@@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
+ {ACPI_ECDT_HID, 0},
{"", 0},
};
@@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void)
* Note: ec->handle can be valid if this function is called after
* acpi_ec_add(), hence the fast path.
*/
- if (boot_ec->handle != ACPI_ROOT_OBJECT)
- handle = boot_ec->handle;
- else if (!acpi_ec_ecdt_get_handle(&handle))
- return -ENODEV;
- return acpi_config_boot_ec(boot_ec, handle, true, true);
+ if (boot_ec->handle == ACPI_ROOT_OBJECT) {
+ if (!acpi_ec_ecdt_get_handle(&handle))
+ return -ENODEV;
+ boot_ec->handle = handle;
+ }
+
+ /* Register to ACPI bus with PM ops attached */
+ return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}
#if 0
@@ -2022,6 +2037,12 @@ int __init acpi_ec_init(void)
/* Drivers must be started after acpi_ec_query_init() */
dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ /*
+ * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
+ * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
+ * settings but invalid DSDT settings.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=196847
+ */
ecdt_fail = acpi_ec_ecdt_start();
return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index fc8c43e76707..7f43423de43c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
+int acpi_bus_register_early_device(int type);
/* --------------------------------------------------------------------------
Device Matching and Notification
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 9c2c49b6a240..ff2580e7611d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -183,13 +183,33 @@ static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
return 0;
}
-static int xlat_nvdimm_status(void *buf, unsigned int cmd, u32 status)
+#define ACPI_LABELS_LOCKED 3
+
+static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
+ u32 status)
{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
+ /*
+ * In the _LSI, _LSR, _LSW case the locked status is
+ * communicated via the read/write commands
+ */
+ if (nfit_mem->has_lsi)
+ break;
+
if (status >> 16 & ND_CONFIG_LOCKED)
return -EACCES;
break;
+ case ND_CMD_GET_CONFIG_DATA:
+ if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
+ return -EACCES;
+ break;
+ case ND_CMD_SET_CONFIG_DATA:
+ if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
+ return -EACCES;
+ break;
default:
break;
}
@@ -205,13 +225,182 @@ static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
{
if (!nvdimm)
return xlat_bus_status(buf, cmd, status);
- return xlat_nvdimm_status(buf, cmd, status);
+ return xlat_nvdimm_status(nvdimm, buf, cmd, status);
+}
+
+/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
+static union acpi_object *pkg_to_buf(union acpi_object *pkg)
+{
+ int i;
+ void *dst;
+ size_t size = 0;
+ union acpi_object *buf = NULL;
+
+ if (pkg->type != ACPI_TYPE_PACKAGE) {
+ WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
+ pkg->type);
+ goto err;
+ }
+
+ for (i = 0; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+
+ if (obj->type == ACPI_TYPE_INTEGER)
+ size += 4;
+ else if (obj->type == ACPI_TYPE_BUFFER)
+ size += obj->buffer.length;
+ else {
+ WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
+ obj->type);
+ goto err;
+ }
+ }
+
+ buf = ACPI_ALLOCATE(sizeof(*buf) + size);
+ if (!buf)
+ goto err;
+
+ dst = buf + 1;
+ buf->type = ACPI_TYPE_BUFFER;
+ buf->buffer.length = size;
+ buf->buffer.pointer = dst;
+ for (i = 0; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ memcpy(dst, &obj->integer.value, 4);
+ dst += 4;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ memcpy(dst, obj->buffer.pointer, obj->buffer.length);
+ dst += obj->buffer.length;
+ }
+ }
+err:
+ ACPI_FREE(pkg);
+ return buf;
+}
+
+static union acpi_object *int_to_buf(union acpi_object *integer)
+{
+ union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
+ void *dst = NULL;
+
+ if (!buf)
+ goto err;
+
+ if (integer->type != ACPI_TYPE_INTEGER) {
+ WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
+ integer->type);
+ goto err;
+ }
+
+ dst = buf + 1;
+ buf->type = ACPI_TYPE_BUFFER;
+ buf->buffer.length = 4;
+ buf->buffer.pointer = dst;
+ memcpy(dst, &integer->integer.value, 4);
+err:
+ ACPI_FREE(integer);
+ return buf;
+}
+
+static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
+ u32 len, void *data)
+{
+ acpi_status rc;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input = {
+ .count = 3,
+ .pointer = (union acpi_object []) {
+ [0] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = offset,
+ },
+ [1] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = len,
+ },
+ [2] = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.pointer = data,
+ .buffer.length = len,
+ },
+ },
+ };
+
+ rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
+ if (ACPI_FAILURE(rc))
+ return NULL;
+ return int_to_buf(buf.pointer);
+}
+
+static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
+ u32 len)
+{
+ acpi_status rc;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input = {
+ .count = 2,
+ .pointer = (union acpi_object []) {
+ [0] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = offset,
+ },
+ [1] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = len,
+ },
+ },
+ };
+
+ rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
+ if (ACPI_FAILURE(rc))
+ return NULL;
+ return pkg_to_buf(buf.pointer);
+}
+
+static union acpi_object *acpi_label_info(acpi_handle handle)
+{
+ acpi_status rc;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
+ if (ACPI_FAILURE(rc))
+ return NULL;
+ return pkg_to_buf(buf.pointer);
+}
+
+static u8 nfit_dsm_revid(unsigned family, unsigned func)
+{
+ static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
+ [NVDIMM_FAMILY_INTEL] = {
+ [NVDIMM_INTEL_GET_MODES] = 2,
+ [NVDIMM_INTEL_GET_FWINFO] = 2,
+ [NVDIMM_INTEL_START_FWUPDATE] = 2,
+ [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
+ [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
+ [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
+ [NVDIMM_INTEL_SET_THRESHOLD] = 2,
+ [NVDIMM_INTEL_INJECT_ERROR] = 2,
+ },
+ };
+ u8 id;
+
+ if (family > NVDIMM_FAMILY_MAX)
+ return 0;
+ if (func > 31)
+ return 0;
+ id = revid_table[family][func];
+ if (id == 0)
+ return 1; /* default */
+ return id;
}
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
union acpi_object in_obj, in_buf, *out_obj;
const struct nd_cmd_desc *desc = NULL;
struct device *dev = acpi_desc->dev;
@@ -235,7 +424,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
}
if (nvdimm) {
- struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_device *adev = nfit_mem->adev;
if (!adev)
@@ -294,7 +482,29 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
in_buf.buffer.pointer,
min_t(u32, 256, in_buf.buffer.length), true);
- out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
+ /* call the BIOS, prefer the named methods over _DSM if available */
+ if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi)
+ out_obj = acpi_label_info(handle);
+ else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+ struct nd_cmd_get_config_data_hdr *p = buf;
+
+ out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
+ } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
+ && nfit_mem->has_lsw) {
+ struct nd_cmd_set_config_hdr *p = buf;
+
+ out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
+ p->in_buf);
+ } else {
+ u8 revid;
+
+ if (nvdimm)
+ revid = nfit_dsm_revid(nfit_mem->family, func);
+ else
+ revid = 1;
+ out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
+ }
+
if (!out_obj) {
dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
cmd_name);
@@ -356,8 +566,10 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* Set fw_status for all the commands with a known format to be
* later interpreted by xlat_status().
*/
- if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
- || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+ if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
+ && cmd <= ND_CMD_CLEAR_ERROR)
+ || (nvdimm && cmd >= ND_CMD_SMART
+ && cmd <= ND_CMD_VENDOR)))
fw_status = *(u32 *) out_obj->buffer.pointer;
if (offset + in_buf.buffer.length < buf_len) {
@@ -1431,6 +1643,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
{
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
+ union acpi_object *obj;
unsigned long dsm_mask;
const guid_t *guid;
int i;
@@ -1463,7 +1676,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
* different command sets. Note, that checking for function0 (bit0)
* tells us if any commands are reachable through this GUID.
*/
- for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
+ for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
if (family < 0 || i == default_dsm_family)
family = i;
@@ -1473,7 +1686,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
if (override_dsm_mask && !disable_vendor_specific)
dsm_mask = override_dsm_mask;
else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
- dsm_mask = 0x3fe;
+ dsm_mask = NVDIMM_INTEL_CMDMASK;
if (disable_vendor_specific)
dsm_mask &= ~(1 << ND_CMD_VENDOR);
} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
@@ -1493,9 +1706,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
guid = to_nfit_uuid(nfit_mem->family);
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
- if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
+ if (acpi_check_dsm(adev_dimm->handle, guid,
+ nfit_dsm_revid(nfit_mem->family, i),
+ 1ULL << i))
set_bit(i, &nfit_mem->dsm_mask);
+ obj = acpi_label_info(adev_dimm->handle);
+ if (obj) {
+ ACPI_FREE(obj);
+ nfit_mem->has_lsi = 1;
+ dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev));
+ }
+
+ obj = acpi_label_read(adev_dimm->handle, 0, 0);
+ if (obj) {
+ ACPI_FREE(obj);
+ nfit_mem->has_lsr = 1;
+ dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
+ }
+
+ obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL);
+ if (obj) {
+ ACPI_FREE(obj);
+ nfit_mem->has_lsw = 1;
+ dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
+ }
+
return 0;
}
@@ -1571,8 +1807,21 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
* userspace interface.
*/
cmd_mask = 1UL << ND_CMD_CALL;
- if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
- cmd_mask |= nfit_mem->dsm_mask;
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
+ /*
+ * These commands have a 1:1 correspondence
+ * between DSM payload and libnvdimm ioctl
+ * payload format.
+ */
+ cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
+ }
+
+ if (nfit_mem->has_lsi)
+ set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+ if (nfit_mem->has_lsr)
+ set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+ if (nfit_mem->has_lsw)
+ set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
: NULL;
@@ -1645,6 +1894,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
int i;
nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+ nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
adev = to_acpi_dev(acpi_desc);
if (!adev)
return;
@@ -2239,7 +2489,7 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
if (ars_status->out_length
< 44 + sizeof(struct nd_ars_record) * (i + 1))
break;
- rc = nvdimm_bus_add_poison(nvdimm_bus,
+ rc = nvdimm_bus_add_badrange(nvdimm_bus,
ars_status->records[i].err_address,
ars_status->records[i].length);
if (rc)
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index feeb95d574fa..b92921439657 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -67,7 +67,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
continue;
/* If this fails due to an -ENOMEM, there is little we can do */
- nvdimm_bus_add_poison(acpi_desc->nvdimm_bus,
+ nvdimm_bus_add_badrange(acpi_desc->nvdimm_bus,
ALIGN(mce->addr, L1_CACHE_BYTES),
L1_CACHE_BYTES);
nvdimm_region_notify(nfit_spa->nd_region,
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 54292db61262..f0cf18b2da8b 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -24,7 +24,7 @@
/* ACPI 6.1 */
#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
-/* http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf */
+/* http://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
@@ -38,6 +38,37 @@
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT
+
+#define NVDIMM_STANDARD_CMDMASK \
+(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
+ | 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA \
+ | 1 << ND_CMD_SET_CONFIG_DATA | 1 << ND_CMD_VENDOR_EFFECT_LOG_SIZE \
+ | 1 << ND_CMD_VENDOR_EFFECT_LOG | 1 << ND_CMD_VENDOR)
+
+/*
+ * Command numbers that the kernel needs to know about to handle
+ * non-default DSM revision ids
+ */
+enum nvdimm_family_cmds {
+ NVDIMM_INTEL_LATCH_SHUTDOWN = 10,
+ NVDIMM_INTEL_GET_MODES = 11,
+ NVDIMM_INTEL_GET_FWINFO = 12,
+ NVDIMM_INTEL_START_FWUPDATE = 13,
+ NVDIMM_INTEL_SEND_FWUPDATE = 14,
+ NVDIMM_INTEL_FINISH_FWUPDATE = 15,
+ NVDIMM_INTEL_QUERY_FWUPDATE = 16,
+ NVDIMM_INTEL_SET_THRESHOLD = 17,
+ NVDIMM_INTEL_INJECT_ERROR = 18,
+};
+
+#define NVDIMM_INTEL_CMDMASK \
+(NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \
+ | 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \
+ | 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \
+ | 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \
+ | 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN)
+
enum nfit_uuids {
/* for simplicity alias the uuid index with the family id */
NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL,
@@ -140,6 +171,9 @@ struct nfit_mem {
struct resource *flush_wpq;
unsigned long dsm_mask;
int family;
+ u32 has_lsi:1;
+ u32 has_lsr:1;
+ u32 has_lsw:1;
};
struct acpi_nfit_desc {
@@ -167,6 +201,7 @@ struct acpi_nfit_desc {
unsigned int init_complete:1;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
+ unsigned long bus_nfit_cmd_force_en;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
};
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
new file mode 100644
index 000000000000..109c1e9c9c7a
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -0,0 +1,137 @@
+/*
+ * Dollar Cove TI PMIC operation region driver
+ * Copyright (C) 2014 Intel Corporation. All rights reserved.
+ *
+ * Rewritten and cleaned up
+ * Copyright (C) 2017 Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include "intel_pmic.h"
+
+/* registers stored in 16bit BE (high:low, total 10bit) */
+#define CHTDC_TI_VBAT 0x54
+#define CHTDC_TI_DIETEMP 0x56
+#define CHTDC_TI_BPTHERM 0x58
+#define CHTDC_TI_GPADC 0x5a
+
+static struct pmic_table chtdc_ti_power_table[] = {
+ { .address = 0x00, .reg = 0x41 },
+ { .address = 0x04, .reg = 0x42 },
+ { .address = 0x08, .reg = 0x43 },
+ { .address = 0x0c, .reg = 0x45 },
+ { .address = 0x10, .reg = 0x46 },
+ { .address = 0x14, .reg = 0x47 },
+ { .address = 0x18, .reg = 0x48 },
+ { .address = 0x1c, .reg = 0x49 },
+ { .address = 0x20, .reg = 0x4a },
+ { .address = 0x24, .reg = 0x4b },
+ { .address = 0x28, .reg = 0x4c },
+ { .address = 0x2c, .reg = 0x4d },
+ { .address = 0x30, .reg = 0x4e },
+};
+
+static struct pmic_table chtdc_ti_thermal_table[] = {
+ {
+ .address = 0x00,
+ .reg = CHTDC_TI_GPADC
+ },
+ {
+ .address = 0x0c,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP2 -> SYSTEMP */
+ {
+ .address = 0x18,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP3 -> BPTHERM */
+ {
+ .address = 0x24,
+ .reg = CHTDC_TI_BPTHERM
+ },
+ {
+ .address = 0x30,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP5 -> DIETEMP */
+ {
+ .address = 0x3c,
+ .reg = CHTDC_TI_DIETEMP
+ },
+};
+
+static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg, int bit,
+ u64 *value)
+{
+ int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = data & 1;
+ return 0;
+}
+
+static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg, int bit,
+ bool on)
+{
+ return regmap_update_bits(regmap, reg, 1, on);
+}
+
+static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
+{
+ u8 buf[2];
+
+ if (regmap_bulk_read(regmap, reg, buf, 2))
+ return -EIO;
+
+ /* stored in big-endian */
+ return ((buf[0] & 0x03) << 8) | buf[1];
+}
+
+static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
+ .get_power = chtdc_ti_pmic_get_power,
+ .update_power = chtdc_ti_pmic_update_power,
+ .get_raw_temp = chtdc_ti_pmic_get_raw_temp,
+ .power_table = chtdc_ti_power_table,
+ .power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
+ .thermal_table = chtdc_ti_thermal_table,
+ .thermal_table_count = ARRAY_SIZE(chtdc_ti_thermal_table),
+};
+
+static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ int err;
+
+ err = intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
+ &chtdc_ti_pmic_opregion_data);
+ if (err < 0)
+ return err;
+
+ /* Re-enumerate devices depending on PMIC */
+ acpi_walk_dep_device_list(ACPI_HANDLE(pdev->dev.parent));
+ return 0;
+}
+
+static const struct platform_device_id chtdc_ti_pmic_opregion_id_table[] = {
+ { .name = "chtdc_ti_region" },
+ {},
+};
+
+static struct platform_driver chtdc_ti_pmic_opregion_driver = {
+ .probe = chtdc_ti_pmic_opregion_probe,
+ .driver = {
+ .name = "cht_dollar_cove_ti_pmic",
+ },
+ .id_table = chtdc_ti_pmic_opregion_id_table,
+};
+module_platform_driver(chtdc_ti_pmic_opregion_driver);
+
+MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e14e964bfe6d..b0fe5272c76a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device)
case ACPI_BUS_TYPE_SLEEP_BUTTON:
strcpy(device->pnp.bus_id, "SLPF");
break;
+ case ACPI_BUS_TYPE_ECDT_EC:
+ strcpy(device->pnp.bus_id, "ECDT");
+ break;
default:
acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
/* Clean up trailing underscores (if any) */
@@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
case ACPI_BUS_TYPE_SLEEP_BUTTON:
acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
break;
+ case ACPI_BUS_TYPE_ECDT_EC:
+ acpi_add_id(pnp, ACPI_ECDT_HID);
+ break;
}
}
@@ -2046,6 +2052,21 @@ void acpi_bus_trim(struct acpi_device *adev)
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
+int acpi_bus_register_early_device(int type)
+{
+ struct acpi_device *device = NULL;
+ int result;
+
+ result = acpi_add_single_object(&device, NULL,
+ type, ACPI_STA_DEFAULT);
+ if (result)
+ return result;
+
+ device->flags.match_driver = true;
+ return device_attach(&device->dev);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
+
static int acpi_bus_scan_fixed(void)
{
int result = 0;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0a9e5979aaa9..9d49a1acebe3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -355,6 +355,7 @@ acpi_evaluate_reference(acpi_handle handle,
}
if (package->package.count > ACPI_MAX_HANDLES) {
+ kfree(package);
return AE_NO_MEMORY;
}
list->count = package->package.count;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7255f94ded30..bccec9de0533 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1948,6 +1948,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
}
/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t: transaction that needs to be cleaned up
+ * @reason: reason the transaction wasn't delivered
+ * @error_code: error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+ const char *reason,
+ uint32_t error_code)
+{
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+ binder_send_failed_reply(t, error_code);
+ } else {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered transaction %d, %s\n",
+ t->debug_id, reason);
+ binder_free_transaction(t);
+ }
+}
+
+/**
* binder_validate_object() - checks for a valid metadata object in a buffer.
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the buffer at which to validate an object.
@@ -2192,7 +2212,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
off_start,
offp - off_start);
if (!parent) {
- pr_err("transaction release %d bad parent offset",
+ pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
@@ -4015,12 +4035,20 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "put_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "copy_to_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(tr);
@@ -4090,15 +4118,9 @@ static void binder_release_work(struct binder_proc *proc,
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
- if (t->buffer->target_node &&
- !(t->flags & TF_ONE_WAY)) {
- binder_send_failed_reply(t, BR_DEAD_REPLY);
- } else {
- binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered transaction %d\n",
- t->debug_id);
- binder_free_transaction(t);
- }
+
+ binder_cleanup_transaction(t, "process died.",
+ BR_DEAD_REPLY);
} break;
case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of(
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index c2819a3d58a6..6f6f745605af 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -186,12 +186,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
struct mm_struct *mm = NULL;
bool need_mm = false;
@@ -215,7 +215,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+ if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
mm = alloc->vma_vm_mm;
if (mm) {
@@ -437,7 +437,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
@@ -478,7 +478,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
err_alloc_buf_struct_failed:
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- end_page_addr, NULL);
+ end_page_addr);
return ERR_PTR(-ENOMEM);
}
@@ -562,8 +562,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
alloc->pid, buffer->data,
prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
- buffer_start_page(buffer) + PAGE_SIZE,
- NULL);
+ buffer_start_page(buffer) + PAGE_SIZE);
}
list_del(&buffer->entry);
kfree(buffer);
@@ -600,8 +599,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -984,7 +982,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
return ret;
}
-struct shrinker binder_shrinker = {
+static struct shrinker binder_shrinker = {
.count_objects = binder_shrink_count,
.scan_objects = binder_shrink_scan,
.seeks = DEFAULT_SEEKS,
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 80854f71559a..0ae6971c2a4c 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -1,5 +1,5 @@
/*
- * MeidaTek AHCI SATA driver
+ * MediaTek AHCI SATA driver
*
* Copyright (c) 2017 MediaTek Inc.
* Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -25,7 +25,7 @@
#include <linux/reset.h>
#include "ahci.h"
-#define DRV_NAME "ahci"
+#define DRV_NAME "ahci-mtk"
#define SYS_CFG 0x14
#define SYS_CFG_SATA_MSK GENMASK(31, 30)
@@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = {
};
module_platform_driver(mtk_ahci_driver);
-MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b6b0bf76dfc7..2685f28160f7 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -35,6 +35,8 @@
/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
+#define AHCI_PORT_PHY2_CFG 0x28184d1f
+#define AHCI_PORT_PHY3_CFG 0x0e081509
#define AHCI_PORT_TRANS_CFG 0x08000029
#define AHCI_PORT_AXICC_CFG 0x3fffffff
@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS2088A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2a882929de4a..8193b38a1cae 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
bit = fls(mask) - 1;
mask &= ~(1 << bit);
- /* Mask off all speeds higher than or equal to the current
- * one. Force 1.5Gbps if current SPD is not available.
+ /*
+ * Mask off all speeds higher than or equal to the current one. At
+ * this point, if current SPD is not available and we previously
+ * recorded the link speed from SStatus, the driver has already
+ * masked off the highest bit so mask should already be 1 or 0.
+ * Otherwise, we should not force 1.5Gbps on a link where we have
+ * not previously recorded speed from SStatus. Just return in this
+ * case.
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
else
- mask &= 1;
+ return -EINVAL;
/* were we already at the bottom? */
if (!mask)
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ffd8d33c6e0f..6db2e34bd52f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
* is issued to the device. However, if the controller clock is 133MHz,
* the following tables must be used.
*/
-static struct pdc2027x_pio_timing {
+static const struct pdc2027x_pio_timing {
u8 value0, value1, value2;
} pdc2027x_pio_timing_tbl[] = {
{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
@@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing {
{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
};
-static struct pdc2027x_mdma_timing {
+static const struct pdc2027x_mdma_timing {
u8 value0, value1;
} pdc2027x_mdma_timing_tbl[] = {
{ 0xdf, 0x5f }, /* MDMA mode 0 */
@@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing {
{ 0x69, 0x25 }, /* MDMA mode 2 */
};
-static struct pdc2027x_udma_timing {
+static const struct pdc2027x_udma_timing {
u8 value0, value1, value2;
} pdc2027x_udma_timing_tbl[] = {
{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
@@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
* @host: target ATA host
* @board_idx: board identifier
*/
-static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
{
long pll_clock;
@@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
/* Adjust PLL control register */
pdc_adjust_pll(host, pll_clock, board_idx);
-
- return 0;
}
/**
@@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
//pci_enable_intx(pdev);
/* initialize adapter */
- if (pdc_hardware_init(host, board_idx) != 0)
- return -EIO;
+ pdc_hardware_init(host, board_idx);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
@@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
else
board_idx = PDC_UDMA_133;
- if (pdc_hardware_init(host, board_idx))
- return -EIO;
+ pdc_hardware_init(host, board_idx);
ata_host_resume(host);
return 0;
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index acf16c323e38..9287ec958b70 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -293,7 +293,7 @@ static inline void __init show_version (void) {
*/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
/********** globals **********/
static unsigned short debug = 0;
@@ -1493,8 +1493,8 @@ static const struct atmdev_ops amb_ops = {
};
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
- amb_dev * dev = (amb_dev *) arg;
+static void do_housekeeping (struct timer_list *t) {
+ amb_dev * dev = from_timer(dev, t, housekeeping);
// could collect device-specific (not driver/atm-linux) stats here
@@ -2258,7 +2258,7 @@ static int amb_probe(struct pci_dev *pci_dev,
PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
+ dev->atm_dev->dev_data = (void *) dev;
// register our address
amb_esi (dev, dev->atm_dev->esi);
@@ -2267,8 +2267,7 @@ static int amb_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
- setup_timer(&dev->housekeeping, do_housekeeping,
- (unsigned long)dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
// enable host interrupts
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 6b6368a56526..d97c05690faa 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1656,9 +1656,9 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
#ifdef FS_POLL_FREQ
-static void fs_poll (unsigned long data)
+static void fs_poll (struct timer_list *t)
{
- struct fs_dev *dev = (struct fs_dev *) data;
+ struct fs_dev *dev = from_timer(dev, t, timer);
fs_irq (0, dev);
dev->timer.expires = jiffies + FS_POLL_FREQ;
@@ -1885,9 +1885,7 @@ static int fs_init(struct fs_dev *dev)
}
#ifdef FS_POLL_FREQ
- init_timer (&dev->timer);
- dev->timer.data = (unsigned long) dev;
- dev->timer.function = fs_poll;
+ timer_setup(&dev->timer, fs_poll, 0);
dev->timer.expires = jiffies + FS_POLL_FREQ;
add_timer (&dev->timer);
#endif
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 126855e6cb7d..6ebc4e4820fc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -3083,8 +3083,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
ASSERT(fore200e_vcc);
len = sprintf(page,
- " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
- (u32)(unsigned long)vcc,
+ " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
+ vcc,
vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
fore200e_vcc->tx_pdu,
fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index e121b8485731..5ddc203206b8 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -357,7 +357,7 @@ static inline void __init show_version (void) {
/********** globals **********/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
static unsigned short debug = 0;
static unsigned short vpi_bits = 0;
@@ -1418,9 +1418,9 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
+static void do_housekeeping (struct timer_list *t) {
// just stats at the moment
- hrz_dev * dev = (hrz_dev *) arg;
+ hrz_dev * dev = from_timer(dev, t, housekeeping);
// collect device-specific (not driver/atm-linux) stats here
dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
@@ -2796,7 +2796,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = vpi_bits;
dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
- setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
out:
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 909744eb7bab..0a67487c0b1d 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -45,8 +45,8 @@ static DEFINE_SPINLOCK(idt77105_priv_lock);
#define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg)
#define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg)
-static void idt77105_stats_timer_func(unsigned long);
-static void idt77105_restart_timer_func(unsigned long);
+static void idt77105_stats_timer_func(struct timer_list *);
+static void idt77105_restart_timer_func(struct timer_list *);
static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func);
@@ -80,7 +80,7 @@ static u16 get_counter(struct atm_dev *dev, int counter)
* a separate copy of the stats allows implementation of
* an ioctl which gathers the stats *without* zero'ing them.
*/
-static void idt77105_stats_timer_func(unsigned long dummy)
+static void idt77105_stats_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
@@ -109,7 +109,7 @@ static void idt77105_stats_timer_func(unsigned long dummy)
* interrupts need to be disabled when the cable is pulled out
* to avoid lots of spurious cell error interrupts.
*/
-static void idt77105_restart_timer_func(unsigned long dummy)
+static void idt77105_restart_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 0e3b9c44c808..0277f36be85b 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1528,9 +1528,9 @@ idt77252_tx(struct idt77252_dev *card)
static void
-tst_timer(unsigned long data)
+tst_timer(struct timer_list *t)
{
- struct idt77252_dev *card = (struct idt77252_dev *)data;
+ struct idt77252_dev *card = from_timer(card, t, tst_timer);
unsigned long base, idle, jump;
unsigned long flags;
u32 pc;
@@ -3634,7 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
spin_lock_init(&card->cmd_lock);
spin_lock_init(&card->tst_lock);
- setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
+ timer_setup(&card->tst_timer, tst_timer, 0);
/* Do the I/O remapping... */
card->membase = ioremap(membase, 1024);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 12f646760b68..98a3a43484c8 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -75,7 +75,7 @@ static void desc_dbg(IADEV *iadev);
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
-static void ia_led_timer(unsigned long arg);
+static void ia_led_timer(struct timer_list *unused);
static DEFINE_TIMER(ia_timer, ia_led_timer);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
@@ -2432,7 +2432,7 @@ static void ia_update_stats(IADEV *iadev) {
return;
}
-static void ia_led_timer(unsigned long arg) {
+static void ia_led_timer(struct timer_list *unused) {
unsigned long flags;
static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
u_char i;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 2351dad78ff5..5f8e009b2da1 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1586,8 +1586,8 @@ static int service_buffer_allocate(struct lanai_dev *lanai)
lanai->pci);
if (unlikely(lanai->service.start == NULL))
return -ENOMEM;
- DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n",
- (unsigned long) lanai->service.start,
+ DPRINTK("allocated service buffer at %p, size %zu(%d)\n",
+ lanai->service.start,
lanai_buf_size(&lanai->service),
lanai_buf_size_cardorder(&lanai->service));
/* Clear ServWrite register to be safe */
@@ -1761,9 +1761,9 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
}
#endif /* !DEBUG_RW */
-static void lanai_timed_poll(unsigned long arg)
+static void lanai_timed_poll(struct timer_list *t)
{
- struct lanai_dev *lanai = (struct lanai_dev *) arg;
+ struct lanai_dev *lanai = from_timer(lanai, t, timer);
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
@@ -1790,10 +1790,8 @@ static void lanai_timed_poll(unsigned long arg)
static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
{
- init_timer(&lanai->timer);
+ timer_setup(&lanai->timer, lanai_timed_poll, 0);
lanai->timer.expires = jiffies + LANAI_POLL_PERIOD;
- lanai->timer.data = (unsigned long) lanai;
- lanai->timer.function = lanai_timed_poll;
add_timer(&lanai->timer);
}
@@ -2220,9 +2218,9 @@ static int lanai_dev_open(struct atm_dev *atmdev)
#endif
memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
lanai_timed_poll_start(lanai);
- printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
+ printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u "
"(%pMF)\n", lanai->number, (int) lanai->pci->revision,
- (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
+ lanai->base, lanai->pci->irq, atmdev->esi);
printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
"board_rev=%d\n", lanai->number,
lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index a9702836cbae..cbec9adc01c7 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -145,7 +145,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
#ifdef EXTRA_DEBUG
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
-static void ns_poll(unsigned long arg);
+static void ns_poll(struct timer_list *unused);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -284,10 +284,8 @@ static int __init nicstar_init(void)
XPRINTK("nicstar: nicstar_init() returned.\n");
if (!error) {
- init_timer(&ns_timer);
+ timer_setup(&ns_timer, ns_poll, 0);
ns_timer.expires = jiffies + NS_POLL_PERIOD;
- ns_timer.data = 0UL;
- ns_timer.function = ns_poll;
add_timer(&ns_timer);
}
@@ -2681,7 +2679,7 @@ static void which_list(ns_dev * card, struct sk_buff *skb)
}
#endif /* EXTRA_DEBUG */
-static void ns_poll(unsigned long arg)
+static void ns_poll(struct timer_list *unused)
{
int i;
ns_dev *card;
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index b8825f2d79e0..4b044710a8cf 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -177,7 +177,7 @@ static int set_loopback(struct atm_dev *dev,int mode)
default:
return -EINVAL;
}
- dev->ops->phy_put(dev, control, reg);
+ dev->ops->phy_put(dev, control, reg);
PRIV(dev)->loop_mode = mode;
return 0;
}
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index d7d21118d3e0..2c2ed9cf8796 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -136,6 +136,7 @@ config CFAG12864B_RATE
config IMG_ASCII_LCD
tristate "Imagination Technologies ASCII LCD Display"
+ depends on HAS_IOMEM
default y if MIPS_MALTA || MIPS_SEAD3
select SYSCON
help
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 2f6614c9a229..bdc87907d6a1 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL
depends on FW_LOADER
default y
help
- The kernel source tree includes a number of firmware 'blobs'
- that are used by various drivers. The recommended way to
- use these is to run "make firmware_install", which, after
- converting ihex files to binary, copies all of the needed
- binary files in firmware/ to /lib/firmware/ on your system so
- that they can be loaded by userspace helpers on request.
+ Various drivers in the kernel source tree may require firmware,
+ which is generally available in your distribution's linux-firmware
+ package.
+
+ The linux-firmware package should install firmware into
+ /lib/firmware/ on your system, so they can be loaded by userspace
+ helpers on request.
Enabling this option will build each required firmware blob
- into the kernel directly, where request_firmware() will find
- them without having to call out to userspace. This may be
- useful if your root file system requires a device that uses
- such firmware and do not wish to use an initrd.
+ specified by EXTRA_FIRMWARE into the kernel directly, where
+ request_firmware() will find them without having to call out to
+ userspace. This may be useful if your root file system requires a
+ device that uses such firmware and you do not wish to use an
+ initrd.
This single option controls the inclusion of firmware for
- every driver that uses request_firmware() and ships its
- firmware in the kernel source tree, which avoids a
+ every driver that uses request_firmware(), which avoids a
proliferation of 'Include firmware for xxx device' options.
Say 'N' and let firmware be loaded from userspace.
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 0739c5b953bf..4de87b0b53c8 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -105,7 +105,7 @@ subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
-static int __init free_raw_capacity(void)
+static int free_raw_capacity(void)
{
kfree(raw_capacity);
raw_capacity = NULL;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a36cf1bd07a9..110230d86527 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1958,7 +1958,6 @@ void device_del(struct device *dev)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
- device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
@@ -1986,6 +1985,7 @@ void device_del(struct device *dev)
device_pm_remove(dev);
driver_deferred_probe_del(dev);
device_remove_properties(dev);
+ device_links_purge(dev);
/* Notify the platform of the removal, in case they
* need to do anything...
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 45575e134696..2c964f56dafe 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -350,6 +350,15 @@ EXPORT_SYMBOL_GPL(device_bind_driver);
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+static void driver_deferred_probe_add_trigger(struct device *dev,
+ int local_trigger_count)
+{
+ driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
+}
+
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
@@ -369,6 +378,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
}
ret = device_links_check_suppliers(dev);
+ if (ret == -EPROBE_DEFER)
+ driver_deferred_probe_add_trigger(dev, local_trigger_count);
if (ret)
return ret;
@@ -470,10 +481,7 @@ pinctrl_bind_failed:
case -EPROBE_DEFER:
/* Driver requested deferred probing */
dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
- driver_deferred_probe_add(dev);
- /* Did a trigger occur while probing? Need to re-trigger if yes */
- if (local_trigger_count != atomic_read(&deferred_trigger_count))
- driver_deferred_probe_trigger();
+ driver_deferred_probe_add_trigger(dev, local_trigger_count);
break;
case -ENODEV:
case -ENXIO:
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index cd6ccdcf9df0..372d10af2600 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
- if (isa_driver->probe)
+ if (isa_driver && isa_driver->probe)
return isa_driver->probe(dev, to_isa_dev(dev)->id);
return 0;
@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
- if (isa_driver->remove)
+ if (isa_driver && isa_driver->remove)
return isa_driver->remove(dev, to_isa_dev(dev)->id);
return 0;
@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
- if (isa_driver->shutdown)
+ if (isa_driver && isa_driver->shutdown)
isa_driver->shutdown(dev, to_isa_dev(dev)->id);
}
@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
{
struct isa_driver *isa_driver = dev->platform_data;
- if (isa_driver->suspend)
+ if (isa_driver && isa_driver->suspend)
return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
return 0;
@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
- if (isa_driver->resume)
+ if (isa_driver && isa_driver->resume)
return isa_driver->resume(dev, to_isa_dev(dev)->id);
return 0;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db2f04415927..08744b572af6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -526,6 +526,21 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
+ * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * @dev: Target device.
+ *
+ * Make the core skip the "early resume" and "resume" phases for @dev.
+ *
+ * This function can be called by middle-layer code during the "noirq" phase of
+ * system resume if necessary, but not by device drivers.
+ */
+void dev_pm_skip_next_resume_phases(struct device *dev)
+{
+ dev->power.is_late_suspended = false;
+ dev->power.is_suspended = false;
+}
+
+/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2362b9e9701e..6e89b51ea3d9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev)
continue;
retval = pm_runtime_get_sync(link->supplier);
- if (retval < 0) {
+ /* Ignore suppliers with disabled runtime PM. */
+ if (retval < 0 && retval != -EACCES) {
pm_runtime_put_noidle(link->supplier);
return retval;
}
@@ -1101,29 +1102,13 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
goto out;
}
- if (dev->power.runtime_status == status)
+ if (dev->power.runtime_status == status || !parent)
goto out_set;
if (status == RPM_SUSPENDED) {
- /*
- * It is invalid to suspend a device with an active child,
- * unless it has been set to ignore its children.
- */
- if (!dev->power.ignore_children &&
- atomic_read(&dev->power.child_count)) {
- dev_err(dev, "runtime PM trying to suspend device but active child\n");
- error = -EBUSY;
- goto out;
- }
-
- if (parent) {
- atomic_add_unless(&parent->power.child_count, -1, 0);
- notify_parent = !parent->power.ignore_children;
- }
- goto out_set;
- }
-
- if (parent) {
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ notify_parent = !parent->power.ignore_children;
+ } else {
spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
/*
@@ -1307,6 +1292,13 @@ void pm_runtime_enable(struct device *dev)
else
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ WARN(!dev->power.disable_depth &&
+ dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0,
+ "Enabling runtime PM for inactive device (%s) with active children\n",
+ dev_name(dev));
+
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 680ee1d36ac9..38559f04db2c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -481,7 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* Use timer struct to check if the given source is initialized
* by wakeup_source_add.
*/
- return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
+ return ws->timer.function != pm_wakeup_timer_fn;
}
/*
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 304d5c2bd5e9..a3355d66bc12 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -64,7 +64,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(async_dev_1)) {
error = PTR_ERR(async_dev_1);
- pr_err("failed to create async_dev_1: %d", error);
+ pr_err("failed to create async_dev_1: %d\n", error);
return error;
}
@@ -91,7 +91,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(async_dev_2)) {
error = PTR_ERR(async_dev_2);
- pr_err("failed to create async_dev_2: %d", error);
+ pr_err("failed to create async_dev_2: %d\n", error);
goto err_unregister_async_driver;
}
@@ -118,7 +118,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(sync_dev_1)) {
error = PTR_ERR(sync_dev_1);
- pr_err("failed to create sync_dev_1: %d", error);
+ pr_err("failed to create sync_dev_1: %d\n", error);
goto err_unregister_sync_driver;
}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 255591ab3716..442e777bdfb2 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3079,11 +3079,10 @@ DAC960_InitializeController(DAC960_Controller_T *Controller)
/*
Initialize the Monitoring Timer.
*/
- init_timer(&Controller->MonitoringTimer);
+ timer_setup(&Controller->MonitoringTimer,
+ DAC960_MonitoringTimerFunction, 0);
Controller->MonitoringTimer.expires =
jiffies + DAC960_MonitoringTimerInterval;
- Controller->MonitoringTimer.data = (unsigned long) Controller;
- Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
add_timer(&Controller->MonitoringTimer);
Controller->ControllerInitialized = true;
return true;
@@ -5620,9 +5619,9 @@ static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
the status of DAC960 Controllers.
*/
-static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
+static void DAC960_MonitoringTimerFunction(struct timer_list *t)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
+ DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
DAC960_Command_T *Command;
unsigned long flags;
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
index 85fa9bb63759..6a6226a2b932 100644
--- a/drivers/block/DAC960.h
+++ b/drivers/block/DAC960.h
@@ -4406,7 +4406,7 @@ static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
static irqreturn_t DAC960_P_InterruptHandler(int, void *);
static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(unsigned long);
+static void DAC960_MonitoringTimerFunction(struct timer_list *);
static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
DAC960_Controller_T *, ...);
static void DAC960_CreateProcEntries(DAC960_Controller_T *);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 923b417eaf4c..40579d0cb3d1 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -302,7 +302,6 @@ config BLK_DEV_SX8
config BLK_DEV_RAM
tristate "RAM block device support"
- select DAX if BLK_DEV_RAM_DAX
---help---
Saying Y here will allow you to use a portion of your RAM memory as
a block device, so that you can make file systems on it, read and
@@ -338,17 +337,6 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
-config BLK_DEV_RAM_DAX
- bool "Support Direct Access (DAX) to RAM block devices"
- depends on BLK_DEV_RAM && FS_DAX
- default n
- help
- Support filesystems using DAX to access RAM block devices. This
- avoids double-buffering data in the page cache before copying it
- to the block device. Answering Y will slightly enlarge the kernel,
- and will prevent RAM block device backing store memory from being
- allocated from highmem (only a problem for highmem systems).
-
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4e3fb9f104af..e5aa62fcf5a8 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -146,6 +146,7 @@ static struct amiga_floppy_struct unit[FD_MAX_UNITS];
static struct timer_list flush_track_timer[FD_MAX_UNITS];
static struct timer_list post_write_timer;
+static unsigned long post_write_timer_drive;
static struct timer_list motor_on_timer;
static struct timer_list motor_off_timer[FD_MAX_UNITS];
static int on_attempts;
@@ -323,7 +324,7 @@ static void fd_deselect (int drive)
}
-static void motor_on_callback(unsigned long ignored)
+static void motor_on_callback(struct timer_list *unused)
{
if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) {
complete_all(&motor_on_completion);
@@ -355,7 +356,7 @@ static int fd_motor_on(int nr)
on_attempts = -1;
#if 0
printk (KERN_ERR "motor_on failed, turning motor off\n");
- fd_motor_off (nr);
+ fd_motor_off (motor_off_timer + nr);
return 0;
#else
printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming drive is spinning notwithstanding\n");
@@ -365,20 +366,17 @@ static int fd_motor_on(int nr)
return 1;
}
-static void fd_motor_off(unsigned long drive)
+static void fd_motor_off(struct timer_list *timer)
{
- long calledfromint;
-#ifdef MODULE
- long decusecount;
+ unsigned long drive = ((unsigned long)timer -
+ (unsigned long)&motor_off_timer[0]) /
+ sizeof(motor_off_timer[0]);
- decusecount = drive & 0x40000000;
-#endif
- calledfromint = drive & 0x80000000;
drive&=3;
- if (calledfromint && !try_fdc(drive)) {
+ if (!try_fdc(drive)) {
/* We would be blocked in an interrupt, so try again later */
- motor_off_timer[drive].expires = jiffies + 1;
- add_timer(motor_off_timer + drive);
+ timer->expires = jiffies + 1;
+ add_timer(timer);
return;
}
unit[drive].motor = 0;
@@ -392,8 +390,6 @@ static void floppy_off (unsigned int nr)
int drive;
drive = nr & 3;
- /* called this way it is always from interrupt */
- motor_off_timer[drive].data = nr | 0x80000000;
mod_timer(motor_off_timer + drive, jiffies + 3*HZ);
}
@@ -435,7 +431,7 @@ static int fd_calibrate(int drive)
break;
if (--n == 0) {
printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive);
- fd_motor_off (drive);
+ fd_motor_off (motor_off_timer + drive);
unit[drive].track = -1;
rel_fdc();
return 0;
@@ -564,7 +560,7 @@ static irqreturn_t fd_block_done(int irq, void *dummy)
if (block_flag == 2) { /* writing */
writepending = 2;
post_write_timer.expires = jiffies + 1; /* at least 2 ms */
- post_write_timer.data = selected;
+ post_write_timer_drive = selected;
add_timer(&post_write_timer);
}
else { /* reading */
@@ -651,6 +647,10 @@ static void post_write (unsigned long drive)
rel_fdc(); /* corresponds to get_fdc() in raw_write */
}
+static void post_write_callback(struct timer_list *timer)
+{
+ post_write(post_write_timer_drive);
+}
/*
* The following functions are to convert the block contents into raw data
@@ -1244,8 +1244,12 @@ static void dos_write(int disk)
/* FIXME: this assumes the drive is still spinning -
* which is only true if we complete writing a track within three seconds
*/
-static void flush_track_callback(unsigned long nr)
+static void flush_track_callback(struct timer_list *timer)
{
+ unsigned long nr = ((unsigned long)timer -
+ (unsigned long)&flush_track_timer[0]) /
+ sizeof(flush_track_timer[0]);
+
nr&=3;
writefromint = 1;
if (!try_fdc(nr)) {
@@ -1649,8 +1653,7 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
fd_ref[drive] = 0;
}
#ifdef MODULE
-/* the mod_use counter is handled this way */
- floppy_off (drive | 0x40000000);
+ floppy_off (drive);
#endif
mutex_unlock(&amiflop_mutex);
}
@@ -1791,27 +1794,19 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
floppy_find, NULL, NULL);
/* initialize variables */
- init_timer(&motor_on_timer);
+ timer_setup(&motor_on_timer, motor_on_callback, 0);
motor_on_timer.expires = 0;
- motor_on_timer.data = 0;
- motor_on_timer.function = motor_on_callback;
for (i = 0; i < FD_MAX_UNITS; i++) {
- init_timer(&motor_off_timer[i]);
+ timer_setup(&motor_off_timer[i], fd_motor_off, 0);
motor_off_timer[i].expires = 0;
- motor_off_timer[i].data = i|0x80000000;
- motor_off_timer[i].function = fd_motor_off;
- init_timer(&flush_track_timer[i]);
+ timer_setup(&flush_track_timer[i], flush_track_callback, 0);
flush_track_timer[i].expires = 0;
- flush_track_timer[i].data = i;
- flush_track_timer[i].function = flush_track_callback;
unit[i].track = -1;
}
- init_timer(&post_write_timer);
+ timer_setup(&post_write_timer, post_write_callback, 0);
post_write_timer.expires = 0;
- post_write_timer.data = 0;
- post_write_timer.function = post_write;
for (i = 0; i < 128; i++)
mfmdecode[i]=255;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index dc43254e05a4..812fed069708 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -744,7 +744,7 @@ count_targets(struct aoedev *d, int *untainted)
}
static void
-rexmit_timer(ulong vp)
+rexmit_timer(struct timer_list *timer)
{
struct aoedev *d;
struct aoetgt *t;
@@ -758,7 +758,7 @@ rexmit_timer(ulong vp)
int utgts; /* number of aoetgt descriptors (not slots) */
int since;
- d = (struct aoedev *) vp;
+ d = from_timer(d, timer, timer);
spin_lock_irqsave(&d->lock, flags);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index b28fefb90391..697f735b07a4 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include "aoe.h"
-static void dummy_timer(ulong);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);
@@ -146,11 +145,11 @@ aoedev_put(struct aoedev *d)
}
static void
-dummy_timer(ulong vp)
+dummy_timer(struct timer_list *t)
{
struct aoedev *d;
- d = (struct aoedev *)vp;
+ d = from_timer(d, t, timer);
if (d->flags & DEVFL_TKILL)
return;
d->timer.expires = jiffies + HZ;
@@ -466,9 +465,7 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
skb_queue_head_init(&d->skbpool);
- init_timer(&d->timer);
- d->timer.data = (ulong) d;
- d->timer.function = dummy_timer;
+ timer_setup(&d->timer, dummy_timer, 0);
d->timer.expires = jiffies + HZ;
add_timer(&d->timer);
d->bufpool = NULL; /* defer to aoeblk_gdalloc */
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index ae596e55bcb6..8bc3b9fd8dd2 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -342,8 +342,8 @@ static int NeedSeek = 0;
static void fd_select_side( int side );
static void fd_select_drive( int drive );
static void fd_deselect( void );
-static void fd_motor_off_timer( unsigned long dummy );
-static void check_change( unsigned long dummy );
+static void fd_motor_off_timer(struct timer_list *unused);
+static void check_change(struct timer_list *unused);
static irqreturn_t floppy_irq (int irq, void *dummy);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
@@ -353,12 +353,12 @@ static void fd_calibrate_done( int status );
static void fd_seek( void );
static void fd_seek_done( int status );
static void fd_rwsec( void );
-static void fd_readtrack_check( unsigned long dummy );
+static void fd_readtrack_check(struct timer_list *unused);
static void fd_rwsec_done( int status );
static void fd_rwsec_done1(int status);
static void fd_writetrack( void );
static void fd_writetrack_done( int status );
-static void fd_times_out( unsigned long dummy );
+static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
@@ -479,7 +479,7 @@ static void fd_deselect( void )
* counts the index signals, which arrive only if one drive is selected.
*/
-static void fd_motor_off_timer( unsigned long dummy )
+static void fd_motor_off_timer(struct timer_list *unused)
{
unsigned char status;
@@ -515,7 +515,7 @@ static void fd_motor_off_timer( unsigned long dummy )
* as possible) and keep track of the current state of the write protection.
*/
-static void check_change( unsigned long dummy )
+static void check_change(struct timer_list *unused)
{
static int drive = 0;
@@ -966,7 +966,7 @@ static void fd_rwsec( void )
}
-static void fd_readtrack_check( unsigned long dummy )
+static void fd_readtrack_check(struct timer_list *unused)
{
unsigned long flags, addr, addr2;
@@ -1237,7 +1237,7 @@ static void fd_writetrack_done( int status )
fd_error();
}
-static void fd_times_out( unsigned long dummy )
+static void fd_times_out(struct timer_list *unused)
{
atari_disable_irq( IRQ_MFP_FDC );
if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c1cf87718c2e..8028a3a7e7fd 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -20,11 +20,7 @@
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
-#ifdef CONFIG_BLK_DEV_RAM_DAX
-#include <linux/pfn_t.h>
-#include <linux/dax.h>
-#include <linux/uio.h>
-#endif
+#include <linux/backing-dev.h>
#include <linux/uaccess.h>
@@ -44,9 +40,6 @@ struct brd_device {
struct request_queue *brd_queue;
struct gendisk *brd_disk;
-#ifdef CONFIG_BLK_DEV_RAM_DAX
- struct dax_device *dax_dev;
-#endif
struct list_head brd_list;
/*
@@ -111,9 +104,6 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
* restriction might be able to be lifted.
*/
gfp_flags = GFP_NOIO | __GFP_ZERO;
-#ifndef CONFIG_BLK_DEV_RAM_DAX
- gfp_flags |= __GFP_HIGHMEM;
-#endif
page = alloc_page(gfp_flags);
if (!page)
return NULL;
@@ -333,43 +323,6 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
return err;
}
-#ifdef CONFIG_BLK_DEV_RAM_DAX
-static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
-{
- struct page *page;
-
- if (!brd)
- return -ENODEV;
- page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
- if (!page)
- return -ENOSPC;
- *kaddr = page_address(page);
- *pfn = page_to_pfn_t(page);
-
- return 1;
-}
-
-static long brd_dax_direct_access(struct dax_device *dax_dev,
- pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
-{
- struct brd_device *brd = dax_get_private(dax_dev);
-
- return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
-}
-
-static size_t brd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- return copy_from_iter(addr, bytes, i);
-}
-
-static const struct dax_operations brd_dax_ops = {
- .direct_access = brd_dax_direct_access,
- .copy_from_iter = brd_dax_copy_from_iter,
-};
-#endif
-
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
@@ -448,22 +401,10 @@ static struct brd_device *brd_alloc(int i)
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
-
-#ifdef CONFIG_BLK_DEV_RAM_DAX
- queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
- brd->dax_dev = alloc_dax(brd, disk->disk_name, &brd_dax_ops);
- if (!brd->dax_dev)
- goto out_free_inode;
-#endif
-
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
return brd;
-#ifdef CONFIG_BLK_DEV_RAM_DAX
-out_free_inode:
- kill_dax(brd->dax_dev);
- put_dax(brd->dax_dev);
-#endif
out_free_queue:
blk_cleanup_queue(brd->brd_queue);
out_free_dev:
@@ -503,10 +444,6 @@ out:
static void brd_del_one(struct brd_device *brd)
{
list_del(&brd->brd_list);
-#ifdef CONFIG_BLK_DEV_RAM_DAX
- kill_dax(brd->dax_dev);
- put_dax(brd->dax_dev);
-#endif
del_gendisk(brd->brd_disk);
brd_free(brd);
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a54183935aa1..eae484acfbbc 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -903,10 +903,14 @@ static void unlock_fdc(void)
}
/* switches the motor off after a given timeout */
-static void motor_off_callback(unsigned long nr)
+static void motor_off_callback(struct timer_list *t)
{
+ unsigned long nr = t - motor_off_timer;
unsigned char mask = ~(0x10 << UNIT(nr));
+ if (WARN_ON_ONCE(nr >= N_DRIVE))
+ return;
+
set_dor(FDC(nr), mask, 0);
}
@@ -3047,7 +3051,7 @@ static void raw_cmd_done(int flag)
else
raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
- motor_off_callback(current_drive);
+ motor_off_callback(&motor_off_timer[current_drive]);
if (raw_cmd->next &&
(!(raw_cmd->flags & FD_RAW_FAILURE) ||
@@ -4542,7 +4546,7 @@ static int __init do_floppy_init(void)
disks[drive]->fops = &floppy_fops;
sprintf(disks[drive]->disk_name, "fd%d", drive);
- setup_timer(&motor_off_timer[drive], motor_off_callback, drive);
+ timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
}
err = register_blkdev(FLOPPY_MAJOR, "fd");
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index c61960deb74a..ccb9975a97fa 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -471,7 +471,6 @@ static void nullb_device_release(struct config_item *item)
{
struct nullb_device *dev = to_nullb_device(item);
- badblocks_exit(&dev->badblocks);
null_free_device_storage(dev, false);
null_free_dev(dev);
}
@@ -582,6 +581,10 @@ static struct nullb_device *null_alloc_dev(void)
static void null_free_dev(struct nullb_device *dev)
{
+ if (!dev)
+ return;
+
+ badblocks_exit(&dev->badblocks);
kfree(dev);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index adc877dfef5c..38fc5f397fde 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -348,7 +348,6 @@ struct rbd_client_id {
struct rbd_mapping {
u64 size;
u64 features;
- bool read_only;
};
/*
@@ -450,12 +449,11 @@ static DEFINE_IDA(rbd_dev_id_ida);
static struct workqueue_struct *rbd_wq;
/*
- * Default to false for now, as single-major requires >= 0.75 version of
- * userspace rbd utility.
+ * single-major requires >= 0.75 version of userspace rbd utility.
*/
-static bool single_major = false;
+static bool single_major = true;
module_param(single_major, bool, S_IRUGO);
-MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
+MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);
@@ -608,9 +606,6 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
bool removing = false;
- if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
- return -EROFS;
-
spin_lock_irq(&rbd_dev->lock);
if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
removing = true;
@@ -640,46 +635,24 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
- int ret = 0;
- int val;
- bool ro;
- bool ro_changed = false;
+ int ro;
- /* get_user() may sleep, so call it before taking rbd_dev->lock */
- if (get_user(val, (int __user *)(arg)))
+ if (get_user(ro, (int __user *)arg))
return -EFAULT;
- ro = val ? true : false;
- /* Snapshot doesn't allow to write*/
+ /* Snapshots can't be marked read-write */
if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
return -EROFS;
- spin_lock_irq(&rbd_dev->lock);
- /* prevent others open this device */
- if (rbd_dev->open_count > 1) {
- ret = -EBUSY;
- goto out;
- }
-
- if (rbd_dev->mapping.read_only != ro) {
- rbd_dev->mapping.read_only = ro;
- ro_changed = true;
- }
-
-out:
- spin_unlock_irq(&rbd_dev->lock);
- /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
- if (ret == 0 && ro_changed)
- set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
-
- return ret;
+ /* Let blkdev_roset() handle it */
+ return -ENOTTY;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
- int ret = 0;
+ int ret;
switch (cmd) {
case BLKROSET:
@@ -4050,15 +4023,8 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- /* Only reads are allowed to a read-only device */
-
- if (op_type != OBJ_OP_READ) {
- if (rbd_dev->mapping.read_only) {
- result = -EROFS;
- goto err_rq;
- }
- rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
- }
+ rbd_assert(op_type == OBJ_OP_READ ||
+ rbd_dev->spec->snap_id == CEPH_NOSNAP);
/*
* Quit early if the mapped snapshot no longer exists. It's
@@ -4423,7 +4389,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
/* enable the discard support */
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = segment_size;
- q->limits.discard_alignment = segment_size;
blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
@@ -5994,7 +5959,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
goto err_out_disk;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
- set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
+ set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
@@ -6145,7 +6110,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_options *rbd_opts = NULL;
struct rbd_spec *spec = NULL;
struct rbd_client *rbdc;
- bool read_only;
int rc;
if (!try_module_get(THIS_MODULE))
@@ -6194,11 +6158,8 @@ static ssize_t do_rbd_add(struct bus_type *bus,
}
/* If we are mapping a snapshot it must be marked read-only */
-
- read_only = rbd_dev->opts->read_only;
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
- read_only = true;
- rbd_dev->mapping.read_only = read_only;
+ rbd_dev->opts->read_only = true;
rc = rbd_dev_device_setup(rbd_dev);
if (rc)
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 926dce9c452f..c148e83e4ed7 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -203,9 +203,9 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
return 0;
}
-static void creg_cmd_timed_out(unsigned long data)
+static void creg_cmd_timed_out(struct timer_list *t)
{
- struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+ struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
struct creg_cmd *cmd;
spin_lock(&card->creg_ctrl.lock);
@@ -745,8 +745,7 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
mutex_init(&card->creg_ctrl.reset_lock);
INIT_LIST_HEAD(&card->creg_ctrl.queue);
spin_lock_init(&card->creg_ctrl.lock);
- setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
- (unsigned long) card);
+ timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
return 0;
}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 6a1b2177951c..beaccf197a5a 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -354,9 +354,9 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
rsxx_complete_dma(ctrl, dma, status);
}
-static void dma_engine_stalled(unsigned long data)
+static void dma_engine_stalled(struct timer_list *t)
{
- struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+ struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
int cnt;
if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
@@ -838,8 +838,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
mutex_init(&ctrl->work_lock);
INIT_LIST_HEAD(&ctrl->queue);
- setup_timer(&ctrl->activity_timer, dma_engine_stalled,
- (unsigned long)ctrl);
+ timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
if (!ctrl->issue_wq)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 2819f23e8bf2..de0d08133c7e 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -707,9 +707,9 @@ static void skd_start_queue(struct work_struct *work)
blk_mq_start_hw_queues(skdev->queue);
}
-static void skd_timer_tick(ulong arg)
+static void skd_timer_tick(struct timer_list *t)
{
- struct skd_device *skdev = (struct skd_device *)arg;
+ struct skd_device *skdev = from_timer(skdev, t, timer);
unsigned long reqflags;
u32 state;
@@ -857,7 +857,7 @@ static int skd_start_timer(struct skd_device *skdev)
{
int rc;
- setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+ timer_setup(&skdev->timer, skd_timer_tick, 0);
rc = mod_timer(&skdev->timer, (jiffies + HZ));
if (rc)
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index ad9749463d4f..5ca56bfae63c 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -81,7 +81,7 @@ struct vdc_port {
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(unsigned long _arg);
+static void vdc_ldc_reset_timer(struct timer_list *t);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
@@ -974,8 +974,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
*/
ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
- setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
- (unsigned long)port);
+ timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
@@ -1087,9 +1086,9 @@ static void vdc_queue_drain(struct vdc_port *port)
__blk_end_request_all(req, BLK_STS_IOERR);
}
-static void vdc_ldc_reset_timer(unsigned long _arg)
+static void vdc_ldc_reset_timer(struct timer_list *t)
{
- struct vdc_port *port = (struct vdc_port *) _arg;
+ struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
struct vio_driver_state *vio = &port->vio;
unsigned long flags;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 9f931f8f6b4c..af51015d056e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -239,10 +239,10 @@ static unsigned short write_postamble[] = {
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
static void act(struct floppy_state *fs);
-static void scan_timeout(unsigned long data);
-static void seek_timeout(unsigned long data);
-static void settle_timeout(unsigned long data);
-static void xfer_timeout(unsigned long data);
+static void scan_timeout(struct timer_list *t);
+static void seek_timeout(struct timer_list *t);
+static void settle_timeout(struct timer_list *t);
+static void xfer_timeout(struct timer_list *t);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
@@ -392,13 +392,12 @@ static void do_fd_request(struct request_queue * q)
}
static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long))
+ void (*proc)(struct timer_list *t))
{
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.function = proc;
- fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
}
@@ -569,9 +568,9 @@ static void act(struct floppy_state *fs)
}
}
-static void scan_timeout(unsigned long data)
+static void scan_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -594,9 +593,9 @@ static void scan_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void seek_timeout(unsigned long data)
+static void seek_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -614,9 +613,9 @@ static void seek_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void settle_timeout(unsigned long data)
+static void settle_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -644,9 +643,9 @@ static void settle_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void xfer_timeout(unsigned long data)
+static void xfer_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
@@ -1182,7 +1181,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
return -EBUSY;
}
- init_timer(&fs->timeout);
+ timer_setup(&fs->timeout, NULL, 0);
swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 0677d2514665..8077123678ad 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -718,7 +718,7 @@ static void check_batteries(struct cardinfo *card)
set_fault_to_battery_status(card);
}
-static void check_all_batteries(unsigned long ptr)
+static void check_all_batteries(struct timer_list *unused)
{
int i;
@@ -738,8 +738,7 @@ static void check_all_batteries(unsigned long ptr)
static void init_battery_timer(void)
{
- init_timer(&battery_timer);
- battery_timer.function = check_all_batteries;
+ timer_setup(&battery_timer, check_all_batteries, 0);
battery_timer.expires = jiffies + (HZ * 60);
add_timer(&battery_timer);
}
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 14459d66ef0c..c24589414c75 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -770,9 +770,9 @@ static void ace_fsm_tasklet(unsigned long data)
spin_unlock_irqrestore(&ace->lock, flags);
}
-static void ace_stall_timer(unsigned long data)
+static void ace_stall_timer(struct timer_list *t)
{
- struct ace_device *ace = (void *)data;
+ struct ace_device *ace = from_timer(ace, t, stall_timer);
unsigned long flags;
dev_warn(ace->dev,
@@ -984,7 +984,7 @@ static int ace_setup(struct ace_device *ace)
* Initialize the state machine tasklet and stall timer
*/
tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
- setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+ timer_setup(&ace->stall_timer, ace_stall_timer, 0);
/*
* Initialize the request queue
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 5b8992beffec..4ed0a78fdc09 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -23,15 +23,15 @@ static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
"lz4",
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE)
- "deflate",
-#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
"842",
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
+ "zstd",
+#endif
NULL
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f149d3e61234..d70eba30003a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -122,14 +122,6 @@ static inline bool is_partial_io(struct bio_vec *bvec)
}
#endif
-static void zram_revalidate_disk(struct zram *zram)
-{
- revalidate_disk(zram->disk);
- /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
- zram->disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
-}
-
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
@@ -436,7 +428,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
WARN_ON_ONCE(!was_set);
}
-void zram_page_end_io(struct bio *bio)
+static void zram_page_end_io(struct bio *bio)
{
struct page *page = bio->bi_io_vec[0].bv_page;
@@ -1373,7 +1365,8 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- zram_revalidate_disk(zram);
+
+ revalidate_disk(zram->disk);
up_write(&zram->init_lock);
return len;
@@ -1420,7 +1413,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- zram_revalidate_disk(zram);
+ revalidate_disk(zram->disk);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
@@ -1539,6 +1532,7 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
@@ -1563,6 +1557,8 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ zram->disk->queue->backing_dev_info->capabilities |=
+ (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
add_disk(zram->disk);
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 3e66f4cc1a59..dc7b3c7b7d42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -158,6 +158,21 @@ config TEGRA_GMI
Driver for the Tegra Generic Memory Interface bus which can be used
to attach devices such as NOR, UART, FPGA and more.
+config TI_SYSC
+ bool "TI sysc interconnect target module driver"
+ depends on ARCH_OMAP2PLUS
+ help
+ Generic driver for Texas Instruments interconnect target module
+ found on many TI SoCs.
+
+config TS_NBUS
+ tristate "Technologic Systems NBUS Driver"
+ depends on SOC_IMX28
+ depends on OF_GPIO && PWM
+ help
+ Driver for the Technologic Systems NBUS which is used to interface
+ with the peripherals in the FPGA of the TS-4600 SoM.
+
config UNIPHIER_SYSTEM_BUS
tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 3ae96cffabd5..9bcd0bf3954b 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
+obj-$(CONFIG_TI_SYSC) += ti-sysc.o
+obj-$(CONFIG_TS_NBUS) += ts-nbus.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 3c29d36702a8..5426c04fe24b 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
mutex_init(&cci_pmu->reserve_mutex);
atomic_set(&cci_pmu->active_events, 0);
- cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
+ cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
ret = cci_pmu_init(cci_pmu, pdev);
- if (ret)
+ if (ret) {
+ put_cpu();
return ret;
+ }
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
&cci_pmu->node);
+ put_cpu();
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
}
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 3063f5312397..b52332e52ca5 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -262,7 +262,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
NULL
};
-static struct attribute_group arm_ccn_pmu_format_attr_group = {
+static const struct attribute_group arm_ccn_pmu_format_attr_group = {
.name = "format",
.attrs = arm_ccn_pmu_format_attrs,
};
@@ -451,7 +451,7 @@ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
static struct attribute
*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
-static struct attribute_group arm_ccn_pmu_events_attr_group = {
+static const struct attribute_group arm_ccn_pmu_events_attr_group = {
.name = "events",
.is_visible = arm_ccn_pmu_events_is_visible,
.attrs = arm_ccn_pmu_events_attrs,
@@ -548,7 +548,7 @@ static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
NULL
};
-static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
+static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
.name = "cmp_mask",
.attrs = arm_ccn_pmu_cmp_mask_attrs,
};
@@ -569,7 +569,7 @@ static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
+static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
.attrs = arm_ccn_pmu_cpumask_attrs,
};
@@ -1268,10 +1268,12 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
if (ccn->dt.id == 0) {
name = "ccn";
} else {
- int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
-
- name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
- snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
+ name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
+ ccn->dt.id);
+ if (!name) {
+ err = -ENOMEM;
+ goto error_choose_name;
+ }
}
/* Perf driver registration */
@@ -1298,7 +1300,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
}
/* Pick one CPU which we will use to collect data from CCN... */
- cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
+ cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
@@ -1315,10 +1317,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
+ put_cpu();
return 0;
error_pmu_register:
error_set_affinity:
+ put_cpu();
+error_choose_name:
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1581,8 +1586,8 @@ static int __init arm_ccn_init(void)
static void __exit arm_ccn_exit(void)
{
- cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
platform_driver_unregister(&arm_ccn_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
}
module_init(arm_ccn_init);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
new file mode 100644
index 000000000000..c3c76a1ea8a8
--- /dev/null
+++ b/drivers/bus/ti-sysc.c
@@ -0,0 +1,583 @@
+/*
+ * ti-sysc.c - Texas Instruments sysc interconnect target driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+enum sysc_registers {
+ SYSC_REVISION,
+ SYSC_SYSCONFIG,
+ SYSC_SYSSTATUS,
+ SYSC_MAX_REGS,
+};
+
+static const char * const reg_names[] = { "rev", "sysc", "syss", };
+
+enum sysc_clocks {
+ SYSC_FCK,
+ SYSC_ICK,
+ SYSC_MAX_CLOCKS,
+};
+
+static const char * const clock_names[] = { "fck", "ick", };
+
+/**
+ * struct sysc - TI sysc interconnect target module registers and capabilities
+ * @dev: struct device pointer
+ * @module_pa: physical address of the interconnect target module
+ * @module_size: size of the interconnect target module
+ * @module_va: virtual address of the interconnect target module
+ * @offsets: register offsets from module base
+ * @clocks: clocks used by the interconnect target module
+ * @legacy_mode: configured for legacy mode if set
+ */
+struct sysc {
+ struct device *dev;
+ u64 module_pa;
+ u32 module_size;
+ void __iomem *module_va;
+ int offsets[SYSC_MAX_REGS];
+ struct clk *clocks[SYSC_MAX_CLOCKS];
+ const char *legacy_mode;
+};
+
+static u32 sysc_read_revision(struct sysc *ddata)
+{
+ return readl_relaxed(ddata->module_va +
+ ddata->offsets[SYSC_REVISION]);
+}
+
+static int sysc_get_one_clock(struct sysc *ddata,
+ enum sysc_clocks index)
+{
+ const char *name;
+ int error;
+
+ switch (index) {
+ case SYSC_FCK:
+ break;
+ case SYSC_ICK:
+ break;
+ default:
+ return -EINVAL;
+ }
+ name = clock_names[index];
+
+ ddata->clocks[index] = devm_clk_get(ddata->dev, name);
+ if (IS_ERR(ddata->clocks[index])) {
+ if (PTR_ERR(ddata->clocks[index]) == -ENOENT)
+ return 0;
+
+ dev_err(ddata->dev, "clock get error for %s: %li\n",
+ name, PTR_ERR(ddata->clocks[index]));
+
+ return PTR_ERR(ddata->clocks[index]);
+ }
+
+ error = clk_prepare(ddata->clocks[index]);
+ if (error) {
+ dev_err(ddata->dev, "clock prepare error for %s: %i\n",
+ name, error);
+
+ return error;
+ }
+
+ return 0;
+}
+
+static int sysc_get_clocks(struct sysc *ddata)
+{
+ int i, error;
+
+ if (ddata->legacy_mode)
+ return 0;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ error = sysc_get_one_clock(ddata, i);
+ if (error && error != -ENOENT)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_parse_and_check_child_range - parses module IO region from ranges
+ * @ddata: device driver data
+ *
+ * In general we only need rev, syss, and sysc registers and not the whole
+ * module range. But we do want the offsets for these registers from the
+ * module base. This allows us to check them against the legacy hwmod
+ * platform data. Let's also check the ranges are configured properly.
+ */
+static int sysc_parse_and_check_child_range(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ const __be32 *ranges;
+ u32 nr_addr, nr_size;
+ int len, error;
+
+ ranges = of_get_property(np, "ranges", &len);
+ if (!ranges) {
+ dev_err(ddata->dev, "missing ranges for %pOF\n", np);
+
+ return -ENOENT;
+ }
+
+ len /= sizeof(*ranges);
+
+ if (len < 3) {
+ dev_err(ddata->dev, "incomplete ranges for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ error = of_property_read_u32(np, "#address-cells", &nr_addr);
+ if (error)
+ return -ENOENT;
+
+ error = of_property_read_u32(np, "#size-cells", &nr_size);
+ if (error)
+ return -ENOENT;
+
+ if (nr_addr != 1 || nr_size != 1) {
+ dev_err(ddata->dev, "invalid ranges for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ ranges++;
+ ddata->module_pa = of_translate_address(np, ranges++);
+ ddata->module_size = be32_to_cpup(ranges);
+
+ dev_dbg(ddata->dev, "interconnect target 0x%llx size 0x%x for %pOF\n",
+ ddata->module_pa, ddata->module_size, np);
+
+ return 0;
+}
+
+/**
+ * sysc_check_one_child - check child configuration
+ * @ddata: device driver data
+ * @np: child device node
+ *
+ * Let's avoid messy situations where we have new interconnect target
+ * node but children have "ti,hwmods". These belong to the interconnect
+ * target node and are managed by this driver.
+ */
+static int sysc_check_one_child(struct sysc *ddata,
+ struct device_node *np)
+{
+ const char *name;
+
+ name = of_get_property(np, "ti,hwmods", NULL);
+ if (name)
+ dev_warn(ddata->dev, "really a child ti,hwmods property?");
+
+ return 0;
+}
+
+static int sysc_check_children(struct sysc *ddata)
+{
+ struct device_node *child;
+ int error;
+
+ for_each_child_of_node(ddata->dev->of_node, child) {
+ error = sysc_check_one_child(ddata, child);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_parse_one - parses the interconnect target module registers
+ * @ddata: device driver data
+ * @reg: register to parse
+ */
+static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
+{
+ struct resource *res;
+ const char *name;
+
+ switch (reg) {
+ case SYSC_REVISION:
+ case SYSC_SYSCONFIG:
+ case SYSC_SYSSTATUS:
+ name = reg_names[reg];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(to_platform_device(ddata->dev),
+ IORESOURCE_MEM, name);
+ if (!res) {
+ dev_dbg(ddata->dev, "has no %s register\n", name);
+ ddata->offsets[reg] = -ENODEV;
+
+ return 0;
+ }
+
+ ddata->offsets[reg] = res->start - ddata->module_pa;
+
+ return 0;
+}
+
+static int sysc_parse_registers(struct sysc *ddata)
+{
+ int i, error;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ error = sysc_parse_one(ddata, i);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_check_registers - check for misconfigured register overlaps
+ * @ddata: device driver data
+ */
+static int sysc_check_registers(struct sysc *ddata)
+{
+ int i, j, nr_regs = 0, nr_matches = 0;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ if (ddata->offsets[i] < 0)
+ continue;
+
+ if (ddata->offsets[i] > (ddata->module_size - 4)) {
+ dev_err(ddata->dev, "register outside module range");
+
+ return -EINVAL;
+ }
+
+ for (j = 0; j < SYSC_MAX_REGS; j++) {
+ if (ddata->offsets[j] < 0)
+ continue;
+
+ if (ddata->offsets[i] == ddata->offsets[j])
+ nr_matches++;
+ }
+ nr_regs++;
+ }
+
+ if (nr_regs < 1) {
+ dev_err(ddata->dev, "missing registers\n");
+
+ return -EINVAL;
+ }
+
+ if (nr_matches > nr_regs) {
+ dev_err(ddata->dev, "overlapping registers: (%i/%i)",
+ nr_regs, nr_matches);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * syc_ioremap - ioremap register space for the interconnect target module
+ * @ddata: deviec driver data
+ *
+ * Note that the interconnect target module registers can be anywhere
+ * within the first child device address space. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. We just
+ * what we need around the interconnect target module registers.
+ */
+static int sysc_ioremap(struct sysc *ddata)
+{
+ u32 size = 0;
+
+ if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
+ size = ddata->offsets[SYSC_SYSSTATUS];
+ else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
+ size = ddata->offsets[SYSC_SYSCONFIG];
+ else if (ddata->offsets[SYSC_REVISION] >= 0)
+ size = ddata->offsets[SYSC_REVISION];
+ else
+ return -EINVAL;
+
+ size &= 0xfff00;
+ size += SZ_256;
+
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ size);
+ if (!ddata->module_va)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * sysc_map_and_check_registers - ioremap and check device registers
+ * @ddata: device driver data
+ */
+static int sysc_map_and_check_registers(struct sysc *ddata)
+{
+ int error;
+
+ error = sysc_parse_and_check_child_range(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_children(ddata);
+ if (error)
+ return error;
+
+ error = sysc_parse_registers(ddata);
+ if (error)
+ return error;
+
+ error = sysc_ioremap(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_registers(ddata);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/**
+ * sysc_show_rev - read and show interconnect target module revision
+ * @bufp: buffer to print the information to
+ * @ddata: device driver data
+ */
+static int sysc_show_rev(char *bufp, struct sysc *ddata)
+{
+ int error, len;
+
+ if (ddata->offsets[SYSC_REVISION] < 0)
+ return sprintf(bufp, ":NA");
+
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+
+ return 0;
+ }
+
+ len = sprintf(bufp, ":%08x", sysc_read_revision(ddata));
+
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
+
+ return len;
+}
+
+static int sysc_show_reg(struct sysc *ddata,
+ char *bufp, enum sysc_registers reg)
+{
+ if (ddata->offsets[reg] < 0)
+ return sprintf(bufp, ":NA");
+
+ return sprintf(bufp, ":%x", ddata->offsets[reg]);
+}
+
+/**
+ * sysc_show_registers - show information about interconnect target module
+ * @ddata: device driver data
+ */
+static void sysc_show_registers(struct sysc *ddata)
+{
+ char buf[128];
+ char *bufp = buf;
+ int i;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++)
+ bufp += sysc_show_reg(ddata, bufp, i);
+
+ bufp += sysc_show_rev(bufp, ddata);
+
+ dev_dbg(ddata->dev, "%llx:%x%s\n",
+ ddata->module_pa, ddata->module_size,
+ buf);
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+ int i;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->legacy_mode)
+ return 0;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (IS_ERR_OR_NULL(ddata->clocks[i]))
+ continue;
+ clk_disable(ddata->clocks[i]);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int i, error;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->legacy_mode)
+ return 0;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (IS_ERR_OR_NULL(ddata->clocks[i]))
+ continue;
+ error = clk_enable(ddata->clocks[i]);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops sysc_pm_ops = {
+ SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
+ sysc_runtime_resume,
+ NULL)
+};
+
+static void sysc_unprepare(struct sysc *ddata)
+{
+ int i;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (!IS_ERR_OR_NULL(ddata->clocks[i]))
+ clk_unprepare(ddata->clocks[i]);
+ }
+}
+
+static int sysc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sysc *ddata;
+ int error;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &pdev->dev;
+ ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_map_and_check_registers(ddata);
+ if (error)
+ goto unprepare;
+
+ platform_set_drvdata(pdev, ddata);
+
+ pm_runtime_enable(ddata->dev);
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ pm_runtime_use_autosuspend(ddata->dev);
+
+ sysc_show_registers(ddata);
+
+ error = of_platform_populate(ddata->dev->of_node,
+ NULL, NULL, ddata->dev);
+ if (error)
+ goto err;
+
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
+
+ return 0;
+
+err:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+unprepare:
+ sysc_unprepare(ddata);
+
+ return error;
+}
+
+static int sysc_remove(struct platform_device *pdev)
+{
+ struct sysc *ddata = platform_get_drvdata(pdev);
+ int error;
+
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ of_platform_depopulate(&pdev->dev);
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+unprepare:
+ sysc_unprepare(ddata);
+
+ return 0;
+}
+
+static const struct of_device_id sysc_match[] = {
+ { .compatible = "ti,sysc-omap2" },
+ { .compatible = "ti,sysc-omap4" },
+ { .compatible = "ti,sysc-omap4-simple" },
+ { .compatible = "ti,sysc-omap3430-sr" },
+ { .compatible = "ti,sysc-omap3630-sr" },
+ { .compatible = "ti,sysc-omap4-sr" },
+ { .compatible = "ti,sysc-omap3-sham" },
+ { .compatible = "ti,sysc-omap-aes" },
+ { .compatible = "ti,sysc-mcasp" },
+ { .compatible = "ti,sysc-usb-host-fs" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sysc_match);
+
+static struct platform_driver sysc_driver = {
+ .probe = sysc_probe,
+ .remove = sysc_remove,
+ .driver = {
+ .name = "ti-sysc",
+ .of_match_table = sysc_match,
+ .pm = &sysc_pm_ops,
+ },
+};
+module_platform_driver(sysc_driver);
+
+MODULE_DESCRIPTION("TI sysc interconnect target driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
new file mode 100644
index 000000000000..073fd9011154
--- /dev/null
+++ b/drivers/bus/ts-nbus.c
@@ -0,0 +1,375 @@
+/*
+ * NBUS driver for TS-4600 based boards
+ *
+ * Copyright (c) 2016 - Savoir-faire Linux
+ * Author: Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This driver implements a GPIOs bit-banged bus, called the NBUS by Technologic
+ * Systems. It is used to communicate with the peripherals in the FPGA on the
+ * TS-4600 SoM.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/ts-nbus.h>
+
+#define TS_NBUS_DIRECTION_IN 0
+#define TS_NBUS_DIRECTION_OUT 1
+#define TS_NBUS_WRITE_ADR 0
+#define TS_NBUS_WRITE_VAL 1
+
+struct ts_nbus {
+ struct pwm_device *pwm;
+ struct gpio_descs *data;
+ struct gpio_desc *csn;
+ struct gpio_desc *txrx;
+ struct gpio_desc *strobe;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct mutex lock;
+};
+
+/*
+ * request all gpios required by the bus.
+ */
+static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus
+ *ts_nbus)
+{
+ ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->data)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
+ return PTR_ERR(ts_nbus->data);
+ }
+
+ ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->csn)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
+ return PTR_ERR(ts_nbus->csn);
+ }
+
+ ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->txrx)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
+ return PTR_ERR(ts_nbus->txrx);
+ }
+
+ ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->strobe)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
+ return PTR_ERR(ts_nbus->strobe);
+ }
+
+ ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->ale)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
+ return PTR_ERR(ts_nbus->ale);
+ }
+
+ ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
+ if (IS_ERR(ts_nbus->rdy)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
+ return PTR_ERR(ts_nbus->rdy);
+ }
+
+ return 0;
+}
+
+/*
+ * the data gpios are used for reading and writing values, their directions
+ * should be adjusted accordingly.
+ */
+static void ts_nbus_set_direction(struct ts_nbus *ts_nbus, int direction)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (direction == TS_NBUS_DIRECTION_IN)
+ gpiod_direction_input(ts_nbus->data->desc[i]);
+ else
+ /* when used as output the default state of the data
+ * lines are set to high */
+ gpiod_direction_output(ts_nbus->data->desc[i], 1);
+ }
+}
+
+/*
+ * reset the bus in its initial state.
+ * The data, csn, strobe and ale lines must be zero'ed to let the FPGA knows a
+ * new transaction can be process.
+ */
+static void ts_nbus_reset_bus(struct ts_nbus *ts_nbus)
+{
+ int i;
+ int values[8];
+
+ for (i = 0; i < 8; i++)
+ values[i] = 0;
+
+ gpiod_set_array_value_cansleep(8, ts_nbus->data->desc, values);
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->strobe, 0);
+ gpiod_set_value_cansleep(ts_nbus->ale, 0);
+}
+
+/*
+ * let the FPGA knows it can process.
+ */
+static void ts_nbus_start_transaction(struct ts_nbus *ts_nbus)
+{
+ gpiod_set_value_cansleep(ts_nbus->strobe, 1);
+}
+
+/*
+ * read a byte value from the data gpios.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_byte(struct ts_nbus *ts_nbus, u8 *val)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ int ret, i;
+
+ *val = 0;
+ for (i = 0; i < 8; i++) {
+ ret = gpiod_get_value_cansleep(gpios->desc[i]);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ *val |= BIT(i);
+ }
+
+ return 0;
+}
+
+/*
+ * set the data gpios accordingly to the byte value.
+ */
+static void ts_nbus_write_byte(struct ts_nbus *ts_nbus, u8 byte)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ int i;
+ int values[8];
+
+ for (i = 0; i < 8; i++)
+ if (byte & BIT(i))
+ values[i] = 1;
+ else
+ values[i] = 0;
+
+ gpiod_set_array_value_cansleep(8, gpios->desc, values);
+}
+
+/*
+ * reading the bus consists of resetting the bus, then notifying the FPGA to
+ * send the data in the data gpios and return the read value.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_bus(struct ts_nbus *ts_nbus, u8 *val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+ ts_nbus_start_transaction(ts_nbus);
+
+ return ts_nbus_read_byte(ts_nbus, val);
+}
+
+/*
+ * writing to the bus consists of resetting the bus, then define the type of
+ * command (address/value), write the data and notify the FPGA to retrieve the
+ * value in the data gpios.
+ */
+static void ts_nbus_write_bus(struct ts_nbus *ts_nbus, int cmd, u8 val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+
+ if (cmd == TS_NBUS_WRITE_ADR)
+ gpiod_set_value_cansleep(ts_nbus->ale, 1);
+
+ ts_nbus_write_byte(ts_nbus, val);
+ ts_nbus_start_transaction(ts_nbus);
+}
+
+/*
+ * read the value in the FPGA register at the given address.
+ * return 0 on success or negative errno on failure.
+ */
+int ts_nbus_read(struct ts_nbus *ts_nbus, u8 adr, u16 *val)
+{
+ int ret, i;
+ u8 byte;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in read mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 0);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* set the data gpios direction as input before reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_IN);
+
+ /* reading value MSB first */
+ do {
+ *val = 0;
+ byte = 0;
+ for (i = 1; i >= 0; i--) {
+ /* read a byte from the bus, leave on error */
+ ret = ts_nbus_read_bus(ts_nbus, &byte);
+ if (ret < 0)
+ goto err;
+
+ /* append the byte read to the final value */
+ *val |= byte << (i * 8);
+ }
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ ret = gpiod_get_value_cansleep(ts_nbus->rdy);
+ } while (ret);
+
+err:
+ /* restore the data gpios direction as output after reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_OUT);
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_read);
+
+/*
+ * write the desired value in the FPGA register at the given address.
+ */
+int ts_nbus_write(struct ts_nbus *ts_nbus, u8 adr, u16 val)
+{
+ int i;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in write mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 1);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* writing value MSB first */
+ for (i = 1; i >= 0; i--)
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_VAL, (u8)(val >> (i * 8)));
+
+ /* wait for completion */
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ while (gpiod_get_value_cansleep(ts_nbus->rdy) != 0) {
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ }
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_write);
+
+static int ts_nbus_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ struct pwm_args pargs;
+ struct device *dev = &pdev->dev;
+ struct ts_nbus *ts_nbus;
+ int ret;
+
+ ts_nbus = devm_kzalloc(dev, sizeof(*ts_nbus), GFP_KERNEL);
+ if (!ts_nbus)
+ return -ENOMEM;
+
+ mutex_init(&ts_nbus->lock);
+
+ ret = ts_nbus_init_pdata(pdev, ts_nbus);
+ if (ret < 0)
+ return ret;
+
+ pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(pwm)) {
+ ret = PTR_ERR(pwm);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to request PWM\n");
+ return ret;
+ }
+
+ pwm_get_args(pwm, &pargs);
+ if (!pargs.period) {
+ dev_err(&pdev->dev, "invalid PWM period\n");
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pwm);
+ ret = pwm_config(pwm, pargs.period, pargs.period);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * we can now start the FPGA and populate the peripherals.
+ */
+ pwm_enable(pwm);
+ ts_nbus->pwm = pwm;
+
+ /*
+ * let the child nodes retrieve this instance of the ts-nbus.
+ */
+ dev_set_drvdata(dev, ts_nbus);
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "initialized\n");
+
+ return 0;
+}
+
+static int ts_nbus_remove(struct platform_device *pdev)
+{
+ struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev);
+
+ /* shutdown the FPGA */
+ mutex_lock(&ts_nbus->lock);
+ pwm_disable(ts_nbus->pwm);
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+
+static const struct of_device_id ts_nbus_of_match[] = {
+ { .compatible = "technologic,ts-nbus", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
+
+static struct platform_driver ts_nbus_driver = {
+ .probe = ts_nbus_probe,
+ .remove = ts_nbus_remove,
+ .driver = {
+ .name = "ts_nbus",
+ .of_match_table = ts_nbus_of_match,
+ },
+};
+
+module_platform_driver(ts_nbus_driver);
+
+MODULE_ALIAS("platform:ts_nbus");
+MODULE_AUTHOR("Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Technologic Systems NBUS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 1a0385ed6417..839ee61d352a 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -74,7 +74,7 @@
#endif /* TRACING */
static DEFINE_MUTEX(dtlk_mutex);
-static void dtlk_timer_tick(unsigned long data);
+static void dtlk_timer_tick(struct timer_list *unused);
static int dtlk_major;
static int dtlk_port_lpc;
@@ -259,7 +259,7 @@ static unsigned int dtlk_poll(struct file *file, poll_table * wait)
return mask;
}
-static void dtlk_timer_tick(unsigned long data)
+static void dtlk_timer_tick(struct timer_list *unused)
{
TRACE_TEXT(" dtlk_timer_tick");
wake_up_interruptible(&dtlk_process_list);
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 5b8db2ed844d..7700280717f2 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -122,11 +122,11 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
/* Last time scheduled */
static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
-static void hangcheck_fire(unsigned long);
+static void hangcheck_fire(struct timer_list *);
static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
-static void hangcheck_fire(unsigned long data)
+static void hangcheck_fire(struct timer_list *unused)
{
unsigned long long cur_tsc, tsc_diff;
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 70d434bc1cbf..6edfaa72b98b 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -204,9 +204,6 @@ static ssize_t bt_bmc_read(struct file *file, char __user *buf,
ssize_t ret = 0;
ssize_t nread;
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
WARN_ON(*ppos);
if (wait_event_interruptible(bt_bmc->queue,
@@ -277,9 +274,6 @@ static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
if (count < 5)
return -EINVAL;
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
-
WARN_ON(*ppos);
/*
@@ -373,9 +367,9 @@ static const struct file_operations bt_bmc_fops = {
.unlocked_ioctl = bt_bmc_ioctl,
};
-static void poll_timer(unsigned long data)
+static void poll_timer(struct timer_list *t)
{
- struct bt_bmc *bt_bmc = (void *)data;
+ struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
wake_up(&bt_bmc->queue);
@@ -493,8 +487,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
} else {
dev_info(dev, "No IRQ; using timer\n");
- setup_timer(&bt_bmc->poll_timer, poll_timer,
- (unsigned long)bt_bmc);
+ timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
add_timer(&bt_bmc->poll_timer);
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9de189db2cc3..f45732a2cb3e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4766,7 +4766,7 @@ static struct timer_list ipmi_timer;
static atomic_t stop_operation;
-static void ipmi_timeout(unsigned long data)
+static void ipmi_timeout(struct timer_list *unused)
{
ipmi_smi_t intf;
int nt = 0;
@@ -5172,7 +5172,7 @@ static int ipmi_init_msghandler(void)
#endif /* CONFIG_IPMI_PROC_INTERFACE */
- setup_timer(&ipmi_timer, ipmi_timeout, 0);
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 71d33a1807e4..71fad747c0c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -199,6 +199,9 @@ struct smi_info {
/* The timer for this si. */
struct timer_list si_timer;
+ /* This flag is set, if the timer can be set */
+ bool timer_can_start;
+
/* This flag is set, if the timer is running (timer_pending() isn't enough) */
bool timer_running;
@@ -355,6 +358,8 @@ out:
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
+ if (!smi_info->timer_can_start)
+ return;
smi_info->last_timeout_jiffies = jiffies;
mod_timer(&smi_info->si_timer, new_val);
smi_info->timer_running = true;
@@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
{
unsigned char msg[2];
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
- if (start_timer)
- start_new_msg(smi_info, msg, 2);
- else
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ start_new_msg(smi_info, msg, 2);
smi_info->si_state = SI_CHECKING_ENABLES;
}
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
{
unsigned char msg[3];
@@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
msg[2] = WDT_PRE_TIMEOUT_INT;
- if (start_timer)
- start_new_msg(smi_info, msg, 3);
- else
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+ start_new_msg(smi_info, msg, 3);
smi_info->si_state = SI_CLEARING_FLAGS;
}
@@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info)
* Note that we cannot just use disable_irq(), since the interrupt may
* be shared.
*/
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = true;
- start_check_enables(smi_info, start_timer);
+ start_check_enables(smi_info);
return true;
}
return false;
@@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = false;
- start_check_enables(smi_info, true);
+ start_check_enables(smi_info);
return true;
}
return false;
@@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
msg = ipmi_alloc_smi_msg();
if (!msg) {
- if (!disable_si_irq(smi_info, true))
+ if (!disable_si_irq(smi_info))
smi_info->si_state = SI_NORMAL;
} else if (enable_si_irq(smi_info)) {
ipmi_free_smi_msg(msg);
@@ -483,7 +482,7 @@ retry:
/* Watchdog pre-timeout */
smi_inc_stat(smi_info, watchdog_pretimeouts);
- start_clear_flags(smi_info, true);
+ start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
if (smi_info->intf)
ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -866,7 +865,7 @@ restart:
* disable and messages disabled.
*/
if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
- start_check_enables(smi_info, true);
+ start_check_enables(smi_info);
} else {
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
@@ -1091,9 +1090,9 @@ static void set_need_watch(void *send_info, bool enable)
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
-static void smi_timeout(unsigned long data)
+static void smi_timeout(struct timer_list *t)
{
- struct smi_info *smi_info = (struct smi_info *) data;
+ struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
@@ -1166,7 +1165,8 @@ static int smi_start_processing(void *send_info,
new_smi->intf = intf;
/* Set up the timer that drives the interface. */
- setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ timer_setup(&new_smi->si_timer, smi_timeout, 0);
+ new_smi->timer_can_start = true;
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
@@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
check_set_rcv_irq(smi_info);
}
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
if (smi_info->thread != NULL)
kthread_stop(smi_info->thread);
+
+ smi_info->timer_can_start = false;
if (smi_info->timer_running)
del_timer_sync(&smi_info->si_timer);
}
@@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi)
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
*/
- start_clear_flags(new_smi, false);
+ start_clear_flags(new_smi);
/*
* IRQ is defined to be set when non-zero. req_events will
@@ -2238,7 +2240,7 @@ out_err_remove_attrs:
dev_set_drvdata(new_smi->io.dev, NULL);
out_err_stop_timer:
- wait_for_timer_and_thread(new_smi);
+ stop_timer_and_thread(new_smi);
out_err:
new_smi->interrupt_disabled = true;
@@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
*/
if (to_clean->io.irq_cleanup)
to_clean->io.irq_cleanup(&to_clean->io);
- wait_for_timer_and_thread(to_clean);
+ stop_timer_and_thread(to_clean);
/*
* Timeouts are stopped, now make sure the interrupts are off
@@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
schedule_timeout_uninterruptible(1);
}
if (to_clean->handlers)
- disable_si_irq(to_clean, false);
+ disable_si_irq(to_clean);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
schedule_timeout_uninterruptible(1);
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 090b073ab441..6b10f0e18a95 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
{
struct si_sm_io io;
+ memset(&io, 0, sizeof(io));
+
io.si_type = SI_KCS;
io.addr_source = SI_DEVICETREE;
io.addr_type = IPMI_MEM_ADDR_SPACE;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 99771f5cad07..27dd11c49d21 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
io.addr_source_cleanup = ipmi_pci_cleanup;
io.addr_source_data = pdev;
- if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
io.addr_type = IPMI_IO_ADDR_SPACE;
- else
+ io.io_setup = ipmi_si_port_setup;
+ } else {
io.addr_type = IPMI_MEM_ADDR_SPACE;
+ io.io_setup = ipmi_si_mem_setup;
+ }
io.addr_data = pci_resource_start(pdev, 0);
io.regspacing = ipmi_pci_probe_regspacing(&io);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 466b3a1c0adf..3cfaec728604 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -551,9 +551,9 @@ static void start_get(struct ssif_info *ssif_info)
}
}
-static void retry_timeout(unsigned long data)
+static void retry_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = (void *) data;
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
bool waiting;
@@ -1691,8 +1691,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
spin_lock_init(&ssif_info->lock);
ssif_info->ssif_state = SSIF_NORMAL;
- setup_timer(&ssif_info->retry_timer, retry_timeout,
- (unsigned long)ssif_info);
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
for (i = 0; i < SSIF_NUM_STATS; i++)
atomic_set(&ssif_info->stats[i], 0);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 970e1242a282..6aefe5370e5b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,6 +343,10 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ /* Does it even fit in phys_addr_t? */
+ if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ return -EINVAL;
+
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 44006ed9558f..a7113b78251a 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -23,7 +23,7 @@
#define __NWBUTTON_C /* Tell the header file who we are */
#include "nwbutton.h"
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static int button_press_count; /* The count of button presses */
/* Times for the end of a sequence */
@@ -127,7 +127,7 @@ static void button_consume_callbacks (int bpcount)
* any matching registered function callbacks, initiate reboot, etc.).
*/
-static void button_sequence_finished (unsigned long parameters)
+static void button_sequence_finished(struct timer_list *unused)
{
if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
button_press_count == reboot_count)
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
index abee3ca74801..9dedfd7adc0e 100644
--- a/drivers/char/nwbutton.h
+++ b/drivers/char/nwbutton.h
@@ -25,7 +25,7 @@ struct button_callback {
/* Function prototypes: */
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static irqreturn_t button_handler (int irq, void *dev_id);
int button_init (void);
int button_add_callback (void (*callback) (void), int count);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index cd53771b9ae7..370e0a64ead1 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -659,9 +659,9 @@ static void terminate_monitor(struct cm4000_dev *dev)
* is already doing that for you.
*/
-static void monitor_card(unsigned long p)
+static void monitor_card(struct timer_list *t)
{
- struct cm4000_dev *dev = (struct cm4000_dev *) p;
+ struct cm4000_dev *dev = from_timer(dev, t, timer);
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
struct ptsreq ptsreq;
@@ -1374,7 +1374,7 @@ static void start_monitor(struct cm4000_dev *dev)
DEBUGP(3, dev, "-> start_monitor\n");
if (!dev->monitor_running) {
DEBUGP(5, dev, "create, init and add timer\n");
- setup_timer(&dev->timer, monitor_card, (unsigned long)dev);
+ timer_setup(&dev->timer, monitor_card, 0);
dev->monitor_running = 1;
mod_timer(&dev->timer, jiffies);
} else
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 382c864814d9..9a1aaf538758 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -104,9 +104,9 @@ static inline unsigned char xinb(unsigned short port)
/* poll the device fifo status register. not to be confused with
* the poll syscall. */
-static void cm4040_do_poll(unsigned long dummy)
+static void cm4040_do_poll(struct timer_list *t)
{
- struct reader_dev *dev = (struct reader_dev *) dummy;
+ struct reader_dev *dev = from_timer(dev, t, poll_timer);
unsigned int obs = xinb(dev->p_dev->resource[0]->start
+ REG_OFFSET_BUFFER_STATUS);
@@ -465,7 +465,6 @@ static int cm4040_open(struct inode *inode, struct file *filp)
link->open = 1;
- dev->poll_timer.data = (unsigned long) dev;
mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
@@ -585,7 +584,7 @@ static int reader_probe(struct pcmcia_device *link)
init_waitqueue_head(&dev->poll_wait);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
- setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
+ timer_setup(&dev->poll_timer, cm4040_do_poll, 0);
ret = reader_config(link, i);
if (ret) {
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 62be953e5fb0..aa502e9fb7fa 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -375,7 +375,7 @@ static void reset_device(MGSLPC_INFO *info);
static void hdlc_mode(MGSLPC_INFO *info);
static void async_mode(MGSLPC_INFO *info);
-static void tx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
static int carrier_raised(struct tty_port *port);
static void dtr_rts(struct tty_port *port, int onoff);
@@ -1289,7 +1289,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
memset(&info->icount, 0, sizeof(info->icount));
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = claim_resources(info);
@@ -3846,9 +3846,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
/* HDLC frame time out
* update stats and do tx completion processing
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- MGSLPC_INFO *info = (MGSLPC_INFO*)context;
+ MGSLPC_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6c7ccac2679e..ec42c8bb9b0d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,7 +259,6 @@
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
-#include <linux/kmemcheck.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/syscalls.h>
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 616871e68e09..5542a438bbd0 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -135,7 +135,7 @@ static struct fasync_struct *rtc_async_queue;
static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
#ifdef RTC_IRQ
-static void rtc_dropped_irq(unsigned long data);
+static void rtc_dropped_irq(struct timer_list *unused);
static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
#endif
@@ -1171,7 +1171,7 @@ module_exit(rtc_exit);
* for something that requires a steady > 1KHz signal anyways.)
*/
-static void rtc_dropped_irq(unsigned long data)
+static void rtc_dropped_irq(struct timer_list *unused)
{
unsigned long freq;
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 461bf0b8a094..230b99288024 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -22,9 +22,9 @@
#include "tpm.h"
#include "tpm-dev.h"
-static void user_reader_timeout(unsigned long ptr)
+static void user_reader_timeout(struct timer_list *t)
{
- struct file_priv *priv = (struct file_priv *)ptr;
+ struct file_priv *priv = from_timer(priv, t, user_read_timer);
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
@@ -48,8 +48,7 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
priv->chip = chip;
atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
- setup_timer(&priv->user_read_timer, user_reader_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index aadabd9d1e2b..cd8d689138ff 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -14,14 +14,20 @@
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
#include "pmc.h"
-#define UTMI_FIXED_MUL 40
+/*
+ * The purpose of this clock is to generate a 480 MHz signal. A different
+ * rate can't be configured.
+ */
+#define UTMI_RATE 480000000
struct clk_utmi {
struct clk_hw hw;
- struct regmap *regmap;
+ struct regmap *regmap_pmc;
+ struct regmap *regmap_sfr;
};
#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
@@ -37,13 +43,54 @@ static inline bool clk_utmi_ready(struct regmap *regmap)
static int clk_utmi_prepare(struct clk_hw *hw)
{
+ struct clk_hw *hw_parent;
struct clk_utmi *utmi = to_clk_utmi(hw);
unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT |
AT91_PMC_BIASEN;
+ unsigned int utmi_ref_clk_freq;
+ unsigned long parent_rate;
+
+ /*
+ * If mainck rate is different from 12 MHz, we have to configure the
+ * FREQ field of the SFR_UTMICKTRIM register to generate properly
+ * the utmi clock.
+ */
+ hw_parent = clk_hw_get_parent(hw);
+ parent_rate = clk_hw_get_rate(hw_parent);
+
+ switch (parent_rate) {
+ case 12000000:
+ utmi_ref_clk_freq = 0;
+ break;
+ case 16000000:
+ utmi_ref_clk_freq = 1;
+ break;
+ case 24000000:
+ utmi_ref_clk_freq = 2;
+ break;
+ /*
+ * Not supported on SAMA5D2 but it's not an issue since MAINCK
+ * maximum value is 24 MHz.
+ */
+ case 48000000:
+ utmi_ref_clk_freq = 3;
+ break;
+ default:
+ pr_err("UTMICK: unsupported mainck rate\n");
+ return -EINVAL;
+ }
- regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);
+ if (utmi->regmap_sfr) {
+ regmap_update_bits(utmi->regmap_sfr, AT91_SFR_UTMICKTRIM,
+ AT91_UTMICKTRIM_FREQ, utmi_ref_clk_freq);
+ } else if (utmi_ref_clk_freq) {
+ pr_err("UTMICK: sfr node required\n");
+ return -EINVAL;
+ }
- while (!clk_utmi_ready(utmi->regmap))
+ regmap_update_bits(utmi->regmap_pmc, AT91_CKGR_UCKR, uckr, uckr);
+
+ while (!clk_utmi_ready(utmi->regmap_pmc))
cpu_relax();
return 0;
@@ -53,21 +100,22 @@ static int clk_utmi_is_prepared(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- return clk_utmi_ready(utmi->regmap);
+ return clk_utmi_ready(utmi->regmap_pmc);
}
static void clk_utmi_unprepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0);
+ regmap_update_bits(utmi->regmap_pmc, AT91_CKGR_UCKR,
+ AT91_PMC_UPLLEN, 0);
}
static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- /* UTMI clk is a fixed clk multiplier */
- return parent_rate * UTMI_FIXED_MUL;
+ /* UTMI clk rate is fixed. */
+ return UTMI_RATE;
}
static const struct clk_ops utmi_ops = {
@@ -78,7 +126,7 @@ static const struct clk_ops utmi_ops = {
};
static struct clk_hw * __init
-at91_clk_register_utmi(struct regmap *regmap,
+at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
const char *name, const char *parent_name)
{
struct clk_utmi *utmi;
@@ -97,7 +145,8 @@ at91_clk_register_utmi(struct regmap *regmap,
init.flags = CLK_SET_RATE_GATE;
utmi->hw.init = &init;
- utmi->regmap = regmap;
+ utmi->regmap_pmc = regmap_pmc;
+ utmi->regmap_sfr = regmap_sfr;
hw = &utmi->hw;
ret = clk_hw_register(NULL, &utmi->hw);
@@ -114,17 +163,35 @@ static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
struct clk_hw *hw;
const char *parent_name;
const char *name = np->name;
- struct regmap *regmap;
+ struct regmap *regmap_pmc, *regmap_sfr;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
+ regmap_pmc = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap_pmc))
return;
- hw = at91_clk_register_utmi(regmap, name, parent_name);
+ /*
+ * If the device supports different mainck rates, this value has to be
+ * set in the UTMI Clock Trimming register.
+ * - 9x5: mainck supports several rates but it is indicated that a
+ * 12 MHz is needed in case of USB.
+ * - sama5d3 and sama5d2: mainck supports several rates. Configuring
+ * the FREQ field of the UTMI Clock Trimming register is mandatory.
+ * - sama5d4: mainck is at 12 MHz.
+ *
+ * We only need to retrieve sama5d3 or sama5d2 sfr regmap.
+ */
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d3-sfr");
+ if (IS_ERR(regmap_sfr)) {
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ regmap_sfr = NULL;
+ }
+
+ hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name);
if (IS_ERR(hw))
return;
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index 1d9187df167b..4c4bd85f707c 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -30,6 +30,15 @@ config CLK_BCM_CYGNUS
help
Enable common clock framework support for the Broadcom Cygnus SoC
+config CLK_BCM_HR2
+ bool "Broadcom Hurricane 2 clock support"
+ depends on ARCH_BCM_HR2 || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_HR2
+ help
+ Enable common clock framework support for the Broadcom Hurricane 2
+ SoC
+
config CLK_BCM_NSP
bool "Broadcom Northstar/Northstar Plus clock support"
depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
index e3f0cb0d90f3..002661d39128 100644
--- a/drivers/clk/bcm/Makefile
+++ b/drivers/clk/bcm/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835-aux.o
obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o
obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o
+obj-$(CONFIG_CLK_BCM_HR2) += clk-hr2.o
obj-$(CONFIG_CLK_BCM_NSP) += clk-nsp.o
obj-$(CONFIG_CLK_BCM_NS2) += clk-ns2.o
obj-$(CONFIG_CLK_BCM_SR) += clk-sr.o
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index bd750cf2238d..77e276d61702 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/bcm2835.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/bcm2835-aux.h>
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 58ce6af8452d..44301a3d9963 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -37,7 +37,6 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
-#include <linux/clk/bcm2835.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -416,35 +415,6 @@ static int bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
return regdump ? 0 : -ENOMEM;
}
-/*
- * These are fixed clocks. They're probably not all root clocks and it may
- * be possible to turn them on and off but until this is mapped out better
- * it's the only way they can be used.
- */
-void __init bcm2835_init_clocks(void)
-{
- struct clk_hw *hw;
- int ret;
-
- hw = clk_hw_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 126000000);
- if (IS_ERR(hw))
- pr_err("apb_pclk not registered\n");
-
- hw = clk_hw_register_fixed_rate(NULL, "uart0_pclk", NULL, 0, 3000000);
- if (IS_ERR(hw))
- pr_err("uart0_pclk not registered\n");
- ret = clk_hw_register_clkdev(hw, NULL, "20201000.uart");
- if (ret)
- pr_err("uart0_pclk alias not registered\n");
-
- hw = clk_hw_register_fixed_rate(NULL, "uart1_pclk", NULL, 0, 125000000);
- if (IS_ERR(hw))
- pr_err("uart1_pclk not registered\n");
- ret = clk_hw_register_clkdev(hw, NULL, "20215000.uart");
- if (ret)
- pr_err("uart1_pclk alias not registered\n");
-}
-
struct bcm2835_pll_data {
const char *name;
u32 cm_ctrl_reg;
diff --git a/drivers/clk/bcm/clk-hr2.c b/drivers/clk/bcm/clk-hr2.c
new file mode 100644
index 000000000000..f7c5b7379475
--- /dev/null
+++ b/drivers/clk/bcm/clk-hr2.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk-iproc.h"
+
+static void __init hr2_armpll_init(struct device_node *node)
+{
+ iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(hr2_armpll, "brcm,hr2-armpll", hr2_armpll_init);
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index c37a7f0e83aa..281f4322355c 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -579,18 +579,13 @@ static u32 *parent_process(const char *clocks[],
*/
parent_names = kmalloc_array(parent_count, sizeof(*parent_names),
GFP_KERNEL);
- if (!parent_names) {
- pr_err("%s: error allocating %u parent names\n", __func__,
- parent_count);
+ if (!parent_names)
return ERR_PTR(-ENOMEM);
- }
/* There is at least one parent, so allocate a selector array */
parent_sel = kmalloc_array(parent_count, sizeof(*parent_sel),
GFP_KERNEL);
if (!parent_sel) {
- pr_err("%s: error allocating %u parent selectors\n", __func__,
- parent_count);
kfree(parent_names);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index c933be01c7db..0a7e7d5a7506 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -665,7 +665,7 @@ static int cdce925_probe(struct i2c_client *client,
init.ops = &cdce925_pll_ops;
init.flags = 0;
init.parent_names = &parent_name;
- init.num_parents = parent_name ? 1 : 0;
+ init.num_parents = 1;
/* Register PLL clocks */
for (i = 0; i < data->chip_info->num_plls; ++i) {
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 86b245746a6b..151513c655c3 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -15,9 +15,7 @@
#include <linux/clk-provider.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -95,14 +93,12 @@ const struct clk_ops clk_gpio_mux_ops = {
EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags,
- const struct clk_ops *clk_gpio_ops)
+ const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+ unsigned long flags, const struct clk_ops *clk_gpio_ops)
{
struct clk_gpio *clk_gpio;
struct clk_hw *hw;
struct clk_init_data init = {};
- unsigned long gpio_flags;
int err;
if (dev)
@@ -113,32 +109,13 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
if (!clk_gpio)
return ERR_PTR(-ENOMEM);
- if (active_low)
- gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
- else
- gpio_flags = GPIOF_OUT_INIT_LOW;
-
- if (dev)
- err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
- else
- err = gpio_request_one(gpio, gpio_flags, name);
- if (err) {
- if (err != -EPROBE_DEFER)
- pr_err("%s: %s: Error requesting clock control gpio %u\n",
- __func__, name, gpio);
- if (!dev)
- kfree(clk_gpio);
-
- return ERR_PTR(err);
- }
-
init.name = name;
init.ops = clk_gpio_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = parent_names;
init.num_parents = num_parents;
- clk_gpio->gpiod = gpio_to_desc(gpio);
+ clk_gpio->gpiod = gpiod;
clk_gpio->hw.init = &init;
hw = &clk_gpio->hw;
@@ -151,7 +128,6 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
return hw;
if (!dev) {
- gpiod_put(clk_gpio->gpiod);
kfree(clk_gpio);
}
@@ -164,29 +140,27 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of this clock's parent
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
+ * @gpiod: gpio descriptor to gate this clock
* @flags: clock flags
*/
struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+ const char *parent_name, struct gpio_desc *gpiod,
unsigned long flags)
{
return clk_register_gpio(dev, name,
(parent_name ? &parent_name : NULL),
- (parent_name ? 1 : 0), gpio, active_low, flags,
+ (parent_name ? 1 : 0), gpiod, flags,
&clk_gpio_gate_ops);
}
EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+ const char *parent_name, struct gpio_desc *gpiod,
unsigned long flags)
{
struct clk_hw *hw;
- hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpio, active_low,
- flags);
+ hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpiod, flags);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
@@ -199,13 +173,12 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
* @name: name of this clock
* @parent_names: names of this clock's parents
* @num_parents: number of parents listed in @parent_names
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
+ * @gpiod: gpio descriptor to gate this clock
* @flags: clock flags
*/
struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags)
+ const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+ unsigned long flags)
{
if (num_parents != 2) {
pr_err("mux-clock %s must have 2 parents\n", name);
@@ -213,18 +186,18 @@ struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
}
return clk_register_gpio(dev, name, parent_names, num_parents,
- gpio, active_low, flags, &clk_gpio_mux_ops);
+ gpiod, flags, &clk_gpio_mux_ops);
}
EXPORT_SYMBOL_GPL(clk_hw_register_gpio_mux);
struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags)
+ const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+ unsigned long flags)
{
struct clk_hw *hw;
hw = clk_hw_register_gpio_mux(dev, name, parent_names, num_parents,
- gpio, active_low, flags);
+ gpiod, flags);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
@@ -236,10 +209,10 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const char **parent_names, *gpio_name;
unsigned int num_parents;
- int gpio;
- enum of_gpio_flags of_flags;
+ struct gpio_desc *gpiod;
struct clk *clk;
- bool active_low, is_mux;
+ bool is_mux;
+ int ret;
num_parents = of_clk_get_parent_count(node);
if (num_parents) {
@@ -255,28 +228,27 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
- gpio_name = is_mux ? "select-gpios" : "enable-gpios";
- gpio = of_get_named_gpio_flags(node, gpio_name, 0, &of_flags);
- if (gpio < 0) {
- if (gpio == -EPROBE_DEFER)
+ gpio_name = is_mux ? "select" : "enable";
+ gpiod = devm_gpiod_get(&pdev->dev, gpio_name, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret == -EPROBE_DEFER)
pr_debug("%s: %s: GPIOs not yet available, retry later\n",
node->name, __func__);
else
- pr_err("%s: %s: Can't get '%s' DT property\n",
+ pr_err("%s: %s: Can't get '%s' named GPIO property\n",
node->name, __func__,
gpio_name);
- return gpio;
+ return ret;
}
- active_low = of_flags & OF_GPIO_ACTIVE_LOW;
-
if (is_mux)
clk = clk_register_gpio_mux(&pdev->dev, node->name,
- parent_names, num_parents, gpio, active_low, 0);
+ parent_names, num_parents, gpiod, 0);
else
clk = clk_register_gpio_gate(&pdev->dev, node->name,
- parent_names ? parent_names[0] : NULL, gpio,
- active_low, 0);
+ parent_names ? parent_names[0] : NULL, gpiod,
+ 0);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index bbf237173b37..c4ee280f454d 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -139,7 +139,7 @@ static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
- dev_dbg(clk->dev, "write configurarion: %#x\n", val);
+ dev_dbg(clk->dev, "write configuration: %#x\n", val);
hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}
@@ -169,7 +169,7 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
val = hsdk_pll_read(clk, CGU_PLL_CTRL);
- dev_dbg(clk->dev, "current configurarion: %#x\n", val);
+ dev_dbg(clk->dev, "current configuration: %#x\n", val);
/* Check if PLL is disabled */
if (val & CGU_PLL_CTRL_PD)
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 16a3d5717f4e..39cabe157163 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -134,11 +134,9 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
}
/* allocate the mux */
- mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
- if (!mux) {
- pr_err("%s: could not allocate mux clk\n", __func__);
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
if (clk_mux_flags & CLK_MUX_READ_ONLY)
diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
index a94c3f56c590..61c3e40507d3 100644
--- a/drivers/clk/clk-stm32h7.c
+++ b/drivers/clk/clk-stm32h7.c
@@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
mux_ops = div_ops = gate_ops = NULL;
mux_hw = div_hw = gate_hw = NULL;
- if (gcfg->mux && gcfg->mux) {
+ if (gcfg->mux && cfg->mux) {
mux = _get_cmux(base + cfg->mux->offset,
cfg->mux->shift,
cfg->mux->width,
@@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
}
}
- if (gcfg->gate && gcfg->gate) {
+ if (gcfg->gate && cfg->gate) {
gate = _get_cgate(base + cfg->gate->offset,
cfg->gate->bit_idx,
gcfg->gate->flags, lock);
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 7b222a5db931..25dfe050ae9f 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -82,7 +82,7 @@ static const struct clk_ops twl6040_pdmclk_ops = {
.recalc_rate = twl6040_pdmclk_recalc_rate,
};
-static struct clk_init_data twl6040_pdmclk_init = {
+static const struct clk_init_data twl6040_pdmclk_init = {
.name = "pdmclk",
.ops = &twl6040_pdmclk_ops,
.flags = CLK_GET_RATE_NOCACHE,
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index ec8aafda6e24..7b3e1921771f 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -229,15 +229,15 @@
#define U300_SYSCON_S0CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S0CCR_CLOCK_SELECT_MASK (0x001E)
#define U300_SYSCON_S0CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S0CCR_SEL_MCLK (0x8<<1)
-#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK (0xA<<1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK (0xC<<1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK (0xD<<1)
-#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK (0xE<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK (0x0<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK (0x2<<1)
-#define U300_SYSCON_S0CCR_SEL_RTC_CLK (0x4<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK (0x6<<1)
+#define U300_SYSCON_S0CCR_SEL_MCLK (0x8 << 1)
+#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK (0xA << 1)
+#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK (0xC << 1)
+#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK (0xD << 1)
+#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK (0xE << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK (0x0 << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK (0x2 << 1)
+#define U300_SYSCON_S0CCR_SEL_RTC_CLK (0x4 << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK (0x6 << 1)
/* SYS_1_CLK_CONTROL second clock control 16 bit (R/W) */
#define U300_SYSCON_S1CCR (0x124)
#define U300_SYSCON_S1CCR_FIELD_MASK (0x43FF)
@@ -247,16 +247,16 @@
#define U300_SYSCON_S1CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S1CCR_CLOCK_SELECT_MASK (0x001E)
#define U300_SYSCON_S1CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S1CCR_SEL_MCLK (0x8<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK (0xA<<1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK (0xC<<1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK (0xD<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK (0xE<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK (0x0<<1)
-#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK (0x2<<1)
-#define U300_SYSCON_S1CCR_SEL_RTC_CLK (0x4<<1)
-#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK (0x6<<1)
-/* SYS_2_CLK_CONTROL third clock contol 16 bit (R/W) */
+#define U300_SYSCON_S1CCR_SEL_MCLK (0x8 << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK (0xA << 1)
+#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK (0xC << 1)
+#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK (0xD << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK (0xE << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
+#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK (0x2 << 1)
+#define U300_SYSCON_S1CCR_SEL_RTC_CLK (0x4 << 1)
+#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK (0x6 << 1)
+/* SYS_2_CLK_CONTROL third clock control 16 bit (R/W) */
#define U300_SYSCON_S2CCR (0x128)
#define U300_SYSCON_S2CCR_FIELD_MASK (0xC3FF)
#define U300_SYSCON_S2CCR_CLK_STEAL (0x8000)
@@ -266,15 +266,15 @@
#define U300_SYSCON_S2CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S2CCR_CLOCK_SELECT_MASK (0x001E)
#define U300_SYSCON_S2CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S2CCR_SEL_MCLK (0x8<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK (0xA<<1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK (0xC<<1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK (0xD<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK (0xE<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK (0x0<<1)
-#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK (0x2<<1)
-#define U300_SYSCON_S2CCR_SEL_RTC_CLK (0x4<<1)
-#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK (0x6<<1)
+#define U300_SYSCON_S2CCR_SEL_MCLK (0x8 << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK (0xA << 1)
+#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK (0xC << 1)
+#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK (0xD << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK (0xE << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
+#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK (0x2 << 1)
+#define U300_SYSCON_S2CCR_SEL_RTC_CLK (0x4 << 1)
+#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK (0x6 << 1)
/* SC_PLL_IRQ_CONTROL 16bit (R/W) */
#define U300_SYSCON_PICR (0x0130)
#define U300_SYSCON_PICR_MASK (0x00FF)
@@ -378,7 +378,7 @@
* +- ISP Image Signal Processor (U335 only)
* +- CDS (U335 only)
* +- DMA Direct Memory Access Controller
- * +- AAIF APP/ACC Inteface (Mobile Scalable Link, MSL)
+ * +- AAIF APP/ACC Interface (Mobile Scalable Link, MSL)
* +- APEX
* +- VIDEO_ENC AVE2/3 Video Encoder
* +- XGAM Graphics Accelerator Controller
@@ -568,14 +568,14 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
struct clk_syscon *sclk = to_syscon(hw);
u16 perf = syscon_get_perf();
- switch(sclk->clk_val) {
+ switch (sclk->clk_val) {
case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
case U300_SYSCON_SBCER_I2C0_CLK_EN:
case U300_SYSCON_SBCER_I2C1_CLK_EN:
case U300_SYSCON_SBCER_MMC_CLK_EN:
case U300_SYSCON_SBCER_SPI_CLK_EN:
/* The FAST clocks have one progression */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
@@ -586,7 +586,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
case U300_SYSCON_SBCER_NANDIF_CLK_EN:
case U300_SYSCON_SBCER_XGAM_CLK_EN:
/* AMBA interconnect peripherals */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 6500000;
@@ -598,7 +598,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
case U300_SYSCON_SBCER_SEMI_CLK_EN:
case U300_SYSCON_SBCER_EMIF_CLK_EN:
/* EMIF speeds */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
@@ -609,7 +609,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
}
case U300_SYSCON_SBCER_CPU_CLK_EN:
/* And the fast CPU clock */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
@@ -702,12 +702,10 @@ syscon_clk_register(struct device *dev, const char *name,
struct clk_init_data init;
int ret;
- sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL);
- if (!sclk) {
- pr_err("could not allocate syscon clock %s\n",
- name);
+ sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
+ if (!sclk)
return ERR_PTR(-ENOMEM);
- }
+
init.name = name;
init.ops = &syscon_clk_ops;
init.flags = flags;
@@ -1123,12 +1121,10 @@ mclk_clk_register(struct device *dev, const char *name,
struct clk_init_data init;
int ret;
- mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL);
- if (!mclk) {
- pr_err("could not allocate MMC/SD clock %s\n",
- name);
+ mclk = kzalloc(sizeof(*mclk), GFP_KERNEL);
+ if (!mclk)
return ERR_PTR(-ENOMEM);
- }
+
init.name = "mclk";
init.ops = &mclk_ops;
init.flags = 0;
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index a47960aacfa5..146769532325 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -52,7 +52,7 @@ static const struct clk_ops wm831x_xtal_ops = {
.recalc_rate = wm831x_xtal_recalc_rate,
};
-static struct clk_init_data wm831x_xtal_init = {
+static const struct clk_init_data wm831x_xtal_init = {
.name = "xtal",
.ops = &wm831x_xtal_ops,
};
@@ -225,7 +225,7 @@ static const struct clk_ops wm831x_fll_ops = {
.get_parent = wm831x_fll_get_parent,
};
-static struct clk_init_data wm831x_fll_init = {
+static const struct clk_init_data wm831x_fll_init = {
.name = "fll",
.ops = &wm831x_fll_ops,
.parent_names = wm831x_fll_parents,
@@ -338,7 +338,7 @@ static const struct clk_ops wm831x_clkout_ops = {
.set_parent = wm831x_clkout_set_parent,
};
-static struct clk_init_data wm831x_clkout_init = {
+static const struct clk_init_data wm831x_clkout_init = {
.name = "clkout",
.ops = &wm831x_clkout_ops,
.parent_names = wm831x_clkout_parents,
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 4c75821a3933..531b030d4d4e 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -146,10 +146,8 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
/* allocate the APM clock structure */
apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
- if (!apmclk) {
- pr_err("%s: could not allocate APM clk\n", __func__);
+ if (!apmclk)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &xgene_clk_pll_ops;
@@ -191,7 +189,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty
int version = xgene_pllclk_version(np);
reg = of_iomap(np, 0);
- if (reg == NULL) {
+ if (!reg) {
pr_err("Unable to map CSR register for %pOF\n", np);
return;
}
@@ -467,7 +465,7 @@ static int xgene_clk_enable(struct clk_hw *hw)
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
- if (pclk->param.csr_reg != NULL) {
+ if (pclk->param.csr_reg) {
pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
@@ -507,7 +505,7 @@ static void xgene_clk_disable(struct clk_hw *hw)
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
- if (pclk->param.csr_reg != NULL) {
+ if (pclk->param.csr_reg) {
pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
/* First put the CSR in reset */
data = xgene_clk_read(pclk->param.csr_reg +
@@ -533,7 +531,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
struct xgene_clk *pclk = to_xgene_clk(hw);
u32 data = 0;
- if (pclk->param.csr_reg != NULL) {
+ if (pclk->param.csr_reg) {
pr_debug("%s clock checking\n", clk_hw_get_name(hw));
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
@@ -542,7 +540,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
"disabled");
}
- if (pclk->param.csr_reg == NULL)
+ if (!pclk->param.csr_reg)
return 1;
return data & pclk->param.reg_clk_mask ? 1 : 0;
}
@@ -650,10 +648,8 @@ static struct clk *xgene_register_clk(struct device *dev,
/* allocate the APM clock structure */
apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
- if (!apmclk) {
- pr_err("%s: could not allocate APM clk\n", __func__);
+ if (!apmclk)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &xgene_clk_ops;
@@ -709,7 +705,7 @@ static void __init xgene_devclk_init(struct device_node *np)
break;
}
map_res = of_iomap(np, i);
- if (map_res == NULL) {
+ if (!map_res) {
pr_err("Unable to map resource %d for %pOF\n", i, np);
goto err;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c8d83acda006..647d056df88c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
@@ -46,6 +47,7 @@ struct clk_core {
const struct clk_ops *ops;
struct clk_hw *hw;
struct module *owner;
+ struct device *dev;
struct clk_core *parent;
const char **parent_names;
struct clk_core **parents;
@@ -87,6 +89,26 @@ struct clk {
struct hlist_node clks_node;
};
+/*** runtime pm ***/
+static int clk_pm_runtime_get(struct clk_core *core)
+{
+ int ret = 0;
+
+ if (!core->dev)
+ return 0;
+
+ ret = pm_runtime_get_sync(core->dev);
+ return ret < 0 ? ret : 0;
+}
+
+static void clk_pm_runtime_put(struct clk_core *core)
+{
+ if (!core->dev)
+ return;
+
+ pm_runtime_put_sync(core->dev);
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -150,6 +172,8 @@ static void clk_enable_unlock(unsigned long flags)
static bool clk_core_is_prepared(struct clk_core *core)
{
+ bool ret = false;
+
/*
* .is_prepared is optional for clocks that can prepare
* fall back to software usage counter if it is missing
@@ -157,11 +181,18 @@ static bool clk_core_is_prepared(struct clk_core *core)
if (!core->ops->is_prepared)
return core->prepare_count;
- return core->ops->is_prepared(core->hw);
+ if (!clk_pm_runtime_get(core)) {
+ ret = core->ops->is_prepared(core->hw);
+ clk_pm_runtime_put(core);
+ }
+
+ return ret;
}
static bool clk_core_is_enabled(struct clk_core *core)
{
+ bool ret = false;
+
/*
* .is_enabled is only mandatory for clocks that gate
* fall back to software usage counter if .is_enabled is missing
@@ -169,7 +200,29 @@ static bool clk_core_is_enabled(struct clk_core *core)
if (!core->ops->is_enabled)
return core->enable_count;
- return core->ops->is_enabled(core->hw);
+ /*
+ * Check if clock controller's device is runtime active before
+ * calling .is_enabled callback. If not, assume that clock is
+ * disabled, because we might be called from atomic context, from
+ * which pm_runtime_get() is not allowed.
+ * This function is called mainly from clk_disable_unused_subtree,
+ * which ensures proper runtime pm activation of controller before
+ * taking enable spinlock, but the below check is needed if one tries
+ * to call it from other places.
+ */
+ if (core->dev) {
+ pm_runtime_get_noresume(core->dev);
+ if (!pm_runtime_active(core->dev)) {
+ ret = false;
+ goto done;
+ }
+ }
+
+ ret = core->ops->is_enabled(core->hw);
+done:
+ clk_pm_runtime_put(core);
+
+ return ret;
}
/*** helper functions ***/
@@ -489,6 +542,8 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
+ clk_pm_runtime_put(core);
+
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
}
@@ -530,10 +585,14 @@ static int clk_core_prepare(struct clk_core *core)
return 0;
if (core->prepare_count == 0) {
- ret = clk_core_prepare(core->parent);
+ ret = clk_pm_runtime_get(core);
if (ret)
return ret;
+ ret = clk_core_prepare(core->parent);
+ if (ret)
+ goto runtime_put;
+
trace_clk_prepare(core);
if (core->ops->prepare)
@@ -541,15 +600,18 @@ static int clk_core_prepare(struct clk_core *core)
trace_clk_prepare_complete(core);
- if (ret) {
- clk_core_unprepare(core->parent);
- return ret;
- }
+ if (ret)
+ goto unprepare;
}
core->prepare_count++;
return 0;
+unprepare:
+ clk_core_unprepare(core->parent);
+runtime_put:
+ clk_pm_runtime_put(core);
+ return ret;
}
static int clk_core_prepare_lock(struct clk_core *core)
@@ -745,6 +807,9 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
+ if (clk_pm_runtime_get(core))
+ return;
+
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -753,6 +818,8 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
+
+ clk_pm_runtime_put(core);
}
static void clk_disable_unused_subtree(struct clk_core *core)
@@ -768,6 +835,9 @@ static void clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
+ if (clk_pm_runtime_get(core))
+ goto unprepare_out;
+
flags = clk_enable_lock();
if (core->enable_count)
@@ -792,6 +862,8 @@ static void clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
+ clk_pm_runtime_put(core);
+unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1038,9 +1110,13 @@ EXPORT_SYMBOL_GPL(clk_get_accuracy);
static unsigned long clk_recalc(struct clk_core *core,
unsigned long parent_rate)
{
- if (core->ops->recalc_rate)
- return core->ops->recalc_rate(core->hw, parent_rate);
- return parent_rate;
+ unsigned long rate = parent_rate;
+
+ if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
+ rate = core->ops->recalc_rate(core->hw, parent_rate);
+ clk_pm_runtime_put(core);
+ }
+ return rate;
}
/**
@@ -1565,6 +1641,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
{
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
+ int ret = 0;
if (!core)
return 0;
@@ -1581,21 +1658,28 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
if (!top)
return -EINVAL;
+ ret = clk_pm_runtime_get(core);
+ if (ret)
+ return ret;
+
/* notify that we are about to change rates */
fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
if (fail_clk) {
pr_debug("%s: failed to set %s rate\n", __func__,
fail_clk->name);
clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
/* change the rates */
clk_change_rate(top);
core->req_rate = req_rate;
+err:
+ clk_pm_runtime_put(core);
- return 0;
+ return ret;
}
/**
@@ -1826,12 +1910,16 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
p_rate = parent->rate;
}
+ ret = clk_pm_runtime_get(core);
+ if (ret)
+ goto out;
+
/* propagate PRE_RATE_CHANGE notifications */
ret = __clk_speculate_rates(core, p_rate);
/* abort if a driver objects */
if (ret & NOTIFY_STOP_MASK)
- goto out;
+ goto runtime_put;
/* do the re-parent */
ret = __clk_set_parent(core, parent, p_index);
@@ -1844,6 +1932,8 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
__clk_recalc_accuracies(core);
}
+runtime_put:
+ clk_pm_runtime_put(core);
out:
clk_prepare_unlock();
@@ -2350,7 +2440,7 @@ static inline void clk_debug_unregister(struct clk_core *core)
*/
static int __clk_core_init(struct clk_core *core)
{
- int i, ret = 0;
+ int i, ret;
struct clk_core *orphan;
struct hlist_node *tmp2;
unsigned long rate;
@@ -2360,6 +2450,10 @@ static int __clk_core_init(struct clk_core *core)
clk_prepare_lock();
+ ret = clk_pm_runtime_get(core);
+ if (ret)
+ goto unlock;
+
/* check to see if a clock with this name is already registered */
if (clk_core_lookup(core->name)) {
pr_debug("%s: clk %s already initialized\n",
@@ -2512,6 +2606,8 @@ static int __clk_core_init(struct clk_core *core)
kref_init(&core->ref);
out:
+ clk_pm_runtime_put(core);
+unlock:
clk_prepare_unlock();
if (!ret)
@@ -2583,6 +2679,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
goto fail_name;
}
core->ops = hw->init->ops;
+ if (dev && pm_runtime_enabled(dev))
+ core->dev = dev;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
@@ -3177,6 +3275,37 @@ int of_clk_add_hw_provider(struct device_node *np,
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
+static void devm_of_clk_release_provider(struct device *dev, void *res)
+{
+ of_clk_del_provider(*(struct device_node **)res);
+}
+
+int devm_of_clk_add_hw_provider(struct device *dev,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data)
+{
+ struct device_node **ptr, *np;
+ int ret;
+
+ ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ np = dev->of_node;
+ ret = of_clk_add_hw_provider(np, get, data);
+ if (!ret) {
+ *ptr = np;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
+
/**
* of_clk_del_provider() - Remove a previously registered clock provider
* @np: Device node pointer associated with clock provider
@@ -3198,6 +3327,27 @@ void of_clk_del_provider(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
+static int devm_clk_provider_match(struct device *dev, void *res, void *data)
+{
+ struct device_node **np = res;
+
+ if (WARN_ON(!np || !*np))
+ return 0;
+
+ return *np == data;
+}
+
+void devm_of_clk_del_provider(struct device *dev)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_of_clk_release_provider,
+ devm_clk_provider_match, dev->of_node);
+
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_of_clk_del_provider);
+
static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
struct of_phandle_args *clkspec)
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index fa0fba653898..77072c7778b9 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -415,7 +415,7 @@ static int mmc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return mmc_clk_set_timing(hw, rate);
}
-static struct clk_ops clk_mmc_ops = {
+static const struct clk_ops clk_mmc_ops = {
.prepare = mmc_clk_prepare,
.determine_rate = mmc_clk_determine_rate,
.set_rate = mmc_clk_set_rate,
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index a18258eb89cb..f40419959656 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = {
/* crgctrl */
static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = {
- { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, },
+ { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, },
{ HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, },
{ HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, },
{ HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, },
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index e786d717f75d..a87809d4bd52 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -145,7 +145,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
{ HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, },
{ HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
{ HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
- { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
+ { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, },
};
static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
index 14b05efa3c2a..9584f0c32dda 100644
--- a/drivers/clk/hisilicon/clk-hix5hd2.c
+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
@@ -208,7 +208,7 @@ static void clk_ether_unprepare(struct clk_hw *hw)
writel_relaxed(val, clk->ctrl_reg);
}
-static struct clk_ops clk_ether_ops = {
+static const struct clk_ops clk_ether_ops = {
.prepare = clk_ether_prepare,
.unprepare = clk_ether_unprepare,
};
@@ -247,7 +247,7 @@ static void clk_complex_disable(struct clk_hw *hw)
writel_relaxed(val, clk->phy_reg);
}
-static struct clk_ops clk_complex_ops = {
+static const struct clk_ops clk_complex_ops = {
.enable = clk_complex_enable,
.disable = clk_complex_disable,
};
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index 7908bc3c9ec7..f36bdef91831 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -88,7 +88,7 @@ static int clkgate_separated_is_enabled(struct clk_hw *hw)
return reg ? 1 : 0;
}
-static struct clk_ops clkgate_separated_ops = {
+static const struct clk_ops clkgate_separated_ops = {
.enable = clkgate_separated_enable,
.disable = clkgate_separated_disable,
.is_enabled = clkgate_separated_is_enabled,
@@ -105,10 +105,8 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
struct clk_init_data init;
sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
- if (!sclk) {
- pr_err("%s: fail to allocate separated gated clk\n", __func__);
+ if (!sclk)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &clkgate_separated_ops;
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index ed8bb5f7507f..8478948e858e 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -47,6 +47,8 @@
#define HI3798CV200_FIXED_12M 81
#define HI3798CV200_FIXED_48M 82
#define HI3798CV200_FIXED_60M 83
+#define HI3798CV200_FIXED_166P5M 84
+#define HI3798CV200_SDIO0_MUX 85
#define HI3798CV200_CRG_NR_CLKS 128
@@ -63,6 +65,7 @@ static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
{ HI3798CV200_FIXED_75M, "75m", NULL, 0, 75000000, },
{ HI3798CV200_FIXED_100M, "100m", NULL, 0, 100000000, },
{ HI3798CV200_FIXED_150M, "150m", NULL, 0, 150000000, },
+ { HI3798CV200_FIXED_166P5M, "166p5m", NULL, 0, 165000000, },
{ HI3798CV200_FIXED_200M, "200m", NULL, 0, 200000000, },
{ HI3798CV200_FIXED_250M, "250m", NULL, 0, 250000000, },
};
@@ -75,12 +78,19 @@ static const char *const comphy1_mux_p[] = {
"100m", "25m"};
static u32 comphy1_mux_table[] = {2, 3};
+static const char *const sdio_mux_p[] = {
+ "100m", "50m", "150m", "166p5m" };
+static u32 sdio_mux_table[] = {0, 1, 2, 3};
+
static struct hisi_mux_clock hi3798cv200_mux_clks[] = {
{ HI3798CV200_MMC_MUX, "mmc_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
CLK_SET_RATE_PARENT, 0xa0, 8, 3, 0, mmc_mux_table, },
{ HI3798CV200_COMBPHY1_MUX, "combphy1_mux",
comphy1_mux_p, ARRAY_SIZE(comphy1_mux_p),
CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy1_mux_table, },
+ { HI3798CV200_SDIO0_MUX, "sdio0_mux", sdio_mux_p,
+ ARRAY_SIZE(sdio_mux_p), CLK_SET_RATE_PARENT,
+ 0x9c, 8, 2, 0, sdio_mux_table, },
};
static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
@@ -104,7 +114,7 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
/* SDIO */
{ HISTB_SDIO0_BIU_CLK, "clk_sdio0_biu", "200m",
CLK_SET_RATE_PARENT, 0x9c, 0, 0, },
- { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "mmc_mux",
+ { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "sdio0_mux",
CLK_SET_RATE_PARENT, 0x9c, 1, 0, },
/* EMMC */
{ HISTB_MMC_BIU_CLK, "clk_mmc_biu", "200m",
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 5cc99590f9a3..6df3389687bc 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -72,7 +72,7 @@ static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static struct clk_ops clk_busy_divider_ops = {
+static const struct clk_ops clk_busy_divider_ops = {
.recalc_rate = clk_busy_divider_recalc_rate,
.round_rate = clk_busy_divider_round_rate,
.set_rate = clk_busy_divider_set_rate,
@@ -147,7 +147,7 @@ static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
return ret;
}
-static struct clk_ops clk_busy_mux_ops = {
+static const struct clk_ops clk_busy_mux_ops = {
.get_parent = clk_busy_mux_get_parent,
.set_parent = clk_busy_mux_set_parent,
};
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index db44a198a0d9..60fc9d7a9723 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -118,7 +118,7 @@ static void clk_gate2_disable_unused(struct clk_hw *hw)
spin_unlock_irqrestore(gate->lock, flags);
}
-static struct clk_ops clk_gate2_ops = {
+static const struct clk_ops clk_gate2_ops = {
.enable = clk_gate2_enable,
.disable = clk_gate2_disable,
.disable_unused = clk_gate2_disable_unused,
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index c07df719b8a3..8d518ad5dc13 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -761,7 +761,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
- clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
+ clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4);
clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 5e8c18afce9a..85c118164469 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -267,7 +267,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6ULL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
}
clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
- clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
+ clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT);
clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 2305699db467..80dc211eb74b 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -54,11 +54,6 @@ static const char *arm_m4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_usb_main_clk", };
-static const char *arm_m0_sel[] = { "osc", "pll_sys_main_120m_clk",
- "pll_enet_125m_clk", "pll_sys_pfd2_135m_clk",
- "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
- "pll_usb_main_clk", };
-
static const char *axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd5_clk",
"pll_audio_post_div", "pll_video_main_clk", "pll_sys_pfd7_clk", };
@@ -510,7 +505,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel));
clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel));
- clks[IMX7D_ARM_M0_ROOT_SRC] = imx_clk_mux2("arm_m0_src", base + 0x8100, 24, 3, arm_m0_sel, ARRAY_SIZE(arm_m0_sel));
clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel));
clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel));
clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel));
@@ -582,7 +576,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28);
clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- clks[IMX7D_ARM_M0_ROOT_CG] = imx_clk_gate3("arm_m0_cg", "arm_m0_src", base + 0x8100, 28);
clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate3("axi_cg", "axi_src", base + 0x8800, 28);
clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28);
clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28);
@@ -721,7 +714,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3);
clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- clks[IMX7D_ARM_M0_ROOT_DIV] = imx_clk_divider2("arm_m0_div", "arm_m0_cg", base + 0x8100, 0, 3);
clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6);
clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6);
clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6);
@@ -793,11 +785,10 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate4("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0);
clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0);
- clks[IMX7D_ARM_M0_ROOT_CLK] = imx_clk_gate4("arm_m0_root_clk", "arm_m0_div", base + 0x4020, 0);
clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0);
clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0);
clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0);
- clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0);
+ clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0);
clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0);
clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0);
clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0);
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index e47a1c2fe8bd..4ba9973d4c18 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -107,7 +107,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
return ull;
}
-static struct clk_ops clk_pllv1_ops = {
+static const struct clk_ops clk_pllv1_ops = {
.recalc_rate = clk_pllv1_recalc_rate,
};
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index 9842d657e974..85b5cbe9744c 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -227,7 +227,7 @@ static void clk_pllv2_unprepare(struct clk_hw *hw)
__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
}
-static struct clk_ops clk_pllv2_ops = {
+static const struct clk_ops clk_pllv2_ops = {
.prepare = clk_pllv2_prepare,
.unprepare = clk_pllv2_unprepare,
.recalc_rate = clk_pllv2_recalc_rate,
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 28739a9a6e37..59dc0aad553c 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -50,6 +50,56 @@ config COMMON_CLK_MT2701_BDPSYS
---help---
This driver supports Mediatek MT2701 bdpsys clocks.
+config COMMON_CLK_MT2712
+ bool "Clock driver for Mediatek MT2712"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ ---help---
+ This driver supports Mediatek MT2712 basic clocks.
+
+config COMMON_CLK_MT2712_BDPSYS
+ bool "Clock driver for Mediatek MT2712 bdpsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 bdpsys clocks.
+
+config COMMON_CLK_MT2712_IMGSYS
+ bool "Clock driver for Mediatek MT2712 imgsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 imgsys clocks.
+
+config COMMON_CLK_MT2712_JPGDECSYS
+ bool "Clock driver for Mediatek MT2712 jpgdecsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 jpgdecsys clocks.
+
+config COMMON_CLK_MT2712_MFGCFG
+ bool "Clock driver for Mediatek MT2712 mfgcfg"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 mfgcfg clocks.
+
+config COMMON_CLK_MT2712_MMSYS
+ bool "Clock driver for Mediatek MT2712 mmsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 mmsys clocks.
+
+config COMMON_CLK_MT2712_VDECSYS
+ bool "Clock driver for Mediatek MT2712 vdecsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 vdecsys clocks.
+
+config COMMON_CLK_MT2712_VENCSYS
+ bool "Clock driver for Mediatek MT2712 vencsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 vencsys clocks.
+
config COMMON_CLK_MT6797
bool "Clock driver for Mediatek MT6797"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -82,6 +132,36 @@ config COMMON_CLK_MT6797_VENCSYS
---help---
This driver supports Mediatek MT6797 vencsys clocks.
+config COMMON_CLK_MT7622
+ bool "Clock driver for MediaTek MT7622"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ ---help---
+ This driver supports MediaTek MT7622 basic clocks and clocks
+ required for various periperals found on MediaTek.
+
+config COMMON_CLK_MT7622_ETHSYS
+ bool "Clock driver for MediaTek MT7622 ETHSYS"
+ depends on COMMON_CLK_MT7622
+ ---help---
+ This driver add support for clocks for Ethernet and SGMII
+ required on MediaTek MT7622 SoC.
+
+config COMMON_CLK_MT7622_HIFSYS
+ bool "Clock driver for MediaTek MT7622 HIFSYS"
+ depends on COMMON_CLK_MT7622
+ ---help---
+ This driver supports MediaTek MT7622 HIFSYS clocks providing
+ to PCI-E and USB.
+
+config COMMON_CLK_MT7622_AUDSYS
+ bool "Clock driver for MediaTek MT7622 AUDSYS"
+ depends on COMMON_CLK_MT7622
+ ---help---
+ This driver supports MediaTek MT7622 AUDSYS clocks providing
+ to audio consumers such as I2S and TDM.
+
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index ba2a070765f0..c421ffcd49ff 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -13,5 +13,17 @@ obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
obj-$(CONFIG_COMMON_CLK_MT2701_IMGSYS) += clk-mt2701-img.o
obj-$(CONFIG_COMMON_CLK_MT2701_MMSYS) += clk-mt2701-mm.o
obj-$(CONFIG_COMMON_CLK_MT2701_VDECSYS) += clk-mt2701-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712) += clk-mt2712.o
+obj-$(CONFIG_COMMON_CLK_MT2712_BDPSYS) += clk-mt2712-bdp.o
+obj-$(CONFIG_COMMON_CLK_MT2712_IMGSYS) += clk-mt2712-img.o
+obj-$(CONFIG_COMMON_CLK_MT2712_JPGDECSYS) += clk-mt2712-jpgdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712_MFGCFG) += clk-mt2712-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT2712_MMSYS) += clk-mt2712-mm.o
+obj-$(CONFIG_COMMON_CLK_MT2712_VDECSYS) += clk-mt2712-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712_VENCSYS) += clk-mt2712-venc.o
+obj-$(CONFIG_COMMON_CLK_MT7622) += clk-mt7622.o
+obj-$(CONFIG_COMMON_CLK_MT7622_ETHSYS) += clk-mt7622-eth.o
+obj-$(CONFIG_COMMON_CLK_MT7622_HIFSYS) += clk-mt7622-hif.o
+obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 9598889f972b..8e7f16fd87c9 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -750,7 +750,7 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
static struct clk_onecell_data *infra_clk_data;
-static void mtk_infrasys_init_early(struct device_node *node)
+static void __init mtk_infrasys_init_early(struct device_node *node)
{
int r, i;
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
new file mode 100644
index 000000000000..5fe4728c076e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs bdp_cg_regs = {
+ .set_ofs = 0x100,
+ .clr_ofs = 0x100,
+ .sta_ofs = 0x100,
+};
+
+#define GATE_BDP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &bdp_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate bdp_clks[] = {
+ GATE_BDP(CLK_BDP_BRIDGE_B, "bdp_bridge_b", "mm_sel", 0),
+ GATE_BDP(CLK_BDP_BRIDGE_DRAM, "bdp_bridge_d", "mm_sel", 1),
+ GATE_BDP(CLK_BDP_LARB_DRAM, "bdp_larb_d", "mm_sel", 2),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_PXL, "bdp_vdi_pxl", "tvd_sel", 3),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_DRAM, "bdp_vdi_d", "mm_sel", 4),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_B, "bdp_vdi_b", "mm_sel", 5),
+ GATE_BDP(CLK_BDP_MT_B, "bdp_fmt_b", "mm_sel", 9),
+ GATE_BDP(CLK_BDP_DISPFMT_27M, "bdp_27m", "di_sel", 10),
+ GATE_BDP(CLK_BDP_DISPFMT_27M_VDOUT, "bdp_27m_vdout", "di_sel", 11),
+ GATE_BDP(CLK_BDP_DISPFMT_27_74_74, "bdp_27_74_74", "di_sel", 12),
+ GATE_BDP(CLK_BDP_DISPFMT_2FS, "bdp_2fs", "di_sel", 13),
+ GATE_BDP(CLK_BDP_DISPFMT_2FS_2FS74_148, "bdp_2fs74_148", "di_sel", 14),
+ GATE_BDP(CLK_BDP_DISPFMT_B, "bdp_b", "mm_sel", 15),
+ GATE_BDP(CLK_BDP_VDO_DRAM, "bdp_vdo_d", "mm_sel", 16),
+ GATE_BDP(CLK_BDP_VDO_2FS, "bdp_vdo_2fs", "di_sel", 17),
+ GATE_BDP(CLK_BDP_VDO_B, "bdp_vdo_b", "mm_sel", 18),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_DI_PXL, "bdp_di_pxl", "di_sel", 19),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_DI_DRAM, "bdp_di_d", "mm_sel", 20),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_DI_B, "bdp_di_b", "mm_sel", 21),
+ GATE_BDP(CLK_BDP_NR_AGENT, "bdp_nr_agent", "nr_sel", 22),
+ GATE_BDP(CLK_BDP_NR_DRAM, "bdp_nr_d", "mm_sel", 23),
+ GATE_BDP(CLK_BDP_NR_B, "bdp_nr_b", "mm_sel", 24),
+ GATE_BDP(CLK_BDP_BRIDGE_RT_B, "bdp_bridge_rt_b", "mm_sel", 25),
+ GATE_BDP(CLK_BDP_BRIDGE_RT_DRAM, "bdp_bridge_rt_d", "mm_sel", 26),
+ GATE_BDP(CLK_BDP_LARB_RT_DRAM, "bdp_larb_rt_d", "mm_sel", 27),
+ GATE_BDP(CLK_BDP_TVD_TDC, "bdp_tvd_tdc", "mm_sel", 28),
+ GATE_BDP(CLK_BDP_TVD_54, "bdp_tvd_clk_54", "tvd_sel", 29),
+ GATE_BDP(CLK_BDP_TVD_CBUS, "bdp_tvd_cbus", "mm_sel", 30),
+};
+
+static int clk_mt2712_bdp_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_BDP_NR_CLK);
+
+ mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_bdp[] = {
+ { .compatible = "mediatek,mt2712-bdpsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_bdp_drv = {
+ .probe = clk_mt2712_bdp_probe,
+ .driver = {
+ .name = "clk-mt2712-bdp",
+ .of_match_table = of_match_clk_mt2712_bdp,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_bdp_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
new file mode 100644
index 000000000000..139ff55d495e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &img_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_SMI_LARB2, "img_smi_larb2", "mm_sel", 0),
+ GATE_IMG(CLK_IMG_SENINF_SCAM_EN, "img_scam_en", "csi0", 3),
+ GATE_IMG(CLK_IMG_SENINF_CAM_EN, "img_cam_en", "mm_sel", 8),
+ GATE_IMG(CLK_IMG_CAM_SV_EN, "img_cam_sv_en", "mm_sel", 9),
+ GATE_IMG(CLK_IMG_CAM_SV1_EN, "img_cam_sv1_en", "mm_sel", 10),
+ GATE_IMG(CLK_IMG_CAM_SV2_EN, "img_cam_sv2_en", "mm_sel", 11),
+};
+
+static int clk_mt2712_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_img[] = {
+ { .compatible = "mediatek,mt2712-imgsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_img_drv = {
+ .probe = clk_mt2712_img_probe,
+ .driver = {
+ .name = "clk-mt2712-img",
+ .of_match_table = of_match_clk_mt2712_img,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
new file mode 100644
index 000000000000..c7d4aada4892
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs jpgdec_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_JPGDEC(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &jpgdec_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate jpgdec_clks[] = {
+ GATE_JPGDEC(CLK_JPGDEC_JPGDEC1, "jpgdec_jpgdec1", "jpgdec_sel", 0),
+ GATE_JPGDEC(CLK_JPGDEC_JPGDEC, "jpgdec_jpgdec", "jpgdec_sel", 4),
+};
+
+static int clk_mt2712_jpgdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_JPGDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, jpgdec_clks, ARRAY_SIZE(jpgdec_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_jpgdec[] = {
+ { .compatible = "mediatek,mt2712-jpgdecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_jpgdec_drv = {
+ .probe = clk_mt2712_jpgdec_probe,
+ .driver = {
+ .name = "clk-mt2712-jpgdec",
+ .of_match_table = of_match_clk_mt2712_jpgdec,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_jpgdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
new file mode 100644
index 000000000000..570f72d48d4d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mfg_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
+};
+
+static int clk_mt2712_mfg_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+ mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_mfg[] = {
+ { .compatible = "mediatek,mt2712-mfgcfg", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_mfg_drv = {
+ .probe = clk_mt2712_mfg_probe,
+ .driver = {
+ .name = "clk-mt2712-mfg",
+ .of_match_table = of_match_clk_mt2712_mfg,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
new file mode 100644
index 000000000000..a8b4b6d42488
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm2_cg_regs = {
+ .set_ofs = 0x224,
+ .clr_ofs = 0x228,
+ .sta_ofs = 0x220,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_CROP, "mm_mdp_crop", "mm_sel", 10),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "clk32k", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0_MM, "mm_pwm0_mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM0_26M, "mm_pwm0_26m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1_MM, "mm_pwm1_mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM1_26M, "mm_pwm1_26m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_lntc", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_lntc", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "vpll_dpix", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+ GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "vpll3_dpix", 10),
+ GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
+ GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "vpll_dpix", 16),
+ GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvdstx", 17),
+ GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
+ GATE_MM1(CLK_MM_SMI_COMMON1, "mm_smi_common1", "mm_sel", 21),
+ GATE_MM1(CLK_MM_SMI_LARB5, "mm_smi_larb5", "mm_sel", 22),
+ GATE_MM1(CLK_MM_MDP_RDMA2, "mm_mdp_rdma2", "mm_sel", 23),
+ GATE_MM1(CLK_MM_MDP_TDSHP2, "mm_mdp_tdshp2", "mm_sel", 24),
+ GATE_MM1(CLK_MM_DISP_OVL2, "mm_disp_ovl2", "mm_sel", 25),
+ GATE_MM1(CLK_MM_DISP_WDMA2, "mm_disp_wdma2", "mm_sel", 26),
+ GATE_MM1(CLK_MM_DISP_COLOR2, "mm_disp_color2", "mm_sel", 27),
+ GATE_MM1(CLK_MM_DISP_AAL1, "mm_disp_aal1", "mm_sel", 28),
+ GATE_MM1(CLK_MM_DISP_OD1, "mm_disp_od1", "mm_sel", 29),
+ GATE_MM1(CLK_MM_LVDS1_PIXEL, "mm_lvds1_pixel", "vpll3_dpix", 30),
+ GATE_MM1(CLK_MM_LVDS1_CTS, "mm_lvds1_cts", "lvdstx3", 31),
+ /* MM2 */
+ GATE_MM2(CLK_MM_SMI_LARB7, "mm_smi_larb7", "mm_sel", 0),
+ GATE_MM2(CLK_MM_MDP_RDMA3, "mm_mdp_rdma3", "mm_sel", 1),
+ GATE_MM2(CLK_MM_MDP_WROT2, "mm_mdp_wrot2", "mm_sel", 2),
+ GATE_MM2(CLK_MM_DSI2, "mm_dsi2", "mm_sel", 3),
+ GATE_MM2(CLK_MM_DSI2_DIGITAL, "mm_dsi2_digital", "dsi0_lntc", 4),
+ GATE_MM2(CLK_MM_DSI3, "mm_dsi3", "mm_sel", 5),
+ GATE_MM2(CLK_MM_DSI3_DIGITAL, "mm_dsi3_digital", "dsi1_lntc", 6),
+};
+
+static int clk_mt2712_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_mm[] = {
+ { .compatible = "mediatek,mt2712-mmsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_mm_drv = {
+ .probe = clk_mt2712_mm_probe,
+ .driver = {
+ .name = "clk-mt2712-mm",
+ .of_match_table = of_match_clk_mt2712_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
new file mode 100644
index 000000000000..55c64ee8cc91
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "vdec_sel", 0),
+ GATE_VDEC1(CLK_VDEC_IMGRZ_CKEN, "vdec_imgrz_cken", "vdec_sel", 1),
+};
+
+static int clk_mt2712_vdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_vdec[] = {
+ { .compatible = "mediatek,mt2712-vdecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_vdec_drv = {
+ .probe = clk_mt2712_vdec_probe,
+ .driver = {
+ .name = "clk-mt2712-vdec",
+ .of_match_table = of_match_clk_mt2712_vdec,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
new file mode 100644
index 000000000000..ccbfe98777c8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &venc_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_SMI_COMMON_CON, "venc_smi", "mm_sel", 0),
+ GATE_VENC(CLK_VENC_VENC, "venc_venc", "venc_sel", 4),
+ GATE_VENC(CLK_VENC_SMI_LARB6, "venc_smi_larb6", "jpgdec_sel", 12),
+};
+
+static int clk_mt2712_venc_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+ mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_venc[] = {
+ { .compatible = "mediatek,mt2712-vencsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_venc_drv = {
+ .probe = clk_mt2712_venc_probe,
+ .driver = {
+ .name = "clk-mt2712-venc",
+ .of_match_table = of_match_clk_mt2712_venc,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
new file mode 100644
index 000000000000..498d13799388
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static DEFINE_SPINLOCK(mt2712_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_VPLL3_DPIX, "vpll3_dpix", NULL, 200000000),
+ FIXED_CLK(CLK_TOP_VPLL_DPIX, "vpll_dpix", NULL, 200000000),
+ FIXED_CLK(CLK_TOP_LTEPLL_FS26M, "ltepll_fs26m", NULL, 26000000),
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", NULL, 350000000),
+ FIXED_CLK(CLK_TOP_DSI0_LNTC, "dsi0_lntc", NULL, 143000000),
+ FIXED_CLK(CLK_TOP_DSI1_LNTC, "dsi1_lntc", NULL, 143000000),
+ FIXED_CLK(CLK_TOP_LVDSTX3_CLKDIG_CTS, "lvdstx3", NULL, 140000000),
+ FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx", NULL, 140000000),
+ FIXED_CLK(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", NULL, 32768),
+ FIXED_CLK(CLK_TOP_CLKRTC_INT, "clkrtc_int", NULL, 32747),
+ FIXED_CLK(CLK_TOP_CSI0, "csi0", NULL, 26000000),
+ FIXED_CLK(CLK_TOP_CVBSPLL, "cvbspll", NULL, 108000000),
+};
+
+static const struct mtk_fixed_factor top_early_divs[] = {
+ FACTOR(CLK_TOP_SYS_26M, "sys_26m", "clk26m", 1,
+ 1),
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "sys_26m", 1,
+ 2),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_ARMCA35PLL, "armca35pll_ck", "armca35pll", 1,
+ 1),
+ FACTOR(CLK_TOP_ARMCA35PLL_600M, "armca35pll_600m", "armca35pll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_ARMCA35PLL_400M, "armca35pll_400m", "armca35pll_ck", 1,
+ 3),
+ FACTOR(CLK_TOP_ARMCA72PLL, "armca72pll_ck", "armca72pll", 1,
+ 1),
+ FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1,
+ 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1,
+ 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "syspll_ck", 1,
+ 3),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "syspll_ck", 1,
+ 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "syspll_ck", 1,
+ 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll_ck", "univpll", 1,
+ 1),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll_ck", 1,
+ 7),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll_ck", 1,
+ 26),
+ FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univpll_ck", 1,
+ 52),
+ FACTOR(CLK_TOP_UNIVPLL_D104, "univpll_d104", "univpll_ck", 1,
+ 104),
+ FACTOR(CLK_TOP_UNIVPLL_D208, "univpll_d208", "univpll_ck", 1,
+ 208),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll_ck", 1,
+ 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll_ck", 1,
+ 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1,
+ 8),
+ FACTOR(CLK_TOP_F_MP0_PLL1, "f_mp0_pll1_ck", "univpll_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_MP0_PLL2, "f_mp0_pll2_ck", "univpll1_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BIG_PLL1, "f_big_pll1_ck", "univpll_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BIG_PLL2, "f_big_pll2_ck", "univpll1_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BUS_PLL1, "f_bus_pll1_ck", "univpll_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BUS_PLL2, "f_bus_pll2_ck", "univpll1_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_APLL1_D16, "apll1_d16", "apll1_ck", 1,
+ 16),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_APLL2_D16, "apll2_d16", "apll2_ck", 1,
+ 16),
+ FACTOR(CLK_TOP_LVDSPLL, "lvdspll_ck", "lvdspll", 1,
+ 1),
+ FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_LVDSPLL2, "lvdspll2_ck", "lvdspll2", 1,
+ 1),
+ FACTOR(CLK_TOP_LVDSPLL2_D2, "lvdspll2_d2", "lvdspll2_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_LVDSPLL2_D4, "lvdspll2_d4", "lvdspll2_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_LVDSPLL2_D8, "lvdspll2_d8", "lvdspll2_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_ETHERPLL_125M, "etherpll_125m", "etherpll", 1,
+ 1),
+ FACTOR(CLK_TOP_ETHERPLL_50M, "etherpll_50m", "etherpll", 1,
+ 1),
+ FACTOR(CLK_TOP_CVBS, "cvbs", "cvbspll", 1,
+ 1),
+ FACTOR(CLK_TOP_CVBS_D2, "cvbs_d2", "cvbs", 1,
+ 2),
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1,
+ 1),
+ FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1,
+ 1),
+ FACTOR(CLK_TOP_VCODECPLL_D2, "vcodecpll_d2", "vcodecpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1,
+ 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_TVDPLL_429M, "tvdpll_429m", "tvdpll", 1,
+ 1),
+ FACTOR(CLK_TOP_TVDPLL_429M_D2, "tvdpll_429m_d2", "tvdpll_429m", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL_429M_D4, "tvdpll_429m_d4", "tvdpll_429m", 1,
+ 4),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_D2A_ULCLK_6P5M, "d2a_ulclk_6p5m", "clk26m", 1,
+ 4),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "msdcpll2_ck"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_429m",
+ "univpll_d3",
+ "vencpll_ck",
+ "syspll_d3",
+ "univpll1_d2",
+ "mmpll_d2",
+ "syspll3_d2",
+ "tvdpll_ck"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "univpll1_d2",
+ "mmpll_d2",
+ "tvdpll_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "vcodecpll_d2",
+ "univpll2_d2",
+ "syspll3_d2"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "univpll_d3",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2",
+ "univpll_d5",
+ "univpll2_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_d52",
+ "univpll_d208",
+ "univpll_d104",
+ "clk26m_d2",
+ "univpll_d26",
+ "univpll2_d8",
+ "syspll3_d4",
+ "syspll3_d2",
+ "univpll1_d4",
+ "univpll2_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll1_d4",
+ "univpll2_d2",
+ "univpll3_d2",
+ "univpll1_d8"
+};
+
+static const char * const usb20_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const usb30_parents[] = {
+ "clk26m",
+ "univpll3_d2",
+ "univpll3_d4",
+ "univpll2_d4"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll1_d4",
+ "syspll2_d2",
+ "msdcpll_d4",
+ "vencpll_d2",
+ "univpll1_d2",
+ "msdcpll2_ck",
+ "msdcpll2_d2",
+ "msdcpll2_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d2",
+ "univpll1_d4",
+ "syspll2_d2",
+ "univpll_d7",
+ "vencpll_d2"
+};
+
+static const char * const msdc30_3_parents[] = {
+ "clk26m",
+ "msdcpll2_ck",
+ "msdcpll2_d2",
+ "univpll2_d2",
+ "msdcpll2_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d2",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "msdcpll_d4"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d8",
+ "syspll3_d2",
+ "syspll3_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "syspll3_d4"
+};
+
+static const char * const dpilvds1_parents[] = {
+ "clk26m",
+ "lvdspll2_ck",
+ "lvdspll2_d2",
+ "lvdspll2_d4",
+ "lvdspll2_d8",
+ "clkfpc"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll_d5"
+};
+
+static const char * const nr_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d8",
+ "univpll3_d2",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const nfi2x_parents[] = {
+ "clk26m",
+ "syspll4_d4",
+ "univpll3_d4",
+ "univpll1_d8",
+ "syspll2_d4",
+ "univpll3_d2",
+ "syspll_d7",
+ "syspll2_d2",
+ "univpll2_d2",
+ "syspll_d5",
+ "syspll1_d2"
+};
+
+static const char * const irda_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const cci400_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "armca35pll_600m",
+ "armca35pll_400m",
+ "univpll_d2",
+ "syspll_d2",
+ "msdcpll_ck",
+ "univpll_d3"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const mem_mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "univpll_d3"
+};
+
+static const char * const axi_mfg_parents[] = {
+ "clk26m",
+ "axi_sel",
+ "univpll_d5"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "syspll2_d4"
+};
+
+static const char * const nfiecc_parents[] = {
+ "clk26m",
+ "nfi2x_sel",
+ "syspll_d7",
+ "syspll2_d2",
+ "univpll2_d2",
+ "univpll_d5",
+ "syspll1_d2"
+};
+
+static const char * const pe2_mac_p0_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll4_d2",
+ "syspll2_d4",
+ "univpll2_d4",
+ "syspll3_d2"
+};
+
+static const char * const dpilvds_parents[] = {
+ "clk26m",
+ "lvdspll_ck",
+ "lvdspll_d2",
+ "lvdspll_d4",
+ "lvdspll_d8",
+ "clkfpc"
+};
+
+static const char * const hdcp_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "syspll3_d4",
+ "univpll2_d4"
+};
+
+static const char * const hdcp_24m_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll_d52",
+ "univpll2_d8"
+};
+
+static const char * const rtc_parents[] = {
+ "clkrtc_int",
+ "clkrtc_ext",
+ "clk26m",
+ "univpll3_d8"
+};
+
+static const char * const spinor_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "syspll4_d4",
+ "univpll2_d8",
+ "univpll3_d4",
+ "syspll4_d2",
+ "syspll2_d4",
+ "univpll2_d4",
+ "etherpll_125m",
+ "syspll1_d4"
+};
+
+static const char * const apll_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8",
+ "apll1_d16",
+ "apll2_ck",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8",
+ "apll2_d16",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const a1sys_hp_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const a2sys_hp_parents[] = {
+ "clk26m",
+ "apll2_ck",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const asm_l_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const i2so1_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "apll2_ck"
+};
+
+static const char * const ether_125m_parents[] = {
+ "clk26m",
+ "etherpll_125m",
+ "univpll3_d2"
+};
+
+static const char * const ether_50m_parents[] = {
+ "clk26m",
+ "etherpll_50m",
+ "univpll_d26",
+ "univpll3_d4"
+};
+
+static const char * const jpgdec_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "tvdpll_429m",
+ "vencpll_ck",
+ "syspll_d3",
+ "vcodecpll_ck",
+ "univpll1_d2",
+ "armca35pll_400m",
+ "tvdpll_429m_d2",
+ "tvdpll_429m_d4"
+};
+
+static const char * const spislv_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll1_d4",
+ "univpll2_d2",
+ "univpll3_d2",
+ "univpll1_d8",
+ "univpll1_d2",
+ "univpll_d5"
+};
+
+static const char * const ether_parents[] = {
+ "clk26m",
+ "etherpll_50m",
+ "univpll_d26"
+};
+
+static const char * const di_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "tvdpll_d8",
+ "vencpll_ck",
+ "vencpll_d2",
+ "cvbs",
+ "cvbs_d2"
+};
+
+static const char * const tvd_parents[] = {
+ "clk26m",
+ "cvbs_d2",
+ "univpll2_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const msdc0p_aes_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "vcodecpll_ck"
+};
+
+static const char * const cmsys_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll2_d2"
+};
+
+static const char * const gcpu_parents[] = {
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "univpll1_d2",
+ "univpll_d5",
+ "univpll3_d2",
+ "univpll_d3"
+};
+
+static const char * const aud_apll1_parents[] = {
+ "apll1",
+ "clkaud_ext_i_1"
+};
+
+static const char * const aud_apll2_parents[] = {
+ "apll2",
+ "clkaud_ext_i_2"
+};
+
+static const char * const audull_vtx_parents[] = {
+ "d2a_ulclk_6p5m",
+ "clkaud_ext_i_0"
+};
+
+static struct mtk_composite top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x040, 0, 3,
+ 7, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x040, 8, 1,
+ 15, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_MM_SEL, "mm_sel",
+ mm_parents, 0x040, 24, 3, 31),
+ /* CLK_CFG_1 */
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel",
+ pwm_parents, 0x050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel",
+ vdec_parents, 0x050, 8, 4, 15),
+ MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel",
+ venc_parents, 0x050, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel",
+ mfg_parents, 0x050, 24, 4, 31),
+ /* CLK_CFG_2 */
+ MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel",
+ camtg_parents, 0x060, 0, 4, 7),
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel",
+ uart_parents, 0x060, 8, 1, 15),
+ MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel",
+ spi_parents, 0x060, 16, 3, 23),
+ MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel",
+ usb20_parents, 0x060, 24, 2, 31),
+ /* CLK_CFG_3 */
+ MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel",
+ usb30_parents, 0x070, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc50_0_h_sel",
+ msdc50_0_h_parents, 0x070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+ msdc50_0_parents, 0x070, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+ msdc30_1_parents, 0x070, 24, 3, 31),
+ /* CLK_CFG_4 */
+ MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel",
+ msdc30_1_parents, 0x080, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel",
+ msdc30_3_parents, 0x080, 8, 4, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel",
+ audio_parents, 0x080, 16, 2, 23),
+ MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+ aud_intbus_parents, 0x080, 24, 3, 31),
+ /* CLK_CFG_5 */
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel",
+ pmicspi_parents, 0x090, 0, 3, 7),
+ MUX_GATE(CLK_TOP_DPILVDS1_SEL, "dpilvds1_sel",
+ dpilvds1_parents, 0x090, 8, 3, 15),
+ MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel",
+ atb_parents, 0x090, 16, 2, 23),
+ MUX_GATE(CLK_TOP_NR_SEL, "nr_sel",
+ nr_parents, 0x090, 24, 3, 31),
+ /* CLK_CFG_6 */
+ MUX_GATE(CLK_TOP_NFI2X_SEL, "nfi2x_sel",
+ nfi2x_parents, 0x0a0, 0, 4, 7),
+ MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel",
+ irda_parents, 0x0a0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel",
+ cci400_parents, 0x0a0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel",
+ aud_1_parents, 0x0a0, 24, 2, 31),
+ /* CLK_CFG_7 */
+ MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel",
+ aud_2_parents, 0x0b0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MEM_MFG_IN_AS_SEL, "mem_mfg_sel",
+ mem_mfg_parents, 0x0b0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_AXI_MFG_IN_AS_SEL, "axi_mfg_sel",
+ axi_mfg_parents, 0x0b0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel",
+ scam_parents, 0x0b0, 24, 2, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE(CLK_TOP_NFIECC_SEL, "nfiecc_sel",
+ nfiecc_parents, 0x0c0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_PE2_MAC_P0_SEL, "pe2_mac_p0_sel",
+ pe2_mac_p0_parents, 0x0c0, 8, 3, 15),
+ MUX_GATE(CLK_TOP_PE2_MAC_P1_SEL, "pe2_mac_p1_sel",
+ pe2_mac_p0_parents, 0x0c0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel",
+ dpilvds_parents, 0x0c0, 24, 3, 31),
+ /* CLK_CFG_9 */
+ MUX_GATE(CLK_TOP_MSDC50_3_HCLK_SEL, "msdc50_3_h_sel",
+ msdc50_0_h_parents, 0x0d0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel",
+ hdcp_parents, 0x0d0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel",
+ hdcp_24m_parents, 0x0d0, 16, 2, 23),
+ MUX_GATE_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x0d0, 24, 2,
+ 31, CLK_IS_CRITICAL),
+ /* CLK_CFG_10 */
+ MUX_GATE(CLK_TOP_SPINOR_SEL, "spinor_sel",
+ spinor_parents, 0x500, 0, 4, 7),
+ MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel",
+ apll_parents, 0x500, 8, 4, 15),
+ MUX_GATE(CLK_TOP_APLL2_SEL, "apll2_sel",
+ apll_parents, 0x500, 16, 4, 23),
+ MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel",
+ a1sys_hp_parents, 0x500, 24, 3, 31),
+ /* CLK_CFG_11 */
+ MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel",
+ a2sys_hp_parents, 0x510, 0, 3, 7),
+ MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel",
+ asm_l_parents, 0x510, 8, 2, 15),
+ MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel",
+ asm_l_parents, 0x510, 16, 2, 23),
+ MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel",
+ asm_l_parents, 0x510, 24, 2, 31),
+ /* CLK_CFG_12 */
+ MUX_GATE(CLK_TOP_I2SO1_SEL, "i2so1_sel",
+ i2so1_parents, 0x520, 0, 2, 7),
+ MUX_GATE(CLK_TOP_I2SO2_SEL, "i2so2_sel",
+ i2so1_parents, 0x520, 8, 2, 15),
+ MUX_GATE(CLK_TOP_I2SO3_SEL, "i2so3_sel",
+ i2so1_parents, 0x520, 16, 2, 23),
+ MUX_GATE(CLK_TOP_TDMO0_SEL, "tdmo0_sel",
+ i2so1_parents, 0x520, 24, 2, 31),
+ /* CLK_CFG_13 */
+ MUX_GATE(CLK_TOP_TDMO1_SEL, "tdmo1_sel",
+ i2so1_parents, 0x530, 0, 2, 7),
+ MUX_GATE(CLK_TOP_I2SI1_SEL, "i2si1_sel",
+ i2so1_parents, 0x530, 8, 2, 15),
+ MUX_GATE(CLK_TOP_I2SI2_SEL, "i2si2_sel",
+ i2so1_parents, 0x530, 16, 2, 23),
+ MUX_GATE(CLK_TOP_I2SI3_SEL, "i2si3_sel",
+ i2so1_parents, 0x530, 24, 2, 31),
+ /* CLK_CFG_14 */
+ MUX_GATE(CLK_TOP_ETHER_125M_SEL, "ether_125m_sel",
+ ether_125m_parents, 0x540, 0, 2, 7),
+ MUX_GATE(CLK_TOP_ETHER_50M_SEL, "ether_50m_sel",
+ ether_50m_parents, 0x540, 8, 2, 15),
+ MUX_GATE(CLK_TOP_JPGDEC_SEL, "jpgdec_sel",
+ jpgdec_parents, 0x540, 16, 4, 23),
+ MUX_GATE(CLK_TOP_SPISLV_SEL, "spislv_sel",
+ spislv_parents, 0x540, 24, 3, 31),
+ /* CLK_CFG_15 */
+ MUX_GATE(CLK_TOP_ETHER_50M_RMII_SEL, "ether_sel",
+ ether_parents, 0x550, 0, 2, 7),
+ MUX_GATE(CLK_TOP_CAM2TG_SEL, "cam2tg_sel",
+ camtg_parents, 0x550, 8, 4, 15),
+ MUX_GATE(CLK_TOP_DI_SEL, "di_sel",
+ di_parents, 0x550, 16, 3, 23),
+ MUX_GATE(CLK_TOP_TVD_SEL, "tvd_sel",
+ tvd_parents, 0x550, 24, 2, 31),
+ /* CLK_CFG_16 */
+ MUX_GATE(CLK_TOP_I2C_SEL, "i2c_sel",
+ i2c_parents, 0x560, 0, 3, 7),
+ MUX_GATE(CLK_TOP_PWM_INFRA_SEL, "pwm_infra_sel",
+ pwm_parents, 0x560, 8, 2, 15),
+ MUX_GATE(CLK_TOP_MSDC0P_AES_SEL, "msdc0p_aes_sel",
+ msdc0p_aes_parents, 0x560, 16, 2, 23),
+ MUX_GATE(CLK_TOP_CMSYS_SEL, "cmsys_sel",
+ cmsys_parents, 0x560, 24, 3, 31),
+ /* CLK_CFG_17 */
+ MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel",
+ gcpu_parents, 0x570, 0, 3, 7),
+ /* CLK_AUDDIV_4 */
+ MUX(CLK_TOP_AUD_APLL1_SEL, "aud_apll1_sel",
+ aud_apll1_parents, 0x134, 0, 1),
+ MUX(CLK_TOP_AUD_APLL2_SEL, "aud_apll2_sel",
+ aud_apll2_parents, 0x134, 1, 1),
+ MUX(CLK_TOP_DA_AUDULL_VTX_6P5M_SEL, "audull_vtx_sel",
+ audull_vtx_parents, 0x134, 31, 1),
+};
+
+static const char * const mcu_mp0_parents[] = {
+ "clk26m",
+ "armca35pll_ck",
+ "f_mp0_pll1_ck",
+ "f_mp0_pll2_ck"
+};
+
+static const char * const mcu_mp2_parents[] = {
+ "clk26m",
+ "armca72pll_ck",
+ "f_big_pll1_ck",
+ "f_big_pll2_ck"
+};
+
+static const char * const mcu_bus_parents[] = {
+ "clk26m",
+ "cci400_sel",
+ "f_bus_pll1_ck",
+ "f_bus_pll2_ck"
+};
+
+static struct mtk_composite mcu_muxes[] = {
+ /* mp0_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0,
+ 9, 2, -1, CLK_IS_CRITICAL),
+ /* mp2_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8,
+ 9, 2, -1, CLK_IS_CRITICAL),
+ /* bus_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0,
+ 9, 2, -1, CLK_IS_CRITICAL),
+};
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_APLL_DIV0, "apll_div0", "i2so1_sel", 0x124, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV1, "apll_div1", "i2so2_sel", 0x124, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV2, "apll_div2", "i2so3_sel", 0x124, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV3, "apll_div3", "tdmo0_sel", 0x124, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV4, "apll_div4", "tdmo1_sel", 0x128, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV5, "apll_div5", "i2si1_sel", 0x128, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV6, "apll_div6", "i2si2_sel", 0x128, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV7, "apll_div7", "i2si3_sel", 0x128, 24, 8),
+};
+
+static const struct mtk_gate_regs top_cg_regs = {
+ .set_ofs = 0x120,
+ .clr_ofs = 0x120,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_TOP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] = {
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN0, "apll_div_pdn0", "i2so1_sel", 0),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN1, "apll_div_pdn1", "i2so2_sel", 1),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN2, "apll_div_pdn2", "i2so3_sel", 2),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN3, "apll_div_pdn3", "tdmo0_sel", 3),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN4, "apll_div_pdn4", "tdmo1_sel", 4),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN5, "apll_div_pdn5", "i2si1_sel", 5),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN6, "apll_div_pdn6", "i2si2_sel", 6),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN7, "apll_div_pdn7", "i2si3_sel", 7),
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x40,
+};
+
+#define GATE_INFRA(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_INFRA(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+ GATE_INFRA(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
+ GATE_INFRA(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
+ GATE_INFRA(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
+ GATE_INFRA(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "spi_sel", 24),
+ GATE_INFRA(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "spislv_sel", 25),
+ GATE_INFRA(CLK_INFRA_AO_UART5, "infra_ao_uart5", "axi_sel", 26),
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0xc,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x1c,
+};
+
+static const struct mtk_gate_regs peri2_cg_regs = {
+ .set_ofs = 0x42c,
+ .clr_ofs = 0x42c,
+ .sta_ofs = 0x42c,
+};
+
+#define GATE_PERI0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate peri_clks[] = {
+ /* PERI0 */
+ GATE_PERI0(CLK_PERI_NFI, "per_nfi",
+ "axi_sel", 0),
+ GATE_PERI0(CLK_PERI_THERM, "per_therm",
+ "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_PWM0, "per_pwm0",
+ "pwm_sel", 2),
+ GATE_PERI0(CLK_PERI_PWM1, "per_pwm1",
+ "pwm_sel", 3),
+ GATE_PERI0(CLK_PERI_PWM2, "per_pwm2",
+ "pwm_sel", 4),
+ GATE_PERI0(CLK_PERI_PWM3, "per_pwm3",
+ "pwm_sel", 5),
+ GATE_PERI0(CLK_PERI_PWM4, "per_pwm4",
+ "pwm_sel", 6),
+ GATE_PERI0(CLK_PERI_PWM5, "per_pwm5",
+ "pwm_sel", 7),
+ GATE_PERI0(CLK_PERI_PWM6, "per_pwm6",
+ "pwm_sel", 8),
+ GATE_PERI0(CLK_PERI_PWM7, "per_pwm7",
+ "pwm_sel", 9),
+ GATE_PERI0(CLK_PERI_PWM, "per_pwm",
+ "pwm_sel", 10),
+ GATE_PERI0(CLK_PERI_AP_DMA, "per_ap_dma",
+ "axi_sel", 13),
+ GATE_PERI0(CLK_PERI_MSDC30_0, "per_msdc30_0",
+ "msdc50_0_sel", 14),
+ GATE_PERI0(CLK_PERI_MSDC30_1, "per_msdc30_1",
+ "msdc30_1_sel", 15),
+ GATE_PERI0(CLK_PERI_MSDC30_2, "per_msdc30_2",
+ "msdc30_2_sel", 16),
+ GATE_PERI0(CLK_PERI_MSDC30_3, "per_msdc30_3",
+ "msdc30_3_sel", 17),
+ GATE_PERI0(CLK_PERI_UART0, "per_uart0",
+ "uart_sel", 20),
+ GATE_PERI0(CLK_PERI_UART1, "per_uart1",
+ "uart_sel", 21),
+ GATE_PERI0(CLK_PERI_UART2, "per_uart2",
+ "uart_sel", 22),
+ GATE_PERI0(CLK_PERI_UART3, "per_uart3",
+ "uart_sel", 23),
+ GATE_PERI0(CLK_PERI_I2C0, "per_i2c0",
+ "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_I2C1, "per_i2c1",
+ "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_I2C2, "per_i2c2",
+ "axi_sel", 26),
+ GATE_PERI0(CLK_PERI_I2C3, "per_i2c3",
+ "axi_sel", 27),
+ GATE_PERI0(CLK_PERI_I2C4, "per_i2c4",
+ "axi_sel", 28),
+ GATE_PERI0(CLK_PERI_AUXADC, "per_auxadc",
+ "ltepll_fs26m", 29),
+ GATE_PERI0(CLK_PERI_SPI0, "per_spi0",
+ "spi_sel", 30),
+ /* PERI1 */
+ GATE_PERI1(CLK_PERI_SPI, "per_spi",
+ "spinor_sel", 1),
+ GATE_PERI1(CLK_PERI_I2C5, "per_i2c5",
+ "axi_sel", 3),
+ GATE_PERI1(CLK_PERI_SPI2, "per_spi2",
+ "spi_sel", 5),
+ GATE_PERI1(CLK_PERI_SPI3, "per_spi3",
+ "spi_sel", 6),
+ GATE_PERI1(CLK_PERI_SPI5, "per_spi5",
+ "spi_sel", 8),
+ GATE_PERI1(CLK_PERI_UART4, "per_uart4",
+ "uart_sel", 9),
+ GATE_PERI1(CLK_PERI_SFLASH, "per_sflash",
+ "uart_sel", 11),
+ GATE_PERI1(CLK_PERI_GMAC, "per_gmac",
+ "uart_sel", 12),
+ GATE_PERI1(CLK_PERI_PCIE0, "per_pcie0",
+ "uart_sel", 14),
+ GATE_PERI1(CLK_PERI_PCIE1, "per_pcie1",
+ "uart_sel", 15),
+ GATE_PERI1(CLK_PERI_GMAC_PCLK, "per_gmac_pclk",
+ "uart_sel", 16),
+ /* PERI2 */
+ GATE_PERI2(CLK_PERI_MSDC50_0_EN, "per_msdc50_0_en",
+ "msdc50_0_sel", 0),
+ GATE_PERI2(CLK_PERI_MSDC30_1_EN, "per_msdc30_1_en",
+ "msdc30_1_sel", 1),
+ GATE_PERI2(CLK_PERI_MSDC30_2_EN, "per_msdc30_2_en",
+ "msdc30_2_sel", 2),
+ GATE_PERI2(CLK_PERI_MSDC30_3_EN, "per_msdc30_3_en",
+ "msdc30_3_sel", 3),
+ GATE_PERI2(CLK_PERI_MSDC50_0_HCLK_EN, "per_msdc50_0_h",
+ "msdc50_0_h_sel", 4),
+ GATE_PERI2(CLK_PERI_MSDC50_3_HCLK_EN, "per_msdc50_3_h",
+ "msdc50_3_h_sel", 5),
+};
+
+#define MT2712_PLL_FMAX (3000UL * MHZ)
+
+#define CON0_MT2712_RST_BAR BIT(24)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT2712_RST_BAR, \
+ .fmax = MT2712_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift, NULL)
+
+static const struct mtk_pll_div_table armca35pll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 1202500000 },
+ { .div = 2, .freq = 500500000 },
+ { .div = 3, .freq = 315250000 },
+ { .div = 4, .freq = 157625000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table armca72pll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 994500000 },
+ { .div = 2, .freq = 520000000 },
+ { .div = 3, .freq = 315250000 },
+ { .div = 4, .freq = 157625000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 1001000000 },
+ { .div = 2, .freq = 601250000 },
+ { .div = 3, .freq = 250250000 },
+ { .div = 4, .freq = 125125000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000101,
+ HAVE_RST_BAR, 31, 0x0230, 4, 0, 0, 0, 0x0234, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000101,
+ HAVE_RST_BAR, 31, 0x0240, 4, 0, 0, 0, 0x0244, 0),
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000101,
+ 0, 31, 0x0320, 4, 0, 0, 0, 0x0324, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000101,
+ 0, 31, 0x0280, 4, 0, 0, 0, 0x0284, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000101,
+ 0, 31, 0x0330, 4, 0x0338, 0x0014, 0, 0x0334, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000101,
+ 0, 31, 0x0350, 4, 0x0358, 0x0014, 1, 0x0354, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000101,
+ 0, 31, 0x0370, 4, 0, 0, 0, 0x0374, 0),
+ PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000101,
+ 0, 31, 0x0390, 4, 0, 0, 0, 0x0394, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000101,
+ 0, 31, 0x0270, 4, 0, 0, 0, 0x0274, 0),
+ PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000101,
+ 0, 31, 0x0410, 4, 0, 0, 0, 0x0414, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000101,
+ 0, 31, 0x0290, 4, 0, 0, 0, 0x0294, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000101,
+ 0, 31, 0x0250, 4, 0, 0, 0, 0x0254, 0,
+ mmpll_div_table),
+ PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000101,
+ HAVE_RST_BAR, 31, 0x0100, 4, 0, 0, 0, 0x0104, 0,
+ armca35pll_div_table),
+ PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000101,
+ 0, 31, 0x0210, 4, 0, 0, 0, 0x0214, 0,
+ armca72pll_div_table),
+ PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000101,
+ 0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
+};
+
+static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt2712_top_init_early(struct device_node *node)
+{
+ int r, i;
+
+ if (!top_clk_data) {
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ for (i = 0; i < CLK_TOP_NR_CLK; i++)
+ top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+
+CLK_OF_DECLARE_DRIVER(mt2712_topckgen, "mediatek,mt2712-topckgen",
+ clk_mt2712_top_init_early);
+
+static int clk_mt2712_top_probe(struct platform_device *pdev)
+{
+ int r, i;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ if (!top_clk_data) {
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ } else {
+ for (i = 0; i < CLK_TOP_NR_CLK; i++) {
+ if (top_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
+ top_clk_data->clks[i] = ERR_PTR(-ENOENT);
+ }
+ }
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ top_clk_data);
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt2712_clk_lock, top_clk_data);
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ &mt2712_clk_lock, top_clk_data);
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ top_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static int clk_mt2712_infra_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ mtk_register_reset_controller(node, 2, 0x30);
+
+ return r;
+}
+
+static int clk_mt2712_peri_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ mtk_register_reset_controller(node, 2, 0);
+
+ return r;
+}
+
+static int clk_mt2712_mcu_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+
+ mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+ &mt2712_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712[] = {
+ {
+ .compatible = "mediatek,mt2712-apmixedsys",
+ .data = clk_mt2712_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt2712-topckgen",
+ .data = clk_mt2712_top_probe,
+ }, {
+ .compatible = "mediatek,mt2712-infracfg",
+ .data = clk_mt2712_infra_probe,
+ }, {
+ .compatible = "mediatek,mt2712-pericfg",
+ .data = clk_mt2712_peri_probe,
+ }, {
+ .compatible = "mediatek,mt2712-mcucfg",
+ .data = clk_mt2712_mcu_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt2712_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *);
+ int r;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ r = clk_probe(pdev);
+ if (r != 0)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2712_drv = {
+ .probe = clk_mt2712_probe,
+ .driver = {
+ .name = "clk-mt2712",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_clk_mt2712,
+ },
+};
+
+static int __init clk_mt2712_init(void)
+{
+ return platform_driver_register(&clk_mt2712_drv);
+}
+
+arch_initcall(clk_mt2712_init);
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
new file mode 100644
index 000000000000..fad7d9fc53ba
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs audio2_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs audio3_cg_regs = {
+ .set_ofs = 0x634,
+ .clr_ofs = 0x634,
+ .sta_ofs = 0x634,
+};
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUDIO_AFE, "audio_afe", "rtc", 2),
+ GATE_AUDIO0(CLK_AUDIO_HDMI, "audio_hdmi", "apll1_ck_sel", 20),
+ GATE_AUDIO0(CLK_AUDIO_SPDF, "audio_spdf", "apll1_ck_sel", 21),
+ GATE_AUDIO0(CLK_AUDIO_APLL, "audio_apll", "apll1_ck_sel", 23),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUDIO_I2SIN1, "audio_i2sin1", "a1sys_hp_sel", 0),
+ GATE_AUDIO1(CLK_AUDIO_I2SIN2, "audio_i2sin2", "a1sys_hp_sel", 1),
+ GATE_AUDIO1(CLK_AUDIO_I2SIN3, "audio_i2sin3", "a1sys_hp_sel", 2),
+ GATE_AUDIO1(CLK_AUDIO_I2SIN4, "audio_i2sin4", "a1sys_hp_sel", 3),
+ GATE_AUDIO1(CLK_AUDIO_I2SO1, "audio_i2so1", "a1sys_hp_sel", 6),
+ GATE_AUDIO1(CLK_AUDIO_I2SO2, "audio_i2so2", "a1sys_hp_sel", 7),
+ GATE_AUDIO1(CLK_AUDIO_I2SO3, "audio_i2so3", "a1sys_hp_sel", 8),
+ GATE_AUDIO1(CLK_AUDIO_I2SO4, "audio_i2so4", "a1sys_hp_sel", 9),
+ GATE_AUDIO1(CLK_AUDIO_ASRCI1, "audio_asrci1", "asm_h_sel", 12),
+ GATE_AUDIO1(CLK_AUDIO_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
+ GATE_AUDIO1(CLK_AUDIO_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
+ GATE_AUDIO1(CLK_AUDIO_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_AUDIO1(CLK_AUDIO_INTDIR, "audio_intdir", "intdir_sel", 20),
+ GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
+ GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
+ /* AUDIO2 */
+ GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
+ GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
+ GATE_AUDIO2(CLK_AUDIO_UL3, "audio_ul3", "a1sys_hp_sel", 2),
+ GATE_AUDIO2(CLK_AUDIO_UL4, "audio_ul4", "a1sys_hp_sel", 3),
+ GATE_AUDIO2(CLK_AUDIO_UL5, "audio_ul5", "a1sys_hp_sel", 4),
+ GATE_AUDIO2(CLK_AUDIO_UL6, "audio_ul6", "a1sys_hp_sel", 5),
+ GATE_AUDIO2(CLK_AUDIO_DL1, "audio_dl1", "a1sys_hp_sel", 6),
+ GATE_AUDIO2(CLK_AUDIO_DL2, "audio_dl2", "a1sys_hp_sel", 7),
+ GATE_AUDIO2(CLK_AUDIO_DL3, "audio_dl3", "a1sys_hp_sel", 8),
+ GATE_AUDIO2(CLK_AUDIO_DL4, "audio_dl4", "a1sys_hp_sel", 9),
+ GATE_AUDIO2(CLK_AUDIO_DL5, "audio_dl5", "a1sys_hp_sel", 10),
+ GATE_AUDIO2(CLK_AUDIO_DL6, "audio_dl6", "a1sys_hp_sel", 11),
+ GATE_AUDIO2(CLK_AUDIO_DLMCH, "audio_dlmch", "a1sys_hp_sel", 12),
+ GATE_AUDIO2(CLK_AUDIO_ARB1, "audio_arb1", "a1sys_hp_sel", 13),
+ GATE_AUDIO2(CLK_AUDIO_AWB, "audio_awb", "a1sys_hp_sel", 14),
+ GATE_AUDIO2(CLK_AUDIO_AWB2, "audio_awb2", "a1sys_hp_sel", 15),
+ GATE_AUDIO2(CLK_AUDIO_DAI, "audio_dai", "a1sys_hp_sel", 16),
+ GATE_AUDIO2(CLK_AUDIO_MOD, "audio_mod", "a1sys_hp_sel", 17),
+ /* AUDIO3 */
+ GATE_AUDIO3(CLK_AUDIO_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
+ GATE_AUDIO3(CLK_AUDIO_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
+ GATE_AUDIO3(CLK_AUDIO_ASRCO3, "audio_asrco3", "asm_h_sel", 6),
+ GATE_AUDIO3(CLK_AUDIO_ASRCO4, "audio_asrco4", "asm_h_sel", 7),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC1, "audio_mem_asrc1", "asm_h_sel", 10),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC2, "audio_mem_asrc2", "asm_h_sel", 11),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC3, "audio_mem_asrc3", "asm_h_sel", 12),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC4, "audio_mem_asrc4", "asm_h_sel", 13),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC5, "audio_mem_asrc5", "asm_h_sel", 14),
+};
+
+static int clk_mt7622_audiosys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+ mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_aud[] = {
+ {
+ .compatible = "mediatek,mt7622-audsys",
+ .data = clk_mt7622_audiosys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_aud_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_aud_drv = {
+ .probe = clk_mt7622_aud_probe,
+ .driver = {
+ .name = "clk-mt7622-aud",
+ .of_match_table = of_match_clk_mt7622_aud,
+ },
+};
+
+builtin_platform_driver(clk_mt7622_aud_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
new file mode 100644
index 000000000000..6328127bbb3c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_ETH(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &eth_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate eth_clks[] = {
+ GATE_ETH(CLK_ETH_HSDMA_EN, "eth_hsdma_en", "eth_sel", 5),
+ GATE_ETH(CLK_ETH_ESW_EN, "eth_esw_en", "eth_500m", 6),
+ GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "txclk_src_pre", 7),
+ GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "txclk_src_pre", 8),
+ GATE_ETH(CLK_ETH_GP0_EN, "eth_gp0_en", "txclk_src_pre", 9),
+};
+
+static const struct mtk_gate_regs sgmii_cg_regs = {
+ .set_ofs = 0xE4,
+ .clr_ofs = 0xE4,
+ .sta_ofs = 0xE4,
+};
+
+#define GATE_SGMII(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &sgmii_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii_clks[] = {
+ GATE_SGMII(CLK_SGMII_TX250M_EN, "sgmii_tx250m_en",
+ "ssusb_tx250m", 2),
+ GATE_SGMII(CLK_SGMII_RX250M_EN, "sgmii_rx250m_en",
+ "ssusb_eq_rx250m", 3),
+ GATE_SGMII(CLK_SGMII_CDR_REF, "sgmii_cdr_ref",
+ "ssusb_cdr_ref", 4),
+ GATE_SGMII(CLK_SGMII_CDR_FB, "sgmii_cdr_fb",
+ "ssusb_cdr_fb", 5),
+};
+
+static int clk_mt7622_ethsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
+
+ mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
+
+ mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_eth[] = {
+ {
+ .compatible = "mediatek,mt7622-ethsys",
+ .data = clk_mt7622_ethsys_init,
+ }, {
+ .compatible = "mediatek,mt7622-sgmiisys",
+ .data = clk_mt7622_sgmiisys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_eth_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_eth_drv = {
+ .probe = clk_mt7622_eth_probe,
+ .driver = {
+ .name = "clk-mt7622-eth",
+ .of_match_table = of_match_clk_mt7622_eth,
+ },
+};
+
+builtin_platform_driver(clk_mt7622_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
new file mode 100644
index 000000000000..a6e8534276c6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_PCIE(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pcie_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_SSUSB(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ssusb_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate_regs pcie_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate_regs ssusb_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate ssusb_clks[] = {
+ GATE_SSUSB(CLK_SSUSB_U2_PHY_1P_EN, "ssusb_u2_phy_1p",
+ "to_u2_phy_1p", 0),
+ GATE_SSUSB(CLK_SSUSB_U2_PHY_EN, "ssusb_u2_phy_en", "to_u2_phy", 1),
+ GATE_SSUSB(CLK_SSUSB_REF_EN, "ssusb_ref_en", "to_usb3_ref", 5),
+ GATE_SSUSB(CLK_SSUSB_SYS_EN, "ssusb_sys_en", "to_usb3_sys", 6),
+ GATE_SSUSB(CLK_SSUSB_MCU_EN, "ssusb_mcu_en", "axi_sel", 7),
+ GATE_SSUSB(CLK_SSUSB_DMA_EN, "ssusb_dma_en", "hif_sel", 8),
+};
+
+static const struct mtk_gate pcie_clks[] = {
+ GATE_PCIE(CLK_PCIE_P1_AUX_EN, "pcie_p1_aux_en", "p1_1mhz", 12),
+ GATE_PCIE(CLK_PCIE_P1_OBFF_EN, "pcie_p1_obff_en", "free_run_4mhz", 13),
+ GATE_PCIE(CLK_PCIE_P1_AHB_EN, "pcie_p1_ahb_en", "axi_sel", 14),
+ GATE_PCIE(CLK_PCIE_P1_AXI_EN, "pcie_p1_axi_en", "hif_sel", 15),
+ GATE_PCIE(CLK_PCIE_P1_MAC_EN, "pcie_p1_mac_en", "pcie1_mac_en", 16),
+ GATE_PCIE(CLK_PCIE_P1_PIPE_EN, "pcie_p1_pipe_en", "pcie1_pipe_en", 17),
+ GATE_PCIE(CLK_PCIE_P0_AUX_EN, "pcie_p0_aux_en", "p0_1mhz", 18),
+ GATE_PCIE(CLK_PCIE_P0_OBFF_EN, "pcie_p0_obff_en", "free_run_4mhz", 19),
+ GATE_PCIE(CLK_PCIE_P0_AHB_EN, "pcie_p0_ahb_en", "axi_sel", 20),
+ GATE_PCIE(CLK_PCIE_P0_AXI_EN, "pcie_p0_axi_en", "hif_sel", 21),
+ GATE_PCIE(CLK_PCIE_P0_MAC_EN, "pcie_p0_mac_en", "pcie0_mac_en", 22),
+ GATE_PCIE(CLK_PCIE_P0_PIPE_EN, "pcie_p0_pipe_en", "pcie0_pipe_en", 23),
+ GATE_PCIE(CLK_SATA_AHB_EN, "sata_ahb_en", "axi_sel", 26),
+ GATE_PCIE(CLK_SATA_AXI_EN, "sata_axi_en", "hif_sel", 27),
+ GATE_PCIE(CLK_SATA_ASIC_EN, "sata_asic_en", "sata_asic", 28),
+ GATE_PCIE(CLK_SATA_RBC_EN, "sata_rbc_en", "sata_rbc", 29),
+ GATE_PCIE(CLK_SATA_PM_EN, "sata_pm_en", "univpll2_d4", 30),
+};
+
+static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
+
+ mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static int clk_mt7622_pciesys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
+
+ mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_hif[] = {
+ {
+ .compatible = "mediatek,mt7622-pciesys",
+ .data = clk_mt7622_pciesys_init,
+ }, {
+ .compatible = "mediatek,mt7622-ssusbsys",
+ .data = clk_mt7622_ssusbsys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_hif_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_hif_drv = {
+ .probe = clk_mt7622_hif_probe,
+ .driver = {
+ .name = "clk-mt7622-hif",
+ .of_match_table = of_match_clk_mt7622_hif,
+ },
+};
+
+builtin_platform_driver(clk_mt7622_hif_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
new file mode 100644
index 000000000000..92f7e32770c6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-cpumux.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+#include <linux/clk.h> /* for consumer */
+
+#define MT7622_PLL_FMAX (2500UL * MHZ)
+#define CON0_MT7622_RST_BAR BIT(27)
+
+#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table, _parent_name) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT7622_RST_BAR, \
+ .fmax = MT7622_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ .parent_name = _parent_name, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL, "clkxtal")
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &apmixed_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_INFRA(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_PERI0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static DEFINE_SPINLOCK(mt7622_clk_lock);
+
+static const char * const infra_mux1_parents[] = {
+ "clkxtal",
+ "armpll",
+ "main_core_en",
+ "armpll"
+};
+
+static const char * const axi_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "univpll_d7"
+};
+
+static const char * const mem_parents[] = {
+ "clkxtal",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clkxtal",
+ "syspll1_d8"
+};
+
+static const char * const eth_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "clk_null",
+ "univpll_d7"
+};
+
+static const char * const pwm_parents[] = {
+ "clkxtal",
+ "univpll2_d4"
+};
+
+static const char * const f10m_ref_parents[] = {
+ "clkxtal",
+ "syspll4_d16"
+};
+
+static const char * const nfi_infra_parents[] = {
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "univpll2_d8",
+ "syspll1_d8",
+ "univpll1_d8",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll3_d2",
+ "syspll1_d4"
+};
+
+static const char * const flash_parents[] = {
+ "clkxtal",
+ "univpll_d80_d4",
+ "syspll2_d8",
+ "syspll3_d4",
+ "univpll3_d4",
+ "univpll1_d8",
+ "syspll2_d4",
+ "univpll2_d4"
+};
+
+static const char * const uart_parents[] = {
+ "clkxtal",
+ "univpll2_d8"
+};
+
+static const char * const spi0_parents[] = {
+ "clkxtal",
+ "syspll3_d2",
+ "clkxtal",
+ "syspll2_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8",
+ "clkxtal"
+};
+
+static const char * const spi1_parents[] = {
+ "clkxtal",
+ "syspll3_d2",
+ "clkxtal",
+ "syspll4_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8",
+ "clkxtal"
+};
+
+static const char * const msdc30_0_parents[] = {
+ "clkxtal",
+ "univpll2_d16",
+ "univ48m"
+};
+
+static const char * const a1sys_hp_parents[] = {
+ "clkxtal",
+ "aud1pll_ck",
+ "aud2pll_ck",
+ "clkxtal"
+};
+
+static const char * const intdir_parents[] = {
+ "clkxtal",
+ "syspll_d2",
+ "univpll_d2",
+ "sgmiipll_ck"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clkxtal",
+ "syspll1_d4",
+ "syspll4_d2",
+ "syspll3_d2"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clkxtal",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "univpll2_d16"
+};
+
+static const char * const atb_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "syspll_d5"
+};
+
+static const char * const audio_parents[] = {
+ "clkxtal",
+ "syspll3_d4",
+ "syspll4_d4",
+ "univpll1_d16"
+};
+
+static const char * const usb20_parents[] = {
+ "clkxtal",
+ "univpll3_d4",
+ "syspll1_d8",
+ "clkxtal"
+};
+
+static const char * const aud1_parents[] = {
+ "clkxtal",
+ "aud1pll_ck"
+};
+
+static const char * const aud2_parents[] = {
+ "clkxtal",
+ "aud2pll_ck"
+};
+
+static const char * const asm_l_parents[] = {
+ "clkxtal",
+ "syspll_d5",
+ "univpll2_d2",
+ "univpll2_d4"
+};
+
+static const char * const apll1_ck_parents[] = {
+ "aud1_sel",
+ "aud2_sel"
+};
+
+static const char * const peribus_ck_parents[] = {
+ "syspll1_d8",
+ "syspll1_d4"
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x120,
+ .clr_ofs = 0x120,
+ .sta_ofs = 0x120,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x128,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x128,
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0xC,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x1C,
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001,
+ PLL_AO, 21, 0x0204, 24, 0, 0x0204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001,
+ HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0x00000001,
+ HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
+ PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001,
+ 0, 21, 0x0300, 1, 0, 0x0304, 0),
+ PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001,
+ 0, 21, 0x0314, 1, 0, 0x0318, 0),
+ PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0x00000001,
+ 0, 31, 0x0324, 1, 0, 0x0328, 0),
+ PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0x00000001,
+ 0, 31, 0x0334, 1, 0, 0x0338, 0),
+ PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0x00000001,
+ 0, 21, 0x0344, 1, 0, 0x0348, 0),
+ PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001,
+ 0, 21, 0x0358, 1, 0, 0x035C, 0),
+};
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+};
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_INFRA(CLK_INFRA_DBGCLK_PD, "infra_dbgclk_pd", "axi_sel", 0),
+ GATE_INFRA(CLK_INFRA_TRNG, "trng_ck", "axi_sel", 2),
+ GATE_INFRA(CLK_INFRA_AUDIO_PD, "infra_audio_pd", "aud_intbus_sel", 5),
+ GATE_INFRA(CLK_INFRA_IRRX_PD, "infra_irrx_pd", "irrx_sel", 16),
+ GATE_INFRA(CLK_INFRA_APXGPT_PD, "infra_apxgpt_pd", "f10m_ref_sel", 18),
+ GATE_INFRA(CLK_INFRA_PMIC_PD, "infra_pmic_pd", "pmicspi_sel", 22),
+};
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_TO_U2_PHY, "to_u2_phy", "clkxtal",
+ 31250000),
+ FIXED_CLK(CLK_TOP_TO_U2_PHY_1P, "to_u2_phy_1p", "clkxtal",
+ 31250000),
+ FIXED_CLK(CLK_TOP_PCIE0_PIPE_EN, "pcie0_pipe_en", "clkxtal",
+ 125000000),
+ FIXED_CLK(CLK_TOP_PCIE1_PIPE_EN, "pcie1_pipe_en", "clkxtal",
+ 125000000),
+ FIXED_CLK(CLK_TOP_SSUSB_TX250M, "ssusb_tx250m", "clkxtal",
+ 250000000),
+ FIXED_CLK(CLK_TOP_SSUSB_EQ_RX250M, "ssusb_eq_rx250m", "clkxtal",
+ 250000000),
+ FIXED_CLK(CLK_TOP_SSUSB_CDR_REF, "ssusb_cdr_ref", "clkxtal",
+ 33333333),
+ FIXED_CLK(CLK_TOP_SSUSB_CDR_FB, "ssusb_cdr_fb", "clkxtal",
+ 50000000),
+ FIXED_CLK(CLK_TOP_SATA_ASIC, "sata_asic", "clkxtal",
+ 50000000),
+ FIXED_CLK(CLK_TOP_SATA_RBC, "sata_rbc", "clkxtal",
+ 50000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_TO_USB3_SYS, "to_usb3_sys", "eth1pll", 1, 4),
+ FACTOR(CLK_TOP_P1_1MHZ, "p1_1mhz", "eth1pll", 1, 500),
+ FACTOR(CLK_TOP_4MHZ, "free_run_4mhz", "eth1pll", 1, 125),
+ FACTOR(CLK_TOP_P0_1MHZ, "p0_1mhz", "eth1pll", 1, 500),
+ FACTOR(CLK_TOP_TXCLK_SRC_PRE, "txclk_src_pre", "sgmiipll_d2", 1, 1),
+ FACTOR(CLK_TOP_RTC, "rtc", "clkxtal", 1, 1024),
+ FACTOR(CLK_TOP_MEMPLL, "mempll", "clkxtal", 32, 1),
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "mainpll", 1, 24),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_SYSPLL4_D16, "syspll4_d16", "mainpll", 1, 112),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL1_D16, "univpll1_d16", "univpll", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL2_D16, "univpll2_d16", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL3_D16, "univpll3_d16", "univpll", 1, 80),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_D80_D4, "univpll_d80_d4", "univpll", 1, 320),
+ FACTOR(CLK_TOP_UNIV48M, "univ48m", "univpll", 1, 25),
+ FACTOR(CLK_TOP_SGMIIPLL, "sgmiipll_ck", "sgmipll", 1, 1),
+ FACTOR(CLK_TOP_SGMIIPLL_D2, "sgmiipll_d2", "sgmipll", 1, 2),
+ FACTOR(CLK_TOP_AUD1PLL, "aud1pll_ck", "aud1pll", 1, 1),
+ FACTOR(CLK_TOP_AUD2PLL, "aud2pll_ck", "aud2pll", 1, 1),
+ FACTOR(CLK_TOP_AUD_I2S2_MCK, "aud_i2s2_mck", "i2s2_mck_sel", 1, 2),
+ FACTOR(CLK_TOP_TO_USB3_REF, "to_usb3_ref", "univpll2_d4", 1, 4),
+ FACTOR(CLK_TOP_PCIE1_MAC_EN, "pcie1_mac_en", "univpll1_d4", 1, 1),
+ FACTOR(CLK_TOP_PCIE0_MAC_EN, "pcie0_mac_en", "univpll1_d4", 1, 1),
+ FACTOR(CLK_TOP_ETH_500M, "eth_500m", "eth1pll", 1, 1),
+};
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_APLL1_DIV_PD, "apll1_ck_div_pd", "apll1_ck_div", 0),
+ GATE_TOP0(CLK_TOP_APLL2_DIV_PD, "apll2_ck_div_pd", "apll2_ck_div", 1),
+ GATE_TOP0(CLK_TOP_I2S0_MCK_DIV_PD, "i2s0_mck_div_pd", "i2s0_mck_div",
+ 2),
+ GATE_TOP0(CLK_TOP_I2S1_MCK_DIV_PD, "i2s1_mck_div_pd", "i2s1_mck_div",
+ 3),
+ GATE_TOP0(CLK_TOP_I2S2_MCK_DIV_PD, "i2s2_mck_div_pd", "i2s2_mck_div",
+ 4),
+ GATE_TOP0(CLK_TOP_I2S3_MCK_DIV_PD, "i2s3_mck_div_pd", "i2s3_mck_div",
+ 5),
+
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_A1SYS_HP_DIV_PD, "a1sys_div_pd", "a1sys_div", 0),
+ GATE_TOP1(CLK_TOP_A2SYS_HP_DIV_PD, "a2sys_div_pd", "a2sys_div", 16),
+};
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_APLL1_DIV, "apll1_ck_div", "apll1_ck_sel",
+ 0x120, 24, 3),
+ DIV_ADJ(CLK_TOP_APLL2_DIV, "apll2_ck_div", "apll2_ck_sel",
+ 0x120, 28, 3),
+ DIV_ADJ(CLK_TOP_I2S0_MCK_DIV, "i2s0_mck_div", "i2s0_mck_sel",
+ 0x124, 0, 7),
+ DIV_ADJ(CLK_TOP_I2S1_MCK_DIV, "i2s1_mck_div", "i2s1_mck_sel",
+ 0x124, 8, 7),
+ DIV_ADJ(CLK_TOP_I2S2_MCK_DIV, "i2s2_mck_div", "aud_i2s2_mck",
+ 0x124, 16, 7),
+ DIV_ADJ(CLK_TOP_I2S3_MCK_DIV, "i2s3_mck_div", "i2s3_mck_sel",
+ 0x124, 24, 7),
+ DIV_ADJ(CLK_TOP_A1SYS_HP_DIV, "a1sys_div", "a1sys_hp_sel",
+ 0x128, 8, 7),
+ DIV_ADJ(CLK_TOP_A2SYS_HP_DIV, "a2sys_div", "a2sys_hp_sel",
+ 0x128, 24, 7),
+};
+
+static const struct mtk_gate peri_clks[] = {
+ /* PERI0 */
+ GATE_PERI0(CLK_PERI_THERM_PD, "peri_therm_pd", "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_PWM1_PD, "peri_pwm1_pd", "clkxtal", 2),
+ GATE_PERI0(CLK_PERI_PWM2_PD, "peri_pwm2_pd", "clkxtal", 3),
+ GATE_PERI0(CLK_PERI_PWM3_PD, "peri_pwm3_pd", "clkxtal", 4),
+ GATE_PERI0(CLK_PERI_PWM4_PD, "peri_pwm4_pd", "clkxtal", 5),
+ GATE_PERI0(CLK_PERI_PWM5_PD, "peri_pwm5_pd", "clkxtal", 6),
+ GATE_PERI0(CLK_PERI_PWM6_PD, "peri_pwm6_pd", "clkxtal", 7),
+ GATE_PERI0(CLK_PERI_PWM7_PD, "peri_pwm7_pd", "clkxtal", 8),
+ GATE_PERI0(CLK_PERI_PWM_PD, "peri_pwm_pd", "clkxtal", 9),
+ GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "axi_sel", 12),
+ GATE_PERI0(CLK_PERI_MSDC30_0_PD, "peri_msdc30_0", "msdc30_0_sel", 13),
+ GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1_sel", 14),
+ GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
+ GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "axi_sel", 18),
+ GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "axi_sel", 19),
+ GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "axi_sel", 20),
+ GATE_PERI0(CLK_PERI_UART4_PD, "peri_uart4_pd", "axi_sel", 21),
+ GATE_PERI0(CLK_PERI_BTIF_PD, "peri_btif_pd", "axi_sel", 22),
+ GATE_PERI0(CLK_PERI_I2C0_PD, "peri_i2c0_pd", "axi_sel", 23),
+ GATE_PERI0(CLK_PERI_I2C1_PD, "peri_i2c1_pd", "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_I2C2_PD, "peri_i2c2_pd", "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_SPI1_PD, "peri_spi1_pd", "spi1_sel", 26),
+ GATE_PERI0(CLK_PERI_AUXADC_PD, "peri_auxadc_pd", "clkxtal", 27),
+ GATE_PERI0(CLK_PERI_SPI0_PD, "peri_spi0_pd", "spi0_sel", 28),
+ GATE_PERI0(CLK_PERI_SNFI_PD, "peri_snfi_pd", "nfi_infra_sel", 29),
+ GATE_PERI0(CLK_PERI_NFI_PD, "peri_nfi_pd", "axi_sel", 30),
+ GATE_PERI0(CLK_PERI_NFIECC_PD, "peri_nfiecc_pd", "axi_sel", 31),
+
+ /* PERI1 */
+ GATE_PERI1(CLK_PERI_FLASH_PD, "peri_flash_pd", "flash_sel", 1),
+ GATE_PERI1(CLK_PERI_IRTX_PD, "peri_irtx_pd", "irtx_sel", 2),
+};
+
+static struct mtk_composite infra_muxes[] __initdata = {
+ MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents,
+ 0x000, 2, 2),
+};
+
+static struct mtk_composite top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x040, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x040, 8, 1, 15),
+ MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x040, 16, 1, 23),
+ MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 0x040, 24, 3, 31),
+
+ /* CLK_CFG_1 */
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_F10M_REF_SEL, "f10m_ref_sel", f10m_ref_parents,
+ 0x050, 8, 1, 15),
+ MUX_GATE(CLK_TOP_NFI_INFRA_SEL, "nfi_infra_sel", nfi_infra_parents,
+ 0x050, 16, 4, 23),
+ MUX_GATE(CLK_TOP_FLASH_SEL, "flash_sel", flash_parents,
+ 0x050, 24, 3, 31),
+
+ /* CLK_CFG_2 */
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ 0x060, 0, 1, 7),
+ MUX_GATE(CLK_TOP_SPI0_SEL, "spi0_sel", spi0_parents,
+ 0x060, 8, 3, 15),
+ MUX_GATE(CLK_TOP_SPI1_SEL, "spi1_sel", spi1_parents,
+ 0x060, 16, 3, 23),
+ MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", uart_parents,
+ 0x060, 24, 3, 31),
+
+ /* CLK_CFG_3 */
+ MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_parents,
+ 0x070, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_0_parents,
+ 0x070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel", a1sys_hp_parents,
+ 0x070, 16, 2, 23),
+ MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel", a1sys_hp_parents,
+ 0x070, 24, 2, 31),
+
+ /* CLK_CFG_4 */
+ MUX_GATE(CLK_TOP_INTDIR_SEL, "intdir_sel", intdir_parents,
+ 0x080, 0, 2, 7),
+ MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x080, 8, 2, 15),
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x080, 16, 3, 23),
+ MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", ddrphycfg_parents,
+ 0x080, 24, 2, 31),
+
+ /* CLK_CFG_5 */
+ MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents,
+ 0x090, 0, 2, 7),
+ MUX_GATE(CLK_TOP_HIF_SEL, "hif_sel", eth_parents,
+ 0x090, 8, 3, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x090, 16, 2, 23),
+ MUX_GATE(CLK_TOP_U2_SEL, "usb20_sel", usb20_parents,
+ 0x090, 24, 2, 31),
+
+ /* CLK_CFG_6 */
+ MUX_GATE(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+ 0x0A0, 0, 1, 7),
+ MUX_GATE(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+ 0x0A0, 8, 1, 15),
+ MUX_GATE(CLK_TOP_IRRX_SEL, "irrx_sel", f10m_ref_parents,
+ 0x0A0, 16, 1, 23),
+ MUX_GATE(CLK_TOP_IRTX_SEL, "irtx_sel", f10m_ref_parents,
+ 0x0A0, 24, 1, 31),
+
+ /* CLK_CFG_7 */
+ MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel", asm_l_parents,
+ 0x0B0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel", asm_l_parents,
+ 0x0B0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel", asm_l_parents,
+ 0x0B0, 16, 2, 23),
+
+ /* CLK_AUDDIV_0 */
+ MUX(CLK_TOP_APLL1_SEL, "apll1_ck_sel", apll1_ck_parents,
+ 0x120, 6, 1),
+ MUX(CLK_TOP_APLL2_SEL, "apll2_ck_sel", apll1_ck_parents,
+ 0x120, 7, 1),
+ MUX(CLK_TOP_I2S0_MCK_SEL, "i2s0_mck_sel", apll1_ck_parents,
+ 0x120, 8, 1),
+ MUX(CLK_TOP_I2S1_MCK_SEL, "i2s1_mck_sel", apll1_ck_parents,
+ 0x120, 9, 1),
+ MUX(CLK_TOP_I2S2_MCK_SEL, "i2s2_mck_sel", apll1_ck_parents,
+ 0x120, 10, 1),
+ MUX(CLK_TOP_I2S3_MCK_SEL, "i2s3_mck_sel", apll1_ck_parents,
+ 0x120, 11, 1),
+};
+
+static struct mtk_composite peri_muxes[] = {
+ /* PERI_GLOBALCON_CKSEL */
+ MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
+};
+
+static int mtk_topckgen_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ clk_data);
+
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+ base, &mt7622_clk_lock, clk_data);
+
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt7622_clk_lock, clk_data);
+
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int __init mtk_infrasys_init(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get,
+ clk_data);
+ if (r)
+ return r;
+
+ mtk_register_reset_controller(node, 1, 0x30);
+
+ return 0;
+}
+
+static int mtk_apmixedsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
+ clk_data);
+
+ mtk_clk_register_gates(node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int mtk_pericfg_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+
+ mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
+ &mt7622_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]);
+
+ mtk_register_reset_controller(node, 2, 0x0);
+
+ return 0;
+}
+
+static const struct of_device_id of_match_clk_mt7622[] = {
+ {
+ .compatible = "mediatek,mt7622-apmixedsys",
+ .data = mtk_apmixedsys_init,
+ }, {
+ .compatible = "mediatek,mt7622-infracfg",
+ .data = mtk_infrasys_init,
+ }, {
+ .compatible = "mediatek,mt7622-topckgen",
+ .data = mtk_topckgen_init,
+ }, {
+ .compatible = "mediatek,mt7622-pericfg",
+ .data = mtk_pericfg_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_drv = {
+ .probe = clk_mt7622_probe,
+ .driver = {
+ .name = "clk-mt7622",
+ .of_match_table = of_match_clk_mt7622,
+ },
+};
+
+static int clk_mt7622_init(void)
+{
+ return platform_driver_register(&clk_mt7622_drv);
+}
+
+arch_initcall(clk_mt7622_init);
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index f5d6b70ce189..f10250dcece4 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -207,6 +207,8 @@ struct mtk_pll_data {
uint32_t en_mask;
uint32_t pd_reg;
uint32_t tuner_reg;
+ uint32_t tuner_en_reg;
+ uint8_t tuner_en_bit;
int pd_shift;
unsigned int flags;
const struct clk_ops *ops;
@@ -216,6 +218,7 @@ struct mtk_pll_data {
uint32_t pcw_reg;
int pcw_shift;
const struct mtk_pll_div_table *div_table;
+ const char *parent_name;
};
void mtk_clk_register_plls(struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index a409142e9346..f54e4015b0b1 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -47,6 +47,7 @@ struct mtk_clk_pll {
void __iomem *pd_addr;
void __iomem *pwr_addr;
void __iomem *tuner_addr;
+ void __iomem *tuner_en_addr;
void __iomem *pcw_addr;
const struct mtk_pll_data *data;
};
@@ -227,7 +228,10 @@ static int mtk_pll_prepare(struct clk_hw *hw)
r |= pll->data->en_mask;
writel(r, pll->base_addr + REG_CON0);
- if (pll->tuner_addr) {
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
writel(r, pll->tuner_addr);
}
@@ -254,7 +258,10 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->base_addr + REG_CON0);
}
- if (pll->tuner_addr) {
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
writel(r, pll->tuner_addr);
}
@@ -297,13 +304,18 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->pcw_addr = base + data->pcw_reg;
if (data->tuner_reg)
pll->tuner_addr = base + data->tuner_reg;
+ if (data->tuner_en_reg)
+ pll->tuner_en_addr = base + data->tuner_en_reg;
pll->hw.init = &init;
pll->data = data;
init.name = data->name;
init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = &mtk_pll_ops;
- init.parent_names = &parent_name;
+ if (data->parent_name)
+ init.parent_names = &data->parent_name;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
clk = clk_register(NULL, &pll->hw);
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index b2d1e8ed7152..ae385310e980 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -1131,6 +1131,253 @@ static struct clk_gate gxbb_sd_emmc_c_clk0 = {
},
};
+/* VPU Clock */
+
+static u32 mux_table_vpu[] = {0, 1, 2, 3};
+static const char * const gxbb_vpu_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_mux gxbb_vpu_0_sel = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .lock = &clk_lock,
+ .table = mux_table_vpu,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 9:10 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vpu_0_div = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vpu_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vpu_0 = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vpu_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vpu_1_sel = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .lock = &clk_lock,
+ .table = mux_table_vpu,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 25:26 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vpu_1_div = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vpu_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vpu_1 = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vpu_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vpu = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vpu_0 or vpu_1
+ */
+ .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* VAPB Clock */
+
+static u32 mux_table_vapb[] = {0, 1, 2, 3};
+static const char * const gxbb_vapb_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_mux gxbb_vapb_0_sel = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .lock = &clk_lock,
+ .table = mux_table_vapb,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 9:10 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vapb_0_div = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vapb_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vapb_0 = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_0",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vapb_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vapb_1_sel = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .lock = &clk_lock,
+ .table = mux_table_vapb,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 25:26 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vapb_1_div = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vapb_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vapb_1 = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_1",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vapb_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vapb_sel = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vapb_0 or vapb_1
+ */
+ .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_gate gxbb_vapb = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vapb_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1349,6 +1596,21 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw,
+ [CLKID_VPU_0_SEL] = &gxbb_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &gxbb_vpu_0_div.hw,
+ [CLKID_VPU_0] = &gxbb_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &gxbb_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &gxbb_vpu_1_div.hw,
+ [CLKID_VPU_1] = &gxbb_vpu_1.hw,
+ [CLKID_VPU] = &gxbb_vpu.hw,
+ [CLKID_VAPB_0_SEL] = &gxbb_vapb_0_sel.hw,
+ [CLKID_VAPB_0_DIV] = &gxbb_vapb_0_div.hw,
+ [CLKID_VAPB_0] = &gxbb_vapb_0.hw,
+ [CLKID_VAPB_1_SEL] = &gxbb_vapb_1_sel.hw,
+ [CLKID_VAPB_1_DIV] = &gxbb_vapb_1_div.hw,
+ [CLKID_VAPB_1] = &gxbb_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
+ [CLKID_VAPB] = &gxbb_vapb.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1481,6 +1743,21 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw,
+ [CLKID_VPU_0_SEL] = &gxbb_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &gxbb_vpu_0_div.hw,
+ [CLKID_VPU_0] = &gxbb_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &gxbb_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &gxbb_vpu_1_div.hw,
+ [CLKID_VPU_1] = &gxbb_vpu_1.hw,
+ [CLKID_VPU] = &gxbb_vpu.hw,
+ [CLKID_VAPB_0_SEL] = &gxbb_vapb_0_sel.hw,
+ [CLKID_VAPB_0_DIV] = &gxbb_vapb_0_div.hw,
+ [CLKID_VAPB_0] = &gxbb_vapb_0.hw,
+ [CLKID_VAPB_1_SEL] = &gxbb_vapb_1_sel.hw,
+ [CLKID_VAPB_1_DIV] = &gxbb_vapb_1_div.hw,
+ [CLKID_VAPB_1] = &gxbb_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
+ [CLKID_VAPB] = &gxbb_vapb.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1600,6 +1877,11 @@ static struct clk_gate *const gxbb_clk_gates[] = {
&gxbb_sd_emmc_a_clk0,
&gxbb_sd_emmc_b_clk0,
&gxbb_sd_emmc_c_clk0,
+ &gxbb_vpu_0,
+ &gxbb_vpu_1,
+ &gxbb_vapb_0,
+ &gxbb_vapb_1,
+ &gxbb_vapb,
};
static struct clk_mux *const gxbb_clk_muxes[] = {
@@ -1615,6 +1897,12 @@ static struct clk_mux *const gxbb_clk_muxes[] = {
&gxbb_sd_emmc_a_clk0_sel,
&gxbb_sd_emmc_b_clk0_sel,
&gxbb_sd_emmc_c_clk0_sel,
+ &gxbb_vpu_0_sel,
+ &gxbb_vpu_1_sel,
+ &gxbb_vpu,
+ &gxbb_vapb_0_sel,
+ &gxbb_vapb_1_sel,
+ &gxbb_vapb_sel,
};
static struct clk_divider *const gxbb_clk_dividers[] = {
@@ -1627,6 +1915,10 @@ static struct clk_divider *const gxbb_clk_dividers[] = {
&gxbb_sd_emmc_a_clk0_div,
&gxbb_sd_emmc_b_clk0_div,
&gxbb_sd_emmc_c_clk0_div,
+ &gxbb_vpu_0_div,
+ &gxbb_vpu_1_div,
+ &gxbb_vapb_0_div,
+ &gxbb_vapb_1_div,
};
static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = {
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 5b1d4b374d1c..aee6fbba2004 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -190,8 +190,12 @@
#define CLKID_SD_EMMC_B_CLK0_DIV 121
#define CLKID_SD_EMMC_C_CLK0_SEL 123
#define CLKID_SD_EMMC_C_CLK0_DIV 124
+#define CLKID_VPU_0_DIV 127
+#define CLKID_VPU_1_DIV 130
+#define CLKID_VAPB_0_DIV 134
+#define CLKID_VAPB_1_DIV 137
-#define NR_CLKS 126
+#define NR_CLKS 141
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/mmp/clk-apbc.c b/drivers/clk/mmp/clk-apbc.c
index 4c717db05f2d..fb294ada0b03 100644
--- a/drivers/clk/mmp/clk-apbc.c
+++ b/drivers/clk/mmp/clk-apbc.c
@@ -114,7 +114,7 @@ static void clk_apbc_unprepare(struct clk_hw *hw)
spin_unlock_irqrestore(apbc->lock, flags);
}
-static struct clk_ops clk_apbc_ops = {
+static const struct clk_ops clk_apbc_ops = {
.prepare = clk_apbc_prepare,
.unprepare = clk_apbc_unprepare,
};
diff --git a/drivers/clk/mmp/clk-apmu.c b/drivers/clk/mmp/clk-apmu.c
index 47b5542ce50f..b7ce8f52026e 100644
--- a/drivers/clk/mmp/clk-apmu.c
+++ b/drivers/clk/mmp/clk-apmu.c
@@ -60,7 +60,7 @@ static void clk_apmu_disable(struct clk_hw *hw)
spin_unlock_irqrestore(apmu->lock, flags);
}
-static struct clk_ops clk_apmu_ops = {
+static const struct clk_ops clk_apmu_ops = {
.enable = clk_apmu_enable,
.disable = clk_apmu_disable,
};
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 584a9927993b..cb43d54735b0 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -149,7 +149,7 @@ static void clk_factor_init(struct clk_hw *hw)
spin_unlock_irqrestore(factor->lock, flags);
}
-static struct clk_ops clk_factor_ops = {
+static const struct clk_ops clk_factor_ops = {
.recalc_rate = clk_factor_recalc_rate,
.round_rate = clk_factor_round_rate,
.set_rate = clk_factor_set_rate,
@@ -172,10 +172,8 @@ struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
}
factor = kzalloc(sizeof(*factor), GFP_KERNEL);
- if (!factor) {
- pr_err("%s: could not allocate factor clk\n", __func__);
+ if (!factor)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_aux assignments */
factor->base = base;
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index d20cd3431ac2..7355595c42e2 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -103,10 +103,8 @@ struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
/* allocate the gate */
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate) {
- pr_err("%s:%s could not allocate gate clk\n", __func__, name);
+ if (!gate)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &mmp_clk_gate_ops;
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index c554833cffc5..90814b2613c0 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -229,7 +229,7 @@ static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
parent_rate = clk_hw_get_rate(parent);
mix_rate = parent_rate / item->divisor;
gap = abs(mix_rate - req->rate);
- if (parent_best == NULL || gap < gap_best) {
+ if (!parent_best || gap < gap_best) {
parent_best = parent;
parent_rate_best = parent_rate;
mix_rate_best = mix_rate;
@@ -247,7 +247,7 @@ static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
div = _get_div(mix, j);
mix_rate = parent_rate / div;
gap = abs(mix_rate - req->rate);
- if (parent_best == NULL || gap < gap_best) {
+ if (!parent_best || gap < gap_best) {
parent_best = parent;
parent_rate_best = parent_rate;
mix_rate_best = mix_rate;
@@ -451,11 +451,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
size_t table_bytes;
mix = kzalloc(sizeof(*mix), GFP_KERNEL);
- if (!mix) {
- pr_err("%s:%s: could not allocate mmp mix clk\n",
- __func__, name);
+ if (!mix)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.flags = flags | CLK_GET_RATE_NOCACHE;
@@ -467,12 +464,9 @@ struct clk *mmp_clk_register_mix(struct device *dev,
if (config->table) {
table_bytes = sizeof(*config->table) * config->table_size;
mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
- if (!mix->table) {
- pr_err("%s:%s: could not allocate mmp mix table\n",
- __func__, name);
- kfree(mix);
- return ERR_PTR(-ENOMEM);
- }
+ if (!mix->table)
+ goto free_mix;
+
mix->table_size = config->table_size;
}
@@ -481,11 +475,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
mix->mux_table = kmemdup(config->mux_table, table_bytes,
GFP_KERNEL);
if (!mix->mux_table) {
- pr_err("%s:%s: could not allocate mmp mix mux-table\n",
- __func__, name);
kfree(mix->table);
- kfree(mix);
- return ERR_PTR(-ENOMEM);
+ goto free_mix;
}
}
@@ -509,4 +500,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
}
return clk;
+
+free_mix:
+ kfree(mix);
+ return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 038023483b98..7460031714da 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -83,19 +83,19 @@ void __init mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
void __iomem *apbc_base;
mpmu_base = ioremap(mpmu_phys, SZ_4K);
- if (mpmu_base == NULL) {
+ if (!mpmu_base) {
pr_err("error to ioremap MPMU base\n");
return;
}
apmu_base = ioremap(apmu_phys, SZ_4K);
- if (apmu_base == NULL) {
+ if (!apmu_base) {
pr_err("error to ioremap APMU base\n");
return;
}
apbc_base = ioremap(apbc_phys, SZ_4K);
- if (apbc_base == NULL) {
+ if (!apbc_base) {
pr_err("error to ioremap APBC base\n");
return;
}
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index a9ef9209532a..8e2551ab8462 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -75,19 +75,19 @@ void __init pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
void __iomem *apbc_base;
mpmu_base = ioremap(mpmu_phys, SZ_4K);
- if (mpmu_base == NULL) {
+ if (!mpmu_base) {
pr_err("error to ioremap MPMU base\n");
return;
}
apmu_base = ioremap(apmu_phys, SZ_4K);
- if (apmu_base == NULL) {
+ if (!apmu_base) {
pr_err("error to ioremap APMU base\n");
return;
}
apbc_base = ioremap(apbc_phys, SZ_4K);
- if (apbc_base == NULL) {
+ if (!apbc_base) {
pr_err("error to ioremap APBC base\n");
return;
}
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index a520cf7702a1..7a7965141918 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -74,25 +74,25 @@ void __init pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
void __iomem *apbc_base;
mpmu_base = ioremap(mpmu_phys, SZ_4K);
- if (mpmu_base == NULL) {
+ if (!mpmu_base) {
pr_err("error to ioremap MPMU base\n");
return;
}
apmu_base = ioremap(apmu_phys, SZ_4K);
- if (apmu_base == NULL) {
+ if (!apmu_base) {
pr_err("error to ioremap APMU base\n");
return;
}
apbcp_base = ioremap(apbcp_phys, SZ_4K);
- if (apbcp_base == NULL) {
+ if (!apbcp_base) {
pr_err("error to ioremap APBC extension base\n");
return;
}
apbc_base = ioremap(apbc_phys, SZ_4K);
- if (apbc_base == NULL) {
+ if (!apbc_base) {
pr_err("error to ioremap APBC base\n");
return;
}
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index f75e989c578f..ccebd014fc1e 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -67,7 +67,7 @@ static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static struct clk_ops clk_div_ops = {
+static const struct clk_ops clk_div_ops = {
.recalc_rate = clk_div_recalc_rate,
.round_rate = clk_div_round_rate,
.set_rate = clk_div_set_rate,
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index f8dd10f6df3d..27b3372adc37 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -107,7 +107,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
return mxs_clk_wait(frac->reg, frac->busy);
}
-static struct clk_ops clk_frac_ops = {
+static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
.round_rate = clk_frac_round_rate,
.set_rate = clk_frac_set_rate,
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 74f64c3c4290..b80dc9d5855c 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -147,9 +147,7 @@ void pxa2xx_core_turbo_switch(bool on)
" b 3f\n"
"2: b 1b\n"
"3: nop\n"
- : "=&r" (unused)
- : "r" (clkcfg)
- : );
+ : "=&r" (unused) : "r" (clkcfg));
local_irq_restore(flags);
}
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 1b3e8d265bdb..a2495457e564 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -156,7 +156,6 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @hid_width: number of bits in half integer divider
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
- * @current_freq: last cached frequency when using branches with shared RCGs
* @clkr: regmap clock handle
*
*/
@@ -166,7 +165,6 @@ struct clk_rcg2 {
u8 hid_width;
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
- unsigned long current_freq;
struct clk_regmap clkr;
};
@@ -174,7 +172,6 @@ struct clk_rcg2 {
extern const struct clk_ops clk_rcg2_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
-extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
extern const struct clk_ops clk_byte2_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 1a0985ae20d2..bbeaf9c09dbb 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -358,85 +358,6 @@ const struct clk_ops clk_rcg2_floor_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
-static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- const char *name = clk_hw_get_name(hw);
- int ret, count;
-
- /* force enable RCG */
- ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
- CMD_ROOT_EN, CMD_ROOT_EN);
- if (ret)
- return ret;
-
- /* wait for RCG to turn ON */
- for (count = 500; count > 0; count--) {
- ret = clk_rcg2_is_enabled(hw);
- if (ret)
- break;
- udelay(1);
- }
- if (!count)
- pr_err("%s: RCG did not turn on\n", name);
-
- /* set clock rate */
- ret = __clk_rcg2_set_rate(hw, rate, CEIL);
- if (ret)
- return ret;
-
- /* clear force enable RCG */
- return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
- CMD_ROOT_EN, 0);
-}
-
-static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- /* cache the rate */
- rcg->current_freq = rate;
-
- if (!__clk_is_enabled(hw->clk))
- return 0;
-
- return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
-}
-
-static unsigned long
-clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
-}
-
-static int clk_rcg2_shared_enable(struct clk_hw *hw)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
-}
-
-static void clk_rcg2_shared_disable(struct clk_hw *hw)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- /* switch to XO, which is the lowest entry in the freq table */
- clk_rcg2_shared_set_rate(hw, rcg->freq_tbl[0].freq, 0);
-}
-
-const struct clk_ops clk_rcg2_shared_ops = {
- .enable = clk_rcg2_shared_enable,
- .disable = clk_rcg2_shared_disable,
- .get_parent = clk_rcg2_get_parent,
- .recalc_rate = clk_rcg2_shared_recalc_rate,
- .determine_rate = clk_rcg2_determine_rate,
- .set_rate = clk_rcg2_shared_set_rate,
-};
-EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
-
struct frac_entry {
int num;
int den;
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index df3e5fe8442a..c60f61b10c7f 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -56,6 +56,18 @@
}, \
}
+#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_fixed_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
static struct clk_rpm _platform##_##_active; \
static struct clk_rpm _platform##_##_name = { \
@@ -143,6 +155,13 @@ static int clk_rpm_handoff(struct clk_rpm *r)
int ret;
u32 value = INT_MAX;
+ /*
+ * The vendor tree simply reads the status for this
+ * RPM clock.
+ */
+ if (r->rpm_clk_id == QCOM_RPM_PLL_4)
+ return 0;
+
ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
r->rpm_clk_id, &value, 1);
if (ret)
@@ -269,6 +288,32 @@ out:
mutex_unlock(&rpm_clk_lock);
}
+static int clk_rpm_fixed_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ u32 value = 1;
+ int ret;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (!ret)
+ r->enabled = true;
+
+ return ret;
+}
+
+static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ u32 value = 0;
+ int ret;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (!ret)
+ r->enabled = false;
+}
+
static int clk_rpm_set_rate(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate)
{
@@ -333,6 +378,13 @@ static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
return r->rate;
}
+static const struct clk_ops clk_rpm_fixed_ops = {
+ .prepare = clk_rpm_fixed_prepare,
+ .unprepare = clk_rpm_fixed_unprepare,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
static const struct clk_ops clk_rpm_ops = {
.prepare = clk_rpm_prepare,
.unprepare = clk_rpm_unprepare,
@@ -348,6 +400,45 @@ static const struct clk_ops clk_rpm_branch_ops = {
.recalc_rate = clk_rpm_recalc_rate,
};
+/* MSM8660/APQ8060 */
+DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(msm8660, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(msm8660, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
+DEFINE_CLK_RPM(msm8660, smi_clk, smi_a_clk, QCOM_RPM_SMI_CLK);
+DEFINE_CLK_RPM(msm8660, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM_FIXED(msm8660, pll4_clk, pll4_a_clk, QCOM_RPM_PLL_4, 540672000);
+
+static struct clk_rpm *msm8660_clks[] = {
+ [RPM_APPS_FABRIC_CLK] = &msm8660_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &msm8660_afab_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &msm8660_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &msm8660_sfab_a_clk,
+ [RPM_MM_FABRIC_CLK] = &msm8660_mmfab_clk,
+ [RPM_MM_FABRIC_A_CLK] = &msm8660_mmfab_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &msm8660_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &msm8660_daytona_a_clk,
+ [RPM_SFPB_CLK] = &msm8660_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &msm8660_sfpb_a_clk,
+ [RPM_CFPB_CLK] = &msm8660_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &msm8660_cfpb_a_clk,
+ [RPM_MMFPB_CLK] = &msm8660_mmfpb_clk,
+ [RPM_MMFPB_A_CLK] = &msm8660_mmfpb_a_clk,
+ [RPM_SMI_CLK] = &msm8660_smi_clk,
+ [RPM_SMI_A_CLK] = &msm8660_smi_a_clk,
+ [RPM_EBI1_CLK] = &msm8660_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &msm8660_ebi1_a_clk,
+ [RPM_PLL4_CLK] = &msm8660_pll4_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_msm8660 = {
+ .clks = msm8660_clks,
+ .num_clks = ARRAY_SIZE(msm8660_clks),
+};
+
/* apq8064 */
DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
@@ -386,6 +477,8 @@ static const struct rpm_clk_desc rpm_clk_apq8064 = {
};
static const struct of_device_id rpm_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
+ { .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index cc03d5508627..c26d9007bfc4 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -530,9 +530,91 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
.clks = msm8974_clks,
.num_clks = ARRAY_SIZE(msm8974_clks),
};
+
+/* msm8996 */
+DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8996, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8996, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre1_noc_clk, aggre1_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 1, 1000);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre2_noc_clk, aggre2_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 2, 1000);
+DEFINE_CLK_SMD_RPM_QDSS(msm8996, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk1, div_clk1_a, 0xb);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk2, div_clk2_a, 0xc);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk3, div_clk3_a, 0xd);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8996_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &msm8996_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
+ [RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
+ [RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8996_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8996_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8996_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8996_ce1_a_clk,
+ [RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
+ [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
+ [RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
+ [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8996_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8996_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8996_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8996_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8996_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8996_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8996_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8996_rf_clk2_a,
+ [RPM_SMD_LN_BB_CLK] = &msm8996_ln_bb_clk,
+ [RPM_SMD_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
+ [RPM_SMD_DIV_CLK1] = &msm8996_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &msm8996_div_clk1_a,
+ [RPM_SMD_DIV_CLK2] = &msm8996_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &msm8996_div_clk2_a,
+ [RPM_SMD_DIV_CLK3] = &msm8996_div_clk3,
+ [RPM_SMD_DIV_A_CLK3] = &msm8996_div_clk3_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
+ .clks = msm8996_clks,
+ .num_clks = ARRAY_SIZE(msm8996_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
+ { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index d523991c945f..b8064a336d46 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -111,16 +111,6 @@ qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
}
EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
-static void qcom_cc_del_clk_provider(void *data)
-{
- of_clk_del_provider(data);
-}
-
-static void qcom_cc_reset_unregister(void *data)
-{
- reset_controller_unregister(data);
-}
-
static void qcom_cc_gdsc_unregister(void *data)
{
gdsc_unregister(data);
@@ -143,8 +133,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
int ret;
clocks_node = of_find_node_by_path("/clocks");
- if (clocks_node)
- node = of_find_node_by_name(clocks_node, path);
+ if (clocks_node) {
+ node = of_get_child_by_name(clocks_node, path);
+ of_node_put(clocks_node);
+ }
if (!node) {
fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
@@ -248,13 +240,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
return ret;
}
- ret = of_clk_add_hw_provider(dev->of_node, qcom_cc_clk_hw_get, cc);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, qcom_cc_del_clk_provider,
- pdev->dev.of_node);
-
+ ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
if (ret)
return ret;
@@ -266,13 +252,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
reset->regmap = regmap;
reset->reset_map = desc->resets;
- ret = reset_controller_register(&reset->rcdev);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, qcom_cc_reset_unregister,
- &reset->rcdev);
-
+ ret = devm_reset_controller_register(dev, &reset->rcdev);
if (ret)
return ret;
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index acbb38151ba1..43b5a89c4b28 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -15,6 +15,7 @@ config CLK_RENESAS
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A7795
select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_R8A77970 if ARCH_R8A77970
select CLK_R8A77995 if ARCH_R8A77995
select CLK_SH73A0 if ARCH_SH73A0
@@ -95,6 +96,10 @@ config CLK_R8A7796
bool "R-Car M3-W clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77970
+ bool "R-Car V3M clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index cbbb081e2145..34c4e0b37afa 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 3e0040c0ac87..151336d2ba59 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -14,8 +14,10 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include "clk-div6.h"
@@ -32,6 +34,7 @@
* @src_shift: Shift to access the register bits to select the parent clock
* @src_width: Number of register bits to select the parent clock (may be 0)
* @parents: Array to map from valid parent clocks indices to hardware indices
+ * @nb: Notifier block to save/restore clock state for system resume
*/
struct div6_clock {
struct clk_hw hw;
@@ -40,6 +43,7 @@ struct div6_clock {
u32 src_shift;
u32 src_width;
u8 *parents;
+ struct notifier_block nb;
};
#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
@@ -176,6 +180,29 @@ static const struct clk_ops cpg_div6_clock_ops = {
.set_rate = cpg_div6_clock_set_rate,
};
+static int cpg_div6_clock_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct div6_clock *clock = container_of(nb, struct div6_clock, nb);
+
+ switch (action) {
+ case PM_EVENT_RESUME:
+ /*
+ * TODO: This does not yet support DIV6 clocks with multiple
+ * parents, as the parent selection bits are not restored.
+ * Fortunately so far such DIV6 clocks are found only on
+ * R/SH-Mobile SoCs, while the resume functionality is only
+ * needed on R-Car Gen3.
+ */
+ if (__clk_get_enable_count(clock->hw.clk))
+ cpg_div6_clock_enable(&clock->hw);
+ else
+ cpg_div6_clock_disable(&clock->hw);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
/**
* cpg_div6_register - Register a DIV6 clock
@@ -183,11 +210,13 @@ static const struct clk_ops cpg_div6_clock_ops = {
* @num_parents: Number of parent clocks of the DIV6 clock (1, 4, or 8)
* @parent_names: Array containing the names of the parent clocks
* @reg: Mapped register used to control the DIV6 clock
+ * @notifiers: Optional notifier chain to save/restore state for system resume
*/
struct clk * __init cpg_div6_register(const char *name,
unsigned int num_parents,
const char **parent_names,
- void __iomem *reg)
+ void __iomem *reg,
+ struct raw_notifier_head *notifiers)
{
unsigned int valid_parents;
struct clk_init_data init;
@@ -258,6 +287,11 @@ struct clk * __init cpg_div6_register(const char *name,
if (IS_ERR(clk))
goto free_parents;
+ if (notifiers) {
+ clock->nb.notifier_call = cpg_div6_clock_notifier_call;
+ raw_notifier_chain_register(notifiers, &clock->nb);
+ }
+
return clk;
free_parents:
@@ -301,7 +335,7 @@ static void __init cpg_div6_clock_init(struct device_node *np)
for (i = 0; i < num_parents; i++)
parent_names[i] = of_clk_get_parent_name(np, i);
- clk = cpg_div6_register(clk_name, num_parents, parent_names, reg);
+ clk = cpg_div6_register(clk_name, num_parents, parent_names, reg, NULL);
if (IS_ERR(clk)) {
pr_err("%s: failed to register %s DIV6 clock (%ld)\n",
__func__, np->name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/clk-div6.h b/drivers/clk/renesas/clk-div6.h
index 065dfb49adf6..3af640a0b08d 100644
--- a/drivers/clk/renesas/clk-div6.h
+++ b/drivers/clk/renesas/clk-div6.h
@@ -3,6 +3,7 @@
#define __RENESAS_CLK_DIV6_H__
struct clk *cpg_div6_register(const char *name, unsigned int num_parents,
- const char **parent_names, void __iomem *reg);
+ const char **parent_names, void __iomem *reg,
+ struct raw_notifier_head *notifiers);
#endif
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 500a9e4e03c4..c944cc421e30 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -156,10 +156,8 @@ static struct clk * __init cpg_mstp_clock_register(const char *name,
struct clk *clk;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock) {
- pr_err("%s: failed to allocate MSTP clock.\n", __func__);
+ if (!clock)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &cpg_mstp_clock_ops;
@@ -196,7 +194,6 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
if (group == NULL || clks == NULL) {
kfree(group);
kfree(clks);
- pr_err("%s: failed to allocate group\n", __func__);
return;
}
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 0b2e56d0d94b..d14cbe1ca29a 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -423,7 +423,6 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
- pr_err("%s: failed to allocate cpg\n", __func__);
return;
}
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 5adb934326d1..127c58135c8f 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -1,5 +1,5 @@
/*
- * rz Core CPG Clocks
+ * RZ/A1 Core CPG Clocks
*
* Copyright (C) 2013 Ideas On Board SPRL
* Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 9e2360a8e14b..2859504cc866 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -129,6 +129,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
DEF_MOD("scif2", 719, R8A7745_CLK_P),
DEF_MOD("scif1", 720, R8A7745_CLK_P),
DEF_MOD("scif0", 721, R8A7745_CLK_P),
+ DEF_MOD("du1", 723, R8A7745_CLK_ZX),
DEF_MOD("du0", 724, R8A7745_CLK_ZX),
DEF_MOD("ipmmu-sgx", 800, R8A7745_CLK_ZX),
DEF_MOD("vin1", 810, R8A7745_CLK_ZG),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 762b2f8824f1..b1d9f48eae9e 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -149,7 +149,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("usb-dmac1", 331, R8A7795_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A7795_CLK_R),
DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
- DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1),
+ DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
@@ -348,6 +348,7 @@ static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
{ MOD_CLK_ID(217), R8A7795_CLK_S3D1 }, /* SYS-DMAC2 */
{ MOD_CLK_ID(218), R8A7795_CLK_S3D1 }, /* SYS-DMAC1 */
{ MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */
+ { MOD_CLK_ID(408), R8A7795_CLK_S3D1 }, /* INTC-AP */
{ MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */
{ MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */
{ MOD_CLK_ID(523), R8A7795_CLK_S3D4 }, /* PWM */
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index e5e7fb212288..b3767472088a 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -143,7 +143,7 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("usb-dmac1", 331, R8A7796_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A7796_CLK_R),
DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
- DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1),
+ DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
new file mode 100644
index 000000000000..72f98527473a
--- /dev/null
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -0,0 +1,199 @@
+/*
+ * r8a77970 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2017 Cogent Embedded Inc.
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77970-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77970_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("ztr", R8A77970_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A77970_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A77970_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A77970_CLK_ZX, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED("s1d1", R8A77970_CLK_S1D1, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("s1d2", R8A77970_CLK_S1D2, CLK_PLL1_DIV2, 8, 1),
+ DEF_FIXED("s1d4", R8A77970_CLK_S1D4, CLK_PLL1_DIV2, 16, 1),
+ DEF_FIXED("s2d1", R8A77970_CLK_S2D1, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("s2d2", R8A77970_CLK_S2D2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("s2d4", R8A77970_CLK_S2D4, CLK_PLL1_DIV2, 24, 1),
+
+ DEF_FIXED("cl", R8A77970_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A77970_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A77970_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("mso", R8A77970_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("csi0", R8A77970_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+
+ DEF_FIXED("osc", R8A77970_CLK_OSC, CLK_PLL1_DIV2, 12*1024, 1),
+ DEF_FIXED("r", R8A77970_CLK_R, CLK_EXTALR, 1, 1),
+};
+
+static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = {
+ DEF_MOD("ivcp1e", 127, R8A77970_CLK_S2D1),
+ DEF_MOD("scif4", 203, R8A77970_CLK_S2D4),
+ DEF_MOD("scif3", 204, R8A77970_CLK_S2D4),
+ DEF_MOD("scif1", 206, R8A77970_CLK_S2D4),
+ DEF_MOD("scif0", 207, R8A77970_CLK_S2D4),
+ DEF_MOD("msiof3", 208, R8A77970_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A77970_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77970_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77970_CLK_MSO),
+ DEF_MOD("mfis", 213, R8A77970_CLK_S2D2),
+ DEF_MOD("sys-dmac2", 217, R8A77970_CLK_S2D1),
+ DEF_MOD("sys-dmac1", 218, R8A77970_CLK_S2D1),
+ DEF_MOD("rwdt", 402, R8A77970_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A77970_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif3", 517, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif2", 518, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif1", 519, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif0", 520, R8A77970_CLK_S2D1),
+ DEF_MOD("thermal", 522, R8A77970_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77970_CLK_S2D4),
+ DEF_MOD("fcpvd0", 603, R8A77970_CLK_S2D1),
+ DEF_MOD("vspd0", 623, R8A77970_CLK_S2D1),
+ DEF_MOD("csi40", 716, R8A77970_CLK_CSI0),
+ DEF_MOD("du0", 724, R8A77970_CLK_S2D1),
+ DEF_MOD("vin3", 808, R8A77970_CLK_S2D1),
+ DEF_MOD("vin2", 809, R8A77970_CLK_S2D1),
+ DEF_MOD("vin1", 810, R8A77970_CLK_S2D1),
+ DEF_MOD("vin0", 811, R8A77970_CLK_S2D1),
+ DEF_MOD("etheravb", 812, R8A77970_CLK_S2D2),
+ DEF_MOD("gpio5", 907, R8A77970_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A77970_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A77970_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A77970_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A77970_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A77970_CLK_CP),
+ DEF_MOD("can-fd", 914, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c4", 927, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c3", 928, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c2", 929, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c1", 930, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c0", 931, R8A77970_CLK_S2D2),
+};
+
+static const unsigned int r8a77970_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz)
+ *-------------------------------------------------
+ * 0 0 0 16.66 x 1 x192 x192 x96
+ * 0 0 1 16.66 x 1 x192 x192 x80
+ * 0 1 0 20 x 1 x160 x160 x80
+ * 0 1 1 20 x 1 x160 x160 x66
+ * 1 0 0 27 / 2 x236 x236 x118
+ * 1 0 1 27 / 2 x236 x236 x98
+ * 1 1 0 33.33 / 2 x192 x192 x96
+ * 1 1 1 33.33 / 2 x192 x192 x80
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 96, 1, },
+ { 1, 192, 1, 80, 1, },
+ { 1, 160, 1, 80, 1, },
+ { 1, 160, 1, 66, 1, },
+ { 2, 236, 1, 118, 1, },
+ { 2, 236, 1, 98, 1, },
+ { 2, 192, 1, 96, 1, },
+ { 2, 192, 1, 80, 1, },
+};
+
+static int __init r8a77970_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77970_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77970_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77970_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77970_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77970_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77970_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77970_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77970_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index e594cf8ee63b..ea4cafbe6e85 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -127,7 +127,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("usb-dmac1", 331, R8A77995_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A77995_CLK_R),
DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
- DEF_MOD("intc-ap", 408, R8A77995_CLK_S3D1),
+ DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index 123b1e622179..feb14579a71b 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -262,10 +262,9 @@ static unsigned int cpg_pll0_div __initdata;
static u32 cpg_mode __initdata;
struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
- const struct cpg_core_clk *core,
- const struct cpg_mssr_info *info,
- struct clk **clks,
- void __iomem *base)
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
{
const struct clk_div_table *table = NULL;
const struct clk *parent;
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index 9eba07ff8b11..020a3baad015 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -34,9 +34,9 @@ struct rcar_gen2_cpg_pll_config {
};
struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
- const struct cpg_core_clk *core,
- const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base);
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
unsigned int pll0_div, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 951105816547..0904886f5501 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -29,6 +30,36 @@
#define CPG_PLL2CR 0x002c
#define CPG_PLL4CR 0x01f4
+struct cpg_simple_notifier {
+ struct notifier_block nb;
+ void __iomem *reg;
+ u32 saved;
+};
+
+static int cpg_simple_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct cpg_simple_notifier *csn =
+ container_of(nb, struct cpg_simple_notifier, nb);
+
+ switch (action) {
+ case PM_EVENT_SUSPEND:
+ csn->saved = readl(csn->reg);
+ return NOTIFY_OK;
+
+ case PM_EVENT_RESUME:
+ writel(csn->saved, csn->reg);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ struct cpg_simple_notifier *csn)
+{
+ csn->nb.notifier_call = cpg_simple_notifier_call;
+ raw_notifier_chain_register(notifiers, &csn->nb);
+}
/*
* SDn Clock
@@ -55,8 +86,8 @@ struct sd_div_table {
struct sd_clock {
struct clk_hw hw;
- void __iomem *reg;
const struct sd_div_table *div_table;
+ struct cpg_simple_notifier csn;
unsigned int div_num;
unsigned int div_min;
unsigned int div_max;
@@ -97,12 +128,12 @@ static const struct sd_div_table cpg_sd_div_table[] = {
static int cpg_sd_clock_enable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- u32 val = readl(clock->reg);
+ u32 val = readl(clock->csn.reg);
val &= ~(CPG_SD_STP_MASK);
val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
- writel(val, clock->reg);
+ writel(val, clock->csn.reg);
return 0;
}
@@ -111,14 +142,14 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- writel(readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+ writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg);
}
static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- return !(readl(clock->reg) & CPG_SD_STP_MASK);
+ return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
}
static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
@@ -170,10 +201,10 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
clock->cur_div_idx = i;
- val = readl(clock->reg);
+ val = readl(clock->csn.reg);
val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- writel(val, clock->reg);
+ writel(val, clock->csn.reg);
return 0;
}
@@ -188,8 +219,8 @@ static const struct clk_ops cpg_sd_clock_ops = {
};
static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
- void __iomem *base,
- const char *parent_name)
+ void __iomem *base, const char *parent_name,
+ struct raw_notifier_head *notifiers)
{
struct clk_init_data init;
struct sd_clock *clock;
@@ -207,12 +238,12 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
init.parent_names = &parent_name;
init.num_parents = 1;
- clock->reg = base + core->offset;
+ clock->csn.reg = base + core->offset;
clock->hw.init = &init;
clock->div_table = cpg_sd_div_table;
clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
- sd_fc = readl(clock->reg) & CPG_SD_FC_MASK;
+ sd_fc = readl(clock->csn.reg) & CPG_SD_FC_MASK;
for (i = 0; i < clock->div_num; i++)
if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
break;
@@ -233,8 +264,13 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
clk = clk_register(NULL, &clock->hw);
if (IS_ERR(clk))
- kfree(clock);
+ goto free_clock;
+ cpg_simple_notifier_register(notifiers, &clock->csn);
+ return clk;
+
+free_clock:
+ kfree(clock);
return clk;
}
@@ -265,7 +301,8 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base)
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
{
const struct clk *parent;
unsigned int mult = 1;
@@ -331,22 +368,32 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
break;
case CLK_TYPE_GEN3_SD:
- return cpg_sd_clk_register(core, base, __clk_get_name(parent));
+ return cpg_sd_clk_register(core, base, __clk_get_name(parent),
+ notifiers);
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
+ struct cpg_simple_notifier *csn;
+
+ csn = kzalloc(sizeof(*csn), GFP_KERNEL);
+ if (!csn)
+ return ERR_PTR(-ENOMEM);
+
+ csn->reg = base + CPG_RCKCR;
+
/*
* RINT is default.
* Only if EXTALR is populated, we switch to it.
*/
- value = readl(base + CPG_RCKCR) & 0x3f;
+ value = readl(csn->reg) & 0x3f;
if (clk_get_rate(clks[cpg_clk_extalr])) {
parent = clks[cpg_clk_extalr];
value |= BIT(15);
}
- writel(value, base + CPG_RCKCR);
+ writel(value, csn->reg);
+ cpg_simple_notifier_register(notifiers, csn);
break;
}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index d756ef8b78eb..2e4284399f53 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -44,7 +44,8 @@ struct rcar_gen3_cpg_pll_config {
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base);
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index e580a5e6346c..e3d03ffea4bc 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -106,6 +107,9 @@ static const u16 srcr[] = {
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @notifiers: Notifier chain to save/restore clock state for system resume
+ * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
+ * @smstpcr_saved[].val: Saved values of SMSTPCR[]
*/
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
@@ -119,6 +123,12 @@ struct cpg_mssr_priv {
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
+
+ struct raw_notifier_head notifiers;
+ struct {
+ u32 mask;
+ u32 val;
+ } smstpcr_saved[ARRAY_SIZE(smstpcr)];
};
@@ -293,7 +303,8 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
if (core->type == CLK_TYPE_DIV6P1) {
clk = cpg_div6_register(core->name, 1, &parent_name,
- priv->base + core->offset);
+ priv->base + core->offset,
+ &priv->notifiers);
} else {
clk = clk_register_fixed_factor(NULL, core->name,
parent_name, 0,
@@ -304,7 +315,8 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
default:
if (info->cpg_clk_register)
clk = info->cpg_clk_register(dev, core, info,
- priv->clks, priv->base);
+ priv->clks, priv->base,
+ &priv->notifiers);
else
dev_err(dev, "%s has unsupported core clock type %u\n",
core->name, core->type);
@@ -382,6 +394,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
priv->clks[id] = clk;
+ priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
return;
fail:
@@ -680,6 +693,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7796_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77970
+ {
+ .compatible = "renesas,r8a77970-cpg-mssr",
+ .data = &r8a77970_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77995
{
.compatible = "renesas,r8a77995-cpg-mssr",
@@ -694,6 +713,85 @@ static void cpg_mssr_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
+static int cpg_mssr_suspend_noirq(struct device *dev)
+{
+ struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
+ unsigned int reg;
+
+ /* This is the best we can do to check for the presence of PSCI */
+ if (!psci_ops.cpu_suspend)
+ return 0;
+
+ /* Save module registers with bits under our control */
+ for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
+ if (priv->smstpcr_saved[reg].mask)
+ priv->smstpcr_saved[reg].val =
+ readl(priv->base + SMSTPCR(reg));
+ }
+
+ /* Save core clocks */
+ raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);
+
+ return 0;
+}
+
+static int cpg_mssr_resume_noirq(struct device *dev)
+{
+ struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
+ unsigned int reg, i;
+ u32 mask, oldval, newval;
+
+ /* This is the best we can do to check for the presence of PSCI */
+ if (!psci_ops.cpu_suspend)
+ return 0;
+
+ /* Restore core clocks */
+ raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);
+
+ /* Restore module clocks */
+ for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
+ mask = priv->smstpcr_saved[reg].mask;
+ if (!mask)
+ continue;
+
+ oldval = readl(priv->base + SMSTPCR(reg));
+ newval = oldval & ~mask;
+ newval |= priv->smstpcr_saved[reg].val & mask;
+ if (newval == oldval)
+ continue;
+
+ writel(newval, priv->base + SMSTPCR(reg));
+
+ /* Wait until enabled clocks are really enabled */
+ mask &= ~priv->smstpcr_saved[reg].val;
+ if (!mask)
+ continue;
+
+ for (i = 1000; i > 0; --i) {
+ oldval = readl(priv->base + MSTPSR(reg));
+ if (!(oldval & mask))
+ break;
+ cpu_relax();
+ }
+
+ if (!i)
+ dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
+ priv->base + SMSTPCR(reg), oldval & mask);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops cpg_mssr_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
+ cpg_mssr_resume_noirq)
+};
+#define DEV_PM_OPS &cpg_mssr_pm
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+
static int __init cpg_mssr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -729,10 +827,12 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
if (!clks)
return -ENOMEM;
+ dev_set_drvdata(dev, priv);
priv->clks = clks;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
+ RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
for (i = 0; i < nclks; i++)
clks[i] = ERR_PTR(-ENOENT);
@@ -769,6 +869,7 @@ static struct platform_driver cpg_mssr_driver = {
.driver = {
.name = "renesas-cpg-mssr",
.of_match_table = cpg_mssr_match,
+ .pm = DEV_PM_OPS,
},
};
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 94b9071d1061..0745b0930308 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -127,7 +127,8 @@ struct cpg_mssr_info {
struct clk *(*cpg_clk_register)(struct device *dev,
const struct cpg_core_clk *core,
const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base);
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
};
extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
@@ -138,6 +139,7 @@ extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77970_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 0e09684d43a5..32c19c0f1e14 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -322,8 +322,6 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
sizeof(*rates) * nrates,
GFP_KERNEL);
if (!cpuclk->rate_table) {
- pr_err("%s: could not allocate memory for cpuclk rates\n",
- __func__);
ret = -ENOMEM;
goto unregister_notifier;
}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 00ad0e5f8d66..67e73fd71f09 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -290,15 +290,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(0), 6, 2, DFLAGS | CLK_DIVIDER_READ_ONLY,
div_core_peri_t, RK2928_CLKGATE_CON(0), 0, GFLAGS),
- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(ACLK_VEPU, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 9, GFLAGS),
- GATE(0, "hclk_vepu", "aclk_vepu", 0,
+ GATE(HCLK_VEPU, "hclk_vepu", "aclk_vepu", 0,
RK2928_CLKGATE_CON(3), 10, GFLAGS),
- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(ACLK_VDPU, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 11, GFLAGS),
- GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
+ GATE(HCLK_VDPU, "hclk_vdpu", "aclk_vdpu", 0,
RK2928_CLKGATE_CON(3), 12, GFLAGS),
GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
@@ -644,13 +644,13 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
- GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
+ GATE(HCLK_CIF1, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(5), 14, GFLAGS),
- GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
+ GATE(ACLK_CIF1, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
GATE(PCLK_TIMER1, "pclk_timer1", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 8, GFLAGS),
GATE(PCLK_TIMER2, "pclk_timer2", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index fc56565379dd..7c4d242f19c1 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -711,7 +711,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(PCLK_SIM, "pclk_sim", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 8, GFLAGS),
GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 6, GFLAGS),
GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 5, GFLAGS),
- GATE(0, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
GATE(0, "pclk_efuse_1024", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 0, GFLAGS),
/*
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 23835001e8bd..ef8900bc077f 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o clk-cpu.o
obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
+obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4412-isp.o
obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 6686e8ba61f9..d2c99d8916b8 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -457,8 +457,6 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
if (!cpuclk->cfg) {
- pr_err("%s: could not allocate memory for cpuclk data\n",
- __func__);
ret = -ENOMEM;
goto unregister_clk_nb;
}
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index b117783ed404..5bfc92ee3129 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -18,6 +18,7 @@
#include <linux/syscore_ops.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/clock/exynos-audss-clk.h>
@@ -36,14 +37,13 @@ static struct clk *epll;
#define ASS_CLK_DIV 0x4
#define ASS_CLK_GATE 0x8
-#ifdef CONFIG_PM_SLEEP
static unsigned long reg_save[][2] = {
{ ASS_CLK_SRC, 0 },
{ ASS_CLK_DIV, 0 },
{ ASS_CLK_GATE, 0 },
};
-static int exynos_audss_clk_suspend(struct device *dev)
+static int __maybe_unused exynos_audss_clk_suspend(struct device *dev)
{
int i;
@@ -53,7 +53,7 @@ static int exynos_audss_clk_suspend(struct device *dev)
return 0;
}
-static int exynos_audss_clk_resume(struct device *dev)
+static int __maybe_unused exynos_audss_clk_resume(struct device *dev)
{
int i;
@@ -62,7 +62,6 @@ static int exynos_audss_clk_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
struct exynos_audss_clk_drvdata {
unsigned int has_adma_clk:1;
@@ -135,6 +134,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
const struct exynos_audss_clk_drvdata *variant;
struct clk_hw **clk_table;
struct resource *res;
+ struct device *dev = &pdev->dev;
int i, ret = 0;
variant = of_device_get_match_data(&pdev->dev);
@@ -142,15 +142,15 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res);
+ reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(reg_base)) {
- dev_err(&pdev->dev, "failed to map audss registers\n");
+ dev_err(dev, "failed to map audss registers\n");
return PTR_ERR(reg_base);
}
epll = ERR_PTR(-ENODEV);
- clk_data = devm_kzalloc(&pdev->dev,
+ clk_data = devm_kzalloc(dev,
sizeof(*clk_data) +
sizeof(*clk_data->hws) * EXYNOS_AUDSS_MAX_CLKS,
GFP_KERNEL);
@@ -160,8 +160,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
clk_data->num = variant->num_clks;
clk_table = clk_data->hws;
- pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
- pll_in = devm_clk_get(&pdev->dev, "pll_in");
+ pll_ref = devm_clk_get(dev, "pll_ref");
+ pll_in = devm_clk_get(dev, "pll_in");
if (!IS_ERR(pll_ref))
mout_audss_p[0] = __clk_get_name(pll_ref);
if (!IS_ERR(pll_in)) {
@@ -172,88 +172,103 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
ret = clk_prepare_enable(epll);
if (ret) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"failed to prepare the epll clock\n");
return ret;
}
}
}
- clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss",
+
+ /*
+ * Enable runtime PM here to allow the clock core using runtime PM
+ * for the registered clocks. Additionally, we increase the runtime
+ * PM usage count before registering the clocks, to prevent the
+ * clock core from runtime suspending the device.
+ */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(dev, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
- cdclk = devm_clk_get(&pdev->dev, "cdclk");
- sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio");
+ cdclk = devm_clk_get(dev, "cdclk");
+ sclk_audio = devm_clk_get(dev, "sclk_audio");
if (!IS_ERR(cdclk))
mout_i2s_p[1] = __clk_get_name(cdclk);
if (!IS_ERR(sclk_audio))
mout_i2s_p[2] = __clk_get_name(sclk_audio);
- clk_table[EXYNOS_MOUT_I2S] = clk_hw_register_mux(NULL, "mout_i2s",
+ clk_table[EXYNOS_MOUT_I2S] = clk_hw_register_mux(dev, "mout_i2s",
mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
- clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(NULL, "dout_srp",
+ clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(dev, "dout_srp",
"mout_audss", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_DIV, 0, 4, 0, &lock);
- clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(NULL,
+ clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(dev,
"dout_aud_bus", "dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
- clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(NULL, "dout_i2s",
+ clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(dev, "dout_i2s",
"mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
&lock);
- clk_table[EXYNOS_SRP_CLK] = clk_hw_register_gate(NULL, "srp_clk",
+ clk_table[EXYNOS_SRP_CLK] = clk_hw_register_gate(dev, "srp_clk",
"dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 0, 0, &lock);
- clk_table[EXYNOS_I2S_BUS] = clk_hw_register_gate(NULL, "i2s_bus",
+ clk_table[EXYNOS_I2S_BUS] = clk_hw_register_gate(dev, "i2s_bus",
"dout_aud_bus", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 2, 0, &lock);
- clk_table[EXYNOS_SCLK_I2S] = clk_hw_register_gate(NULL, "sclk_i2s",
+ clk_table[EXYNOS_SCLK_I2S] = clk_hw_register_gate(dev, "sclk_i2s",
"dout_i2s", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 3, 0, &lock);
- clk_table[EXYNOS_PCM_BUS] = clk_hw_register_gate(NULL, "pcm_bus",
+ clk_table[EXYNOS_PCM_BUS] = clk_hw_register_gate(dev, "pcm_bus",
"sclk_pcm", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 4, 0, &lock);
- sclk_pcm_in = devm_clk_get(&pdev->dev, "sclk_pcm_in");
+ sclk_pcm_in = devm_clk_get(dev, "sclk_pcm_in");
if (!IS_ERR(sclk_pcm_in))
sclk_pcm_p = __clk_get_name(sclk_pcm_in);
- clk_table[EXYNOS_SCLK_PCM] = clk_hw_register_gate(NULL, "sclk_pcm",
+ clk_table[EXYNOS_SCLK_PCM] = clk_hw_register_gate(dev, "sclk_pcm",
sclk_pcm_p, CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 5, 0, &lock);
if (variant->has_adma_clk) {
- clk_table[EXYNOS_ADMA] = clk_hw_register_gate(NULL, "adma",
+ clk_table[EXYNOS_ADMA] = clk_hw_register_gate(dev, "adma",
"dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 9, 0, &lock);
}
for (i = 0; i < clk_data->num; i++) {
if (IS_ERR(clk_table[i])) {
- dev_err(&pdev->dev, "failed to register clock %d\n", i);
+ dev_err(dev, "failed to register clock %d\n", i);
ret = PTR_ERR(clk_table[i]);
goto unregister;
}
}
- ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
clk_data);
if (ret) {
- dev_err(&pdev->dev, "failed to add clock provider\n");
+ dev_err(dev, "failed to add clock provider\n");
goto unregister;
}
+ pm_runtime_put_sync(dev);
+
return 0;
unregister:
exynos_audss_clk_teardown();
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
if (!IS_ERR(epll))
clk_disable_unprepare(epll);
@@ -266,6 +281,7 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
of_clk_del_provider(pdev->dev.of_node);
exynos_audss_clk_teardown();
+ pm_runtime_disable(&pdev->dev);
if (!IS_ERR(epll))
clk_disable_unprepare(epll);
@@ -274,8 +290,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops exynos_audss_clk_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_audss_clk_suspend,
- exynos_audss_clk_resume)
+ SET_RUNTIME_PM_OPS(exynos_audss_clk_suspend, exynos_audss_clk_resume,
+ NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver exynos_audss_clk_driver = {
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index a21aea062bae..f29fb5824005 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -144,8 +144,6 @@ static void __init exynos4_clkout_init(struct device_node *node)
}
CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
- exynos4_clkout_init);
CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
exynos4_clkout_init);
CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index d8d3cb67b402..134f25f2a913 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -550,9 +550,8 @@ static const struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __
/* list of mux clocks supported in all exynos4 soc's */
static const struct samsung_mux_clock exynos4_mux_clks[] __initconst = {
- MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
- CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0,
- "mout_apll"),
+ MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
@@ -737,7 +736,7 @@ static const struct samsung_div_clock exynos4_div_clks[] __initconst = {
DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
- DIV(CLK_ARM_CLK, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
@@ -837,6 +836,12 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
DIV(0, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
DIV(0, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
DIV(0, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
+ DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
+ DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+ DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
+};
+
+static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
DIV_F(CLK_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
CLK_GET_RATE_NOCACHE, 0),
DIV_F(CLK_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
@@ -846,18 +851,10 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
4, 3, CLK_GET_RATE_NOCACHE, 0),
DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
8, 3, CLK_GET_RATE_NOCACHE, 0),
- DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
- DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
- DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
};
/* list of gate clocks supported in all exynos4 soc's */
static const struct samsung_gate_clock exynos4_gate_clks[] __initconst = {
- /*
- * After all Exynos4 based platforms are migrated to use device tree,
- * the device name and clock alias names specified below for some
- * of the clocks can be removed.
- */
GATE(CLK_PPMULEFT, "ppmuleft", "aclk200", GATE_IP_LEFTBUS, 1, 0, 0),
GATE(CLK_PPMURIGHT, "ppmuright", "aclk200", GATE_IP_RIGHTBUS, 1, 0, 0),
GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
@@ -1147,6 +1144,13 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
0, 0),
GATE(CLK_I2S0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
0, 0),
+ GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+ GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", GATE_IP_DMC, 24, 0, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
+ 0),
+};
+
+static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = {
GATE(CLK_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(CLK_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1,
@@ -1199,24 +1203,6 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
- GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", GATE_IP_DMC, 24, 0, 0),
- GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
- 0),
-};
-
-static const struct samsung_clock_alias exynos4_aliases[] __initconst = {
- ALIAS(CLK_MOUT_CORE, NULL, "moutcore"),
- ALIAS(CLK_ARM_CLK, NULL, "armclk"),
- ALIAS(CLK_SCLK_APLL, NULL, "mout_apll"),
-};
-
-static const struct samsung_clock_alias exynos4210_aliases[] __initconst = {
- ALIAS(CLK_SCLK_MPLL, NULL, "mout_mpll"),
-};
-
-static const struct samsung_clock_alias exynos4x12_aliases[] __initconst = {
- ALIAS(CLK_MOUT_MPLL_USER_C, NULL, "mout_mpll"),
};
/*
@@ -1355,14 +1341,14 @@ static const struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initconst =
};
static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
- APLL_LOCK, APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
- E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
- [epll] = PLL_A(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
- EPLL_LOCK, EPLL_CON0, "fout_epll", NULL),
- [vpll] = PLL_A(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
- VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
+ [apll] = PLL(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [mpll] = PLL(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+ E4210_MPLL_LOCK, E4210_MPLL_CON0, NULL),
+ [epll] = PLL(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+ [vpll] = PLL(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
+ VPLL_LOCK, VPLL_CON0, NULL),
};
static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
@@ -1416,24 +1402,6 @@ static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = {
{ 0 },
};
-static const struct exynos_cpuclk_cfg_data e4212_armclk_d[] __initconst = {
- { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
- { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
- { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
- { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
- { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4210_CPU_DIV1(2, 4), },
- { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4210_CPU_DIV1(2, 4), },
- { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
- { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
- { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4210_CPU_DIV1(2, 3), },
- { 0 },
-};
-
#define E4412_CPU_DIV1(cores, hpm, copy) \
(((cores) << 8) | ((hpm) << 4) | ((copy) << 0))
@@ -1527,8 +1495,6 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4210_div_clks));
samsung_clk_register_gate(ctx, exynos4210_gate_clks,
ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_alias(ctx, exynos4210_aliases,
- ARRAY_SIZE(exynos4210_aliases));
samsung_clk_register_fixed_factor(ctx,
exynos4210_fixed_factor_clks,
ARRAY_SIZE(exynos4210_fixed_factor_clks));
@@ -1537,32 +1503,31 @@ static void __init exynos4_clk_init(struct device_node *np,
e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
} else {
+ struct resource res;
+
samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
samsung_clk_register_div(ctx, exynos4x12_div_clks,
ARRAY_SIZE(exynos4x12_div_clks));
samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_alias(ctx, exynos4x12_aliases,
- ARRAY_SIZE(exynos4x12_aliases));
samsung_clk_register_fixed_factor(ctx,
exynos4x12_fixed_factor_clks,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
- if (of_machine_is_compatible("samsung,exynos4412")) {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
- e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
- } else {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
- e4212_armclk_d, ARRAY_SIZE(e4212_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+
+ of_address_to_resource(np, 0, &res);
+ if (resource_size(&res) > 0x18000) {
+ samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
+ ARRAY_SIZE(exynos4x12_isp_div_clks));
+ samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
+ ARRAY_SIZE(exynos4x12_isp_gate_clks));
}
- }
- samsung_clk_register_alias(ctx, exynos4_aliases,
- ARRAY_SIZE(exynos4_aliases));
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+ e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ }
if (soc == EXYNOS4X12)
exynos4x12_core_down_clock();
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
new file mode 100644
index 000000000000..d5f1ccb36300
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos4412 ISP module.
+*/
+
+#include <dt-bindings/clock/exynos4.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "clk.h"
+
+/* Exynos4x12 specific registers, which belong to ISP power domain */
+#define E4X12_DIV_ISP0 0x0300
+#define E4X12_DIV_ISP1 0x0304
+#define E4X12_GATE_ISP0 0x0800
+#define E4X12_GATE_ISP1 0x0804
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+static struct samsung_clk_reg_dump *exynos4x12_save_isp;
+
+static const unsigned long exynos4x12_clk_isp_save[] __initconst = {
+ E4X12_DIV_ISP0,
+ E4X12_DIV_ISP1,
+ E4X12_GATE_ISP0,
+ E4X12_GATE_ISP1,
+};
+
+PNAME(mout_user_aclk400_mcuisp_p4x12) = { "fin_pll", "div_aclk400_mcuisp", };
+
+static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
+ DIV(CLK_ISP_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
+ DIV(CLK_ISP_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+ DIV(CLK_ISP_DIV_MCUISP0, "div_mcuisp0", "aclk400_mcuisp",
+ E4X12_DIV_ISP1, 4, 3),
+ DIV(CLK_ISP_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0",
+ E4X12_DIV_ISP1, 8, 3),
+ DIV(0, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
+};
+
+static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = {
+ GATE(CLK_ISP_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0, 0, 0),
+ GATE(CLK_ISP_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1, 0, 0),
+ GATE(CLK_ISP_FIMC_FD, "fd", "aclk200", E4X12_GATE_ISP0, 2, 0, 0),
+ GATE(CLK_ISP_FIMC_LITE0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, 0, 0),
+ GATE(CLK_ISP_FIMC_LITE1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, 0, 0),
+ GATE(CLK_ISP_MCUISP, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, 0, 0),
+ GATE(CLK_ISP_GICISP, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, 0, 0),
+ GATE(CLK_ISP_SMMU_ISP, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, 0, 0),
+ GATE(CLK_ISP_SMMU_DRC, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, 0, 0),
+ GATE(CLK_ISP_SMMU_FD, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, 0, 0),
+ GATE(CLK_ISP_SMMU_LITE0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
+ 0, 0),
+ GATE(CLK_ISP_SMMU_LITE1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
+ 0, 0),
+ GATE(CLK_ISP_PPMUISPMX, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
+ 0, 0),
+ GATE(CLK_ISP_PPMUISPX, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
+ 0, 0),
+ GATE(CLK_ISP_MCUCTL_ISP, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
+ 0, 0),
+ GATE(CLK_ISP_MPWM_ISP, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
+ 0, 0),
+ GATE(CLK_ISP_I2C0_ISP, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
+ 0, 0),
+ GATE(CLK_ISP_I2C1_ISP, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
+ 0, 0),
+ GATE(CLK_ISP_MTCADC_ISP, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
+ 0, 0),
+ GATE(CLK_ISP_PWM_ISP, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, 0, 0),
+ GATE(CLK_ISP_WDT_ISP, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, 0, 0),
+ GATE(CLK_ISP_UART_ISP, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
+ 0, 0),
+ GATE(CLK_ISP_ASYNCAXIM, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
+ 0, 0),
+ GATE(CLK_ISP_SMMU_ISPCX, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
+ 0, 0),
+ GATE(CLK_ISP_SPI0_ISP, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
+ 0, 0),
+ GATE(CLK_ISP_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
+ 0, 0),
+};
+
+static int __maybe_unused exynos4x12_isp_clk_suspend(struct device *dev)
+{
+ struct samsung_clk_provider *ctx = dev_get_drvdata(dev);
+
+ samsung_clk_save(ctx->reg_base, exynos4x12_save_isp,
+ ARRAY_SIZE(exynos4x12_clk_isp_save));
+ return 0;
+}
+
+static int __maybe_unused exynos4x12_isp_clk_resume(struct device *dev)
+{
+ struct samsung_clk_provider *ctx = dev_get_drvdata(dev);
+
+ samsung_clk_restore(ctx->reg_base, exynos4x12_save_isp,
+ ARRAY_SIZE(exynos4x12_clk_isp_save));
+ return 0;
+}
+
+static int __init exynos4x12_isp_clk_probe(struct platform_device *pdev)
+{
+ struct samsung_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ void __iomem *reg_base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base)) {
+ dev_err(dev, "failed to map registers\n");
+ return PTR_ERR(reg_base);
+ }
+
+ exynos4x12_save_isp = samsung_clk_alloc_reg_dump(exynos4x12_clk_isp_save,
+ ARRAY_SIZE(exynos4x12_clk_isp_save));
+ if (!exynos4x12_save_isp)
+ return -ENOMEM;
+
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_ISP_CLKS);
+ ctx->dev = dev;
+
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
+ ARRAY_SIZE(exynos4x12_isp_div_clks));
+ samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
+ ARRAY_SIZE(exynos4x12_isp_gate_clks));
+
+ samsung_clk_of_add_provider(np, ctx);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static const struct of_device_id exynos4x12_isp_clk_of_match[] = {
+ { .compatible = "samsung,exynos4412-isp-clock", },
+ { },
+};
+
+static const struct dev_pm_ops exynos4x12_isp_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos4x12_isp_clk_suspend,
+ exynos4x12_isp_clk_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos4x12_isp_clk_driver __refdata = {
+ .driver = {
+ .name = "exynos4x12-isp-clk",
+ .of_match_table = exynos4x12_isp_clk_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &exynos4x12_isp_pm_ops,
+ },
+ .probe = exynos4x12_isp_clk_probe,
+};
+
+static int __init exynos4x12_isp_clk_init(void)
+{
+ return platform_driver_register(&exynos4x12_isp_clk_driver);
+}
+core_initcall(exynos4x12_isp_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 27a227d6620c..9b073c98a891 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -293,14 +293,14 @@ static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = {
/*
* CMU_CPU
*/
- MUX_FA(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
- CLK_SET_RATE_PARENT, 0, "mout_apll"),
- MUX_A(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
+ MUX_F(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
/*
* CMU_CORE
*/
- MUX_A(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
+ MUX(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
/*
* CMU_TOP
@@ -391,7 +391,7 @@ static const struct samsung_div_clock exynos5250_div_clks[] __initconst = {
*/
DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(0, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV_A(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3, "armclk"),
+ DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
/*
* CMU_TOP
@@ -743,10 +743,10 @@ static const struct samsung_pll_rate_table apll_24mhz_tbl[] __initconst = {
};
static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
- APLL_LOCK, APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
- MPLL_LOCK, MPLL_CON0, "fout_mpll", NULL),
+ [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, NULL),
+ [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK,
+ MPLL_CON0, NULL),
[bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
BPLL_CON0, NULL),
[gpll] = PLL(pll_35xx, CLK_FOUT_GPLL, "fout_gpll", "fin_pll", GPLL_LOCK,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 25601967d1cd..45d34f601e9e 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -600,8 +600,7 @@ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
TOP_SPARE2, 4, 1),
MUX(0, "mout_aclk400_isp", mout_group1_p, SRC_TOP0, 0, 2),
- MUX_A(0, "mout_aclk400_mscl", mout_group1_p,
- SRC_TOP0, 4, 2, "aclk400_mscl"),
+ MUX(0, "mout_aclk400_mscl", mout_group1_p, SRC_TOP0, 4, 2),
MUX(0, "mout_aclk400_wcore", mout_group1_p, SRC_TOP0, 16, 2),
MUX(0, "mout_aclk100_noc", mout_group1_p, SRC_TOP0, 20, 2),
@@ -998,7 +997,7 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
GATE_BUS_TOP, 16, 0, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
- GATE_BUS_TOP, 17, 0, 0),
+ GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 11343a597093..db270908037a 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -9,9 +9,13 @@
* Common Clock Framework support for Exynos5433 SoC.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/clock/exynos5433.h>
@@ -1991,6 +1995,14 @@ static const unsigned long fsys_clk_regs[] __initconst = {
ENABLE_IP_FSYS1,
};
+static const struct samsung_clk_reg_dump fsys_suspend_regs[] = {
+ { MUX_SEL_FSYS0, 0 },
+ { MUX_SEL_FSYS1, 0 },
+ { MUX_SEL_FSYS2, 0 },
+ { MUX_SEL_FSYS3, 0 },
+ { MUX_SEL_FSYS4, 0 },
+};
+
static const struct samsung_fixed_rate_clock fsys_fixed_clks[] __initconst = {
/* PHY clocks from USBDRD30_PHY */
FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY,
@@ -2296,16 +2308,11 @@ static const struct samsung_cmu_info fsys_cmu_info __initconst = {
.nr_clk_ids = FSYS_NR_CLK,
.clk_regs = fsys_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+ .suspend_regs = fsys_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(fsys_suspend_regs),
+ .clk_name = "aclk_fsys_200",
};
-static void __init exynos5433_cmu_fsys_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &fsys_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys",
- exynos5433_cmu_fsys_init);
-
/*
* Register offset definitions for CMU_G2D
*/
@@ -2335,6 +2342,10 @@ static const unsigned long g2d_clk_regs[] __initconst = {
DIV_ENABLE_IP_G2D_SECURE_SMMU_G2D,
};
+static const struct samsung_clk_reg_dump g2d_suspend_regs[] = {
+ { MUX_SEL_G2D0, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_aclk_g2d_266_user_p) = { "oscclk", "aclk_g2d_266", };
PNAME(mout_aclk_g2d_400_user_p) = { "oscclk", "aclk_g2d_400", };
@@ -2420,16 +2431,11 @@ static const struct samsung_cmu_info g2d_cmu_info __initconst = {
.nr_clk_ids = G2D_NR_CLK,
.clk_regs = g2d_clk_regs,
.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs),
+ .suspend_regs = g2d_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(g2d_suspend_regs),
+ .clk_name = "aclk_g2d_400",
};
-static void __init exynos5433_cmu_g2d_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &g2d_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_g2d, "samsung,exynos5433-cmu-g2d",
- exynos5433_cmu_g2d_init);
-
/*
* Register offset definitions for CMU_DISP
*/
@@ -2494,6 +2500,18 @@ static const unsigned long disp_clk_regs[] __initconst = {
CLKOUT_CMU_DISP_DIV_STAT,
};
+static const struct samsung_clk_reg_dump disp_suspend_regs[] = {
+ /* PLL has to be enabled for suspend */
+ { DISP_PLL_CON0, 0x85f40502 },
+ /* ignore status of external PHY muxes during suspend to avoid hangs */
+ { MUX_IGNORE_DISP2, 0x00111111 },
+ { MUX_SEL_DISP0, 0 },
+ { MUX_SEL_DISP1, 0 },
+ { MUX_SEL_DISP2, 0 },
+ { MUX_SEL_DISP3, 0 },
+ { MUX_SEL_DISP4, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_disp_pll_p) = { "oscclk", "fout_disp_pll", };
PNAME(mout_sclk_dsim1_user_p) = { "oscclk", "sclk_dsim1_disp", };
@@ -2841,16 +2859,11 @@ static const struct samsung_cmu_info disp_cmu_info __initconst = {
.nr_clk_ids = DISP_NR_CLK,
.clk_regs = disp_clk_regs,
.nr_clk_regs = ARRAY_SIZE(disp_clk_regs),
+ .suspend_regs = disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(disp_suspend_regs),
+ .clk_name = "aclk_disp_333",
};
-static void __init exynos5433_cmu_disp_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &disp_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_disp, "samsung,exynos5433-cmu-disp",
- exynos5433_cmu_disp_init);
-
/*
* Register offset definitions for CMU_AUD
*/
@@ -2885,6 +2898,11 @@ static const unsigned long aud_clk_regs[] __initconst = {
ENABLE_IP_AUD1,
};
+static const struct samsung_clk_reg_dump aud_suspend_regs[] = {
+ { MUX_SEL_AUD0, 0 },
+ { MUX_SEL_AUD1, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", };
PNAME(mout_sclk_aud_pcm_p) = { "mout_aud_pll_user", "ioclk_audiocdclk0",};
@@ -3011,16 +3029,11 @@ static const struct samsung_cmu_info aud_cmu_info __initconst = {
.nr_clk_ids = AUD_NR_CLK,
.clk_regs = aud_clk_regs,
.nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
+ .suspend_regs = aud_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(aud_suspend_regs),
+ .clk_name = "fout_aud_pll",
};
-static void __init exynos5433_cmu_aud_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &aud_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_aud, "samsung,exynos5433-cmu-aud",
- exynos5433_cmu_aud_init);
-
-
/*
* Register offset definitions for CMU_BUS{0|1|2}
*/
@@ -3222,6 +3235,10 @@ static const unsigned long g3d_clk_regs[] __initconst = {
CLK_STOPCTRL,
};
+static const struct samsung_clk_reg_dump g3d_suspend_regs[] = {
+ { MUX_SEL_G3D, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_aclk_g3d_400_p) = { "mout_g3d_pll", "aclk_g3d_400", };
PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll", };
@@ -3295,15 +3312,11 @@ static const struct samsung_cmu_info g3d_cmu_info __initconst = {
.nr_clk_ids = G3D_NR_CLK,
.clk_regs = g3d_clk_regs,
.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
+ .suspend_regs = g3d_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(g3d_suspend_regs),
+ .clk_name = "aclk_g3d_400",
};
-static void __init exynos5433_cmu_g3d_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &g3d_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d",
- exynos5433_cmu_g3d_init);
-
/*
* Register offset definitions for CMU_GSCL
*/
@@ -3342,6 +3355,12 @@ static const unsigned long gscl_clk_regs[] __initconst = {
ENABLE_IP_GSCL_SECURE_SMMU_GSCL2,
};
+static const struct samsung_clk_reg_dump gscl_suspend_regs[] = {
+ { MUX_SEL_GSCL, 0 },
+ { ENABLE_ACLK_GSCL, 0xfff },
+ { ENABLE_PCLK_GSCL, 0xff },
+};
+
/* list of all parent clock list */
PNAME(aclk_gscl_111_user_p) = { "oscclk", "aclk_gscl_111", };
PNAME(aclk_gscl_333_user_p) = { "oscclk", "aclk_gscl_333", };
@@ -3436,15 +3455,11 @@ static const struct samsung_cmu_info gscl_cmu_info __initconst = {
.nr_clk_ids = GSCL_NR_CLK,
.clk_regs = gscl_clk_regs,
.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs),
+ .suspend_regs = gscl_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(gscl_suspend_regs),
+ .clk_name = "aclk_gscl_111",
};
-static void __init exynos5433_cmu_gscl_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &gscl_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_gscl, "samsung,exynos5433-cmu-gscl",
- exynos5433_cmu_gscl_init);
-
/*
* Register offset definitions for CMU_APOLLO
*/
@@ -3970,6 +3985,11 @@ static const unsigned long mscl_clk_regs[] __initconst = {
ENABLE_IP_MSCL_SECURE_SMMU_JPEG,
};
+static const struct samsung_clk_reg_dump mscl_suspend_regs[] = {
+ { MUX_SEL_MSCL0, 0 },
+ { MUX_SEL_MSCL1, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_sclk_jpeg_user_p) = { "oscclk", "sclk_jpeg_mscl", };
PNAME(mout_aclk_mscl_400_user_p) = { "oscclk", "aclk_mscl_400", };
@@ -4082,15 +4102,11 @@ static const struct samsung_cmu_info mscl_cmu_info __initconst = {
.nr_clk_ids = MSCL_NR_CLK,
.clk_regs = mscl_clk_regs,
.nr_clk_regs = ARRAY_SIZE(mscl_clk_regs),
+ .suspend_regs = mscl_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(mscl_suspend_regs),
+ .clk_name = "aclk_mscl_400",
};
-static void __init exynos5433_cmu_mscl_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &mscl_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl",
- exynos5433_cmu_mscl_init);
-
/*
* Register offset definitions for CMU_MFC
*/
@@ -4120,6 +4136,10 @@ static const unsigned long mfc_clk_regs[] __initconst = {
ENABLE_IP_MFC_SECURE_SMMU_MFC,
};
+static const struct samsung_clk_reg_dump mfc_suspend_regs[] = {
+ { MUX_SEL_MFC, 0 },
+};
+
PNAME(mout_aclk_mfc_400_user_p) = { "oscclk", "aclk_mfc_400", };
static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
@@ -4190,15 +4210,11 @@ static const struct samsung_cmu_info mfc_cmu_info __initconst = {
.nr_clk_ids = MFC_NR_CLK,
.clk_regs = mfc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
+ .suspend_regs = mfc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(mfc_suspend_regs),
+ .clk_name = "aclk_mfc_400",
};
-static void __init exynos5433_cmu_mfc_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &mfc_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc",
- exynos5433_cmu_mfc_init);
-
/*
* Register offset definitions for CMU_HEVC
*/
@@ -4228,6 +4244,10 @@ static const unsigned long hevc_clk_regs[] __initconst = {
ENABLE_IP_HEVC_SECURE_SMMU_HEVC,
};
+static const struct samsung_clk_reg_dump hevc_suspend_regs[] = {
+ { MUX_SEL_HEVC, 0 },
+};
+
PNAME(mout_aclk_hevc_400_user_p) = { "oscclk", "aclk_hevc_400", };
static const struct samsung_mux_clock hevc_mux_clks[] __initconst = {
@@ -4300,15 +4320,11 @@ static const struct samsung_cmu_info hevc_cmu_info __initconst = {
.nr_clk_ids = HEVC_NR_CLK,
.clk_regs = hevc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(hevc_clk_regs),
+ .suspend_regs = hevc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(hevc_suspend_regs),
+ .clk_name = "aclk_hevc_400",
};
-static void __init exynos5433_cmu_hevc_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &hevc_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc",
- exynos5433_cmu_hevc_init);
-
/*
* Register offset definitions for CMU_ISP
*/
@@ -4342,6 +4358,10 @@ static const unsigned long isp_clk_regs[] __initconst = {
ENABLE_IP_ISP3,
};
+static const struct samsung_clk_reg_dump isp_suspend_regs[] = {
+ { MUX_SEL_ISP, 0 },
+};
+
PNAME(mout_aclk_isp_dis_400_user_p) = { "oscclk", "aclk_isp_dis_400", };
PNAME(mout_aclk_isp_400_user_p) = { "oscclk", "aclk_isp_400", };
@@ -4553,15 +4573,11 @@ static const struct samsung_cmu_info isp_cmu_info __initconst = {
.nr_clk_ids = ISP_NR_CLK,
.clk_regs = isp_clk_regs,
.nr_clk_regs = ARRAY_SIZE(isp_clk_regs),
+ .suspend_regs = isp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(isp_suspend_regs),
+ .clk_name = "aclk_isp_400",
};
-static void __init exynos5433_cmu_isp_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &isp_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_isp, "samsung,exynos5433-cmu-isp",
- exynos5433_cmu_isp_init);
-
/*
* Register offset definitions for CMU_CAM0
*/
@@ -4625,6 +4641,15 @@ static const unsigned long cam0_clk_regs[] __initconst = {
ENABLE_IP_CAM02,
ENABLE_IP_CAM03,
};
+
+static const struct samsung_clk_reg_dump cam0_suspend_regs[] = {
+ { MUX_SEL_CAM00, 0 },
+ { MUX_SEL_CAM01, 0 },
+ { MUX_SEL_CAM02, 0 },
+ { MUX_SEL_CAM03, 0 },
+ { MUX_SEL_CAM04, 0 },
+};
+
PNAME(mout_aclk_cam0_333_user_p) = { "oscclk", "aclk_cam0_333", };
PNAME(mout_aclk_cam0_400_user_p) = { "oscclk", "aclk_cam0_400", };
PNAME(mout_aclk_cam0_552_user_p) = { "oscclk", "aclk_cam0_552", };
@@ -5030,15 +5055,11 @@ static const struct samsung_cmu_info cam0_cmu_info __initconst = {
.nr_clk_ids = CAM0_NR_CLK,
.clk_regs = cam0_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cam0_clk_regs),
+ .suspend_regs = cam0_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(cam0_suspend_regs),
+ .clk_name = "aclk_cam0_400",
};
-static void __init exynos5433_cmu_cam0_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &cam0_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_cam0, "samsung,exynos5433-cmu-cam0",
- exynos5433_cmu_cam0_init);
-
/*
* Register offset definitions for CMU_CAM1
*/
@@ -5085,6 +5106,12 @@ static const unsigned long cam1_clk_regs[] __initconst = {
ENABLE_IP_CAM12,
};
+static const struct samsung_clk_reg_dump cam1_suspend_regs[] = {
+ { MUX_SEL_CAM10, 0 },
+ { MUX_SEL_CAM11, 0 },
+ { MUX_SEL_CAM12, 0 },
+};
+
PNAME(mout_sclk_isp_uart_user_p) = { "oscclk", "sclk_isp_uart_cam1", };
PNAME(mout_sclk_isp_spi1_user_p) = { "oscclk", "sclk_isp_spi1_cam1", };
PNAME(mout_sclk_isp_spi0_user_p) = { "oscclk", "sclk_isp_spi0_cam1", };
@@ -5403,11 +5430,223 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = {
.nr_clk_ids = CAM1_NR_CLK,
.clk_regs = cam1_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cam1_clk_regs),
+ .suspend_regs = cam1_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(cam1_suspend_regs),
+ .clk_name = "aclk_cam1_400",
+};
+
+
+struct exynos5433_cmu_data {
+ struct samsung_clk_reg_dump *clk_save;
+ unsigned int nr_clk_save;
+ const struct samsung_clk_reg_dump *clk_suspend;
+ unsigned int nr_clk_suspend;
+
+ struct clk *clk;
+ struct clk **pclks;
+ int nr_pclks;
+
+ /* must be the last entry */
+ struct samsung_clk_provider ctx;
+};
+
+static int __maybe_unused exynos5433_cmu_suspend(struct device *dev)
+{
+ struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
+ int i;
+
+ samsung_clk_save(data->ctx.reg_base, data->clk_save,
+ data->nr_clk_save);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_prepare_enable(data->pclks[i]);
+
+ /* for suspend some registers have to be set to certain values */
+ samsung_clk_restore(data->ctx.reg_base, data->clk_suspend,
+ data->nr_clk_suspend);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_disable_unprepare(data->pclks[i]);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int __maybe_unused exynos5433_cmu_resume(struct device *dev)
+{
+ struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
+ int i;
+
+ clk_prepare_enable(data->clk);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_prepare_enable(data->pclks[i]);
+
+ samsung_clk_restore(data->ctx.reg_base, data->clk_save,
+ data->nr_clk_save);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_disable_unprepare(data->pclks[i]);
+
+ return 0;
+}
+
+static int __init exynos5433_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct exynos5433_cmu_data *data;
+ struct samsung_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *reg_base;
+ int i;
+
+ info = of_device_get_match_data(dev);
+
+ data = devm_kzalloc(dev, sizeof(*data) +
+ sizeof(*data->ctx.clk_data.hws) * info->nr_clk_ids,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ ctx = &data->ctx;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base)) {
+ dev_err(dev, "failed to map registers\n");
+ return PTR_ERR(reg_base);
+ }
+
+ for (i = 0; i < info->nr_clk_ids; ++i)
+ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+
+ ctx->clk_data.num = info->nr_clk_ids;
+ ctx->reg_base = reg_base;
+ ctx->dev = dev;
+ spin_lock_init(&ctx->lock);
+
+ data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
+ info->nr_clk_regs);
+ data->nr_clk_save = info->nr_clk_regs;
+ data->clk_suspend = info->suspend_regs;
+ data->nr_clk_suspend = info->nr_suspend_regs;
+ data->nr_pclks = of_count_phandle_with_args(dev->of_node, "clocks",
+ "#clock-cells");
+ if (data->nr_pclks > 0) {
+ data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
+ data->nr_pclks, GFP_KERNEL);
+
+ for (i = 0; i < data->nr_pclks; i++) {
+ struct clk *clk = of_clk_get(dev->of_node, i);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ data->pclks[i] = clk;
+ }
+ }
+
+ if (info->clk_name)
+ data->clk = clk_get(dev, info->clk_name);
+ clk_prepare_enable(data->clk);
+
+ platform_set_drvdata(pdev, data);
+
+ /*
+ * Enable runtime PM here to allow the clock core using runtime PM
+ * for the registered clocks. Additionally, we increase the runtime
+ * PM usage count before registering the clocks, to prevent the
+ * clock core from runtime suspending the device.
+ */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ if (info->pll_clks)
+ samsung_clk_register_pll(ctx, info->pll_clks, info->nr_pll_clks,
+ reg_base);
+ if (info->mux_clks)
+ samsung_clk_register_mux(ctx, info->mux_clks,
+ info->nr_mux_clks);
+ if (info->div_clks)
+ samsung_clk_register_div(ctx, info->div_clks,
+ info->nr_div_clks);
+ if (info->gate_clks)
+ samsung_clk_register_gate(ctx, info->gate_clks,
+ info->nr_gate_clks);
+ if (info->fixed_clks)
+ samsung_clk_register_fixed_rate(ctx, info->fixed_clks,
+ info->nr_fixed_clks);
+ if (info->fixed_factor_clks)
+ samsung_clk_register_fixed_factor(ctx, info->fixed_factor_clks,
+ info->nr_fixed_factor_clks);
+
+ samsung_clk_of_add_provider(dev->of_node, ctx);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static const struct of_device_id exynos5433_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos5433-cmu-aud",
+ .data = &aud_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-cam0",
+ .data = &cam0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-cam1",
+ .data = &cam1_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-disp",
+ .data = &disp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-g2d",
+ .data = &g2d_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-g3d",
+ .data = &g3d_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-fsys",
+ .data = &fsys_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-gscl",
+ .data = &gscl_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-mfc",
+ .data = &mfc_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-hevc",
+ .data = &hevc_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-isp",
+ .data = &isp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-mscl",
+ .data = &mscl_cmu_info,
+ }, {
+ },
+};
+
+static const struct dev_pm_ops exynos5433_cmu_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume,
+ NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos5433_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos5433-cmu",
+ .of_match_table = exynos5433_cmu_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &exynos5433_cmu_pm_ops,
+ },
+ .probe = exynos5433_cmu_probe,
};
-static void __init exynos5433_cmu_cam1_init(struct device_node *np)
+static int __init exynos5433_cmu_init(void)
{
- samsung_cmu_register_one(np, &cam1_cmu_info);
+ return platform_driver_register(&exynos5433_cmu_driver);
}
-CLK_OF_DECLARE(exynos5433_cmu_cam1, "samsung,exynos5433-cmu-cam1",
- exynos5433_cmu_cam1_init);
+core_initcall(exynos5433_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index a80f3ef20801..b08bd54c5e76 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -53,8 +53,7 @@ static const struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __
/* mux clocks */
static const struct samsung_mux_clock exynos5440_mux_clks[] __initconst = {
MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
- MUX_A(CLK_ARM_CLK, "arm_clk", mout_armclk_p,
- CPU_CLK_STATUS, 0, 1, "armclk"),
+ MUX(CLK_ARM_CLK, "arm_clk", mout_armclk_p, CPU_CLK_STATUS, 0, 1),
};
/* divider clocks */
@@ -117,6 +116,13 @@ static const struct samsung_pll_clock exynos5440_plls[] __initconst = {
PLL(pll_2550x, CLK_CPLLB, "cpllb", "xtal", 0, 0x50, NULL),
};
+/*
+ * Clock aliases for legacy clkdev look-up.
+ */
+static const struct samsung_clock_alias exynos5440_aliases[] __initconst = {
+ ALIAS(CLK_ARM_CLK, NULL, "armclk"),
+};
+
/* register exynos5440 clocks */
static void __init exynos5440_clk_init(struct device_node *np)
{
@@ -147,6 +153,8 @@ static void __init exynos5440_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5440_div_clks));
samsung_clk_register_gate(ctx, exynos5440_gate_clks,
ARRAY_SIZE(exynos5440_gate_clks));
+ samsung_clk_register_alias(ctx, exynos5440_aliases,
+ ARRAY_SIZE(exynos5440_aliases));
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 037c61484098..1c4c7a3039f1 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -1388,7 +1388,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
pll->lock_reg = base + pll_clk->lock_offset;
pll->con_reg = base + pll_clk->con_offset;
- ret = clk_hw_register(NULL, &pll->hw);
+ ret = clk_hw_register(ctx->dev, &pll->hw);
if (ret) {
pr_err("%s: failed to register pll clock %s : %d\n",
__func__, pll_clk->name, ret);
@@ -1397,15 +1397,6 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
}
samsung_clk_add_lookup(ctx, &pll->hw, pll_clk->id);
-
- if (!pll_clk->alias)
- return;
-
- ret = clk_hw_register_clkdev(&pll->hw, pll_clk->alias,
- pll_clk->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup for %s : %d",
- __func__, pll_clk->name, ret);
}
void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index abb935c42916..d94b85a42356 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -117,8 +117,8 @@ struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
- MUX_A(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1, "msysclk"),
- MUX_A(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1, "armclk"),
+ MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
+ MUX(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1),
MUX(0, "mux_i2s0", i2s0_p, CLKSRC, 14, 2),
};
@@ -189,6 +189,10 @@ struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
};
struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
+ ALIAS(MSYSCLK, NULL, "msysclk"),
+ ALIAS(ARMCLK, NULL, "armclk"),
+ ALIAS(MPLL, NULL, "mpll"),
+ ALIAS(EPLL, NULL, "epll"),
ALIAS(HCLK, NULL, "hclk"),
ALIAS(HCLK_SSMC, NULL, "nand"),
ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
@@ -221,9 +225,9 @@ struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
/* S3C2416 specific clocks */
static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
- [mpll] = PLL(pll_6552_s3c2416, 0, "mpll", "mpllref",
+ [mpll] = PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref",
LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_6553, 0, "epll", "epllref",
+ [epll] = PLL(pll_6553, EPLL, "epll", "epllref",
LOCKCON1, EPLLCON, NULL),
};
@@ -275,9 +279,9 @@ struct samsung_clock_alias s3c2416_aliases[] __initdata = {
/* S3C2443 specific clocks */
static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
- [mpll] = PLL(pll_3000, 0, "mpll", "mpllref",
+ [mpll] = PLL(pll_3000, MPLL, "mpll", "mpllref",
LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_2126, 0, "epll", "epllref",
+ [epll] = PLL(pll_2126, EPLL, "epll", "epllref",
LOCKCON1, EPLLCON, NULL),
};
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 7ce0fa86c5ff..8634884aa11c 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -134,7 +134,7 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_fixed_rate(NULL, list->name,
+ clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name,
list->parent_name, list->flags, list->fixed_rate);
if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
@@ -163,7 +163,7 @@ void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_fixed_factor(NULL, list->name,
+ clk_hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
list->parent_name, list->flags, list->mult, list->div);
if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
@@ -181,10 +181,10 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
unsigned int nr_clk)
{
struct clk_hw *clk_hw;
- unsigned int idx, ret;
+ unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_mux(NULL, list->name,
+ clk_hw = clk_hw_register_mux(ctx->dev, list->name,
list->parent_names, list->num_parents, list->flags,
ctx->reg_base + list->offset,
list->shift, list->width, list->mux_flags, &ctx->lock);
@@ -195,15 +195,6 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
}
samsung_clk_add_lookup(ctx, clk_hw, list->id);
-
- /* register a clock lookup only if a clock alias is specified */
- if (list->alias) {
- ret = clk_hw_register_clkdev(clk_hw, list->alias,
- list->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, list->alias);
- }
}
}
@@ -213,17 +204,17 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
unsigned int nr_clk)
{
struct clk_hw *clk_hw;
- unsigned int idx, ret;
+ unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
if (list->table)
- clk_hw = clk_hw_register_divider_table(NULL,
+ clk_hw = clk_hw_register_divider_table(ctx->dev,
list->name, list->parent_name, list->flags,
ctx->reg_base + list->offset,
list->shift, list->width, list->div_flags,
list->table, &ctx->lock);
else
- clk_hw = clk_hw_register_divider(NULL, list->name,
+ clk_hw = clk_hw_register_divider(ctx->dev, list->name,
list->parent_name, list->flags,
ctx->reg_base + list->offset, list->shift,
list->width, list->div_flags, &ctx->lock);
@@ -234,15 +225,6 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
}
samsung_clk_add_lookup(ctx, clk_hw, list->id);
-
- /* register a clock lookup only if a clock alias is specified */
- if (list->alias) {
- ret = clk_hw_register_clkdev(clk_hw, list->alias,
- list->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, list->alias);
- }
}
}
@@ -252,10 +234,10 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
unsigned int nr_clk)
{
struct clk_hw *clk_hw;
- unsigned int idx, ret;
+ unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_gate(NULL, list->name, list->parent_name,
+ clk_hw = clk_hw_register_gate(ctx->dev, list->name, list->parent_name,
list->flags, ctx->reg_base + list->offset,
list->bit_idx, list->gate_flags, &ctx->lock);
if (IS_ERR(clk_hw)) {
@@ -264,15 +246,6 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
continue;
}
- /* register a clock lookup only if a clock alias is specified */
- if (list->alias) {
- ret = clk_hw_register_clkdev(clk_hw, list->alias,
- list->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, list->alias);
- }
-
samsung_clk_add_lookup(ctx, clk_hw, list->id);
}
}
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index b8ca0dd3a38b..3880d2f9d582 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -24,6 +24,7 @@
*/
struct samsung_clk_provider {
void __iomem *reg_base;
+ struct device *dev;
spinlock_t lock;
/* clk_data must be the last entry due to variable lenght 'hws' array */
struct clk_hw_onecell_data clk_data;
@@ -106,7 +107,6 @@ struct samsung_fixed_factor_clock {
/**
* struct samsung_mux_clock: information about mux clock
* @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this mux clock.
* @parent_names: array of pointer to parent clock names.
* @num_parents: number of parents listed in @parent_names.
@@ -115,11 +115,9 @@ struct samsung_fixed_factor_clock {
* @shift: starting bit location of the mux control bit-field in @reg.
* @width: width of the mux control bit-field in @reg.
* @mux_flags: flags for mux-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_mux_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *const *parent_names;
u8 num_parents;
@@ -128,13 +126,11 @@ struct samsung_mux_clock {
u8 shift;
u8 width;
u8 mux_flags;
- const char *alias;
};
-#define __MUX(_id, dname, cname, pnames, o, s, w, f, mf, a) \
+#define __MUX(_id, cname, pnames, o, s, w, f, mf) \
{ \
.id = _id, \
- .dev_name = dname, \
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
@@ -143,36 +139,26 @@ struct samsung_mux_clock {
.shift = s, \
.width = w, \
.mux_flags = mf, \
- .alias = a, \
}
#define MUX(_id, cname, pnames, o, s, w) \
- __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, NULL)
-
-#define MUX_A(_id, cname, pnames, o, s, w, a) \
- __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, a)
+ __MUX(_id, cname, pnames, o, s, w, 0, 0)
#define MUX_F(_id, cname, pnames, o, s, w, f, mf) \
- __MUX(_id, NULL, cname, pnames, o, s, w, f, mf, NULL)
-
-#define MUX_FA(_id, cname, pnames, o, s, w, f, mf, a) \
- __MUX(_id, NULL, cname, pnames, o, s, w, f, mf, a)
+ __MUX(_id, cname, pnames, o, s, w, f, mf)
/**
* @id: platform specific id of the clock.
* struct samsung_div_clock: information about div clock
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this div clock.
* @parent_name: name of the parent clock.
* @flags: optional flags for basic clock.
* @offset: offset of the register for configuring the div.
* @shift: starting bit location of the div control bit-field in @reg.
* @div_flags: flags for div-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_div_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *parent_name;
unsigned long flags;
@@ -180,14 +166,12 @@ struct samsung_div_clock {
u8 shift;
u8 width;
u8 div_flags;
- const char *alias;
struct clk_div_table *table;
};
-#define __DIV(_id, dname, cname, pname, o, s, w, f, df, a, t) \
+#define __DIV(_id, cname, pname, o, s, w, f, df, t) \
{ \
.id = _id, \
- .dev_name = dname, \
.name = cname, \
.parent_name = pname, \
.flags = f, \
@@ -195,70 +179,51 @@ struct samsung_div_clock {
.shift = s, \
.width = w, \
.div_flags = df, \
- .alias = a, \
.table = t, \
}
#define DIV(_id, cname, pname, o, s, w) \
- __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, NULL)
-
-#define DIV_A(_id, cname, pname, o, s, w, a) \
- __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, a, NULL)
+ __DIV(_id, cname, pname, o, s, w, 0, 0, NULL)
#define DIV_F(_id, cname, pname, o, s, w, f, df) \
- __DIV(_id, NULL, cname, pname, o, s, w, f, df, NULL, NULL)
+ __DIV(_id, cname, pname, o, s, w, f, df, NULL)
#define DIV_T(_id, cname, pname, o, s, w, t) \
- __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, t)
+ __DIV(_id, cname, pname, o, s, w, 0, 0, t)
/**
* struct samsung_gate_clock: information about gate clock
* @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this gate clock.
* @parent_name: name of the parent clock.
* @flags: optional flags for basic clock.
* @offset: offset of the register for configuring the gate.
* @bit_idx: bit index of the gate control bit-field in @reg.
* @gate_flags: flags for gate-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_gate_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *parent_name;
unsigned long flags;
unsigned long offset;
u8 bit_idx;
u8 gate_flags;
- const char *alias;
};
-#define __GATE(_id, dname, cname, pname, o, b, f, gf, a) \
+#define __GATE(_id, cname, pname, o, b, f, gf) \
{ \
.id = _id, \
- .dev_name = dname, \
.name = cname, \
.parent_name = pname, \
.flags = f, \
.offset = o, \
.bit_idx = b, \
.gate_flags = gf, \
- .alias = a, \
}
#define GATE(_id, cname, pname, o, b, f, gf) \
- __GATE(_id, NULL, cname, pname, o, b, f, gf, NULL)
-
-#define GATE_A(_id, cname, pname, o, b, f, gf, a) \
- __GATE(_id, NULL, cname, pname, o, b, f, gf, a)
-
-#define GATE_D(_id, dname, cname, pname, o, b, f, gf) \
- __GATE(_id, dname, cname, pname, o, b, f, gf, NULL)
-
-#define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
- __GATE(_id, dname, cname, pname, o, b, f, gf, a)
+ __GATE(_id, cname, pname, o, b, f, gf)
#define PNAME(x) static const char * const x[] __initconst
@@ -275,18 +240,15 @@ struct samsung_clk_reg_dump {
/**
* struct samsung_pll_clock: information about pll clock
* @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this pll clock.
* @parent_name: name of the parent clock.
* @flags: optional flags for basic clock.
* @con_offset: offset of the register for configuring the PLL.
* @lock_offset: offset of the register for locking the PLL.
* @type: Type of PLL to be registered.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_pll_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *parent_name;
unsigned long flags;
@@ -294,31 +256,23 @@ struct samsung_pll_clock {
int lock_offset;
enum samsung_pll_type type;
const struct samsung_pll_rate_table *rate_table;
- const char *alias;
};
-#define __PLL(_typ, _id, _dname, _name, _pname, _flags, _lock, _con, \
- _rtable, _alias) \
+#define __PLL(_typ, _id, _name, _pname, _flags, _lock, _con, _rtable) \
{ \
.id = _id, \
.type = _typ, \
- .dev_name = _dname, \
.name = _name, \
.parent_name = _pname, \
- .flags = CLK_GET_RATE_NOCACHE, \
+ .flags = _flags, \
.con_offset = _con, \
.lock_offset = _lock, \
.rate_table = _rtable, \
- .alias = _alias, \
}
#define PLL(_typ, _id, _name, _pname, _lock, _con, _rtable) \
- __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
- _lock, _con, _rtable, _name)
-
-#define PLL_A(_typ, _id, _name, _pname, _lock, _con, _alias, _rtable) \
- __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
- _lock, _con, _rtable, _alias)
+ __PLL(_typ, _id, _name, _pname, CLK_GET_RATE_NOCACHE, _lock, \
+ _con, _rtable)
struct samsung_clock_reg_cache {
struct list_head node;
@@ -352,6 +306,12 @@ struct samsung_cmu_info {
/* list and number of clocks registers */
const unsigned long *clk_regs;
unsigned int nr_clk_regs;
+
+ /* list and number of clocks registers to set before suspend */
+ const struct samsung_clk_reg_dump *suspend_regs;
+ unsigned int nr_suspend_regs;
+ /* name of the parent clock needed for CMU register access */
+ const char *clk_name;
};
extern struct samsung_clk_provider *__init samsung_clk_init(
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index 665fa681b2e1..0cd11e6893af 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -42,7 +42,7 @@ static struct clk_dmn clk_mmc45 = {
},
};
-static struct clk_init_data clk_nand_init = {
+static const struct clk_init_data clk_nand_init = {
.name = "nand",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index d0c6c9a2d06a..be012b4bab46 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -392,7 +392,7 @@ static const char * const pll_clk_parents[] = {
"xin",
};
-static struct clk_init_data clk_cpupll_init = {
+static const struct clk_init_data clk_cpupll_init = {
.name = "cpupll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -406,7 +406,7 @@ static struct clk_pll clk_cpupll = {
},
};
-static struct clk_init_data clk_mempll_init = {
+static const struct clk_init_data clk_mempll_init = {
.name = "mempll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -420,7 +420,7 @@ static struct clk_pll clk_mempll = {
},
};
-static struct clk_init_data clk_sys0pll_init = {
+static const struct clk_init_data clk_sys0pll_init = {
.name = "sys0pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -434,7 +434,7 @@ static struct clk_pll clk_sys0pll = {
},
};
-static struct clk_init_data clk_sys1pll_init = {
+static const struct clk_init_data clk_sys1pll_init = {
.name = "sys1pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -448,7 +448,7 @@ static struct clk_pll clk_sys1pll = {
},
};
-static struct clk_init_data clk_sys2pll_init = {
+static const struct clk_init_data clk_sys2pll_init = {
.name = "sys2pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -462,7 +462,7 @@ static struct clk_pll clk_sys2pll = {
},
};
-static struct clk_init_data clk_sys3pll_init = {
+static const struct clk_init_data clk_sys3pll_init = {
.name = "sys3pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -596,7 +596,7 @@ static const char * const audiodto_clk_parents[] = {
"sys3pll_clk1",
};
-static struct clk_init_data clk_audiodto_init = {
+static const struct clk_init_data clk_audiodto_init = {
.name = "audio_dto",
.ops = &dto_ops,
.parent_names = audiodto_clk_parents,
@@ -617,7 +617,7 @@ static const char * const disp0dto_clk_parents[] = {
"sys3pll_clk1",
};
-static struct clk_init_data clk_disp0dto_init = {
+static const struct clk_init_data clk_disp0dto_init = {
.name = "disp0_dto",
.ops = &dto_ops,
.parent_names = disp0dto_clk_parents,
@@ -638,7 +638,7 @@ static const char * const disp1dto_clk_parents[] = {
"sys3pll_clk1",
};
-static struct clk_init_data clk_disp1dto_init = {
+static const struct clk_init_data clk_disp1dto_init = {
.name = "disp1_dto",
.ops = &dto_ops,
.parent_names = disp1dto_clk_parents,
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 77e1e2491689..d8f9efa5129a 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -184,7 +184,7 @@ static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
return clk_hw_get_rate(parent_clk);
}
-static struct clk_ops std_pll_ops = {
+static const struct clk_ops std_pll_ops = {
.recalc_rate = pll_clk_recalc_rate,
.round_rate = pll_clk_round_rate,
.set_rate = pll_clk_set_rate,
@@ -194,21 +194,21 @@ static const char * const pll_clk_parents[] = {
"osc",
};
-static struct clk_init_data clk_pll1_init = {
+static const struct clk_init_data clk_pll1_init = {
.name = "pll1",
.ops = &std_pll_ops,
.parent_names = pll_clk_parents,
.num_parents = ARRAY_SIZE(pll_clk_parents),
};
-static struct clk_init_data clk_pll2_init = {
+static const struct clk_init_data clk_pll2_init = {
.name = "pll2",
.ops = &std_pll_ops,
.parent_names = pll_clk_parents,
.num_parents = ARRAY_SIZE(pll_clk_parents),
};
-static struct clk_init_data clk_pll3_init = {
+static const struct clk_init_data clk_pll3_init = {
.name = "pll3",
.ops = &std_pll_ops,
.parent_names = pll_clk_parents,
@@ -265,13 +265,13 @@ static unsigned long usb_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long pa
return (reg & SIRFSOC_USBPHY_PLL_BYPASS) ? parent_rate : 48*MHZ;
}
-static struct clk_ops usb_pll_ops = {
+static const struct clk_ops usb_pll_ops = {
.enable = usb_pll_clk_enable,
.disable = usb_pll_clk_disable,
.recalc_rate = usb_pll_clk_recalc_rate,
};
-static struct clk_init_data clk_usb_pll_init = {
+static const struct clk_init_data clk_usb_pll_init = {
.name = "usb_pll",
.ops = &usb_pll_ops,
.parent_names = pll_clk_parents,
@@ -437,7 +437,7 @@ static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return ret2 ? ret2 : ret1;
}
-static struct clk_ops msi_ops = {
+static const struct clk_ops msi_ops = {
.set_rate = dmn_clk_set_rate,
.round_rate = dmn_clk_round_rate,
.recalc_rate = dmn_clk_recalc_rate,
@@ -445,7 +445,7 @@ static struct clk_ops msi_ops = {
.get_parent = dmn_clk_get_parent,
};
-static struct clk_init_data clk_mem_init = {
+static const struct clk_init_data clk_mem_init = {
.name = "mem",
.ops = &msi_ops,
.parent_names = dmn_clk_parents,
@@ -459,7 +459,7 @@ static struct clk_dmn clk_mem = {
},
};
-static struct clk_init_data clk_sys_init = {
+static const struct clk_init_data clk_sys_init = {
.name = "sys",
.ops = &msi_ops,
.parent_names = dmn_clk_parents,
@@ -474,7 +474,7 @@ static struct clk_dmn clk_sys = {
},
};
-static struct clk_init_data clk_io_init = {
+static const struct clk_init_data clk_io_init = {
.name = "io",
.ops = &msi_ops,
.parent_names = dmn_clk_parents,
@@ -488,7 +488,7 @@ static struct clk_dmn clk_io = {
},
};
-static struct clk_ops cpu_ops = {
+static const struct clk_ops cpu_ops = {
.set_parent = dmn_clk_set_parent,
.get_parent = dmn_clk_get_parent,
.set_rate = cpu_clk_set_rate,
@@ -496,7 +496,7 @@ static struct clk_ops cpu_ops = {
.recalc_rate = cpu_clk_recalc_rate,
};
-static struct clk_init_data clk_cpu_init = {
+static const struct clk_init_data clk_cpu_init = {
.name = "cpu",
.ops = &cpu_ops,
.parent_names = dmn_clk_parents,
@@ -511,7 +511,7 @@ static struct clk_dmn clk_cpu = {
},
};
-static struct clk_ops dmn_ops = {
+static const struct clk_ops dmn_ops = {
.is_enabled = std_clk_is_enabled,
.enable = std_clk_enable,
.disable = std_clk_disable,
@@ -524,7 +524,7 @@ static struct clk_ops dmn_ops = {
/* dsp, gfx, mm, lcd and vpp domain */
-static struct clk_init_data clk_dsp_init = {
+static const struct clk_init_data clk_dsp_init = {
.name = "dsp",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -539,7 +539,7 @@ static struct clk_dmn clk_dsp = {
},
};
-static struct clk_init_data clk_gfx_init = {
+static const struct clk_init_data clk_gfx_init = {
.name = "gfx",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -554,7 +554,7 @@ static struct clk_dmn clk_gfx = {
},
};
-static struct clk_init_data clk_mm_init = {
+static const struct clk_init_data clk_mm_init = {
.name = "mm",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -574,7 +574,7 @@ static struct clk_dmn clk_mm = {
*/
#define clk_gfx2d clk_mm
-static struct clk_init_data clk_lcd_init = {
+static const struct clk_init_data clk_lcd_init = {
.name = "lcd",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -589,7 +589,7 @@ static struct clk_dmn clk_lcd = {
},
};
-static struct clk_init_data clk_vpp_init = {
+static const struct clk_init_data clk_vpp_init = {
.name = "vpp",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -604,21 +604,21 @@ static struct clk_dmn clk_vpp = {
},
};
-static struct clk_init_data clk_mmc01_init = {
+static const struct clk_init_data clk_mmc01_init = {
.name = "mmc01",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_init_data clk_mmc23_init = {
+static const struct clk_init_data clk_mmc23_init = {
.name = "mmc23",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_init_data clk_mmc45_init = {
+static const struct clk_init_data clk_mmc45_init = {
.name = "mmc45",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -679,13 +679,13 @@ static const char * const std_clk_io_parents[] = {
"io",
};
-static struct clk_ops ios_ops = {
+static const struct clk_ops ios_ops = {
.is_enabled = std_clk_is_enabled,
.enable = std_clk_enable,
.disable = std_clk_disable,
};
-static struct clk_init_data clk_cphif_init = {
+static const struct clk_init_data clk_cphif_init = {
.name = "cphif",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -699,7 +699,7 @@ static struct clk_std clk_cphif = {
},
};
-static struct clk_init_data clk_dmac0_init = {
+static const struct clk_init_data clk_dmac0_init = {
.name = "dmac0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -713,7 +713,7 @@ static struct clk_std clk_dmac0 = {
},
};
-static struct clk_init_data clk_dmac1_init = {
+static const struct clk_init_data clk_dmac1_init = {
.name = "dmac1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -727,7 +727,7 @@ static struct clk_std clk_dmac1 = {
},
};
-static struct clk_init_data clk_audio_init = {
+static const struct clk_init_data clk_audio_init = {
.name = "audio",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -741,7 +741,7 @@ static struct clk_std clk_audio = {
},
};
-static struct clk_init_data clk_uart0_init = {
+static const struct clk_init_data clk_uart0_init = {
.name = "uart0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -755,7 +755,7 @@ static struct clk_std clk_uart0 = {
},
};
-static struct clk_init_data clk_uart1_init = {
+static const struct clk_init_data clk_uart1_init = {
.name = "uart1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -769,7 +769,7 @@ static struct clk_std clk_uart1 = {
},
};
-static struct clk_init_data clk_uart2_init = {
+static const struct clk_init_data clk_uart2_init = {
.name = "uart2",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -783,7 +783,7 @@ static struct clk_std clk_uart2 = {
},
};
-static struct clk_init_data clk_usp0_init = {
+static const struct clk_init_data clk_usp0_init = {
.name = "usp0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -797,7 +797,7 @@ static struct clk_std clk_usp0 = {
},
};
-static struct clk_init_data clk_usp1_init = {
+static const struct clk_init_data clk_usp1_init = {
.name = "usp1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -811,7 +811,7 @@ static struct clk_std clk_usp1 = {
},
};
-static struct clk_init_data clk_usp2_init = {
+static const struct clk_init_data clk_usp2_init = {
.name = "usp2",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -825,7 +825,7 @@ static struct clk_std clk_usp2 = {
},
};
-static struct clk_init_data clk_vip_init = {
+static const struct clk_init_data clk_vip_init = {
.name = "vip",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -839,7 +839,7 @@ static struct clk_std clk_vip = {
},
};
-static struct clk_init_data clk_spi0_init = {
+static const struct clk_init_data clk_spi0_init = {
.name = "spi0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -853,7 +853,7 @@ static struct clk_std clk_spi0 = {
},
};
-static struct clk_init_data clk_spi1_init = {
+static const struct clk_init_data clk_spi1_init = {
.name = "spi1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -867,7 +867,7 @@ static struct clk_std clk_spi1 = {
},
};
-static struct clk_init_data clk_tsc_init = {
+static const struct clk_init_data clk_tsc_init = {
.name = "tsc",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -881,7 +881,7 @@ static struct clk_std clk_tsc = {
},
};
-static struct clk_init_data clk_i2c0_init = {
+static const struct clk_init_data clk_i2c0_init = {
.name = "i2c0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -895,7 +895,7 @@ static struct clk_std clk_i2c0 = {
},
};
-static struct clk_init_data clk_i2c1_init = {
+static const struct clk_init_data clk_i2c1_init = {
.name = "i2c1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -909,7 +909,7 @@ static struct clk_std clk_i2c1 = {
},
};
-static struct clk_init_data clk_pwmc_init = {
+static const struct clk_init_data clk_pwmc_init = {
.name = "pwmc",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -923,7 +923,7 @@ static struct clk_std clk_pwmc = {
},
};
-static struct clk_init_data clk_efuse_init = {
+static const struct clk_init_data clk_efuse_init = {
.name = "efuse",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -937,7 +937,7 @@ static struct clk_std clk_efuse = {
},
};
-static struct clk_init_data clk_pulse_init = {
+static const struct clk_init_data clk_pulse_init = {
.name = "pulse",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -955,7 +955,7 @@ static const char * const std_clk_dsp_parents[] = {
"dsp",
};
-static struct clk_init_data clk_gps_init = {
+static const struct clk_init_data clk_gps_init = {
.name = "gps",
.ops = &ios_ops,
.parent_names = std_clk_dsp_parents,
@@ -969,7 +969,7 @@ static struct clk_std clk_gps = {
},
};
-static struct clk_init_data clk_mf_init = {
+static const struct clk_init_data clk_mf_init = {
.name = "mf",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -987,7 +987,7 @@ static const char * const std_clk_sys_parents[] = {
"sys",
};
-static struct clk_init_data clk_security_init = {
+static const struct clk_init_data clk_security_init = {
.name = "security",
.ops = &ios_ops,
.parent_names = std_clk_sys_parents,
@@ -1005,7 +1005,7 @@ static const char * const std_clk_usb_parents[] = {
"usb_pll",
};
-static struct clk_init_data clk_usb0_init = {
+static const struct clk_init_data clk_usb0_init = {
.name = "usb0",
.ops = &ios_ops,
.parent_names = std_clk_usb_parents,
@@ -1019,7 +1019,7 @@ static struct clk_std clk_usb0 = {
},
};
-static struct clk_init_data clk_usb1_init = {
+static const struct clk_init_data clk_usb1_init = {
.name = "usb1",
.ops = &ios_ops,
.parent_names = std_clk_usb_parents,
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index aac1c8ec151a..2f824320c318 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -42,7 +42,7 @@ static struct clk_dmn clk_mmc45 = {
},
};
-static struct clk_init_data clk_nand_init = {
+static const struct clk_init_data clk_nand_init = {
.name = "nand",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index f271c350ef94..906410413bc1 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -29,7 +29,7 @@
#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
-static struct aux_clk_masks default_aux_masks = {
+static const struct aux_clk_masks default_aux_masks = {
.eq_sel_mask = AUX_EQ_SEL_MASK,
.eq_sel_shift = AUX_EQ_SEL_SHIFT,
.eq1_mask = AUX_EQ1_SEL,
@@ -128,7 +128,7 @@ static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_aux_ops = {
+static const struct clk_ops clk_aux_ops = {
.recalc_rate = clk_aux_recalc_rate,
.round_rate = clk_aux_round_rate,
.set_rate = clk_aux_set_rate,
@@ -136,7 +136,7 @@ static struct clk_ops clk_aux_ops = {
struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
const char *parent_name, unsigned long flags, void __iomem *reg,
- struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ const struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
{
struct clk_aux *aux;
@@ -149,10 +149,8 @@ struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
}
aux = kzalloc(sizeof(*aux), GFP_KERNEL);
- if (!aux) {
- pr_err("could not allocate aux clk\n");
+ if (!aux)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_aux assignments */
if (!masks)
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 58d678b5b40a..229c96daece6 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -116,7 +116,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_frac_ops = {
+static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
.round_rate = clk_frac_round_rate,
.set_rate = clk_frac_set_rate,
@@ -136,10 +136,8 @@ struct clk *clk_register_frac(const char *name, const char *parent_name,
}
frac = kzalloc(sizeof(*frac), GFP_KERNEL);
- if (!frac) {
- pr_err("could not allocate frac clk\n");
+ if (!frac)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_frac assignments */
frac->reg = reg;
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 1a722e99e76e..28262f422562 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -105,7 +105,7 @@ static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_gpt_ops = {
+static const struct clk_ops clk_gpt_ops = {
.recalc_rate = clk_gpt_recalc_rate,
.round_rate = clk_gpt_round_rate,
.set_rate = clk_gpt_set_rate,
@@ -125,10 +125,8 @@ struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
}
gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
- if (!gpt) {
- pr_err("could not allocate gpt clk\n");
+ if (!gpt)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_gpt assignments */
gpt->reg = reg;
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index dc21ca4601aa..c08dec30bfa6 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -165,7 +165,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_round_rate,
.set_rate = clk_pll_set_rate,
@@ -266,7 +266,7 @@ static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_vco_ops = {
+static const struct clk_ops clk_vco_ops = {
.recalc_rate = clk_vco_recalc_rate,
.round_rate = clk_vco_round_rate,
.set_rate = clk_vco_set_rate,
@@ -292,16 +292,12 @@ struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
}
vco = kzalloc(sizeof(*vco), GFP_KERNEL);
- if (!vco) {
- pr_err("could not allocate vco clk\n");
+ if (!vco)
return ERR_PTR(-ENOMEM);
- }
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("could not allocate pll clk\n");
+ if (!pll)
goto free_vco;
- }
/* struct clk_vco assignments */
vco->mode_reg = mode_reg;
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 9834944f08b1..af0e25f496c1 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -49,7 +49,7 @@ struct aux_rate_tbl {
struct clk_aux {
struct clk_hw hw;
void __iomem *reg;
- struct aux_clk_masks *masks;
+ const struct aux_clk_masks *masks;
struct aux_rate_tbl *rtbl;
u8 rtbl_cnt;
spinlock_t *lock;
@@ -112,7 +112,7 @@ typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
/* clk register routines */
struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
const char *parent_name, unsigned long flags, void __iomem *reg,
- struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ const struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
struct clk *clk_register_frac(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg,
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 2f86e3f94efa..591248c9a88e 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -284,7 +284,7 @@ static struct frac_rate_tbl clcd_rtbl[] = {
};
/* i2s prescaler1 masks */
-static struct aux_clk_masks i2s_prs1_masks = {
+static const struct aux_clk_masks i2s_prs1_masks = {
.eq_sel_mask = AUX_EQ_SEL_MASK,
.eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
.eq1_mask = AUX_EQ1_SEL,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index cbb19a90f2d6..e5bc8c828cf0 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -323,7 +323,7 @@ static struct frac_rate_tbl clcd_rtbl[] = {
};
/* i2s prescaler1 masks */
-static struct aux_clk_masks i2s_prs1_masks = {
+static const struct aux_clk_masks i2s_prs1_masks = {
.eq_sel_mask = AUX_EQ_SEL_MASK,
.eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
.eq1_mask = AUX_EQ1_SEL,
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index d1c2fa93ddd9..4141c3fe08ae 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -11,6 +11,7 @@ lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o
lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o
lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o
lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o
# Multi-factor clocks
lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index 286b0049b7b6..ffa5dac221e4 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -28,6 +28,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun4i-a10.h"
@@ -51,16 +52,29 @@ static struct ccu_nkmp pll_core_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names.
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN4I_PLL_AUDIO_REG 0x008
+
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
static struct ccu_nm pll_audio_base_clk = {
.enable = BIT(31),
.n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0),
.m = _SUNXI_CCU_DIV_OFFSET(0, 5, 0),
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table, 0,
+ 0x00c, BIT(31)),
.common = {
.reg = 0x008,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
.hw.init = CLK_HW_INIT("pll-audio-base",
"hosc",
&ccu_nm_ops,
@@ -223,7 +237,7 @@ static struct ccu_mux cpu_clk = {
.hw.init = CLK_HW_INIT_PARENTS("cpu",
cpu_parents,
&ccu_mux_ops,
- CLK_IS_CRITICAL),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
}
};
@@ -1021,9 +1035,9 @@ static struct ccu_common *sun4i_sun7i_ccu_clks[] = {
&out_b_clk.common
};
-/* Post-divider for pll-audio is hardcoded to 4 */
+/* Post-divider for pll-audio is hardcoded to 1 */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1420,10 +1434,10 @@ static void __init sun4i_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN4I_PLL_AUDIO_REG);
val &= ~GENMASK(29, 26);
- writel(val | (4 << 26), reg + SUN4I_PLL_AUDIO_REG);
+ writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG);
/*
* Use the peripheral PLL6 as the AHB parent, instead of CPU /
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.h b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
index c5947c7c050e..23c908ad509f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
@@ -29,7 +29,7 @@
#define CLK_PLL_AUDIO_4X 6
#define CLK_PLL_AUDIO_8X 7
#define CLK_PLL_VIDEO0 8
-#define CLK_PLL_VIDEO0_2X 9
+/* The PLL_VIDEO0_2X clock is exported */
#define CLK_PLL_VE 10
#define CLK_PLL_DDR_BASE 11
#define CLK_PLL_DDR 12
@@ -38,7 +38,7 @@
#define CLK_PLL_PERIPH 15
#define CLK_PLL_PERIPH_SATA 16
#define CLK_PLL_VIDEO1 17
-#define CLK_PLL_VIDEO1_2X 18
+/* The PLL_VIDEO1_2X clock is exported */
#define CLK_PLL_GPU 19
/* The CPU clock is exported */
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index ab9e850b3707..fa2c2dd77102 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -26,6 +26,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun5i.h"
@@ -49,11 +50,20 @@ static struct ccu_nkmp pll_core_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN5I_PLL_AUDIO_REG 0x008
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
static struct ccu_nm pll_audio_base_clk = {
.enable = BIT(31),
.n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0),
@@ -63,8 +73,11 @@ static struct ccu_nm pll_audio_base_clk = {
* offset
*/
.m = _SUNXI_CCU_DIV_OFFSET(0, 5, 0),
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table, 0,
+ 0x00c, BIT(31)),
.common = {
.reg = 0x008,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
.hw.init = CLK_HW_INIT("pll-audio-base",
"hosc",
&ccu_nm_ops,
@@ -597,9 +610,9 @@ static struct ccu_common *sun5i_a10s_ccu_clks[] = {
&iep_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -980,10 +993,10 @@ static void __init sun5i_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN5I_PLL_AUDIO_REG);
- val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN5I_PLL_AUDIO_REG);
+ val &= ~GENMASK(29, 26);
+ writel(val | (0 << 26), reg + SUN5I_PLL_AUDIO_REG);
/*
* Use the peripheral PLL as the AHB parent, instead of CPU /
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 8af434815fba..72b16ed1012b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -31,6 +31,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun6i-a31.h"
@@ -48,18 +49,29 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_cpu_clk, "pll-cpu",
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN6I_A31_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
"osc24M", 0x010,
@@ -608,7 +620,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
0x150, 0, 4, 24, 2, BIT(31),
CLK_SET_RATE_PARENT);
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0);
static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
@@ -950,9 +962,9 @@ static struct ccu_common *sun6i_a31_ccu_clks[] = {
&out_c_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1221,10 +1233,10 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN6I_A31_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN6I_A31_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN6I_A31_PLL_AUDIO_REG);
/* Force PLL-MIPI to MIPI mode */
val = readl(reg + SUN6I_A31_PLL_MIPI_REG);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
index 4e434011e9e7..27e6ad4133ab 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
@@ -27,7 +27,9 @@
#define CLK_PLL_AUDIO_4X 4
#define CLK_PLL_AUDIO_8X 5
#define CLK_PLL_VIDEO0 6
-#define CLK_PLL_VIDEO0_2X 7
+
+/* The PLL_VIDEO0_2X clock is exported */
+
#define CLK_PLL_VE 8
#define CLK_PLL_DDR 9
@@ -35,7 +37,9 @@
#define CLK_PLL_PERIPH_2X 11
#define CLK_PLL_VIDEO1 12
-#define CLK_PLL_VIDEO1_2X 13
+
+/* The PLL_VIDEO1_2X clock is exported */
+
#define CLK_PLL_GPU 14
#define CLK_PLL_MIPI 15
#define CLK_PLL9 16
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index d93b452f0df9..a4fa2945f230 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -26,6 +26,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun8i-a23-a33.h"
@@ -52,18 +53,29 @@ static struct ccu_nkmp pll_cpux_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN8I_A23_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
"osc24M", 0x010,
@@ -538,9 +550,9 @@ static struct ccu_common *sun8i_a23_ccu_clks[] = {
&ats_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -720,10 +732,10 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_A23_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN8I_A23_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN8I_A23_PLL_AUDIO_REG);
/* Force PLL-MIPI to MIPI mode */
val = readl(reg + SUN8I_A23_PLL_MIPI_REG);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index e43acebdfbcd..5cedcd0d8be8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1",
static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
0x06c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
- 0x06c, BIT(0), 0);
+ 0x06c, BIT(1), 0);
static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
- 0x06c, BIT(0), 0);
+ 0x06c, BIT(2), 0);
static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
0x06c, BIT(16), 0);
static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
@@ -506,7 +506,7 @@ static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
csi_mclk_parents, csi_mclk_table,
0x134,
0, 5, /* M */
- 10, 3, /* mux */
+ 8, 3, /* mux */
BIT(15), /* gate */
0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 5cdaf52669e4..5cc9d9952121 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -41,11 +41,16 @@ static SUNXI_CCU_GATE(wb_clk, "wb", "wb-div",
static SUNXI_CCU_M(mixer0_div_clk, "mixer0-div", "de", 0x0c, 0, 4,
CLK_SET_RATE_PARENT);
-static SUNXI_CCU_M(mixer1_div_clk, "mixer1-div", "de", 0x0c, 4, 4,
- CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(wb_div_clk, "wb-div", "de", 0x0c, 8, 4,
CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(mixer0_div_a83_clk, "mixer0-div", "pll-de", 0x0c, 0, 4,
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(mixer1_div_a83_clk, "mixer1-div", "pll-de", 0x0c, 4, 4,
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
+ CLK_SET_RATE_PARENT);
+
static struct ccu_common *sun8i_a83t_de2_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
@@ -55,9 +60,9 @@ static struct ccu_common *sun8i_a83t_de2_clks[] = {
&bus_mixer1_clk.common,
&bus_wb_clk.common,
- &mixer0_div_clk.common,
- &mixer1_div_clk.common,
- &wb_div_clk.common,
+ &mixer0_div_a83_clk.common,
+ &mixer1_div_a83_clk.common,
+ &wb_div_a83_clk.common,
};
static struct ccu_common *sun8i_v3s_de2_clks[] = {
@@ -81,9 +86,9 @@ static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
[CLK_BUS_MIXER1] = &bus_mixer1_clk.common.hw,
[CLK_BUS_WB] = &bus_wb_clk.common.hw,
- [CLK_MIXER0_DIV] = &mixer0_div_clk.common.hw,
- [CLK_MIXER1_DIV] = &mixer1_div_clk.common.hw,
- [CLK_WB_DIV] = &wb_div_clk.common.hw,
+ [CLK_MIXER0_DIV] = &mixer0_div_a83_clk.common.hw,
+ [CLK_MIXER1_DIV] = &mixer1_div_a83_clk.common.hw,
+ [CLK_WB_DIV] = &wb_div_a83_clk.common.hw,
},
.num = CLK_NUMBER,
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 1729ff6a5aae..29bc0566b776 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -26,6 +26,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun8i-h3.h"
@@ -37,25 +38,36 @@ static SUNXI_CCU_NKMP_WITH_GATE_LOCK(pll_cpux_clk, "pll-cpux",
16, 2, /* P */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
/*
* The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN8I_H3_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- 0);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
"osc24M", 0x0010,
@@ -67,7 +79,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
"osc24M", 0x0018,
@@ -79,7 +91,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
"osc24M", 0x020,
@@ -88,7 +100,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
0, 2, /* M */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0",
"osc24M", 0x028,
@@ -97,7 +109,7 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0",
BIT(31), /* gate */
BIT(28), /* lock */
2, /* post-div */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
"osc24M", 0x0038,
@@ -109,7 +121,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
"osc24M", 0x044,
@@ -118,7 +130,7 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
BIT(31), /* gate */
BIT(28), /* lock */
2, /* post-div */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
"osc24M", 0x0048,
@@ -130,7 +142,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static const char * const cpux_parents[] = { "osc32k", "osc24M",
"pll-cpux" , "pll-cpux" };
@@ -484,7 +496,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
- 0x1a0, 0, 3, BIT(31), 0);
+ 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
static struct ccu_common *sun8i_h3_ccu_clks[] = {
&pll_cpux_clk.common,
@@ -707,9 +719,9 @@ static struct ccu_common *sun50i_h5_ccu_clks[] = {
&gpu_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1129,10 +1141,10 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_H3_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
sunxi_ccu_probe(node, reg, desc);
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index cadd1a9f93b6..5d684ce77c54 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -24,6 +24,7 @@
#define CCU_FEATURE_ALL_PREDIV BIT(4)
#define CCU_FEATURE_LOCK_REG BIT(5)
#define CCU_FEATURE_MMC_TIMING_SWITCH BIT(6)
+#define CCU_FEATURE_SIGMA_DELTA_MOD BIT(7)
/* MMC timing mode switch bit */
#define CCU_MMC_NEW_TIMING_MODE BIT(30)
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index a32158e8f2e3..7620aa973a6e 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -90,6 +90,14 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
if (!m)
m++;
+ if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm)) {
+ unsigned long rate =
+ ccu_sdm_helper_read_rate(&nm->common, &nm->sdm,
+ m, n);
+ if (rate)
+ return rate;
+ }
+
return parent_rate * n / m;
}
@@ -99,6 +107,12 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
struct ccu_nm *nm = hw_to_ccu_nm(hw);
struct _ccu_nm _nm;
+ if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
+ return rate;
+
+ if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate))
+ return rate;
+
_nm.min_n = nm->n.min ?: 1;
_nm.max_n = nm->n.max ?: 1 << nm->n.width;
_nm.min_m = 1;
@@ -140,7 +154,16 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
_nm.min_m = 1;
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
- ccu_nm_find_best(parent_rate, rate, &_nm);
+ if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
+ ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);
+
+ /* Sigma delta modulation requires specific N and M factors */
+ ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
+ &_nm.m, &_nm.n);
+ } else {
+ ccu_sdm_helper_disable(&nm->common, &nm->sdm);
+ ccu_nm_find_best(parent_rate, rate, &_nm);
+ }
spin_lock_irqsave(nm->common.lock, flags);
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
index e87fd186da78..c623b0c7a23c 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.h
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -20,6 +20,7 @@
#include "ccu_div.h"
#include "ccu_frac.h"
#include "ccu_mult.h"
+#include "ccu_sdm.h"
/*
* struct ccu_nm - Definition of an N-M clock
@@ -33,10 +34,34 @@ struct ccu_nm {
struct ccu_mult_internal n;
struct ccu_div_internal m;
struct ccu_frac_internal frac;
+ struct ccu_sdm_internal sdm;
struct ccu_common common;
};
+#define SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(_struct, _name, _parent, _reg, \
+ _nshift, _nwidth, \
+ _mshift, _mwidth, \
+ _sdm_table, _sdm_en, \
+ _sdm_reg, _sdm_reg_en, \
+ _gate, _lock, _flags) \
+ struct ccu_nm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .sdm = _SUNXI_CCU_SDM(_sdm_table, _sdm_en, \
+ _sdm_reg, _sdm_reg_en),\
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nm_ops, \
+ _flags), \
+ }, \
+ }
+
#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(_struct, _name, _parent, _reg, \
_nshift, _nwidth, \
_mshift, _mwidth, \
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 1dc4e98ea802..b67149143554 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -60,8 +60,22 @@ static int ccu_reset_reset(struct reset_controller_dev *rcdev,
return 0;
}
+static int ccu_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ccu_reset *ccu = rcdev_to_ccu_reset(rcdev);
+ const struct ccu_reset_map *map = &ccu->reset_map[id];
+
+ /*
+ * The reset control API expects 0 if reset is not asserted,
+ * which is the opposite of what our hardware uses.
+ */
+ return !(map->bit & readl(ccu->base + map->reg));
+}
+
const struct reset_control_ops ccu_reset_ops = {
.assert = ccu_reset_assert,
.deassert = ccu_reset_deassert,
.reset = ccu_reset_reset,
+ .status = ccu_reset_status,
};
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
new file mode 100644
index 000000000000..3b3dc9bdf2b0
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2017 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "ccu_sdm.h"
+
+bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm)
+{
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return false;
+
+ if (sdm->enable && !(readl(common->base + common->reg) & sdm->enable))
+ return false;
+
+ return !!(readl(common->base + sdm->tuning_reg) & sdm->tuning_enable);
+}
+
+void ccu_sdm_helper_enable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate)
+{
+ unsigned long flags;
+ unsigned int i;
+ u32 reg;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return;
+
+ /* Set the pattern */
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].rate == rate)
+ writel(sdm->table[i].pattern,
+ common->base + sdm->tuning_reg);
+
+ /* Make sure SDM is enabled */
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + sdm->tuning_reg);
+ writel(reg | sdm->tuning_enable, common->base + sdm->tuning_reg);
+ spin_unlock_irqrestore(common->lock, flags);
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + common->reg);
+ writel(reg | sdm->enable, common->base + common->reg);
+ spin_unlock_irqrestore(common->lock, flags);
+}
+
+void ccu_sdm_helper_disable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm)
+{
+ unsigned long flags;
+ u32 reg;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return;
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + common->reg);
+ writel(reg & ~sdm->enable, common->base + common->reg);
+ spin_unlock_irqrestore(common->lock, flags);
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + sdm->tuning_reg);
+ writel(reg & ~sdm->tuning_enable, common->base + sdm->tuning_reg);
+ spin_unlock_irqrestore(common->lock, flags);
+}
+
+/*
+ * Sigma delta modulation provides a way to do fractional-N frequency
+ * synthesis, in essence allowing the PLL to output any frequency
+ * within its operational range. On earlier SoCs such as the A10/A20,
+ * some PLLs support this. On later SoCs, all PLLs support this.
+ *
+ * The datasheets do not explain what the "wave top" and "wave bottom"
+ * parameters mean or do, nor how to calculate the effective output
+ * frequency. The only examples (and real world usage) are for the audio
+ * PLL to generate 24.576 and 22.5792 MHz clock rates used by the audio
+ * peripherals. The author lacks the underlying domain knowledge to
+ * pursue this.
+ *
+ * The goal and function of the following code is to support the two
+ * clock rates used by the audio subsystem, allowing for proper audio
+ * playback and capture without any pitch or speed changes.
+ */
+bool ccu_sdm_helper_has_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return false;
+
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].rate == rate)
+ return true;
+
+ return false;
+}
+
+unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ u32 m, u32 n)
+{
+ unsigned int i;
+ u32 reg;
+
+ pr_debug("%s: Read sigma-delta modulation setting\n",
+ clk_hw_get_name(&common->hw));
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return 0;
+
+ pr_debug("%s: clock is sigma-delta modulated\n",
+ clk_hw_get_name(&common->hw));
+
+ reg = readl(common->base + sdm->tuning_reg);
+
+ pr_debug("%s: pattern reg is 0x%x",
+ clk_hw_get_name(&common->hw), reg);
+
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].pattern == reg &&
+ sdm->table[i].m == m && sdm->table[i].n == n)
+ return sdm->table[i].rate;
+
+ /* We can't calculate the effective clock rate, so just fail. */
+ return 0;
+}
+
+int ccu_sdm_helper_get_factors(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate,
+ unsigned long *m, unsigned long *n)
+{
+ unsigned int i;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return -EINVAL;
+
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].rate == rate) {
+ *m = sdm->table[i].m;
+ *n = sdm->table[i].n;
+ return 0;
+ }
+
+ /* nothing found */
+ return -EINVAL;
+}
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.h b/drivers/clk/sunxi-ng/ccu_sdm.h
new file mode 100644
index 000000000000..2a9b4a2584d6
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_sdm.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SDM_H
+#define _CCU_SDM_H
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_sdm_setting {
+ unsigned long rate;
+
+ /*
+ * XXX We don't know what the step and bottom register fields
+ * mean. Just copy the whole register value from the vendor
+ * kernel for now.
+ */
+ u32 pattern;
+
+ /*
+ * M and N factors here should be the values used in
+ * calculation, not the raw values written to registers
+ */
+ u32 m;
+ u32 n;
+};
+
+struct ccu_sdm_internal {
+ struct ccu_sdm_setting *table;
+ u32 table_size;
+ /* early SoCs don't have the SDM enable bit in the PLL register */
+ u32 enable;
+ /* second enable bit in tuning register */
+ u32 tuning_enable;
+ u16 tuning_reg;
+};
+
+#define _SUNXI_CCU_SDM(_table, _enable, \
+ _reg, _reg_enable) \
+ { \
+ .table = _table, \
+ .table_size = ARRAY_SIZE(_table), \
+ .enable = _enable, \
+ .tuning_enable = _reg_enable, \
+ .tuning_reg = _reg, \
+ }
+
+bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm);
+void ccu_sdm_helper_enable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate);
+void ccu_sdm_helper_disable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm);
+
+bool ccu_sdm_helper_has_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate);
+
+unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ u32 m, u32 n);
+
+int ccu_sdm_helper_get_factors(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate,
+ unsigned long *m, unsigned long *n);
+
+#endif
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index dfe5e3e32d28..856fef65433b 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -276,13 +276,11 @@ void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
{
struct clk_hw *hw = __clk_get_hw(clk);
struct clk_factors *factors;
- const char *name;
if (!hw)
return;
factors = to_clk_factors(hw);
- name = clk_hw_get_name(hw);
of_clk_del_provider(node);
/* TODO: The composite clock stuff will leak a bit here. */
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 6041bdba2e97..a1a634253d6f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -124,7 +124,7 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
return PTR_ERR(data->clk);
}
- data->reset = devm_reset_control_get(&pdev->dev, NULL);
+ data->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(data->reset)) {
dev_err(&pdev->dev, "Could not get reset control\n");
return PTR_ERR(data->reset);
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index 638ace64033b..a896692b74ec 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -55,6 +55,7 @@ struct tegra_bpmp_clk_message {
struct {
void *data;
size_t size;
+ int ret;
} rx;
};
@@ -64,6 +65,7 @@ static int tegra_bpmp_clk_transfer(struct tegra_bpmp *bpmp,
struct mrq_clk_request request;
struct tegra_bpmp_message msg;
void *req = &request;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd_and_id = (clk->cmd << 24) | clk->id;
@@ -84,7 +86,13 @@ static int tegra_bpmp_clk_transfer(struct tegra_bpmp *bpmp,
msg.rx.data = clk->rx.data;
msg.rx.size = clk->rx.size;
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_clk_prepare(struct clk_hw *hw)
@@ -414,11 +422,8 @@ static int tegra_bpmp_probe_clocks(struct tegra_bpmp *bpmp,
struct tegra_bpmp_clk_info *info = &clocks[count];
err = tegra_bpmp_clk_get_info(bpmp, id, info);
- if (err < 0) {
- dev_err(bpmp->dev, "failed to query clock %u: %d\n",
- id, err);
+ if (err < 0)
continue;
- }
if (info->num_parents >= U8_MAX) {
dev_err(bpmp->dev,
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 2c44aeb0b97c..0a7deee74eea 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1728,10 +1728,10 @@ EXPORT_SYMBOL(tegra_dfll_register);
* @pdev: DFLL platform_device *
*
* Unbind this driver from the DFLL hardware device represented by
- * @pdev. The DFLL must be disabled for this to succeed. Returns 0
- * upon success or -EBUSY if the DFLL is still active.
+ * @pdev. The DFLL must be disabled for this to succeed. Returns a
+ * soc pointer upon success or -EBUSY if the DFLL is still active.
*/
-int tegra_dfll_unregister(struct platform_device *pdev)
+struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev)
{
struct tegra_dfll *td = platform_get_drvdata(pdev);
@@ -1739,7 +1739,7 @@ int tegra_dfll_unregister(struct platform_device *pdev)
if (td->mode != DFLL_DISABLED) {
dev_err(&pdev->dev,
"must disable DFLL before removing driver\n");
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
}
debugfs_remove_recursive(td->debugfs_dir);
@@ -1753,6 +1753,6 @@ int tegra_dfll_unregister(struct platform_device *pdev)
reset_control_assert(td->dvco_rst);
- return 0;
+ return td->soc;
}
EXPORT_SYMBOL(tegra_dfll_unregister);
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index ed2ad888268f..83352c8078f2 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -43,7 +43,7 @@ struct tegra_dfll_soc_data {
int tegra_dfll_register(struct platform_device *pdev,
struct tegra_dfll_soc_data *soc);
-int tegra_dfll_unregister(struct platform_device *pdev);
+struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev);
int tegra_dfll_runtime_suspend(struct device *dev);
int tegra_dfll_runtime_resume(struct device *dev);
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 11ee5f9ce99e..b616e33c5255 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -13,6 +13,7 @@ enum clk_id {
tegra_clk_amx,
tegra_clk_amx1,
tegra_clk_apb2ape,
+ tegra_clk_ahbdma,
tegra_clk_apbdma,
tegra_clk_apbif,
tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index cf80831de79d..9475c00b7cf9 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -203,3 +203,11 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
return _tegra_clk_register_periph(name, parent_names, num_parents,
periph, clk_base, offset, CLK_SET_RATE_PARENT);
}
+
+struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
+ struct tegra_periph_init_data *init)
+{
+ return _tegra_clk_register_periph(init->name, init->p.parent_names,
+ init->num_parents, &init->periph,
+ clk_base, init->offset, init->flags);
+}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 848255cc0209..c02711927d79 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -129,7 +129,6 @@
#define CLK_SOURCE_NVDEC 0x698
#define CLK_SOURCE_NVJPG 0x69c
#define CLK_SOURCE_APE 0x6c0
-#define CLK_SOURCE_SOR1 0x410
#define CLK_SOURCE_SDMMC_LEGACY 0x694
#define CLK_SOURCE_QSPI 0x6c4
#define CLK_SOURCE_VI_I2C 0x6c8
@@ -278,7 +277,6 @@ static DEFINE_SPINLOCK(PLLP_OUTA_lock);
static DEFINE_SPINLOCK(PLLP_OUTB_lock);
static DEFINE_SPINLOCK(PLLP_OUTC_lock);
static DEFINE_SPINLOCK(sor0_lock);
-static DEFINE_SPINLOCK(sor1_lock);
#define MUX_I2S_SPDIF(_id) \
static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
@@ -604,18 +602,6 @@ static u32 mux_pllp_plld_plld2_clkm_idx[] = {
[0] = 0, [1] = 2, [2] = 5, [3] = 6
};
-static const char *mux_sor_safe_sor1_brick_sor1_src[] = {
- /*
- * Bit 0 of the mux selects sor1_brick, irrespective of bit 1, so the
- * sor1_brick parent appears twice in the list below. This is merely
- * to support clk_get_parent() if firmware happened to set these bits
- * to 0b11. While not an invalid setting, code should always set the
- * bits to 0b01 to select sor1_brick.
- */
- "sor_safe", "sor1_brick", "sor1_src", "sor1_brick"
-};
-#define mux_sor_safe_sor1_brick_sor1_src_idx NULL
-
static const char *mux_pllp_pllre_clkm[] = {
"pll_p", "pll_re_out1", "clk_m"
};
@@ -804,8 +790,6 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
- MUX8_NOGATE_LOCK("sor1_src", mux_pllp_plld_plld2_clkm, CLK_SOURCE_SOR1, tegra_clk_sor1_src, &sor1_lock),
- NODIV("sor1", mux_sor_safe_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 14, MASK(2), 183, 0, tegra_clk_sor1, &sor1_lock),
MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
@@ -823,7 +807,8 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("timer", "clk_m", 5, 0, tegra_clk_timer, CLK_IS_CRITICAL),
GATE("isp", "clk_m", 23, 0, tegra_clk_isp, 0),
GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
- GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
+ GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
+ GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
@@ -927,10 +912,7 @@ static void __init periph_clk_init(void __iomem *clk_base,
continue;
data->periph.gate.regs = bank;
- clk = tegra_clk_register_periph(data->name,
- data->p.parent_names, data->num_parents,
- &data->periph, clk_base, data->offset,
- data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
*dt_clk = clk;
}
}
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4f6fd307cb70..10047107c1dc 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -166,7 +166,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
&sysrate_lock);
clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
- CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
+ CLK_IS_CRITICAL, clk_base + SYSTEM_CLK_RATE,
3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index fd1a99c05c2d..63087d17c3e2 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1092,9 +1092,7 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name,
- data->p.parent_names, data->num_parents,
- &data->periph, clk_base, data->offset, data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
clks[data->clk_id] = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index ad1c1cc829cb..269d3595758b 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -125,19 +125,17 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, soc);
-
return 0;
}
static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
{
- struct tegra_dfll_soc_data *soc = platform_get_drvdata(pdev);
- int err;
+ struct tegra_dfll_soc_data *soc;
- err = tegra_dfll_unregister(pdev);
- if (err < 0)
- dev_err(&pdev->dev, "failed to unregister DFLL: %d\n", err);
+ soc = tegra_dfll_unregister(pdev);
+ if (IS_ERR(soc))
+ dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
+ PTR_ERR(soc));
tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 837e5cbd60e9..cbd5a2e5c569 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -522,6 +522,8 @@ static struct tegra_devclk devclks[] __initdata = {
};
static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_ahbdma] = { .dt_id = TEGRA20_CLK_AHBDMA, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA20_CLK_APBDMA, .present = true },
[tegra_clk_spdif_out] = { .dt_id = TEGRA20_CLK_SPDIF_OUT, .present = true },
[tegra_clk_spdif_in] = { .dt_id = TEGRA20_CLK_SPDIF_IN, .present = true },
[tegra_clk_sdmmc1] = { .dt_id = TEGRA20_CLK_SDMMC1, .present = true },
@@ -806,11 +808,6 @@ static void __init tegra20_periph_clk_init(void)
clk_base, 0, 3, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_AC97] = clk;
- /* apbdma */
- clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
- 0, 34, periph_clk_enb_refcnt);
- clks[TEGRA20_CLK_APBDMA] = clk;
-
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm),
@@ -850,9 +847,7 @@ static void __init tegra20_periph_clk_init(void)
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->p.parent_names,
- data->num_parents, &data->periph,
- clk_base, data->offset, data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
clks[data->clk_id] = clk;
}
@@ -1025,7 +1020,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1 },
{ TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1 },
- { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 120000000, 1 },
+ { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
{ TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1 },
{ TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
{ TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1 },
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 6d7a613f2656..9e6260869eb9 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -40,6 +40,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_SOR1 0x410
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -264,6 +265,7 @@ static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
+static DEFINE_SPINLOCK(sor1_lock);
static DEFINE_SPINLOCK(emc_lock);
/* possible OSC frequencies in Hz */
@@ -2566,8 +2568,8 @@ static int tegra210_enable_pllu(void)
reg |= PLL_ENABLE;
writel(reg, clk_base + PLLU_BASE);
- readl_relaxed_poll_timeout(clk_base + PLLU_BASE, reg,
- reg & PLL_BASE_LOCK, 2, 1000);
+ readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg,
+ reg & PLL_BASE_LOCK, 2, 1000);
if (!(reg & PLL_BASE_LOCK)) {
pr_err("Timed out waiting for PLL_U to lock\n");
return -ETIMEDOUT;
@@ -2628,10 +2630,35 @@ static int tegra210_init_pllu(void)
return 0;
}
+static const char * const sor1_out_parents[] = {
+ /*
+ * Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so
+ * the sor1_pad_clkout parent appears twice in the list below. This is
+ * merely to support clk_get_parent() if firmware happened to set
+ * these bits to 0b11. While not an invalid setting, code should
+ * always set the bits to 0b01 to select sor1_pad_clkout.
+ */
+ "sor_safe", "sor1_pad_clkout", "sor1", "sor1_pad_clkout",
+};
+
+static const char * const sor1_parents[] = {
+ "pll_p", "pll_d_out0", "pll_d2_out0", "clk_m",
+};
+
+static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
+
+static struct tegra_periph_init_data tegra210_periph[] = {
+ TEGRA_INIT_DATA_TABLE("sor1", NULL, NULL, sor1_parents,
+ CLK_SOURCE_SOR1, 29, 0x7, 0, 0, 8, 1,
+ TEGRA_DIVIDER_ROUND_UP, 183, 0, tegra_clk_sor1,
+ sor1_parents_idx, 0, &sor1_lock),
+};
+
static __init void tegra210_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
struct clk *clk;
+ unsigned int i;
/* xusb_ss_div2 */
clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0,
@@ -2650,6 +2677,12 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
1, 17, 207);
clks[TEGRA210_CLK_DPAUX1] = clk;
+ clk = clk_register_mux_table(NULL, "sor1_out", sor1_out_parents,
+ ARRAY_SIZE(sor1_out_parents), 0,
+ clk_base + CLK_SOURCE_SOR1, 14, 0x3,
+ 0, NULL, &sor1_lock);
+ clks[TEGRA210_CLK_SOR1_OUT] = clk;
+
/* pll_d_dsi_out */
clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC0, 21, 0, &pll_d_lock);
@@ -2694,6 +2727,20 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
0, NULL);
clks[TEGRA210_CLK_ACLK] = clk;
+ for (i = 0; i < ARRAY_SIZE(tegra210_periph); i++) {
+ struct tegra_periph_init_data *init = &tegra210_periph[i];
+ struct clk **clkp;
+
+ clkp = tegra_lookup_dt_id(init->clk_id, tegra210_clks);
+ if (!clkp) {
+ pr_warn("clock %u not found\n", init->clk_id);
+ continue;
+ }
+
+ clk = tegra_clk_register_periph_data(clk_base, init);
+ *clkp = clk;
+ }
+
tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params);
}
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index a2d163f759b4..bee84c554932 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -359,7 +359,7 @@ static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
};
/* PLL parameters */
-static struct tegra_clk_pll_params pll_c_params = {
+static struct tegra_clk_pll_params pll_c_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -388,7 +388,7 @@ static struct div_nmp pllm_nmp = {
.override_divp_shift = 15,
};
-static struct tegra_clk_pll_params pll_m_params = {
+static struct tegra_clk_pll_params pll_m_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -409,7 +409,7 @@ static struct tegra_clk_pll_params pll_m_params = {
TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_FIXED,
};
-static struct tegra_clk_pll_params pll_p_params = {
+static struct tegra_clk_pll_params pll_p_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -444,7 +444,7 @@ static struct tegra_clk_pll_params pll_a_params = {
TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_d_params = {
+static struct tegra_clk_pll_params pll_d_params __ro_after_init = {
.input_min = 2000000,
.input_max = 40000000,
.cf_min = 1000000,
@@ -461,7 +461,7 @@ static struct tegra_clk_pll_params pll_d_params = {
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_d2_params = {
+static struct tegra_clk_pll_params pll_d2_params __ro_after_init = {
.input_min = 2000000,
.input_max = 40000000,
.cf_min = 1000000,
@@ -478,7 +478,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_u_params = {
+static struct tegra_clk_pll_params pll_u_params __ro_after_init = {
.input_min = 2000000,
.input_max = 40000000,
.cf_min = 1000000,
@@ -496,7 +496,7 @@ static struct tegra_clk_pll_params pll_u_params = {
TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_x_params = {
+static struct tegra_clk_pll_params pll_x_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -513,7 +513,7 @@ static struct tegra_clk_pll_params pll_x_params = {
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_e_params = {
+static struct tegra_clk_pll_params pll_e_params __ro_after_init = {
.input_min = 12000000,
.input_max = 216000000,
.cf_min = 12000000,
@@ -788,6 +788,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_extern3] = { .dt_id = TEGRA30_CLK_EXTERN3, .present = true },
[tegra_clk_disp1] = { .dt_id = TEGRA30_CLK_DISP1, .present = true },
[tegra_clk_disp2] = { .dt_id = TEGRA30_CLK_DISP2, .present = true },
+ [tegra_clk_ahbdma] = { .dt_id = TEGRA30_CLK_AHBDMA, .present = true },
[tegra_clk_apbdma] = { .dt_id = TEGRA30_CLK_APBDMA, .present = true },
[tegra_clk_rtc] = { .dt_id = TEGRA30_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA30_CLK_TIMER, .present = true },
@@ -964,7 +965,7 @@ static void __init tegra30_super_clk_init(void)
* U71 divider of cclk_lp.
*/
clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
- clk_base + SUPER_CCLKG_DIVIDER, 0,
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
@@ -1079,9 +1080,7 @@ static void __init tegra30_periph_clk_init(void)
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->p.parent_names,
- data->num_parents, &data->periph,
- clk_base, data->offset, data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
clks[data->clk_id] = clk;
}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 872f1189ad7f..3b2763df51c2 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -662,6 +662,9 @@ struct tegra_periph_init_data {
_clk_num, _gate_flags, _clk_id,\
NULL, 0, NULL)
+struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
+ struct tegra_periph_init_data *init);
+
/**
* struct clk_super_mux - super clock
*
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 13eb04f72389..148815470431 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
/* Get configuration for the ATL instances */
snprintf(prop, sizeof(prop), "atl%u", i);
- of_node_get(node);
- cfg_node = of_find_node_by_name(node, prop);
+ cfg_node = of_get_child_by_name(node, prop);
if (cfg_node) {
ret = of_property_read_u32(cfg_node, "bws",
&cdesc->bws);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 88f04a4cb890..77f93f6d2806 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -292,10 +292,8 @@ static struct clk *_register_divider(struct device *dev, const char *name,
/* allocate the divider */
div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div) {
- pr_err("%s: could not allocate divider clk\n", __func__);
+ if (!div)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &ti_clk_divider_ops;
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 18c267b38461..d4705803f3d3 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -108,10 +108,8 @@ static struct clk *_register_mux(struct device *dev, const char *name,
/* allocate the mux */
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
- if (!mux) {
- pr_err("%s: could not allocate mux clk\n", __func__);
+ if (!mux)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &ti_clk_mux_ops;
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 16e4d303f535..badc478a86c6 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -13,6 +13,8 @@
* GNU General Public License for more details.
*/
+#include <linux/stddef.h>
+
#include "clk-uniphier.h"
#define UNIPHIER_MIO_CLK_SD_FIXED \
@@ -73,15 +75,12 @@
#define UNIPHIER_MIO_CLK_USB2_PHY(idx, ch) \
UNIPHIER_CLK_GATE("usb2" #ch "-phy", (idx), "usb2", 0x20 + 0x200 * (ch), 29)
-#define UNIPHIER_MIO_CLK_DMAC(idx) \
- UNIPHIER_CLK_GATE("miodmac", (idx), "stdmac", 0x20, 25)
-
const struct uniphier_clk_data uniphier_ld4_mio_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
UNIPHIER_MIO_CLK_SD(2, 2),
- UNIPHIER_MIO_CLK_DMAC(7),
+ UNIPHIER_CLK_GATE("miodmac", 7, NULL, 0x20, 25),
UNIPHIER_MIO_CLK_USB2(8, 0),
UNIPHIER_MIO_CLK_USB2(9, 1),
UNIPHIER_MIO_CLK_USB2(10, 2),
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 07f3b91a7daf..d244e724e198 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */
UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */
- UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
+ UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c
index f50592775c9d..7cfb59c9136d 100644
--- a/drivers/clk/ux500/clk-prcc.c
+++ b/drivers/clk/ux500/clk-prcc.c
@@ -107,11 +107,9 @@ static struct clk *clk_reg_prcc(const char *name,
return ERR_PTR(-EINVAL);
}
- clk = kzalloc(sizeof(struct clk_prcc), GFP_KERNEL);
- if (!clk) {
- pr_err("clk_prcc: %s could not allocate clk\n", __func__);
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
return ERR_PTR(-ENOMEM);
- }
clk->base = ioremap(phy_base, SZ_4K);
if (!clk->base)
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 6e3e16b2e5ca..9d1f2d4550ad 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -258,11 +258,9 @@ static struct clk *clk_reg_prcmu(const char *name,
return ERR_PTR(-EINVAL);
}
- clk = kzalloc(sizeof(struct clk_prcmu), GFP_KERNEL);
- if (!clk) {
- pr_err("clk_prcmu: %s could not allocate clk\n", __func__);
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
return ERR_PTR(-ENOMEM);
- }
clk->cg_sel = cg_sel;
clk->is_prepared = 1;
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index 8a4e93ce1e42..7c0403b733ae 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -139,11 +139,9 @@ static struct clk *clk_reg_sysctrl(struct device *dev,
return ERR_PTR(-EINVAL);
}
- clk = devm_kzalloc(dev, sizeof(struct clk_sysctrl), GFP_KERNEL);
- if (!clk) {
- dev_err(dev, "clk_sysctrl: could not allocate clk\n");
+ clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+ if (!clk)
return ERR_PTR(-ENOMEM);
- }
/* set main clock registers */
clk->reg_sel[0] = reg_sel[0];
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 09fbe66f1f11..dafe7a45875d 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -359,16 +359,13 @@ static struct clk *icst_clk_setup(struct device *dev,
struct clk_init_data init;
struct icst_params *pclone;
- icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL);
- if (!icst) {
- pr_err("could not allocate ICST clock!\n");
+ icst = kzalloc(sizeof(*icst), GFP_KERNEL);
+ if (!icst)
return ERR_PTR(-ENOMEM);
- }
pclone = kmemdup(desc->params, sizeof(*pclone), GFP_KERNEL);
if (!pclone) {
kfree(icst);
- pr_err("could not clone ICST params\n");
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 538bfa8ba9b4..57cb2f00fc07 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -159,6 +159,7 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
* if we don't have the cp15 accessors we won't have a problem.
*/
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+EXPORT_SYMBOL_GPL(arch_timer_read_counter);
static u64 arch_counter_read(struct clocksource *cs)
{
@@ -218,6 +219,11 @@ static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
return __fsl_a008585_read_reg(cntv_tval_el0);
}
+static u64 notrace fsl_a008585_read_cntpct_el0(void)
+{
+ return __fsl_a008585_read_reg(cntpct_el0);
+}
+
static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
return __fsl_a008585_read_reg(cntvct_el0);
@@ -259,6 +265,11 @@ static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
return __hisi_161010101_read_reg(cntv_tval_el0);
}
+static u64 notrace hisi_161010101_read_cntpct_el0(void)
+{
+ return __hisi_161010101_read_reg(cntpct_el0);
+}
+
static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
return __hisi_161010101_read_reg(cntvct_el0);
@@ -289,6 +300,15 @@ static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
+static u64 notrace arm64_858921_read_cntpct_el0(void)
+{
+ u64 old, new;
+
+ old = read_sysreg(cntpct_el0);
+ new = read_sysreg(cntpct_el0);
+ return (((old ^ new) >> 32) & 1) ? old : new;
+}
+
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
u64 old, new;
@@ -310,16 +330,19 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
struct clock_event_device *clk)
{
unsigned long ctrl;
- u64 cval = evt + arch_counter_get_cntvct();
+ u64 cval;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- if (access == ARCH_TIMER_PHYS_ACCESS)
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ cval = evt + arch_counter_get_cntpct();
write_sysreg(cval, cntp_cval_el0);
- else
+ } else {
+ cval = evt + arch_counter_get_cntvct();
write_sysreg(cval, cntv_cval_el0);
+ }
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
@@ -346,6 +369,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "Freescale erratum a005858",
.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
+ .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_tval_phys,
.set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -358,6 +382,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "HiSilicon erratum 161010101",
.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+ .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_tval_phys,
.set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -368,6 +393,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "HiSilicon erratum 161010101",
.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+ .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_tval_phys,
.set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -378,6 +404,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_local_cap_id,
.id = (void *)ARM64_WORKAROUND_858921,
.desc = "ARM erratum 858921",
+ .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
@@ -901,7 +928,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
- if (IS_ENABLED(CONFIG_ARM64) ||
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
arch_timer_read_counter = arch_counter_get_cntvct;
else
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 7c64a5c1bfc1..a31990408153 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -177,7 +177,14 @@ out_fail:
return ret;
}
-void timer_of_exit(struct timer_of *to)
+/**
+ * timer_of_cleanup - release timer_of ressources
+ * @to: timer_of structure
+ *
+ * Release the ressources that has been used in timer_of_init().
+ * This function should be called in init error cases
+ */
+void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
timer_irq_exit(&to->of_irq);
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index 43f5ba3f8979..3f708f1be43d 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -68,6 +68,6 @@ static inline unsigned long timer_of_period(struct timer_of *to)
extern int __init timer_of_init(struct device_node *np,
struct timer_of *to);
-extern void timer_of_exit(struct timer_of *to);
+extern void __init timer_of_cleanup(struct timer_of *to);
#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 4ebae43118ef..d8addbce40bc 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -275,6 +275,7 @@ config BMIPS_CPUFREQ
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
+ depends on LEMOTE_MACH2F
help
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
@@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ
config LOONGSON1_CPUFREQ
tristate "Loongson1 CPUFreq Driver"
+ depends on LOONGSON1_LS1B
help
This option adds a CPUFreq driver for loongson1 processors which
support software configurable cpu frequency.
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 58d4f4e1ad6a..ca38229b045a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,6 +22,8 @@
#include "cpufreq_governor.h"
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
+
static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
+ unsigned int sampling_interval;
int ret;
- ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
- if (ret != 1)
+
+ ret = sscanf(buf, "%u", &sampling_interval);
+ if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
return -EINVAL;
+ dbs_data->sampling_rate = sampling_interval;
+
/*
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (ret)
goto free_policy_dbs_info;
- dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+ /*
+ * The sampling interval should not be less than the transition latency
+ * of the CPU and it also cannot be too small for dbs_update() to work
+ * correctly.
+ */
+ dbs_data->sampling_rate = max_t(unsigned int,
+ CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+ cpufreq_policy_transition_delay_us(policy));
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 628fe899cb48..d9b2c2de49c4 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -226,17 +226,18 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
- if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
- of_machine_is_compatible("fsl,imx6q"))
- if (dev_pm_opp_disable(dev, 1200000000))
- dev_warn(dev, "failed to disable 1.2GHz OPP\n");
if (val < OCOTP_CFG3_SPEED_996MHZ)
if (dev_pm_opp_disable(dev, 996000000))
dev_warn(dev, "failed to disable 996MHz OPP\n");
- if (of_machine_is_compatible("fsl,imx6q")) {
+
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
if (dev_pm_opp_disable(dev, 852000000))
dev_warn(dev, "failed to disable 852MHz OPP\n");
+ if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+ if (dev_pm_opp_disable(dev, 1200000000))
+ dev_warn(dev, "failed to disable 1.2GHz OPP\n");
}
iounmap(base);
put_node:
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 18c4bd9a5c65..e0d5090b303d 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -620,3 +620,7 @@ static int __init mtk_cpufreq_driver_init(void)
return 0;
}
device_initcall(mtk_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("MediaTek CPUFreq driver");
+MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index ed6531f075c6..e06605b21841 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -384,9 +384,9 @@ static int powernv_add_idle_states(void)
* Firmware passes residency and latency values in ns.
* cpuidle expects it in us.
*/
- exit_latency = latency_ns[i] / 1000;
+ exit_latency = DIV_ROUND_UP(latency_ns[i], 1000);
if (!rc)
- target_residency = residency_ns[i] / 1000;
+ target_residency = DIV_ROUND_UP(residency_ns[i], 1000);
else
target_residency = 0;
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 0f20f5ec9617..f2246a5abcf6 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -46,7 +46,6 @@ struct nx842_workmem {
ktime_t start;
- struct vas_window *txwin; /* Used with VAS function */
char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);
@@ -65,7 +64,7 @@ struct nx842_coproc {
* Send the request to NX engine on the chip for the corresponding CPU
* where the process is executing. Use with VAS function.
*/
-static DEFINE_PER_CPU(struct nx842_coproc *, coproc_inst);
+static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
/* no cpu hotplug on powernv, so this list never changes after init */
static LIST_HEAD(nx842_coprocs);
@@ -586,16 +585,11 @@ static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
ccw = SET_FIELD(CCW_FC_842, ccw, fc);
crb->ccw = cpu_to_be32(ccw);
- txwin = wmem->txwin;
- /* shoudn't happen, we don't load without a coproc */
- if (!txwin) {
- pr_err_ratelimited("NX-842 coprocessor is not available");
- return -ENODEV;
- }
-
do {
wmem->start = ktime_get();
preempt_disable();
+ txwin = this_cpu_read(cpu_txwin);
+
/*
* VAS copy CRB into L2 cache. Refer <asm/vas.h>.
* @crb and @offset.
@@ -689,25 +683,6 @@ static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
list_add(&coproc->list, &nx842_coprocs);
}
-/*
- * Identify chip ID for each CPU and save coprocesor adddress for the
- * corresponding NX engine in percpu coproc_inst.
- * coproc_inst is used in crypto_init to open send window on the NX instance
- * for the corresponding CPU / chip where the open request is executed.
- */
-static void nx842_set_per_cpu_coproc(struct nx842_coproc *coproc)
-{
- unsigned int i, chip_id;
-
- for_each_possible_cpu(i) {
- chip_id = cpu_to_chip_id(i);
-
- if (coproc->chip_id == chip_id)
- per_cpu(coproc_inst, i) = coproc;
- }
-}
-
-
static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
{
struct vas_window *txwin = NULL;
@@ -725,15 +700,58 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
* Open a VAS send window which is used to send request to NX.
*/
txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
- if (IS_ERR(txwin)) {
+ if (IS_ERR(txwin))
pr_err("ibm,nx-842: Can not open TX window: %ld\n",
PTR_ERR(txwin));
- return NULL;
- }
return txwin;
}
+/*
+ * Identify chip ID for each CPU, open send wndow for the corresponding NX
+ * engine and save txwin in percpu cpu_txwin.
+ * cpu_txwin is used in copy/paste operation for each compression /
+ * decompression request.
+ */
+static int nx842_open_percpu_txwins(void)
+{
+ struct nx842_coproc *coproc, *n;
+ unsigned int i, chip_id;
+
+ for_each_possible_cpu(i) {
+ struct vas_window *txwin = NULL;
+
+ chip_id = cpu_to_chip_id(i);
+
+ list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ /*
+ * Kernel requests use only high priority FIFOs. So
+ * open send windows for these FIFOs.
+ */
+
+ if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
+ continue;
+
+ if (coproc->chip_id == chip_id) {
+ txwin = nx842_alloc_txwin(coproc);
+ if (IS_ERR(txwin))
+ return PTR_ERR(txwin);
+
+ per_cpu(cpu_txwin, i) = txwin;
+ break;
+ }
+ }
+
+ if (!per_cpu(cpu_txwin, i)) {
+ /* shoudn't happen, Each chip will have NX engine */
+ pr_err("NX engine is not availavle for CPU %d\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
int vasid)
{
@@ -819,14 +837,6 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
coproc->vas.id = vasid;
nx842_add_coprocs_list(coproc, chip_id);
- /*
- * Kernel requests use only high priority FIFOs. So save coproc
- * info in percpu coproc_inst which will be used to open send
- * windows for crypto open requests later.
- */
- if (coproc->ct == VAS_COP_TYPE_842_HIPRI)
- nx842_set_per_cpu_coproc(coproc);
-
return 0;
err_out:
@@ -847,24 +857,12 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
return -EINVAL;
}
- for_each_compatible_node(dn, NULL, "ibm,power9-vas-x") {
- if (of_get_ibm_chip_id(dn) == chip_id)
- break;
- }
-
- if (!dn) {
- pr_err("Missing VAS device node\n");
+ vasid = chip_to_vas_id(chip_id);
+ if (vasid < 0) {
+ pr_err("Unable to map chip_id %d to vasid\n", chip_id);
return -EINVAL;
}
- if (of_property_read_u32(dn, "ibm,vas-id", &vasid)) {
- pr_err("Missing ibm,vas-id device property\n");
- of_node_put(dn);
- return -EINVAL;
- }
-
- of_node_put(dn);
-
for_each_child_of_node(pn, dn) {
if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
ret = vas_cfg_coproc_info(dn, chip_id, vasid);
@@ -928,6 +926,19 @@ static int __init nx842_powernv_probe(struct device_node *dn)
static void nx842_delete_coprocs(void)
{
struct nx842_coproc *coproc, *n;
+ struct vas_window *txwin;
+ int i;
+
+ /*
+ * close percpu txwins that are opened for the corresponding coproc.
+ */
+ for_each_possible_cpu(i) {
+ txwin = per_cpu(cpu_txwin, i);
+ if (txwin)
+ vas_win_close(txwin);
+
+ per_cpu(cpu_txwin, i) = 0;
+ }
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
if (coproc->vas.rxwin)
@@ -954,46 +965,6 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
-static int nx842_powernv_crypto_init_vas(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_workmem *wmem;
- struct nx842_coproc *coproc;
- int ret;
-
- ret = nx842_crypto_init(tfm, &nx842_powernv_driver);
-
- if (ret)
- return ret;
-
- wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
- coproc = per_cpu(coproc_inst, smp_processor_id());
-
- ret = -EINVAL;
- if (coproc && coproc->vas.rxwin) {
- wmem->txwin = nx842_alloc_txwin(coproc);
- if (!IS_ERR(wmem->txwin))
- return 0;
-
- ret = PTR_ERR(wmem->txwin);
- }
-
- return ret;
-}
-
-void nx842_powernv_crypto_exit_vas(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_workmem *wmem;
-
- wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
-
- if (wmem && wmem->txwin)
- vas_win_close(wmem->txwin);
-
- nx842_crypto_exit(tfm);
-}
-
static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
{
return nx842_crypto_init(tfm, &nx842_powernv_driver);
@@ -1044,9 +1015,13 @@ static __init int nx842_powernv_init(void)
nx842_powernv_exec = nx842_exec_icswx;
} else {
+ ret = nx842_open_percpu_txwins();
+ if (ret) {
+ nx842_delete_coprocs();
+ return ret;
+ }
+
nx842_powernv_exec = nx842_exec_vas;
- nx842_powernv_alg.cra_init = nx842_powernv_crypto_init_vas;
- nx842_powernv_alg.cra_exit = nx842_powernv_crypto_exit_vas;
}
ret = crypto_register_alg(&nx842_powernv_alg);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index da3cb8c35ec7..d94e25df503b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -116,7 +116,7 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
spin_lock_init(&ctx->lock);
ctx->driver = driver;
- ctx->wmem = kzalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index e9f3b3e4bbf4..7b0bf825c4e7 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -222,7 +222,8 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
{
struct resource *res;
- phys_addr_t phys;
+ /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
+ phys_addr_t uninitialized_var(phys);
int i;
for (i = 0; i < dev_dax->num_resources; i++) {
@@ -427,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct file *filp = vma->vm_file;
+ struct dev_dax *dev_dax = filp->private_data;
+ struct dax_region *dax_region = dev_dax->region;
+
+ if (!IS_ALIGNED(addr, dax_region->align))
+ return -EINVAL;
+ return 0;
+}
+
static const struct vm_operations_struct dax_vm_ops = {
.fault = dev_dax_fault,
.huge_fault = dev_dax_huge_fault,
+ .split = dev_dax_split,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 557b93703532..3ec804672601 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -92,21 +92,21 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
long len;
if (blocksize != PAGE_SIZE) {
- pr_err("VFS (%s): error: unsupported blocksize for dax\n",
+ pr_debug("VFS (%s): error: unsupported blocksize for dax\n",
sb->s_id);
return -EINVAL;
}
err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
if (err) {
- pr_err("VFS (%s): error: unaligned partition for dax\n",
+ pr_debug("VFS (%s): error: unaligned partition for dax\n",
sb->s_id);
return err;
}
dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
if (!dax_dev) {
- pr_err("VFS (%s): error: device does not support dax\n",
+ pr_debug("VFS (%s): error: device does not support dax\n",
sb->s_id);
return -EOPNOTSUPP;
}
@@ -118,7 +118,7 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
put_dax(dax_dev);
if (len < 1) {
- pr_err("VFS (%s): error: dax access failed (%ld)",
+ pr_debug("VFS (%s): error: dax access failed (%ld)\n",
sb->s_id, len);
return len < 0 ? len : -EIO;
}
@@ -273,9 +273,6 @@ EXPORT_SYMBOL_GPL(dax_copy_from_iter);
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
- if (unlikely(!dax_alive(dax_dev)))
- return;
-
if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
return;
@@ -344,6 +341,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
struct inode *inode;
dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
+ if (!dax_dev)
+ return NULL;
+
inode = &dax_dev->inode;
inode->i_rdev = 0;
return inode;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a038dcf5361..bc1cb284111c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
- struct sg_table *sg_table = ERR_PTR(-EINVAL);
+ struct sg_table *sg_table;
might_sleep();
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 9a302799040e..5d101c4053e0 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -27,7 +27,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>
-EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index dec3a815455d..b44d9d7db347 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* @dst: the destination reservation object
* @src: the source reservation object
*
-* Copy all fences from src to dst. Both src->lock as well as dst-lock must be
-* held.
+* Copy all fences from src to dst. dst-lock must be held.
*/
int reservation_object_copy_fences(struct reservation_object *dst,
struct reservation_object *src)
@@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
size_t size;
unsigned i;
- src_list = reservation_object_get_list(src);
+ rcu_read_lock();
+ src_list = rcu_dereference(src->fence);
+retry:
if (src_list) {
- size = offsetof(typeof(*src_list),
- shared[src_list->shared_count]);
+ unsigned shared_count = src_list->shared_count;
+
+ size = offsetof(typeof(*src_list), shared[shared_count]);
+ rcu_read_unlock();
+
dst_list = kmalloc(size, GFP_KERNEL);
if (!dst_list)
return -ENOMEM;
- dst_list->shared_count = src_list->shared_count;
- dst_list->shared_max = src_list->shared_count;
- for (i = 0; i < src_list->shared_count; ++i)
- dst_list->shared[i] =
- dma_fence_get(src_list->shared[i]);
+ rcu_read_lock();
+ src_list = rcu_dereference(src->fence);
+ if (!src_list || src_list->shared_count > shared_count) {
+ kfree(dst_list);
+ goto retry;
+ }
+
+ dst_list->shared_count = 0;
+ dst_list->shared_max = shared_count;
+ for (i = 0; i < src_list->shared_count; ++i) {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference(src_list->shared[i]);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags))
+ continue;
+
+ if (!dma_fence_get_rcu(fence)) {
+ kfree(dst_list);
+ src_list = rcu_dereference(src->fence);
+ goto retry;
+ }
+
+ if (dma_fence_is_signaled(fence)) {
+ dma_fence_put(fence);
+ continue;
+ }
+
+ dst_list->shared[dst_list->shared_count++] = fence;
+ }
} else {
dst_list = NULL;
}
+ new = dma_fence_get_rcu_safe(&src->fence_excl);
+ rcu_read_unlock();
+
kfree(dst->staged);
dst->staged = NULL;
src_list = reservation_object_get_list(dst);
-
old = reservation_object_get_excl(dst);
- new = reservation_object_get_excl(src);
-
- dma_fence_get(new);
preempt_disable();
write_seqcount_begin(&dst->seq);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 38cc7389a6c1..24f83f9eeaed 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{
struct sync_timeline *obj = file->private_data;
+ struct sync_pt *pt, *next;
+
+ spin_lock_irq(&obj->lock);
+
+ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+ dma_fence_set_error(&pt->base, -ENOENT);
+ dma_fence_signal_locked(&pt->base);
+ }
- smp_wmb();
+ spin_unlock_irq(&obj->lock);
sync_timeline_put(obj);
return 0;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fbab271b3bf9..a861b5b4d443 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct data_chunk *first = xt->sgl;
+ struct data_chunk *first;
struct at_desc *desc = NULL;
size_t xfer_count;
unsigned int dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
return NULL;
+ first = xt->sgl;
+
dev_info(chan2dev(chan),
"%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index d50273fed715..afd5e10f8927 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret)
- return ret;
+ goto err_clk;
irq = platform_get_irq(pdev, 0);
ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_clk:
+ clk_disable_unprepare(dmadev->clk);
return ret;
}
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 47edc7fbf91f..ec5f9d2bc820 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_COUNT_MASK 0x1f
#define PATTERN_MEMSET_IDX 0x01
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
struct dmatest_thread {
struct list_head node;
struct dmatest_info *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
u8 **dsts;
u8 **udsts;
enum dma_transaction_type type;
+ wait_queue_head_t done_wait;
+ struct dmatest_done test_done;
bool done;
};
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
- bool done;
- wait_queue_head_t *wait;
-};
static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
-
- done->done = true;
- wake_up_all(done->wait);
+ struct dmatest_thread *thread =
+ container_of(arg, struct dmatest_thread, done_wait);
+ if (!thread->done) {
+ done->done = true;
+ wake_up_all(done->wait);
+ } else {
+ /*
+ * If thread->done, it means that this callback occurred
+ * after the parent thread has cleaned up. This can
+ * happen in the case that driver doesn't implement
+ * the terminate_all() functionality and a dma operation
+ * did not occur within the timeout period
+ */
+ WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+ }
}
static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
*/
static int dmatest_func(void *data)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
- struct dmatest_done done = { .wait = &done_wait };
+ struct dmatest_done *done = &thread->test_done;
struct dmatest_info *info;
struct dmatest_params *params;
struct dma_chan *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
continue;
}
- done.done = false;
+ done->done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &done;
+ tx->callback_param = done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait, done.done,
+ wait_event_freezable_timeout(thread->done_wait, done->done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (!done.done) {
- /*
- * We're leaving the timed out dma operation with
- * dangling pointer to done_wait. To make this
- * correct, we'll need to allocate wait_done for
- * each test iteration and perform "who's gonna
- * free it this time?" dancing. For now, just
- * leave it dangling.
- */
- WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+ if (!done->done) {
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
dmatest_KBs(runtime, total_len), ret);
/* terminate all transfers on specified channels */
- if (ret)
+ if (ret || failed_tests)
dmaengine_terminate_all(chan);
thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
+ thread->test_done.wait = &thread->done_wait;
+ init_waitqueue_head(&thread->done_wait);
smp_wmb();
thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
dma_chan_name(chan), op, i);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 6775f2c74e25..c7568869284e 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
}
}
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
int i;
- for (i = 0; i < DMAMUX_NR; i++)
+ for (i = 0; i < nr_clocks; i++)
clk_disable_unprepare(fsl_edma->muxclk[i]);
}
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fsl_edma->muxbase[i]))
+ if (IS_ERR(fsl_edma->muxbase[i])) {
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxbase[i]);
+ }
sprintf(clkname, "dmamux%d", i);
fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}
ret = clk_prepare_enable(fsl_edma->muxclk[i]);
- if (ret) {
- /* disable only clks which were enabled on error */
- for (; i >= 0; i--)
- clk_disable_unprepare(fsl_edma->muxclk[i]);
-
- dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
- return ret;
- }
+ if (ret)
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
}
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return 0;
}
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2f31d3d0caa6..7792a9186f9c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
dev_err(dev, "Self-test copy failed compare, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
unmap_dma:
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6e4ed5a9c6fd..fa87a055905e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -215,6 +215,17 @@ config QCOM_SCM_64
def_bool y
depends on QCOM_SCM && ARM64
+config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+ bool "Qualcomm download mode enabled by default"
+ depends on QCOM_SCM
+ help
+ A device with "download mode" enabled will upon an unexpected
+ warm-restart enter a special debug mode that allows the user to
+ "download" memory content over USB for offline postmortem analysis.
+ The feature can be enabled/disabled on the kernel command line.
+
+ Say Y here to enable "download mode" by default.
+
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index f70febf680c3..557a47829d03 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -109,6 +109,8 @@ struct kobject *efi_kobj;
/*
* Let's not leave out systab information that snuck into
* the efivars driver
+ * Note, do not add more fields in systab sysfs file as it breaks sysfs
+ * one value per file rule!
*/
static ssize_t systab_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -143,8 +145,7 @@ static ssize_t systab_show(struct kobject *kobj,
return str - buf;
}
-static struct kobj_attribute efi_attr_systab =
- __ATTR(systab, 0400, systab_show, NULL);
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
#define EFI_FIELD(var) efi.var
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index bd7ed3c1148a..c47e0c6ec00f 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
};
/* Generic ESRT Entry ("ESRE") support. */
-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
{
char *str = buf;
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
return str - buf;
}
-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
- esre_fw_class_show, NULL);
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
#define esre_attr_decl(name, size, fmt) \
-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
{ \
return sprintf(buf, fmt "\n", \
le##size##_to_cpu(entry->esre.esre1->name)); \
} \
\
-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
- esre_##name##_show, NULL)
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
esre_attr_decl(fw_type, 32, "%u");
esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
/* support for displaying ESRT fields at the top level */
#define esrt_attr_decl(name, size, fmt) \
-static ssize_t esrt_##name##_show(struct kobject *kobj, \
+static ssize_t name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf)\
{ \
return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
} \
\
-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
- esrt_##name##_show, NULL)
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
esrt_attr_decl(fw_resource_count, 32, "%u");
esrt_attr_decl(fw_resource_count_max, 32, "%u");
@@ -431,7 +428,7 @@ err_remove_group:
err_remove_esrt:
kobject_put(esrt_kobj);
err:
- kfree(esrt);
+ memunmap(esrt);
esrt = NULL;
return error;
}
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 8e64b77aeac9..f377609ff141 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
return map_attr->show(entry, buf);
}
-static struct map_attribute map_type_attr = __ATTR_RO(type);
-static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
-static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
-static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
-static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
/*
* These are default attributes that are added for every memmap entry.
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 35e553b3b190..e4b40f2b4627 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev)
if (ret)
return ret;
- return vpd_sections_init(entry.cbmem_addr);
+ vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+ if (!vpd_kobj)
+ return -ENOMEM;
+
+ ret = vpd_sections_init(entry.cbmem_addr);
+ if (ret) {
+ kobject_put(vpd_kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vpd_remove(struct platform_device *pdev)
+{
+ vpd_section_destroy(&ro_vpd);
+ vpd_section_destroy(&rw_vpd);
+
+ kobject_put(vpd_kobj);
+
+ return 0;
}
static struct platform_driver vpd_driver = {
.probe = vpd_probe,
+ .remove = vpd_remove,
.driver = {
.name = "vpd",
},
};
+static struct platform_device *vpd_pdev;
+
static int __init vpd_platform_init(void)
{
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple("vpd", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ int ret;
- vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
- if (!vpd_kobj)
- return -ENOMEM;
+ ret = platform_driver_register(&vpd_driver);
+ if (ret)
+ return ret;
- platform_driver_register(&vpd_driver);
+ vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0);
+ if (IS_ERR(vpd_pdev)) {
+ platform_driver_unregister(&vpd_driver);
+ return PTR_ERR(vpd_pdev);
+ }
return 0;
}
static void __exit vpd_platform_exit(void)
{
- vpd_section_destroy(&ro_vpd);
- vpd_section_destroy(&rw_vpd);
- kobject_put(vpd_kobj);
+ platform_device_unregister(vpd_pdev);
+ platform_driver_unregister(&vpd_driver);
}
module_init(vpd_platform_init);
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index 6523ce962865..f3f4f810e5df 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -220,7 +220,7 @@ out_free_cpus:
return err;
}
-static void dummy_callback(unsigned long ignored) {}
+static void dummy_callback(struct timer_list *unused) {}
static int suspend_cpu(int index, bool broadcast)
{
@@ -287,7 +287,7 @@ static int suspend_test_thread(void *arg)
pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
cpu, drv->state_count - 1);
- setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
+ timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
int index;
/*
@@ -340,6 +340,7 @@ static int suspend_test_thread(void *arg)
* later.
*/
del_timer(&wakeup_timer);
+ destroy_timer_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
complete(&suspend_threads_done);
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 93e3b96b6dfa..dfbd894d5bb7 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -561,6 +561,12 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
return ret ? : le32_to_cpu(out);
}
+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+ return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+ enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
+}
+
int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
struct {
@@ -579,6 +585,13 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
return ret ? : le32_to_cpu(scm_ret);
}
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+ size_t mem_sz, phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz)
+{
+ return -ENODEV;
+}
+
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare)
{
@@ -596,3 +609,21 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
{
return -ENODEV;
}
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
+ if (ret >= 0)
+ *val = ret;
+
+ return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+ return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+ addr, val);
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 6e6d561708e2..688525dd4aee 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -382,6 +382,33 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
return ret ? : res.a1;
}
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+ size_t mem_sz, phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = mem_region;
+ desc.args[1] = mem_sz;
+ desc.args[2] = src;
+ desc.args[3] = src_sz;
+ desc.args[4] = dest;
+ desc.args[5] = dest_sz;
+ desc.args[6] = 0;
+
+ desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+ QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+ QCOM_MEM_PROT_ASSIGN_ID,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
{
struct qcom_scm_desc desc = {0};
@@ -439,3 +466,47 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
return ret;
}
+
+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
+ desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+ &desc, &res);
+}
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+ unsigned int *val)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = addr;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
+ &desc, &res);
+ if (ret >= 0)
+ *val = res.a1;
+
+ return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = addr;
+ desc.args[1] = val;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+ &desc, &res);
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index bb16510d75ba..af4c75217ea6 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -19,15 +19,20 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include "qcom_scm.h"
+static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+module_param(download_mode, bool, 0);
+
#define SCM_HAS_CORE_CLK BIT(0)
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK BIT(2)
@@ -38,6 +43,21 @@ struct qcom_scm {
struct clk *iface_clk;
struct clk *bus_clk;
struct reset_controller_dev reset;
+
+ u64 dload_mode_addr;
+};
+
+struct qcom_scm_current_perm_info {
+ __le32 vmid;
+ __le32 perm;
+ __le64 ctx;
+ __le32 ctx_size;
+ __le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+ __le64 mem_addr;
+ __le64 mem_size;
};
static struct qcom_scm *__scm;
@@ -333,6 +353,66 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+ return __qcom_scm_io_readl(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+ return __qcom_scm_io_writel(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+ bool avail;
+ int ret = 0;
+
+ avail = __qcom_scm_is_call_available(__scm->dev,
+ QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_SET_DLOAD_MODE);
+ if (avail) {
+ ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+ } else if (__scm->dload_mode_addr) {
+ ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
+ enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
+ } else {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+ }
+
+ if (ret)
+ dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+ struct device_node *tcsr;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ u32 offset;
+ int ret;
+
+ tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+ if (!tcsr)
+ return 0;
+
+ ret = of_address_to_resource(tcsr, 0, &res);
+ of_node_put(tcsr);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+ if (ret < 0)
+ return ret;
+
+ *addr = res.start + offset;
+
+ return 0;
+}
+
/**
* qcom_scm_is_available() - Checks if SCM is available
*/
@@ -348,6 +428,88 @@ int qcom_scm_set_remote_state(u32 state, u32 id)
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership need to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid for current set of owners, each set bit in
+ * flag indicate a unique owner
+ * @newvm: array having new owners and corrsponding permission
+ * flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *srcvm,
+ struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+ struct qcom_scm_current_perm_info *destvm;
+ struct qcom_scm_mem_map_info *mem_to_map;
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ phys_addr_t ptr_phys;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+ size_t ptr_sz;
+ int next_vm;
+ __le32 *src;
+ void *ptr;
+ int ret;
+ int len;
+ int i;
+
+ src_sz = hweight_long(*srcvm) * sizeof(*src);
+ mem_to_map_sz = sizeof(*mem_to_map);
+ dest_sz = dest_cnt * sizeof(*destvm);
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ /* Fill source vmid detail */
+ src = ptr;
+ len = hweight_long(*srcvm);
+ for (i = 0; i < len; i++) {
+ src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+ *srcvm ^= 1 << (ffs(*srcvm) - 1);
+ }
+
+ /* Fill details of mem buff to map */
+ mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+ mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+ mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+ mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+ next_vm = 0;
+ /* Fill details of next vmid detail */
+ destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ for (i = 0; i < dest_cnt; i++) {
+ destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+ destvm[i].perm = cpu_to_le32(newvm[i].perm);
+ destvm[i].ctx = 0;
+ destvm[i].ctx_size = 0;
+ next_vm |= BIT(newvm[i].vmid);
+ }
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+ dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d.\n", ret);
+ return -EINVAL;
+ }
+
+ *srcvm = next_vm;
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
@@ -358,6 +520,10 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (!scm)
return -ENOMEM;
+ ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+ if (ret < 0)
+ return ret;
+
clks = (unsigned long)of_device_get_match_data(&pdev->dev);
if (clks & SCM_HAS_CORE_CLK) {
scm->core_clk = devm_clk_get(&pdev->dev, "core");
@@ -406,9 +572,24 @@ static int qcom_scm_probe(struct platform_device *pdev)
__qcom_scm_init();
+ /*
+ * If requested enable "download mode", from this point on warmboot
+ * will cause the the boot stages to enter download mode, unless
+ * disabled below by a clean shutdown/reboot.
+ */
+ if (download_mode)
+ qcom_scm_set_download_mode(true);
+
return 0;
}
+static void qcom_scm_shutdown(struct platform_device *pdev)
+{
+ /* Clean shutdown, disable download mode to allow normal restart */
+ if (download_mode)
+ qcom_scm_set_download_mode(false);
+}
+
static const struct of_device_id qcom_scm_dt_match[] = {
{ .compatible = "qcom,scm-apq8064",
/* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
@@ -436,6 +617,7 @@ static struct platform_driver qcom_scm_driver = {
.of_match_table = qcom_scm_dt_match,
},
.probe = qcom_scm_probe,
+ .shutdown = qcom_scm_shutdown,
};
static int __init qcom_scm_init(void)
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 9bea691f30fb..dcd7f7917fc7 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -14,9 +14,11 @@
#define QCOM_SCM_SVC_BOOT 0x1
#define QCOM_SCM_BOOT_ADDR 0x1
+#define QCOM_SCM_SET_DLOAD_MODE 0x10
#define QCOM_SCM_BOOT_ADDR_MC 0x11
#define QCOM_SCM_SET_REMOTE_STATE 0xa
extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id);
+extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable);
#define QCOM_SCM_FLAG_HLOS 0x01
#define QCOM_SCM_FLAG_COLDBOOT_MC 0x02
@@ -30,6 +32,12 @@ extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
#define QCOM_SCM_CMD_CORE_HOTPLUGGED 0x10
extern void __qcom_scm_cpu_power_down(u32 flags);
+#define QCOM_SCM_SVC_IO 0x5
+#define QCOM_SCM_IO_READ 0x1
+#define QCOM_SCM_IO_WRITE 0x2
+extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
+extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+
#define QCOM_SCM_SVC_INFO 0x6
#define QCOM_IS_CALL_AVAIL_CMD 0x1
extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
@@ -95,5 +103,10 @@ extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
+#define QCOM_MEM_PROT_ASSIGN_ID 0x16
+extern int __qcom_scm_assign_mem(struct device *dev,
+ phys_addr_t mem_region, size_t mem_sz,
+ phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz);
#endif
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 0e2011636fbb..deb483064f53 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -10,9 +10,9 @@
* and select subsets of aarch64), a Device Tree node (on arm), or using
* a kernel module (or command line) parameter with the following syntax:
*
- * [fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>]
+ * [qemu_fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>]
* or
- * [fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>]
+ * [qemu_fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>]
*
* where:
* <size> := size of ioport or mmio range
@@ -21,9 +21,9 @@
* <data_off> := (optional) offset of data register
*
* e.g.:
- * fw_cfg.ioport=2@0x510:0:1 (the default on x86)
+ * qemu_fw_cfg.ioport=2@0x510:0:1 (the default on x86)
* or
- * fw_cfg.mmio=0xA@0x9020000:8:0 (the default on arm)
+ * qemu_fw_cfg.mmio=0xA@0x9020000:8:0 (the default on arm)
*/
#include <linux/module.h>
@@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
{
pr_debug("fw_cfg: unloading.\n");
fw_cfg_sysfs_cache_cleanup();
+ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+ fw_cfg_io_cleanup();
fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
- fw_cfg_io_cleanup();
return 0;
}
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index e34a2f79e1ad..1b826dcca719 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -1,2 +1,4 @@
-obj-$(CONFIG_TEGRA_BPMP) += bpmp.o
+tegra-bpmp-y = bpmp.o
+tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
+obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
new file mode 100644
index 000000000000..f7f6a0a5cb07
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+struct seqbuf {
+ char *buf;
+ size_t pos;
+ size_t size;
+};
+
+static void seqbuf_init(struct seqbuf *seqbuf, void *buf, size_t size)
+{
+ seqbuf->buf = buf;
+ seqbuf->size = size;
+ seqbuf->pos = 0;
+}
+
+static size_t seqbuf_avail(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos < seqbuf->size ? seqbuf->size - seqbuf->pos : 0;
+}
+
+static size_t seqbuf_status(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos <= seqbuf->size ? 0 : -EOVERFLOW;
+}
+
+static int seqbuf_eof(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos >= seqbuf->size;
+}
+
+static int seqbuf_read(struct seqbuf *seqbuf, void *buf, size_t nbyte)
+{
+ nbyte = min(nbyte, seqbuf_avail(seqbuf));
+ memcpy(buf, seqbuf->buf + seqbuf->pos, nbyte);
+ seqbuf->pos += nbyte;
+ return seqbuf_status(seqbuf);
+}
+
+static int seqbuf_read_u32(struct seqbuf *seqbuf, uint32_t *v)
+{
+ int err;
+
+ err = seqbuf_read(seqbuf, v, 4);
+ *v = le32_to_cpu(*v);
+ return err;
+}
+
+static int seqbuf_read_str(struct seqbuf *seqbuf, const char **str)
+{
+ *str = seqbuf->buf + seqbuf->pos;
+ seqbuf->pos += strnlen(*str, seqbuf_avail(seqbuf));
+ seqbuf->pos++;
+ return seqbuf_status(seqbuf);
+}
+
+static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
+{
+ seqbuf->pos += offset;
+}
+
+/* map filename in Linux debugfs to corresponding entry in BPMP */
+static const char *get_filename(struct tegra_bpmp *bpmp,
+ const struct file *file, char *buf, int size)
+{
+ char root_path_buf[512];
+ const char *root_path;
+ const char *filename;
+ size_t root_len;
+
+ root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
+ sizeof(root_path_buf));
+ if (IS_ERR(root_path))
+ return NULL;
+
+ root_len = strlen(root_path);
+
+ filename = dentry_path(file->f_path.dentry, buf, size);
+ if (IS_ERR(filename))
+ return NULL;
+
+ if (strlen(filename) < root_len ||
+ strncmp(filename, root_path, root_len))
+ return NULL;
+
+ filename += root_len;
+
+ return filename;
+}
+
+static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data,
+ size_t *nbytes)
+{
+ struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_READ),
+ .fop = {
+ .fnameaddr = cpu_to_le32((uint32_t)name),
+ .fnamelen = cpu_to_le32((uint32_t)sz_name),
+ .dataaddr = cpu_to_le32((uint32_t)data),
+ .datalen = cpu_to_le32((uint32_t)sz_data),
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+
+ *nbytes = (size_t)resp.fop.nbytes;
+
+ return 0;
+}
+
+static int mrq_debugfs_write(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_WRITE),
+ .fop = {
+ .fnameaddr = cpu_to_le32((uint32_t)name),
+ .fnamelen = cpu_to_le32((uint32_t)sz_name),
+ .dataaddr = cpu_to_le32((uint32_t)data),
+ .datalen = cpu_to_le32((uint32_t)sz_data),
+ },
+ };
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ };
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
+ size_t size, size_t *nbytes)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_DUMPDIR),
+ .dumpdir = {
+ .dataaddr = cpu_to_le32((uint32_t)addr),
+ .datalen = cpu_to_le32((uint32_t)size),
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+
+ *nbytes = (size_t)resp.dumpdir.nbytes;
+
+ return 0;
+}
+
+static int debugfs_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = m->size;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char buf[256];
+ const char *filename;
+ size_t len, nbytes;
+ int ret;
+
+ filename = get_filename(bpmp, file, buf, sizeof(buf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ ret = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ ret = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+ &nbytes);
+
+ if (!ret)
+ seq_write(m, datavirt, nbytes);
+
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return ret;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, debugfs_show, file, SZ_128K);
+}
+
+static ssize_t debugfs_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = count;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char fnamebuf[256];
+ const char *filename;
+ size_t len;
+ int ret;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ ret = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ if (copy_from_user(datavirt, buf, count)) {
+ ret = -EFAULT;
+ goto free_databuf;
+ }
+
+ ret = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+ count);
+
+free_databuf:
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return ret ?: count;
+}
+
+static const struct file_operations debugfs_fops = {
+ .open = debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = debugfs_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
+ struct dentry *parent, uint32_t depth)
+{
+ int err;
+ uint32_t d, t;
+ const char *name;
+ struct dentry *dentry;
+
+ while (!seqbuf_eof(seqbuf)) {
+ err = seqbuf_read_u32(seqbuf, &d);
+ if (err < 0)
+ return err;
+
+ if (d < depth) {
+ seqbuf_seek(seqbuf, -4);
+ /* go up a level */
+ return 0;
+ } else if (d != depth) {
+ /* malformed data received from BPMP */
+ return -EIO;
+ }
+
+ err = seqbuf_read_u32(seqbuf, &t);
+ if (err < 0)
+ return err;
+ err = seqbuf_read_str(seqbuf, &name);
+ if (err < 0)
+ return err;
+
+ if (t & DEBUGFS_S_ISDIR) {
+ dentry = debugfs_create_dir(name, parent);
+ if (!dentry)
+ return -ENOMEM;
+ err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
+ if (err < 0)
+ return err;
+ } else {
+ umode_t mode;
+
+ mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0;
+ mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0;
+ dentry = debugfs_create_file(name, mode,
+ parent, bpmp,
+ &debugfs_fops);
+ if (!dentry)
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf,
+ size_t bufsize, struct dentry *root)
+{
+ struct seqbuf seqbuf;
+ int err;
+
+ bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+ if (!bpmp->debugfs_mirror)
+ return -ENOMEM;
+
+ seqbuf_init(&seqbuf, buf, bufsize);
+ err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+ if (err < 0) {
+ debugfs_remove_recursive(bpmp->debugfs_mirror);
+ bpmp->debugfs_mirror = NULL;
+ }
+
+ return err;
+}
+
+static int mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
+{
+ struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
+ struct mrq_query_abi_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_QUERY_ABI,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int ret;
+
+ ret = tegra_bpmp_transfer(bpmp, &msg);
+ if (ret < 0) {
+ /* something went wrong; assume not supported */
+ dev_warn(bpmp->dev, "tegra_bpmp_transfer failed (%d)\n", ret);
+ return 0;
+ }
+
+ return resp.status ? 0 : 1;
+}
+
+int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
+{
+ dma_addr_t phys;
+ void *virt;
+ const size_t sz = SZ_256K;
+ size_t nbytes;
+ int ret;
+ struct dentry *root;
+
+ if (!mrq_is_supported(bpmp, MRQ_DEBUGFS))
+ return 0;
+
+ root = debugfs_create_dir("bpmp", NULL);
+ if (!root)
+ return -ENOMEM;
+
+ virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
+ if (ret < 0)
+ goto free;
+
+ ret = create_debugfs_mirror(bpmp, virt, nbytes, root);
+free:
+ dma_free_coherent(bpmp->dev, sz, virt, phys);
+out:
+ if (ret < 0)
+ debugfs_remove(root);
+
+ return ret;
+}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 73ca55b7b7ec..a7f461f2e650 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -194,16 +194,24 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
}
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
- void *data, size_t size)
+ void *data, size_t size, int *ret)
{
+ int err;
+
if (data && size > 0)
memcpy(data, channel->ib->data, size);
- return tegra_ivc_read_advance(channel->ivc);
+ err = tegra_ivc_read_advance(channel->ivc);
+ if (err < 0)
+ return err;
+
+ *ret = channel->ib->code;
+
+ return 0;
}
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
- void *data, size_t size)
+ void *data, size_t size, int *ret)
{
struct tegra_bpmp *bpmp = channel->bpmp;
unsigned long flags;
@@ -217,7 +225,7 @@ static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
}
spin_lock_irqsave(&bpmp->lock, flags);
- err = __tegra_bpmp_channel_read(channel, data, size);
+ err = __tegra_bpmp_channel_read(channel, data, size, ret);
clear_bit(index, bpmp->threaded.allocated);
spin_unlock_irqrestore(&bpmp->lock, flags);
@@ -337,7 +345,8 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
if (err < 0)
return err;
- return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
@@ -371,7 +380,8 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
if (err == 0)
return -ETIMEDOUT;
- return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
@@ -387,8 +397,8 @@ static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
return NULL;
}
-static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
- int code, const void *data, size_t size)
+void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
+ const void *data, size_t size)
{
unsigned long flags = channel->ib->flags;
struct tegra_bpmp *bpmp = channel->bpmp;
@@ -426,6 +436,7 @@ static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
mbox_client_txdone(bpmp->mbox.channel, 0);
}
}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
unsigned int mrq,
@@ -824,6 +835,10 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (err < 0)
goto free_mrq;
+ err = tegra_bpmp_init_debugfs(bpmp);
+ if (err < 0)
+ dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);
+
return 0;
free_mrq:
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 00cfed3c3e1a..23b12d99ddfe 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -439,7 +439,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout)) {
- dev_err(dev, "Mbox timedout in resp(caller: %pF)\n",
+ dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index e359930bebc8..0d7743089414 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -79,7 +79,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
return !status;
}
-static struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
+static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
.enable_set = xlnx_pr_decoupler_enable_set,
.enable_show = xlnx_pr_decoupler_enable_show,
};
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4ea63d9bd131..e318bf8c623c 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -185,7 +185,7 @@ static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
return 0;
}
-int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
+static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
uint32_t irq, stat;
@@ -215,8 +215,8 @@ int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
-int fsi_slave_handle_error(struct fsi_slave *slave, bool write, uint32_t addr,
- size_t size)
+static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
+ uint32_t addr, size_t size)
{
struct fsi_master *master = slave->master;
int rc, link;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 29fc15423299..d6a8e851ad13 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -834,15 +834,6 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
-config GPIO_SX150X
- bool "Semtech SX150x I2C GPIO expander (deprecated)"
- depends on PINCTRL && I2C=y
- select PINCTRL_SX150X
- default n
- help
- Say yes here to provide support for Semtech SX150x-series I2C
- GPIO expanders. The GPIO driver was replaced by a Pinctrl version.
-
config GPIO_TPIC2810
tristate "TPIC2810 8-Bit I2C GPO expander"
help
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 6b535ec858cc..15a1f4b348c4 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -23,6 +23,7 @@
struct gen_74x164_chip {
struct gpio_chip gpio_chip;
struct mutex lock;
+ struct gpio_desc *gpiod_oe;
u32 registers;
/*
* Since the registers are chained, every byte sent will make
@@ -31,8 +32,7 @@ struct gen_74x164_chip {
* register at the end of the transfer. So, to have a logical
* numbering, store the bytes in reverse order.
*/
- u8 buffer[0];
- struct gpio_desc *gpiod_oe;
+ u8 buffer[];
};
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 8781817d9003..6b3ca6601af2 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -539,12 +539,12 @@ static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
if (!have_gpio(gpiochip_get_data(chip), offset))
return -ENODEV;
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index f75d8443ecaf..e4b3d7db68c9 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
u32 mask;
d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
- g = (struct davinci_gpio_regs __iomem *)d->regs;
+ g = (struct davinci_gpio_regs __iomem *)d->regs[0];
mask = __gpio_mask(data->irq - d->base_irq);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 8d32ccc980d9..b86e09e1b13b 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -239,12 +239,12 @@ static int em_gio_to_irq(struct gpio_chip *chip, unsigned offset)
static int em_gio_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void em_gio_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
/* Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index babb7bd2ba59..a0a5f9730aa7 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -947,7 +947,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
- { .compatible = "onsemi,pca9654", .data = OF_953X( 8, PCA_INT), },
+ { .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
{ }
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 6029899789f3..f480fb896963 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -330,16 +330,6 @@ static int pxa_gpio_of_xlate(struct gpio_chip *gc,
}
#endif
-static int pxa_gpio_request(struct gpio_chip *chip, unsigned int offset)
-{
- return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void pxa_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- pinctrl_free_gpio(chip->base + offset);
-}
-
static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
struct device_node *np, void __iomem *regbase)
{
@@ -358,8 +348,8 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
- pchip->chip.request = pxa_gpio_request;
- pchip->chip.free = pxa_gpio_free;
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0ea998a3e357..e76de57dd617 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -250,7 +250,7 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
if (error < 0)
return error;
- error = pinctrl_request_gpio(chip->base + offset);
+ error = pinctrl_gpio_request(chip->base + offset);
if (error)
pm_runtime_put(&p->pdev->dev);
@@ -261,7 +261,7 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
/*
* Set the GPIO as an input to ensure that the next GPIO request won't
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index fbaf974277df..8db47f671708 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -141,14 +141,14 @@ static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio)
static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_request_gpio(offset);
+ return pinctrl_gpio_request(offset);
}
static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
- pinctrl_free_gpio(offset);
+ pinctrl_gpio_free(offset);
tegra_gpio_disable(tgi, offset);
}
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 22c5be65051f..0bb9bb583889 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -232,7 +232,7 @@ static int tz1090_gpio_request(struct gpio_chip *chip, unsigned int offset)
struct tz1090_gpio_bank *bank = gpiochip_get_data(chip);
int ret;
- ret = pinctrl_request_gpio(chip->base + offset);
+ ret = pinctrl_gpio_request(chip->base + offset);
if (ret)
return ret;
@@ -246,7 +246,7 @@ static void tz1090_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tz1090_gpio_bank *bank = gpiochip_get_data(chip);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
tz1090_gpio_clear_bit(bank, REG_GPIO_BIT_EN, offset);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 641a5eb552cb..aad84a6306c4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1962,7 +1962,7 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
*/
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->gpiodev->base + offset);
+ return pinctrl_gpio_request(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
@@ -1973,7 +1973,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
*/
void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->gpiodev->base + offset);
+ pinctrl_gpio_free(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 83cb2a88c204..4d9f21831741 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -110,7 +110,7 @@ config DRM_FBDEV_OVERALLOC
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
- depends on DRM_KMS_HELPER
+ depends on DRM
help
Say Y here, if you want to use EDID data to be loaded from the
/lib/firmware directory or one of the provided built-in
@@ -184,6 +184,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
+ select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.
@@ -191,6 +192,8 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
+source "drivers/gpu/drm/amd/lib/Kconfig"
+
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
@@ -278,6 +281,8 @@ source "drivers/gpu/drm/tinydrm/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
+source "drivers/gpu/drm/tve200/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 8ce07039bb89..e9500844333e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o
+ drm_syncobj.o drm_lease.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -29,6 +29,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
+drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@@ -37,7 +38,6 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
-drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
@@ -45,14 +45,13 @@ drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
-CFLAGS_drm_trace_points.o := -I$(src)
-
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@@ -101,3 +100,4 @@ obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_TVE200) += tve200/
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8a08e81ee90d..d4176a3fb706 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the ACP, which is a sub-component
# of AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 26682454a446..e8af1f5e8a79 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
pages. Uses more memory for housekeeping, enable only for debugging.
source "drivers/gpu/drm/amd/acp/Kconfig"
+source "drivers/gpu/drm/amd/display/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 567b0377e1e2..90202cf4cd1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,16 +1,42 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
+DISPLAY_FOLDER_NAME=display
+FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
- -I$(FULL_AMD_PATH)/acp/include
+ -I$(FULL_AMD_PATH)/acp/include \
+ -I$(FULL_AMD_DISPLAY_PATH) \
+ -I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/dc \
+ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
amdgpu-y := amdgpu_drv.o
@@ -26,7 +52,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -133,6 +159,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
-obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
+ifneq ($(CONFIG_DRM_AMD_DC),)
-CFLAGS_amdgpu_trace_points.o := -I$(src)
+RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+include $(FULL_AMD_DISPLAY_PATH)/Makefile
+
+amdgpu-y += $(AMD_DISPLAY_FILES)
+
+endif
+
+obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 103635ab784c..0b14b5373783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -65,6 +65,8 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_mn.h"
+#include "amdgpu_dm.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -91,7 +93,7 @@ extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
-extern unsigned amdgpu_ip_block_mask;
+extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
@@ -100,18 +102,20 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_dc;
+extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
-extern unsigned amdgpu_pcie_gen_cap;
-extern unsigned amdgpu_pcie_lane_cap;
-extern unsigned amdgpu_cg_mask;
-extern unsigned amdgpu_pg_mask;
-extern unsigned amdgpu_sdma_phase_quantum;
+extern uint amdgpu_pcie_gen_cap;
+extern uint amdgpu_pcie_lane_cap;
+extern uint amdgpu_cg_mask;
+extern uint amdgpu_pg_mask;
+extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
-extern unsigned amdgpu_pp_feature_mask;
+extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
@@ -120,6 +124,7 @@ extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
+extern int amdgpu_compute_multipipe;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -178,6 +183,7 @@ struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
+struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -292,14 +298,25 @@ struct amdgpu_buffer_funcs {
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
+ /* number of dw to reserve per operation */
+ unsigned copy_pte_num_dw;
+
/* copy pte entries from GART */
void (*copy_pte)(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
+
/* write pte one entry at a time with addr mapping */
void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr);
+
+ /* maximum nums of PTEs/PDEs in a single operation */
+ uint32_t set_max_nums_pte_pde;
+
+ /* number of dw to reserve per operation */
+ unsigned set_pte_pde_num_dw;
+
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -332,6 +349,7 @@ struct amdgpu_gart_funcs {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
+ bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
@@ -399,6 +417,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
@@ -455,9 +474,10 @@ struct amdgpu_sa_bo {
*/
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj);
+ int alignment, u32 initial_domain,
+ u64 flags, bool kernel,
+ struct reservation_object *resv,
+ struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -697,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring);
/*
@@ -715,10 +735,14 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter;
+ uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
- bool preamble_presented;
+ bool preamble_presented;
+ enum amd_sched_priority init_priority;
+ enum amd_sched_priority override_priority;
+ struct mutex lock;
};
struct amdgpu_ctx_mgr {
@@ -731,17 +755,22 @@ struct amdgpu_ctx_mgr {
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct dma_fence *fence);
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
/*
* file private structure
*/
@@ -753,7 +782,6 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
- u32 vram_lost_counter;
};
/*
@@ -854,7 +882,7 @@ struct amdgpu_mec {
struct amdgpu_kiq {
u64 eop_gpu_addr;
struct amdgpu_bo *eop_obj;
- struct mutex ring_mutex;
+ spinlock_t ring_lock;
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
};
@@ -1014,11 +1042,14 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- bool in_reset;
/* s3/s4 mask */
bool in_suspend;
/* NGG */
struct amdgpu_ngg ngg;
+
+ /* pipe reservation */
+ struct mutex pipe_reserve_mutex;
+ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1056,6 +1087,7 @@ struct amdgpu_cs_parser {
/* buffer objects */
struct ww_acquire_ctx ticket;
struct amdgpu_bo_list *bo_list;
+ struct amdgpu_mn *mn;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
struct dma_fence *fence;
@@ -1096,6 +1128,7 @@ struct amdgpu_job {
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
+ uint32_t vram_lost_counter;
/* user fence handling */
uint64_t uf_addr;
@@ -1121,7 +1154,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -1183,6 +1216,9 @@ struct amdgpu_firmware {
/* gpu info firmware data pointer */
const struct firmware *gpu_info_fw;
+
+ void *fw_buf_ptr;
+ uint64_t fw_buf_mc;
};
/*
@@ -1197,20 +1233,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
void amdgpu_test_moves(struct amdgpu_device *adev);
/*
- * MMU Notifier
- */
-#if defined(CONFIG_MMU_NOTIFIER)
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
-void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-#else
-static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
-{
- return -ENODEV;
-}
-static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
-#endif
-
-/*
* Debugfs
*/
struct amdgpu_debugfs {
@@ -1305,6 +1327,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1371,6 +1395,18 @@ struct amdgpu_atcs {
};
/*
+ * Firmware VRAM reservation
+ */
+struct amdgpu_fw_vram_usage {
+ u64 start_offset;
+ u64 size;
+ struct amdgpu_bo *reserved_bo;
+ void *va;
+};
+
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
+
+/*
* CGS
*/
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -1502,6 +1538,7 @@ struct amdgpu_device {
/* display */
bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
+ /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
@@ -1519,7 +1556,6 @@ struct amdgpu_device {
/* powerplay */
struct amd_powerplay powerplay;
- bool pp_enabled;
bool pp_force_state_enabled;
/* dpm */
@@ -1536,18 +1572,14 @@ struct amdgpu_device {
/* sdma */
struct amdgpu_sdma sdma;
- union {
- struct {
- /* uvd */
- struct amdgpu_uvd uvd;
+ /* uvd */
+ struct amdgpu_uvd uvd;
- /* vce */
- struct amdgpu_vce vce;
- };
+ /* vce */
+ struct amdgpu_vce vce;
- /* vcn */
- struct amdgpu_vcn vcn;
- };
+ /* vcn */
+ struct amdgpu_vcn vcn;
/* firmwares */
struct amdgpu_firmware firmware;
@@ -1558,6 +1590,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* display related functionality */
+ struct amdgpu_display_manager dm;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -1575,6 +1610,8 @@ struct amdgpu_device {
struct delayed_work late_init_work;
struct amdgpu_virt virt;
+ /* firmware VRAM reservation */
+ struct amdgpu_fw_vram_usage fw_vram_usage;
/* link all shadow bo */
struct list_head shadow_list;
@@ -1592,6 +1629,7 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
+ bool in_sriov_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1618,6 +1656,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+
/*
* Registers read & write functions.
*/
@@ -1759,6 +1800,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
+#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
@@ -1791,18 +1833,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
-struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
-bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end);
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
- int *last_invalidated);
-bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
@@ -1836,8 +1866,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
@@ -1885,10 +1913,15 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **mapping);
+
+#if defined(CONFIG_DRM_AMD_DC)
+int amdgpu_dm_display_resume(struct amdgpu_device *adev );
+#else
+static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
+#endif
#include "amdgpu_object.h"
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index ebca22302ebb..c04f44a90392 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -35,41 +35,50 @@
#include "acp_gfx_if.h"
-#define ACP_TILE_ON_MASK 0x03
-#define ACP_TILE_OFF_MASK 0x02
-#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
-#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
-
-#define ACP_TILE_P1_MASK 0x3e
-#define ACP_TILE_P2_MASK 0x3d
-#define ACP_TILE_DSP0_MASK 0x3b
-#define ACP_TILE_DSP1_MASK 0x37
-
-#define ACP_TILE_DSP2_MASK 0x2f
-
-#define ACP_DMA_REGS_END 0x146c0
-#define ACP_I2S_PLAY_REGS_START 0x14840
-#define ACP_I2S_PLAY_REGS_END 0x148b4
-#define ACP_I2S_CAP_REGS_START 0x148b8
-#define ACP_I2S_CAP_REGS_END 0x1496c
-
-#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
-#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
-#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
-#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
-
-#define mmACP_PGFSM_RETAIN_REG 0x51c9
-#define mmACP_PGFSM_CONFIG_REG 0x51ca
-#define mmACP_PGFSM_READ_REG_0 0x51cc
-
-#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
-#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
-#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
-#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
-
-#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
-#define ACP_SRC_ID 162
+#define ACP_TILE_ON_MASK 0x03
+#define ACP_TILE_OFF_MASK 0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
+
+#define ACP_TILE_P1_MASK 0x3e
+#define ACP_TILE_P2_MASK 0x3d
+#define ACP_TILE_DSP0_MASK 0x3b
+#define ACP_TILE_DSP1_MASK 0x37
+
+#define ACP_TILE_DSP2_MASK 0x2f
+
+#define ACP_DMA_REGS_END 0x146c0
+#define ACP_I2S_PLAY_REGS_START 0x14840
+#define ACP_I2S_PLAY_REGS_END 0x148b4
+#define ACP_I2S_CAP_REGS_START 0x148b8
+#define ACP_I2S_CAP_REGS_END 0x1496c
+
+#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
+#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
+#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
+#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+
+#define mmACP_PGFSM_RETAIN_REG 0x51c9
+#define mmACP_PGFSM_CONFIG_REG 0x51ca
+#define mmACP_PGFSM_READ_REG_0 0x51cc
+
+#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
+#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
+#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
+#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
+
+#define mmACP_CONTROL 0x5131
+#define mmACP_STATUS 0x5133
+#define mmACP_SOFT_RESET 0x5134
+#define ACP_CONTROL__ClkEn_MASK 0x1
+#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
+#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
+#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
+#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
+
+#define ACP_TIMEOUT_LOOP 0x000000FF
+#define ACP_DEVS 3
+#define ACP_SRC_ID 162
enum {
ACP_TILE_P1 = 0,
@@ -260,6 +269,8 @@ static int acp_hw_init(void *handle)
{
int r, i;
uint64_t acp_base;
+ u32 val = 0;
+ u32 count = 0;
struct device *dev;
struct i2s_platform_data *i2s_pdata;
@@ -402,6 +413,46 @@ static int acp_hw_init(void *handle)
}
}
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Enable clock to ACP and wait until the clock is enabled */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val = val | ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Deassert the SOFT RESET flags */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
return 0;
}
@@ -414,6 +465,8 @@ static int acp_hw_init(void *handle)
static int acp_hw_fini(void *handle)
{
int i, ret;
+ u32 val = 0;
+ u32 count = 0;
struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -421,6 +474,42 @@ static int acp_hw_fini(void *handle)
if (!adev->acp.acp_cell)
return 0;
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Disable ACP clock */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val &= ~ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+
if (adev->acp.acp_genpd) {
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b9dbbf9cb8b0..1e3e9be7d77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .alloc_pasid = amdgpu_vm_alloc_pasid,
+ .free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
@@ -336,6 +338,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
@@ -354,7 +357,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- if (read_user_wptr(mm, wptr, wptr_val))
+ /* read_user_ptr may take the mm->mmap_sem.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(kgd);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(kgd, pipe_id, queue_id);
+ if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
@@ -369,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
+ unsigned long end_jiffies;
uint32_t sdma_base_addr;
+ uint32_t data;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
- m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
- m->sdma_rlc_rb_base);
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+ usleep_range(500, 1000);
+ }
+ if (m->sdma_engine_id) {
+ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+ } else {
+ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+ }
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
-
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl);
@@ -564,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 309f2419c6d8..056929b8ccd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .alloc_pasid = amdgpu_vm_alloc_pasid,
+ .free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
@@ -290,6 +292,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct vi_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
@@ -337,7 +340,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- if (read_user_wptr(mm, wptr, wptr_val))
+ /* read_user_ptr may take the mm->mmap_sem.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(kgd);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(kgd, pipe_id, queue_id);
+ if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index ce443586a0c7..f450b69323fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
return true;
}
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * byes in the src buffer.
*/
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
@@ -1807,6 +1805,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
uint16_t data_offset;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+ u64 start_addr;
+ u64 size;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@@ -1815,7 +1815,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
+ size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware request VRAM reservation for SR-IOV */
+ adev->fw_vram_usage.start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->fw_vram_usage.size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f9ffe8ef0cd6..ff8efd0f8fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -71,19 +71,33 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
+ struct vram_usagebyfirmware_v2_1 * firmware_usage;
+ uint32_t start_addr, size;
uint16_t data_offset;
int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
- struct vram_usagebyfirmware_v2_1 *firmware_usage =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
-
+ firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
le32_to_cpu(firmware_usage->start_address_in_kb),
le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
le16_to_cpu(firmware_usage->used_by_driver_in_kb));
- usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) * 1024;
+ start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
+ size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware request VRAM reservation for SR-IOV */
+ adev->fw_vram_usage.start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->fw_vram_usage.size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index c21adf60a7f2..057e1ecd83ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}
- tmp = bios[0x18] | (bios[0x19] << 8);
- if (bios[tmp + 0x14] != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM\n");
- return false;
- }
-
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
DRM_INFO("Can't locate bios header\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index fd435a96481c..f2b72c7c6857 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -42,10 +42,31 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
+static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
+ int (*call_back_func)(struct amd_pp_init *, void **))
+{
+ CGS_FUNC_ADEV;
+ struct amd_pp_init pp_init;
+ struct amd_powerplay *amd_pp;
+
+ if (call_back_func == NULL)
+ return NULL;
+
+ amd_pp = &(adev->powerplay);
+ pp_init.chip_family = adev->family;
+ pp_init.chip_id = adev->asic_type;
+ pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+ pp_init.feature_mask = amdgpu_pp_feature_mask;
+ pp_init.device = cgs_device;
+ if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
+ return NULL;
+
+ return adev->powerplay.pp_handle;
+}
+
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
- uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
@@ -53,13 +74,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
int ret = 0;
uint32_t domain = 0;
struct amdgpu_bo *obj;
- struct ttm_placement placement;
- struct ttm_place place;
-
- if (min_offset > max_offset) {
- BUG_ON(1);
- return -EINVAL;
- }
/* fail if the alignment is not a power of 2 */
if (((align != 1) && (align & (align - 1)))
@@ -73,41 +87,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (max_offset > adev->mc.real_vram_size)
- return -EINVAL;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- place.fpfn =
- max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
- place.lpfn =
- min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
- }
-
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
break;
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_UNCACHED;
break;
default:
return -EINVAL;
@@ -116,15 +108,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
*handle = 0;
- placement.placement = &place;
- placement.num_placement = 1;
- placement.busy_placement = &place;
- placement.num_busy_placement = 1;
-
- ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
- true, domain, flags,
- NULL, &placement, NULL,
- 0, &obj);
+ ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
+ NULL, NULL, 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -155,19 +140,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
uint64_t *mcaddr)
{
int r;
- u64 min_offset, max_offset;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
WARN_ON_ONCE(obj->placement.num_placement > 1);
- min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
- max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
-
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
- min_offset, max_offset, mcaddr);
+ r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
}
@@ -675,6 +655,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ strcpy(fw_name, "radeon/tahiti_smc.bin");
+ break;
+ case CHIP_PITCAIRN:
+ if ((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/pitcairn_smc.bin");
+ }
+ break;
+ case CHIP_VERDE:
+ if (((adev->pdev->device == 0x6820) &&
+ ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83))) ||
+ ((adev->pdev->device == 0x6821) &&
+ ((adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87))) ||
+ ((adev->pdev->revision == 0x87) &&
+ ((adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682b)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/verde_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/verde_smc.bin");
+ }
+ break;
+ case CHIP_OLAND:
+ if (((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6600) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605) ||
+ (adev->pdev->device == 0x6610))) ||
+ ((adev->pdev->revision == 0x83) &&
+ (adev->pdev->device == 0x6610))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/oland_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/oland_smc.bin");
+ }
+ break;
+ case CHIP_HAINAN:
+ if (((adev->pdev->revision == 0x81) &&
+ (adev->pdev->device == 0x6660)) ||
+ ((adev->pdev->revision == 0x83) &&
+ ((adev->pdev->device == 0x6660) ||
+ (adev->pdev->device == 0x6663) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/hainan_k_smc.bin");
+ } else if ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/banks_k_2_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/hainan_smc.bin");
+ }
+ break;
+ case CHIP_BONAIRE:
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/bonaire_smc.bin");
+ }
+ break;
+ case CHIP_HAWAII:
+ if (adev->pdev->revision == 0x80) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/hawaii_smc.bin");
+ }
+ break;
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
@@ -838,6 +897,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
sys_info->value = adev->pdev->subsystem_vendor;
break;
+ case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
+ sys_info->value = adev->pdev->devfn;
+ break;
default:
return -ENODEV;
}
@@ -849,10 +911,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
- struct amdgpu_crtc *amdgpu_crtc;
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- uint32_t line_time_us, vblank_lines;
struct cgs_mode_info *mode_info;
if (info == NULL)
@@ -866,30 +924,43 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
mode_info->ref_clock = adev->clock.spll.reference_freq;
}
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
- info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
- info->display_count++;
- }
- if (mode_info != NULL &&
- crtc->enabled && amdgpu_crtc->enabled &&
- amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- mode_info->vblank_time_us = vblank_lines * line_time_us;
- mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- mode_info->ref_clock = adev->clock.spll.reference_freq;
- mode_info = NULL;
+ if (!amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct drm_device *ddev = adev->ddev;
+ struct drm_crtc *crtc;
+ uint32_t line_time_us, vblank_lines;
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled) {
+ info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+ info->display_count++;
+ }
+ if (mode_info != NULL &&
+ crtc->enabled && amdgpu_crtc->enabled &&
+ amdgpu_crtc->hw_mode.clock) {
+ line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+ amdgpu_crtc->hw_mode.clock;
+ vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+ (amdgpu_crtc->v_border * 2);
+ mode_info->vblank_time_us = vblank_lines * line_time_us;
+ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ mode_info = NULL;
+ }
}
}
+ } else {
+ info->display_count = adev->pm.pm_display_cfg.num_display;
+ if (mode_info != NULL) {
+ mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
+ mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ }
}
-
return 0;
}
@@ -1139,6 +1210,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
+ .register_pp_handle = amdgpu_cgs_register_pp_handle,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8d1cf2d3e663..df9cbc78e168 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -231,7 +231,7 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -256,7 +256,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->edid) {
- kfree(amdgpu_connector->edid);
- amdgpu_connector->edid = NULL;
- }
+ kfree(amdgpu_connector->edid);
+ amdgpu_connector->edid = NULL;
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@@ -374,7 +372,7 @@ amdgpu_connector_best_single_encoder(struct drm_connector *connector)
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1079,7 +1077,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1136,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1155,7 +1153,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digitial */
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1296,7 +1294,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1325,7 +1323,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 60d8bedb694d..57abf7abd7a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -25,6 +25,7 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/pagemap.h>
+#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -89,12 +90,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
- goto put_ctx;
+ goto free_chunk;
}
p->nchunks = cs->in.num_chunks;
@@ -102,7 +105,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
- goto put_ctx;
+ goto free_chunk;
}
for (i = 0; i < p->nchunks; i++) {
@@ -169,6 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret)
goto free_all_kdata;
+ if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+ ret = -ECANCELED;
+ goto free_all_kdata;
+ }
+
if (p->uf_entry.robj)
p->job->uf_addr = uf_offset;
kfree(chunk_array);
@@ -182,8 +190,6 @@ free_partial_kdata:
kfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
-put_ctx:
- amdgpu_ctx_put(p->ctx);
free_chunk:
kfree(chunk_array);
@@ -403,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
if (candidate->robj == validated)
break;
+ /* We can't move pinned BOs here */
+ if (bo->pin_count)
+ continue;
+
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
/* Check if this BO is in one of the domains we need space for */
@@ -473,11 +483,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM;
/* Check if we have user pages and nobody bound the BO already */
- if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
- size_t size = sizeof(struct page *);
-
- size *= bo->tbo.ttm->num_pages;
- memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ lobj->user_pages) {
+ amdgpu_ttm_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+ false);
+ if (r)
+ return r;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+ lobj->user_pages);
binding_userptr = true;
}
@@ -502,7 +517,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- bool need_mmap_lock = false;
unsigned i, tries = 10;
int r;
@@ -510,9 +524,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
- need_mmap_lock = p->bo_list->first_userptr !=
- p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev);
}
INIT_LIST_HEAD(&duplicates);
@@ -521,9 +535,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
- if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
-
while (1) {
struct list_head need_pages;
unsigned i;
@@ -543,23 +554,24 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo;
e = &p->bo_list->array[i];
+ bo = e->robj;
- if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
+ bo->tbo.ttm->num_pages);
kvfree(e->user_pages);
e->user_pages = NULL;
}
- if (e->robj->tbo.ttm->state != tt_bound &&
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);
@@ -636,9 +648,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- fpriv->vm.last_eviction_counter =
- atomic64_read(&p->adev->num_evictions);
-
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -679,9 +688,6 @@ error_validate:
error_free_pages:
- if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
-
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
@@ -691,8 +697,7 @@ error_free_pages:
continue;
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
+ e->robj->tbo.ttm->num_pages);
kvfree(e->user_pages);
}
}
@@ -707,7 +712,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct reservation_object *resv = e->robj->tbo.resv;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
+ amdgpu_bo_explicit_sync(e->robj));
if (r)
return r;
@@ -728,11 +734,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (!error)
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- parser->fence);
- else if (backoff)
+ if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
@@ -742,8 +744,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
- if (parser->ctx)
+ if (parser->ctx) {
+ mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
+ }
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
@@ -768,10 +772,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
- if (r)
- return r;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -825,7 +825,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
- r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
+ r = amdgpu_vm_handle_moved(adev, vm);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+ if (r)
+ return r;
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -835,7 +841,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo);
+ amdgpu_vm_bo_invalidate(adev, bo, false);
}
}
@@ -848,19 +854,63 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring = p->job->ring;
- int i, r;
+ int r;
/* Only for UVD/VCE VM emulation */
- if (ring->funcs->parse_cs) {
- for (i = 0; i < p->job->num_ibs; i++) {
- r = amdgpu_ring_parse_cs(ring, p, i);
+ if (p->job->ring->funcs->parse_cs) {
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+ struct amdgpu_bo_va_mapping *m;
+ struct amdgpu_bo *aobj = NULL;
+ struct amdgpu_cs_chunk *chunk;
+ struct amdgpu_ib *ib;
+ uint64_t offset;
+ uint8_t *kptr;
+
+ chunk = &p->chunks[i];
+ ib = &p->job->ibs[j];
+ chunk_ib = chunk->kdata;
+
+ if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+ continue;
+
+ r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
+ &aobj, &m);
+ if (r) {
+ DRM_ERROR("IB va_start is invalid\n");
+ return r;
+ }
+
+ if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ return -EINVAL;
+ }
+
+ /* the IB should be reserved at this point */
+ r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+ if (r) {
+ return r;
+ }
+
+ offset = m->start * AMDGPU_GPU_PAGE_SIZE;
+ kptr += chunk_ib->va_start - offset;
+
+ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, j);
if (r)
return r;
+
+ j++;
}
}
if (p->job->vm) {
- p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
r = amdgpu_bo_vm_update_pte(p);
if (r)
@@ -922,54 +972,18 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring = ring;
- if (ring->funcs->parse_cs) {
- struct amdgpu_bo_va_mapping *m;
- struct amdgpu_bo *aobj = NULL;
- uint64_t offset;
- uint8_t *kptr;
-
- m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
- &aobj);
- if (!aobj) {
- DRM_ERROR("IB va_start is invalid\n");
- return -EINVAL;
- }
-
- if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
- return -EINVAL;
- }
-
- /* the IB should be reserved at this point */
- r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
- return r;
- }
-
- offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += chunk_ib->va_start - offset;
-
- r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
- } else {
- r = amdgpu_ib_get(adev, vm, 0, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
+ r = amdgpu_ib_get(adev, vm,
+ ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
+ ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
}
ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
+
j++;
}
@@ -979,7 +993,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
- return 0;
+ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1133,14 +1147,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
+ unsigned i;
+ uint64_t seq;
+
int r;
+ amdgpu_mn_lock(p->mn);
+ if (p->bo_list) {
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+ return -ERESTARTSYS;
+ }
+ }
+ }
+
job = p->job;
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
return r;
}
@@ -1148,21 +1179,36 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
+ r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+ if (r) {
+ dma_fence_put(p->fence);
+ dma_fence_put(&job->base.s_fence->finished);
+ amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
+ return r;
+ }
+
amdgpu_cs_post_dependencies(p);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
- job->uf_sequence = cs->out.handle;
+ cs->out.handle = seq;
+ job->uf_sequence = seq;
+
amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring,
+ amd_sched_get_job_priority(&job->base));
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+
+ ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+ amdgpu_mn_unlock(p->mn);
+
return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
@@ -1170,8 +1216,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
parser.adev = adev;
parser.filp = filp;
@@ -1182,6 +1226,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
+ r = amdgpu_cs_ib_fill(adev, &parser);
+ if (r)
+ goto out;
+
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
@@ -1192,9 +1240,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, &parser);
- if (r)
- goto out;
r = amdgpu_cs_dependencies(adev, &parser);
if (r) {
@@ -1230,16 +1275,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
long r;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
-
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
@@ -1257,6 +1298,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = PTR_ERR(fence);
else if (fence) {
r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r > 0 && fence->error)
+ r = fence->error;
dma_fence_put(fence);
} else
r = 1;
@@ -1304,6 +1347,62 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
return fence;
}
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ union drm_amdgpu_fence_to_handle *info = data;
+ struct dma_fence *fence;
+ struct drm_syncobj *syncobj;
+ struct sync_file *sync_file;
+ int fd, r;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ switch (info->in.what) {
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ dma_fence_put(fence);
+ return fd;
+ }
+
+ sync_file = sync_file_create(fence);
+ dma_fence_put(fence);
+ if (!sync_file) {
+ put_unused_fd(fd);
+ return -ENOMEM;
+ }
+
+ fd_install(fd, sync_file->file);
+ info->out.handle = fd;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* amdgpu_cs_wait_all_fence - wait on all fences to signal
*
@@ -1338,6 +1437,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
if (r == 0)
break;
+
+ if (fence->error)
+ return fence->error;
}
memset(wait, 0, sizeof(*wait));
@@ -1383,6 +1485,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
+ first = i;
goto out;
}
}
@@ -1396,8 +1499,11 @@ out:
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
- /* set return value 0 to indicate success */
- r = 0;
+
+ if (first < fence_count && array[first])
+ r = array[first]->error;
+ else
+ r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
@@ -1418,15 +1524,12 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);
@@ -1462,78 +1565,36 @@ err_free_fences:
* virtual memory address. Returns allocation structure when found, NULL
* otherwise.
*/
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **map)
{
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
- unsigned i;
-
- if (!parser->bo_list)
- return NULL;
-
- addr /= AMDGPU_GPU_PAGE_SIZE;
-
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo_list_entry *lobj;
-
- lobj = &parser->bo_list->array[i];
- if (!lobj->bo_va)
- continue;
-
- list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
-
- list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
- }
-
- return NULL;
-}
-
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
- unsigned i;
int r;
- if (!parser->bo_list)
- return 0;
+ addr /= AMDGPU_GPU_PAGE_SIZE;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+ return -EINVAL;
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r))
- return r;
+ *bo = mapping->bo_va->base.bo;
+ *map = mapping;
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- continue;
+ /* Double check that the BO is reserved by this CS */
+ if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+ return -EINVAL;
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r))
+ if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
+ false);
+ if (r)
return r;
}
- return 0;
+ return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a11e44340b23..c184468e2b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,13 +23,41 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_auth.h>
#include "amdgpu.h"
+#include "amdgpu_sched.h"
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ enum amd_sched_priority priority)
+{
+ /* NORMAL and below are accessible by everyone */
+ if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ enum amd_sched_priority priority,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
+ if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+ return -EINVAL;
+
+ r = amdgpu_ctx_priority_permit(filp, priority);
+ if (r)
+ return r;
+
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
kref_init(&ctx->refcount);
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
if (!ctx->fences)
return -ENOMEM;
+ mutex_init(&ctx->lock);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ ctx->init_priority = priority;
+ ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
struct amd_sched_rq *rq;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+ mutex_destroy(&ctx->lock);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
+ struct drm_file *filp,
+ enum amd_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
kfree(ctx);
return r;
}
+
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, ctx);
+ r = amdgpu_ctx_init(adev, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
+ enum amd_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
r = 0;
id = args->in.ctx_id;
+ priority = amdgpu_to_sched_priority(args->in.priority);
+
+ /* For backwards compatibility reasons, we need to accept
+ * ioctls with garbage in the priority field */
+ if (priority == AMD_SCHED_PRIORITY_INVALID)
+ priority = AMD_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
- r = amdgpu_ctx_alloc(adev, fpriv, &id);
+ r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
@@ -246,8 +291,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct dma_fence *fence)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t* handler)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
@@ -256,12 +301,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
- if (other) {
- signed long r;
- r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
- }
+ if (other)
+ BUG_ON(!dma_fence_is_signaled(other));
dma_fence_get(fence);
@@ -271,8 +312,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
spin_unlock(&ctx->ring_lock);
dma_fence_put(other);
+ if (handler)
+ *handler = seq;
- return seq;
+ return 0;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -303,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority)
+{
+ int i;
+ struct amdgpu_device *adev = ctx->adev;
+ struct amd_sched_rq *rq;
+ struct amd_sched_entity *entity;
+ struct amdgpu_ring *ring;
+ enum amd_sched_priority ctx_prio;
+
+ ctx->override_priority = priority;
+
+ ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ ring = adev->rings[i];
+ entity = &ctx->rings[i].entity;
+ rq = &ring->sched.sched_rq[ctx_prio];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ continue;
+
+ amd_sched_entity_set_rq(entity, rq);
+ }
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+ struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+ unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+ struct dma_fence *other = cring->fences[idx];
+
+ if (other) {
+ signed long r;
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e630d918fefc..3573ecdb06ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
@@ -56,6 +57,7 @@
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -65,6 +67,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
"TAHITI",
@@ -107,10 +110,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_rreg(adev, reg);
- }
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -135,10 +136,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_wreg(adev, reg, v);
- }
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -402,6 +401,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
+ /* No doorbell on SI hardware generation */
+ if (adev->asic_type < CHIP_BONAIRE) {
+ adev->doorbell.base = 0;
+ adev->doorbell.size = 0;
+ adev->doorbell.num_doorbells = 0;
+ adev->doorbell.ptr = NULL;
+ return 0;
+ }
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -539,7 +547,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
- *wb = offset * 8; /* convert to dw offset */
+ *wb = offset << 3; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
@@ -557,7 +565,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
if (wb < adev->wb.num_wb)
- __clear_bit(wb, adev->wb.used);
+ __clear_bit(wb >> 3, adev->wb.used);
}
/**
@@ -647,42 +655,96 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
}
/*
- * GPU helpers function.
+ * Firmware Reservation functions
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_fw_reserve_vram_fini - free fw reserved vram
*
* @adev: amdgpu_device pointer
*
- * Check if the asic has been initialized (all asics) at driver startup
- * or post is needed if hw reset is performed.
- * Returns true if need or false if not.
+ * free fw reserved vram if it has been reserved.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
- uint32_t reg;
+ amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+ NULL, &adev->fw_vram_usage.va);
+}
- if (adev->has_hw_reset) {
- adev->has_hw_reset = false;
- return true;
- }
+/**
+ * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+ u64 gpu_addr;
+ u64 vram_size = adev->mc.visible_vram_size;
- /* bios scratch used on CIK+ */
- if (adev->asic_type >= CHIP_BONAIRE)
- return amdgpu_atombios_scratch_need_asic_init(adev);
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
- /* check MEM_SIZE for older asics */
- reg = amdgpu_asic_get_config_memsize(adev);
+ if (adev->fw_vram_usage.size > 0 &&
+ adev->fw_vram_usage.size <= vram_size) {
- if ((reg != 0) && (reg != 0xffffffff))
- return false;
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
+ PAGE_SIZE, true, 0,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+ &adev->fw_vram_usage.reserved_bo);
+ if (r)
+ goto error_create;
- return true;
+ r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+ if (r)
+ goto error_reserve;
+ r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ adev->fw_vram_usage.start_offset,
+ (adev->fw_vram_usage.start_offset +
+ adev->fw_vram_usage.size), &gpu_addr);
+ if (r)
+ goto error_pin;
+ r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+ if (r)
+ goto error_kmap;
+
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ }
+ return r;
+error_kmap:
+ amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+ amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+ return r;
}
-static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+
+/*
+ * GPU helpers function.
+ */
+/**
+ * amdgpu_need_post - check if the hw need post or not
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check if the asic has been initialized (all asics) at driver startup
+ * or post is needed if hw reset is performed.
+ * Returns true if need or false if not.
+ */
+bool amdgpu_need_post(struct amdgpu_device *adev)
{
+ uint32_t reg;
+
if (amdgpu_sriov_vf(adev))
return false;
@@ -705,7 +767,23 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
}
}
- return amdgpu_need_post(adev);
+
+ if (adev->has_hw_reset) {
+ adev->has_hw_reset = false;
+ return true;
+ }
+
+ /* bios scratch used on CIK+ */
+ if (adev->asic_type >= CHIP_BONAIRE)
+ return amdgpu_atombios_scratch_need_asic_init(adev);
+
+ /* check MEM_SIZE for older asics */
+ reg = amdgpu_asic_get_config_memsize(adev);
+
+ if ((reg != 0) && (reg != 0xffffffff))
+ return false;
+
+ return true;
}
/**
@@ -887,6 +965,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r;
}
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
@@ -906,6 +998,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev)
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
adev->mode_info.atom_card_info = NULL;
+ device_remove_file(adev->dev, &dev_attr_vbios_version);
}
/**
@@ -922,6 +1015,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
+ int ret;
if (!atom_card_info)
return -ENOMEM;
@@ -958,6 +1052,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
}
+
+ ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+ if (ret) {
+ DRM_ERROR("Failed to create device file for VBIOS version\n");
+ return ret;
+ }
+
return 0;
}
@@ -1757,10 +1858,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
- if (amdgpu_sriov_vf(adev)) {
- amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+ if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- }
return 0;
}
@@ -1848,6 +1947,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
+ AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
@@ -1933,16 +2033,67 @@ static int amdgpu_resume(struct amdgpu_device *adev)
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
- if (adev->is_atom_fw) {
- if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
- } else {
- if (amdgpu_atombios_has_gpu_virtualization_table(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ if (amdgpu_sriov_vf(adev)) {
+ if (adev->is_atom_fw) {
+ if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ } else {
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ }
+
+ if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
+ }
+}
+
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+{
+ switch (asic_type) {
+#if defined(CONFIG_DRM_AMD_DC)
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+ return amdgpu_dc != 0;
+#endif
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ return amdgpu_dc > 0;
+ case CHIP_VEGA10:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+#endif
+ return amdgpu_dc != 0;
+#endif
+ default:
+ return false;
}
}
/**
+ * amdgpu_device_has_dc_support - check if dc is supported
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return amdgpu_device_asic_has_dc_support(adev->asic_type);
+}
+
+/**
* amdgpu_device_init - initialize the driver
*
* @adev: amdgpu_device pointer
@@ -1979,6 +2130,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1995,7 +2147,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
-
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -2007,8 +2158,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
+ mutex_init(&adev->gfx.pipe_reserve_mutex);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
amdgpu_check_arguments(adev);
@@ -2051,9 +2204,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- if (adev->asic_type >= CHIP_BONAIRE)
- /* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ /* doorbell bar mapping */
+ amdgpu_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2095,7 +2247,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2103,10 +2255,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (amdgpu_vpost_needed(adev)) {
+ if (amdgpu_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
r = -EINVAL;
goto failed;
}
@@ -2114,7 +2265,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
goto failed;
}
} else {
@@ -2126,7 +2276,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atomfirmware_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
} else {
@@ -2134,18 +2284,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
/* init i2c buses */
- amdgpu_atombios_i2c_init(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_atombios_i2c_init(adev);
}
/* Fence driver */
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2155,7 +2306,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_fini(adev);
goto failed;
}
@@ -2175,7 +2326,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto failed;
}
@@ -2183,8 +2334,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
amdgpu_fbdev_init(adev);
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+
r = amdgpu_gem_debugfs_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2201,6 +2359,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ r = amdgpu_debugfs_vbios_dump_init(adev);
+ if (r)
+ DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
+
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -2220,7 +2382,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@@ -2252,6 +2414,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
+ amdgpu_fw_reserve_vram_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
@@ -2262,7 +2425,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->accel_working = false;
cancel_delayed_work_sync(&adev->late_init_work);
/* free i2c buses */
- amdgpu_i2c_fini(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
adev->bios = NULL;
@@ -2276,8 +2440,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- if (adev->asic_type >= CHIP_BONAIRE)
- amdgpu_doorbell_fini(adev);
+ amdgpu_doorbell_fini(adev);
+ amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2313,12 +2477,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+ drm_modeset_unlock_all(dev);
}
- drm_modeset_unlock_all(dev);
amdgpu_amdkfd_suspend(adev);
@@ -2461,13 +2627,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pre DCE11 */
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ drm_modeset_unlock_all(dev);
+ } else {
+ /*
+ * There is no equivalent atomic helper to turn on
+ * display, so we defined our own function for this,
+ * once suspend resume is supported by the atomic
+ * framework this will be reworked
+ */
+ amdgpu_dm_display_resume(adev);
}
- drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
@@ -2484,7 +2662,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
#endif
- drm_helper_hpd_irq_event(dev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_hpd_irq_event(dev);
+ else
+ drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
@@ -2504,6 +2685,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
int i;
bool asic_hang = false;
+ if (amdgpu_sriov_vf(adev))
+ return true;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -2546,7 +2730,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
@@ -2654,7 +2839,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->virt.lock_reset);
atomic_inc(&adev->gpu_reset_counter);
- adev->gfx.in_reset = true;
+ adev->in_sriov_reset = true;
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -2765,7 +2950,7 @@ give_up_reset:
dev_info(adev->dev, "GPU reset successed!\n");
}
- adev->gfx.in_reset = false;
+ adev->in_sriov_reset = false;
mutex_unlock(&adev->virt.lock_reset);
return r;
}
@@ -2780,6 +2965,7 @@ give_up_reset:
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
+ struct drm_atomic_state *state = NULL;
int i, r;
int resched;
bool need_full_reset, vram_lost = false;
@@ -2793,6 +2979,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ /* store modesetting */
+ if (amdgpu_device_has_dc_support(adev))
+ state = drm_atomic_helper_suspend(adev->ddev);
/* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -2902,7 +3091,6 @@ out:
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
@@ -2910,13 +3098,16 @@ out:
}
}
- drm_helper_resume_force_mode(adev->ddev);
+ if (amdgpu_device_has_dc_support(adev)) {
+ r = drm_atomic_helper_resume(adev->ddev, state);
+ amdgpu_dm_display_resume(adev);
+ } else
+ drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
}
else {
dev_info(adev->dev, "GPU reset successed!\n");
@@ -3070,9 +3261,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3146,9 +3337,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3463,10 +3654,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values);
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
- else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
- r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
- &valuesize);
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
else
return -EINVAL;
@@ -3499,12 +3687,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0x7F);
- se = ((*pos >> 7) & 0xFF);
- sh = ((*pos >> 15) & 0xFF);
- cu = ((*pos >> 23) & 0xFF);
- wave = ((*pos >> 31) & 0xFF);
- simd = ((*pos >> 37) & 0xFF);
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
@@ -3549,14 +3737,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0xFFF); /* in dwords */
- se = ((*pos >> 12) & 0xFF);
- sh = ((*pos >> 20) & 0xFF);
- cu = ((*pos >> 28) & 0xFF);
- wave = ((*pos >> 36) & 0xFF);
- simd = ((*pos >> 44) & 0xFF);
- thread = ((*pos >> 52) & 0xFF);
- bank = ((*pos >> 60) & 1);
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -3754,6 +3942,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
{
return 0;
}
+
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_write(m, adev->bios, adev->bios_size);
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_vbios_dump_list[] = {
+ {"amdgpu_vbios",
+ amdgpu_debugfs_get_vbios_dump,
+ 0, NULL},
+};
+
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+ return amdgpu_debugfs_add_files(adev,
+ amdgpu_vbios_dump_list, 1);
+}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
@@ -3763,5 +3973,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6ad243293a78..138beb550a58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -518,7 +518,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -556,7 +556,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return &amdgpu_fb->base;
}
-static void amdgpu_output_poll_changed(struct drm_device *dev)
+void amdgpu_output_poll_changed(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
amdgpu_fb_output_poll_changed(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
new file mode 100644
index 000000000000..3cc0ef0c055e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DISPLAY_H__
+#define __AMDGPU_DISPLAY_H__
+
+struct drm_framebuffer *
+amdgpu_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+void amdgpu_output_poll_changed(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 1cb52fd19060..e997ebbe43ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
}
struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 8c96a4caa715..56caaeee6fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -241,179 +241,125 @@ enum amdgpu_pcie_gen {
AMDGPU_PCIE_GEN_INVALID = 0xffff
};
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*check_state_equal)(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
- bool *equal);
- int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
- int *size);
-
- struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
- int (*reset_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(struct amdgpu_device *adev,
- enum amd_pp_profile_type type);
-};
+#define amdgpu_dpm_pre_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_post_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_display_configuration_changed(adev) \
+ ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_dpm_print_power_state(adev, ps) \
+ ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
+
+#define amdgpu_dpm_vblank_too_short(adev) \
+ ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_enable_bapm(adev, e) \
+ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
- (adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))
+ ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
+ ((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+ ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
+ ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+ ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+ ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
- -EINVAL)
+ ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
+ ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
+ ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
+ ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
+ ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
+ ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+ ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+ ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+ ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+ ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+ ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+ ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+ ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+ ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \
+ ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output))
-#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
+ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
- (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+ ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
- (adev)->pm.dpm.forced_level)
+#define amdgpu_dpm_get_performance_level(adev) \
+ ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_get_power_profile_state(adev, query) \
- ((adev)->powerplay.pp_funcs->get_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->get_power_profile_state(\
(adev)->powerplay.pp_handle, query))
#define amdgpu_dpm_set_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->set_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->set_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_switch_power_profile(adev, type) \
- ((adev)->powerplay.pp_funcs->switch_power_profile(\
+ ((adev)->powerplay.pp_funcs->switch_power_profile(\
(adev)->powerplay.pp_handle, type))
+#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
+ ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
+ (adev)->powerplay.pp_handle, msg_id))
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -485,10 +431,9 @@ struct amdgpu_pm {
struct amdgpu_dpm dpm;
const struct firmware *fw; /* SMC firmware */
uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+ struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
};
#define R600_SSTU_DFLT 0
@@ -551,6 +496,6 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u8 amdgpu_encode_pci_lane_width(u32 lanes);
struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+amdgpu_get_vce_clock_state(void *handle, u32 idx);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0f16986ec5bc..c2f414ffb2cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,9 +69,13 @@
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode
+ * - 3.20.0 - Add support for local BOs
+ * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
+ * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
+ * - 3.23.0 - Add query for VRAM lost counter
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 19
+#define KMS_DRIVER_MINOR 23
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -91,7 +95,7 @@ int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
-unsigned amdgpu_ip_block_mask = 0xffffffff;
+uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
@@ -102,18 +106,20 @@ int amdgpu_vm_debug = 0;
int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
+int amdgpu_dc = -1;
+int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
-unsigned amdgpu_pcie_gen_cap = 0;
-unsigned amdgpu_pcie_lane_cap = 0;
-unsigned amdgpu_cg_mask = 0xffffffff;
-unsigned amdgpu_pg_mask = 0xffffffff;
-unsigned amdgpu_sdma_phase_quantum = 32;
+uint amdgpu_pcie_gen_cap = 0;
+uint amdgpu_pcie_lane_cap = 0;
+uint amdgpu_cg_mask = 0xffffffff;
+uint amdgpu_pg_mask = 0xffffffff;
+uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-unsigned amdgpu_pp_feature_mask = 0xffffffff;
+uint amdgpu_pp_feature_mask = 0xffffffff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -121,6 +127,7 @@ int amdgpu_cntl_sb_buf_per_se = 0;
int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
+int amdgpu_compute_multipipe = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -206,6 +213,12 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(dc, amdgpu_dc, int, 0444);
+
+MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+module_param_named(dc_log, amdgpu_dc_log, int, 0444);
+
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
@@ -264,6 +277,9 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
+MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -510,17 +526,17 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */
- {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Raven */
- {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
{0, 0, 0}
};
@@ -608,6 +624,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
drm_dev_unref(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
}
static void
@@ -852,6 +870,7 @@ static struct drm_driver kms_driver = {
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
+ .gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9afa9c097e1f..90fa8e8bc6fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -42,11 +42,6 @@
this contains a helper + a amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass.
*/
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct amdgpu_device *adev;
-};
static int
amdgpufb_open(struct fb_info *info, int user)
@@ -149,7 +144,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
- true, &gobj);
+ true, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -303,10 +298,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
+ drm_framebuffer_unregister_private(&rfb->base);
+ drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
return 0;
}
@@ -353,7 +348,8 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(adev->ddev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_disable_unused_functions(adev->ddev);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 303b5e099a98..2fa95aef74d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -169,6 +169,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
}
/**
+ * amdgpu_fence_emit_polling - emit a fence on the requeste ring
+ *
+ * @ring: ring the fence is associated with
+ * @s: resulting sequence number
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Used For polling fence.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+{
+ uint32_t seq;
+
+ if (!s)
+ return -EINVAL;
+
+ seq = ++ring->fence_drv.sync_seq;
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ seq, AMDGPU_FENCE_FLAG_INT);
+
+ *s = seq;
+
+ return 0;
+}
+
+/**
* amdgpu_fence_schedule_fallback - schedule fallback check
*
* @ring: pointer to struct amdgpu_ring
@@ -242,9 +268,10 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
*
* Checks for fence activity.
*/
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = (void *)arg;
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
amdgpu_fence_process(ring);
}
@@ -282,6 +309,30 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_fence_wait_polling - busy wait for givn sequence number
+ *
+ * @ring: ring index the fence is associated with
+ * @wait_seq: sequence number to wait
+ * @timeout: the timeout for waiting in usecs
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Returns left time if no timeout, 0 or minus if timeout.
+ */
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout)
+{
+ uint32_t seq;
+
+ do {
+ seq = amdgpu_fence_read(ring);
+ udelay(5);
+ timeout -= 5;
+ } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
+
+ return timeout > 0 ? timeout : 0;
+}
+/**
* amdgpu_fence_count_emitted - get the count of emitted fences
*
* @ring: ring the fence is associated with
@@ -372,8 +423,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
- (unsigned long)ring);
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
@@ -641,6 +691,19 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
atomic_read(&ring->fence_drv.last_seq));
seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+ continue;
+
+ /* set in CP_VMID_PREEMPT and preemption occurred */
+ seq_printf(m, "Last preempted 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
+ /* set in CP_VMID_RESET and reset occurred */
+ seq_printf(m, "Last reset 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
+ /* Both preemption and reset occurred */
+ seq_printf(m, "Last both 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index f4370081f6e6..fe818501c520 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -332,12 +332,13 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
adev->gart.pages[p] = pagelist[i];
#endif
- if (adev->gart.ptr) {
- r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
- adev->gart.ptr);
- if (r)
- return r;
- }
+ if (!adev->gart.ptr)
+ return 0;
+
+ r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+ adev->gart.ptr);
+ if (r)
+ return r;
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6149a47fe63d..e87eedcc0da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj)
+ int alignment, u32 initial_domain,
+ u64 flags, bool kernel,
+ struct reservation_object *resv,
+ struct drm_gem_object **obj)
{
- struct amdgpu_bo *robj;
+ struct amdgpu_bo *bo;
int r;
*obj = NULL;
@@ -59,9 +60,14 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, 0, &robj);
+ flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
@@ -71,7 +77,7 @@ retry:
}
return r;
}
- *obj = &robj->gem_base;
+ *obj = &bo->gem_base;
return 0;
}
@@ -112,7 +118,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
+ struct mm_struct *mm;
int r;
+
+ mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+ if (mm && mm != current->mm)
+ return -EPERM;
+
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+ abo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return -EPERM;
+
r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
@@ -127,35 +143,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
-static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
-{
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (!amdgpu_bo_gpu_accessible(bo))
- return -ERESTARTSYS;
-
- if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct amdgpu_bo *bo =
- container_of(entry->bo, struct amdgpu_bo, tbo);
- if (amdgpu_gem_vm_check(NULL, bo))
- return false;
- }
-
- return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
-}
-
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
@@ -165,13 +152,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd;
- struct list_head list;
+ struct list_head list, duplicates;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
int r;
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
tv.shared = true;
@@ -179,7 +167,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -189,7 +177,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
- if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+ if (amdgpu_vm_ready(vm)) {
struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
@@ -214,18 +202,24 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data;
+ uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size;
+ struct reservation_object *resv = NULL;
struct drm_gem_object *gobj;
uint32_t handle;
- bool kernel = false;
int r;
/* reject invalid gem flags */
- if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED))
+ if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED |
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+
return -EINVAL;
/* reject invalid gem domains */
@@ -240,7 +234,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
- kernel = true;
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
size = size << AMDGPU_GDS_SHIFT;
else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@@ -252,10 +246,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
}
size = roundup(size, PAGE_SIZE);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ r = amdgpu_bo_reserve(vm->root.base.bo, false);
+ if (r)
+ return r;
+
+ resv = vm->root.base.bo->tbo.resv;
+ }
+
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains),
- args->in.domain_flags,
- kernel, &gobj);
+ flags, false, resv, &gobj);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ if (!r) {
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+ abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+ }
+ amdgpu_bo_unreserve(vm->root.base.bo);
+ }
if (r)
return r;
@@ -297,9 +306,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
/* create a gem object to contain this object in */
- r = amdgpu_gem_object_create(adev, args->size, 0,
- AMDGPU_GEM_DOMAIN_CPU, 0,
- 0, &gobj);
+ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+ 0, 0, NULL, &gobj);
if (r)
return r;
@@ -317,12 +325,10 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
-
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
- goto unlock_mmap_sem;
+ goto release_object;
r = amdgpu_bo_reserve(bo, true);
if (r)
@@ -333,8 +339,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(bo);
if (r)
goto free_pages;
-
- up_read(&current->mm->mmap_sem);
}
r = drm_gem_handle_create(filp, gobj, &handle);
@@ -347,10 +351,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;
free_pages:
- release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
-
-unlock_mmap_sem:
- up_read(&current->mm->mmap_sem);
+ release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
release_object:
drm_gem_object_put_unlocked(gobj);
@@ -511,10 +512,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct list_head *list,
uint32_t operation)
{
- int r = -ERESTARTSYS;
+ int r;
- if (!amdgpu_gem_vm_ready(adev, vm, list))
- goto error;
+ if (!amdgpu_vm_ready(vm))
+ return;
r = amdgpu_vm_update_directories(adev, vm);
if (r)
@@ -551,15 +552,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
- struct list_head list;
+ struct list_head list, duplicates;
uint64_t va_flags;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
dev_err(&dev->pdev->dev,
- "va_address 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->va_address,
- AMDGPU_VA_RESERVED_SIZE);
+ "va_address 0x%LX is in reserved area 0x%LX\n",
+ args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
@@ -580,13 +580,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation);
return -EINVAL;
}
- if ((args->operation == AMDGPU_VA_OP_MAP) ||
- (args->operation == AMDGPU_VA_OP_REPLACE)) {
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
- }
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
@@ -603,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -669,6 +665,7 @@ error_unref:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_bo *robj;
@@ -716,6 +713,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+ if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ amdgpu_vm_bo_invalidate(adev, robj, true);
+
amdgpu_bo_unreserve(robj);
break;
default:
@@ -745,8 +745,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- ttm_bo_type_device,
- &gobj);
+ false, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 4f6c68fc1dd9..ef043361009f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -109,9 +109,26 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
}
}
+static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
+{
+ if (amdgpu_compute_multipipe != -1) {
+ DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ amdgpu_compute_multipipe);
+ return amdgpu_compute_multipipe == 1;
+ }
+
+ /* FIXME: spreading the queues across pipes causes perf regressions
+ * on POLARIS11 compute workloads */
+ if (adev->asic_type == CHIP_POLARIS11)
+ return false;
+
+ return adev->gfx.mec.num_mec > 1;
+}
+
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
+ bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
/* policy for amdgpu compute queue ownership */
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
@@ -125,8 +142,7 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
- /* FIXME: spreading the queues across pipes causes perf regressions */
- if (0) {
+ if (multipipe_policy) {
/* policy: amdgpu owns the first two queues of the first MEC */
if (mec == 0 && queue < 2)
set_bit(i, adev->gfx.mec.queue_bitmap);
@@ -185,7 +201,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
int r = 0;
- mutex_init(&kiq->ring_mutex);
+ spin_lock_init(&kiq->ring_lock);
r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
@@ -260,8 +276,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) {
+ /* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must
+ * otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD
+ * deallocated and gart_unbind, to strict diverage we decide to use VRAM domain for
+ * KIQ MQD no matter SRIOV or Bare-metal
+ */
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0d15eb7d31d7..00e0ce10862f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -71,12 +71,6 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
@@ -169,7 +163,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
int r;
spin_lock(&mgr->lock);
- if (atomic64_read(&mgr->available) < mem->num_pages) {
+ if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
+ atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
return 0;
}
@@ -244,8 +239,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
+ s64 result = man->size - atomic64_read(&mgr->available);
- return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+ return (result > 0 ? result : 0) * PAGE_SIZE;
}
/**
@@ -265,7 +261,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
- drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+ drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
man->size, (u64)atomic64_read(&mgr->available),
amdgpu_gtt_mgr_usage(man) >> 20);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 3ab4c65ecc8b..f5f27e4f0f7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -169,6 +169,12 @@ restart_ih:
while (adev->irq.ih.rptr != wptr) {
u32 ring_index = adev->irq.ih.rptr >> 2;
+ /* Prescreening of high-frequency interrupts */
+ if (!amdgpu_ih_prescreen_iv(adev)) {
+ adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+ continue;
+ }
+
/* Before dispatching irq to IP blocks, send it to amdkfd */
amdgpu_amdkfd_interrupt(adev,
(const void *) &adev->irq.ih.ring[ring_index]);
@@ -190,3 +196,79 @@ restart_ih:
return IRQ_HANDLED;
}
+
+/**
+ * amdgpu_ih_add_fault - Add a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a retry page fault interrupt is
+ * received. If this is a new page fault, it will be added to a hash
+ * table. The return value indicates whether this is a new fault, or
+ * a fault that was already known and is already being handled.
+ *
+ * If there are too many pending page faults, this will fail. Retry
+ * interrupts should be ignored in this case until there is enough
+ * free space.
+ *
+ * Returns 0 if the fault was added, 1 if the fault was already known,
+ * -ENOSPC if there are too many pending faults.
+ */
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
+{
+ unsigned long flags;
+ int r = -ENOSPC;
+
+ if (WARN_ON_ONCE(!adev->irq.ih.faults))
+ /* Should be allocated in <IP>_ih_sw_init on GPUs that
+ * support retry faults and require retry filtering.
+ */
+ return r;
+
+ spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+ /* Only let the hash table fill up to 50% for best performance */
+ if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
+ goto unlock_out;
+
+ r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
+ if (!r)
+ adev->irq.ih.faults->count++;
+
+ /* chash_table_copy_in should never fail unless we're losing count */
+ WARN_ON_ONCE(r < 0);
+
+unlock_out:
+ spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+ return r;
+}
+
+/**
+ * amdgpu_ih_clear_fault - Remove a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a page fault has been handled. Any
+ * future interrupt with this key will be processed as a new
+ * page fault.
+ */
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
+{
+ unsigned long flags;
+ int r;
+
+ if (!adev->irq.ih.faults)
+ return;
+
+ spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+ r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
+ if (!WARN_ON_ONCE(r < 0)) {
+ adev->irq.ih.faults->count--;
+ WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
+ }
+
+ spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3de8e74e5b3a..ada89358e220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
+#include <linux/chash.h>
+
struct amdgpu_device;
/*
* vega10+ IH clients
@@ -69,6 +71,13 @@ enum amdgpu_ih_clientid
#define AMDGPU_IH_CLIENTID_LEGACY 0
+#define AMDGPU_PAGEFAULT_HASH_BITS 8
+struct amdgpu_retryfault_hashtable {
+ DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+ spinlock_t lock;
+ int count;
+};
+
/*
* R6xx+ IH ring
*/
@@ -87,6 +96,7 @@ struct amdgpu_ih_ring {
bool use_doorbell;
bool use_bus_addr;
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
+ struct amdgpu_retryfault_hashtable *faults;
};
#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
@@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
int amdgpu_ih_process(struct amdgpu_device *adev);
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 538e5f27d120..47c5ce9807db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -37,6 +37,10 @@
#include <linux/pm_runtime.h>
+#ifdef CONFIG_DRM_AMD_DC
+#include "amdgpu_dm_irq.h"
+#endif
+
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- if (!adev->enable_virtual_display)
- /* Disable vblank irqs aggressively for power-saving */
- adev->ddev->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
- if (r) {
- return r;
- }
-
/* enable msi */
adev->irq.msi_enabled = false;
@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
- INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ if (!adev->enable_virtual_display)
+ /* Disable vblank irqs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev->ddev->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* pre DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_hotplug_work_func);
+ }
+
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627ae83e..0cfc68db575b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -65,6 +65,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync);
+ (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
return 0;
}
@@ -103,6 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync);
@@ -139,6 +141,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring,
+ amd_sched_get_job_priority(&job->base));
amd_sched_entity_push_job(&job->base);
return 0;
@@ -177,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_device *adev;
struct amdgpu_job *job;
- struct amdgpu_fpriv *fpriv = NULL;
int r;
if (!sched_job) {
@@ -186,23 +190,25 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
+ adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- if (job->vm)
- fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
/* skip ib schedule when vram is lost */
- if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
+ dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
DRM_ERROR("Skip scheduling IBs!\n");
- else {
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+ } else {
+ r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+ &fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
}
/* if gpu reset, hw fence will be replaced here */
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);
+
amdgpu_job_free_resources(job);
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e16229000a98..720139e182a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
+#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -269,7 +270,6 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -282,8 +282,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
@@ -765,6 +763,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VRAM_LOST_COUNTER:
+ ui32 = atomic_read(&adev->vram_lost_counter);
+ return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -791,12 +792,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
vga_switcheroo_process_delayed_switch();
}
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv)
-{
- return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
-}
-
/**
* amdgpu_driver_open_kms - drm callback for open
*
@@ -825,7 +820,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
r = amdgpu_vm_init(adev, &fpriv->vm,
- AMDGPU_VM_CONTEXT_GFX);
+ AMDGPU_VM_CONTEXT_GFX, 0);
if (r) {
kfree(fpriv);
goto out_suspend;
@@ -841,8 +836,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
- if (r)
+ if (r) {
+ amdgpu_vm_fini(adev, &fpriv->vm);
+ kfree(fpriv);
goto out_suspend;
+ }
}
mutex_init(&fpriv->bo_list_lock);
@@ -850,7 +848,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
- fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
file_priv->driver_priv = fpriv;
out_suspend:
@@ -1020,7 +1017,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -1031,7 +1030,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3b0f2ec6eec7..bd67f4cb8e6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -50,8 +50,10 @@ struct amdgpu_mn {
struct hlist_node node;
/* objects protected by lock */
- struct mutex lock;
+ struct rw_semaphore lock;
struct rb_root_cached objects;
+ struct mutex read_lock;
+ atomic_t recursion;
};
struct amdgpu_mn_node {
@@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node,
&rmn->objects.rb_root, it.rb) {
@@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
kfree(node);
}
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
@@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
schedule_work(&rmn->work);
}
+
+/**
+ * amdgpu_mn_lock - take the write side lock for this mn
+ */
+void amdgpu_mn_lock(struct amdgpu_mn *mn)
+{
+ if (mn)
+ down_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_unlock - drop the write side lock for this mn
+ */
+void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+{
+ if (mn)
+ up_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+ mutex_lock(&rmn->read_lock);
+ if (atomic_inc_return(&rmn->recursion) == 1)
+ down_read_non_owner(&rmn->lock);
+ mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+ if (atomic_dec_return(&rmn->recursion) == 0)
+ up_read_non_owner(&rmn->lock);
+}
+
/**
* amdgpu_mn_invalidate_node - unmap all BOs of a node
*
@@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue;
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- amdgpu_bo_unreserve(bo);
+ amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
}
}
@@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
- mutex_lock(&rmn->lock);
+ amdgpu_mn_read_lock(rmn);
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
amdgpu_mn_invalidate_node(node, start, end);
}
+}
- mutex_unlock(&rmn->lock);
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mn: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+
+ amdgpu_mn_read_unlock(rmn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+ .invalidate_range_end = amdgpu_mn_invalidate_range_end,
};
/**
@@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
*
* Creates a notifier context for current->mm.
*/
-static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
struct mm_struct *mm = current->mm;
struct amdgpu_mn *rmn;
@@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
- mutex_init(&rmn->lock);
+ init_rwsem(&rmn->lock);
rmn->objects = RB_ROOT_CACHED;
+ mutex_init(&rmn->read_lock);
+ atomic_set(&rmn->recursion, 0);
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
@@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
@@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) {
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
return -ENOMEM;
}
}
@@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects);
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
return 0;
}
@@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return;
}
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node);
}
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
new file mode 100644
index 000000000000..d0095a3793b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_MN_H__
+#define __AMDGPU_MN_H__
+
+/*
+ * MMU Notifier
+ */
+struct amdgpu_mn;
+
+#if defined(CONFIG_MMU_NOTIFIER)
+void amdgpu_mn_lock(struct amdgpu_mn *mn);
+void amdgpu_mn_unlock(struct amdgpu_mn *mn);
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
+int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
+void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+#else
+static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
+static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
+static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+{
+ return NULL;
+}
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 2af2678ddaf6..ffde1e9666e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,11 +38,15 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
+#include <drm/drm_dp_mst_helper.h>
+#include "modules/inc/mod_freesync.h"
+
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
@@ -53,9 +57,13 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
+#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
+
+#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
+#define AMDGPU_MAX_PLANES 6
#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
@@ -292,6 +300,30 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
+ /* it is used to enter or exit into free sync mode */
+ int (*notify_freesync)(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+ /* it is used to allow enablement of freesync mode */
+ int (*set_freesync_property)(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+
+
+};
+
+struct amdgpu_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+
+ /* caching for later use */
+ uint64_t address;
+};
+
+struct amdgpu_fbdev {
+ struct drm_fb_helper helper;
+ struct amdgpu_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct amdgpu_device *adev;
};
struct amdgpu_mode_info {
@@ -299,6 +331,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -328,6 +361,7 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ const enum drm_plane_type *plane_type;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -400,6 +434,14 @@ struct amdgpu_crtc {
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
+
+ int otg_inst;
+ struct drm_pending_vblank_event *event;
+};
+
+struct amdgpu_plane {
+ struct drm_plane base;
+ enum drm_plane_type plane_type;
};
struct amdgpu_encoder_atom_dig {
@@ -489,6 +531,19 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
+struct amdgpu_dm_dp_aux {
+ struct drm_dp_aux aux;
+ struct ddc_service *ddc_service;
+};
+
+struct amdgpu_i2c_adapter {
+ struct i2c_adapter base;
+
+ struct ddc_service *ddc_service;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -500,6 +555,14 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+ const struct dc_stream *stream;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -510,11 +573,39 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+ struct semaphore mst_sem;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq ;
+ int max_vfreq ;
+ int pixel_clock_mhz;
+
+ /*freesync caps*/
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
};
-struct amdgpu_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
+/* TODO: start to use this struct and remove same field from base one */
+struct amdgpu_mst_connector {
+ struct amdgpu_connector base;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ bool is_mst_connector;
+ struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9e495da0bb03..ea25164e7f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -40,9 +40,7 @@
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *bo;
-
- bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
amdgpu_bo_kunmap(bo);
@@ -64,11 +62,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
return false;
}
-static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
- struct ttm_placement *placement,
- struct ttm_place *places,
- u32 domain, u64 flags)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct ttm_placement *placement = &abo->placement;
+ struct ttm_place *places = abo->placements;
+ u64 flags = abo->flags;
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
@@ -151,27 +150,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
placement->busy_placement = places;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-
- amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
- domain, abo->flags);
-}
-
-static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
- struct ttm_placement *placement)
-{
- BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
-
- memcpy(bo->placements, placement->placement,
- placement->num_placement * sizeof(struct ttm_place));
- bo->placement.num_placement = placement->num_placement;
- bo->placement.num_busy_placement = placement->num_busy_placement;
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
-}
-
/**
* amdgpu_bo_create_reserved - create reserved BO for kernel use
*
@@ -303,14 +281,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- uint64_t init_value,
- struct amdgpu_bo **bo_ptr)
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain, u64 flags,
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ uint64_t init_value,
+ struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
enum ttm_bo_type type;
@@ -384,13 +361,17 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif
- amdgpu_fill_placement_to_bo(bo, placement);
- /* Kernel allocation are uninterruptible */
+ bo->tbo.bdev = &adev->mman.bdev;
+ amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+ /* Kernel allocation are uninterruptible */
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
+ if (unlikely(r != 0))
+ return r;
+
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
@@ -400,9 +381,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
else
amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
- if (unlikely(r != 0))
- return r;
-
if (kernel)
bo->tbo.priority = 1;
@@ -442,27 +420,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align,
struct amdgpu_bo *bo)
{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
int r;
if (bo->shadow)
return 0;
- memset(&placements, 0, sizeof(placements));
- amdgpu_ttm_placement_init(adev, &placement, placements,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW,
- NULL, &placement,
- bo->tbo.resv,
- 0,
- &bo->shadow);
+ r = amdgpu_bo_do_create(adev, size, byte_align, true,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW,
+ NULL, bo->tbo.resv, 0,
+ &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@@ -484,18 +452,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- memset(&placements, 0, sizeof(placements));
- amdgpu_ttm_placement_init(adev, &placement, placements,
- domain, parent_flags);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
- parent_flags, sg, &placement, resv,
- init_value, bo_ptr);
+ r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
+ parent_flags, sg, resv, init_value, bo_ptr);
if (r)
return r;
@@ -672,7 +633,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
- unsigned fpfn, lpfn;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
@@ -704,22 +664,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ /* force to pin into visible video ram */
+ if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
- /* force to pin into visible video ram */
- if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
- (!max_offset || max_offset >
- adev->mc.visible_vram_size)) {
- if (WARN_ON_ONCE(min_offset >
- adev->mc.visible_vram_size))
- return -EINVAL;
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- } else {
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = max_offset >> PAGE_SHIFT;
- }
+ unsigned fpfn, lpfn;
+
+ fpfn = min_offset >> PAGE_SHIFT;
+ lpfn = max_offset >> PAGE_SHIFT;
+
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
if (!bo->placements[i].lpfn ||
@@ -928,8 +882,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
- abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(adev, abo);
+ abo = ttm_to_amdgpu_bo(bo);
+ amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo);
@@ -955,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return 0;
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a288fa6d72c8..428aae048f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -35,6 +35,7 @@
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
+ struct amdgpu_bo_va *bo_va;
struct list_head list;
struct rb_node rb;
uint64_t start;
@@ -49,12 +50,17 @@ struct amdgpu_bo_va {
struct amdgpu_vm_bo_base base;
/* protected by bo being reserved */
- struct dma_fence *last_pt_update;
unsigned ref_count;
+ /* all other members protected by the VM PD being reserved */
+ struct dma_fence *last_pt_update;
+
/* mappings for this bo_va */
struct list_head invalids;
struct list_head valids;
+
+ /* If the mappings are cleared or filled */
+ bool cleared;
};
struct amdgpu_bo {
@@ -88,6 +94,11 @@ struct amdgpu_bo {
};
};
+static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
+{
+ return container_of(tbo, struct amdgpu_bo, tbo);
+}
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -182,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
}
}
+/**
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
+ */
+static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+}
+
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@@ -189,14 +208,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr);
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- uint64_t init_value,
- struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7df503aedb69..f8edf5483f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1,4 +1,6 @@
/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -64,17 +66,13 @@ static const struct cg_flag_name clocks[] = {
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
- if (adev->pp_enabled)
- /* TODO */
- return;
-
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
adev->pm.dpm.ac_power = true;
else
adev->pm.dpm.ac_power = false;
- if (adev->pm.funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
@@ -88,9 +86,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->get_current_power_state)
pm = amdgpu_dpm_get_current_power_state(adev);
- } else
+ else
pm = adev->pm.dpm.user_state;
return snprintf(buf, PAGE_SIZE, "%s\n",
@@ -118,8 +116,8 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail;
}
- if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
@@ -140,13 +138,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level;
+ enum amd_dpm_forced_level level = 0xff;
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
- level = amdgpu_dpm_get_performance_level(adev);
+ if (adev->powerplay.pp_funcs->get_performance_level)
+ level = amdgpu_dpm_get_performance_level(adev);
+ else
+ level = adev->pm.dpm.forced_level;
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -167,7 +169,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level;
+ enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
/* Can't force performance level when the card is off */
@@ -175,7 +177,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- current_level = amdgpu_dpm_get_performance_level(adev);
+ if (adev->powerplay.pp_funcs->get_performance_level)
+ current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -203,9 +206,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level)
return count;
- if (adev->pp_enabled)
- amdgpu_dpm_force_performance_level(adev, level);
- else {
+ if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
@@ -233,7 +234,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -257,8 +258,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0;
- if (adev->pp_enabled) {
-
+ if (adev->powerplay.pp_funcs->get_current_power_state
+ && adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
@@ -280,25 +281,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- enum amd_pm_state_type pm = 0;
- int i;
-
- if (adev->pp_force_state_enabled && adev->pp_enabled) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
-
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
- if (i == data.nums)
- i = -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
-
- } else
+ if (adev->pp_force_state_enabled)
+ return amdgpu_get_pp_cur_state(dev, attr, buf);
+ else
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -315,7 +301,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
- else if (adev->pp_enabled) {
+ else if (adev->powerplay.pp_funcs->dispatch_tasks &&
+ adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
@@ -330,7 +317,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev,
- AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
adev->pp_force_state_enabled = true;
}
}
@@ -347,7 +334,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table);
else
return 0;
@@ -368,7 +355,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
return count;
@@ -380,14 +367,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
@@ -416,10 +400,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
mask |= 1 << level;
}
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
+
fail:
return count;
}
@@ -430,14 +413,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
-
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -465,11 +445,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
}
mask |= 1 << level;
}
-
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
+
fail:
return count;
}
@@ -480,14 +458,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
-
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -515,11 +490,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
}
mask |= 1 << level;
}
-
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+
fail:
return count;
}
@@ -532,10 +505,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
- else if (adev->pm.funcs->get_sclk_od)
- value = adev->pm.funcs->get_sclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -556,12 +527,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
-
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->set_sclk_od)
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_sclk_od) {
- adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@@ -578,10 +549,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
- else if (adev->pm.funcs->get_mclk_od)
- value = adev->pm.funcs->get_mclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -602,12 +571,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
-
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->set_mclk_od)
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_mclk_od) {
- adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@@ -621,14 +590,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- int ret = 0;
+ int ret = 0xff;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_power_profile_state)
ret = amdgpu_dpm_get_power_profile_state(
adev, query);
- else if (adev->pm.funcs->get_power_profile_state)
- ret = adev->pm.funcs->get_power_profile_state(
- adev, query);
if (ret)
return ret;
@@ -675,15 +641,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
char *sub_str, buf_cpy[128], *tmp_str;
const char delimiter[3] = {' ', '\n', '\0'};
long int value;
- int ret = 0;
+ int ret = 0xff;
if (strncmp("reset", buf, strlen("reset")) == 0) {
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->reset_power_profile_state)
ret = amdgpu_dpm_reset_power_profile_state(
adev, request);
- else if (adev->pm.funcs->reset_power_profile_state)
- ret = adev->pm.funcs->reset_power_profile_state(
- adev, request);
if (ret) {
count = -EINVAL;
goto fail;
@@ -692,12 +655,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
}
if (strncmp("set", buf, strlen("set")) == 0) {
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->set_power_profile_state)
ret = amdgpu_dpm_set_power_profile_state(
adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
+
if (ret) {
count = -EINVAL;
goto fail;
@@ -745,13 +706,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
loop++;
}
-
- if (adev->pp_enabled)
- ret = amdgpu_dpm_set_power_profile_state(
- adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
+ if (adev->powerplay.pp_funcs->set_power_profile_state)
+ ret = amdgpu_dpm_set_power_profile_state(adev, request);
if (ret)
count = -EINVAL;
@@ -831,7 +787,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
+ if (!adev->powerplay.pp_funcs->get_temperature)
temp = 0;
else
temp = amdgpu_dpm_get_temperature(adev);
@@ -862,7 +818,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
- if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
return -EINVAL;
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
@@ -879,7 +835,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err;
int value;
- if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
return -EINVAL;
err = kstrtoint(buf, 10, &value);
@@ -919,9 +875,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+ if (err)
+ return err;
+ }
return count;
}
@@ -932,11 +890,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
- u32 speed;
+ u32 speed = 0;
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+ if (err)
+ return err;
+ }
speed = (speed * 255) / 100;
@@ -949,11 +909,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
- u32 speed;
+ u32 speed = 0;
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
+ if (err)
+ return err;
+ }
return sprintf(buf, "%i\n", speed);
}
@@ -986,6 +948,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* no skipping for powerplay */
+ if (adev->powerplay.cgs_device)
+ return effective_mode;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -996,9 +962,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
- if (adev->pp_enabled)
- return effective_mode;
-
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@@ -1008,21 +971,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->pm.funcs->get_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->pm.funcs->get_fan_control_mode &&
+ (!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;
- if ((!adev->pm.funcs->set_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->pm.funcs->set_fan_control_mode &&
+ (!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
/* hide max/min values if we can't both query and manage the fan */
- if ((!adev->pm.funcs->set_fan_speed_percent &&
- !adev->pm.funcs->get_fan_speed_percent) &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
@@ -1055,7 +1018,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;
- if (adev->pm.funcs->get_temperature) {
+ if (adev->powerplay.pp_funcs->get_temperature) {
int temp = amdgpu_dpm_get_temperature(adev);
if (temp < adev->pm.dpm.thermal.min_temp)
@@ -1087,7 +1050,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
true : false;
/* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->pm.funcs->vblank_too_short) {
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
if (amdgpu_dpm_vblank_too_short(adev))
single_display = false;
}
@@ -1216,7 +1179,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
- bool equal;
+ bool equal = false;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1236,7 +1199,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- if (amdgpu_dpm == 1) {
+ if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
printk("switching to power state:\n");
@@ -1245,15 +1208,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
-
- amdgpu_dpm_display_configuration_changed(adev);
+ if (adev->powerplay.pp_funcs->display_configuration_changed)
+ amdgpu_dpm_display_configuration_changed(adev);
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
- equal = false;
+ if (adev->powerplay.pp_funcs->check_state_equal) {
+ if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+ equal = false;
+ }
if (equal)
return;
@@ -1264,7 +1229,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- if (adev->pm.funcs->force_performance_level) {
+ if (adev->powerplay.pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */
@@ -1280,7 +1245,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+ if (adev->powerplay.pp_funcs->powergate_uvd) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
@@ -1302,7 +1267,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+ if (adev->powerplay.pp_funcs->powergate_vce) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
@@ -1337,8 +1302,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
int i;
- if (adev->pp_enabled)
- /* TO DO */
+ if (adev->powerplay.pp_funcs->print_power_state == NULL)
return;
for (i = 0; i < adev->pm.dpm.num_ps; i++)
@@ -1353,10 +1317,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized)
return 0;
- if (!adev->pp_enabled) {
- if (adev->pm.funcs->get_temperature == NULL)
- return 0;
- }
+ if (adev->pm.dpm_enabled == 0)
+ return 0;
+
+ if (adev->powerplay.pp_funcs->get_temperature == NULL)
+ return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
@@ -1379,27 +1344,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
- if (adev->pp_enabled) {
- ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
- if (ret) {
- DRM_ERROR("failed to create device file pp_num_states\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_cur_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_force_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_table);
- if (ret) {
- DRM_ERROR("failed to create device file pp_table\n");
- return ret;
- }
+
+ ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_num_states\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_cur_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_force_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_table);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_table\n");
+ return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
@@ -1455,16 +1419,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ if (adev->pm.dpm_enabled == 0)
+ return;
+
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
device_remove_file(adev->dev, &dev_attr_power_dpm_state);
device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (adev->pp_enabled) {
- device_remove_file(adev->dev, &dev_attr_pp_num_states);
- device_remove_file(adev->dev, &dev_attr_pp_cur_state);
- device_remove_file(adev->dev, &dev_attr_pp_force_state);
- device_remove_file(adev->dev, &dev_attr_pp_table);
- }
+
+ device_remove_file(adev->dev, &dev_attr_pp_num_states);
+ device_remove_file(adev->dev, &dev_attr_pp_cur_state);
+ device_remove_file(adev->dev, &dev_attr_pp_force_state);
+ device_remove_file(adev->dev, &dev_attr_pp_table);
+
device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
@@ -1495,8 +1462,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0;
@@ -1505,7 +1472,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
+ if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
@@ -1630,15 +1597,15 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
- } else if (adev->pp_enabled) {
- return amdgpu_debugfs_pm_info_pp(m, adev);
- } else {
+ } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
- if (adev->pm.funcs->debugfs_print_current_performance_level)
- adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
+ if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
+ adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
+ } else {
+ return amdgpu_debugfs_pm_info_pp(m, adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b7e1c026c0c8..5f5aa5fddc16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -34,24 +34,6 @@
#include "cik_dpm.h"
#include "vi_dpm.h"
-static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
-{
- struct amd_pp_init pp_init;
- struct amd_powerplay *amd_pp;
- int ret;
-
- amd_pp = &(adev->powerplay);
- pp_init.chip_family = adev->family;
- pp_init.chip_id = adev->asic_type;
- pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
- pp_init.feature_mask = amdgpu_pp_feature_mask;
- pp_init.device = amdgpu_cgs_create_device(adev);
- ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
- if (ret)
- return -EINVAL;
- return 0;
-}
-
static int amdgpu_pp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -59,7 +41,6 @@ static int amdgpu_pp_early_init(void *handle)
int ret = 0;
amd_pp = &(adev->powerplay);
- adev->pp_enabled = false;
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
@@ -73,9 +54,7 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_STONEY:
case CHIP_VEGA10:
case CHIP_RAVEN:
- adev->pp_enabled = true;
- if (amdgpu_create_pp_handle(adev))
- return -EINVAL;
+ amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs;
break;
@@ -87,17 +66,26 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_OLAND:
case CHIP_HAINAN:
amd_pp->ip_funcs = &si_dpm_ip_funcs;
+ amd_pp->pp_funcs = &si_dpm_funcs;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+ if (amdgpu_dpm == -1) {
+ amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+ amd_pp->pp_funcs = &ci_dpm_funcs;
+ } else {
+ amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
+ amd_pp->ip_funcs = &pp_ip_funcs;
+ amd_pp->pp_funcs = &pp_dpm_funcs;
+ }
break;
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
+ amd_pp->pp_funcs = &kv_dpm_funcs;
break;
#endif
default:
@@ -107,12 +95,9 @@ static int amdgpu_pp_early_init(void *handle)
if (adev->powerplay.ip_funcs->early_init)
ret = adev->powerplay.ip_funcs->early_init(
- adev->powerplay.pp_handle);
+ amd_pp->cgs_device ? amd_pp->cgs_device :
+ amd_pp->pp_handle);
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
return ret;
}
@@ -126,11 +111,6 @@ static int amdgpu_pp_late_init(void *handle)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
- if (adev->pp_enabled && adev->pm.dpm_enabled) {
- amdgpu_pm_sysfs_init(adev);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
- }
-
return ret;
}
@@ -165,21 +145,13 @@ static int amdgpu_pp_hw_init(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_init_bo(adev);
if (adev->powerplay.ip_funcs->hw_init)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
-
- if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
- adev->pm.dpm_enabled = true;
-
return ret;
}
@@ -188,14 +160,11 @@ static int amdgpu_pp_hw_fini(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pp_enabled && adev->pm.dpm_enabled)
- amdgpu_pm_sysfs_fini(adev);
-
if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_fini_bo(adev);
return ret;
@@ -209,9 +178,8 @@ static void amdgpu_pp_late_fini(void *handle)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
-
- if (adev->pp_enabled)
- amd_powerplay_destroy(adev->powerplay.pp_handle);
+ if (adev->powerplay.cgs_device)
+ amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
}
static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 5b3f92891f89..ae9c106979d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ unsigned asize = amdgpu_bo_size(bo);
+ int ret;
+
+ if (!vma->vm_file)
+ return -ENODEV;
+
+ if (adev == NULL)
+ return -ENODEV;
+
+ /* Check for valid size. */
+ if (asize < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+ (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+ return -EPERM;
+ }
+ vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
+
+ /* prime mmap does not need to check access, so allow here */
+ ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
+ drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
+
+ return ret;
+}
+
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -135,9 +169,14 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct dma_buf *buf;
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+ bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ buf = drm_gem_prime_export(dev, gobj, flags);
+ if (!IS_ERR(buf))
+ buf->file->f_mapping = dev->anon_inode->i_mapping;
+ return buf;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8c2204c7b384..447d446b5015 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -57,21 +57,23 @@ static int psp_sw_init(void *handle)
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
+ psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
+ psp->mode1_reset = psp_v3_1_mode1_reset;
break;
case CHIP_RAVEN:
-#if 0
psp->init_microcode = psp_v10_0_init_microcode;
-#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create;
+ psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
+ psp->mode1_reset = psp_v10_0_mode1_reset;
break;
default:
return -EINVAL;
@@ -90,6 +92,12 @@ static int psp_sw_init(void *handle)
static int psp_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
return 0;
}
@@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp)
static int psp_hw_start(struct psp_context *psp)
{
+ struct amdgpu_device *adev = psp->adev;
int ret;
- ret = psp_bootloader_load_sysdrv(psp);
- if (ret)
- return ret;
+ if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ ret = psp_bootloader_load_sysdrv(psp);
+ if (ret)
+ return ret;
- ret = psp_bootloader_load_sos(psp);
- if (ret)
- return ret;
+ ret = psp_bootloader_load_sos(psp);
+ if (ret)
+ return ret;
+ }
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret)
@@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle)
static int psp_suspend(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
+
+ ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
+ if (ret) {
+ DRM_ERROR("PSP ring stop failed\n");
+ return ret;
+ }
+
return 0;
}
@@ -487,6 +508,22 @@ failed:
return ret;
}
+static bool psp_check_reset(void* handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->flags & AMD_IS_APU)
+ return true;
+
+ return false;
+}
+
+static int psp_reset(void* handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ return psp_mode1_reset(&adev->psp);
+}
+
static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type)
{
@@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend,
.resume = psp_resume,
.is_idle = NULL,
+ .check_soft_reset = psp_check_reset,
.wait_for_idle = NULL,
- .soft_reset = NULL,
+ .soft_reset = psp_reset,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 538fa9dbfb21..ce4654550416 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -66,6 +66,8 @@ struct psp_context
struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
+ int (*ring_stop)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
@@ -74,6 +76,7 @@ struct psp_context
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
+ int (*mode1_reset)(struct psp_context *psp);
/* fence buffer */
struct amdgpu_bo *fw_pri_bo;
@@ -123,6 +126,7 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
+#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
@@ -136,6 +140,8 @@ struct amdgpu_psp_funcs {
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
+#define psp_mode1_reset(psp) \
+ ((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index befc09b68543..93d86619e802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
static int amdgpu_identity_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int ring,
+ u32 ring,
struct amdgpu_ring **out_ring)
{
switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int user_ring,
+ u32 user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
@@ -139,7 +139,7 @@ static int amdgpu_lru_map(struct amdgpu_device *adev,
}
r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
- j, out_ring);
+ j, lru_pipe_order, out_ring);
if (r)
return r;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
*/
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
int r, ip_num_rings;
@@ -284,8 +284,10 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
+ r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
+ break;
case AMDGPU_HW_IP_COMPUTE:
- r = amdgpu_lru_map(adev, mapper, ring, out_ring);
+ r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
break;
default:
*out_ring = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5ce65280b396..a98fbbb4739f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
if (ring->funcs->end_use)
ring->funcs->end_use(ring);
- amdgpu_ring_lru_touch(ring->adev, ring);
+ if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
+ amdgpu_ring_lru_touch(ring->adev, ring);
}
/**
@@ -155,6 +156,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_ring_priority_put - restore a ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Release a request for executing at @priority
+ */
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ int i;
+
+ if (!ring->funcs->set_priority)
+ return;
+
+ if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
+ return;
+
+ /* no need to restore if the job is already at the lowest priority */
+ if (priority == AMD_SCHED_PRIORITY_NORMAL)
+ return;
+
+ mutex_lock(&ring->priority_mutex);
+ /* something higher prio is executing, no need to decay */
+ if (ring->priority > priority)
+ goto out_unlock;
+
+ /* decay priority to the next level with a job available */
+ for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+ if (i == AMD_SCHED_PRIORITY_NORMAL
+ || atomic_read(&ring->num_jobs[i])) {
+ ring->priority = i;
+ ring->funcs->set_priority(ring, i);
+ break;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&ring->priority_mutex);
+}
+
+/**
+ * amdgpu_ring_priority_get - change the ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Request a ring's priority to be raised to @priority (refcounted).
+ */
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ if (!ring->funcs->set_priority)
+ return;
+
+ atomic_inc(&ring->num_jobs[priority]);
+
+ mutex_lock(&ring->priority_mutex);
+ if (priority <= ring->priority)
+ goto out_unlock;
+
+ ring->priority = priority;
+ ring->funcs->set_priority(ring, priority);
+
+out_unlock:
+ mutex_unlock(&ring->priority_mutex);
+}
+
+/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
@@ -169,7 +239,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned max_dw, struct amdgpu_irq_src *irq_src,
unsigned irq_type)
{
- int r;
+ int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission;
/* Set the hw submission limit higher for KIQ because
@@ -247,9 +317,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
+ ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+ mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
+ for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+ atomic_set(&ring->num_jobs[i], 0);
+
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
@@ -315,14 +390,16 @@ static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
* @type: amdgpu_ring_type enum
* @blacklist: blacklisted ring ids array
* @num_blacklist: number of entries in @blacklist
+ * @lru_pipe_order: find a ring from the least recently used pipe
* @ring: output ring
*
* Retrieve the amdgpu_ring structure for the least recently used ring of
* a specific IP block (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
- int num_blacklist, struct amdgpu_ring **ring)
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+ int *blacklist, int num_blacklist,
+ bool lru_pipe_order, struct amdgpu_ring **ring)
{
struct amdgpu_ring *entry;
@@ -337,10 +414,23 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
continue;
- *ring = entry;
- amdgpu_ring_lru_touch_locked(adev, *ring);
- break;
+ if (!*ring) {
+ *ring = entry;
+
+ /* We are done for ring LRU */
+ if (!lru_pipe_order)
+ break;
+ }
+
+ /* Move all rings on the same pipe to the end of the list */
+ if (entry->pipe == (*ring)->pipe)
+ amdgpu_ring_lru_touch_locked(adev, entry);
}
+
+ /* Move the ring we found to the end of the list */
+ if (*ring)
+ amdgpu_ring_lru_touch_locked(adev, *ring);
+
spin_unlock(&adev->ring_lru_list_lock);
if (!*ring) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 322d25299a00..b18c2b96691f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -24,6 +24,7 @@
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__
+#include <drm/amdgpu_drm.h>
#include "gpu_scheduler.h"
/* max number of rings */
@@ -56,6 +57,7 @@ struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
+struct amdgpu_job;
/*
* Fences.
@@ -88,8 +90,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
@@ -147,6 +153,9 @@ struct amdgpu_ring_funcs {
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ /* priority functions */
+ void (*set_priority) (struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
};
struct amdgpu_ring {
@@ -187,6 +196,12 @@ struct amdgpu_ring {
volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng;
bool has_compute_vm_bug;
+
+ atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
+ struct mutex priority_mutex;
+ /* protected by priority_mutex */
+ int priority;
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif
@@ -197,12 +212,17 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
- int num_blacklist, struct amdgpu_ring **ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+ int *blacklist, int num_blacklist,
+ bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..290cc3f9c433
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
+ */
+
+#include <linux/fdtable.h>
+#include <linux/pid.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#include "amdgpu_vm.h"
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+{
+ switch (amdgpu_priority) {
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return AMD_SCHED_PRIORITY_HIGH_HW;
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return AMD_SCHED_PRIORITY_HIGH_SW;
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ return AMD_SCHED_PRIORITY_NORMAL;
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ return AMD_SCHED_PRIORITY_LOW;
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ return AMD_SCHED_PRIORITY_UNSET;
+ default:
+ WARN(1, "Invalid context priority %d\n", amdgpu_priority);
+ return AMD_SCHED_PRIORITY_INVALID;
+ }
+}
+
+static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ int fd,
+ enum amd_sched_priority priority)
+{
+ struct file *filp = fcheck(fd);
+ struct drm_file *file;
+ struct pid *pid;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ uint32_t id;
+
+ if (!filp)
+ return -EINVAL;
+
+ pid = get_pid(((struct drm_file *)filp->private_data)->pid);
+
+ mutex_lock(&adev->ddev->filelist_mutex);
+ list_for_each_entry(file, &adev->ddev->filelist, lhead) {
+ if (file->pid != pid)
+ continue;
+
+ fpriv = file->driver_priv;
+ idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ amdgpu_ctx_priority_override(ctx, priority);
+ }
+ mutex_unlock(&adev->ddev->filelist_mutex);
+
+ put_pid(pid);
+
+ return 0;
+}
+
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_sched *args = data;
+ struct amdgpu_device *adev = dev->dev_private;
+ enum amd_sched_priority priority;
+ int r;
+
+ priority = amdgpu_to_sched_priority(args->in.priority);
+ if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_process_priority_override(adev,
+ args->in.fd,
+ priority);
+ break;
+ default:
+ DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index 9ef96aab3f24..b28c067d3822 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,16 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
*/
-#ifndef _EVENTINIT_H_
-#define _EVENTINIT_H_
+#ifndef __AMDGPU_SCHED_H__
+#define __AMDGPU_SCHED_H__
-#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4
+#include <drm/drmP.h>
-void pem_init_feature_info(struct pp_eventmgr *eventmgr);
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
-int pem_register_interrupts(struct pp_eventmgr *eventmgr);
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
-#endif /* _EVENTINIT_H_ */
+#endif // __AMDGPU_SCHED_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586f44312f9..a4bf21f8f1c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @shared: true if we should only sync to the exclusive fence
+ * @explicit_sync: true if we should only sync to the exclusive fence
*
* Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
- void *owner)
+ void *owner, bool explicit_sync)
{
struct reservation_object_list *flist;
struct dma_fence *f;
@@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f);
+ if (explicit_sync)
+ return r;
+
flist = reservation_object_get_list(resv);
if (!flist || r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index dc7687993317..70d7e3a279a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
- void *owner);
+ void *owner,
+ bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 34c99a3c8d2d..06525f2c36c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
@@ -15,62 +37,6 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_ttm_tt_populate,
- TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
- TP_ARGS(adev, dma_address, phys_address),
- TP_STRUCT__entry(
- __field(uint16_t, domain)
- __field(uint8_t, bus)
- __field(uint8_t, slot)
- __field(uint8_t, func)
- __field(uint64_t, dma)
- __field(uint64_t, phys)
- ),
- TP_fast_assign(
- __entry->domain = pci_domain_nr(adev->pdev->bus);
- __entry->bus = adev->pdev->bus->number;
- __entry->slot = PCI_SLOT(adev->pdev->devfn);
- __entry->func = PCI_FUNC(adev->pdev->devfn);
- __entry->dma = dma_address;
- __entry->phys = phys_address;
- ),
- TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
- (unsigned)__entry->domain,
- (unsigned)__entry->bus,
- (unsigned)__entry->slot,
- (unsigned)__entry->func,
- (unsigned long long)__entry->dma,
- (unsigned long long)__entry->phys)
-);
-
-TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
- TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
- TP_ARGS(adev, dma_address, phys_address),
- TP_STRUCT__entry(
- __field(uint16_t, domain)
- __field(uint8_t, bus)
- __field(uint8_t, slot)
- __field(uint8_t, func)
- __field(uint64_t, dma)
- __field(uint64_t, phys)
- ),
- TP_fast_assign(
- __entry->domain = pci_domain_nr(adev->pdev->bus);
- __entry->bus = adev->pdev->bus->number;
- __entry->slot = PCI_SLOT(adev->pdev->devfn);
- __entry->func = PCI_FUNC(adev->pdev->devfn);
- __entry->dma = dma_address;
- __entry->phys = phys_address;
- ),
- TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
- (unsigned)__entry->domain,
- (unsigned)__entry->bus,
- (unsigned)__entry->slot,
- (unsigned)__entry->func,
- (unsigned long long)__entry->dma,
- (unsigned long long)__entry->phys)
-);
-
TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
@@ -474,5 +440,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index 89680d554ed8..b160b958e5fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -1,5 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
* Author : Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bc746131987f..ad5bf86ee8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -42,7 +42,9 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
+#include <linux/iommu.h>
#include "amdgpu.h"
+#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
@@ -208,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (adev->mman.buffer_funcs &&
@@ -256,7 +258,7 @@ gtt:
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
@@ -288,97 +290,177 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
return addr;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+/**
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node
+ * corresponding to @offset. It also modifies the offset to be
+ * within the drm_mm_node returned
+ */
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+ unsigned long *offset)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct drm_mm_node *mm_node = mem->mm_node;
- struct drm_mm_node *old_mm, *new_mm;
- uint64_t old_start, old_size, new_start, new_size;
- unsigned long num_pages;
- struct dma_fence *fence = NULL;
- int r;
+ while (*offset >= (mm_node->size << PAGE_SHIFT)) {
+ *offset -= (mm_node->size << PAGE_SHIFT);
+ ++mm_node;
+ }
+ return mm_node;
+}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+/**
+ * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ *
+ * The function copies @size bytes from {src->mem + src->offset} to
+ * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
+ * move and different for a BO to BO copy.
+ *
+ * @f: Returns the last fence if multiple jobs are submitted.
+ */
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_copy_mem *src,
+ struct amdgpu_copy_mem *dst,
+ uint64_t size,
+ struct reservation_object *resv,
+ struct dma_fence **f)
+{
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct drm_mm_node *src_mm, *dst_mm;
+ uint64_t src_node_start, dst_node_start, src_node_size,
+ dst_node_size, src_page_offset, dst_page_offset;
+ struct dma_fence *fence = NULL;
+ int r = 0;
+ const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE);
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- old_mm = old_mem->mm_node;
- old_size = old_mm->size;
- old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
+ src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
+ src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
+ src->offset;
+ src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
- new_mm = new_mem->mm_node;
- new_size = new_mm->size;
- new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
+ dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
+ dst->offset;
+ dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
- num_pages = new_mem->num_pages;
mutex_lock(&adev->mman.gtt_window_lock);
- while (num_pages) {
- unsigned long cur_pages = min(min(old_size, new_size),
- (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
- uint64_t from = old_start, to = new_start;
+
+ while (size) {
+ unsigned long cur_size;
+ uint64_t from = src_node_start, to = dst_node_start;
struct dma_fence *next;
- if (old_mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(old_mem)) {
- r = amdgpu_map_buffer(bo, old_mem, cur_pages,
- old_start, 0, ring, &from);
+ /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
+ * begins at an offset, then adjust the size accordingly
+ */
+ cur_size = min3(min(src_node_size, dst_node_size), size,
+ GTT_MAX_BYTES);
+ if (cur_size + src_page_offset > GTT_MAX_BYTES ||
+ cur_size + dst_page_offset > GTT_MAX_BYTES)
+ cur_size -= max(src_page_offset, dst_page_offset);
+
+ /* Map only what needs to be accessed. Map src to window 0 and
+ * dst to window 1
+ */
+ if (src->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+ r = amdgpu_map_buffer(src->bo, src->mem,
+ PFN_UP(cur_size + src_page_offset),
+ src_node_start, 0, ring,
+ &from);
if (r)
goto error;
+ /* Adjust the offset because amdgpu_map_buffer returns
+ * start of mapped page
+ */
+ from += src_page_offset;
}
- if (new_mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(new_mem)) {
- r = amdgpu_map_buffer(bo, new_mem, cur_pages,
- new_start, 1, ring, &to);
+ if (dst->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+ r = amdgpu_map_buffer(dst->bo, dst->mem,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_node_start, 1, ring,
+ &to);
if (r)
goto error;
+ to += dst_page_offset;
}
- r = amdgpu_copy_buffer(ring, from, to,
- cur_pages * PAGE_SIZE,
- bo->resv, &next, false, true);
+ r = amdgpu_copy_buffer(ring, from, to, cur_size,
+ resv, &next, false, true);
if (r)
goto error;
dma_fence_put(fence);
fence = next;
- num_pages -= cur_pages;
- if (!num_pages)
+ size -= cur_size;
+ if (!size)
break;
- old_size -= cur_pages;
- if (!old_size) {
- old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
- old_size = old_mm->size;
+ src_node_size -= cur_size;
+ if (!src_node_size) {
+ src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
+ src->mem);
+ src_node_size = (src_mm->size << PAGE_SHIFT);
} else {
- old_start += cur_pages * PAGE_SIZE;
+ src_node_start += cur_size;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
}
-
- new_size -= cur_pages;
- if (!new_size) {
- new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
- new_size = new_mm->size;
+ dst_node_size -= cur_size;
+ if (!dst_node_size) {
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
+ dst->mem);
+ dst_node_size = (dst_mm->size << PAGE_SHIFT);
} else {
- new_start += cur_pages * PAGE_SIZE;
+ dst_node_start += cur_size;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
}
}
+error:
mutex_unlock(&adev->mman.gtt_window_lock);
+ if (f)
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
+ return r;
+}
+
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_copy_mem src, dst;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ src.bo = bo;
+ dst.bo = bo;
+ src.mem = old_mem;
+ dst.mem = new_mem;
+ src.offset = 0;
+ dst.offset = 0;
+
+ r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+ new_mem->num_pages << PAGE_SHIFT,
+ bo->resv, &fence);
+ if (r)
+ goto error;
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
dma_fence_put(fence);
return r;
error:
- mutex_unlock(&adev->mman.gtt_window_lock);
-
if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -483,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
int r;
/* Can't move a pinned BO */
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
@@ -581,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
- struct drm_mm_node *mm = bo->mem.mm_node;
- uint64_t size = mm->size;
- uint64_t offset = page_offset;
+ struct drm_mm_node *mm;
+ unsigned long offset = (page_offset << PAGE_SHIFT);
- page_offset = do_div(offset, size);
- mm += offset;
- return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+ mm = amdgpu_find_mm_node(&bo->mem, &offset);
+ return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
+ (offset >> PAGE_SHIFT);
}
/*
@@ -608,6 +689,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ uint32_t last_set_pages;
struct list_head list;
};
@@ -621,6 +703,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
flags |= FOLL_WRITE;
+ down_read(&current->mm->mmap_sem);
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -628,8 +712,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr);
- if (!vma || vma->vm_file || vma->vm_end < end)
+ if (!vma || vma->vm_file || vma->vm_end < end) {
+ up_read(&current->mm->mmap_sem);
return -EPERM;
+ }
}
do {
@@ -656,42 +742,44 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
} while (pinned < ttm->num_pages);
+ up_read(&current->mm->mmap_sem);
return 0;
release_pages:
- release_pages(pages, pinned, 0);
+ release_pages(pages, pinned);
+ up_read(&current->mm->mmap_sem);
return r;
}
-static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
- if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
- for (i = 0; i < ttm->num_pages; i++) {
- trace_amdgpu_ttm_tt_populate(
- adev,
- gtt->ttm.dma_address[i],
- page_to_phys(ttm->pages[i]));
- }
+ gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i])
+ put_page(ttm->pages[i]);
+
+ ttm->pages[i] = pages ? pages[i] : NULL;
}
}
-static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
- if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
- for (i = 0; i < ttm->num_pages; i++) {
- trace_amdgpu_ttm_tt_unpopulate(
- adev,
- gtt->ttm.dma_address[i],
- page_to_phys(ttm->pages[i]));
- }
+ for (i = 0; i < ttm->num_pages; ++i) {
+ struct page *page = ttm->pages[i];
+
+ if (!page)
+ continue;
+
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
}
}
@@ -721,8 +809,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
- amdgpu_trace_dma_map(ttm);
-
return 0;
release_sg:
@@ -734,7 +820,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct sg_page_iter sg_iter;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
@@ -747,16 +832,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- set_page_dirty(page);
-
- mark_page_accessed(page);
- put_page(page);
- }
-
- amdgpu_trace_dma_unmap(ttm);
+ amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg);
}
@@ -818,7 +894,6 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg tmp;
-
struct ttm_placement placement;
struct ttm_place placements;
int r;
@@ -834,7 +909,8 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
- placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
+ placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+ TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
@@ -941,8 +1017,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@@ -962,52 +1036,26 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
- r = 0;
- goto trace_mappings;
+ return 0;
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- r = ttm_dma_populate(&gtt->ttm, adev->dev);
- goto trace_mappings;
+ return ttm_dma_populate(&gtt->ttm, adev->dev);
}
#endif
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
-
- r = 0;
-trace_mappings:
- if (likely(!r))
- amdgpu_trace_dma_map(ttm);
- return r;
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
+ amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
@@ -1018,8 +1066,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
- amdgpu_trace_dma_unmap(ttm);
-
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1027,14 +1073,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
+ ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -1051,6 +1090,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0);
+ gtt->last_set_pages = 0;
return 0;
}
@@ -1103,6 +1143,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated;
}
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL || !gtt->userptr)
+ return false;
+
+ return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1143,9 +1193,6 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
- return ttm_bo_eviction_valuable(bo, place);
-
switch (bo->mem.mem_type) {
case TTM_PL_TT:
return true;
@@ -1160,7 +1207,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size;
++node;
}
- break;
+ return false;
default:
break;
@@ -1173,9 +1220,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
unsigned long offset,
void *buf, int len, int write)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
- struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
+ struct drm_mm_node *nodes;
uint32_t value = 0;
int ret = 0;
uint64_t pos;
@@ -1184,10 +1231,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- while (offset >= (nodes->size << PAGE_SHIFT)) {
- offset -= nodes->size << PAGE_SHIFT;
- ++nodes;
- }
+ nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
pos = (nodes->start << PAGE_SHIFT) + offset;
while (len && pos < adev->mc.mc_vram_size) {
@@ -1202,14 +1246,14 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
}
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
if (!write || mask != 0xffffffff)
- value = RREG32(mmMM_DATA);
+ value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
- WREG32(mmMM_DATA, value);
+ WREG32_NO_KIQ(mmMM_DATA, value);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
if (!write) {
@@ -1286,6 +1330,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ /*
+ *The reserved vram for firmware must be pinned to the specified
+ *place on the VRAM, so reserve it early.
+ */
+ r = amdgpu_fw_reserve_vram_init(adev);
+ if (r) {
+ return r;
+ }
+
r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory,
@@ -1510,7 +1563,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
job->vm_needs_flush = vm_needs_flush;
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1557,8 +1611,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/
- uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ uint32_t max_bytes = 8 *
+ adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node;
@@ -1590,8 +1644,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
++mm_node;
}
- /* 10 double words for each SDMA_OP_PTEPDE cmd */
- num_dw = num_loops * 10;
+ /* num of dwords for each SDMA_OP_PTEPDE cmd */
+ num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
/* for IB padding */
num_dw += 64;
@@ -1602,7 +1656,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1697,9 +1751,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32(mmMM_DATA);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ value = RREG32_NO_KIQ(mmMM_DATA);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
r = put_user(value, (uint32_t *)buf);
@@ -1715,10 +1769,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
}
+static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return -ENXIO;
+
+ while (size) {
+ unsigned long flags;
+ uint32_t value;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return result;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
static const struct file_operations amdgpu_ttm_vram_fops = {
.owner = THIS_MODULE,
.read = amdgpu_ttm_vram_read,
- .llseek = default_llseek
+ .write = amdgpu_ttm_vram_write,
+ .llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
@@ -1770,6 +1864,53 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
+static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int r;
+ uint64_t phys;
+ struct iommu_domain *dom;
+
+ // always return 8 bytes
+ if (size != 8)
+ return -EINVAL;
+
+ // only accept page addresses
+ if (*pos & 0xFFF)
+ return -EINVAL;
+
+ dom = iommu_get_domain_for_dev(adev->dev);
+ if (dom)
+ phys = iommu_iova_to_phys(dom, *pos);
+ else
+ phys = *pos;
+
+ r = copy_to_user(buf, &phys, 8);
+ if (r)
+ return -EFAULT;
+
+ return 8;
+}
+
+static const struct file_operations amdgpu_ttm_iova_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_iova_to_phys_read,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+ int domain;
+} ttm_debugfs_entries[] = {
+ { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
+#endif
+ { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
+};
+
#endif
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
@@ -1780,22 +1921,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
- ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_vram_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.mc_vram_size);
- adev->mman.vram = ent;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_gtt_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.gart_size);
- adev->mman.gtt = ent;
+ for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
+ ent = debugfs_create_file(
+ ttm_debugfs_entries[count].name,
+ S_IFREG | S_IRUGO, root,
+ adev,
+ ttm_debugfs_entries[count].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
+ i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+ else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
+ i_size_write(ent->d_inode, adev->mc.gart_size);
+ adev->mman.debugfs_entries[count] = ent;
+ }
-#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@@ -1805,7 +1945,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
-
return 0;
#endif
}
@@ -1813,14 +1952,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
- debugfs_remove(adev->mman.vram);
- adev->mman.vram = NULL;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- debugfs_remove(adev->mman.gtt);
- adev->mman.gtt = NULL;
-#endif
-
+ for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
+ debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 43093bffa2cf..abd4084982a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,6 +24,7 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
+#include "amdgpu.h"
#include "gpu_scheduler.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@@ -45,8 +46,7 @@ struct amdgpu_mman {
bool initialized;
#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
+ struct dentry *debugfs_entries[8];
#endif
/* buffer handling */
@@ -58,6 +58,12 @@ struct amdgpu_mman {
struct amd_sched_entity entity;
};
+struct amdgpu_copy_mem {
+ struct ttm_buffer_object *bo;
+ struct ttm_mem_reg *mem;
+ unsigned long offset;
+};
+
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
@@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct reservation_object *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_copy_mem *src,
+ struct amdgpu_copy_mem *dst,
+ uint64_t size,
+ struct reservation_object *resv,
+ struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint64_t src_data,
struct reservation_object *resv,
@@ -82,4 +94,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ uint32_t flags);
+bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated);
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_mem_reg *mem);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 36c763310df5..65649026b836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_SMU;
case CHIP_VEGA10:
- if (!load_type)
- return AMDGPU_FW_LOAD_DIRECT;
- else
- return AMDGPU_FW_LOAD_PSP;
case CHIP_RAVEN:
- if (load_type != 2)
+ if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
@@ -364,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
- uint64_t fw_mc_addr;
- void *fw_buf_ptr = NULL;
uint64_t fw_offset = 0;
int i, err;
struct amdgpu_firmware_info *ucode = NULL;
@@ -376,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
- err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, bo);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
- goto failed;
- }
+ if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ NULL, NULL, 0, bo);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+ goto failed;
+ }
- err = amdgpu_bo_reserve(*bo, false);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
- goto failed_reserve;
- }
+ err = amdgpu_bo_reserve(*bo, false);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
+ goto failed_reserve;
+ }
- err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- &fw_mc_addr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
- goto failed_pin;
- }
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &adev->firmware.fw_buf_mc);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
+ goto failed_pin;
+ }
- err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
- goto failed_kmap;
- }
+ err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
+ goto failed_kmap;
+ }
- amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unreserve(*bo);
+ }
- memset(fw_buf_ptr, 0, adev->firmware.fw_size);
+ memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
@@ -425,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
header = (const struct common_firmware_header *)ucode->fw->data;
- amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
- (void *)((uint8_t *)fw_buf_ptr + fw_offset));
+ amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
- fw_buf_ptr + fw_offset);
+ amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
}
fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e19928dae8e3..e8bd50cf9785 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
+ int i;
kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
@@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.ring);
+ for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+
release_firmware(adev->uvd.fw);
return 0;
@@ -410,10 +414,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
if (!ctx->parser->adev->uvd.address_64_bit) {
@@ -737,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
start = amdgpu_bo_gpu_offset(bo);
@@ -917,10 +921,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
- r = amdgpu_cs_sysvm_access_required(parser);
- if (r)
- return r;
-
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c855366521ab..2918de2f39ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint64_t addr;
+ int r;
if (index == 0xffffffff)
index = 0;
@@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index);
- mapping = amdgpu_cs_find_mapping(p, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
addr, lo, hi, size, index);
- return -EINVAL;
+ return r;
}
if ((addr + (uint64_t)size) >
@@ -647,15 +648,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r, idx = 0;
+ int i, r = 0, idx = 0;
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
- r = amdgpu_cs_sysvm_access_required(p);
- if (r)
- return r;
-
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
index 45ac91861965..7f7097931c6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
@@ -25,30 +25,26 @@
#include "amdgpu_vf_error.h"
#include "mxgpu_ai.h"
-#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
-
-/* struct error_entry - amdgpu VF error information. */
-struct amdgpu_vf_error_buffer {
- int read_count;
- int write_count;
- uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
- uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
- uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
-};
-
-struct amdgpu_vf_error_buffer admgpu_vf_errors;
-
-
-void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data)
{
int index;
- uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+ uint16_t error_code;
- index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
- admgpu_vf_errors.code [index] = error_code;
- admgpu_vf_errors.flags [index] = error_flags;
- admgpu_vf_errors.data [index] = error_data;
- admgpu_vf_errors.write_count ++;
+ if (!amdgpu_sriov_vf(adev))
+ return;
+
+ error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+
+ mutex_lock(&adev->virt.vf_errors.lock);
+ index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ adev->virt.vf_errors.code [index] = error_code;
+ adev->virt.vf_errors.flags [index] = error_flags;
+ adev->virt.vf_errors.data [index] = error_data;
+ adev->virt.vf_errors.write_count ++;
+ mutex_unlock(&adev->virt.vf_errors.lock);
}
@@ -58,7 +54,8 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
u32 data1, data2, data3;
int index;
- if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
+ if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
+ (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
return;
}
/*
@@ -68,18 +65,22 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
return;
}
*/
+
+ mutex_lock(&adev->virt.vf_errors.lock);
/* The errors are overlay of array, correct read_count as full. */
- if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
- admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
+ if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
+ adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
}
- while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) {
- index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
- data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]);
- data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF;
- data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
+ while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
+ index =adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
+ adev->virt.vf_errors.flags[index]);
+ data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
+ data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;
adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
- admgpu_vf_errors.read_count ++;
+ adev->virt.vf_errors.read_count ++;
}
+ mutex_unlock(&adev->virt.vf_errors.lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
index 2a3278ec76ba..6436bd053325 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
@@ -56,7 +56,10 @@ enum AMDGIM_ERROR_CATEGORY {
AMDGIM_ERROR_CATEGORY_MAX
};
-void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data);
void amdgpu_vf_error_trans_all (struct amdgpu_device *adev);
#endif /* __VF_ERROR_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ab05121b9272..6738df836a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,7 @@
*/
#include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT 100000
+#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
@@ -114,27 +114,25 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r;
- uint32_t val;
- struct dma_fence *f;
+ unsigned long flags;
+ uint32_t val, seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
- mutex_lock(&kiq->ring_mutex);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit(ring, &f);
+ amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
- mutex_unlock(&kiq->ring_mutex);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
- r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
- dma_fence_put(f);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ DRM_ERROR("wait for kiq fence error: %ld\n", r);
return ~0;
}
-
val = adev->wb.wb[adev->virt.reg_val_offs];
return val;
@@ -143,23 +141,23 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
signed long r;
- struct dma_fence *f;
+ unsigned long flags;
+ uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);
- mutex_lock(&kiq->ring_mutex);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit(ring, &f);
+ amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
- mutex_unlock(&kiq->ring_mutex);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
- r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1)
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- dma_fence_put(f);
+ DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
/**
@@ -274,3 +272,80 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
(void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0;
}
+
+
+int amdgpu_virt_fw_reserve_get_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int chksum)
+{
+ unsigned int ret = key;
+ unsigned long i = 0;
+ unsigned char *pos;
+
+ pos = (char *)obj;
+ /* calculate checksum */
+ for (i = 0; i < obj_size; ++i)
+ ret += *(pos + i);
+ /* minus the chksum itself */
+ pos = (char *)&chksum;
+ for (i = 0; i < sizeof(chksum); ++i)
+ ret -= *(pos + i);
+ return ret;
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
+ uint32_t pf2vf_ver = 0;
+ uint32_t pf2vf_size = 0;
+ uint32_t checksum = 0;
+ uint32_t checkval;
+ char *str;
+
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ adev->virt.fw_reserve.p_vf2pf = NULL;
+
+ if (adev->fw_vram_usage.va != NULL) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amdgim_pf2vf_info_header *)(
+ adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
+ pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
+ AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
+ AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
+
+ /* pf2vf message must be in 4K */
+ if (pf2vf_size > 0 && pf2vf_size < 4096) {
+ checkval = amdgpu_virt_fw_reserve_get_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
+ adev->virt.fw_reserve.checksum_key, checksum);
+ if (checkval == checksum) {
+ adev->virt.fw_reserve.p_vf2pf =
+ ((void *)adev->virt.fw_reserve.p_pf2vf +
+ pf2vf_size);
+ memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
+ sizeof(amdgim_vf2pf_info));
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
+ AMDGPU_FW_VRAM_VF2PF_VER);
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
+ sizeof(amdgim_vf2pf_info));
+ AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
+ &str);
+#ifdef MODULE
+ if (THIS_MODULE->version != NULL)
+ strcpy(str, THIS_MODULE->version);
+ else
+#endif
+ strcpy(str, "N/A");
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
+ 0);
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
+ amdgpu_virt_fw_reserve_get_checksum(
+ adev->virt.fw_reserve.p_vf2pf,
+ pf2vf_size,
+ adev->virt.fw_reserve.checksum_key, 0));
+ }
+ }
+ }
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index afcfb8bcfb65..b89d37fc406f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -36,6 +36,18 @@ struct amdgpu_mm_table {
uint64_t gpu_addr;
};
+#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
+
+/* struct error_entry - amdgpu VF error information. */
+struct amdgpu_vf_error_buffer {
+ struct mutex lock;
+ int read_count;
+ int write_count;
+ uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+};
+
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
@@ -46,6 +58,179 @@ struct amdgpu_virt_ops {
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};
+/*
+ * Firmware Reserve Frame buffer
+ */
+struct amdgpu_virt_fw_reserve {
+ struct amdgim_pf2vf_info_header *p_pf2vf;
+ struct amdgim_vf2pf_info_header *p_vf2pf;
+ unsigned int checksum_key;
+};
+/*
+ * Defination between PF and VF
+ * Structures forcibly aligned to 4 to keep the same style as PF.
+ */
+#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)
+
+#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
+ (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
+
+enum AMDGIM_FEATURE_FLAG {
+ /* GIM supports feature of Error log collecting */
+ AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
+ /* GIM supports feature of loading uCodes */
+ AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
+};
+
+struct amdgim_pf2vf_info_header {
+ /* the total structure size in byte. */
+ uint32_t size;
+ /* version of this structure, written by the GIM */
+ uint32_t version;
+} __aligned(4);
+struct amdgim_pf2vf_info_v1 {
+ /* header contains size and version */
+ struct amdgim_pf2vf_info_header header;
+ /* max_width * max_height */
+ unsigned int uvd_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int uvd_enc_max_bandwidth;
+ /* max_width * max_height */
+ unsigned int vce_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int vce_enc_max_bandwidth;
+ /* MEC FW position in kb from the start of visible frame buffer */
+ unsigned int mecfw_kboffset;
+ /* The features flags of the GIM driver supports. */
+ unsigned int feature_flags;
+ /* use private key from mailbox 2 to create chueksum */
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_pf2vf_info_v2 {
+ /* header contains size and version */
+ struct amdgim_pf2vf_info_header header;
+ /* use private key from mailbox 2 to create chueksum */
+ uint32_t checksum;
+ /* The features flags of the GIM driver supports. */
+ uint32_t feature_flags;
+ /* max_width * max_height */
+ uint32_t uvd_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ uint32_t uvd_enc_max_bandwidth;
+ /* max_width * max_height */
+ uint32_t vce_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ uint32_t vce_enc_max_bandwidth;
+ /* MEC FW position in kb from the start of VF visible frame buffer */
+ uint64_t mecfw_kboffset;
+ /* MEC FW size in KB */
+ uint32_t mecfw_ksize;
+ /* UVD FW position in kb from the start of VF visible frame buffer */
+ uint64_t uvdfw_kboffset;
+ /* UVD FW size in KB */
+ uint32_t uvdfw_ksize;
+ /* VCE FW position in kb from the start of VF visible frame buffer */
+ uint64_t vcefw_kboffset;
+ /* VCE FW size in KB */
+ uint32_t vcefw_ksize;
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+} __aligned(4);
+
+
+struct amdgim_vf2pf_info_header {
+ /* the total structure size in byte. */
+ uint32_t size;
+ /*version of this structure, written by the guest */
+ uint32_t version;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v1 {
+ /* header contains size and version */
+ struct amdgim_vf2pf_info_header header;
+ /* driver version */
+ char driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ unsigned int driver_cert;
+ /* guest OS type and version: need a define */
+ unsigned int os_info;
+ /* in the unit of 1M */
+ unsigned int fb_usage;
+ /* guest gfx engine usage percentage */
+ unsigned int gfx_usage;
+ /* guest gfx engine health percentage */
+ unsigned int gfx_health;
+ /* guest compute engine usage percentage */
+ unsigned int compute_usage;
+ /* guest compute engine health percentage */
+ unsigned int compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ unsigned int vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ unsigned int vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_usage;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_health;
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v2 {
+ /* header contains size and version */
+ struct amdgim_vf2pf_info_header header;
+ uint32_t checksum;
+ /* driver version */
+ uint8_t driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ uint32_t driver_cert;
+ /* guest OS type and version: need a define */
+ uint32_t os_info;
+ /* in the unit of 1M */
+ uint32_t fb_usage;
+ /* guest gfx engine usage percentage */
+ uint32_t gfx_usage;
+ /* guest gfx engine health percentage */
+ uint32_t gfx_health;
+ /* guest compute engine usage percentage */
+ uint32_t compute_usage;
+ /* guest compute engine health percentage */
+ uint32_t compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ uint32_t vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ uint32_t vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_usage;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_health;
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+} __aligned(4);
+
+#define AMDGPU_FW_VRAM_VF2PF_VER 2
+typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
+
+#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
+ do { \
+ ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
+ } while (0)
+
+#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
+ do { \
+ (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
+ } while (0)
+
+#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
+ do { \
+ if (!adev->virt.fw_reserve.p_pf2vf) \
+ *(val) = 0; \
+ else { \
+ if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
+ *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+ if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
+ *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+ } \
+ } while (0)
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -59,6 +244,8 @@ struct amdgpu_virt {
struct work_struct flr_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
+ struct amdgpu_vf_error_buffer vf_errors;
+ struct amdgpu_virt_fw_reserve fw_reserve;
};
#define AMDGPU_CSA_SIZE (8 * 1024)
@@ -101,5 +288,9 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
+int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
+ unsigned int key,
+ unsigned int chksum);
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bd20ff018512..c8c26f21993c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -27,12 +27,59 @@
*/
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
+#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_vm_pasid_ida);
+
+/**
+ * amdgpu_vm_alloc_pasid - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_vm_alloc_pasid(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
+ 1U << (bits - 1), 1U << bits,
+ GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ return pasid;
+}
+
+/**
+ * amdgpu_vm_free_pasid - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_vm_free_pasid(unsigned int pasid)
+{
+ ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
+}
+
+/*
* GPUVM
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
@@ -140,7 +187,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry)
{
- entry->robj = vm->root.bo;
+ entry->robj = vm->root.base.bo;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
@@ -149,86 +196,80 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_validate_layer - validate a single page table level
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
- * @parent: parent page table level
+ * @adev: amdgpu device pointer
+ * @vm: vm providing the BOs
* @validate: callback to do the validation
* @param: parameter for the validation callback
*
* Validate the page table BOs on command submission if neccessary.
*/
-static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
- int (*validate)(void *, struct amdgpu_bo *),
- void *param, bool use_cpu_for_update,
- struct ttm_bo_global *glob)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
- unsigned i;
+ struct ttm_bo_global *glob = adev->mman.bdev.glob;
int r;
- if (use_cpu_for_update) {
- r = amdgpu_bo_kmap(parent->bo, NULL);
- if (r)
- return r;
- }
-
- if (!parent->entries)
- return 0;
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
- if (!entry->bo)
- continue;
+ bo = bo_base->bo;
+ BUG_ON(!bo);
+ if (bo->parent) {
+ r = validate(param, bo);
+ if (r)
+ return r;
- r = validate(param, entry->bo);
- if (r)
- return r;
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
+ if (bo->shadow)
+ ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+ spin_unlock(&glob->lru_lock);
+ }
- spin_lock(&glob->lru_lock);
- ttm_bo_move_to_lru_tail(&entry->bo->tbo);
- if (entry->bo->shadow)
- ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
- spin_unlock(&glob->lru_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel &&
+ vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(bo, NULL);
+ if (r)
+ return r;
+ }
- /*
- * Recurse into the sub directory. This is harmless because we
- * have only a maximum of 5 layers.
- */
- r = amdgpu_vm_validate_level(entry, validate, param,
- use_cpu_for_update, glob);
- if (r)
- return r;
+ spin_lock(&vm->status_lock);
+ if (bo->tbo.type != ttm_bo_type_kernel)
+ list_move(&bo_base->vm_status, &vm->moved);
+ else
+ list_move(&bo_base->vm_status, &vm->relocated);
}
+ spin_unlock(&vm->status_lock);
- return r;
+ return 0;
}
/**
- * amdgpu_vm_validate_pt_bos - validate the page table BOs
+ * amdgpu_vm_ready - check VM is ready for updates
*
- * @adev: amdgpu device pointer
- * @vm: vm providing the BOs
- * @validate: callback to do the validation
- * @param: parameter for the validation callback
+ * @vm: VM to check
*
- * Validate the page table BOs on command submission if neccessary.
+ * Check if all VM PDs/PTs are ready for updates
*/
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*validate)(void *p, struct amdgpu_bo *bo),
- void *param)
+bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- uint64_t num_evictions;
+ bool ready;
- /* We only need to validate the page tables
- * if they aren't already valid.
- */
- num_evictions = atomic64_read(&adev->num_evictions);
- if (num_evictions == vm->last_eviction_counter)
- return 0;
+ spin_lock(&vm->status_lock);
+ ready = list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
- return amdgpu_vm_validate_level(&vm->root, validate, param,
- vm->use_cpu_for_update,
- adev->mman.bdev.glob);
+ return ready;
}
/**
@@ -287,18 +328,19 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_SHADOW);
if (vm->pte_support_ats) {
- init_value = AMDGPU_PTE_SYSTEM;
+ init_value = AMDGPU_PTE_DEFAULT_ATC;
if (level != adev->vm_manager.num_level - 1)
init_value |= AMDGPU_PDE_PTE;
+
}
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
- struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct reservation_object *resv = vm->root.base.bo->tbo.resv;
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
struct amdgpu_bo *pt;
- if (!entry->bo) {
+ if (!entry->base.bo) {
r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level),
AMDGPU_GPU_PAGE_SIZE, true,
@@ -319,9 +361,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
/* Keep a reference to the root directory to avoid
* freeing them up in the wrong order.
*/
- pt->parent = amdgpu_bo_ref(vm->root.bo);
-
- entry->bo = pt;
+ pt->parent = amdgpu_bo_ref(parent->base.bo);
+
+ entry->base.vm = vm;
+ entry->base.bo = pt;
+ list_add_tail(&entry->base.bo_list, &pt->va);
+ spin_lock(&vm->status_lock);
+ list_add(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
entry->addr = 0;
}
@@ -988,7 +1035,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
+ amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
r = amdgpu_sync_wait(&sync, true);
amdgpu_sync_free(&sync);
@@ -1007,18 +1054,17 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- unsigned level)
+ struct amdgpu_vm_pt *parent)
{
struct amdgpu_bo *shadow;
struct amdgpu_ring *ring = NULL;
uint64_t pd_addr, shadow_addr = 0;
- uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw = 0;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
struct dma_fence *fence = NULL;
+ uint32_t incr;
int r;
@@ -1027,10 +1073,10 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
- shadow = parent->bo->shadow;
+ shadow = parent->base.bo->shadow;
if (vm->use_cpu_for_update) {
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
if (unlikely(r))
return r;
@@ -1046,7 +1092,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
/* assume the worst case */
ndw += parent->last_entry_used * 6;
- pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
if (shadow) {
shadow_addr = amdgpu_bo_gpu_offset(shadow);
@@ -1066,12 +1112,17 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
/* walk over the address space and update the directory */
for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
+ struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+ struct amdgpu_bo *bo = entry->base.bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
+ spin_lock(&vm->status_lock);
+ list_del_init(&entry->base.vm_status);
+ spin_unlock(&vm->status_lock);
+
pt = amdgpu_bo_gpu_offset(bo);
pt = amdgpu_gart_get_vm_pde(adev, pt);
/* Don't update huge pages here */
@@ -1082,6 +1133,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
pde = pd_addr + pt_idx * 8;
+ incr = amdgpu_bo_size(bo);
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt) ||
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
@@ -1109,7 +1161,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
}
if (count) {
- if (vm->root.bo->shadow)
+ if (vm->root.base.bo->shadow)
params.func(&params, last_shadow, last_pt,
count, incr, AMDGPU_PTE_VALID);
@@ -1122,12 +1174,13 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
amdgpu_job_free(job);
} else {
amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &job->sync,
+ parent->base.bo->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
if (shadow)
amdgpu_sync_resv(adev, &job->sync,
shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ AMDGPU_FENCE_OWNER_VM, false);
WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
@@ -1135,26 +1188,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_bo_fence(parent->bo, fence, true);
- dma_fence_put(vm->last_dir_update);
- vm->last_dir_update = dma_fence_get(fence);
- dma_fence_put(fence);
+ amdgpu_bo_fence(parent->base.bo, fence, true);
+ dma_fence_put(vm->last_update);
+ vm->last_update = fence;
}
}
- /*
- * Recurse into the subdirectories. This recursion is harmless because
- * we only have a maximum of 5 layers.
- */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-
- if (!entry->bo)
- continue;
-
- r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
- if (r)
- return r;
- }
return 0;
@@ -1170,7 +1208,8 @@ error_free:
*
* Mark all PD level as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent)
{
unsigned pt_idx;
@@ -1181,11 +1220,15 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- if (!entry->bo)
+ if (!entry->base.bo)
continue;
entry->addr = ~0ULL;
- amdgpu_vm_invalidate_level(entry);
+ spin_lock(&vm->status_lock);
+ if (list_empty(&entry->base.vm_status))
+ list_add(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
+ amdgpu_vm_invalidate_level(vm, entry);
}
}
@@ -1201,11 +1244,40 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- int r;
+ int r = 0;
- r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
- if (r)
- amdgpu_vm_invalidate_level(&vm->root);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->relocated)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
+
+ bo_base = list_first_entry(&vm->relocated,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo->parent;
+ if (bo) {
+ struct amdgpu_vm_bo_base *parent;
+ struct amdgpu_vm_pt *pt;
+
+ parent = list_first_entry(&bo->va,
+ struct amdgpu_vm_bo_base,
+ bo_list);
+ pt = container_of(parent, struct amdgpu_vm_pt, base);
+
+ r = amdgpu_vm_update_level(adev, vm, pt);
+ if (r) {
+ amdgpu_vm_invalidate_level(vm, &vm->root);
+ return r;
+ }
+ spin_lock(&vm->status_lock);
+ } else {
+ spin_lock(&vm->status_lock);
+ list_del_init(&bo_base->vm_status);
+ }
+ }
+ spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
@@ -1236,7 +1308,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
*entry = &p->vm->root;
while ((*entry)->entries) {
idx = addr >> (p->adev->vm_manager.block_size * level--);
- idx %= amdgpu_bo_size((*entry)->bo) / 8;
+ idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
*parent = *entry;
*entry = &(*entry)->entries[idx];
}
@@ -1272,7 +1344,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
p->src ||
!(flags & AMDGPU_PTE_VALID)) {
- dst = amdgpu_bo_gpu_offset(entry->bo);
+ dst = amdgpu_bo_gpu_offset(entry->base.bo);
dst = amdgpu_gart_get_vm_pde(p->adev, dst);
flags = AMDGPU_PTE_VALID;
} else {
@@ -1298,18 +1370,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
tmp = p->pages_addr;
p->pages_addr = NULL;
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
p->pages_addr = tmp;
} else {
- if (parent->bo->shadow) {
- pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+ if (parent->base.bo->shadow) {
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
}
- pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
}
@@ -1360,7 +1432,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if (entry->addr & AMDGPU_PDE_PTE)
continue;
- pt = entry->bo;
+ pt = entry->base.bo;
if (use_cpu_update) {
pe_start = (unsigned long)amdgpu_bo_kptr(pt);
} else {
@@ -1396,8 +1468,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
- int r;
-
/**
* The MC L1 TLB supports variable sized pages, based on a fragment
* field in the PTE. When this field is set to a non-zero value, page
@@ -1416,39 +1486,38 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Userspace can support this by aligning virtual base address and
* allocation size to the fragment size.
*/
- unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
- uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
- uint64_t frag_align = 1 << pages_per_frag;
-
- uint64_t frag_start = ALIGN(start, frag_align);
- uint64_t frag_end = end & ~(frag_align - 1);
+ unsigned max_frag = params->adev->vm_manager.fragment_size;
+ int r;
/* system pages are non continuously */
- if (params->src || !(flags & AMDGPU_PTE_VALID) ||
- (frag_start >= frag_end))
+ if (params->src || !(flags & AMDGPU_PTE_VALID))
return amdgpu_vm_update_ptes(params, start, end, dst, flags);
- /* handle the 4K area at the beginning */
- if (start != frag_start) {
- r = amdgpu_vm_update_ptes(params, start, frag_start,
- dst, flags);
+ while (start != end) {
+ uint64_t frag_flags, frag_end;
+ unsigned frag;
+
+ /* This intentionally wraps around if no bit is set */
+ frag = min((unsigned)ffs(start) - 1,
+ (unsigned)fls64(end - start) - 1);
+ if (frag >= max_frag) {
+ frag_flags = AMDGPU_PTE_FRAG(max_frag);
+ frag_end = end & ~((1ULL << max_frag) - 1);
+ } else {
+ frag_flags = AMDGPU_PTE_FRAG(frag);
+ frag_end = start + (1 << frag);
+ }
+
+ r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
+ flags | frag_flags);
if (r)
return r;
- dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
- }
- /* handle the area in the middle */
- r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
- flags | frag_flags);
- if (r)
- return r;
-
- /* handle the 4K area at the end */
- if (frag_end != end) {
- dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
- r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
+ dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
+ start = frag_end;
}
- return r;
+
+ return 0;
}
/**
@@ -1456,7 +1525,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
*
* @adev: amdgpu_device pointer
* @exclusive: fence we need to sync to
- * @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of mapped range
@@ -1470,7 +1538,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
- uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
@@ -1488,7 +1555,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.src = src;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -1517,10 +1583,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
nptes = last - start + 1;
/*
- * reserve space for one command every (1 << BLOCK_SIZE)
+ * reserve space for two commands every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
+ *
+ * The second command is for the shadow pagetables.
*/
- ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
/* padding, etc. */
ndw = 64;
@@ -1528,15 +1596,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* one PDE write for each huge page */
ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
- if (src) {
- /* only copy commands needed */
- ndw += ncmds * 7;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else if (pages_addr) {
+ if (pages_addr) {
/* copy commands needed */
- ndw += ncmds * 7;
+ ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
/* and also PTEs */
ndw += nptes * 2;
@@ -1545,10 +1607,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
} else {
/* set page commands needed */
- ndw += ncmds * 10;
+ ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
- /* two extra commands for begin/end of fragment */
- ndw += 2 * 10;
+ /* extra commands for begin/end fragments */
+ ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
+ * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1559,7 +1622,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.ib = &job->ibs[0];
- if (!src && pages_addr) {
+ if (pages_addr) {
uint64_t *pte;
unsigned i;
@@ -1580,12 +1643,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
- owner);
+ r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
+ owner, false);
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
if (r)
goto error_free;
@@ -1600,14 +1663,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_bo_fence(vm->root.bo, f, true);
+ amdgpu_bo_fence(vm->root.base.bo, f, true);
dma_fence_put(*fence);
*fence = f;
return 0;
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(&vm->root);
+ amdgpu_vm_invalidate_level(vm, &vm->root);
return r;
}
@@ -1636,7 +1699,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
- uint64_t pfn, src = 0, start = mapping->start;
+ unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
+ uint64_t pfn, start = mapping->start;
int r;
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -1670,6 +1734,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
do {
+ dma_addr_t *dma_addr = NULL;
uint64_t max_entries;
uint64_t addr, last;
@@ -1683,16 +1748,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
if (pages_addr) {
+ uint64_t count;
+
max_entries = min(max_entries, 16ull * 1024ull);
- addr = 0;
+ for (count = 1; count < max_entries; ++count) {
+ uint64_t idx = pfn + count;
+
+ if (pages_addr[idx] !=
+ (pages_addr[idx - 1] + PAGE_SIZE))
+ break;
+ }
+
+ if (count < min_linear_pages) {
+ addr = pfn << PAGE_SHIFT;
+ dma_addr = pages_addr;
+ } else {
+ addr = pages_addr[pfn];
+ max_entries = count;
+ }
+
} else if (flags & AMDGPU_PTE_VALID) {
addr += adev->vm_manager.vram_base_offset;
+ addr += pfn << PAGE_SHIFT;
}
- addr += pfn << PAGE_SHIFT;
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -1730,7 +1811,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
- struct dma_fence *exclusive;
+ struct dma_fence *exclusive, **last_update;
uint64_t flags;
int r;
@@ -1756,38 +1837,43 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
else
flags = 0x0;
- spin_lock(&vm->status_lock);
- if (!list_empty(&bo_va->base.vm_status))
+ if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+ last_update = &vm->last_update;
+ else
+ last_update = &bo_va->last_pt_update;
+
+ if (!clear && bo_va->base.moved) {
+ bo_va->base.moved = false;
list_splice_init(&bo_va->valids, &bo_va->invalids);
- spin_unlock(&vm->status_lock);
+
+ } else if (bo_va->cleared != clear) {
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ }
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
mapping, flags, nodes,
- &bo_va->last_pt_update);
+ last_update);
if (r)
return r;
}
- if (trace_amdgpu_vm_bo_mapping_enabled()) {
- list_for_each_entry(mapping, &bo_va->valids, list)
- trace_amdgpu_vm_bo_mapping(mapping);
-
- list_for_each_entry(mapping, &bo_va->invalids, list)
- trace_amdgpu_vm_bo_mapping(mapping);
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+ mb();
+ amdgpu_gart_flush_gpu_tlb(adev, 0);
}
spin_lock(&vm->status_lock);
- list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->base.vm_status);
- if (clear)
- list_add(&bo_va->base.vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+
+ if (trace_amdgpu_vm_bo_mapping_enabled()) {
+ list_for_each_entry(mapping, &bo_va->valids, list)
+ trace_amdgpu_vm_bo_mapping(mapping);
}
return 0;
@@ -1895,7 +1981,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct reservation_object *resv = vm->root.base.bo->tbo.resv;
struct dma_fence *excl, **shared;
unsigned i, shared_count;
int r;
@@ -1951,9 +2037,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
list_del(&mapping->list);
if (vm->pte_support_ats)
- init_pte_value = AMDGPU_PTE_SYSTEM;
+ init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
mapping->start, mapping->last,
init_pte_value, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -1975,29 +2061,35 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_clear_moved - clear moved BOs in the PT
+ * amdgpu_vm_handle_moved - handle moved BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @sync: sync object to add fences to
*
- * Make sure all moved BOs are cleared in the PT.
+ * Make sure all BOs which are moved are updated in the PTs.
* Returns 0 for success.
*
- * PTs have to be reserved and mutex must be locked!
+ * PTs have to be reserved!
*/
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync)
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
- struct amdgpu_bo_va *bo_va = NULL;
+ bool clear;
int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
+ struct amdgpu_bo_va *bo_va;
+
bo_va = list_first_entry(&vm->moved,
struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, true);
+ /* Per VM BOs never need to bo cleared in the page tables */
+ clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
@@ -2005,9 +2097,6 @@ int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
spin_unlock(&vm->status_lock);
- if (bo_va)
- r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
-
return r;
}
@@ -2049,6 +2138,39 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
return bo_va;
}
+
+/**
+ * amdgpu_vm_bo_insert_mapping - insert a new mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to store the address
+ * @mapping: the mapping to insert
+ *
+ * Insert a new mapping into all structures.
+ */
+static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ struct amdgpu_bo_va_mapping *mapping)
+{
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+
+ mapping->bo_va = bo_va;
+ list_add(&mapping->list, &bo_va->invalids);
+ amdgpu_vm_it_insert(mapping, &vm->va);
+
+ if (mapping->flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ spin_lock(&vm->status_lock);
+ if (list_empty(&bo_va->base.vm_status))
+ list_add(&bo_va->base.vm_status, &vm->moved);
+ spin_unlock(&vm->status_lock);
+ }
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -2100,17 +2222,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (!mapping)
return -ENOMEM;
- INIT_LIST_HEAD(&mapping->list);
mapping->start = saddr;
mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -2137,7 +2254,6 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
- struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
@@ -2171,11 +2287,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -2221,6 +2333,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
@@ -2306,6 +2419,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
if (tmp->last > eaddr)
tmp->last = eaddr;
+ tmp->bo_va = NULL;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
}
@@ -2332,6 +2446,19 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_bo_lookup_mapping - find mapping by address
+ *
+ * @vm: the requested VM
+ *
+ * Find a mapping by it's address.
+ */
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr)
+{
+ return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
+}
+
+/**
* amdgpu_vm_bo_rmv - remove a bo to a specific vm
*
* @adev: amdgpu_device pointer
@@ -2356,6 +2483,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
@@ -2380,15 +2508,36 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* Mark @bo as invalid.
*/
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
list_for_each_entry(bo_base, &bo->va, bo_list) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ bo_base->moved = true;
+ if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&bo_base->vm_status, &vm->evicted);
+ else
+ list_move_tail(&bo_base->vm_status,
+ &vm->evicted);
+ spin_unlock(&bo_base->vm->status_lock);
+ continue;
+ }
+
+ if (bo->tbo.type == ttm_bo_type_kernel) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status, &vm->relocated);
+ spin_unlock(&bo_base->vm->status_lock);
+ continue;
+ }
+
spin_lock(&bo_base->vm->status_lock);
if (list_empty(&bo_base->vm_status))
- list_add(&bo_base->vm_status,
- &bo_base->vm->moved);
+ list_add(&bo_base->vm_status, &vm->moved);
spin_unlock(&bo_base->vm->status_lock);
}
}
@@ -2412,7 +2561,8 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
* @adev: amdgpu_device pointer
* @fragment_size_default: the default fragment size if it's set auto
*/
-void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+ uint32_t fragment_size_default)
{
if (amdgpu_vm_fragment_size == -1)
adev->vm_manager.fragment_size = fragment_size_default;
@@ -2426,7 +2576,8 @@ void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_s
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+ uint32_t fragment_size_default)
{
/* adjust vm size firstly */
if (amdgpu_vm_size == -1)
@@ -2458,7 +2609,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_
* Init @vm fields.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context)
+ int vm_context, unsigned int pasid)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT(adev) * 8);
@@ -2474,8 +2625,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
+ INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
- INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
/* create scheduler entity for page table updates */
@@ -2497,7 +2649,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (adev->asic_type == CHIP_RAVEN) {
vm->pte_support_ats = true;
- init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+ init_pde_value = AMDGPU_PTE_DEFAULT_ATC
+ | AMDGPU_PDE_PTE;
+
}
} else
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2506,7 +2660,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
"CPU update of VM recommended only for large BAR system\n");
- vm->last_dir_update = NULL;
+ vm->last_update = NULL;
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED;
@@ -2519,30 +2673,47 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, NULL, init_pde_value, &vm->root.bo);
+ NULL, NULL, init_pde_value, &vm->root.base.bo);
if (r)
goto error_free_sched_entity;
- r = amdgpu_bo_reserve(vm->root.bo, false);
- if (r)
- goto error_free_root;
-
- vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+ vm->root.base.vm = vm;
+ list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
+ INIT_LIST_HEAD(&vm->root.base.vm_status);
if (vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(vm->root.bo, NULL);
+ r = amdgpu_bo_reserve(vm->root.base.bo, false);
+ if (r)
+ goto error_free_root;
+
+ r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
+ amdgpu_bo_unreserve(vm->root.base.bo);
if (r)
goto error_free_root;
}
- amdgpu_bo_unreserve(vm->root.bo);
+ if (pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
+ GFP_ATOMIC);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
+
+ INIT_KFIFO(vm->faults);
+ vm->fault_credit = 16;
return 0;
error_free_root:
- amdgpu_bo_unref(&vm->root.bo->shadow);
- amdgpu_bo_unref(&vm->root.bo);
- vm->root.bo = NULL;
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+ amdgpu_bo_unref(&vm->root.base.bo);
+ vm->root.base.bo = NULL;
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
@@ -2561,9 +2732,11 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
unsigned i;
- if (level->bo) {
- amdgpu_bo_unref(&level->bo->shadow);
- amdgpu_bo_unref(&level->bo);
+ if (level->base.bo) {
+ list_del(&level->base.bo_list);
+ list_del(&level->base.vm_status);
+ amdgpu_bo_unref(&level->base.bo->shadow);
+ amdgpu_bo_unref(&level->base.bo);
}
if (level->entries)
@@ -2586,7 +2759,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
- int i;
+ struct amdgpu_bo *root;
+ u64 fault;
+ int i, r;
+
+ /* Clear pending page faults from IH when the VM is destroyed */
+ while (kfifo_get(&vm->faults, &fault))
+ amdgpu_ih_clear_fault(adev, fault);
+
+ if (vm->pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ }
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
@@ -2609,13 +2796,51 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- amdgpu_vm_free_levels(&vm->root);
- dma_fence_put(vm->last_dir_update);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ r = amdgpu_bo_reserve(root, true);
+ if (r) {
+ dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
+ } else {
+ amdgpu_vm_free_levels(&vm->root);
+ amdgpu_bo_unreserve(root);
+ }
+ amdgpu_bo_unref(&root);
+ dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vm_free_reserved_vmid(adev, vm, i);
}
/**
+ * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID do identify the VM
+ *
+ * This function is expected to be called in interrupt context. Returns
+ * true if there was fault credit, false otherwise
+ */
+bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid)
+{
+ struct amdgpu_vm *vm;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ /* VM not found, can't track fault credit */
+ return true;
+
+ /* No lock needed. only accessed by IRQ handler */
+ if (!vm->fault_credit)
+ /* Too many faults in this VM */
+ return false;
+
+ vm->fault_credit--;
+ return true;
+}
+
+/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
@@ -2668,6 +2893,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->vm_manager.vm_update_mode = 0;
#endif
+ idr_init(&adev->vm_manager.pasid_idr);
+ spin_lock_init(&adev->vm_manager.pasid_lock);
}
/**
@@ -2681,6 +2908,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i, j;
+ WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
+ idr_destroy(&adev->vm_manager.pasid_idr);
+
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vm_id_manager *id_mgr =
&adev->vm_manager.id_mgr[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 6716355403ec..bae77353447b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -25,6 +25,7 @@
#define __AMDGPU_VM_H__
#include <linux/rbtree.h>
+#include <linux/idr.h>
#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
@@ -72,6 +73,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
+/* For Raven */
+#define AMDGPU_MTYPE_CC 2
+
+#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
+ | AMDGPU_PTE_SNOOPED \
+ | AMDGPU_PTE_EXECUTABLE \
+ | AMDGPU_PTE_READABLE \
+ | AMDGPU_PTE_WRITEABLE \
+ | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
+
/* How to programm VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
@@ -83,7 +94,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
+
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
@@ -105,17 +117,24 @@ struct amdgpu_vm_bo_base {
/* protected by spinlock */
struct list_head vm_status;
+
+ /* protected by the BO being reserved */
+ bool moved;
};
struct amdgpu_vm_pt {
- struct amdgpu_bo *bo;
- uint64_t addr;
+ struct amdgpu_vm_bo_base base;
+ uint64_t addr;
/* array of page tables, one for each directory entry */
- struct amdgpu_vm_pt *entries;
- unsigned last_entry_used;
+ struct amdgpu_vm_pt *entries;
+ unsigned last_entry_used;
};
+#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
+#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -123,19 +142,21 @@ struct amdgpu_vm {
/* protecting invalidated */
spinlock_t status_lock;
+ /* BOs who needs a validation */
+ struct list_head evicted;
+
+ /* PT BOs which relocated and their parent need an update */
+ struct list_head relocated;
+
/* BOs moved, but not yet updated in the PT */
struct list_head moved;
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_vm_pt root;
- struct dma_fence *last_dir_update;
- uint64_t last_eviction_counter;
+ struct dma_fence *last_update;
/* protecting freed */
spinlock_t freed_lock;
@@ -143,8 +164,9 @@ struct amdgpu_vm {
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
- /* client id */
+ /* client id and PASID (TODO: replace client_id with PASID) */
u64 client_id;
+ unsigned int pasid;
/* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -153,6 +175,12 @@ struct amdgpu_vm {
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
+
+ /* Up to 128 pending retry page faults */
+ DECLARE_KFIFO(faults, u64, 128);
+
+ /* Limit non-retry fault storms */
+ unsigned int fault_credit;
};
struct amdgpu_vm_id {
@@ -215,16 +243,27 @@ struct amdgpu_vm_manager {
* BIT1[= 0] Compute updated by SDMA [= 1] by CPU
*/
int vm_update_mode;
+
+ /* PASID to VM mapping, will be used in interrupt context to
+ * look up VM of a page fault
+ */
+ struct idr pasid_idr;
+ spinlock_t pasid_lock;
};
+int amdgpu_vm_alloc_pasid(unsigned int bits);
+void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context);
+ int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
+bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
@@ -243,13 +282,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
+ struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -269,6 +308,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 26e900627971..4acca92f6a52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -68,11 +68,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index d69aa2e179bb..69500a8b4e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1343,8 +1343,11 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
idx = 0x80;
str = CSTR(idx);
- if (*str != '\0')
+ if (*str != '\0') {
pr_info("ATOM BIOS: %s\n", str);
+ strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
+ }
+
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index ddd8045accf3..a39170991afe 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -140,6 +140,7 @@ struct atom_context {
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
+ char vbios_version[20];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index cb508a211b2f..68b505c768ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -307,7 +307,6 @@ static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
@@ -883,8 +882,9 @@ static int ci_power_control_set_level(struct amdgpu_device *adev)
return ret;
}
-static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
pi->uvd_power_gated = gate;
@@ -901,8 +901,9 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
}
}
-static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool ci_dpm_vblank_too_short(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
@@ -1210,11 +1211,12 @@ static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -1237,12 +1239,13 @@ static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.no_fan)
@@ -1271,8 +1274,10 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
switch (mode) {
case AMD_FAN_CTRL_NONE:
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -1292,8 +1297,9 @@ static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
-static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 ci_dpm_get_fan_control_mode(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (pi->fan_is_controlled_by_smc)
@@ -4378,9 +4384,10 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
}
-static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
+static int ci_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp, levels, i;
int ret;
@@ -5291,8 +5298,9 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -5304,8 +5312,9 @@ static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void ci_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -5479,8 +5488,9 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
ci_update_current_ps(adev, boot_ps);
}
-static int ci_dpm_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -5551,8 +5561,10 @@ static void ci_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
-static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void ci_dpm_display_configuration_changed(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
ci_program_display_gap(adev);
}
@@ -6105,9 +6117,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
}
static void
-ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ci_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *rps = &pi->current_rps;
u32 sclk = ci_get_average_sclk_freq(adev);
@@ -6131,12 +6144,13 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
seq_printf(m, "GPU load: %u %%\n", activity_percent);
}
-static void ci_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct ci_ps *ps = ci_get_ps(rps);
struct ci_pl *pl;
int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -6158,20 +6172,23 @@ static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}
-static int ci_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int ci_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct ci_ps *ci_cps;
struct ci_ps *ci_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
- ci_cps = ci_get_ps(cps);
- ci_rps = ci_get_ps(rps);
+ ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
+ ci_rps = ci_get_ps((struct amdgpu_ps *)rps);
if (ci_cps == NULL) {
*equal = false;
@@ -6199,8 +6216,9 @@ static int ci_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6210,8 +6228,9 @@ static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
-static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6222,10 +6241,11 @@ static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
}
/* get temperature in millidegrees */
-static int ci_dpm_get_temp(struct amdgpu_device *adev)
+static int ci_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -6261,7 +6281,6 @@ static int ci_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ci_dpm_set_dpm_funcs(adev);
ci_dpm_set_irq_funcs(adev);
return 0;
@@ -6346,7 +6365,6 @@ static int ci_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -6551,9 +6569,10 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
-static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+static int ci_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
@@ -6618,9 +6637,10 @@ static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
return size;
}
-static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+static int ci_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
@@ -6664,8 +6684,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_sclk_od(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6680,8 +6701,9 @@ static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6698,8 +6720,9 @@ static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_mclk_od(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6714,8 +6737,9 @@ static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6732,9 +6756,10 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_get_power_profile_state(void *handle,
struct amd_pp_profile *query)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !query)
@@ -6851,9 +6876,10 @@ static int ci_set_power_profile_state(struct amdgpu_device *adev,
return result;
}
-static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_set_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
int ret = -1;
@@ -6906,9 +6932,10 @@ static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_reset_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !request)
@@ -6927,9 +6954,10 @@ static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
return -EINVAL;
}
-static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
+static int ci_dpm_switch_power_profile(void *handle,
enum amd_pp_profile_type type)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amd_pp_profile request = {0};
@@ -6944,11 +6972,12 @@ static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int ci_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
u32 activity_percent = 50;
int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -7003,7 +7032,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.set_powergating_state = ci_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
+const struct amd_pm_funcs ci_dpm_funcs = {
.get_temperature = &ci_dpm_get_temp,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
@@ -7035,12 +7064,6 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.read_sensor = ci_dpm_read_sensor,
};
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &ci_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
.set = ci_dpm_set_interrupt_state,
.process = ci_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 567c4a5cf90c..a296f7bbe57c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -65,6 +65,7 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
@@ -1022,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, true},
};
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num,
- u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
{
- uint32_t val;
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1047,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+ bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
if (reg_offset != cik_allowed_read_registers[i].reg_offset)
continue;
- *value = cik_allowed_read_registers[i].grbm_indexed ?
- cik_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+ reg_offset);
return 0;
}
return -EINVAL;
@@ -1900,6 +1980,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
@@ -1914,6 +1998,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
@@ -1928,6 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
@@ -1943,6 +2035,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index b1c8e7b446ea..c7b4349f6319 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -26,5 +26,6 @@
extern const struct amd_ip_funcs ci_dpm_ip_funcs;
extern const struct amd_ip_funcs kv_dpm_ip_funcs;
-
+extern const struct amd_pm_funcs ci_dpm_funcs;
+extern const struct amd_pm_funcs kv_dpm_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b8918432c572..a870b354e3f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,6 +228,34 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
+/**
+ * cik_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -433,6 +461,7 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
+ .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index f508f4d01e4a..60cecd117705 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1387,8 +1387,13 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = cik_sdma_vm_copy_pte,
+
.write_pte = cik_sdma_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x1fffff >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 0c1209cdd1cb..fa61d649bb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,6 +208,34 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * cz_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
+/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -414,6 +442,7 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
+ .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index b9ee9073cb0d..a8829af120c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -288,7 +288,7 @@ dce_virtual_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -298,7 +298,7 @@ dce_virtual_encoder(struct drm_connector *connector)
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 00868764a0dd..419ba0ce7ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
+
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4670,6 +4686,14 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->gfx.rlc.cp_table_size) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v7_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc260c13b1da..9ecdf621a74a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -125,24 +126,39 @@ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
@@ -918,8 +934,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
@@ -929,8 +954,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.me_fw);
@@ -941,8 +975,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
@@ -1012,8 +1055,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
@@ -1025,8 +1077,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ }
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
@@ -2053,9 +2114,19 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v8_0_free_microcode(adev);
return 0;
@@ -3891,10 +3962,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indices,
&indices_count,
- sizeof(unique_indices) / sizeof(int),
+ ARRAY_SIZE(unique_indices),
indirect_start_offsets,
&offset_count,
- sizeof(indirect_start_offsets)/sizeof(int));
+ ARRAY_SIZE(indirect_start_offsets));
/* save and restore list */
WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
@@ -3916,14 +3987,14 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
/* starting offsets starts */
WREG32(mmRLC_GPM_SCRATCH_ADDR,
adev->gfx.rlc.starting_offsets_start);
- for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+ for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(mmRLC_GPM_SCRATCH_DATA,
indirect_start_offsets[i]);
/* unique indices */
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
- for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+ for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
if (unique_indices[i] != 0) {
WREG32(temp + i, unique_indices[i] & 0x3FFFF);
WREG32(data + i, unique_indices[i] >> 20);
@@ -4071,18 +4142,12 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
gfx_v8_0_rlc_reset(adev);
gfx_v8_0_init_pg(adev);
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- /* legacy rlc firmware loading */
- r = gfx_v8_0_rlc_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_RLC_G);
- if (r)
- return -EINVAL;
- }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ /* legacy rlc firmware loading */
+ r = gfx_v8_0_rlc_load_microcode(adev);
+ if (r)
+ return r;
}
gfx_v8_0_rlc_start(adev);
@@ -4577,12 +4642,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
- if (!(adev->flags & AMD_IS_APU)) {
- mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
- mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
- }
+ mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -4753,7 +4816,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
- if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4790,7 +4853,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4802,7 +4865,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4900,43 +4963,15 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy firmware loading */
- r = gfx_v8_0_cp_gfx_load_microcode(adev);
- if (r)
- return r;
+ r = gfx_v8_0_cp_gfx_load_microcode(adev);
+ if (r)
+ return r;
- r = gfx_v8_0_cp_compute_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_CE);
- if (r)
- return -EINVAL;
-
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_PFP);
- if (r)
- return -EINVAL;
-
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_ME);
- if (r)
- return -EINVAL;
-
- if (adev->asic_type == CHIP_TOPAZ) {
- r = gfx_v8_0_cp_compute_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_MEC1);
- if (r)
- return -EINVAL;
- }
- }
+ r = gfx_v8_0_cp_compute_load_microcode(adev);
+ if (r)
+ return r;
}
r = gfx_v8_0_cp_gfx_resume(adev);
@@ -4975,12 +5010,69 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
}
+static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint32_t scratch, tmp = 0;
+ int r, i;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(kiq_ring, 10);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+ /* unmap queues */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ /* write to scratch for completion */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+ amdgpu_ring_commit(kiq_ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i >= adev->usec_timeout) {
+ DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
@@ -5902,7 +5994,6 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
{
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5920,7 +6011,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5941,7 +6033,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -5953,7 +6046,6 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5971,7 +6063,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5990,7 +6083,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -6011,7 +6105,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -6026,7 +6121,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6040,7 +6136,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -6307,6 +6404,104 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
+static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+ bool acquire)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int pipe_num, tmp, reg;
+ int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+ pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+ /* first me only has 2 entries, GFX and HP3D */
+ if (ring->me > 0)
+ pipe_num -= 2;
+
+ reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
+ tmp = RREG32(reg);
+ tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+ WREG32(reg, tmp);
+}
+
+static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ int i, pipe;
+ bool reserve;
+ struct amdgpu_ring *iring;
+
+ mutex_lock(&adev->gfx.pipe_reserve_mutex);
+ pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+ if (acquire)
+ set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ else
+ clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+ if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
+ /* Clear all reservations - everyone reacquires all resources */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+ gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+ true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+ gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+ true);
+ } else {
+ /* Lower all pipes without a current reservation */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ iring = &adev->gfx.gfx_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ iring = &adev->gfx.compute_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+ }
+ }
+
+ mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+ uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+ mutex_lock(&adev->srbm_mutex);
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+ WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return;
+
+ gfx_v8_0_hqd_set_priority(adev, ring, acquire);
+ gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
+}
+
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6752,6 +6947,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .set_priority = gfx_v8_0_ring_set_priority_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@@ -6960,7 +7156,7 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
uint64_t ce_payload_addr;
int cnt_ce;
- static union {
+ union {
struct vi_ce_ib_state regular;
struct vi_ce_ib_state_chained_ib chained;
} ce_payload = {};
@@ -6989,7 +7185,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
uint64_t de_payload_addr, gds_addr, csa_addr;
int cnt_de;
- static union {
+ union {
struct vi_de_ib_state regular;
struct vi_de_ib_state_chained_ib chained;
} de_payload = {};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69182eeca264..da43813d67a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -66,38 +67,70 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
};
static const u32 golden_settings_gc_9_0[] =
@@ -174,6 +207,12 @@ static const u32 golden_settings_gc_9_1_rv1[] =
SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
+static const u32 golden_settings_gc_9_x_common[] =
+{
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+};
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -209,6 +248,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+
+ amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -352,6 +394,25 @@ err1:
return r;
}
+
+static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -936,12 +997,22 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t start, uint32_t size,
+ uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, thread,
+ start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1120,30 +1191,22 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
int r;
- u32 data;
- u32 size;
- u32 base;
+ u32 data, base;
if (!amdgpu_ngg)
return 0;
/* Program buffer size */
- data = 0;
- size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
-
- size = adev->gfx.ngg.buf[NGG_POS].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
-
+ data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
+ data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_POS].size >> 8);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
- data = 0;
- size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
-
- size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
-
+ data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
+ data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
/* Program buffer base address */
@@ -1306,7 +1369,10 @@ static int gfx_v9_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
- sprintf(ring->name, "gfx");
+ if (!i)
+ sprintf(ring->name, "gfx");
+ else
+ sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1346,7 +1412,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
/* create MQD for all compute queues as wel as KIQ for SRIOV case */
- r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
+ r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
if (r)
return r;
@@ -1398,9 +1464,19 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->asic_type == CHIP_RAVEN) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
+ gfx_v9_0_free_microcode(adev);
return 0;
}
@@ -1682,10 +1758,10 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indirect_regs,
&unique_indirect_reg_count,
- sizeof(unique_indirect_regs)/sizeof(int),
+ ARRAY_SIZE(unique_indirect_regs),
indirect_start_offsets,
&indirect_start_offsets_count,
- sizeof(indirect_start_offsets)/sizeof(int));
+ ARRAY_SIZE(indirect_start_offsets));
/* enable auto inc in case it is disabled */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1722,12 +1798,12 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
/* write the starting offsets to RLC scratch ram */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.starting_offsets_start);
- for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+ for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
indirect_start_offsets[i]);
/* load unique indirect regs*/
- for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
+ for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
unique_indirect_regs[i] & 0x3FFFF);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
@@ -1740,11 +1816,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
- u32 tmp = 0;
-
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
- tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
@@ -1822,16 +1894,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- if (default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
+ enable ? 1 : 0);
+ if (default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
@@ -1841,16 +1908,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
+ enable ? 1 : 0);
+ if(default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
@@ -1860,16 +1922,11 @@ static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ CP_PG_DISABLE,
+ enable ? 0 : 1);
+ if(default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -1878,10 +1935,9 @@ static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ GFX_POWER_GATING_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1892,10 +1948,9 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ GFX_PIPELINE_PG_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
@@ -1910,10 +1965,9 @@ static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ STATIC_PER_CU_PG_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1924,10 +1978,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ DYN_PER_CU_PG_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1967,13 +2020,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
- WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
-
+ WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
-
gfx_v9_0_wait_for_rlc_serdes(adev);
}
@@ -2045,8 +2093,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v9_0_init_csb(adev);
return 0;
+ }
gfx_v9_0_rlc_stop(adev);
@@ -2463,6 +2513,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
+ mqd->dynamic_cu_mask_addr_lo =
+ lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi =
+ upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -2486,10 +2543,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
- }
- else
+ } else {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
+ }
mqd->cp_hqd_pq_doorbell_control = tmp;
@@ -2692,10 +2749,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring);
- if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -2707,7 +2764,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
- memset((void *)mqd, 0, sizeof(*mqd));
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@@ -2716,7 +2775,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
}
return 0;
@@ -2728,8 +2787,10 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
- memset((void *)mqd, 0, sizeof(*mqd));
+ if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@@ -2737,11 +2798,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+ } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -2882,12 +2943,70 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
+static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint32_t scratch, tmp = 0;
+ int r, i;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(kiq_ring, 10);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+ /* unmap queues */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ /* write to scratch for completion */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+ amdgpu_ring_commit(kiq_ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i >= adev->usec_timeout) {
+ DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
+
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
@@ -2930,15 +3049,10 @@ static bool gfx_v9_0_is_idle(void *handle)
static int gfx_v9_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
- GRBM_STATUS__GUI_ACTIVE_MASK;
-
- if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+ if (gfx_v9_0_is_idle(handle))
return 0;
udelay(1);
}
@@ -3497,9 +3611,11 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- struct nbio_hdp_flush_reg *nbio_hf_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg;
- if (ring->adev->asic_type == CHIP_VEGA10)
+ if (ring->adev->flags & AMD_IS_APU)
+ nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+ else
nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
@@ -3528,7 +3644,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
gfx_v9_0_write_data_to_reg(ring, 0, true,
- SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
+ SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -3718,7 +3834,7 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
- static struct v9_ce_ib_state ce_payload = {0};
+ struct v9_ce_ib_state ce_payload = {0};
uint64_t csa_addr;
int cnt;
@@ -3737,7 +3853,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
- static struct v9_de_ib_state de_payload = {0};
+ struct v9_de_ib_state de_payload = {0};
uint64_t csa_addr, gds_addr;
int cnt;
@@ -3757,6 +3873,12 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
+static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+ amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+}
+
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
@@ -3764,6 +3886,8 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
+ gfx_v9_0_ring_emit_tmz(ring, true);
+
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -3814,12 +3938,6 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
-}
-
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6c8040e616c4..c17996e18086 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -319,6 +319,12 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5be9c83dfcf7..f4603a7c8ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -831,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
@@ -901,6 +901,8 @@ static int gmc_v6_0_sw_fini(void *handle)
gmc_v6_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index eace9e7182c8..b0528ca9207b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -970,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1050,6 +1050,8 @@ static int gmc_v7_0_sw_fini(void *handle)
gmc_v7_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 3b3326daf32b..f368cfe2f585 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1067,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1147,6 +1147,8 @@ static int gmc_v8_0_sw_fini(void *handle)
gmc_v8_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d04d0b123212..c8f1aebeac7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -32,6 +32,8 @@
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
+#include "vega10/MMHUB/mmhub_1_0_offset.h"
+#include "vega10/ATHUB/athub_1_0_offset.h"
#include "soc15_common.h"
@@ -71,13 +73,25 @@ static const u32 golden_settings_vega10_hdp[] =
0xf6e, 0x0fffffff, 0x00000000,
};
+static const u32 golden_settings_mmhub_1_0_0[] =
+{
+ SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+};
+
+static const u32 golden_settings_athub_1_0_0[] =
+{
+ SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
+ SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+};
+
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
struct amdgpu_vmhub *hub;
- u32 tmp, reg, bits, i;
+ u32 tmp, reg, bits, i, j;
bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -89,43 +103,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- /* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
- for (i = 0; i< 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp &= ~bits;
- WREG32(reg, tmp);
- }
-
- /* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp &= ~bits;
- WREG32(reg, tmp);
+ for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ hub = &adev->vmhub[j];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + i;
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
- /* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
- for (i = 0; i< 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp |= bits;
- WREG32(reg, tmp);
- }
-
- /* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp |= bits;
- WREG32(reg, tmp);
+ for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ hub = &adev->vmhub[j];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + i;
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
}
- break;
default:
break;
}
@@ -395,7 +392,16 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 0, 1: idle
+ * Engine 2, 3: firmware
+ * Engine 4~13: amdgpu ring, subject to change when ring number changes
+ * Engine 14~15: idle
+ * Engine 16: kfd tlb invalidation
+ * Engine 17: Gart flushes
+ */
+ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
@@ -408,9 +414,9 @@ static int gmc_v9_0_late_init(void *handle)
ring->funcs->vmhub);
}
- /* Engine 17 is used for GART flushes */
+ /* Engine 16 is used for KFD and 17 for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ BUG_ON(vm_inv_eng[i] > 16);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
@@ -682,8 +688,17 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_mmhub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
case CHIP_RAVEN:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
default:
break;
@@ -713,12 +728,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- /* After HDP is initialized, flush HDP.*/
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
-
switch (adev->asic_type) {
case CHIP_RAVEN:
mmhub_v1_0_initialize_power_gating(adev);
@@ -736,13 +745,16 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
- tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
- WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+ /* After HDP is initialized, flush HDP.*/
+ if (adev->flags & AMD_IS_APU)
+ nbio_v7_0_hdp_flush(adev);
+ else
+ nbio_v6_1_hdp_flush(adev);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -751,7 +763,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
-
gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -770,17 +781,11 @@ static int gmc_v9_0_hw_init(void *handle)
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
- u32 tmp;
-
/* Lockout access through VGA aperture*/
- tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
/* disable VGA render */
- tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
r = gmc_v9_0_gart_enable(adev);
@@ -822,9 +827,7 @@ static int gmc_v9_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v9_0_hw_fini(adev);
-
- return 0;
+ return gmc_v9_0_hw_fini(adev);
}
static int gmc_v9_0_resume(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 7a0ea27ac429..bd592cb39f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,6 +208,34 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * iceland_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
+/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -412,6 +440,7 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
+ .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3bbf2ccfca89..f33d1ffdb20b 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,7 +42,6 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable);
@@ -64,7 +63,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_uvd(void *handle, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1245,8 +1244,9 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
+static void kv_dpm_enable_bapm(void *handle, bool enable)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@@ -1672,8 +1672,9 @@ static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
return kv_enable_acp_dpm(adev, !gate);
}
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@@ -1868,10 +1869,11 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
return ret;
}
-static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
+static int kv_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
ret = kv_force_dpm_highest(adev);
@@ -1892,8 +1894,9 @@ static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -1907,8 +1910,9 @@ static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static int kv_dpm_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -1981,8 +1985,9 @@ static int kv_dpm_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void kv_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -2848,9 +2853,10 @@ static int kv_dpm_init(struct amdgpu_device *adev)
}
static void
-kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+kv_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
u32 current_index =
(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
@@ -2875,11 +2881,12 @@ kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
}
static void
-kv_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+kv_dpm_print_power_state(void *handle, void *request_ps)
{
int i;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct kv_ps *ps = kv_get_ps(rps);
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -2905,13 +2912,14 @@ static void kv_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
-static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void kv_dpm_display_configuration_changed(void *handle)
{
}
-static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
@@ -2921,18 +2929,20 @@ static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->levels[requested_state->num_levels - 1].sclk;
}
-static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
return pi->sys_info.bootup_uma_clk;
}
/* get temperature in millidegrees */
-static int kv_dpm_get_temp(struct amdgpu_device *adev)
+static int kv_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = RREG32_SMC(0xC0300E0C);
@@ -2950,7 +2960,6 @@ static int kv_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kv_dpm_set_dpm_funcs(adev);
kv_dpm_set_irq_funcs(adev);
return 0;
@@ -2960,16 +2969,10 @@ static int kv_dpm_late_init(void *handle)
{
/* powerdown unused blocks for now */
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret;
if (!amdgpu_dpm)
return 0;
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
kv_dpm_powergate_acp(adev, true);
kv_dpm_powergate_samu(adev, true);
@@ -3031,7 +3034,6 @@ static int kv_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
kv_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -3222,14 +3224,17 @@ static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}
-static int kv_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int kv_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct kv_ps *kv_cps;
struct kv_ps *kv_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
@@ -3262,9 +3267,10 @@ static int kv_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int kv_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
uint32_t sclk;
u32 pl_index =
@@ -3312,7 +3318,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
.set_powergating_state = kv_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
+const struct amd_pm_funcs kv_dpm_funcs = {
.get_temperature = &kv_dpm_get_temp,
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
@@ -3330,12 +3336,6 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.read_sensor = &kv_dpm_read_sensor,
};
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &kv_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
.set = kv_dpm_set_interrupt_state,
.process = kv_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 74cb647da30e..cc21c4bdec27 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -273,7 +273,7 @@ static const struct pctl_data pctl0_data[] = {
{0x135, 0x12a810},
{0x149, 0x7a82c}
};
-#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
+#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
#define PCTL0_RENG_EXEC_END_PTR 0x151
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
@@ -309,7 +309,7 @@ static const struct pctl_data pctl1_data[] = {
{0x1f0, 0x5000a7f6},
{0x1f1, 0x5000a7e4}
};
-#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
+#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
#define PCTL1_RENG_EXEC_END_PTR 0x1f1
#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
@@ -561,6 +561,13 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 2812d88a8bdd..b4906d2f30d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -183,6 +183,12 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
return r;
}
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 1e91b9a1c591..67e78576a9eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,7 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 5000
+#define AI_MAILBOX_TIMEDOUT 12000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index c791d73d2d54..f13dc6cc158f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -23,7 +23,7 @@
#ifndef __MXGPU_VI_H__
#define __MXGPU_VI_H__
-#define VI_MAILBOX_TIMEDOUT 5000
+#define VI_MAILBOX_TIMEDOUT 12000
#define VI_MAILBOX_RESET_TIME 12
/* VI mailbox messages request */
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 045988b18bc3..904a1bab9b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -215,31 +215,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+ .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
+ .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+ .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
+};
-int nbio_v6_1_init(struct amdgpu_device *adev)
-{
- nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
- nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
- nbio_v6_1_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
- nbio_v6_1_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
-
- return 0;
-}
+const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
+ .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
+ .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
+};
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 686e4b4d296a..14ca8d45a46c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,8 +26,8 @@
#include "soc15_common.h"
-extern struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-extern struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
+extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
int nbio_v6_1_init(struct amdgpu_device *adev);
u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 11b70d601922..f802b973410a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -185,28 +185,24 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
-struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
+ .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
+ .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
+ .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+};
-int nbio_v7_0_init(struct amdgpu_device *adev)
-{
- nbio_v7_0_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
- nbio_v7_0_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
- nbio_v7_0_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
- nbio_v7_0_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
-
- return 0;
-}
+const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
+ .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
+ .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 054ff49427e6..df8fa90f40d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,8 +26,8 @@
#include "soc15_common.h"
-extern struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-extern struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
int nbio_v7_0_init(struct amdgpu_device *adev);
u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index f7cf994b1da2..4e20d91d5d50 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -35,6 +35,8 @@
#include "raven1/GC/gc_9_1_offset.h"
#include "raven1/SDMA0/sdma0_4_1_offset.h"
+MODULE_FIRMWARE("amdgpu/raven_asd.bin");
+
static int
psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
{
@@ -136,15 +138,13 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
{
int ret;
uint64_t fw_mem_mc_addr = ucode->mc_addr;
- struct common_firmware_header *header;
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
- header = (struct common_firmware_header *)ucode->fw;
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes);
+ cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
if (ret)
@@ -209,7 +209,7 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -229,6 +229,19 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
+ return ret;
+}
+
+int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v10_0_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Fail to stop psp ring\n");
+
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -244,16 +257,31 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
struct amdgpu_device *adev = psp->adev;
+ uint32_t ring_size_dw = ring->ring_size / 4;
+ uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
- if ((psp_write_ptr_reg % ring->ring_size) == 0)
- write_frame = ring->ring_mem;
+ if ((psp_write_ptr_reg % ring_size_dw) == 0)
+ write_frame = ring_buffer_start;
else
- write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4));
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ return -EINVAL;
+ }
+
+ /* Initialize KM RB frame */
+ memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
/* Update KM RB frame */
write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
@@ -263,8 +291,7 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame->fence_value = index;
/* Update the write Pointer in DWORDs */
- psp_write_ptr_reg += sizeof(struct psp_gfx_rb_frame) / 4;
- psp_write_ptr_reg = (psp_write_ptr_reg >= ring->ring_size) ? 0 : psp_write_ptr_reg;
+ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
@@ -390,3 +417,10 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
return true;
}
+
+
+int psp_v10_0_mode1_reset(struct psp_context *psp)
+{
+ DRM_INFO("psp mode 1 reset not supported now! \n");
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
index e76cde2f01f9..451e8308303f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -34,6 +34,8 @@ extern int psp_v10_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v10_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v10_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v10_0_cmd_submit(struct psp_context *psp,
@@ -43,4 +45,6 @@ extern int psp_v10_0_cmd_submit(struct psp_context *psp,
extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
+
+extern int psp_v10_0_mode1_reset(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 2a535a4b8d5b..c7bcfe8e286c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -319,7 +319,7 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -339,6 +339,19 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
+ return ret;
+}
+
+int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v3_1_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Fail to stop psp ring\n");
+
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -354,6 +367,9 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
struct amdgpu_device *adev = psp->adev;
uint32_t ring_size_dw = ring->ring_size / 4;
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
@@ -365,9 +381,16 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
/* write_frame ptr increments by size of rb_frame in bytes */
/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring->ring_mem;
+ write_frame = ring_buffer_start;
else
- write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ return -EINVAL;
+ }
/* Initialize KM RB frame */
memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
@@ -517,3 +540,37 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
+
+int psp_v3_1_mode1_reset(struct psp_context *psp)
+{
+ int ret;
+ uint32_t offset;
+ struct amdgpu_device *adev = psp->adev;
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+
+ if (ret) {
+ DRM_INFO("psp is not working correctly before mode1 reset!\n");
+ return -EINVAL;
+ }
+
+ /*send the mode 1 reset command*/
+ WREG32(offset, 0x70000);
+
+ mdelay(1000);
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+
+ if (ret) {
+ DRM_INFO("psp mode 1 reset failed!\n");
+ return -EINVAL;
+ }
+
+ DRM_INFO("psp mode1 reset succeed \n");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
index 9dcd0b25c4c6..b05dbada7751 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
@@ -41,6 +41,8 @@ extern int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v3_1_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v3_1_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_cmd_submit(struct psp_context *psp,
@@ -51,4 +53,5 @@ extern bool psp_v3_1_compare_sram_data(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp);
+extern int psp_v3_1_mode1_reset(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f2d0710258cb..67f375bfe452 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -561,21 +561,11 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA0);
- if (r)
- return -EINVAL;
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
- }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ r = sdma_v2_4_load_microcode(adev);
+ if (r)
+ return r;
}
/* halt the engine before programing */
@@ -1324,8 +1314,13 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v2_4_vm_copy_pte,
+
.write_pte = sdma_v2_4_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x1fffff >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b1de44f22824..6d06f8eb659f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -379,8 +379,10 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
+ u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
+
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
+ WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
@@ -641,10 +643,11 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
+ u64 wptr_gpu_addr;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -707,6 +710,20 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
}
WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
+ lower_32_bits(wptr_gpu_addr));
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
+ if (amdgpu_sriov_vf(adev))
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ else
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
+
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -802,23 +819,12 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
*/
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
- int r, i;
+ int r;
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- r = sdma_v3_0_load_microcode(adev);
- if (r)
- return r;
- } else {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- (i == 0) ?
- AMDGPU_UCODE_ID_SDMA0 :
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
- }
- }
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ r = sdma_v3_0_load_microcode(adev);
+ if (r)
+ return r;
}
/* disable sdma engine before programing it */
@@ -1713,11 +1719,11 @@ static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
}
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
- .copy_max_bytes = 0x1fffff,
+ .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
- .fill_max_bytes = 0x1fffff,
+ .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};
@@ -1731,8 +1737,14 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v3_0_vm_copy_pte,
+
.write_pte = sdma_v3_0_vm_write_pte,
+
+ /* not 0x3fffff due to HW limitation */
+ .set_max_nums_pte_pde = 0x3fffe0 >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd7c72aaafa6..46009db3d195 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,7 +54,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 golden_settings_sdma_4[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
+ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
@@ -89,7 +89,7 @@ static const u32 golden_settings_sdma_vg10[] = {
static const u32 golden_settings_sdma_4_1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
+ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
@@ -371,7 +371,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- struct nbio_hdp_flush_reg *nbio_hf_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg;
if (ring->adev->flags & AMD_IS_APU)
nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
@@ -398,7 +398,7 @@ static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0));
+ amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
amdgpu_ring_write(ring, 1);
}
@@ -1264,6 +1264,11 @@ static int sdma_v4_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+
return 0;
}
@@ -1714,8 +1719,13 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v4_0_vm_copy_pte,
+
.write_pte = sdma_v4_0_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x400000 >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 112969f3301a..3fa2fbf8c9a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -887,8 +887,13 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+ .copy_pte_num_dw = 5,
.copy_pte = si_dma_vm_copy_pte,
+
.write_pte = si_dma_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0xffff8 >> 3,
+ .set_pte_pde_num_dw = 9,
.set_pte_pde = si_dma_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index d63873f3f574..51fd0c9a20a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -1847,7 +1847,6 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
@@ -3060,9 +3059,9 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
return ret;
}
-static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool si_dpm_vblank_too_short(void *handle)
{
-
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
@@ -3871,9 +3870,10 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad
0 : -EINVAL;
}
-static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+static int si_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
struct si_ps *ps = si_get_ps(rps);
u32 levels = ps->performance_level_count;
@@ -6575,11 +6575,12 @@ static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -6600,9 +6601,10 @@ static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
u32 duty, duty100;
@@ -6633,8 +6635,10 @@ static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
if (mode) {
/* stop auto-manage */
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -6649,8 +6653,9 @@ static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
-static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 si_dpm_get_fan_control_mode(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
@@ -6946,8 +6951,9 @@ static void si_dpm_disable(struct amdgpu_device *adev)
ni_update_current_ps(adev, boot_ps);
}
-static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -6984,8 +6990,9 @@ static int si_power_control_set_level(struct amdgpu_device *adev)
return 0;
}
-static int si_dpm_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
struct amdgpu_ps *old_ps = &eg_pi->current_rps;
@@ -7086,8 +7093,9 @@ static int si_dpm_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void si_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
@@ -7103,8 +7111,10 @@ void si_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
-static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void si_dpm_display_configuration_changed(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
si_program_display_gap(adev);
}
@@ -7486,9 +7496,10 @@ static void si_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
-static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
@@ -7593,11 +7604,6 @@ static int si_dpm_late_init(void *handle)
if (!amdgpu_dpm)
return 0;
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
ret = si_set_temperature_range(adev);
if (ret)
return ret;
@@ -7753,7 +7759,6 @@ static int si_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
si_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -7860,10 +7865,11 @@ static int si_dpm_set_powergating_state(void *handle,
}
/* get temperature in millidegrees */
-static int si_dpm_get_temp(struct amdgpu_device *adev)
+static int si_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
CTF_TEMP_SHIFT;
@@ -7878,8 +7884,9 @@ static int si_dpm_get_temp(struct amdgpu_device *adev)
return actual_temp;
}
-static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 si_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
@@ -7889,8 +7896,9 @@ static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
-static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 si_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
@@ -7900,9 +7908,11 @@ static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}
-static void si_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+static void si_dpm_print_power_state(void *handle,
+ void *current_ps)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
int i;
@@ -7927,7 +7937,6 @@ static int si_dpm_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- si_dpm_set_dpm_funcs(adev);
si_dpm_set_irq_funcs(adev);
return 0;
}
@@ -7942,20 +7951,23 @@ static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
(si_cpl1->vddci == si_cpl2->vddci));
}
-static int si_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int si_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct si_ps *si_cps;
struct si_ps *si_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
- si_cps = si_get_ps(cps);
- si_rps = si_get_ps(rps);
+ si_cps = si_get_ps((struct amdgpu_ps *)cps);
+ si_rps = si_get_ps((struct amdgpu_ps *)rps);
if (si_cps == NULL) {
printk("si_cps is NULL\n");
@@ -7983,9 +7995,10 @@ static int si_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int si_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
@@ -8041,7 +8054,7 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
.set_powergating_state = si_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+const struct amd_pm_funcs si_dpm_funcs = {
.get_temperature = &si_dpm_get_temp,
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
@@ -8062,12 +8075,6 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.read_sensor = &si_dpm_read_sensor,
};
-static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &si_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
.set = si_dpm_set_interrupt_state,
.process = si_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 51ce21c5f4fb..9fe343de3477 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -246,6 +246,7 @@ enum si_display_gap
};
extern const struct amd_ip_funcs si_dpm_ip_funcs;
+extern const struct amd_pm_funcs si_dpm_funcs;
struct ni_leakage_coeffients
{
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index ce25e03a077d..d2c6b80309c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,6 +118,19 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
+/**
+ * si_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ /* Process all interrupts */
+ return true;
+}
+
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -288,6 +301,7 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
+ .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f2c3a49f73a0..4e67fe1e7955 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -101,7 +101,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- struct nbio_pcie_index_data *nbio_pcie_id;
+ const struct nbio_pcie_index_data *nbio_pcie_id;
if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@@ -122,7 +122,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- struct nbio_pcie_index_data *nbio_pcie_id;
+ const struct nbio_pcie_index_data *nbio_pcie_id;
if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_VEGA10)
- return adev->clock.spll.reference_freq/4;
- else
- return adev->clock.spll.reference_freq;
+ return adev->clock.spll.reference_freq;
}
@@ -407,18 +404,27 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int soc15_asic_reset(struct amdgpu_device *adev)
{
u32 i;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ dev_info(adev->dev, "GPU reset\n");
/* disable BM */
pci_clear_master(adev->pdev);
- /* reset */
- amdgpu_pci_config_reset(adev);
- udelay(100);
+ pci_save_state(adev->pdev);
+
+ for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP){
+ adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
+ break;
+ }
+ }
+
+ pci_restore_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -430,14 +436,6 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
udelay(1);
}
-}
-
-static int soc15_asic_reset(struct amdgpu_device *adev)
-{
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- soc15_gpu_pci_config_reset(adev);
-
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
@@ -534,6 +532,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
@@ -547,6 +551,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
@@ -603,21 +613,6 @@ static int soc15_common_early_init(void *handle)
(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
psp_enabled = true;
- /*
- * nbio need be used for both sdma and gfx9, but only
- * initializes once
- */
- switch(adev->asic_type) {
- case CHIP_VEGA10:
- nbio_v6_1_init(adev);
- break;
- case CHIP_RAVEN:
- nbio_v7_0_init(adev);
- break;
- default:
- return -EINVAL;
- }
-
adev->rev_id = soc15_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 923df2c0e535..aa4e320e31f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,6 +219,34 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * tonga_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
+/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -478,6 +506,7 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
+ .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 62cd16a23921..920910ac8663 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -38,6 +38,8 @@
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
@@ -48,6 +50,18 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
+* uvd_v6_0_enc_support - get encode support status
+*
+* @adev: amdgpu_device pointer
+*
+* Returns the current hardware encode support status
+*/
+static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
+{
+ return ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_POLARIS12));
+}
+
+/**
* uvd_v6_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
@@ -62,6 +76,22 @@ static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
}
/**
+ * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc read pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ return RREG32(mmUVD_RB_RPTR);
+ else
+ return RREG32(mmUVD_RB_RPTR2);
+}
+/**
* uvd_v6_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
@@ -76,6 +106,23 @@ static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
+ * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc write pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ return RREG32(mmUVD_RB_WPTR);
+ else
+ return RREG32(mmUVD_RB_WPTR2);
+}
+
+/**
* uvd_v6_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
@@ -89,6 +136,237 @@ static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
+/**
+ * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ WREG32(mmUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ else
+ WREG32(mmUVD_RB_WPTR2,
+ lower_32_bits(ring->wptr));
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
+ *
+ * @ring: the engine to test on
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ unsigned i;
+ int r;
+
+ r = amdgpu_ring_alloc(ring, 16);
+ if (r) {
+ DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ring_get_rptr(ring) != rptr)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed\n",
+ ring->idx);
+ r = -ETIMEDOUT;
+ }
+
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test
+ */
+static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t dummy;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ dummy = ib->gpu_addr + 1024;
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = dma_fence_get(f);
+ if (r)
+ goto err;
+
+ amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @fence: optional fence to return
+ *
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ uint32_t handle,
+ bool direct, struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t dummy;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ dummy = ib->gpu_addr + 1024;
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ if (direct) {
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = dma_fence_get(f);
+ if (r)
+ goto err;
+
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+ }
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
+ *
+ * @ring: the engine to test on
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct dma_fence *fence = NULL;
+ long r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ goto error;
+ }
+
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ } else {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ }
+error:
+ dma_fence_put(fence);
+ return r;
+}
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -98,6 +376,12 @@ static int uvd_v6_0_early_init(void *handle)
return -ENOENT;
uvd_v6_0_set_ring_funcs(adev);
+
+ if (uvd_v6_0_enc_support(adev)) {
+ adev->uvd.num_enc_rings = 2;
+ uvd_v6_0_set_enc_ring_funcs(adev);
+ }
+
uvd_v6_0_set_irq_funcs(adev);
return 0;
@@ -106,7 +390,7 @@ static int uvd_v6_0_early_init(void *handle)
static int uvd_v6_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
@@ -114,10 +398,31 @@ static int uvd_v6_0_sw_init(void *handle)
if (r)
return r;
+ /* UVD ENC TRAP */
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
+ if (r)
+ return r;
+ }
+ }
+
r = amdgpu_uvd_sw_init(adev);
if (r)
return r;
+ if (uvd_v6_0_enc_support(adev)) {
+ struct amd_sched_rq *rq;
+ ring = &adev->uvd.ring_enc[0];
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq, amdgpu_sched_jobs);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+ return r;
+ }
+ }
+
r = amdgpu_uvd_resume(adev);
if (r)
return r;
@@ -125,19 +430,38 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (r)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.ring_enc[i];
+ sprintf(ring->name, "uvd_enc%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (r)
+ return r;
+ }
+ }
return r;
}
static int uvd_v6_0_sw_fini(void *handle)
{
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
+ if (uvd_v6_0_enc_support(adev)) {
+ amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+ }
+
return amdgpu_uvd_sw_fini(adev);
}
@@ -153,7 +477,7 @@ static int uvd_v6_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t tmp;
- int r;
+ int i, r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
@@ -193,9 +517,25 @@ static int uvd_v6_0_hw_init(void *handle)
amdgpu_ring_commit(ring);
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.ring_enc[i];
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+ }
+ }
+
done:
- if (!r)
- DRM_INFO("UVD initialized successfully.\n");
+ if (!r) {
+ if (uvd_v6_0_enc_support(adev))
+ DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+ else
+ DRM_INFO("UVD initialized successfully.\n");
+ }
return r;
}
@@ -512,6 +852,22 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
+ if (uvd_v6_0_enc_support(adev)) {
+ ring = &adev->uvd.ring_enc[0];
+ WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->uvd.ring_enc[1];
+ WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
+ }
+
return 0;
}
@@ -575,6 +931,26 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @fence: fence to emit
+ *
+ * Write enc a fence and a trap command to the ring.
+ */
+static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
+{
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
+ amdgpu_ring_write(ring, addr);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
+}
+
+/**
* uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
*
* @ring: amdgpu_ring pointer
@@ -665,6 +1041,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
+/**
+ * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write enc ring commands to execute the indirect buffer
+ */
+static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
@@ -716,6 +1110,33 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
+static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+}
+
+static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+}
+
+static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vm_id, uint64_t pd_addr)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
+ amdgpu_ring_write(ring, vm_id);
+}
+
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -823,8 +1244,31 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ bool int_handled = true;
DRM_DEBUG("IH: UVD TRAP\n");
- amdgpu_fence_process(&adev->uvd.ring);
+
+ switch (entry->src_id) {
+ case 124:
+ amdgpu_fence_process(&adev->uvd.ring);
+ break;
+ case 119:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+ else
+ int_handled = false;
+ break;
+ case 120:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+ else
+ int_handled = false;
+ break;
+ }
+
+ if (false == int_handled)
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+
return 0;
}
@@ -1151,6 +1595,33 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.end_use = amdgpu_uvd_ring_end_use,
};
+static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD_ENC,
+ .align_mask = 0x3f,
+ .nop = HEVC_ENC_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v6_0_enc_ring_get_rptr,
+ .get_wptr = uvd_v6_0_enc_ring_get_wptr,
+ .set_wptr = uvd_v6_0_enc_ring_set_wptr,
+ .emit_frame_size =
+ 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
+ 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* uvd_v6_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
+ .emit_ib = uvd_v6_0_enc_ring_emit_ib,
+ .emit_fence = uvd_v6_0_enc_ring_emit_fence,
+ .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
+ .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
+ .test_ring = uvd_v6_0_enc_ring_test_ring,
+ .test_ib = uvd_v6_0_enc_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = uvd_v6_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+};
+
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
if (adev->asic_type >= CHIP_POLARIS10) {
@@ -1162,6 +1633,16 @@ static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
}
}
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
+
+ DRM_INFO("UVD ENC is enabled in VM mode\n");
+}
+
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
.set = uvd_v6_0_set_interrupt_state,
.process = uvd_v6_0_process_interrupt,
@@ -1169,7 +1650,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->uvd.irq.num_types = 1;
+ if (uvd_v6_0_enc_support(adev))
+ adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
+ else
+ adev->uvd.irq.num_types = 1;
+
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 23a85750edd6..6634545060fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -592,11 +592,7 @@ static int uvd_v7_0_suspend(void *handle)
if (r)
return r;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU))
- r = amdgpu_uvd_suspend(adev);
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v7_0_resume(void *handle)
@@ -604,12 +600,10 @@ static int uvd_v7_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
- }
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
return uvd_v7_0_hw_init(adev);
}
@@ -1161,7 +1155,7 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
*/
static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 11134d5f7443..75745544600a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1011,10 +1011,6 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: VCE\n");
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
- VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
- ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
-
switch (entry->src_data[0]) {
case 0:
case 1:
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21e7b88401e1..0450ac5ba6b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -812,7 +812,7 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
*/
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 56150e8d1ed2..697325737ba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -219,14 +219,95 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
wptr, adev->irq.ih.rptr, tmp);
adev->irq.ih.rptr = tmp;
- tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+ tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
}
return (wptr & adev->irq.ih.ptr_mask);
}
/**
+ * vega10_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 dw0, dw3, dw4, dw5;
+ u16 pasid;
+ u64 addr, key;
+ struct amdgpu_vm *vm;
+ int r;
+
+ dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+ dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
+ dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
+
+ /* Filter retry page faults, let only the first one pass. If
+ * there are too many outstanding faults, ignore them until
+ * some faults get cleared.
+ */
+ switch (dw0 & 0xff) {
+ case AMDGPU_IH_CLIENTID_VMC:
+ case AMDGPU_IH_CLIENTID_UTCL2:
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ pasid = dw3 & 0xffff;
+ /* No PASID, can't identify faulting process */
+ if (!pasid)
+ return true;
+
+ /* Not a retry fault, check fault credit */
+ if (!(dw5 & 0x80)) {
+ if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
+ goto ignore_iv;
+ return true;
+ }
+
+ addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
+ key = AMDGPU_VM_FAULT(pasid, addr);
+ r = amdgpu_ih_add_fault(adev, key);
+
+ /* Hash table is full or the fault is already being processed,
+ * ignore further page faults
+ */
+ if (r != 0)
+ goto ignore_iv;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (WARN_ON_ONCE(!vm)) {
+ /* VM not found, process it normally */
+ amdgpu_ih_clear_fault(adev, key);
+ return true;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_ih_clear_fault(adev, key);
+ goto ignore_iv;
+ }
+
+ /* It's the first fault for this address, process it normally */
+ return true;
+
+ignore_iv:
+ adev->irq.ih.rptr += 32;
+ return false;
+}
+
+/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -310,6 +391,14 @@ static int vega10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
+ if (!adev->irq.ih.faults)
+ return -ENOMEM;
+ INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
+ AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+ spin_lock_init(&adev->irq.ih.faults->lock);
+ adev->irq.ih.faults->count = 0;
+
r = amdgpu_irq_init(adev);
return r;
@@ -322,6 +411,9 @@ static int vega10_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
+ kfree(adev->irq.ih.faults);
+ adev->irq.ih.faults = NULL;
+
return 0;
}
@@ -410,6 +502,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
+ .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9ff69b90df36..3a4c2fa7e36d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,6 +77,7 @@
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
+#include "amdgpu_dm.h"
/*
* Indirect registers accessor
@@ -1254,7 +1255,6 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
@@ -1271,7 +1271,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_MC,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1289,7 +1290,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_SDMA,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
@@ -1307,7 +1309,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_HDP,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
@@ -1321,7 +1324,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_LS,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
if (state == AMD_CG_STATE_UNGATE)
@@ -1333,7 +1337,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
@@ -1347,7 +1352,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_LS,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
@@ -1361,7 +1367,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
}
@@ -1496,6 +1503,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1512,6 +1523,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1530,6 +1545,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1544,6 +1563,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1561,6 +1584,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index a6485254a169..dbf3703cbd1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -465,6 +465,16 @@
#define VCE_CMD_UPDATE_PTB 0x00000107
#define VCE_CMD_FLUSH_TLB 0x00000108
+/* HEVC ENC */
+#define HEVC_ENC_CMD_NO_OP 0x00000000
+#define HEVC_ENC_CMD_END 0x00000001
+#define HEVC_ENC_CMD_FENCE 0x00000003
+#define HEVC_ENC_CMD_TRAP 0x00000004
+#define HEVC_ENC_CMD_IB_VM 0x00000102
+#define HEVC_ENC_CMD_WAIT_GE 0x00000106
+#define HEVC_ENC_CMD_UPDATE_PTB 0x00000107
+#define HEVC_ENC_CMD_FLUSH_TLB 0x00000108
+
/* mmPA_SC_RASTER_CONFIG mask */
#define RB_MAP_PKR0(x) ((x) << 0)
#define RB_MAP_PKR0_MASK (0x3 << 0)
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index e13c67c8d2c0..bc5a2945bd2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,6 @@
config HSA_AMD
tristate "HSA kernel driver for AMD GPU devices"
- depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
+ depends on DRM_AMDGPU && AMD_IOMMU_V2 && X86_64
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 7bb0bc0ca3d6..342c2d937b17 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for Heterogenous System Architecture support for AMD GPU devices
#
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 211fc48697fa..3d5ccb3755d4 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -36,6 +36,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
/* Do not process in ISR, just request it to be forwarded to WQ. */
return (pasid != 0) &&
(ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+ ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE);
}
@@ -46,6 +47,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
unsigned int pasid;
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
+ uint32_t context_id = ihre->data & 0xfffffff;
pasid = (ihre->ring_id & 0xffff0000) >> 16;
@@ -53,9 +55,11 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
return;
if (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, 0, 0);
+ kfd_signal_event_interrupt(pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SDMA_TRAP)
+ kfd_signal_event_interrupt(pasid, context_id, 28);
else if (ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, ihre->data & 0xFF, 8);
+ kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
index 79a16d24c1b8..109298b9d507 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
@@ -32,9 +32,10 @@ struct cik_ih_ring_entry {
uint32_t reserved;
};
-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
+#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+#define CIK_INTSRC_SDMA_TRAP 0xE0
#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 660b3fbade41..505d39156acd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -282,8 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
- 0, q_properties.type, &queue_id);
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
if (err != 0)
goto err_create_queue;
@@ -451,8 +450,8 @@ static int kfd_ioctl_dbg_register(struct file *filep,
return -EINVAL;
}
- mutex_lock(kfd_get_dbgmgr_mutex());
mutex_lock(&p->mutex);
+ mutex_lock(kfd_get_dbgmgr_mutex());
/*
* make sure that we have pdd, if this the first queue created for
@@ -480,8 +479,8 @@ static int kfd_ioctl_dbg_register(struct file *filep,
}
out:
- mutex_unlock(&p->mutex);
mutex_unlock(kfd_get_dbgmgr_mutex());
+ mutex_unlock(&p->mutex);
return status;
}
@@ -836,15 +835,12 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_wait_events_args *args = data;
- enum kfd_event_wait_result wait_result;
int err;
err = kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &wait_result);
-
- args->wait_result = wait_result;
+ args->timeout, &args->wait_result);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index 0aa021aa0aa1..c407f6bd9956 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -184,9 +184,10 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
struct kernel_queue *kq = NULL;
int status;
+ properties.type = KFD_QUEUE_TYPE_DIQ;
+
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
- &properties, 0, KFD_QUEUE_TYPE_DIQ,
- &qid);
+ &properties, &qid);
if (status) {
pr_err("Failed to create DIQ\n");
@@ -769,13 +770,8 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
struct dbg_wave_control_info wac_info;
- int temp;
- int first_vmid_to_scan = 8;
- int last_vmid_to_scan = 15;
-
- first_vmid_to_scan = ffs(dev->shared_resources.compute_vmid_bitmap) - 1;
- temp = dev->shared_resources.compute_vmid_bitmap >> first_vmid_to_scan;
- last_vmid_to_scan = first_vmid_to_scan + ffz(temp);
+ int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
+ int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
reg_sq_cmd.u32All = 0;
status = 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 61fff25b4ce7..621a3b53a038 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -92,6 +92,8 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+static int kfd_resume(struct kfd_dev *kfd);
+
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
size_t i;
@@ -168,23 +170,9 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
pasid_limit = min_t(unsigned int,
(unsigned int)(1 << kfd->device_info->max_pasid_bits),
iommu_info.max_pasids);
- /*
- * last pasid is used for kernel queues doorbells
- * in the future the last pasid might be used for a kernel thread.
- */
- pasid_limit = min_t(unsigned int,
- pasid_limit,
- kfd->doorbell_process_limit - 1);
-
- err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err < 0) {
- dev_err(kfd_device, "error initializing iommu device\n");
- return false;
- }
if (!kfd_set_pasid_limit(pasid_limit)) {
dev_err(kfd_device, "error setting pasid limit\n");
- amd_iommu_free_device(kfd->pdev);
return false;
}
@@ -196,7 +184,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
if (dev)
- kfd_unbind_process_from_device(dev, pasid);
+ kfd_process_iommu_unbind_callback(dev, pasid);
}
/*
@@ -231,6 +219,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
+ kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+ - kfd->vm_info.first_vmid_kfd + 1;
+
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -280,29 +273,22 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_interrupt_error;
}
- if (!device_iommu_pasid_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing iommuv2 for device %x:%x\n",
- kfd->pdev->vendor, kfd->pdev->device);
- goto device_iommu_pasid_error;
- }
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
- iommu_pasid_shutdown_callback);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-
kfd->dqm = device_queue_manager_init(kfd);
if (!kfd->dqm) {
dev_err(kfd_device, "Error initializing queue manager\n");
goto device_queue_manager_error;
}
- if (kfd->dqm->ops.start(kfd->dqm)) {
+ if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
- "Error starting queue manager for device %x:%x\n",
+ "Error initializing iommuv2 for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
- goto dqm_start_error;
+ goto device_iommu_pasid_error;
}
+ if (kfd_resume(kfd))
+ goto kfd_resume_error;
+
kfd->dbgmgr = NULL;
kfd->init_complete = true;
@@ -314,11 +300,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto out;
-dqm_start_error:
+kfd_resume_error:
+device_iommu_pasid_error:
device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
- amd_iommu_free_device(kfd->pdev);
-device_iommu_pasid_error:
kfd_interrupt_exit(kfd);
kfd_interrupt_error:
kfd_topology_remove_device(kfd);
@@ -338,8 +323,8 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
+ kgd2kfd_suspend(kfd);
device_queue_manager_uninit(kfd->dqm);
- amd_iommu_free_device(kfd->pdev);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
kfd_doorbell_fini(kfd);
@@ -352,35 +337,59 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
- if (kfd->init_complete) {
- kfd->dqm->ops.stop(kfd->dqm);
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
- amd_iommu_free_device(kfd->pdev);
- }
+ if (!kfd->init_complete)
+ return;
+
+ kfd->dqm->ops.stop(kfd->dqm);
+
+ kfd_unbind_processes_from_device(kfd);
+
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+ amd_iommu_free_device(kfd->pdev);
}
int kgd2kfd_resume(struct kfd_dev *kfd)
{
- unsigned int pasid_limit;
- int err;
+ if (!kfd->init_complete)
+ return 0;
- pasid_limit = kfd_get_pasid_limit();
+ return kfd_resume(kfd);
- if (kfd->init_complete) {
- err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err < 0) {
- dev_err(kfd_device, "failed to initialize iommu\n");
- return -ENXIO;
- }
+}
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
- iommu_pasid_shutdown_callback);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
- kfd->dqm->ops.start(kfd->dqm);
+static int kfd_resume(struct kfd_dev *kfd)
+{
+ int err = 0;
+ unsigned int pasid_limit = kfd_get_pasid_limit();
+
+ err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+ if (err)
+ return -ENXIO;
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev,
+ iommu_invalid_ppr_cb);
+
+ err = kfd_bind_processes_to_device(kfd);
+ if (err)
+ goto processes_bind_error;
+
+ err = kfd->dqm->ops.start(kfd->dqm);
+ if (err) {
+ dev_err(kfd_device,
+ "Error starting queue manager for device %x:%x\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto dqm_start_error;
}
- return 0;
+ return err;
+
+dqm_start_error:
+processes_bind_error:
+ amd_iommu_free_device(kfd->pdev);
+
+ return err;
}
/* This is called directly from KGD at ISR. */
@@ -394,7 +403,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
if (kfd->interrupts_active
&& interrupt_is_wanted(kfd, ih_ring_entry)
&& enqueue_ih_ring_entry(kfd, ih_ring_entry))
- schedule_work(&kfd->interrupt_work);
+ queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock(&kfd->interrupt_lock);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 53a66e821624..e202921c150e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -44,9 +44,14 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
-static int destroy_queues_cpsch(struct device_queue_manager *dqm,
- bool preempt_static_queues, bool lock);
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param);
+static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param);
+
+static int map_queues_cpsch(struct device_queue_manager *dqm);
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
@@ -113,11 +118,11 @@ static int allocate_vmid(struct device_queue_manager *dqm,
if (dqm->vmid_bitmap == 0)
return -ENOMEM;
- bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
+ bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
+ dqm->dev->vm_info.vmid_num_kfd);
clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
- /* Kaveri kfd vmid's starts from vmid 8 */
- allocated_vmid = bit + KFD_VMID_START_OFFSET;
+ allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -132,7 +137,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+ int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
@@ -184,6 +189,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
}
list_add(&q->list, &qpd->queues_list);
+ qpd->queue_count++;
if (q->properties.is_active)
dqm->queue_count++;
@@ -273,6 +279,9 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
dqm->dev->kfd2kgd->set_scratch_backing_va(
dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (!q->properties.is_active)
+ return 0;
+
retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
q->process->mm);
if (retval)
@@ -288,65 +297,74 @@ out_deallocate_hqd:
return retval;
}
-static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
+ * to avoid asynchronized access
+ */
+static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
int retval;
struct mqd_manager *mqd;
- retval = 0;
-
- mutex_lock(&dqm->lock);
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd)
+ return -ENOMEM;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (mqd == NULL) {
- retval = -ENOMEM;
- goto out;
- }
deallocate_hqd(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
- if (mqd == NULL) {
- retval = -ENOMEM;
- goto out;
- }
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q->sdma_id);
} else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
- retval = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ dqm->total_queue_count--;
retval = mqd->destroy_mqd(mqd, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
- QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ KFD_UNMAP_LATENCY_MS,
q->pipe, q->queue);
-
- if (retval)
- goto out;
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
- if (list_empty(&qpd->queues_list))
+ if (list_empty(&qpd->queues_list)) {
+ if (qpd->reset_wavefronts) {
+ pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
+ dqm->dev);
+ /* dbgdev_wave_reset_wavefronts has to be called before
+ * deallocate_vmid(), i.e. when vmid is still in use.
+ */
+ dbgdev_wave_reset_wavefronts(dqm->dev,
+ qpd->pqm->process);
+ qpd->reset_wavefronts = false;
+ }
+
deallocate_vmid(dqm, qpd, q);
+ }
+ qpd->queue_count--;
if (q->properties.is_active)
dqm->queue_count--;
- /*
- * Unconditionally decrement this counter, regardless of the queue's
- * type
- */
- dqm->total_queue_count--;
- pr_debug("Total of %d queues are accountable so far\n",
- dqm->total_queue_count);
+ return retval;
+}
-out:
+static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int retval;
+
+ mutex_lock(&dqm->lock);
+ retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
mutex_unlock(&dqm->lock);
+
return retval;
}
@@ -364,29 +382,56 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
goto out_unlock;
}
- if (q->properties.is_active)
- prev_active = true;
+ /* Save previous activity state for counters */
+ prev_active = q->properties.is_active;
+
+ /* Make sure the queue is unmapped before updating the MQD */
+ if (sched_policy != KFD_SCHED_POLICY_NO_HWS) {
+ retval = unmap_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ if (retval) {
+ pr_err("unmap queue failed\n");
+ goto out_unlock;
+ }
+ } else if (prev_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval) {
+ pr_err("destroy mqd failed\n");
+ goto out_unlock;
+ }
+ }
+
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
/*
- *
- * check active state vs. the previous state
- * and modify counter accordingly
+ * check active state vs. the previous state and modify
+ * counter accordingly. map_queues_cpsch uses the
+ * dqm->queue_count to determine whether a new runlist must be
+ * uploaded.
*/
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
- if ((q->properties.is_active) && (!prev_active))
+ if (q->properties.is_active && !prev_active)
dqm->queue_count++;
else if (!q->properties.is_active && prev_active)
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
- retval = execute_queues_cpsch(dqm, false);
+ retval = map_queues_cpsch(dqm);
+ else if (q->properties.is_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA))
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
out_unlock:
mutex_unlock(&dqm->lock);
return retval;
}
-static struct mqd_manager *get_mqd_manager_nocpsch(
+static struct mqd_manager *get_mqd_manager(
struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
struct mqd_manager *mqd;
@@ -407,7 +452,7 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
return mqd;
}
-static int register_process_nocpsch(struct device_queue_manager *dqm,
+static int register_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct device_process_node *n;
@@ -422,7 +467,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
list_add(&n->list, &dqm->queues);
- retval = dqm->ops_asic_specific.register_process(dqm, qpd);
+ retval = dqm->asic_ops.update_qpd(dqm, qpd);
dqm->processes_count++;
@@ -431,7 +476,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
return retval;
}
-static int unregister_process_nocpsch(struct device_queue_manager *dqm,
+static int unregister_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
@@ -507,13 +552,13 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
+ dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
return 0;
}
-static void uninitialize_nocpsch(struct device_queue_manager *dqm)
+static void uninitialize(struct device_queue_manager *dqm)
{
int i;
@@ -577,14 +622,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
return retval;
- q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
- q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
+ q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
pr_debug("SDMA id is: %d\n", q->sdma_id);
pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
- dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
@@ -613,8 +658,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
int i, mec;
struct scheduling_resources res;
- res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
- res.vmid_mask <<= KFD_VMID_START_OFFSET;
+ res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
res.queue_mask = 0;
for (i = 0; i < KGD_MAX_QUEUES; ++i) {
@@ -652,8 +696,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
- int retval;
-
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock);
@@ -661,16 +703,13 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
- retval = dqm->ops_asic_specific.initialize(dqm);
- if (retval)
- mutex_destroy(&dqm->lock);
+ dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
- return retval;
+ return 0;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
- struct device_process_node *node;
int retval;
retval = 0;
@@ -697,12 +736,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
- list_for_each_entry(node, &dqm->queues, list)
- if (node->qpd->pqm->process && dqm->dev)
- kfd_bind_process_to_device(dqm->dev,
- node->qpd->pqm->process);
-
- execute_queues_cpsch(dqm, true);
+ mutex_lock(&dqm->lock);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ mutex_unlock(&dqm->lock);
return 0;
fail_allocate_vidmem:
@@ -714,15 +750,10 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
- struct device_process_node *node;
- struct kfd_process_device *pdd;
-
- destroy_queues_cpsch(dqm, true, true);
+ mutex_lock(&dqm->lock);
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ mutex_unlock(&dqm->lock);
- list_for_each_entry(node, &dqm->queues, list) {
- pdd = qpd_to_pdd(node->qpd);
- pdd->bound = false;
- }
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
@@ -752,7 +783,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
- execute_queues_cpsch(dqm, false);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
mutex_unlock(&dqm->lock);
return 0;
@@ -763,12 +794,10 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
mutex_lock(&dqm->lock);
- /* here we actually preempt the DIQ */
- destroy_queues_cpsch(dqm, true, false);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
- execute_queues_cpsch(dqm, false);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
@@ -779,14 +808,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
mutex_unlock(&dqm->lock);
}
-static void select_sdma_engine_id(struct queue *q)
-{
- static int sdma_id;
-
- q->sdma_id = sdma_id;
- sdma_id = (sdma_id + 1) % 2;
-}
-
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd, int *allocate_vmid)
{
@@ -807,9 +828,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
goto out;
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- select_sdma_engine_id(q);
-
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+ retval = allocate_sdma_queue(dqm, &q->sdma_id);
+ if (retval)
+ goto out;
+ q->properties.sdma_queue_id =
+ q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_engine_id =
+ q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ }
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
@@ -818,16 +845,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
goto out;
}
- dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out;
list_add(&q->list, &qpd->queues_list);
+ qpd->queue_count++;
if (q->properties.is_active) {
dqm->queue_count++;
- retval = execute_queues_cpsch(dqm, false);
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -848,12 +877,12 @@ out:
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
- unsigned long timeout)
+ unsigned int timeout_ms)
{
- timeout += jiffies;
+ unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
while (*fence_addr != fence_value) {
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
return -ETIME;
}
@@ -863,44 +892,57 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int destroy_sdma_queues(struct device_queue_manager *dqm,
+static int unmap_sdma_queues(struct device_queue_manager *dqm,
unsigned int sdma_engine)
{
return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
sdma_engine);
}
-static int destroy_queues_cpsch(struct device_queue_manager *dqm,
- bool preempt_static_queues, bool lock)
+/* dqm->lock mutex has to be locked before calling this function */
+static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
- enum kfd_preempt_type_filter preempt_type;
- struct kfd_process_device *pdd;
- retval = 0;
+ if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+ return 0;
+
+ if (dqm->active_runlist)
+ return 0;
+
+ retval = pm_send_runlist(&dqm->packets, &dqm->queues);
+ if (retval) {
+ pr_err("failed to execute runlist\n");
+ return retval;
+ }
+ dqm->active_runlist = true;
+
+ return retval;
+}
+
+/* dqm->lock mutex has to be locked before calling this function */
+static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param)
+{
+ int retval = 0;
- if (lock)
- mutex_lock(&dqm->lock);
if (!dqm->active_runlist)
- goto out;
+ return retval;
pr_debug("Before destroying queues, sdma queue count is : %u\n",
dqm->sdma_queue_count);
if (dqm->sdma_queue_count > 0) {
- destroy_sdma_queues(dqm, 0);
- destroy_sdma_queues(dqm, 1);
+ unmap_sdma_queues(dqm, 0);
+ unmap_sdma_queues(dqm, 1);
}
- preempt_type = preempt_static_queues ?
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
- KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;
-
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
- preempt_type, 0, false, 0);
+ filter, filter_param, false, 0);
if (retval)
- goto out;
+ return retval;
*dqm->fence_addr = KFD_FENCE_INIT;
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
@@ -908,55 +950,29 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
- if (retval) {
- pdd = kfd_get_process_device_data(dqm->dev,
- kfd_get_process(current));
- pdd->reset_wavefronts = true;
- goto out;
- }
+ if (retval)
+ return retval;
+
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
-out:
- if (lock)
- mutex_unlock(&dqm->lock);
return retval;
}
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+/* dqm->lock mutex has to be locked before calling this function */
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param)
{
int retval;
- if (lock)
- mutex_lock(&dqm->lock);
-
- retval = destroy_queues_cpsch(dqm, false, false);
- if (retval) {
- pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
- goto out;
- }
-
- if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
- retval = 0;
- goto out;
- }
-
- if (dqm->active_runlist) {
- retval = 0;
- goto out;
- }
-
- retval = pm_send_runlist(&dqm->packets, &dqm->queues);
+ retval = unmap_queues_cpsch(dqm, filter, filter_param);
if (retval) {
- pr_err("failed to execute runlist");
- goto out;
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ return retval;
}
- dqm->active_runlist = true;
-out:
- if (lock)
- mutex_unlock(&dqm->lock);
- return retval;
+ return map_queues_cpsch(dqm);
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
@@ -991,14 +1007,20 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
goto failed;
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
dqm->sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ }
list_del(&q->list);
+ qpd->queue_count--;
if (q->properties.is_active)
dqm->queue_count--;
- execute_queues_cpsch(dqm, false);
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -1068,7 +1090,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit = limit >> 16;
}
- retval = dqm->ops_asic_specific.set_cache_memory_policy(
+ retval = dqm->asic_ops.set_cache_memory_policy(
dqm,
qpd,
default_policy,
@@ -1088,6 +1110,109 @@ out:
return retval;
}
+static int process_termination_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct queue *q, *next;
+ struct device_process_node *cur, *next_dpn;
+ int retval = 0;
+
+ mutex_lock(&dqm->lock);
+
+ /* Clear all user mode queues */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ int ret;
+
+ ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
+ if (ret)
+ retval = ret;
+ }
+
+ /* Unregister process */
+ list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+ kfree(cur);
+ dqm->processes_count--;
+ break;
+ }
+ }
+
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+
+static int process_termination_cpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int retval;
+ struct queue *q, *next;
+ struct kernel_queue *kq, *kq_next;
+ struct mqd_manager *mqd;
+ struct device_process_node *cur, *next_dpn;
+ enum kfd_unmap_queues_filter filter =
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
+
+ retval = 0;
+
+ mutex_lock(&dqm->lock);
+
+ /* Clean all kernel queues */
+ list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
+ list_del(&kq->list);
+ dqm->queue_count--;
+ qpd->is_debug = false;
+ dqm->total_queue_count--;
+ filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
+ }
+
+ /* Clear all user mode queues */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count--;
+
+ if (q->properties.is_active)
+ dqm->queue_count--;
+
+ dqm->total_queue_count--;
+ }
+
+ /* Unregister process */
+ list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+ kfree(cur);
+ dqm->processes_count--;
+ break;
+ }
+ }
+
+ retval = execute_queues_cpsch(dqm, filter, 0);
+ if (retval || qpd->reset_wavefronts) {
+ pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
+ dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
+ qpd->reset_wavefronts = false;
+ }
+
+ /* lastly, free mqd resources */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ list_del(&q->list);
+ qpd->queue_count--;
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ }
+
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
struct device_queue_manager *dqm;
@@ -1109,13 +1234,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.stop = stop_cpsch;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
- dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->ops.register_process = register_process_nocpsch;
- dqm->ops.unregister_process = unregister_process_nocpsch;
- dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.get_mqd_manager = get_mqd_manager;
+ dqm->ops.register_process = register_process;
+ dqm->ops.unregister_process = unregister_process;
+ dqm->ops.uninitialize = uninitialize;
dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.process_termination = process_termination_cpsch;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
@@ -1124,12 +1250,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
- dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->ops.register_process = register_process_nocpsch;
- dqm->ops.unregister_process = unregister_process_nocpsch;
+ dqm->ops.get_mqd_manager = get_mqd_manager;
+ dqm->ops.register_process = register_process;
+ dqm->ops.unregister_process = unregister_process;
dqm->ops.initialize = initialize_nocpsch;
- dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.uninitialize = uninitialize;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.process_termination = process_termination_nocpsch;
break;
default:
pr_err("Invalid scheduling policy %d\n", sched_policy);
@@ -1138,12 +1265,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
switch (dev->device_info->asic_family) {
case CHIP_CARRIZO:
- device_queue_manager_init_vi(&dqm->ops_asic_specific);
+ device_queue_manager_init_vi(&dqm->asic_ops);
break;
case CHIP_KAVERI:
- device_queue_manager_init_cik(&dqm->ops_asic_specific);
+ device_queue_manager_init_cik(&dqm->asic_ops);
break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->device_info->asic_family);
+ goto out_free;
}
if (!dqm->ops.initialize(dqm))
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index faf820a06400..5b77cb69f732 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -29,11 +29,9 @@
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
-#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500)
-#define CIK_VMID_NUM (8)
-#define KFD_VMID_START_OFFSET (8)
-#define VMID_PER_DEVICE CIK_VMID_NUM
-#define KFD_DQM_FIRST_PIPE (0)
+#define KFD_UNMAP_LATENCY_MS (4000)
+#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
+
#define CIK_SDMA_QUEUES (4)
#define CIK_SDMA_QUEUES_PER_ENGINE (2)
#define CIK_SDMA_ENGINE_NUM (2)
@@ -79,6 +77,8 @@ struct device_process_node {
* @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
* memory apertures.
*
+ * @process_termination: Clears all process queues belongs to that device.
+ *
*/
struct device_queue_manager_ops {
@@ -122,12 +122,14 @@ struct device_queue_manager_ops {
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
+
+ int (*process_termination)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
};
struct device_queue_manager_asic_ops {
- int (*register_process)(struct device_queue_manager *dqm,
+ int (*update_qpd)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
- int (*initialize)(struct device_queue_manager *dqm);
bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
@@ -153,7 +155,7 @@ struct device_queue_manager_asic_ops {
struct device_queue_manager {
struct device_queue_manager_ops ops;
- struct device_queue_manager_asic_ops ops_asic_specific;
+ struct device_queue_manager_asic_ops asic_ops;
struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
@@ -176,8 +178,10 @@ struct device_queue_manager {
bool active_runlist;
};
-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
+void device_queue_manager_init_cik(
+ struct device_queue_manager_asic_ops *asic_ops);
+void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
unsigned int get_queues_num(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 72c3cbabc0a7..28e48c90c596 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -32,18 +32,17 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
-static int register_process_cik(struct device_queue_manager *dqm,
+static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-static int initialize_cpsch_cik(struct device_queue_manager *dqm);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops)
+void device_queue_manager_init_cik(
+ struct device_queue_manager_asic_ops *asic_ops)
{
- ops->set_cache_memory_policy = set_cache_memory_policy_cik;
- ops->register_process = register_process_cik;
- ops->initialize = initialize_cpsch_cik;
- ops->init_sdma_vm = init_sdma_vm;
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
+ asic_ops->update_qpd = update_qpd_cik;
+ asic_ops->init_sdma_vm = init_sdma_vm;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
@@ -99,7 +98,7 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
return true;
}
-static int register_process_cik(struct device_queue_manager *dqm,
+static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
@@ -148,8 +147,3 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q->properties.sdma_vm_addr = value;
}
-
-static int initialize_cpsch_cik(struct device_queue_manager *dqm)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 40e9ddd096cd..2fbce57a2f21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -33,18 +33,17 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
-static int register_process_vi(struct device_queue_manager *dqm,
+static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-static int initialize_cpsch_vi(struct device_queue_manager *dqm);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
+void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops)
{
- ops->set_cache_memory_policy = set_cache_memory_policy_vi;
- ops->register_process = register_process_vi;
- ops->initialize = initialize_cpsch_vi;
- ops->init_sdma_vm = init_sdma_vm;
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
+ asic_ops->update_qpd = update_qpd_vi;
+ asic_ops->init_sdma_vm = init_sdma_vm;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
@@ -104,7 +103,7 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
return true;
}
-static int register_process_vi(struct device_queue_manager *dqm,
+static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
@@ -160,8 +159,3 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q->properties.sdma_vm_addr = value;
}
-
-static int initialize_cpsch_vi(struct device_queue_manager *dqm)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index acf4d2a977ad..feb76c235b1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -24,16 +24,15 @@
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/idr.h>
/*
- * This extension supports a kernel level doorbells management for
- * the kernel queues.
- * Basically the last doorbells page is devoted to kernel queues
- * and that's assures that any user process won't get access to the
- * kernel doorbells page
+ * This extension supports a kernel level doorbells management for the
+ * kernel queues using the first doorbell page reserved for the kernel.
*/
-#define KERNEL_DOORBELL_PASID 1
+static DEFINE_IDA(doorbell_ida);
+static unsigned int max_doorbell_slices;
#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
/*
@@ -84,13 +83,16 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
(doorbell_aperture_size - doorbell_start_offset) /
doorbell_process_allocation();
else
- doorbell_process_limit = 0;
+ return -ENOSPC;
+
+ if (!max_doorbell_slices ||
+ doorbell_process_limit < max_doorbell_slices)
+ max_doorbell_slices = doorbell_process_limit;
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
- kfd->doorbell_process_limit = doorbell_process_limit - 1;
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
doorbell_process_allocation());
@@ -185,11 +187,10 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
return NULL;
/*
- * Calculating the kernel doorbell offset using "faked" kernel
- * pasid that allocated for kernel queues only
+ * Calculating the kernel doorbell offset using the first
+ * doorbell page.
*/
- *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
- sizeof(u32)) + inx;
+ *doorbell_off = kfd->doorbell_id_offset + inx;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -228,11 +229,12 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
{
/*
* doorbell_id_offset accounts for doorbells taken by KGD.
- * pasid * doorbell_process_allocation/sizeof(u32) adjusts
- * to the process's doorbells
+ * index * doorbell_process_allocation/sizeof(u32) adjusts to
+ * the process's doorbells.
*/
return kfd->doorbell_id_offset +
- process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
+ process->doorbell_index
+ * doorbell_process_allocation() / sizeof(u32) +
queue_id;
}
@@ -250,5 +252,21 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
struct kfd_process *process)
{
return dev->doorbell_base +
- process->pasid * doorbell_process_allocation();
+ process->doorbell_index * doorbell_process_allocation();
+}
+
+int kfd_alloc_process_doorbells(struct kfd_process *process)
+{
+ int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
+ GFP_KERNEL);
+ if (r > 0)
+ process->doorbell_index = r;
+
+ return r;
+}
+
+void kfd_free_process_doorbells(struct kfd_process *process)
+{
+ if (process->doorbell_index)
+ ida_simple_remove(&doorbell_ida, process->doorbell_index);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 944abfad39c1..cb92d4b72400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -24,8 +24,8 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include <linux/uaccess.h>
-#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
@@ -33,185 +33,89 @@
#include <linux/device.h>
/*
- * A task can only be on a single wait_queue at a time, but we need to support
- * waiting on multiple events (any/all).
- * Instead of each event simply having a wait_queue with sleeping tasks, it
- * has a singly-linked list of tasks.
- * A thread that wants to sleep creates an array of these, one for each event
- * and adds one to each event's waiter chain.
+ * Wrapper around wait_queue_entry_t
*/
struct kfd_event_waiter {
- struct list_head waiters;
- struct task_struct *sleeping_task;
-
- /* Transitions to true when the event this belongs to is signaled. */
- bool activated;
-
- /* Event */
- struct kfd_event *event;
- uint32_t input_index;
+ wait_queue_entry_t wait;
+ struct kfd_event *event; /* Event to wait for */
+ bool activated; /* Becomes true when event is signaled */
};
/*
- * Over-complicated pooled allocator for event notification slots.
- *
* Each signal event needs a 64-bit signal slot where the signaler will write
- * a 1 before sending an interrupt.l (This is needed because some interrupts
+ * a 1 before sending an interrupt. (This is needed because some interrupts
* do not contain enough spare data bits to identify an event.)
- * We get whole pages from vmalloc and map them to the process VA.
- * Individual signal events are then allocated a slot in a page.
+ * We get whole pages and map them to the process VA.
+ * Individual signal events use their event_id as slot index.
*/
-
-struct signal_page {
- struct list_head event_pages; /* kfd_process.signal_event_pages */
+struct kfd_signal_page {
uint64_t *kernel_address;
uint64_t __user *user_address;
- uint32_t page_index; /* Index into the mmap aperture. */
- unsigned int free_slots;
- unsigned long used_slot_bitmap[0];
};
-#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
-#define SLOT_BITMAP_SIZE BITS_TO_LONGS(SLOTS_PER_PAGE)
-#define BITS_PER_PAGE (ilog2(SLOTS_PER_PAGE)+1)
-#define SIGNAL_PAGE_SIZE (sizeof(struct signal_page) + \
- SLOT_BITMAP_SIZE * sizeof(long))
-
-/*
- * For signal events, the event ID is used as the interrupt user data.
- * For SQ s_sendmsg interrupts, this is limited to 8 bits.
- */
-
-#define INTERRUPT_DATA_BITS 8
-#define SIGNAL_EVENT_ID_SLOT_SHIFT 0
-static uint64_t *page_slots(struct signal_page *page)
+static uint64_t *page_slots(struct kfd_signal_page *page)
{
return page->kernel_address;
}
-static bool allocate_free_slot(struct kfd_process *process,
- struct signal_page **out_page,
- unsigned int *out_slot_index)
-{
- struct signal_page *page;
-
- list_for_each_entry(page, &process->signal_event_pages, event_pages) {
- if (page->free_slots > 0) {
- unsigned int slot =
- find_first_zero_bit(page->used_slot_bitmap,
- SLOTS_PER_PAGE);
-
- __set_bit(slot, page->used_slot_bitmap);
- page->free_slots--;
-
- page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;
-
- *out_page = page;
- *out_slot_index = slot;
-
- pr_debug("Allocated event signal slot in page %p, slot %d\n",
- page, slot);
-
- return true;
- }
- }
-
- pr_debug("No free event signal slots were found for process %p\n",
- process);
-
- return false;
-}
-
-#define list_tail_entry(head, type, member) \
- list_entry((head)->prev, type, member)
-
-static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
+static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
void *backing_store;
- struct signal_page *page;
+ struct kfd_signal_page *page;
- page = kzalloc(SIGNAL_PAGE_SIZE, GFP_KERNEL);
+ page = kzalloc(sizeof(*page), GFP_KERNEL);
if (!page)
- goto fail_alloc_signal_page;
+ return NULL;
- page->free_slots = SLOTS_PER_PAGE;
-
- backing_store = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ backing_store = (void *) __get_free_pages(GFP_KERNEL,
get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
if (!backing_store)
goto fail_alloc_signal_store;
- /* prevent user-mode info leaks */
+ /* Initialize all events to unsignaled */
memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
- KFD_SIGNAL_EVENT_LIMIT * 8);
+ KFD_SIGNAL_EVENT_LIMIT * 8);
page->kernel_address = backing_store;
-
- if (list_empty(&p->signal_event_pages))
- page->page_index = 0;
- else
- page->page_index = list_tail_entry(&p->signal_event_pages,
- struct signal_page,
- event_pages)->page_index + 1;
-
pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p);
- pr_debug("Page index is %d\n", page->page_index);
- list_add(&page->event_pages, &p->signal_event_pages);
-
- return true;
+ return page;
fail_alloc_signal_store:
kfree(page);
-fail_alloc_signal_page:
- return false;
+ return NULL;
}
-static bool allocate_event_notification_slot(struct file *devkfd,
- struct kfd_process *p,
- struct signal_page **page,
- unsigned int *signal_slot_index)
+static int allocate_event_notification_slot(struct kfd_process *p,
+ struct kfd_event *ev)
{
- bool ret;
+ int id;
- ret = allocate_free_slot(p, page, signal_slot_index);
- if (!ret) {
- ret = allocate_signal_page(devkfd, p);
- if (ret)
- ret = allocate_free_slot(p, page, signal_slot_index);
+ if (!p->signal_page) {
+ p->signal_page = allocate_signal_page(p);
+ if (!p->signal_page)
+ return -ENOMEM;
+ /* Oldest user mode expects 256 event slots */
+ p->signal_mapped_size = 256*8;
}
- return ret;
-}
-
-/* Assumes that the process's event_mutex is locked. */
-static void release_event_notification_slot(struct signal_page *page,
- size_t slot_index)
-{
- __clear_bit(slot_index, page->used_slot_bitmap);
- page->free_slots++;
-
- /* We don't free signal pages, they are retained by the process
- * and reused until it exits.
- */
-}
-
-static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
- unsigned int page_index)
-{
- struct signal_page *page;
-
/*
- * This is safe because we don't delete signal pages until the
- * process exits.
+ * Compatibility with old user mode: Only use signal slots
+ * user mode has mapped, may be less than
+ * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
+ * of the event limit without breaking user mode.
*/
- list_for_each_entry(page, &p->signal_event_pages, event_pages)
- if (page->page_index == page_index)
- return page;
+ id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
- return NULL;
+ ev->event_id = id;
+ page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
+
+ return 0;
}
/*
@@ -220,99 +124,81 @@ static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
*/
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
- struct kfd_event *ev;
-
- hash_for_each_possible(p->events, ev, events, id)
- if (ev->event_id == id)
- return ev;
-
- return NULL;
+ return idr_find(&p->event_idr, id);
}
-static u32 make_signal_event_id(struct signal_page *page,
- unsigned int signal_slot_index)
-{
- return page->page_index |
- (signal_slot_index << SIGNAL_EVENT_ID_SLOT_SHIFT);
-}
-
-/*
- * Produce a kfd event id for a nonsignal event.
- * These are arbitrary numbers, so we do a sequential search through
- * the hash table for an unused number.
+/**
+ * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
+ * @p: Pointer to struct kfd_process
+ * @id: ID to look up
+ * @bits: Number of valid bits in @id
+ *
+ * Finds the first signaled event with a matching partial ID. If no
+ * matching signaled event is found, returns NULL. In that case the
+ * caller should assume that the partial ID is invalid and do an
+ * exhaustive search of all siglaned events.
+ *
+ * If multiple events with the same partial ID signal at the same
+ * time, they will be found one interrupt at a time, not necessarily
+ * in the same order the interrupts occurred. As long as the number of
+ * interrupts is correct, all signaled events will be seen by the
+ * driver.
*/
-static u32 make_nonsignal_event_id(struct kfd_process *p)
+static struct kfd_event *lookup_signaled_event_by_partial_id(
+ struct kfd_process *p, uint32_t id, uint32_t bits)
{
- u32 id;
-
- for (id = p->next_nonsignal_event_id;
- id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id);
- id++)
- ;
+ struct kfd_event *ev;
- if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
+ if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
+ return NULL;
- /*
- * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
- * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
- * the first loop fails immediately and we proceed with the
- * wraparound loop below.
- */
- p->next_nonsignal_event_id = id + 1;
+ /* Fast path for the common case that @id is not a partial ID
+ * and we only need a single lookup.
+ */
+ if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
+ if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
+ return NULL;
- return id;
+ return idr_find(&p->event_idr, id);
}
- for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
- id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id);
- id++)
- ;
-
+ /* General case for partial IDs: Iterate over all matching IDs
+ * and find the first one that has signaled.
+ */
+ for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
+ if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
+ continue;
- if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
- p->next_nonsignal_event_id = id + 1;
- return id;
+ ev = idr_find(&p->event_idr, id);
}
- p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
- return 0;
-}
-
-static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
- struct signal_page *page,
- unsigned int signal_slot)
-{
- return lookup_event_by_id(p, make_signal_event_id(page, signal_slot));
+ return ev;
}
static int create_signal_event(struct file *devkfd,
struct kfd_process *p,
struct kfd_event *ev)
{
- if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
+ int ret;
+
+ if (p->signal_mapped_size &&
+ p->signal_event_count == p->signal_mapped_size / 8) {
if (!p->signal_event_limit_reached) {
pr_warn("Signal event wasn't created because limit was reached\n");
p->signal_event_limit_reached = true;
}
- return -ENOMEM;
+ return -ENOSPC;
}
- if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
- &ev->signal_slot_index)) {
+ ret = allocate_event_notification_slot(p, ev);
+ if (ret) {
pr_warn("Signal event wasn't created because out of kernel memory\n");
- return -ENOMEM;
+ return ret;
}
p->signal_event_count++;
- ev->user_signal_address =
- &ev->signal_page->user_address[ev->signal_slot_index];
-
- ev->event_id = make_signal_event_id(ev->signal_page,
- ev->signal_slot_index);
-
+ ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
pr_debug("Signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id,
ev->user_signal_address);
@@ -320,16 +206,20 @@ static int create_signal_event(struct file *devkfd,
return 0;
}
-/*
- * No non-signal events are supported yet.
- * We create them as events that never signal.
- * Set event calls from user-mode are failed.
- */
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
- ev->event_id = make_nonsignal_event_id(p);
- if (ev->event_id == 0)
- return -ENOMEM;
+ /* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
+ * intentional integer overflow to -1 without a compiler
+ * warning. idr_alloc treats a negative value as "maximum
+ * signed integer".
+ */
+ int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
+ (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
+ GFP_KERNEL);
+
+ if (id < 0)
+ return id;
+ ev->event_id = id;
return 0;
}
@@ -337,50 +227,47 @@ static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
void kfd_event_init_process(struct kfd_process *p)
{
mutex_init(&p->event_mutex);
- hash_init(p->events);
- INIT_LIST_HEAD(&p->signal_event_pages);
- p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_init(&p->event_idr);
+ p->signal_page = NULL;
p->signal_event_count = 0;
}
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
- if (ev->signal_page) {
- release_event_notification_slot(ev->signal_page,
- ev->signal_slot_index);
- p->signal_event_count--;
- }
+ struct kfd_event_waiter *waiter;
- /*
- * Abandon the list of waiters. Individual waiting threads will
- * clean up their own data.
- */
- list_del(&ev->waiters);
+ /* Wake up pending waiters. They will return failure */
+ list_for_each_entry(waiter, &ev->wq.head, wait.entry)
+ waiter->event = NULL;
+ wake_up_all(&ev->wq);
+
+ if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
+ ev->type == KFD_EVENT_TYPE_DEBUG)
+ p->signal_event_count--;
- hash_del(&ev->events);
+ idr_remove(&p->event_idr, ev->event_id);
kfree(ev);
}
static void destroy_events(struct kfd_process *p)
{
struct kfd_event *ev;
- struct hlist_node *tmp;
- unsigned int hash_bkt;
+ uint32_t id;
- hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
+ idr_for_each_entry(&p->event_idr, ev, id)
destroy_event(p, ev);
+ idr_destroy(&p->event_idr);
}
/*
* We assume that the process is being destroyed and there is no need to
* unmap the pages or keep bookkeeping data in order.
*/
-static void shutdown_signal_pages(struct kfd_process *p)
+static void shutdown_signal_page(struct kfd_process *p)
{
- struct signal_page *page, *tmp;
+ struct kfd_signal_page *page = p->signal_page;
- list_for_each_entry_safe(page, tmp, &p->signal_event_pages,
- event_pages) {
+ if (page) {
free_pages((unsigned long)page->kernel_address,
get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
kfree(page);
@@ -390,7 +277,7 @@ static void shutdown_signal_pages(struct kfd_process *p)
void kfd_event_free_process(struct kfd_process *p)
{
destroy_events(p);
- shutdown_signal_pages(p);
+ shutdown_signal_page(p);
}
static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
@@ -419,7 +306,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
ev->auto_reset = auto_reset;
ev->signaled = false;
- INIT_LIST_HEAD(&ev->waiters);
+ init_waitqueue_head(&ev->wq);
*event_page_offset = 0;
@@ -430,10 +317,9 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
case KFD_EVENT_TYPE_DEBUG:
ret = create_signal_event(devkfd, p, ev);
if (!ret) {
- *event_page_offset = (ev->signal_page->page_index |
- KFD_MMAP_EVENTS_MASK);
+ *event_page_offset = KFD_MMAP_EVENTS_MASK;
*event_page_offset <<= PAGE_SHIFT;
- *event_slot_index = ev->signal_slot_index;
+ *event_slot_index = ev->event_id;
}
break;
default:
@@ -442,8 +328,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
}
if (!ret) {
- hash_add(p->events, &ev->events, ev->event_id);
-
*event_id = ev->event_id;
*event_trigger_data = ev->event_id;
} else {
@@ -477,19 +361,18 @@ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
static void set_event(struct kfd_event *ev)
{
struct kfd_event_waiter *waiter;
- struct kfd_event_waiter *next;
- /* Auto reset if the list is non-empty and we're waking someone. */
- ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);
+ /* Auto reset if the list is non-empty and we're waking
+ * someone. waitqueue_active is safe here because we're
+ * protected by the p->event_mutex, which is also held when
+ * updating the wait queues in kfd_wait_on_events.
+ */
+ ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
- list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
+ list_for_each_entry(waiter, &ev->wq.head, wait.entry)
waiter->activated = true;
- /* _init because free_waiters will call list_del */
- list_del_init(&waiter->waiters);
-
- wake_up_process(waiter->sleeping_task);
- }
+ wake_up_all(&ev->wq);
}
/* Assumes that p is current. */
@@ -538,13 +421,7 @@ int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
- page_slots(ev->signal_page)[ev->signal_slot_index] =
- UNSIGNALED_EVENT_SLOT;
-}
-
-static bool is_slot_signaled(struct signal_page *page, unsigned int index)
-{
- return page_slots(page)[index] != UNSIGNALED_EVENT_SLOT;
+ page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}
static void set_event_from_interrupt(struct kfd_process *p,
@@ -559,7 +436,7 @@ static void set_event_from_interrupt(struct kfd_process *p,
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
uint32_t valid_id_bits)
{
- struct kfd_event *ev;
+ struct kfd_event *ev = NULL;
/*
* Because we are called from arbitrary context (workqueue) as opposed
@@ -573,26 +450,46 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
mutex_lock(&p->event_mutex);
- if (valid_id_bits >= INTERRUPT_DATA_BITS) {
- /* Partial ID is a full ID. */
- ev = lookup_event_by_id(p, partial_id);
+ if (valid_id_bits)
+ ev = lookup_signaled_event_by_partial_id(p, partial_id,
+ valid_id_bits);
+ if (ev) {
set_event_from_interrupt(p, ev);
- } else {
+ } else if (p->signal_page) {
/*
- * Partial ID is in fact partial. For now we completely
- * ignore it, but we could use any bits we did receive to
- * search faster.
+ * Partial ID lookup failed. Assume that the event ID
+ * in the interrupt payload was invalid and do an
+ * exhaustive search of signaled events.
*/
- struct signal_page *page;
- unsigned int i;
-
- list_for_each_entry(page, &p->signal_event_pages, event_pages)
- for (i = 0; i < SLOTS_PER_PAGE; i++)
- if (is_slot_signaled(page, i)) {
- ev = lookup_event_by_page_slot(p,
- page, i);
+ uint64_t *slots = page_slots(p->signal_page);
+ uint32_t id;
+
+ if (valid_id_bits)
+ pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
+ partial_id, valid_id_bits);
+
+ if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
+ /* With relatively few events, it's faster to
+ * iterate over the event IDR
+ */
+ idr_for_each_entry(&p->event_idr, ev, id) {
+ if (id >= KFD_SIGNAL_EVENT_LIMIT)
+ break;
+
+ if (slots[id] != UNSIGNALED_EVENT_SLOT)
+ set_event_from_interrupt(p, ev);
+ }
+ } else {
+ /* With relatively many events, it's faster to
+ * iterate over the signal slots and lookup
+ * only signaled events from the IDR.
+ */
+ for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
+ if (slots[id] != UNSIGNALED_EVENT_SLOT) {
+ ev = lookup_event_by_id(p, id);
set_event_from_interrupt(p, ev);
}
+ }
}
mutex_unlock(&p->event_mutex);
@@ -609,18 +506,16 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
GFP_KERNEL);
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
- INIT_LIST_HEAD(&event_waiters[i].waiters);
- event_waiters[i].sleeping_task = current;
+ init_wait(&event_waiters[i].wait);
event_waiters[i].activated = false;
}
return event_waiters;
}
-static int init_event_waiter(struct kfd_process *p,
+static int init_event_waiter_get_status(struct kfd_process *p,
struct kfd_event_waiter *waiter,
- uint32_t event_id,
- uint32_t input_index)
+ uint32_t event_id)
{
struct kfd_event *ev = lookup_event_by_id(p, event_id);
@@ -628,38 +523,60 @@ static int init_event_waiter(struct kfd_process *p,
return -EINVAL;
waiter->event = ev;
- waiter->input_index = input_index;
waiter->activated = ev->signaled;
ev->signaled = ev->signaled && !ev->auto_reset;
- list_add(&waiter->waiters, &ev->waiters);
-
return 0;
}
-static bool test_event_condition(bool all, uint32_t num_events,
+static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
+{
+ struct kfd_event *ev = waiter->event;
+
+ /* Only add to the wait list if we actually need to
+ * wait on this event.
+ */
+ if (!waiter->activated)
+ add_wait_queue(&ev->wq, &waiter->wait);
+}
+
+/* test_event_condition - Test condition of events being waited for
+ * @all: Return completion only if all events have signaled
+ * @num_events: Number of events to wait for
+ * @event_waiters: Array of event waiters, one per event
+ *
+ * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
+ * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
+ * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
+ * the events have been destroyed.
+ */
+static uint32_t test_event_condition(bool all, uint32_t num_events,
struct kfd_event_waiter *event_waiters)
{
uint32_t i;
uint32_t activated_count = 0;
for (i = 0; i < num_events; i++) {
+ if (!event_waiters[i].event)
+ return KFD_IOC_WAIT_RESULT_FAIL;
+
if (event_waiters[i].activated) {
if (!all)
- return true;
+ return KFD_IOC_WAIT_RESULT_COMPLETE;
activated_count++;
}
}
- return activated_count == num_events;
+ return activated_count == num_events ?
+ KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}
/*
* Copy event specific data, if defined.
* Currently only memory exception events have additional data to copy to user
*/
-static bool copy_signaled_event_data(uint32_t num_events,
+static int copy_signaled_event_data(uint32_t num_events,
struct kfd_event_waiter *event_waiters,
struct kfd_event_data __user *data)
{
@@ -673,15 +590,15 @@ static bool copy_signaled_event_data(uint32_t num_events,
waiter = &event_waiters[i];
event = waiter->event;
if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
- dst = &data[waiter->input_index].memory_exception_data;
+ dst = &data[i].memory_exception_data;
src = &event->memory_exception_data;
if (copy_to_user(dst, src,
sizeof(struct kfd_hsa_memory_exception_data)))
- return false;
+ return -EFAULT;
}
}
- return true;
+ return 0;
}
@@ -710,7 +627,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
uint32_t i;
for (i = 0; i < num_events; i++)
- list_del(&waiters[i].waiters);
+ if (waiters[i].event)
+ remove_wait_queue(&waiters[i].event->wq,
+ &waiters[i].wait);
kfree(waiters);
}
@@ -718,38 +637,56 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
bool all, uint32_t user_timeout_ms,
- enum kfd_event_wait_result *wait_result)
+ uint32_t *wait_result)
{
struct kfd_event_data __user *events =
(struct kfd_event_data __user *) data;
uint32_t i;
int ret = 0;
+
struct kfd_event_waiter *event_waiters = NULL;
long timeout = user_timeout_to_jiffies(user_timeout_ms);
- mutex_lock(&p->event_mutex);
-
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
ret = -ENOMEM;
- goto fail;
+ goto out;
}
+ mutex_lock(&p->event_mutex);
+
for (i = 0; i < num_events; i++) {
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
sizeof(struct kfd_event_data))) {
ret = -EFAULT;
- goto fail;
+ goto out_unlock;
}
- ret = init_event_waiter(p, &event_waiters[i],
- event_data.event_id, i);
+ ret = init_event_waiter_get_status(p, &event_waiters[i],
+ event_data.event_id);
if (ret)
- goto fail;
+ goto out_unlock;
}
+ /* Check condition once. */
+ *wait_result = test_event_condition(all, num_events, event_waiters);
+ if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
+ ret = copy_signaled_event_data(num_events,
+ event_waiters, events);
+ goto out_unlock;
+ } else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
+ /* This should not happen. Events shouldn't be
+ * destroyed while we're holding the event_mutex
+ */
+ goto out_unlock;
+ }
+
+ /* Add to wait lists if we need to wait. */
+ for (i = 0; i < num_events; i++)
+ init_event_waiter_add_to_waitlist(&event_waiters[i]);
+
mutex_unlock(&p->event_mutex);
while (true) {
@@ -771,62 +708,66 @@ int kfd_wait_on_events(struct kfd_process *p,
break;
}
- if (test_event_condition(all, num_events, event_waiters)) {
- if (copy_signaled_event_data(num_events,
- event_waiters, events))
- *wait_result = KFD_WAIT_COMPLETE;
- else
- *wait_result = KFD_WAIT_ERROR;
+ /* Set task state to interruptible sleep before
+ * checking wake-up conditions. A concurrent wake-up
+ * will put the task back into runnable state. In that
+ * case schedule_timeout will not put the task to
+ * sleep and we'll get a chance to re-check the
+ * updated conditions almost immediately. Otherwise,
+ * this race condition would lead to a soft hang or a
+ * very long sleep.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ *wait_result = test_event_condition(all, num_events,
+ event_waiters);
+ if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
break;
- }
- if (timeout <= 0) {
- *wait_result = KFD_WAIT_TIMEOUT;
+ if (timeout <= 0)
break;
- }
- timeout = schedule_timeout_interruptible(timeout);
+ timeout = schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
+ /* copy_signaled_event_data may sleep. So this has to happen
+ * after the task state is set back to RUNNING.
+ */
+ if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
+ ret = copy_signaled_event_data(num_events,
+ event_waiters, events);
+
mutex_lock(&p->event_mutex);
+out_unlock:
free_waiters(num_events, event_waiters);
mutex_unlock(&p->event_mutex);
-
- return ret;
-
-fail:
- if (event_waiters)
- free_waiters(num_events, event_waiters);
-
- mutex_unlock(&p->event_mutex);
-
- *wait_result = KFD_WAIT_ERROR;
+out:
+ if (ret)
+ *wait_result = KFD_IOC_WAIT_RESULT_FAIL;
+ else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
+ ret = -EIO;
return ret;
}
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
-
- unsigned int page_index;
unsigned long pfn;
- struct signal_page *page;
+ struct kfd_signal_page *page;
+ int ret;
- /* check required size is logical */
- if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
+ /* check required size doesn't exceed the allocated size */
+ if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
get_order(vma->vm_end - vma->vm_start)) {
pr_err("Event page mmap requested illegal size\n");
return -EINVAL;
}
- page_index = vma->vm_pgoff;
-
- page = lookup_signal_page_by_index(p, page_index);
+ page = p->signal_page;
if (!page) {
/* Probably KFD bug, but mmap is user-accessible. */
- pr_debug("Signal page could not be found for page_index %u\n",
- page_index);
+ pr_debug("Signal page could not be found\n");
return -EINVAL;
}
@@ -847,8 +788,12 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
page->user_address = (uint64_t __user *)vma->vm_start;
/* mapping the page to user process */
- return remap_pfn_range(vma, vma->vm_start, pfn,
+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ if (!ret)
+ p->signal_mapped_size = vma->vm_end - vma->vm_start;
+
+ return ret;
}
/*
@@ -860,12 +805,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
{
struct kfd_hsa_memory_exception_data *ev_data;
struct kfd_event *ev;
- int bkt;
+ uint32_t id;
bool send_signal = true;
ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
- hash_for_each(p->events, bkt, ev, events)
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == type) {
send_signal = false;
dev_dbg(kfd_device,
@@ -904,14 +850,24 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
* running so the lookup function returns a locked process.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct mm_struct *mm;
if (!p)
return; /* Presumably process exited. */
+ /* Take a safe reference to the mm_struct, which may otherwise
+ * disappear even while the kfd_process is still referenced.
+ */
+ mm = get_task_mm(p->lead_thread);
+ if (!mm) {
+ mutex_unlock(&p->mutex);
+ return; /* Process is exiting */
+ }
+
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- down_read(&p->mm->mmap_sem);
- vma = find_vma(p->mm, address);
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
memory_exception_data.gpu_id = dev->id;
memory_exception_data.va = address;
@@ -937,7 +893,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
}
}
- up_read(&p->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
mutex_lock(&p->event_mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index 28f6838b1f4c..abca5bfebbff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -27,12 +27,17 @@
#include <linux/hashtable.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/wait.h>
#include "kfd_priv.h"
#include <uapi/linux/kfd_ioctl.h>
-#define KFD_EVENT_ID_NONSIGNAL_MASK 0x80000000U
-#define KFD_FIRST_NONSIGNAL_EVENT_ID KFD_EVENT_ID_NONSIGNAL_MASK
-#define KFD_LAST_NONSIGNAL_EVENT_ID UINT_MAX
+/*
+ * IDR supports non-negative integer IDs. Small IDs are used for
+ * signal events to match their signal slot. Use the upper half of the
+ * ID space for non-signal events.
+ */
+#define KFD_FIRST_NONSIGNAL_EVENT_ID ((INT_MAX >> 1) + 1)
+#define KFD_LAST_NONSIGNAL_EVENT_ID INT_MAX
/*
* Written into kfd_signal_slot_t to indicate that the event is not signaled.
@@ -46,9 +51,6 @@ struct kfd_event_waiter;
struct signal_page;
struct kfd_event {
- /* All events in process, rooted at kfd_process.events. */
- struct hlist_node events;
-
u32 event_id;
bool signaled;
@@ -56,11 +58,9 @@ struct kfd_event {
int type;
- struct list_head waiters; /* List of kfd_event_waiter by waiters. */
+ wait_queue_head_t wq; /* List of event waiters. */
/* Only for signal events. */
- struct signal_page *signal_page;
- unsigned int signal_slot_index;
uint64_t __user *user_signal_address;
/* type specific data */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 70b3a99cffc2..035c351f47c5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -42,26 +42,26 @@
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/kfifo.h>
#include "kfd_priv.h"
-#define KFD_INTERRUPT_RING_SIZE 1024
+#define KFD_IH_NUM_ENTRIES 8192
static void interrupt_wq(struct work_struct *);
int kfd_interrupt_init(struct kfd_dev *kfd)
{
- void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
- kfd->device_info->ih_ring_entry_size,
- GFP_KERNEL);
- if (!interrupt_ring)
- return -ENOMEM;
-
- kfd->interrupt_ring = interrupt_ring;
- kfd->interrupt_ring_size =
- KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
- atomic_set(&kfd->interrupt_ring_wptr, 0);
- atomic_set(&kfd->interrupt_ring_rptr, 0);
+ int r;
+
+ r = kfifo_alloc(&kfd->ih_fifo,
+ KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size,
+ GFP_KERNEL);
+ if (r) {
+ dev_err(kfd_chardev(), "Failed to allocate IH fifo\n");
+ return r;
+ }
+ kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
@@ -92,74 +92,47 @@ void kfd_interrupt_exit(struct kfd_dev *kfd)
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
/*
- * Flush_scheduled_work ensures that there are no outstanding
+ * flush_work ensures that there are no outstanding
* work-queue items that will access interrupt_ring. New work items
* can't be created because we stopped interrupt handling above.
*/
- flush_scheduled_work();
+ flush_workqueue(kfd->ih_wq);
- kfree(kfd->interrupt_ring);
+ kfifo_free(&kfd->ih_fifo);
}
/*
- * This assumes that it can't be called concurrently with itself
- * but only with dequeue_ih_ring_entry.
+ * Assumption: single reader/writer. This function is not re-entrant
*/
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+ int count;
- if ((rptr - wptr) % kfd->interrupt_ring_size ==
- kfd->device_info->ih_ring_entry_size) {
- /* This is very bad, the system is likely to hang. */
+ count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
+ kfd->device_info->ih_ring_entry_size);
+ if (count != kfd->device_info->ih_ring_entry_size) {
dev_err_ratelimited(kfd_chardev(),
- "Interrupt ring overflow, dropping interrupt.\n");
+ "Interrupt ring overflow, dropping interrupt %d\n",
+ count);
return false;
}
- memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
-
- wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
- smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
- atomic_set(&kfd->interrupt_ring_wptr, wptr);
-
return true;
}
/*
- * This assumes that it can't be called concurrently with itself
- * but only with enqueue_ih_ring_entry.
+ * Assumption: single reader/writer. This function is not re-entrant
*/
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
{
- /*
- * Assume that wait queues have an implicit barrier, i.e. anything that
- * happened in the ISR before it queued work is visible.
- */
-
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+ int count;
- if (rptr == wptr)
- return false;
-
- memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
- kfd->device_info->ih_ring_entry_size);
-
- rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
+ count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
+ kfd->device_info->ih_ring_entry_size);
- /*
- * Ensure the rptr write update is not visible until
- * memcpy has finished reading.
- */
- smp_mb();
- atomic_set(&kfd->interrupt_ring_rptr, rptr);
+ WARN_ON(count && count != kfd->device_info->ih_ring_entry_size);
- return true;
+ return count == kfd->device_info->ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index ed71ad40e8f7..8b0c0645d7c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -185,7 +185,7 @@ static void uninitialize(struct kernel_queue *kq)
kq->mqd->destroy_mqd(kq->mqd,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
- QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ KFD_UNMAP_LATENCY_MS,
kq->queue->pipe,
kq->queue->queue);
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
@@ -303,14 +303,20 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_KAVERI:
kernel_queue_init_cik(&kq->ops_asic_specific);
break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->device_info->asic_family);
+ goto out_free;
}
- if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
- pr_err("Failed to init kernel queue\n");
- kfree(kq);
- return NULL;
- }
- return kq;
+ if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
+ return kq;
+
+ pr_err("Failed to init kernel queue\n");
+
+out_free:
+ kfree(kq);
+ return NULL;
}
void kernel_queue_uninit(struct kernel_queue *kq)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 0d73bea22c45..f744caeaee04 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
+#include <linux/printk.h>
#include "kfd_priv.h"
#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
@@ -103,10 +104,6 @@ static int __init kfd_module_init(void)
return -1;
}
- err = kfd_pasid_init();
- if (err < 0)
- return err;
-
err = kfd_chardev_init();
if (err < 0)
goto err_ioctl;
@@ -126,7 +123,6 @@ static int __init kfd_module_init(void)
err_topology:
kfd_chardev_exit();
err_ioctl:
- kfd_pasid_exit();
return err;
}
@@ -137,8 +133,7 @@ static void __exit kfd_module_exit(void)
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
- kfd_pasid_exit();
- dev_info(kfd_device, "Removed module\n");
+ pr_info("amdkfd: Removed module\n");
}
module_init(kfd_module_init);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index b1ef1368c3bb..dfd260ef81ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -31,6 +31,9 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
return mqd_manager_init_cik(type, dev);
case CHIP_CARRIZO:
return mqd_manager_init_vi(type, dev);
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->device_info->asic_family);
}
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 44ffd23348fc..4728fad3fd74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -189,12 +189,9 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
@@ -205,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
- SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
@@ -215,24 +212,17 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->sdma_rlc_doorbell = q->doorbell_off <<
- SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
- 1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
+ m->sdma_rlc_doorbell =
+ q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- m->sdma_rlc_rb_cntl |=
- 1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
-
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
@@ -359,19 +349,13 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
- DOORBELL_OFFSET(q->doorbell_off);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
- m->cp_hqd_active = 0;
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- m->cp_hqd_active = 1;
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 73cbfe186dd2..4ea854f9007b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -163,12 +163,9 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 1d312603de9f..16da8ad02d8b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -140,8 +140,6 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
struct pm4_mes_map_process *packet;
- struct queue *cur;
- uint32_t num_queues;
packet = (struct pm4_mes_map_process *)buffer;
@@ -156,10 +154,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields10.gds_size = qpd->gds_size;
packet->bitfields10.num_gws = qpd->num_gws;
packet->bitfields10.num_oac = qpd->num_oac;
- num_queues = 0;
- list_for_each_entry(cur, &qpd->queues_list, list)
- num_queues++;
- packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;
+ packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
@@ -208,7 +203,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_map_queues__debug_interface_queue_vi;
break;
case KFD_QUEUE_TYPE_SDMA:
- packet->bitfields2.engine_sel =
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
use_static = false; /* no static queues under SDMA */
break;
@@ -376,7 +371,7 @@ int pm_send_set_resources(struct packet_manager *pm,
packet->bitfields2.queue_type =
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
- packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
+ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
@@ -476,7 +471,7 @@ fail_acquire_packet_buffer:
}
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
- enum kfd_preempt_type_filter mode,
+ enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset,
unsigned int sdma_engine)
{
@@ -494,8 +489,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
packet = (struct pm4_mes_unmap_queues *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
- pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
- mode, reset, type);
+ pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n",
+ filter, reset, type);
packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues));
switch (type) {
@@ -521,29 +516,29 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
packet->bitfields2.action =
action__mes_unmap_queues__preempt_queues;
- switch (mode) {
- case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
+ switch (filter) {
+ case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
packet->bitfields2.num_queues = 1;
packet->bitfields3b.doorbell_offset0 = filter_param;
break;
- case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
+ case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
packet->bitfields3a.pasid = filter_param;
break;
- case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
+ case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_queues;
break;
- case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
+ case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
/* in this case, we do not preempt static queues */
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
break;
default:
- WARN(1, "filter %d", mode);
+ WARN(1, "filter %d", filter);
retval = -EINVAL;
goto err_invalid;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 1e06de0bc673..d6a796144269 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -20,78 +20,64 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/slab.h>
#include <linux/types.h>
#include "kfd_priv.h"
-static unsigned long *pasid_bitmap;
-static unsigned int pasid_limit;
-static DEFINE_MUTEX(pasid_mutex);
-
-int kfd_pasid_init(void)
-{
- pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
-
- pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
- GFP_KERNEL);
- if (!pasid_bitmap)
- return -ENOMEM;
-
- set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
-
- return 0;
-}
-
-void kfd_pasid_exit(void)
-{
- kfree(pasid_bitmap);
-}
+static unsigned int pasid_bits = 16;
+static const struct kfd2kgd_calls *kfd2kgd;
bool kfd_set_pasid_limit(unsigned int new_limit)
{
- if (new_limit < pasid_limit) {
- bool ok;
-
- mutex_lock(&pasid_mutex);
-
- /* ensure that no pasids >= new_limit are in-use */
- ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
- pasid_limit);
- if (ok)
- pasid_limit = new_limit;
-
- mutex_unlock(&pasid_mutex);
-
- return ok;
+ if (new_limit < 2)
+ return false;
+
+ if (new_limit < (1U << pasid_bits)) {
+ if (kfd2kgd)
+ /* We've already allocated user PASIDs, too late to
+ * change the limit
+ */
+ return false;
+
+ while (new_limit < (1U << pasid_bits))
+ pasid_bits--;
}
return true;
}
-inline unsigned int kfd_get_pasid_limit(void)
+unsigned int kfd_get_pasid_limit(void)
{
- return pasid_limit;
+ return 1U << pasid_bits;
}
unsigned int kfd_pasid_alloc(void)
{
- unsigned int found;
-
- mutex_lock(&pasid_mutex);
-
- found = find_first_zero_bit(pasid_bitmap, pasid_limit);
- if (found == pasid_limit)
- found = 0;
- else
- set_bit(found, pasid_bitmap);
+ int r;
+
+ /* Find the first best KFD device for calling KGD */
+ if (!kfd2kgd) {
+ struct kfd_dev *dev = NULL;
+ unsigned int i = 0;
+
+ while ((dev = kfd_topology_enum_kfd_devices(i)) != NULL) {
+ if (dev && dev->kfd2kgd) {
+ kfd2kgd = dev->kfd2kgd;
+ break;
+ }
+ i++;
+ }
+
+ if (!kfd2kgd)
+ return false;
+ }
- mutex_unlock(&pasid_mutex);
+ r = kfd2kgd->alloc_pasid(pasid_bits);
- return found;
+ return r > 0 ? r : 0;
}
void kfd_pasid_free(unsigned int pasid)
{
- if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
- clear_bit(pasid, pasid_bitmap);
+ if (kfd2kgd)
+ kfd2kgd->free_pasid(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b87e96cee5fa..9e4134c5b481 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -31,8 +31,12 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
+#include <linux/idr.h>
+#include <linux/kfifo.h>
#include <kgd_kfd_interface.h>
+#include "amd_shared.h"
+
#define KFD_SYSFS_FILE_MODE 0444
#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
@@ -112,11 +116,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-enum asic_family_type {
- CHIP_KAVERI = 0,
- CHIP_CARRIZO
-};
-
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
const uint32_t *ih_ring_entry);
@@ -125,7 +124,7 @@ struct kfd_event_interrupt_class {
};
struct kfd_device_info {
- unsigned int asic_family;
+ enum amd_asic_type asic_family;
const struct kfd_event_interrupt_class *event_interrupt_class;
unsigned int max_pasid_bits;
unsigned int max_no_of_hqd;
@@ -141,6 +140,12 @@ struct kfd_mem_obj {
uint32_t *cpu_ptr;
};
+struct kfd_vmid_info {
+ uint32_t first_vmid_kfd;
+ uint32_t last_vmid_kfd;
+ uint32_t vmid_num_kfd;
+};
+
struct kfd_dev {
struct kgd_dev *kgd;
@@ -157,14 +162,12 @@ struct kfd_dev {
* to HW doorbell, GFX reserved some
* at the start)
*/
- size_t doorbell_process_limit; /* Number of processes we have doorbell
- * space for.
- */
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
* page used by kernel queue
*/
struct kgd2kfd_shared_resources shared_resources;
+ struct kfd_vmid_info vm_info;
const struct kfd2kgd_calls *kfd2kgd;
struct mutex doorbell_mutex;
@@ -180,10 +183,8 @@ struct kfd_dev {
unsigned int gtt_sa_num_of_chunks;
/* Interrupts */
- void *interrupt_ring;
- size_t interrupt_ring_size;
- atomic_t interrupt_ring_rptr;
- atomic_t interrupt_ring_wptr;
+ struct kfifo ih_fifo;
+ struct workqueue_struct *ih_wq;
struct work_struct interrupt_work;
spinlock_t interrupt_lock;
@@ -221,22 +222,22 @@ void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
- * enum kfd_preempt_type_filter
+ * enum kfd_unmap_queues_filter
*
- * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue.
+ * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
*
- * @KFD_PRERMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
+ * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
* running queues list.
*
- * @KFD_PRERMPT_TYPE_FILTER_BY_PASID: Preempts queues that belongs to
+ * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belongs to
* specific process.
*
*/
-enum kfd_preempt_type_filter {
- KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
- KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES,
- KFD_PREEMPT_TYPE_FILTER_BY_PASID
+enum kfd_unmap_queues_filter {
+ KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
+ KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+ KFD_UNMAP_QUEUES_FILTER_BY_PASID
};
/**
@@ -404,7 +405,6 @@ struct scheduling_resources {
struct process_queue_manager {
/* data */
struct kfd_process *process;
- unsigned int num_concurrent_processes;
struct list_head queues;
unsigned long *queue_slot_bitmap;
};
@@ -420,6 +420,12 @@ struct qcm_process_device {
unsigned int queue_count;
unsigned int vmid;
bool is_debug;
+
+ /* This flag tells if we should reset all wavefronts on
+ * process termination
+ */
+ bool reset_wavefronts;
+
/*
* All the memory management data should be here too
*/
@@ -435,6 +441,13 @@ struct qcm_process_device {
uint32_t sh_hidden_private_base;
};
+
+enum kfd_pdd_bound {
+ PDD_UNBOUND = 0,
+ PDD_BOUND,
+ PDD_BOUND_SUSPENDED,
+};
+
/* Data that is per-process-per device. */
struct kfd_process_device {
/*
@@ -446,6 +459,8 @@ struct kfd_process_device {
/* The device that owns this data. */
struct kfd_dev *dev;
+ /* The process that owns this kfd_process_device. */
+ struct kfd_process *process;
/* per-process-per device QCM data structure */
struct qcm_process_device qpd;
@@ -459,12 +474,14 @@ struct kfd_process_device {
uint64_t scratch_limit;
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
- bool bound;
+ enum kfd_pdd_bound bound;
- /* This flag tells if we should reset all
- * wavefronts on process termination
+ /* Flag used to tell the pdd has dequeued from the dqm.
+ * This is used to prevent dev->dqm->ops.process_termination() from
+ * being called twice when it is already called in IOMMU callback
+ * function.
*/
- bool reset_wavefronts;
+ bool already_dequeued;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -477,7 +494,12 @@ struct kfd_process {
*/
struct hlist_node kfd_processes;
- struct mm_struct *mm;
+ /*
+ * Opaque pointer to mm_struct. We don't hold a reference to
+ * it so it should never be dereferenced from here. This is
+ * only used for looking up processes by their mm.
+ */
+ void *mm;
struct mutex mutex;
@@ -485,6 +507,8 @@ struct kfd_process {
* In any process, the thread that started main() is the lead
* thread and outlives the rest.
* It is here because amd_iommu_bind_pasid wants a task_struct.
+ * It can also be used for safely getting a reference to the
+ * mm_struct of the process.
*/
struct task_struct *lead_thread;
@@ -495,6 +519,7 @@ struct kfd_process {
struct rcu_head rcu;
unsigned int pasid;
+ unsigned int doorbell_index;
/*
* List of kfd_process_device structures,
@@ -504,22 +529,16 @@ struct kfd_process {
struct process_queue_manager pqm;
- /* The process's queues. */
- size_t queue_array_size;
-
- /* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
- struct kfd_queue **queues;
-
/*Is the user space process 32 bit?*/
bool is_32bit_user_mode;
/* Event-related data */
struct mutex event_mutex;
- /* All events in process hashed by ID, linked on kfd_event.events. */
- DECLARE_HASHTABLE(events, 4);
- /* struct slot_page_header.event_pages */
- struct list_head signal_event_pages;
- u32 next_nonsignal_event_id;
+ /* Event ID allocator and lookup */
+ struct idr event_idr;
+ /* Event page */
+ struct kfd_signal_page *signal_page;
+ size_t signal_mapped_size;
size_t signal_event_count;
bool signal_event_limit_reached;
};
@@ -549,8 +568,10 @@ struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
- struct kfd_process *p);
-void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
+ struct kfd_process *p);
+int kfd_bind_processes_to_device(struct kfd_dev *dev);
+void kfd_unbind_processes_from_device(struct kfd_dev *dev);
+void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -584,6 +605,10 @@ void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int queue_id);
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+ struct kfd_process *process);
+int kfd_alloc_process_doorbells(struct kfd_process *process);
+void kfd_free_process_doorbells(struct kfd_process *process);
/* GTT Sub-Allocator */
@@ -644,14 +669,14 @@ struct process_queue_node {
struct list_head process_queue_list;
};
+void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
+void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int flags,
- enum kfd_queue_type type,
unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
@@ -661,15 +686,12 @@ struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
- unsigned long timeout);
+ unsigned int timeout_ms);
/* Packet Manager */
-#define KFD_HIQ_TIMEOUT (500)
-
#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
-#define KFD_UNMAP_LATENCY (150)
struct packet_manager {
struct device_queue_manager *dqm;
@@ -688,33 +710,25 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint32_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
- enum kfd_preempt_type_filter mode,
+ enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset,
unsigned int sdma_engine);
void pm_release_ib(struct packet_manager *pm);
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
- struct kfd_process *process);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_device_global_init_class device_global_init_class_cik;
-enum kfd_event_wait_result {
- KFD_WAIT_COMPLETE,
- KFD_WAIT_TIMEOUT,
- KFD_WAIT_ERROR
-};
-
void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
bool all, uint32_t user_timeout_ms,
- enum kfd_event_wait_result *wait_result);
+ uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index c74cf22a1ed9..1f5ccd28bd41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -35,13 +35,6 @@ struct mm_struct;
#include "kfd_dbgmgr.h"
/*
- * Initial size for the array of queues.
- * The allocated size is doubled each time
- * it is exceeded up to MAX_PROCESS_QUEUES.
- */
-#define INITIAL_QUEUE_ARRAY_SIZE 16
-
-/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
@@ -171,25 +164,22 @@ static void kfd_process_wq_release(struct work_struct *work)
pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
pdd->dev->id, p->pasid);
- if (pdd->reset_wavefronts)
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ if (pdd->bound == PDD_BOUND)
+ amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
- amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
list_del(&pdd->per_device_list);
-
kfree(pdd);
}
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
+ kfd_free_process_doorbells(p);
mutex_unlock(&p->mutex);
mutex_destroy(&p->mutex);
- kfree(p->queues);
-
kfree(p);
kfree(work);
@@ -201,7 +191,6 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process *p;
p = container_of(rcu, struct kfd_process, rcu);
- WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm);
@@ -235,24 +224,26 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
mutex_lock(&p->mutex);
- /* In case our notifier is called before IOMMU notifier */
- pqm_uninit(&p->pqm);
-
- /* Iterate over all process device data structure and check
- * if we should delete debug managers and reset all wavefronts
+ /* Iterate over all process device data structures and if the
+ * pdd is in debug mode, we should first force unregistration,
+ * then we will be able to destroy the queues
*/
list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
- if ((pdd->dev->dbgmgr) &&
- (pdd->dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
-
- if (pdd->reset_wavefronts) {
- pr_warn("Resetting all wave fronts\n");
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
+ struct kfd_dev *dev = pdd->dev;
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+ if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
+ if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
}
+ mutex_unlock(kfd_get_dbgmgr_mutex());
}
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
mutex_unlock(&p->mutex);
/*
@@ -279,15 +270,13 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (!process)
goto err_alloc_process;
- process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
- sizeof(process->queues[0]), GFP_KERNEL);
- if (!process->queues)
- goto err_alloc_queues;
-
process->pasid = kfd_pasid_alloc();
if (process->pasid == 0)
goto err_alloc_pasid;
+ if (kfd_alloc_process_doorbells(process) < 0)
+ goto err_alloc_doorbells;
+
mutex_init(&process->mutex);
process->mm = thread->mm;
@@ -303,8 +292,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
process->lead_thread = thread->group_leader;
- process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;
-
INIT_LIST_HEAD(&process->per_device_data);
kfd_event_init_process(process);
@@ -329,10 +316,10 @@ err_process_pqm_init:
mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
mutex_destroy(&process->mutex);
+ kfd_free_process_doorbells(process);
+err_alloc_doorbells:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
- kfree(process->queues);
-err_alloc_queues:
kfree(process);
err_alloc_process:
return ERR_PTR(err);
@@ -345,9 +332,9 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
list_for_each_entry(pdd, &p->per_device_data, per_device_list)
if (pdd->dev == dev)
- break;
+ return pdd;
- return pdd;
+ return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -361,7 +348,9 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
- pdd->reset_wavefronts = false;
+ pdd->process = p;
+ pdd->bound = PDD_UNBOUND;
+ pdd->already_dequeued = false;
list_add(&pdd->per_device_list, &p->per_device_data);
}
@@ -387,19 +376,87 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
- if (pdd->bound)
+ if (pdd->bound == PDD_BOUND) {
return pdd;
+ } else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
+ pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
+ return ERR_PTR(-EINVAL);
+ }
err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
if (err < 0)
return ERR_PTR(err);
- pdd->bound = true;
+ pdd->bound = PDD_BOUND;
return pdd;
}
-void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+/*
+ * Bind processes do the device that have been temporarily unbound
+ * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
+ */
+int kfd_bind_processes_to_device(struct kfd_dev *dev)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+ int err = 0;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(dev, p);
+ if (pdd->bound != PDD_BOUND_SUSPENDED) {
+ mutex_unlock(&p->mutex);
+ continue;
+ }
+
+ err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
+ p->lead_thread);
+ if (err < 0) {
+ pr_err("Unexpected pasid %d binding failure\n",
+ p->pasid);
+ mutex_unlock(&p->mutex);
+ break;
+ }
+
+ pdd->bound = PDD_BOUND;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return err;
+}
+
+/*
+ * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
+ * processes will be restored to PDD_BOUND state in
+ * kfd_bind_processes_to_device.
+ */
+void kfd_unbind_processes_from_device(struct kfd_dev *dev)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(dev, p);
+
+ if (pdd->bound == PDD_BOUND)
+ pdd->bound = PDD_BOUND_SUSPENDED;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
+
+void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
struct kfd_process *p;
struct kfd_process_device *pdd;
@@ -415,31 +472,23 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pr_debug("Unbinding process %d from IOMMU\n", pasid);
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
-
- pqm_uninit(&p->pqm);
-
- pdd = kfd_get_process_device_data(dev, p);
+ mutex_lock(kfd_get_dbgmgr_mutex());
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
+ if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
+ if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
}
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ mutex_unlock(kfd_get_dbgmgr_mutex());
- /*
- * Just mark pdd as unbound, because we still need it
- * to call amd_iommu_unbind_pasid() in when the
- * process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ pdd = kfd_get_process_device_data(dev, p);
+ if (pdd)
+ /* For GPU relying on IOMMU, we need to dequeue here
+ * when PASID is still bound.
+ */
+ kfd_process_dequeue_from_device(pdd);
mutex_unlock(&p->mutex);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 03bec765b03d..a3f1e62c60ba 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -63,6 +63,25 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
return 0;
}
+void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+
+ if (pdd->already_dequeued)
+ return;
+
+ dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+ pdd->already_dequeued = true;
+}
+
+void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ kfd_process_dequeue_from_device(pdd);
+}
+
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
INIT_LIST_HEAD(&pqm->queues);
@@ -78,21 +97,14 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
void pqm_uninit(struct process_queue_manager *pqm)
{
- int retval;
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
- retval = pqm_destroy_queue(
- pqm,
- (pqn->q != NULL) ?
- pqn->q->properties.queue_id :
- pqn->kq->queue->properties.queue_id);
-
- if (retval != 0) {
- pr_err("failed to destroy queue\n");
- return;
- }
+ uninit_queue(pqn->q);
+ list_del(&pqn->process_queue_list);
+ kfree(pqn);
}
+
kfree(pqm->queue_slot_bitmap);
pqm->queue_slot_bitmap = NULL;
}
@@ -130,20 +142,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int flags,
- enum kfd_queue_type type,
unsigned int *qid)
{
int retval;
struct kfd_process_device *pdd;
- struct queue_properties q_properties;
struct queue *q;
struct process_queue_node *pqn;
struct kernel_queue *kq;
- int num_queues = 0;
- struct queue *cur;
+ enum kfd_queue_type type = properties->type;
+ unsigned int max_queues = 127; /* HWS limit */
- memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL;
kq = NULL;
@@ -159,19 +167,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* If we are just about to create DIQ, the is_debug flag is not set yet
* Hence we also check the type as well
*/
- if ((pdd->qpd.is_debug) ||
- (type == KFD_QUEUE_TYPE_DIQ)) {
- list_for_each_entry(cur, &pdd->qpd.queues_list, list)
- num_queues++;
- if (num_queues >= dev->device_info->max_no_of_hqd/2)
- return -ENOSPC;
- }
+ if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
+ max_queues = dev->device_info->max_no_of_hqd/2;
+
+ if (pdd->qpd.queue_count >= max_queues)
+ return -ENOSPC;
retval = find_available_queue_slot(pqm, qid);
if (retval != 0)
return retval;
- if (list_empty(&pqm->queues)) {
+ if (list_empty(&pdd->qpd.queues_list) &&
+ list_empty(&pdd->qpd.priv_queue_list)) {
pdd->qpd.pqm = pqm;
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
}
@@ -184,17 +191,35 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
+ if (dev->dqm->queue_count >=
+ CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ pr_err("Over-subscription is not allowed for SDMA.\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+
+ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ if (retval != 0)
+ goto err_create_queue;
+ pqn->q = q;
+ pqn->kq = NULL;
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+ &q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
+ print_queue(q);
+ break;
+
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
- ((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
+ ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid);
+ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -231,9 +256,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
list_add(&pqn->process_queue_list, &pqm->queues);
if (q) {
- *properties = q->properties;
pr_debug("PQM done creating queue\n");
- print_queue_properties(properties);
+ print_queue_properties(&q->properties);
}
return retval;
@@ -243,7 +267,8 @@ err_create_queue:
err_allocate_pqn:
/* check if queues list is empty unregister process from device */
clear_bit(*qid, pqm->queue_slot_bitmap);
- if (list_empty(&pqm->queues))
+ if (list_empty(&pdd->qpd.queues_list) &&
+ list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
@@ -290,9 +315,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (pqn->q) {
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
- if (retval != 0)
- return retval;
-
uninit_queue(pqn->q);
}
@@ -300,7 +322,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
kfree(pqn);
clear_bit(qid, pqm->queue_slot_bitmap);
- if (list_empty(&pqm->queues))
+ if (list_empty(&pdd->qpd.queues_list) &&
+ list_empty(&pdd->qpd.priv_queue_list))
dqm->ops.unregister_process(dqm, &pdd->qpd);
return retval;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
new file mode 100644
index 000000000000..ec3285f65517
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -0,0 +1,45 @@
+menu "Display Engine Configuration"
+ depends on DRM && DRM_AMDGPU
+
+config DRM_AMD_DC
+ bool "AMD DC - Enable new display engine"
+ default y
+ help
+ Choose this option if you want to use the new display engine
+ support for AMDGPU. This adds required support for Vega and
+ Raven ASICs.
+
+config DRM_AMD_DC_PRE_VEGA
+ bool "DC support for Polaris and older ASICs"
+ default n
+ help
+ Choose this option to enable the new DC support for older asics
+ by default. This includes Polaris, Carrizo, Tonga, Bonaire,
+ and Hawaii.
+
+config DRM_AMD_DC_FBC
+ bool "AMD FBC - Enable Frame Buffer Compression"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to use frame buffer compression
+ support.
+ This is a power optimisation feature, check its availability
+ on your hardware before enabling this option.
+
+
+config DRM_AMD_DC_DCN1_0
+ bool "DCN 1.0 Raven family"
+ depends on DRM_AMD_DC && X86
+ help
+ Choose this option if you want to have
+ RV family for display engine
+
+config DEBUG_KERNEL_DC
+ bool "Enable kgdb break in DC"
+ depends on DRM_AMD_DC
+ help
+ Choose this option
+ if you want to hit
+ kdgb_break in assert.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
new file mode 100644
index 000000000000..c27c81cdeed3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -0,0 +1,43 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the DAL (Display Abstract Layer), which is a sub-component
+# of the AMDGPU drm driver.
+# It provides the HW control for display related functionalities.
+
+AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
+
+subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+
+#TODO: remove when Timing Sync feature is complete
+subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
+
+DAL_LIBS = amdgpu_dm dc modules/freesync
+
+AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
+
+include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
new file mode 100644
index 000000000000..46464678f2b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -0,0 +1,107 @@
+===============================================================================
+TODOs
+===============================================================================
+
+1. Base this on drm-next - WIP
+
+
+2. Cleanup commit history
+
+
+3. WIP - Drop page flip helper and use DRM's version
+
+
+4. DONE - Flatten all DC objects
+ * dc_stream/core_stream/stream should just be dc_stream
+ * Same for other DC objects
+
+ "Is there any major reason to keep all those abstractions?
+
+ Could you collapse everything into struct dc_stream?
+
+ I haven't looked recently but I didn't get the impression there was a
+ lot of design around what was public/protected, more whatever needed
+ to be used by someone else was in public."
+ ~ Dave Airlie
+
+
+5. DONE - Rename DC objects to align more with DRM
+ * dc_surface -> dc_plane_state
+ * dc_stream -> dc_stream_state
+
+
+6. DONE - Per-plane and per-stream validation
+
+
+7. WIP - Per-plane and per-stream commit
+
+
+8. WIP - Split pipe_ctx into plane and stream resource structs
+
+
+9. Attach plane and stream reources to state object instead of validate_context
+
+
+10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
+ * Use drm_display_info instead
+ * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
+
+ "Making sure you use the sink-specific helper libraries and kernel
+ subsystems, since there's really no good reason to have 2nd
+ implementation of those in the kernel. Looks likes that's done for mst
+ and edid parsing. There's still a bit a midlayer feeling to the edid
+ parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
+ think it'd be much better if you convert that over to reading stuff
+ from drm_display_info and if needed, push stuff into the core). Also,
+ I can't come up with a good reason why DC needs all this (except to
+ reimplement half of our edid quirk table, which really isn't a good
+ idea). Might be good if you put this onto the list of things to fix
+ long-term, but imo not a blocker. Definitely make sure new stuff
+ doesn't slip in (i.e. if you start adding edid quirks to DC instead of
+ the drm core, refactoring to use the core edid stuff was pointless)."
+ ~ Daniel Vetter
+
+
+11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
+overy complicated HW programming function for sendind and receiving i2c/aux
+commands. We can greatly simplify that and move it into dc/dceXYZ like other
+HW blocks.
+
+12. drm_modeset_lock in MST should no longer be needed in recent kernels
+ * Adopt appropriate locking scheme
+
+13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
+a few indirections, and consider removing entirely and using the
+drm_atomic_helper_best_encoder default behaviour.
+
+14. core/dc_debug.c, consider switching to the atomic state debug helpers and
+moving all your driver state printing into the various atomic_print_state
+callbacks. There's also plans to expose this stuff in a standard way across all
+drivers, to make debugging userspace compositors easier across different hw.
+
+15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
+dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
+
+16. Move to core SCDC helpers (I think those are new since initial DC review).
+
+17. There's still a pretty massive layer cake around dp aux and DPCD handling,
+with like 3 levels of abstraction and using your own structures instead of the
+stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
+incompatible styles, just means more reasons not to add a third (or well third
+one gets to do the cleanup refactor).
+
+18. There's a pile of sink handling code, both for DP and HDMI where I didn't
+immediately recognize the standard. I think long term it'd be best for the drm
+subsystem if we try to move as much of that into helpers/core as possible, and
+share it with drivers. But that's a very long term goal, and by far not just an
+issue with DC - other drivers, especially around DP sink handling, are equally
+guilty.
+
+19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
+stuff just isn't up to the challenges either. We need to figure out something
+that integrates better with DRM and linux debug printing, while not being
+useless with filtering output. dynamic debug printing might be an option.
+
+20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
+retimer that we need to program to pass PHY compliance. Currently that's
+bypassing the i2c device and goes directly to HW. This should be changed.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
new file mode 100644
index 000000000000..2b72009844f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -0,0 +1,38 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'dm' sub-component of DAL.
+# It provides the control and status of dm blocks.
+
+
+
+AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
+
+ifneq ($(CONFIG_DRM_AMD_DC),)
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
+
+AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
+
+AMD_DISPLAY_FILES += $(AMDGPU_DM)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
new file mode 100644
index 000000000000..f71fe6d2ddda
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -0,0 +1,4934 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "dc/inc/core_types.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_pm.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "dm_services_types.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/types.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+
+#include "modules/inc/mod_freesync.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "soc15_common.h"
+#endif
+
+#include "modules/inc/mod_freesync.h"
+
+#include "i2caux_interface.h"
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+/* initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs);
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t link_index);
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ uint32_t link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+
+
+
+static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+};
+
+static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
+};
+
+static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
+};
+
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int disp_idx - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc_state->stream);
+ }
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc_state->stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+ }
+
+ return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ /*
+ * following if is check inherited from both functions where this one is
+ * used now. Need to be checked why it could happen.
+ */
+ if (otg_inst == -1) {
+ WARN_ON(1);
+ return adev->mode_info.crtcs[0];
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ unsigned long flags;
+
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur when in initial stage */
+ /*TODO work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id,
+ amdgpu_crtc);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return;
+ }
+
+
+ /* wakeup usersapce */
+ if (amdgpu_crtc->event) {
+ /* Update to correct count/ts if racing with vblank irq */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->event = NULL;
+
+ } else
+ WARN_ON(1);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
+ __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+}
+
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ uint8_t crtc_index = 0;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+ if (acrtc)
+ crtc_index = acrtc->crtc_id;
+
+ drm_handle_vblank(adev->ddev, crtc_index);
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void* handle);
+
+static void hotplug_notify_work_func(struct work_struct *work)
+{
+ struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
+ struct drm_device *dev = dm->ddev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dal_asic_id.h"
+/* Allocate memory for FBC compressed data */
+/* TODO: Dynamic allocation */
+#define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
+
+static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
+{
+ int r;
+ struct dm_comressor_info *compressor = &adev->dm.compressor;
+
+ if (!compressor->bo_ptr) {
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+ if (r)
+ DRM_ERROR("DM: Failed to initialize fbc\n");
+ }
+
+}
+#endif
+
+
+/* Init display KMS
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ adev->dm.ddev = adev->ddev;
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+
+ /* initialize DAL's lock (for SYNC context use) */
+ spin_lock_init(&adev->dm.dal_lock);
+
+ /* initialize DAL's mutex */
+ mutex_init(&adev->dm.dal_mutex);
+
+ if(amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+ init_data.asic_id.vram_width = adev->mc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ adev->dm.dal = NULL;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ if (amdgpu_dc_log)
+ init_data.log_mask = DC_DEFAULT_LOG_MASK;
+ else
+ init_data.log_mask = DC_MIN_LOG_MASK;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (adev->family == FAMILY_CZ)
+ amdgpu_dm_initialize_fbc(adev);
+ init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
+#endif
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core initialized!\n");
+ } else {
+ DRM_INFO("Display Core failed to initialize!\n");
+ goto error;
+ }
+
+ INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
+
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize freesync_module.\n");
+ } else
+ DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ adev->dm.freesync_module);
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -1;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+ /*
+ * TODO: pageflip, vlank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ return;
+}
+
+static int dm_sw_init(void *handle)
+{
+ return 0;
+}
+
+static int dm_sw_fini(void *handle)
+{
+ return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
+ return ret;
+ }
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
+
+ return detect_mst_link_for_all_connectors(dev);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ !aconnector->mst_port) {
+
+ if (suspend)
+ drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
+ else
+ drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Create DAL display manager */
+ amdgpu_dm_init(adev);
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ s3_handle_mst(adev->ddev, true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ return ret;
+}
+
+static struct amdgpu_dm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ uint32_t i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return to_amdgpu_dm_connector(connector);
+ }
+
+ return NULL;
+}
+
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /* power on hardware */
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ return 0;
+}
+
+int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev->ddev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+
+ int ret = 0;
+ int i;
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+ /* On resume we need to rewrite the MSTM control bits to enamble MST*/
+ s3_handle_mst(ddev, false);
+
+ /*
+ * early enable HPD Rx IRQ, should be done before set mode as short
+ * pulse interrupts are used for MST
+ */
+ amdgpu_dm_irq_resume_early(adev);
+
+ /* Do detection*/
+ list_for_each_entry(connector,
+ &ddev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * this is the case when traversing through already created
+ * MST connectors, should be skipped
+ */
+ if (aconnector->mst_port)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+
+ /* Force mode set in atomic comit */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+
+ adev->dm.cached_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ return ret;
+}
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
+
+
+static struct drm_atomic_state *
+dm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return NULL;
+
+ if (drm_atomic_state_init(dev, &state->base) < 0)
+ goto fail;
+
+ return &state->base;
+
+fail:
+ kfree(state);
+ return NULL;
+}
+
+static void
+dm_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state->context) {
+ dc_release_state(dm_state->context);
+ dm_state->context = NULL;
+ }
+
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+dm_atomic_state_alloc_free(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ drm_atomic_state_default_release(state);
+ kfree(dm_state);
+}
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_user_framebuffer_create,
+ .output_poll_changed = amdgpu_output_poll_changed,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = amdgpu_dm_atomic_commit,
+ .atomic_state_alloc = dm_atomic_state_alloc,
+ .atomic_state_clear = dm_atomic_state_clear,
+ .atomic_state_free = dm_atomic_state_alloc_free
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+};
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+ if (aconnector->mst_mgr.mst_state == true)
+ return;
+
+
+ sink = aconnector->dc_link->local_sink;
+
+ /* Edid mgmt connector gets first update only in mode_valid hook and then
+ * the connector sink is set to either fake or physical sink depends on link status.
+ * don't do it here if u are during boot
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+ /* For S3 resume with headless use eml_sink to fake stream
+ * because on resume connecotr->sink is set ti NULL
+ */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+ /* retain and release bellow are used for
+ * bump up refcount for sink because the link don't point
+ * to it anymore after disconnect so on next crtc to connector
+ * reshuffle by UMD we will get into unwanted dc_sink release
+ */
+ if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ if (!aconnector->dc_sink)
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ else if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_retain(aconnector->dc_sink);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return;
+ }
+
+ /*
+ * TODO: temporary guard to look for proper fix
+ * if this sink is MST sink, we should not do anything
+ */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return;
+
+ if (aconnector->dc_sink == sink) {
+ /* We got a DP short pulse (Link Loss, DP CTS, etc...).
+ * Do nothing!! */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do */
+ if (sink) {
+ /* TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here. */
+ if (aconnector->dc_sink)
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+
+ aconnector->dc_sink = sink;
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ } else {
+ aconnector->edid =
+ (struct edid *) sink->dc_edid.raw_edid;
+
+
+ drm_mode_connector_update_edid_property(connector,
+ aconnector->edid);
+ }
+ amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
+
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ drm_mode_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ aconnector->dc_sink = NULL;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+
+ /* In case of failure or MST no need to update connector status or notify the OS
+ * since (for MST case) MST does this in it's own context.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+
+}
+
+static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+{
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ int dpcd_bytes_to_read;
+
+ const int max_process_count = 30;
+ int process_count = 0;
+
+ const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ while (dret == dpcd_bytes_to_read &&
+ process_count < max_process_count) {
+ uint8_t retry;
+ dret = 0;
+
+ process_count++;
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ /* handle HPD short pulse irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq(
+ &aconnector->mst_mgr,
+ esi,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify down stream */
+ const int ack_dpcd_bytes_to_write =
+ dpcd_bytes_to_read - 1;
+
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t wret;
+
+ wret = drm_dp_dpcd_write(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ &esi[1],
+ ack_dpcd_bytes_to_write);
+ if (wret == ack_dpcd_bytes_to_write)
+ break;
+ }
+
+ /* check if there is new irq to be handle */
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+
+ /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
+ * conflict, after implement i2c helper, this mutex should be
+ * retired.
+ */
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ }
+ }
+ if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ (dc_link->type == dc_connection_mst_branch))
+ dm_handle_hpd_rx_irq(aconnector);
+
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN)
+ client_id = AMDGPU_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling. */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ * */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+ /* indicate support of immediate flip */
+ adev->ddev->mode_config.async_page_flip = true;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ if (dc_link_set_backlight_level(dm->backlight_link,
+ bd->props.brightness, 0, 0))
+ return 0;
+ else
+ return 1;
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+{
+ char bl_name[16];
+ struct backlight_properties props = { 0 };
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ dm->adev->ddev->primary->index);
+
+ dm->backlight_dev = backlight_device_register(bl_name,
+ dm->adev->ddev->dev,
+ dm,
+ &amdgpu_dm_backlight_ops,
+ &props);
+
+ if (IS_ERR(dm->backlight_dev))
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ else
+ DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+#endif
+
+/* In this architecture, the association
+ * connector -> encoder -> crtc
+ * id not really requried. The crtc and connector will hold the
+ * display_index as an abstraction to use with DAL component
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ uint32_t i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ uint32_t link_cnt;
+ unsigned long possible_crtcs;
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -1;
+ }
+
+ for (i = 0; i < dm->dc->caps.max_planes; i++) {
+ struct amdgpu_plane *plane;
+
+ plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ mode_info->planes[i] = plane;
+
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ goto fail;
+ }
+ plane->base.type = mode_info->plane_type[i];
+
+ /*
+ * HACK: IGT tests expect that each plane can only have one
+ * one possible CRTC. For now, set one CRTC for each
+ * plane that is not an underlay, but still allow multiple
+ * CRTCs for underlay planes.
+ */
+ possible_crtcs = 1 << i;
+ if (i >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
+ DETECT_REASON_BOOT))
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGA10:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ /*
+ * Temporary disable until pplib/smu interaction is implemented
+ */
+ dm->dc->debug.disable_stutter = true;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+ for (i = 0; i < dm->dc->caps.max_planes; i++)
+ kfree(mode_info->planes[i]);
+ return -1;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_mode_config_cleanup(dm->ddev);
+ return;
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/**
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+ u8 level)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+}
+
+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+ return 0;
+}
+
+static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct mod_freesync_params freesync_params;
+ uint8_t num_streams;
+ uint8_t i;
+
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0;
+
+ /* Get freesync enable flag from DRM */
+
+ num_streams = dc_get_current_stream_count(adev->dm.dc);
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream;
+ stream = dc_get_stream_at_index(adev->dm.dc, i);
+
+ mod_freesync_update_state(adev->dm.freesync_module,
+ &stream, 1, &freesync_params);
+ }
+
+ return r;
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .vblank_wait = NULL,
+ .backlight_set_level =
+ dm_set_backlight_level,/* called unconditionally */
+ .backlight_get_level =
+ dm_get_backlight_level,/* called unconditionally */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+ .notify_freesync = amdgpu_notify_freesync,
+
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ amdgpu_dm_display_resume(adev);
+ drm_kms_helper_hotplug_event(adev->ddev);
+ } else
+ dm_suspend(adev);
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
+ amdgpu_dm_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_carizzo;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_stoney;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_POLARIS10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_VEGA10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /* Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init() */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev->ddev->dev,
+ &dev_attr_s3_debug);
+#endif
+
+ return 0;
+}
+
+struct dm_connector_state {
+ struct drm_connector_state base;
+
+ enum amdgpu_rmx_type scaling;
+ uint8_t underscan_vborder;
+ uint8_t underscan_hborder;
+ bool underscan_enable;
+};
+
+#define to_dm_connector_state(x)\
+ container_of((x), struct dm_connector_state, base)
+
+static bool modeset_required(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_stream_state *old_stream)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ if (!crtc_state->enable)
+ return false;
+
+ return crtc_state->active;
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ return !crtc_state->enable || !crtc_state->active;
+}
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
+ struct dc_plane_state *plane_state)
+{
+ plane_state->src_rect.x = state->src_x >> 16;
+ plane_state->src_rect.y = state->src_y >> 16;
+ /*we ignore for now mantissa and do not to deal with floating pixels :(*/
+ plane_state->src_rect.width = state->src_w >> 16;
+
+ if (plane_state->src_rect.width == 0)
+ return false;
+
+ plane_state->src_rect.height = state->src_h >> 16;
+ if (plane_state->src_rect.height == 0)
+ return false;
+
+ plane_state->dst_rect.x = state->crtc_x;
+ plane_state->dst_rect.y = state->crtc_y;
+
+ if (state->crtc_w == 0)
+ return false;
+
+ plane_state->dst_rect.width = state->crtc_w;
+
+ if (state->crtc_h == 0)
+ return false;
+
+ plane_state->dst_rect.height = state->crtc_h;
+
+ plane_state->clip_rect = plane_state->dst_rect;
+
+ switch (state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_state->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_state->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_state->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+ return true;
+}
+static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+ uint64_t *tiling_flags,
+ uint64_t *fb_location)
+{
+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ int r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+ // Don't show error msg. when return -ERESTARTSYS
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ if (fb_location)
+ *fb_location = amdgpu_bo_gpu_offset(rbo);
+
+ if (tiling_flags)
+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+ amdgpu_bo_unreserve(rbo);
+
+ return r;
+}
+
+static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
+ struct dc_plane_state *plane_state,
+ const struct amdgpu_framebuffer *amdgpu_fb,
+ bool addReq)
+{
+ uint64_t tiling_flags;
+ uint64_t fb_location = 0;
+ uint64_t chroma_addr = 0;
+ unsigned int awidth;
+ const struct drm_framebuffer *fb = &amdgpu_fb->base;
+ int ret = 0;
+ struct drm_format_name_buf format_name;
+
+ ret = get_fb_info(
+ amdgpu_fb,
+ &tiling_flags,
+ addReq == true ? &fb_location:NULL);
+
+ if (ret)
+ return ret;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
+ return -EINVAL;
+ }
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
+ plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
+ plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
+ plane_state->plane_size.grph.surface_size.x = 0;
+ plane_state->plane_size.grph.surface_size.y = 0;
+ plane_state->plane_size.grph.surface_size.width = fb->width;
+ plane_state->plane_size.grph.surface_size.height = fb->height;
+ plane_state->plane_size.grph.surface_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_SRGB;
+
+ } else {
+ awidth = ALIGN(fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(fb_location);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(fb_location);
+ chroma_addr = fb_location + (u64)(awidth * fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ plane_state->plane_size.video.luma_size.x = 0;
+ plane_state->plane_size.video.luma_size.y = 0;
+ plane_state->plane_size.video.luma_size.width = awidth;
+ plane_state->plane_size.video.luma_size.height = fb->height;
+ /* TODO: unhardcode */
+ plane_state->plane_size.video.luma_pitch = awidth;
+
+ plane_state->plane_size.video.chroma_size.x = 0;
+ plane_state->plane_size.video.chroma_size.y = 0;
+ plane_state->plane_size.video.chroma_size.width = awidth;
+ plane_state->plane_size.video.chroma_size.height = fb->height;
+ plane_state->plane_size.video.chroma_pitch = awidth / 2;
+
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_YCBCR709;
+ }
+
+ memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
+
+ /* Fill GFX8 params */
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+ unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
+
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+ /* XXX fix me for VI */
+ plane_state->tiling_info.gfx8.num_banks = num_banks;
+ plane_state->tiling_info.gfx8.array_mode =
+ DC_ARRAY_2D_TILED_THIN1;
+ plane_state->tiling_info.gfx8.tile_split = tile_split;
+ plane_state->tiling_info.gfx8.bank_width = bankw;
+ plane_state->tiling_info.gfx8.bank_height = bankh;
+ plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
+ plane_state->tiling_info.gfx8.tile_mode =
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY;
+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
+ == DC_ARRAY_1D_TILED_THIN1) {
+ plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+ }
+
+ plane_state->tiling_info.gfx8.pipe_config =
+ AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN) {
+ /* Fill GFX9 params */
+ plane_state->tiling_info.gfx9.num_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+ plane_state->tiling_info.gfx9.num_banks =
+ adev->gfx.config.gb_addr_config_fields.num_banks;
+ plane_state->tiling_info.gfx9.pipe_interleave =
+ adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
+ plane_state->tiling_info.gfx9.num_shader_engines =
+ adev->gfx.config.gb_addr_config_fields.num_se;
+ plane_state->tiling_info.gfx9.max_compressed_frags =
+ adev->gfx.config.gb_addr_config_fields.max_compress_frags;
+ plane_state->tiling_info.gfx9.num_rb_per_se =
+ adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
+ plane_state->tiling_info.gfx9.swizzle =
+ AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
+ plane_state->tiling_info.gfx9.shaderEnable = 1;
+ }
+
+ plane_state->visible = true;
+ plane_state->scaling_quality.h_taps_c = 0;
+ plane_state->scaling_quality.v_taps_c = 0;
+
+ /* is this needed? is plane_state zeroed at allocation? */
+ plane_state->scaling_quality.h_taps = 0;
+ plane_state->scaling_quality.v_taps = 0;
+ plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ return ret;
+
+}
+
+static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
+ struct dc_plane_state *plane_state)
+{
+ int i;
+ struct dc_gamma *gamma;
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) crtc_state->gamma_lut->data;
+
+ gamma = dc_create_gamma();
+
+ if (gamma == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+ gamma->type = GAMMA_RGB_256;
+ gamma->num_entries = GAMMA_RGB_256_ENTRIES;
+ for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
+ gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
+ gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
+ gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
+ }
+
+ plane_state->gamma_correction = gamma;
+}
+
+static int fill_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state,
+ bool addrReq)
+{
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ const struct drm_crtc *crtc = plane_state->crtc;
+ struct dc_transfer_func *input_tf;
+ int ret = 0;
+
+ if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+ return -EINVAL;
+
+ ret = fill_plane_attributes_from_fb(
+ crtc->dev->dev_private,
+ dc_plane_state,
+ amdgpu_fb,
+ addrReq);
+
+ if (ret)
+ return ret;
+
+ input_tf = dc_create_transfer_func();
+
+ if (input_tf == NULL)
+ return -ENOMEM;
+
+ input_tf->type = TF_TYPE_PREDEFINED;
+ input_tf->tf = TRANSFER_FUNCTION_SRGB;
+
+ dc_plane_state->in_transfer_func = input_tf;
+
+ /* In case of gamma set, update gamma value */
+ if (crtc_state->gamma_lut)
+ fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
+
+ return ret;
+}
+
+/*****************************************************************************/
+
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+ struct rect src = { 0 }; /* viewport in composition space*/
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector)
+{
+ uint32_t bpc = connector->display_info.bpc;
+
+ /* Limited color depth to 8bit
+ * TODO: Still need to handle deep color
+ */
+ if (bpc > 8)
+ bpc = 8;
+
+ switch (bpc) {
+ case 0:
+ /* Temporary Work around, DRM don't parse color depth for
+ * EDID revision before 1.4
+ * TODO: Fix edid parsing
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ int32_t width = mode_in->crtc_hdisplay * 9;
+ int32_t height = mode_in->crtc_vdisplay * 16;
+
+ if ((width - height) < 10 && (width - height) > -10)
+ return ASPECT_RATIO_16_9;
+ else
+ return ASPECT_RATIO_4_3;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (dc_crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ case PIXEL_ENCODING_YCBCR444:
+ case PIXEL_ENCODING_YCBCR420:
+ {
+ /*
+ * 27030khz is the separation point between HDTV and SDTV
+ * according to HDMI spec, we use YCbCr709 and YCbCr601
+ * respectively
+ */
+ if (dc_crtc_timing->pix_clk_khz > 27030) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+
+ }
+ break;
+ case PIXEL_ENCODING_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return color_space;
+}
+
+/*****************************************************************************/
+
+static void
+fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+
+ memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+
+ if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+ timing_out->vic = drm_match_cea_mode(mode_in);
+
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width =
+ mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch =
+ mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch =
+ mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width =
+ mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_khz = mode_in->crtc_clock;
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+
+ stream->output_color_space = get_output_color_space(timing_out);
+
+ {
+ struct dc_transfer_func *tf = dc_create_transfer_func();
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func = tf;
+ }
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strncpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+ /* TODO: We only check for the progressive mode, check for interlace mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+ /* no scaling nor amdgpu inserted, no need to patch */
+ }
+}
+
+static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink *sink = NULL;
+ struct dc_sink_init_data sink_init_data = { 0 };
+
+ sink_init_data.link = aconnector->dc_link;
+ sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return -ENOMEM;
+ }
+
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+ aconnector->fake_enable = true;
+
+ aconnector->dc_sink = sink;
+ aconnector->dc_link->local_sink = sink;
+
+ return 0;
+}
+
+static struct dc_stream_state *
+create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state)
+{
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector *drm_connector;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode = *drm_mode;
+ bool native_mode_found = false;
+
+ if (aconnector == NULL) {
+ DRM_ERROR("aconnector is NULL!\n");
+ goto drm_connector_null;
+ }
+
+ if (dm_state == NULL) {
+ DRM_ERROR("dm_state is NULL!\n");
+ goto dm_state_null;
+ }
+
+ drm_connector = &aconnector->base;
+
+ if (!aconnector->dc_sink) {
+ /*
+ * Exclude MST from creating fake_sink
+ * TODO: need to enable MST into fake_sink feature
+ */
+ if (aconnector->mst_port)
+ goto stream_create_fail;
+
+ if (create_fake_sink(aconnector))
+ goto stream_create_fail;
+ }
+
+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto stream_create_fail;
+ }
+
+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &aconnector->base.modes,
+ struct drm_display_mode,
+ head);
+
+ if (preferred_mode == NULL) {
+ /* This may not be an error, the use case is when we we have no
+ * usermode calls to reset and set mode upon hotplug. In this
+ * case, we call set mode ourselves to restore the previous mode
+ * and the modelist may not be filled in in time.
+ */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode,
+ dm_state->scaling != RMX_OFF);
+ }
+
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base);
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+ aconnector->dc_sink);
+
+stream_create_fail:
+dm_state_null:
+drm_connector_null:
+ return stream;
+}
+
+static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+ /* TODO Destroy dc_stream objects are stream object is flattened */
+ if (cur->stream)
+ dc_stream_release(cur->stream);
+
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+
+ kfree(state);
+}
+
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state;
+
+ if (crtc->state)
+ dm_crtc_destroy_state(crtc, crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!state))
+ return;
+
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+
+}
+
+static struct drm_crtc_state *
+dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state, *cur;
+
+ cur = to_dm_crtc_state(crtc->state);
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ if (cur->stream) {
+ state->stream = cur->stream;
+ dc_stream_retain(state->stream);
+ }
+
+ /* TODO Duplicate dc_stream after objects are stream object is flattened */
+
+ return &state->base;
+}
+
+/* Implemented only the options currently availible for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = dm_crtc_reset_state,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
+};
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /* Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl. Which
+ * makes it a bad place for *any* MST-related activit. */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+ return ret;
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ const struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev) {
+ backlight_device_unregister(dm->backlight_dev);
+ dm->backlight_dev = NULL;
+ }
+
+ }
+#endif
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+
+ connector->state = &state->base;
+ connector->state->connector = connector;
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (new_state) {
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &new_state->base);
+ return &new_state->base;
+ }
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ DRM_DEBUG_DRIVER("Finding the best encoder\n");
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+ return NULL;
+ }
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ DRM_ERROR("No encoder id\n");
+ return NULL;
+}
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+
+ if (!aconnector->base.edid_blob_ptr ||
+ !aconnector->base.edid_blob_ptr->data) {
+ DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
+ aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ aconnector->base.override_edid = false;
+ return;
+ }
+
+ edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON)
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+ /* In case of headless boot with force on for DP managed connector
+ * Those settings have to be != 0 to get initial modeset
+ */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+
+ aconnector->base.override_edid = true;
+ create_eml_sink(aconnector);
+}
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+ /* Only run this the first time mode_valid is called to initilialize
+ * EDID mgmt
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ stream = dc_create_stream_for_sink(dc_sink);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+ fill_stream_properties_from_drm_display_mode(stream, mode, connector);
+
+ stream->src.width = mode->hdisplay;
+ stream->src.height = mode->vdisplay;
+ stream->dst = stream->src;
+
+ if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
+ result = MODE_OK;
+
+ dc_stream_release(stream);
+
+fail:
+ /* TODO: error handling*/
+ return result;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+ /*
+ * If hotplug a second bigger display in FB Con mode, bigger resolution
+ * modes will be filtered by drm_mode_validate_size(), and those modes
+ * is missing after user start lightdm. So we need to renew modes list.
+ * in get_modes call back, not just return the modes count
+ */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .best_encoder = best_encoder
+};
+
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ int ret = -EINVAL;
+
+ if (unlikely(!dm_crtc_state->stream &&
+ modeset_required(state, NULL, dm_crtc_state->stream))) {
+ WARN_ON(1);
+ return ret;
+ }
+
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
+ if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+ return 0;
+
+ return ret;
+}
+
+static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+ .disable = dm_crtc_helper_disable,
+ .atomic_check = dm_crtc_helper_atomic_check,
+ .mode_fixup = dm_crtc_helper_mode_fixup
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
+static void dm_drm_plane_reset(struct drm_plane *plane)
+{
+ struct dm_plane_state *amdgpu_state = NULL;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
+ WARN_ON(amdgpu_state == NULL);
+
+ if (amdgpu_state) {
+ plane->state = &amdgpu_state->base;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
+ }
+}
+
+static struct drm_plane_state *
+dm_drm_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
+
+ old_dm_plane_state = to_dm_plane_state(plane->state);
+ dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
+ if (!dm_plane_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
+
+ if (old_dm_plane_state->dc_state) {
+ dm_plane_state->dc_state = old_dm_plane_state->dc_state;
+ dc_plane_state_retain(dm_plane_state->dc_state);
+ }
+
+ return &dm_plane_state->base;
+}
+
+void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (dm_plane_state->dc_state)
+ dc_plane_state_release(dm_plane_state->dc_state);
+
+ drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+static const struct drm_plane_funcs dm_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = dm_drm_plane_reset,
+ .atomic_duplicate_state = dm_drm_plane_duplicate_state,
+ .atomic_destroy_state = dm_drm_plane_destroy_state,
+};
+
+static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *rbo;
+ uint64_t chroma_addr = 0;
+ int r;
+ struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ unsigned int awidth;
+
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
+
+ if (!new_state->fb) {
+ DRM_DEBUG_DRIVER("No FB bound\n");
+ return 0;
+ }
+
+ afb = to_amdgpu_framebuffer(new_state->fb);
+
+ obj = afb->obj;
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+
+
+ amdgpu_bo_unreserve(rbo);
+
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ return r;
+ }
+
+ amdgpu_bo_ref(rbo);
+
+ if (dm_plane_state_new->dc_state &&
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
+ plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
+ } else {
+ awidth = ALIGN(new_state->fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(afb->address);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(afb->address);
+ chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ }
+ }
+
+ /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
+ * prepare and cleanup in drm_atomic_helper_prepare_planes
+ * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
+ * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
+ * code touching fram buffers should be avoided for DC.
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
+
+ acrtc->cursor_bo = obj;
+ }
+ return 0;
+}
+
+static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct amdgpu_bo *rbo;
+ struct amdgpu_framebuffer *afb;
+ int r;
+
+ if (!old_state->fb)
+ return;
+
+ afb = to_amdgpu_framebuffer(old_state->fb);
+ rbo = gem_to_amdgpu_bo(afb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ }
+
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
+}
+
+static int dm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct amdgpu_device *adev = plane->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (!dm_plane_state->dc_state)
+ return 0;
+
+ if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+ .prepare_fb = dm_plane_helper_prepare_fb,
+ .cleanup_fb = dm_plane_helper_cleanup_fb,
+ .atomic_check = dm_plane_atomic_check,
+};
+
+/*
+ * TODO: these are currently initialized to rgb formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so internal drm
+ * check will succeed, and let DC to implement proper check
+ */
+static const uint32_t rgb_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+static const uint32_t yuv_formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
+
+static const u32 cursor_formats[] = {
+ DRM_FORMAT_ARGB8888
+};
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs)
+{
+ int res = -EPERM;
+
+ switch (aplane->base.type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ aplane->base.format_default = true;
+
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ rgb_formats,
+ ARRAY_SIZE(rgb_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ yuv_formats,
+ ARRAY_SIZE(yuv_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ cursor_formats,
+ ARRAY_SIZE(cursor_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ }
+
+ drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (aplane->base.funcs->reset)
+ aplane->base.funcs->reset(&aplane->base);
+
+
+ return res;
+}
+
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t crtc_index)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_plane *cursor_plane;
+
+ int res = -ENOMEM;
+
+ cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
+ if (!cursor_plane)
+ goto fail;
+
+ cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+
+ acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+ if (!acrtc)
+ goto fail;
+
+ res = drm_crtc_init_with_planes(
+ dm->ddev,
+ &acrtc->base,
+ plane,
+ &cursor_plane->base,
+ &amdgpu_dm_crtc_funcs, NULL);
+
+ if (res)
+ goto fail;
+
+ drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (acrtc->base.funcs->reset)
+ acrtc->base.funcs->reset(&acrtc->base);
+
+ acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
+ acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
+
+ acrtc->crtc_id = crtc_index;
+ acrtc->base.enabled = false;
+
+ dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+ drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+
+ return 0;
+
+fail:
+ kfree(acrtc);
+ kfree(cursor_plane);
+ return res;
+}
+
+
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = helper->best_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+
+ }
+}
+
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+
+}
+
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+ drm_edid_to_eld(connector, edid);
+
+ amdgpu_dm_get_native_mode(connector);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+
+ encoder = helper->best_encoder(connector);
+
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ return amdgpu_dm_connector->num_modes;
+}
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = dm->ddev->dev_private;
+
+ aconnector->connector_id = link_index;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+
+ mutex_init(&aconnector->hpd_lock);
+
+ /* configure support HPD hot plug connector_>polled default value is 0
+ * which means HPD hot plug not supported
+ */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+}
+
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dal_i2caux_submit_i2c_command(
+ ddc_service->ctx->i2caux,
+ ddc_service->ddc_pin,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.class = I2C_CLASS_DDC;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+/* Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ link->priv = aconnector;
+
+ DRM_DEBUG_DRIVER("%s()\n", __func__);
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_mode_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ drm_connector_register(&aconnector->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ /* NOTE: this currently will create backlight device even if a panel
+ * is not connected to the eDP/LVDS connector.
+ *
+ * This is less than ideal but we don't have sink information at this
+ * stage since detection happens after. We can't do detection earlier
+ * since MST detection needs connectors to be created first.
+ */
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ /* Event if registration failed, we should continue with
+ * DM initialization because not having a backlight control
+ * is better then a black screen.
+ */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+ dm->backlight_link = link;
+ }
+#endif
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+ * this is not correct translation but will work as soon as VBLANK
+ * constant is the same as PFLIP
+ */
+ int irq_type =
+ amdgpu_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ } else {
+
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+ if (adev->dm.freesync_module)
+ mod_freesync_remove_stream(adev->dm.freesync_module, stream);
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
+static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position)
+{
+ struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int x, y;
+ int xorigin = 0, yorigin = 0;
+
+ if (!crtc || !plane->state->fb) {
+ position->enable = false;
+ position->x = 0;
+ position->y = 0;
+ return 0;
+ }
+
+ if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
+ (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("%s: bad cursor width or height %d x %d\n",
+ __func__,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+ return -EINVAL;
+ }
+
+ x = plane->state->crtc_x;
+ y = plane->state->crtc_y;
+ /* avivo cursor are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+ position->enable = true;
+ position->x = x;
+ position->y = y;
+ position->x_hotspot = xorigin;
+ position->y_hotspot = yorigin;
+
+ return 0;
+}
+
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state)
+{
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
+ struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
+ struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint64_t address = afb ? afb->address : 0;
+ struct dc_cursor_position position;
+ struct dc_cursor_attributes attributes;
+ int ret;
+
+ if (!plane->state->fb && !old_plane_state->fb)
+ return;
+
+ DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+
+ ret = get_cursor_position(plane, crtc, &position);
+ if (ret)
+ return;
+
+ if (!position.enable) {
+ /* turn off cursor */
+ if (crtc_state && crtc_state->stream)
+ dc_stream_set_cursor_position(crtc_state->stream,
+ &position);
+ return;
+ }
+
+ amdgpu_crtc->cursor_width = plane->state->crtc_w;
+ amdgpu_crtc->cursor_height = plane->state->crtc_h;
+
+ attributes.address.high_part = upper_32_bits(address);
+ attributes.address.low_part = lower_32_bits(address);
+ attributes.width = plane->state->crtc_w;
+ attributes.height = plane->state->crtc_h;
+ attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+ attributes.rotation_angle = 0;
+ attributes.attribute_flags.value = 0;
+
+ attributes.pitch = attributes.width;
+
+ if (crtc_state->stream) {
+ if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ &attributes))
+ DRM_ERROR("DC failed to set cursor attributes\n");
+
+ if (!dc_stream_set_cursor_position(crtc_state->stream,
+ &position))
+ DRM_ERROR("DC failed to set cursor position\n");
+ }
+}
+
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+/*
+ * Executes flip
+ *
+ * Waits on all BO's fences and for proper vblank count
+ */
+static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ uint32_t target,
+ struct dc_state *state)
+{
+ unsigned long flags;
+ uint32_t target_vblank;
+ int r, vpos, hpos;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ struct dc_flip_addrs addr = { {0} };
+ /* TODO eliminate or rename surface_update */
+ struct dc_surface_update surface_updates[1] = { {0} };
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+
+
+ /* Prepare wait for target vblank early - before the fence-waits */
+ target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+ /* TODO This might fail and hence better not used, wait
+ * explicitly on fences instead
+ * and in general should be called for
+ * blocking commit to as per framework helpers
+ */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve buffer before flip\n");
+ WARN_ON(1);
+ }
+
+ /* Wait for all fences on this FB */
+ WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT) < 0);
+
+ amdgpu_bo_unreserve(abo);
+
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
+ usleep_range(1000, 1100);
+ }
+
+ /* Flip */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ /* update crtc fb */
+ crtc->primary->fb = fb;
+
+ WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
+ WARN_ON(!acrtc_state->stream);
+
+ addr.address.grph.addr.low_part = lower_32_bits(afb->address);
+ addr.address.grph.addr.high_part = upper_32_bits(afb->address);
+ addr.flip_immediate = async_flip;
+
+
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
+
+ surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+ surface_updates->flip_addr = &addr;
+
+
+ dc_commit_updates_for_stream(adev->dm.dc,
+ surface_updates,
+ 1,
+ acrtc_state->stream,
+ NULL,
+ &surface_updates->surface,
+ state);
+
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool *wait_for_vblank)
+{
+ uint32_t i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dc_stream_state *dc_stream_attach;
+ struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ int planes_count = 0;
+ unsigned long flags;
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ bool pflip_needed;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ handle_cursor_update(plane, old_plane_state);
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
+ pflip_needed = !state->allow_modeset;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
+ DRM_ERROR("%s: acrtc %d, already busy\n",
+ __func__,
+ acrtc_attach->crtc_id);
+ /* In commit tail framework this cannot happen */
+ WARN_ON(1);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ if (!pflip_needed) {
+ WARN_ON(!dm_new_plane_state->dc_state);
+
+ plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
+
+ dc_stream_attach = acrtc_state->stream;
+ planes_count++;
+
+ } else if (new_crtc_state->planes_changed) {
+ /* Assume even ONE crtc with immediate flip means
+ * entire can't wait for VBLANK
+ * TODO Check if it's correct
+ */
+ *wait_for_vblank =
+ new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
+ false : true;
+
+ /* TODO: Needs rework for multiplane flip */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_crtc_vblank_get(crtc);
+
+ amdgpu_dm_do_flip(
+ crtc,
+ fb,
+ drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ dm_state->context);
+ }
+
+ }
+
+ if (planes_count) {
+ unsigned long flags;
+
+ if (new_pcrtc_state->event) {
+
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ prepare_flip_isr(acrtc_attach);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ if (false == dc_commit_planes_to_stream(dm->dc,
+ plane_states_constructed,
+ planes_count,
+ dc_stream_attach,
+ dm_state->context))
+ dm_error("%s: Failed to attach plane!\n", __func__);
+ } else {
+ /*TODO BUG Here should go disable planes on CRTC. */
+ }
+}
+
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+ /*
+ * We evade vblanks and pflips on crtc that
+ * should be changed. We do it here to flush & disable
+ * interrupts before drm_swap_state is called in drm_atomic_helper_commit
+ * it will update crtc->dm_crtc_state->stream pointer which is used in
+ * the ISRs.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+ manage_dm_interrupts(adev, acrtc, false);
+ }
+ /* Add check here for SoC's that support hardware cursor plane, to
+ * unset legacy_cursor_update */
+
+ return drm_atomic_helper_commit(dev, state, nonblock);
+
+ /*TODO Handle EINTR, reenable IRQ*/
+}
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ uint32_t i, j;
+ uint32_t new_crtcs_count = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
+ struct dc_stream_state *new_stream = NULL;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+ dm_state = to_dm_atomic_state(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* handles headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
+
+ if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+ /*
+ * this could happen because of issues with
+ * userspace notifications delivery.
+ * In this case userspace tries to set mode on
+ * display which is disconnect in fact.
+ * dc_sink in NULL in this case on aconnector.
+ * We expect reset mode will come soon.
+ *
+ * This can also happen when unplug is done
+ * during resume sequence ended
+ *
+ * In this case, we want to pretend we still
+ * have a sink to keep the pipe running so that
+ * hw state is consistent with the sw state
+ */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+
+ /*
+ * this loop saves set mode crtcs
+ * we needed to enable vblanks once all
+ * resources acquired in dc after dc_commit_streams
+ */
+
+ /*TODO move all this into dm_crtc_state, get rid of
+ * new_crtcs array and use old and new atomic states
+ * instead
+ */
+ new_crtcs[new_crtcs_count] = acrtc;
+ new_crtcs_count++;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
+ } /* for_each_crtc_in_state() */
+
+ /*
+ * Add streams after required streams from new and replaced streams
+ * are removed from freesync module
+ */
+ if (adev->dm.freesync_module) {
+ for (i = 0; i < new_crtcs_count; i++) {
+ struct amdgpu_dm_connector *aconnector = NULL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ &new_crtcs[i]->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ new_stream = dm_new_crtc_state->stream;
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(
+ state,
+ &new_crtcs[i]->base);
+ if (!aconnector) {
+ DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
+ "skipping freesync init\n",
+ new_crtcs[i]->crtc_id);
+ continue;
+ }
+
+ mod_freesync_add_stream(adev->dm.freesync_module,
+ new_stream, &aconnector->caps);
+ }
+ }
+
+ if (dm_state->context)
+ WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+
+ /* Handle scaling and underscan changes*/
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_stream_status *status = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ /* Skip any thing not scale or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+ WARN_ON(!status);
+ WARN_ON(!status->plane_count);
+
+ /*TODO How it works with MPO ?*/
+ if (!dc_commit_planes_to_stream(
+ dm->dc,
+ status->plane_states,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ dm_state->context))
+ dm_error("%s: Failed to update stream scaling!\n", __func__);
+ }
+
+ for (i = 0; i < new_crtcs_count; i++) {
+ /*
+ * loop to enable interrupts on newly arrived crtc
+ */
+ struct amdgpu_crtc *acrtc = new_crtcs[i];
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (adev->dm.freesync_module)
+ mod_freesync_notify_mode_change(
+ adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
+
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+
+ /* update planes when needed per crtc*/
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+ }
+
+
+ /*
+ * send vblank event on all events not handled in flip and
+ * mark consumed event for drm_atomic_helper_commit_hw_done
+ */
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+ /* Construct an atomic state to restore previous display setting */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto err;
+
+ /* Attach crtc to drm_atomic_state*/
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto err;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto err;
+
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+ if (!ret)
+ return 0;
+
+err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
+/*
+ * This functions handle all cases when set mode does not come upon hotplug.
+ * This include when the same display is unplugged then plugged back into the
+ * same port and when we are running without usermode desktop manager supprot
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
+
+ /*
+ * If the previous sink is not released and different from the current,
+ * we deduce we are in a state where we can not rely on usermode call
+ * to turn on the display, so we do it here
+ */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*`
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * Waits for completion of all non blocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+ /* Adding all modeset locks to aquire_ctx will
+ * ensure that when the framework release it the
+ * extra locks we are locking here will get released to
+ */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ /* Make sure all pending HW programming completed and
+ * page flips done
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+ "timed out\n", crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static int dm_update_crtcs_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+ /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *new_con_state = NULL;
+ struct dm_connector_state *dm_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+
+ /* TODO This hack should go away */
+ if (aconnector && enable) {
+ // Make sure fake sink is created in plug-in scenario
+ new_con_state = drm_atomic_get_connector_state(state,
+ &aconnector->base);
+
+ if (IS_ERR(new_con_state)) {
+ ret = PTR_ERR_OR_ZERO(new_con_state);
+ break;
+ }
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ new_stream = create_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_conn_state);
+
+ /*
+ * we can have no stream on ACTION_SET if a display
+ * was disconnected during S3, in this case it not and
+ * error, the OS will be updated after detection, and
+ * do the right thing on next atomic commit
+ */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ break;
+ }
+ }
+
+ if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+
+ new_crtc_state->mode_changed = false;
+
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
+
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_remove_stream_from_ctx(
+ dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else {/* Add stream for any updated/enabled CRTC */
+ /*
+ * Quick fix to prevent NULL pointer on new_stream when
+ * added MST connectors not found in existing crtc_state in the chained mode
+ * TODO: need to dig out the root cause of that
+ */
+ if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+ goto next_crtc;
+
+ if (modereset_required(new_crtc_state))
+ goto next_crtc;
+
+ if (modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ dm_new_crtc_state->stream = new_stream;
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_add_stream_to_ctx(
+ dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+next_crtc:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+ }
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
+static int dm_update_planes_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ int i ;
+ /* TODO return page_flip_needed() function */
+ bool pflip_needed = !state->allow_modeset;
+ int ret = 0;
+
+ if (pflip_needed)
+ return ret;
+
+ /* Add new planes */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ /*TODO Implement atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+
+ if (!old_plane_crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ continue;
+
+ DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ if (!dc_remove_plane_from_context(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+ ret = EINVAL;
+ return ret;
+ }
+
+
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ continue;
+
+ if (!new_plane_crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+
+ DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ if (!dm_new_plane_state->dc_state) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = fill_plane_attributes(
+ new_plane_crtc->dev->dev_private,
+ dm_new_plane_state->dc_state,
+ new_plane_state,
+ new_crtc_state,
+ false);
+ if (ret)
+ return ret;
+
+
+ if (!dc_add_plane_to_context(
+ dc,
+ dm_new_crtc_state->stream,
+ dm_new_plane_state->dc_state,
+ dm_state->context)) {
+
+ ret = -EINVAL;
+ return ret;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+
+ return ret;
+}
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int i;
+ int ret;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+
+ /*
+ * This bool will be set for true for any modeset/reset
+ * or plane update which implies non fast surface update.
+ */
+ bool lock_and_validation_needed = false;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ goto fail;
+
+ /*
+ * legacy_cursor_update should be made false for SoC's having
+ * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
+ * otherwise for software cursor plane,
+ * we should not add it to list of affected planes.
+ */
+ if (state->legacy_cursor_update) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->color_mgmt_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+ } else {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed)
+ continue;
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+
+ dm_state->context = dc_create_state();
+ ASSERT(dm_state->context);
+ dc_resource_state_copy_construct_current(dc, dm_state->context);
+
+ /* Remove exiting planes if they are modified */
+ ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Disable all crtcs which require disable */
+ ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Enable all crtcs which require enable */
+ ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Add new/modified planes */
+ ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ goto fail;
+
+ /* Check scaling and underscan changes*/
+ /*TODO Removed scaling changes validation due to inability to commit
+ * new stream into context w\o causing full reset. Need to
+ * decide how to handle.
+ */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+ /* Skip any thing not scale or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+ /*
+ * For full updates case when
+ * removing/adding/updating streams on once CRTC while flipping
+ * on another CRTC,
+ * acquiring global lock will guarantee that any such full
+ * update commit
+ * will wait for completion of any outstanding flip using DRMs
+ * synchronization events.
+ */
+
+ if (lock_and_validation_needed) {
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret)
+ goto fail;
+
+ if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* Must be success */
+ WARN_ON(ret);
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+
+ return ret;
+}
+
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ uint8_t dpcd_data;
+ bool capable = false;
+
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+ capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
+ }
+
+ return capable;
+}
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i;
+ uint64_t val_capable;
+ bool edid_check_required;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ edid_check_required = false;
+ if (!amdgpu_dm_connector->dc_sink) {
+ DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
+ return;
+ }
+ if (!adev->dm.freesync_module)
+ return;
+ /*
+ * if edid non zero restrict freesync only for dp and edp
+ */
+ if (edid) {
+ if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ edid_check_required = is_dp_capable_without_timing_msa(
+ adev->dm.dc,
+ amdgpu_dm_connector);
+ }
+ }
+ val_capable = 0;
+ if (edid_check_required == true && (edid->version > 1 ||
+ (edid->version == 1 && edid->revision > 1))) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+ * Check for flag range limits only. If flag == 1 then
+ * no additional timing information provided.
+ * Default GTF, GTF Secondary curve and CVT are not
+ * supported
+ */
+ if (range->flags != 1)
+ continue;
+
+ amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+ amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+ break;
+ }
+
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
+ amdgpu_dm_connector->caps.supported = true;
+ amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
+ amdgpu_dm_connector->min_vfreq * 1000000;
+ amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
+ amdgpu_dm_connector->max_vfreq * 1000000;
+ val_capable = 1;
+ }
+ }
+
+ /*
+ * TODO figure out how to notify user-mode or DRM of freesync caps
+ * once we figure out how to deal with freesync in an upstreamable
+ * fashion
+ */
+
+}
+
+void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
+{
+ /*
+ * TODO fill in once we figure out how to deal with freesync in
+ * an upstreamable fashion
+ */
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
new file mode 100644
index 000000000000..117521c6a6ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_H__
+#define __AMDGPU_DM_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include "dc.h"
+
+/*
+ * This file contains the definition for amdgpu_display_manager
+ * and its API for amdgpu driver's use.
+ * This component provides all the display related functionality
+ * and this is the only component that calls DAL API.
+ * The API contained here intended for amdgpu driver use.
+ * The API that is called directly from KMS framework is located
+ * in amdgpu_dm_kms.h file
+ */
+
+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+/*
+#include "include/amdgpu_dal_power_if.h"
+#include "amdgpu_dm_irq.h"
+*/
+
+#include "irq_types.h"
+#include "signal_types.h"
+
+/* Forward declarations */
+struct amdgpu_device;
+struct drm_device;
+struct amdgpu_dm_irq_handler_data;
+
+struct amdgpu_dm_prev_state {
+ struct drm_framebuffer *fb;
+ int32_t x;
+ int32_t y;
+ struct drm_display_mode mode;
+};
+
+struct common_irq_params {
+ struct amdgpu_device *adev;
+ enum dc_irq_source irq_src;
+};
+
+struct irq_list_head {
+ struct list_head head;
+ /* In case this interrupt needs post-processing, 'work' will be queued*/
+ struct work_struct work;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+struct dm_comressor_info {
+ void *cpu_addr;
+ struct amdgpu_bo *bo_ptr;
+ uint64_t gpu_addr;
+};
+#endif
+
+
+struct amdgpu_display_manager {
+ struct dal *dal;
+ struct dc *dc;
+ struct cgs_device *cgs_device;
+ /* lock to be used when DAL is called from SYNC IRQ context */
+ spinlock_t dal_lock;
+
+ struct amdgpu_device *adev; /*AMD base driver*/
+ struct drm_device *ddev; /*DRM base driver*/
+ u16 display_indexes_num;
+
+ struct amdgpu_dm_prev_state prev_state;
+
+ /*
+ * 'irq_source_handler_table' holds a list of handlers
+ * per (DAL) IRQ source.
+ *
+ * Each IRQ source may need to be handled at different contexts.
+ * By 'context' we mean, for example:
+ * - The ISR context, which is the direct interrupt handler.
+ * - The 'deferred' context - this is the post-processing of the
+ * interrupt, but at a lower priority.
+ *
+ * Note that handlers are called in the same order as they were
+ * registered (FIFO).
+ */
+ struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+ struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+
+ struct common_irq_params
+ pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+
+ struct common_irq_params
+ vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+
+ /* this spin lock synchronizes access to 'irq_handler_list_table' */
+ spinlock_t irq_handler_list_table_lock;
+
+ /* Timer-related data. */
+ struct list_head timer_handler_list;
+ struct workqueue_struct *timer_workqueue;
+
+ /* Use dal_mutex for any activity which is NOT syncronized by
+ * DRM mode setting locks.
+ * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
+ * DRM mode setting locks being acquired. This is where dal_mutex
+ * is acquired before calling into DAL. */
+ struct mutex dal_mutex;
+
+ struct backlight_device *backlight_dev;
+
+ const struct dc_link *backlight_link;
+
+ struct work_struct mst_hotplug_work;
+
+ struct mod_freesync *freesync_module;
+
+ /**
+ * Caches device atomic state for suspend/resume
+ */
+ struct drm_atomic_state *cached_state;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct dm_comressor_info compressor;
+#endif
+};
+
+struct amdgpu_dm_connector {
+
+ struct drm_connector base;
+ uint32_t connector_id;
+
+ /* we need to mind the EDID between detect
+ and get modes due to analog/digital/tvencoder */
+ struct edid *edid;
+
+ /* shared with amdgpu */
+ struct amdgpu_hpd hpd;
+
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+
+ /* DM only */
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_dm_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq ;
+ int max_vfreq ;
+ int pixel_clock_mhz;
+
+ /*freesync caps*/
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
+ bool fake_enable;
+};
+
+#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+
+extern const struct amdgpu_ip_block_version dm_ip_block;
+
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+struct dc_validation_set;
+struct dc_plane_state;
+
+struct dm_plane_state {
+ struct drm_plane_state base;
+ struct dc_plane_state *dc_state;
+};
+
+struct dm_crtc_state {
+ struct drm_crtc_state base;
+ struct dc_stream_state *stream;
+};
+
+#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
+
+struct dm_atomic_state {
+ struct drm_atomic_state base;
+
+ struct dc_state *context;
+};
+
+#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
+
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index);
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector);
+
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+ struct edid *edid);
+
+void
+amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
+
+extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
+
+#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
new file mode 100644
index 000000000000..9bd142f65f9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+#include <linux/version.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_edid.h>
+
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "dc.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+#include "dm_helpers.h"
+
+/* dm_helpers_parse_edid_caps
+ *
+ * Parse edid caps
+ *
+ * @edid: [in] pointer to edid
+ * edid_caps: [in] pointer to edid caps
+ * @return
+ * void
+ * */
+enum dc_edid_status dm_helpers_parse_edid_caps(
+ struct dc_context *ctx,
+ const struct dc_edid *edid,
+ struct dc_edid_caps *edid_caps)
+{
+ struct edid *edid_buf = (struct edid *) edid->raw_edid;
+ struct cea_sad *sads;
+ int sad_count = -1;
+ int sadb_count = -1;
+ int i = 0;
+ int j = 0;
+ uint8_t *sadb = NULL;
+
+ enum dc_edid_status result = EDID_OK;
+
+ if (!edid_caps || !edid)
+ return EDID_BAD_INPUT;
+
+ if (!drm_edid_is_valid(edid_buf))
+ result = EDID_BAD_CHECKSUM;
+
+ edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
+ ((uint16_t) edid_buf->mfg_id[1])<<8;
+ edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
+ ((uint16_t) edid_buf->prod_code[1])<<8;
+ edid_caps->serial_number = edid_buf->serial;
+ edid_caps->manufacture_week = edid_buf->mfg_week;
+ edid_caps->manufacture_year = edid_buf->mfg_year;
+
+ /* One of the four detailed_timings stores the monitor name. It's
+ * stored in an array of length 13. */
+ for (i = 0; i < 4; i++) {
+ if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
+ while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
+ if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
+ break;
+
+ edid_caps->display_name[j] =
+ edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
+ j++;
+ }
+ }
+ }
+
+ edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
+ (struct edid *) edid->raw_edid);
+
+ sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+ if (sad_count <= 0) {
+ DRM_INFO("SADs count is: %d, don't need to read it\n",
+ sad_count);
+ return result;
+ }
+
+ edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
+ for (i = 0; i < edid_caps->audio_mode_count; ++i) {
+ struct cea_sad *sad = &sads[i];
+
+ edid_caps->audio_modes[i].format_code = sad->format;
+ edid_caps->audio_modes[i].channel_count = sad->channels;
+ edid_caps->audio_modes[i].sample_rate = sad->freq;
+ edid_caps->audio_modes[i].sample_size = sad->byte2;
+ }
+
+ sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
+
+ if (sadb_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
+ sadb_count = 0;
+ }
+
+ if (sadb_count)
+ edid_caps->speaker_flags = sadb[0];
+ else
+ edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+
+ kfree(sads);
+ kfree(sadb);
+
+ return result;
+}
+
+static void get_payload_table(
+ struct amdgpu_dm_connector *aconnector,
+ struct dp_mst_stream_allocation_table *proposed_table)
+{
+ int i;
+ struct drm_dp_mst_topology_mgr *mst_mgr =
+ &aconnector->mst_port->mst_mgr;
+
+ mutex_lock(&mst_mgr->payload_lock);
+
+ proposed_table->stream_count = 0;
+
+ /* number of active streams */
+ for (i = 0; i < mst_mgr->max_payloads; i++) {
+ if (mst_mgr->payloads[i].num_slots == 0)
+ break; /* end of vcp_id table */
+
+ ASSERT(mst_mgr->payloads[i].payload_state !=
+ DP_PAYLOAD_DELETE_LOCAL);
+
+ if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
+ mst_mgr->payloads[i].payload_state ==
+ DP_PAYLOAD_REMOTE) {
+
+ struct dp_mst_stream_allocation *sa =
+ &proposed_table->stream_allocations[
+ proposed_table->stream_count];
+
+ sa->slot_count = mst_mgr->payloads[i].num_slots;
+ sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
+ proposed_table->stream_count++;
+ }
+ }
+
+ mutex_unlock(&mst_mgr->payload_lock);
+}
+
+/*
+ * Writes payload allocation table in immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ struct dp_mst_stream_allocation_table *proposed_table,
+ bool enable)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ int slots = 0;
+ bool ret;
+ int clock;
+ int bpp = 0;
+ int pbn = 0;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ mst_port = aconnector->port;
+
+ if (enable) {
+ clock = stream->timing.pix_clk_khz;
+
+ switch (stream->timing.display_color_depth) {
+
+ case COLOR_DEPTH_666:
+ bpp = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bpp = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bpp = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bpp = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bpp = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bpp = 16;
+ break;
+ default:
+ ASSERT(bpp != 0);
+ break;
+ }
+
+ bpp = bpp * 3;
+
+ /* TODO need to know link rate */
+
+ pbn = drm_dp_calc_pbn_mode(clock, bpp);
+
+ slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
+ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
+
+ if (!ret)
+ return false;
+
+ } else {
+ drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
+ }
+
+ ret = drm_dp_update_payload_part1(mst_mgr);
+
+ /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
+ * AUX message. The sequence is slot 1-63 allocated sequence for each
+ * stream. AMD ASIC stream slot allocation should follow the same
+ * sequence. copy DRM MST allocation to dc */
+
+ get_payload_table(aconnector, proposed_table);
+
+ if (ret)
+ return false;
+
+ return true;
+}
+
+/*
+ * Polls for ACT (allocation change trigger) handled and sends
+ * ALLOCATE_PAYLOAD message.
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ int ret;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ ret = drm_dp_check_act_status(mst_mgr);
+
+ if (ret)
+ return false;
+
+ return true;
+}
+
+bool dm_helpers_dp_mst_send_payload_allocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ bool enable)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ int ret;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_port = aconnector->port;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ ret = drm_dp_update_payload_part2(mst_mgr);
+
+ if (ret)
+ return false;
+
+ if (!enable)
+ drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
+
+ return true;
+}
+
+bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
+{
+ return true;
+}
+
+void dm_dtn_log_begin(struct dc_context *ctx)
+{}
+
+void dm_dtn_log_append_v(struct dc_context *ctx,
+ const char *pMsg, ...)
+{}
+
+void dm_dtn_log_end(struct dc_context *ctx)
+{}
+
+bool dm_helpers_dp_mst_start_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ bool boot)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ if (boot) {
+ DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+ return true;
+ }
+
+ DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+}
+
+void dm_helpers_dp_mst_stop_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return;
+ }
+
+ DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ if (aconnector->mst_mgr.mst_state == true)
+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
+}
+
+bool dm_helpers_dp_read_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
+ data, size) > 0;
+}
+
+bool dm_helpers_dp_write_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
+ address, (uint8_t *)data, size) > 0;
+}
+
+bool dm_helpers_submit_i2c(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ struct i2c_command *cmd)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct i2c_msg *msgs;
+ int i = 0;
+ int num = cmd->number_of_payloads;
+ bool result;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);
+
+ if (!msgs)
+ return false;
+
+ for (i = 0; i < num; i++) {
+ msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
+ msgs[i].addr = cmd->payloads[i].address;
+ msgs[i].len = cmd->payloads[i].length;
+ msgs[i].buf = cmd->payloads[i].data;
+ }
+
+ result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
+
+ kfree(msgs);
+
+ return result;
+}
+
+enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ struct dc_sink *sink)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct i2c_adapter *ddc;
+ int retry = 3;
+ enum dc_edid_status edid_status;
+ struct edid *edid;
+
+ if (link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /* some dongles read edid incorrectly the first time,
+ * do check sum and retry to make sure read correct edid.
+ */
+ do {
+
+ edid = drm_get_edid(&aconnector->base, ddc);
+
+ if (!edid)
+ return EDID_NO_RESPONSE;
+
+ sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
+ memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
+
+ /* We don't need the original edid anymore */
+ kfree(edid);
+
+ edid_status = dm_helpers_parse_edid_caps(
+ ctx,
+ &sink->dc_edid,
+ &sink->edid_caps);
+
+ } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+
+ if (edid_status != EDID_OK)
+ DRM_ERROR("EDID err: %d, on connector: %s",
+ edid_status,
+ aconnector->base.name);
+
+ return edid_status;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
new file mode 100644
index 000000000000..ca5d0d1581dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+/******************************************************************************
+ * Private declarations.
+ *****************************************************************************/
+
+struct handler_common_data {
+ struct list_head list;
+ interrupt_handler handler;
+ void *handler_arg;
+
+ /* DM which this handler belongs to */
+ struct amdgpu_display_manager *dm;
+};
+
+struct amdgpu_dm_irq_handler_data {
+ struct handler_common_data hcd;
+ /* DAL irq source which registered for this interrupt. */
+ enum dc_irq_source irq_source;
+};
+
+struct amdgpu_dm_timer_handler_data {
+ struct handler_common_data hcd;
+ struct delayed_work d_work;
+};
+
+#define DM_IRQ_TABLE_LOCK(adev, flags) \
+ spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
+
+#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
+ spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
+
+/******************************************************************************
+ * Private functions.
+ *****************************************************************************/
+
+static void init_handler_common_data(struct handler_common_data *hcd,
+ void (*ih)(void *),
+ void *args,
+ struct amdgpu_display_manager *dm)
+{
+ hcd->handler = ih;
+ hcd->handler_arg = args;
+ hcd->dm = dm;
+}
+
+/**
+ * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ *
+ * @work: work struct
+ */
+static void dm_irq_work_func(struct work_struct *work)
+{
+ struct list_head *entry;
+ struct irq_list_head *irq_list_head =
+ container_of(work, struct irq_list_head, work);
+ struct list_head *handler_list = &irq_list_head->head;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+
+ list_for_each(entry, handler_list) {
+ handler_data =
+ list_entry(
+ entry,
+ struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ }
+
+ /* Call a DAL subcomponent which registered for interrupt notification
+ * at INTERRUPT_LOW_IRQ_CONTEXT.
+ * (The most common use is HPD interrupt) */
+}
+
+/**
+ * Remove a handler and return a pointer to hander list from which the
+ * handler was removed.
+ */
+static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
+ void *ih,
+ const struct dc_interrupt_params *int_params)
+{
+ struct list_head *hnd_list;
+ struct list_head *entry, *tmp;
+ struct amdgpu_dm_irq_handler_data *handler;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+ enum dc_irq_source irq_source;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ irq_source = int_params->irq_source;
+
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_for_each_safe(entry, tmp, hnd_list) {
+
+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ if (ih == handler) {
+ /* Found our handler. Remove it from the list. */
+ list_del(&handler->hcd.list);
+ handler_removed = true;
+ break;
+ }
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_removed == false) {
+ /* Not necessarily an error - caller may not
+ * know the context. */
+ return NULL;
+ }
+
+ kfree(handler);
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
+ ih, int_params->irq_source, int_params->int_context);
+
+ return hnd_list;
+}
+
+/* If 'handler_in == NULL' then remove ALL handlers. */
+static void remove_timer_handler(struct amdgpu_device *adev,
+ struct amdgpu_dm_timer_handler_data *handler_in)
+{
+ struct amdgpu_dm_timer_handler_data *handler_temp;
+ struct list_head *handler_list;
+ struct list_head *entry, *tmp;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ handler_list = &adev->dm.timer_handler_list;
+
+ list_for_each_safe(entry, tmp, handler_list) {
+ /* Note that list_for_each_safe() guarantees that
+ * handler_temp is NOT null. */
+ handler_temp = list_entry(entry,
+ struct amdgpu_dm_timer_handler_data, hcd.list);
+
+ if (handler_in == NULL || handler_in == handler_temp) {
+ list_del(&handler_temp->hcd.list);
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
+ handler_temp);
+
+ if (handler_in == NULL) {
+ /* Since it is still in the queue, it must
+ * be cancelled. */
+ cancel_delayed_work_sync(&handler_temp->d_work);
+ }
+
+ kfree(handler_temp);
+ handler_removed = true;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ }
+
+ /* Remove ALL handlers. */
+ if (handler_in == NULL)
+ continue;
+
+ /* Remove a SPECIFIC handler.
+ * Found our handler - we can stop here. */
+ if (handler_in == handler_temp)
+ break;
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_in != NULL && handler_removed == false)
+ DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
+ handler_in);
+}
+
+static bool
+validate_irq_registration_params(struct dc_interrupt_params *int_params,
+ void (*ih)(void *))
+{
+ if (NULL == int_params || NULL == ih) {
+ DRM_ERROR("DM_IRQ: invalid input!\n");
+ return false;
+ }
+
+ if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
+ DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+ int_params->int_context);
+ return false;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
+ int_params->irq_source);
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
+ irq_handler_idx handler_idx)
+{
+ if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+ DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
+ return false;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
+ return false;
+ }
+
+ return true;
+}
+/******************************************************************************
+ * Public functions.
+ *
+ * Note: caller is responsible for input validation.
+ *****************************************************************************/
+
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ struct dc_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args)
+{
+ struct list_head *hnd_list;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+ unsigned long irq_table_flags;
+ enum dc_irq_source irq_source;
+
+ if (false == validate_irq_registration_params(int_params, ih))
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+
+ handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+ if (!handler_data) {
+ DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+ }
+
+ memset(handler_data, 0, sizeof(*handler_data));
+
+ init_handler_common_data(&handler_data->hcd, ih, handler_args,
+ &adev->dm);
+
+ irq_source = int_params->irq_source;
+
+ handler_data->irq_source = irq_source;
+
+ /* Lock the list, add the handler. */
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_add_tail(&handler_data->hcd.list, hnd_list);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ /* This pointer will be stored by code which requested interrupt
+ * registration.
+ * The same pointer will be needed in order to unregister the
+ * interrupt. */
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
+ handler_data,
+ irq_source,
+ int_params->int_context);
+
+ return handler_data;
+}
+
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source,
+ void *ih)
+{
+ struct list_head *handler_list;
+ struct dc_interrupt_params int_params;
+ int i;
+
+ if (false == validate_irq_unregistration_params(irq_source, ih))
+ return;
+
+ memset(&int_params, 0, sizeof(int_params));
+
+ int_params.irq_source = irq_source;
+
+ for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
+
+ int_params.int_context = i;
+
+ handler_list = remove_irq_handler(adev, ih, &int_params);
+
+ if (handler_list != NULL)
+ break;
+ }
+
+ if (handler_list == NULL) {
+ /* If we got here, it means we searched all irq contexts
+ * for this irq source, but the handler was not found. */
+ DRM_ERROR(
+ "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
+ ih, irq_source);
+ }
+}
+
+int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+{
+ int src;
+ struct irq_list_head *lh;
+
+ DRM_DEBUG_KMS("DM_IRQ\n");
+
+ spin_lock_init(&adev->dm.irq_handler_list_table_lock);
+
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ /* low context handler list init */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
+ INIT_LIST_HEAD(&lh->head);
+ INIT_WORK(&lh->work, dm_irq_work_func);
+
+ /* high context handler init */
+ INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+ }
+
+ INIT_LIST_HEAD(&adev->dm.timer_handler_list);
+
+ /* allocate and initialize the workqueue for DM timer */
+ adev->dm.timer_workqueue = create_singlethread_workqueue(
+ "dm_timer_queue");
+ if (adev->dm.timer_workqueue == NULL) {
+ DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* DM IRQ and timer resource release */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+{
+ int src;
+ struct irq_list_head *lh;
+ DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+
+ /* The handler was removed from the table,
+ * it means it is safe to flush all the 'work'
+ * (because no code can schedule a new one). */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
+ flush_work(&lh->work);
+ }
+
+ /* Cancel ALL timers and release handlers (if any). */
+ remove_timer_handler(adev, NULL);
+ /* Release the queue itself. */
+ destroy_workqueue(adev->dm.timer_workqueue);
+}
+
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h;
+ struct list_head *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: suspend\n");
+
+ /**
+ * Disable HW interrupt for HPD and HPDRX only since FLIP and VBLANK
+ * will be disabled from manage_dm_interrupts on disable CRTC.
+ */
+ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, false);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ return 0;
+}
+
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h, *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+
+ /* re-enable short pulse interrupts HW interrupt */
+ for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, true);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ return 0;
+}
+
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h, *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: resume\n");
+
+ /**
+ * Renable HW interrupt for HPD and only since FLIP and VBLANK
+ * will be enabled from manage_dm_interrupts on enable CRTC.
+ */
+ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, true);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ return 0;
+}
+
+/**
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
+ */
+static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source)
+{
+ unsigned long irq_table_flags;
+ struct work_struct *work = NULL;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
+ work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (work) {
+ if (!schedule_work(work))
+ DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
+ irq_source);
+ }
+
+}
+
+/** amdgpu_dm_irq_immediate_work
+ * Callback high irq work immediately, don't send to work queue
+ */
+static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source)
+{
+ struct amdgpu_dm_irq_handler_data *handler_data;
+ struct list_head *entry;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ list_for_each(
+ entry,
+ &adev->dm.irq_handler_list_high_tab[irq_source]) {
+
+ handler_data =
+ list_entry(
+ entry,
+ struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ /* Call a subcomponent which registered for immediate
+ * interrupt notification */
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
+/*
+ * amdgpu_dm_irq_handler
+ *
+ * Generic IRQ handler, calls all registered high irq work immediately, and
+ * schedules work for low irq
+ */
+static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+
+ enum dc_irq_source src =
+ dc_interrupt_to_irq_source(
+ adev->dm.dc,
+ entry->src_id,
+ entry->src_data[0]);
+
+ dc_interrupt_ack(adev->dm.dc, src);
+
+ /* Call high irq work immediately */
+ amdgpu_dm_irq_immediate_work(adev, src);
+ /*Schedule low_irq work */
+ amdgpu_dm_irq_schedule_work(adev, src);
+
+ return 0;
+}
+
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+{
+ switch (type) {
+ case AMDGPU_HPD_1:
+ return DC_IRQ_SOURCE_HPD1;
+ case AMDGPU_HPD_2:
+ return DC_IRQ_SOURCE_HPD2;
+ case AMDGPU_HPD_3:
+ return DC_IRQ_SOURCE_HPD3;
+ case AMDGPU_HPD_4:
+ return DC_IRQ_SOURCE_HPD4;
+ case AMDGPU_HPD_5:
+ return DC_IRQ_SOURCE_HPD5;
+ case AMDGPU_HPD_6:
+ return DC_IRQ_SOURCE_HPD6;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
+ bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+ dc_interrupt_set(adev->dm.dc, src, st);
+ return 0;
+}
+
+static inline int dm_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state,
+ const enum irq_type dal_irq_type,
+ const char *func)
+{
+ bool st;
+ enum dc_irq_source irq_source;
+
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
+
+ if (!acrtc) {
+ DRM_ERROR(
+ "%s: crtc is NULL at id :%d\n",
+ func,
+ crtc_id);
+ return 0;
+ }
+
+ irq_source = dal_irq_type + acrtc->otg_inst;
+
+ st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+ dc_interrupt_set(adev->dm.dc, irq_source, st);
+ return 0;
+}
+
+static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_PFLIP,
+ __func__);
+}
+
+static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VBLANK,
+ __func__);
+}
+
+static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
+ .set = amdgpu_dm_set_crtc_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
+ .set = amdgpu_dm_set_pflip_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
+ .set = amdgpu_dm_set_hpd_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
+
+ adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
+}
+
+/*
+ * amdgpu_dm_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd_rx,
+ true);
+ }
+ }
+}
+
+/**
+ * amdgpu_dm_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+ dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd_rx,
+ false);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
new file mode 100644
index 000000000000..82f8e761beca
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_H__
+#define __AMDGPU_DM_IRQ_H__
+
+#include "irq_types.h" /* DAL irq definitions */
+
+/*
+ * Display Manager IRQ-related interfaces (for use by DAL).
+ */
+
+/**
+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM initialization.
+ *
+ * Returns:
+ * 0 - success
+ * non-zero - error
+ */
+int amdgpu_dm_irq_init(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM destruction.
+ *
+ */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
+ *
+ * @adev: AMD DRM device
+ * @int_params: parameters for the irq
+ * @ih: pointer to the irq hander function
+ * @handler_args: arguments which will be passed to ih
+ *
+ * Returns:
+ * IRQ Handler Index on success.
+ * NULL on failure.
+ *
+ * Cannot be called from an interrupt handler.
+ */
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ struct dc_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args);
+
+/**
+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
+ * by amdgpu_dm_irq_register_interrupt().
+ *
+ * @adev: AMD DRM device.
+ * @ih_index: irq handler index which was returned by
+ * amdgpu_dm_irq_register_interrupt
+ */
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source,
+ void *ih_index);
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
+
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
+ *
+ */
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
+ * amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
+ *
+ */
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
new file mode 100644
index 000000000000..f8efb98b1fa7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/version.h>
+#include <drm/drm_atomic_helper.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "dc.h"
+#include "dm_helpers.h"
+
+#include "dc_link_ddc.h"
+
+/* #define TRACE_DPCD */
+
+#ifdef TRACE_DPCD
+#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
+
+static inline char *side_band_msg_type_to_str(uint32_t address)
+{
+ static char str[10] = {0};
+
+ if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
+ strcpy(str, "DOWN_REQ");
+ else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
+ strcpy(str, "UP_REP");
+ else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
+ strcpy(str, "DOWN_REP");
+ else
+ strcpy(str, "UP_REQ");
+
+ return str;
+}
+
+static void log_dpcd(uint8_t type,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size,
+ bool res)
+{
+ DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
+ (type == DP_AUX_NATIVE_READ) ||
+ (type == DP_AUX_I2C_READ) ?
+ "Read" : "Write",
+ address,
+ SIDE_BAND_MSG(address) ?
+ side_band_msg_type_to_str(address) : "Nop",
+ res ? "OK" : "Fail");
+
+ if (res) {
+ print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
+ }
+}
+#endif
+
+static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+ I2C_MOT_TRUE : I2C_MOT_FALSE;
+ enum ddc_result res;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_READ:
+ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_NATIVE_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_I2C_READ:
+ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_I2C_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ default:
+ return 0;
+ }
+
+#ifdef TRACE_DPCD
+ log_dpcd(msg->request,
+ msg->address,
+ msg->buffer,
+ msg->size,
+ r == DDC_RESULT_SUCESSFULL);
+#endif
+
+ return msg->size;
+}
+
+static enum drm_connector_status
+dm_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
+
+ enum drm_connector_status status =
+ drm_dp_mst_detect_port(
+ connector,
+ &master->mst_mgr,
+ aconnector->port);
+
+ return status;
+}
+
+static void
+dm_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+
+ drm_encoder_cleanup(&amdgpu_encoder->base);
+ kfree(amdgpu_encoder);
+ drm_connector_cleanup(connector);
+ kfree(amdgpu_dm_connector);
+}
+
+static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
+ .detect = dm_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = dm_dp_mst_connector_destroy,
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+static int dm_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int ret;
+
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ return ret;
+}
+
+static int dm_dp_mst_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ int ret = 0;
+
+ if (!aconnector)
+ return dm_connector_update_modes(connector, NULL);
+
+ if (!aconnector->edid) {
+ struct edid *edid;
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+ return ret;
+ }
+
+ aconnector->edid = edid;
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ dc_sink->priv = aconnector;
+ aconnector->dc_sink = dc_sink;
+
+ if (aconnector->dc_sink)
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, edid);
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, edid);
+ }
+
+ ret = dm_connector_update_modes(connector, aconnector->edid);
+
+ return ret;
+}
+
+static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ return &amdgpu_dm_connector->mst_encoder->base;
+}
+
+static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
+ .get_modes = dm_dp_mst_get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .best_encoder = dm_mst_best_encoder,
+};
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static struct amdgpu_encoder *
+dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_encoder *encoder;
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->base.helper_private;
+ struct drm_encoder *enc_master =
+ connector_funcs->best_encoder(&connector->base);
+
+ DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+ amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return NULL;
+
+ encoder = &amdgpu_encoder->base;
+ encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ drm_encoder_init(
+ dev,
+ &amdgpu_encoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_DPMST,
+ NULL);
+
+ drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+
+ return amdgpu_encoder;
+}
+
+static struct drm_connector *
+dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
+{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->mst_port == master
+ && !aconnector->port) {
+ DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ aconnector->port = port;
+ drm_mode_connector_set_path_property(connector, pathprop);
+
+ drm_connector_list_iter_end(&conn_iter);
+ return &aconnector->base;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ return NULL;
+
+ connector = &aconnector->base;
+ aconnector->port = port;
+ aconnector->mst_port = master;
+
+ if (drm_connector_init(
+ dev,
+ connector,
+ &dm_dp_mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort)) {
+ kfree(aconnector);
+ return NULL;
+ }
+ drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ &adev->dm,
+ aconnector,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ master->dc_link,
+ master->connector_id);
+
+ aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+
+ /*
+ * TODO: understand why this one is needed
+ */
+ drm_object_attach_property(
+ &connector->base,
+ dev->mode_config.path_property,
+ 0);
+ drm_object_attach_property(
+ &connector->base,
+ dev->mode_config.tile_property,
+ 0);
+
+ drm_mode_connector_set_path_property(connector, pathprop);
+
+ /*
+ * Initialize connector state before adding the connectror to drm and
+ * framebuffer lists
+ */
+ amdgpu_dm_connector_funcs_reset(connector);
+
+ DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ DRM_DEBUG_KMS(":%d\n", connector->base.id);
+
+ return connector;
+}
+
+static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ aconnector->port = NULL;
+ if (aconnector->dc_sink) {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ }
+ if (aconnector->edid) {
+ kfree(aconnector->edid);
+ aconnector->edid = NULL;
+ }
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+}
+
+static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static void dm_dp_mst_register_connector(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ if (adev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
+ else
+ DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
+
+ drm_connector_register(connector);
+
+}
+
+static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+ .add_connector = dm_dp_add_mst_connector,
+ .destroy_connector = dm_dp_destroy_mst_connector,
+ .hotplug = dm_dp_mst_hotplug,
+ .register_connector = dm_dp_mst_register_connector
+};
+
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->dm_dp_aux.aux.name = "dmdc";
+ aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+ aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
+ aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
+
+ drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ aconnector->mst_mgr.cbs = &dm_mst_cbs;
+ drm_dp_mst_topology_mgr_init(
+ &aconnector->mst_mgr,
+ dm->adev->ddev,
+ &aconnector->dm_dp_aux.aux,
+ 16,
+ 4,
+ aconnector->connector_id);
+}
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
new file mode 100644
index 000000000000..2da851b40042
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
+#define __DAL_AMDGPU_DM_MST_TYPES_H__
+
+struct amdgpu_display_manager;
+struct amdgpu_dm_connector;
+
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
new file mode 100644
index 000000000000..5df8fd5b537c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+
+unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+ /* TODO: return actual timestamp */
+ return 0;
+}
+
+bool dm_write_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag)
+{
+ /*TODO implement*/
+ return false;
+}
+
+bool dm_read_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag)
+{
+ /*TODO implement*/
+ return false;
+}
+
+/**** power component interfaces ****/
+
+bool dm_pp_pre_dce_clock_change(
+ struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *requested_state,
+ struct dm_pp_gpu_clock_range *actual_state)
+{
+ /*TODO*/
+ return false;
+}
+
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+
+ if (adev->pm.dpm_enabled) {
+
+ memset(&adev->pm.pm_display_cfg, 0,
+ sizeof(adev->pm.pm_display_cfg));
+
+ adev->pm.pm_display_cfg.cpu_cc6_disable =
+ pp_display_cfg->cpu_cc6_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_disable =
+ pp_display_cfg->cpu_pstate_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+ pp_display_cfg->cpu_pstate_separation_time;
+
+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+ pp_display_cfg->nb_pstate_switch_disable;
+
+ adev->pm.pm_display_cfg.num_display =
+ pp_display_cfg->display_count;
+ adev->pm.pm_display_cfg.num_path_including_non_display =
+ pp_display_cfg->display_count;
+
+ adev->pm.pm_display_cfg.min_core_set_clock =
+ pp_display_cfg->min_engine_clock_khz/10;
+ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+ pp_display_cfg->avail_mclk_switch_time_us;
+
+ adev->pm.pm_display_cfg.display_clk =
+ pp_display_cfg->disp_clk_khz/10;
+
+ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+ adev->pm.pm_display_cfg.line_time_in_us =
+ pp_display_cfg->line_time_in_us;
+
+ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+ /* TODO: complete implementation of
+ * amd_powerplay_display_configuration_change().
+ * Follow example of:
+ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ amd_powerplay_display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+
+ /* TODO: replace by a separate call to 'apply display cfg'? */
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+}
+
+bool dc_service_get_system_clocks_range(
+ const struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *sys_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+
+ /* Default values, in case PPLib is not compiled-in. */
+ sys_clks->mclk.max_khz = 800000;
+ sys_clks->mclk.min_khz = 800000;
+
+ sys_clks->sclk.max_khz = 600000;
+ sys_clks->sclk.min_khz = 300000;
+
+ if (adev->pm.dpm_enabled) {
+ sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
+ sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
+
+ sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
+ sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
+ }
+
+ return true;
+}
+
+static void get_default_clock_levels(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clks)
+{
+ uint32_t disp_clks_in_khz[6] = {
+ 300000, 400000, 496560, 626090, 685720, 757900 };
+ uint32_t sclks_in_khz[6] = {
+ 300000, 360000, 423530, 514290, 626090, 720000 };
+ uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+ switch (clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, disp_clks_in_khz,
+ sizeof(disp_clks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, sclks_in_khz,
+ sizeof(sclks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ clks->num_levels = 2;
+ memmove(clks->clocks_in_khz, mclks_in_khz,
+ sizeof(mclks_in_khz));
+ break;
+ default:
+ clks->num_levels = 0;
+ break;
+ }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+ enum dm_pp_clock_type dm_pp_clk_type)
+{
+ enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ amd_pp_clk_type = amd_pp_disp_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ amd_pp_clk_type = amd_pp_sys_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
+
+ return amd_pp_clk_type;
+}
+
+static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->count,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ dc_clks->num_levels = pp_clks->count;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+ /* translate 10kHz to kHz */
+ dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
+ }
+}
+
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *dc_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct amd_pp_clocks pp_clks = { 0 };
+ struct amd_pp_simple_clock_info validation_clks = { 0 };
+ uint32_t i;
+
+ if (amd_powerplay_get_clock_by_type(pp_handle,
+ dc_to_pp_clock_type(clk_type), &pp_clks)) {
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
+
+ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+ if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
+ &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
+
+ DRM_INFO("DM_PPLIB: Validation clocks:\n");
+ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+ validation_clks.engine_max_clock);
+ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+ validation_clks.memory_max_clock);
+ DRM_INFO("DM_PPLIB: level : %d\n",
+ validation_clks.level);
+
+ /* Translate 10 kHz to kHz. */
+ validation_clks.engine_max_clock *= 10;
+ validation_clks.memory_max_clock *= 10;
+
+ /* Determine the highest non-boosted level from the Validation Clocks */
+ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+ /* This clock is higher the validation clock.
+ * Than means the previous one is the highest
+ * non-boosted one. */
+ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+{}
+
+/**** end of power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
new file mode 100644
index 000000000000..aed538a4d1ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -0,0 +1,54 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for Display Core (dc) component.
+#
+
+DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
+
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+DC_LIBS += dcn10 dml
+endif
+
+DC_LIBS += dce120
+
+DC_LIBS += dce112
+DC_LIBS += dce110
+DC_LIBS += dce100
+DC_LIBS += dce80
+
+AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
+
+include $(AMD_DC)
+
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
+
+AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
+
+AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
+
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
+AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
new file mode 100644
index 000000000000..6af8c8a9ad80
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'utils' sub-component of DAL.
+# It provides the general basic services required by other DAL
+# subcomponents.
+
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+ logger.o log_helpers.o vector.o
+
+AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
new file mode 100644
index 000000000000..23c9a0ec0181
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#define DIVIDER 10000
+
+/* S2D13 value in [-3.00...0.9999] */
+#define S2D13_MIN (-3 * DIVIDER)
+#define S2D13_MAX (3 * DIVIDER)
+
+uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits)
+{
+ int32_t numerator;
+ int32_t divisor = 1 << fractional_bits;
+
+ uint16_t result;
+
+ uint16_t d = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_abs(
+ arg));
+
+ if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+ numerator = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_mul_int(
+ arg,
+ divisor));
+ else {
+ numerator = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(
+ dal_fixed31_32_from_int(
+ 1LL << integer_bits),
+ dal_fixed31_32_recip(
+ dal_fixed31_32_from_int(
+ divisor))));
+ }
+
+ if (numerator >= 0)
+ result = (uint16_t)numerator;
+ else
+ result = (uint16_t)(
+ (1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+ if ((result != 0) && dal_fixed31_32_lt(
+ arg, dal_fixed31_32_zero))
+ result |= 1 << (integer_bits + fractional_bits);
+
+ return result;
+}
+/**
+* convert_float_matrix
+* This converts a double into HW register spec defined format S2D13.
+* @param :
+* @return None
+*/
+void convert_float_matrix(
+ uint16_t *matrix,
+ struct fixed31_32 *flt,
+ uint32_t buffer_size)
+{
+ const struct fixed31_32 min_2_13 =
+ dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
+ const struct fixed31_32 max_2_13 =
+ dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
+ uint32_t i;
+
+ for (i = 0; i < buffer_size; ++i) {
+ uint32_t reg_value =
+ fixed_point_to_int_frac(
+ dal_fixed31_32_clamp(
+ flt[i],
+ min_2_13,
+ max_2_13),
+ 2,
+ 13);
+
+ matrix[i] = (uint16_t)reg_value;
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index 13c8dbbccaf2..ade785c4fdc7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,22 +19,28 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#ifndef _ICELAND_SMC_H
-#define _ICELAND_SMC_H
-#include "smumgr.h"
+#ifndef __DAL_CONVERSION_H__
+#define __DAL_CONVERSION_H__
+#include "include/fixed31_32.h"
-int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
-uint32_t iceland_get_mac_definition(uint32_t value);
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
-#endif
+uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits);
+
+void convert_float_matrix(
+ uint16_t *matrix,
+ struct fixed31_32 *flt,
+ uint32_t buffer_size);
+static inline unsigned int log_2(unsigned int num)
+{
+ return ilog2(num);
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
new file mode 100644
index 000000000000..26936892c6f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed31_32.h"
+
+static inline uint64_t abs_i64(
+ int64_t arg)
+{
+ if (arg > 0)
+ return (uint64_t)arg;
+ else
+ return (uint64_t)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline uint64_t complete_integer_division_u64(
+ uint64_t dividend,
+ uint64_t divisor,
+ uint64_t *remainder)
+{
+ uint64_t result;
+
+ ASSERT(divisor);
+
+ result = div64_u64_rem(dividend, divisor, remainder);
+
+ return result;
+}
+
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+ ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+
+ uint64_t arg1_value = arg1_negative ? -numerator : numerator;
+ uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+
+ uint64_t remainder;
+
+ /* determine integer part */
+
+ uint64_t res_value = complete_integer_division_u64(
+ arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= LONG_MAX);
+
+ /* determine fractional part */
+ {
+ uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint64_t summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= LLONG_MAX - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (int64_t)res_value;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(
+ int64_t arg)
+{
+ struct fixed31_32 res;
+
+ ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
+
+ res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+
+ res.value = arg.value << shift;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+ uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
+ uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+
+ uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ uint64_t tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ uint64_t arg_value = abs_i64(arg.value);
+
+ uint64_t arg_int = GET_INTEGER_PART(arg_value);
+
+ uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+ uint64_t tmp;
+
+ res.value = arg_int * arg_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg_int * arg_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg)
+{
+ /*
+ * @note
+ * Good idea to use Newton's method
+ */
+
+ ASSERT(arg.value);
+
+ return dal_fixed31_32_from_fraction(
+ dal_fixed31_32_one.value,
+ arg.value);
+}
+
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 square;
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 27;
+
+ struct fixed31_32 arg_norm = arg;
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_two_pi,
+ dal_fixed31_32_abs(arg))) {
+ arg_norm = dal_fixed31_32_sub(
+ arg_norm,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_two_pi,
+ (int32_t)div64_s64(
+ arg_norm.value,
+ dal_fixed31_32_two_pi.value)));
+ }
+
+ square = dal_fixed31_32_sqr(arg_norm);
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+ res = dal_fixed31_32_div(
+ dal_fixed31_32_mul(res, arg_norm),
+ arg);
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg)
+{
+ return dal_fixed31_32_mul(
+ arg,
+ dal_fixed31_32_sinc(arg));
+}
+
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg)
+{
+ /* TODO implement argument normalization */
+
+ const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 26;
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n != 0);
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+ struct fixed31_32 arg)
+{
+ uint32_t n = 9;
+
+ struct fixed31_32 res = dal_fixed31_32_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
+
+ ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
+
+ do
+ res = dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+ return dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_mul(
+ arg,
+ res));
+}
+
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg)
+{
+ /*
+ * @brief
+ * Main equation is:
+ * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_ln2_div_2,
+ dal_fixed31_32_abs(arg))) {
+ int32_t m = dal_fixed31_32_round(
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_ln2));
+
+ struct fixed31_32 r = dal_fixed31_32_sub(
+ arg,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+ ASSERT(dal_fixed31_32_lt(
+ dal_fixed31_32_abs(r),
+ dal_fixed31_32_one));
+
+ if (m > 0)
+ return dal_fixed31_32_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (uint8_t)m);
+ else
+ return dal_fixed31_32_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+ return dal_fixed31_32_one;
+}
+
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
+ /* TODO improve 1st estimation */
+
+ struct fixed31_32 error;
+
+ ASSERT(arg.value > 0);
+ /* TODO if arg is negative, return NaN */
+ /* TODO if arg is zero, return -INF */
+
+ do {
+ struct fixed31_32 res1 = dal_fixed31_32_add(
+ dal_fixed31_32_sub(
+ res,
+ dal_fixed31_32_one),
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_exp(res)));
+
+ error = dal_fixed31_32_sub(
+ res,
+ res1);
+
+ res = res1;
+ /* TODO determine max_allowed_error based on quality of exp() */
+ } while (abs_i64(error.value) > 100ULL);
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_exp(
+ dal_fixed31_32_mul(
+ dal_fixed31_32_log(arg1),
+ arg2));
+}
+
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_half.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_one.value -
+ dal_fixed31_32_epsilon.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* this function is a generic helper to translate fixed point value to
+ * specified integer format that will consist of integer_bits integer part and
+ * fractional_bits fractional part. For example it is used in
+ * dal_fixed31_32_u2d19 to receive 2 bits integer part and 19 bits fractional
+ * part in 32 bits. It is used in hw programming (scaler)
+ */
+
+static inline uint32_t ux_dy(
+ int64_t value,
+ uint32_t integer_bits,
+ uint32_t fractional_bits)
+{
+ /* 1. create mask of integer part */
+ uint32_t result = (1 << integer_bits) - 1;
+ /* 2. mask out fractional part */
+ uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+ /* 3. shrink fixed point integer part to be of integer_bits width*/
+ result &= GET_INTEGER_PART(value);
+ /* 4. make space for fractional part to be filled in after integer */
+ result <<= fractional_bits;
+ /* 5. shrink fixed point fractional part to of fractional_bits width*/
+ fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
+ /* 6. merge the result */
+ return result | fractional_part;
+}
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 2, 19);
+}
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 0, 19);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
new file mode 100644
index 000000000000..4d3aaa82a07b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed32_32.h"
+
+static uint64_t u64_div(uint64_t n, uint64_t d)
+{
+ uint32_t i = 0;
+ uint64_t r;
+ uint64_t q = div64_u64_rem(n, d, &r);
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t sbit = q & (1ULL<<63);
+
+ r <<= 1;
+ r |= sbit ? 1 : 0;
+ q <<= 1;
+ if (r >= d) {
+ r -= d;
+ q |= 1;
+ }
+ }
+
+ if (2*r >= d)
+ q += 1;
+ return q;
+}
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx = {lhs.value + rhs.value};
+
+ ASSERT(fx.value >= rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
+
+ ASSERT(fx.value >= (uint64_t)rhs << 32);
+ return fx;
+
+}
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= rhs.value);
+ fx.value = lhs.value - rhs.value;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= ((uint64_t)rhs<<32));
+ fx.value = lhs.value - ((uint64_t)rhs<<32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhs_int = lhs.value>>32;
+ uint64_t lhs_frac = (uint32_t)lhs.value;
+ uint64_t rhs_int = rhs.value>>32;
+ uint64_t rhs_frac = (uint32_t)rhs.value;
+ uint64_t ahbh = lhs_int * rhs_int;
+ uint64_t ahbl = lhs_int * rhs_frac;
+ uint64_t albh = lhs_frac * rhs_int;
+ uint64_t albl = lhs_frac * rhs_frac;
+
+ ASSERT((ahbh>>32) == 0);
+
+ fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
+ return fx;
+
+}
+
+struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
+ uint64_t lhsf;
+
+ ASSERT((lhsi>>32) == 0);
+ lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
+ ASSERT((lhsi<<32) + lhsf >= lhsf);
+ fx.value = (lhsi<<32) + lhsf;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
+ return fx;
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
+{
+ ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
+ return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 v)
+{
+ ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
+ return (v.value + (1ULL<<31))>>32;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
new file mode 100644
index 000000000000..147822545252
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/grph_object_id.h"
+
+static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+ bool rc = true;
+
+ switch (id.type) {
+ case OBJECT_TYPE_UNKNOWN:
+ rc = false;
+ break;
+ case OBJECT_TYPE_GPU:
+ case OBJECT_TYPE_ENGINE:
+ /* do NOT check for id.id == 0 */
+ if (id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ default:
+ if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2)
+{
+ if (false == dal_graphics_object_id_is_valid(id1)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id1'!\n", __func__);
+ return false;
+ }
+
+ if (false == dal_graphics_object_id_is_valid(id2)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id2'!\n", __func__);
+ return false;
+ }
+
+ if (id1.id == id2.id && id1.enum_id == id2.enum_id
+ && id1.type == id2.type)
+ return true;
+
+ return false;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
new file mode 100644
index 000000000000..6e43168fbdd6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "logger.h"
+#include "include/logger_interface.h"
+#include "dm_helpers.h"
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+struct dc_signal_type_info {
+ enum signal_type type;
+ char name[MAX_NAME_LEN];
+};
+
+static const struct dc_signal_type_info signal_type_info_tbl[] = {
+ {SIGNAL_TYPE_NONE, "NC"},
+ {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
+ {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
+ {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
+ {SIGNAL_TYPE_LVDS, "LVDS"},
+ {SIGNAL_TYPE_RGB, "VGA"},
+ {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
+ {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
+ {SIGNAL_TYPE_EDP, "eDP"},
+ {SIGNAL_TYPE_VIRTUAL, "Virtual"}
+};
+
+void dc_conn_log(struct dc_context *ctx,
+ const struct dc_link *link,
+ uint8_t *hex_data,
+ int hex_data_count,
+ enum dc_log_type event,
+ const char *msg,
+ ...)
+{
+ int i;
+ va_list args;
+ struct log_entry entry = { 0 };
+ enum signal_type signal;
+
+ if (link->local_sink)
+ signal = link->local_sink->sink_signal;
+ else
+ signal = link->connector_signal;
+
+ if (link->type == dc_connection_mst_branch)
+ signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+
+ dm_logger_open(ctx->logger, &entry, event);
+
+ for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
+ if (signal == signal_type_info_tbl[i].type)
+ break;
+
+ if (i == NUM_ELEMENTS(signal_type_info_tbl))
+ goto fail;
+
+ dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
+ signal_type_info_tbl[i].name,
+ link->link_index);
+
+ va_start(args, msg);
+ entry.buf_offset += dm_log_to_buffer(
+ &entry.buf[entry.buf_offset],
+ LOG_MAX_LINE_SIZE - entry.buf_offset,
+ msg, args);
+
+ if (entry.buf[strlen(entry.buf) - 1] == '\n') {
+ entry.buf[strlen(entry.buf) - 1] = '\0';
+ entry.buf_offset--;
+ }
+
+ if (hex_data)
+ for (i = 0; i < hex_data_count; i++)
+ dm_logger_append(&entry, "%2.2X ", hex_data[i]);
+
+ dm_logger_append(&entry, "^\n");
+ dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
+ dm_logger_close(&entry);
+
+ va_end(args);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
new file mode 100644
index 000000000000..e04e8ecd4874
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "logger.h"
+
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct dc_log_type_info log_type_info_tbl[] = {
+ {LOG_ERROR, "Error"},
+ {LOG_WARNING, "Warning"},
+ {LOG_DEBUG, "Debug"},
+ {LOG_DC, "DC_Interface"},
+ {LOG_SURFACE, "Surface"},
+ {LOG_HW_HOTPLUG, "HW_Hotplug"},
+ {LOG_HW_LINK_TRAINING, "HW_LKTN"},
+ {LOG_HW_SET_MODE, "HW_Mode"},
+ {LOG_HW_RESUME_S3, "HW_Resume"},
+ {LOG_HW_AUDIO, "HW_Audio"},
+ {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
+ {LOG_MST, "MST"},
+ {LOG_SCALER, "Scaler"},
+ {LOG_BIOS, "BIOS"},
+ {LOG_BANDWIDTH_CALCS, "BWCalcs"},
+ {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
+ {LOG_I2C_AUX, "I2C_AUX"},
+ {LOG_SYNC, "Sync"},
+ {LOG_BACKLIGHT, "Backlight"},
+ {LOG_FEATURE_OVERRIDE, "Override"},
+ {LOG_DETECTION_EDID_PARSER, "Edid"},
+ {LOG_DETECTION_DP_CAPS, "DP_Caps"},
+ {LOG_RESOURCE, "Resource"},
+ {LOG_DML, "DML"},
+ {LOG_EVENT_MODE_SET, "Mode"},
+ {LOG_EVENT_DETECTION, "Detect"},
+ {LOG_EVENT_LINK_TRAINING, "LKTN"},
+ {LOG_EVENT_LINK_LOSS, "LinkLoss"},
+ {LOG_EVENT_UNDERFLOW, "Underflow"},
+ {LOG_IF_TRACE, "InterfaceTrace"},
+ {LOG_DTN, "DTN"}
+};
+
+
+/* ----------- Object init and destruction ----------- */
+static bool construct(struct dc_context *ctx, struct dal_logger *logger,
+ uint32_t log_mask)
+{
+ /* malloc buffer and init offsets */
+ logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
+ logger->log_buffer = (char *)kzalloc(logger->log_buffer_size * sizeof(char),
+ GFP_KERNEL);
+
+ if (!logger->log_buffer)
+ return false;
+
+ /* Initialize both offsets to start of buffer (empty) */
+ logger->buffer_read_offset = 0;
+ logger->buffer_write_offset = 0;
+
+ logger->open_count = 0;
+
+ logger->flags.bits.ENABLE_CONSOLE = 1;
+ logger->flags.bits.ENABLE_BUFFER = 0;
+
+ logger->ctx = ctx;
+
+ logger->mask = log_mask;
+
+ return true;
+}
+
+static void destruct(struct dal_logger *logger)
+{
+ if (logger->log_buffer) {
+ kfree(logger->log_buffer);
+ logger->log_buffer = NULL;
+ }
+}
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
+{
+ /* malloc struct */
+ struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
+ GFP_KERNEL);
+
+ if (!logger)
+ return NULL;
+ if (!construct(ctx, logger, log_mask)) {
+ kfree(logger);
+ return NULL;
+ }
+
+ return logger;
+}
+
+uint32_t dal_logger_destroy(struct dal_logger **logger)
+{
+ if (logger == NULL || *logger == NULL)
+ return 1;
+ destruct(*logger);
+ kfree(*logger);
+ *logger = NULL;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+static bool dal_logger_should_log(
+ struct dal_logger *logger,
+ enum dc_log_type log_type)
+{
+ if (logger->mask & (1 << log_type))
+ return true;
+
+ return false;
+}
+
+static void log_to_debug_console(struct log_entry *entry)
+{
+ struct dal_logger *logger = entry->logger;
+
+ if (logger->flags.bits.ENABLE_CONSOLE == 0)
+ return;
+
+ if (entry->buf_offset) {
+ switch (entry->type) {
+ case LOG_ERROR:
+ dm_error("%s", entry->buf);
+ break;
+ default:
+ dm_output_to_console("%s", entry->buf);
+ break;
+ }
+ }
+}
+
+/* Print everything unread existing in log_buffer to debug console*/
+void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
+{
+ char *string_start = &logger->log_buffer[logger->buffer_read_offset];
+
+ if (should_warn)
+ dm_output_to_console(
+ "---------------- FLUSHING LOG BUFFER ----------------\n");
+ while (logger->buffer_read_offset < logger->buffer_write_offset) {
+
+ if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
+ dm_output_to_console("%s", string_start);
+ string_start = logger->log_buffer + logger->buffer_read_offset + 1;
+ }
+ logger->buffer_read_offset++;
+ }
+ if (should_warn)
+ dm_output_to_console(
+ "-------------- END FLUSHING LOG BUFFER --------------\n\n");
+}
+
+static void log_to_internal_buffer(struct log_entry *entry)
+{
+
+ uint32_t size = entry->buf_offset;
+ struct dal_logger *logger = entry->logger;
+
+ if (logger->flags.bits.ENABLE_BUFFER == 0)
+ return;
+
+ if (logger->log_buffer == NULL)
+ return;
+
+ if (size > 0 && size < logger->log_buffer_size) {
+
+ int buffer_space = logger->log_buffer_size -
+ logger->buffer_write_offset;
+
+ if (logger->buffer_write_offset == logger->buffer_read_offset) {
+ /* Buffer is empty, start writing at beginning */
+ buffer_space = logger->log_buffer_size;
+ logger->buffer_write_offset = 0;
+ logger->buffer_read_offset = 0;
+ }
+
+ if (buffer_space > size) {
+ /* No wrap around, copy 'size' bytes
+ * from 'entry->buf' to 'log_buffer'
+ */
+ memmove(logger->log_buffer +
+ logger->buffer_write_offset,
+ entry->buf, size);
+ logger->buffer_write_offset += size;
+
+ } else {
+ /* Not enough room remaining, we should flush
+ * existing logs */
+
+ /* Flush existing unread logs to console */
+ dm_logger_flush_buffer(logger, true);
+
+ /* Start writing to beginning of buffer */
+ memmove(logger->log_buffer, entry->buf, size);
+ logger->buffer_write_offset = size;
+ logger->buffer_read_offset = 0;
+ }
+
+ }
+}
+
+static void log_heading(struct log_entry *entry)
+{
+ int j;
+
+ for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+
+ const struct dc_log_type_info *info = &log_type_info_tbl[j];
+
+ if (info->type == entry->type)
+ dm_logger_append(entry, "[%s]\t", info->name);
+ }
+}
+
+static void append_entry(
+ struct log_entry *entry,
+ char *buffer,
+ uint32_t buf_size)
+{
+ if (!entry->buf ||
+ entry->buf_offset + buf_size > entry->max_buf_bytes
+ ) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* Todo: check if off by 1 byte due to \0 anywhere */
+ memmove(entry->buf + entry->buf_offset, buffer, buf_size);
+ entry->buf_offset += buf_size;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Warning: Be careful that 'msg' is null terminated and the total size is
+ * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
+ */
+void dm_logger_write(
+ struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ ...)
+{
+ if (logger && dal_logger_should_log(logger, log_type)) {
+ uint32_t size;
+ va_list args;
+ char buffer[LOG_MAX_LINE_SIZE];
+ struct log_entry entry;
+
+ va_start(args, msg);
+
+ entry.logger = logger;
+
+ entry.buf = buffer;
+
+ entry.buf_offset = 0;
+ entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+
+ entry.type = log_type;
+
+ log_heading(&entry);
+
+ size = dm_log_to_buffer(
+ buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
+
+ buffer[entry.buf_offset + size] = '\0';
+ entry.buf_offset += size + 1;
+
+ /* --Flush log_entry buffer-- */
+ /* print to kernel console */
+ log_to_debug_console(&entry);
+ /* log internally for dsat */
+ log_to_internal_buffer(&entry);
+
+ va_end(args);
+ }
+}
+
+/* Same as dm_logger_write, except without open() and close(), which must
+ * be done separately.
+ */
+void dm_logger_append(
+ struct log_entry *entry,
+ const char *msg,
+ ...)
+{
+ struct dal_logger *logger;
+
+ if (!entry) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ logger = entry->logger;
+
+ if (logger && logger->open_count > 0 &&
+ dal_logger_should_log(logger, entry->type)) {
+
+ uint32_t size;
+ va_list args;
+ char buffer[LOG_MAX_LINE_SIZE];
+
+ va_start(args, msg);
+
+ size = dm_log_to_buffer(
+ buffer, LOG_MAX_LINE_SIZE, msg, args);
+
+ if (size < LOG_MAX_LINE_SIZE - 1) {
+ append_entry(entry, buffer, size);
+ } else {
+ append_entry(entry, "LOG_ERROR, line too long\n", 27);
+ }
+
+ va_end(args);
+ }
+}
+
+void dm_logger_open(
+ struct dal_logger *logger,
+ struct log_entry *entry, /* out */
+ enum dc_log_type log_type)
+{
+ if (!entry) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ entry->type = log_type;
+ entry->logger = logger;
+
+ entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char),
+ GFP_KERNEL);
+
+ entry->buf_offset = 0;
+ entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+
+ logger->open_count++;
+
+ log_heading(entry);
+}
+
+void dm_logger_close(struct log_entry *entry)
+{
+ struct dal_logger *logger = entry->logger;
+
+ if (logger && logger->open_count > 0) {
+ logger->open_count--;
+ } else {
+ BREAK_TO_DEBUGGER();
+ goto cleanup;
+ }
+
+ /* --Flush log_entry buffer-- */
+ /* print to kernel console */
+ log_to_debug_console(entry);
+ /* log internally for dsat */
+ log_to_internal_buffer(entry);
+
+ /* TODO: Write end heading */
+
+cleanup:
+ if (entry->buf) {
+ kfree(entry->buf);
+ entry->buf = NULL;
+ entry->buf_offset = 0;
+ entry->max_buf_bytes = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.h b/drivers/gpu/drm/amd/display/dc/basics/logger.h
new file mode 100644
index 000000000000..09722f0f8aa3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_H__
+#define __DAL_LOGGER_H__
+
+
+#endif /* __DAL_LOGGER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
new file mode 100644
index 000000000000..217b8f1f7bf6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/vector.h"
+
+bool dal_vector_construct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size)
+{
+ vector->container = NULL;
+
+ if (!struct_size || !capacity) {
+ /* Container must be non-zero size*/
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ vector->container = kzalloc(struct_size * capacity, GFP_KERNEL);
+ if (vector->container == NULL)
+ return false;
+ vector->capacity = capacity;
+ vector->struct_size = struct_size;
+ vector->count = 0;
+ vector->ctx = ctx;
+ return true;
+}
+
+bool dal_vector_presized_costruct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t count,
+ void *initial_value,
+ uint32_t struct_size)
+{
+ uint32_t i;
+
+ vector->container = NULL;
+
+ if (!struct_size || !count) {
+ /* Container must be non-zero size*/
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ vector->container = kzalloc(struct_size * count, GFP_KERNEL);
+
+ if (vector->container == NULL)
+ return false;
+
+ /* If caller didn't supply initial value then the default
+ * of all zeros is expected, which is exactly what dal_alloc()
+ * initialises the memory to. */
+ if (NULL != initial_value) {
+ for (i = 0; i < count; ++i)
+ memmove(
+ vector->container + i * struct_size,
+ initial_value,
+ struct_size);
+ }
+
+ vector->capacity = count;
+ vector->struct_size = struct_size;
+ vector->count = count;
+ return true;
+}
+
+struct vector *dal_vector_presized_create(
+ struct dc_context *ctx,
+ uint32_t size,
+ void *initial_value,
+ uint32_t struct_size)
+{
+ struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+ if (vector == NULL)
+ return NULL;
+
+ if (dal_vector_presized_costruct(
+ vector, ctx, size, initial_value, struct_size))
+ return vector;
+
+ BREAK_TO_DEBUGGER();
+ kfree(vector);
+ return NULL;
+}
+
+struct vector *dal_vector_create(
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size)
+{
+ struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+ if (vector == NULL)
+ return NULL;
+
+ if (dal_vector_construct(vector, ctx, capacity, struct_size))
+ return vector;
+
+ BREAK_TO_DEBUGGER();
+ kfree(vector);
+ return NULL;
+}
+
+void dal_vector_destruct(
+ struct vector *vector)
+{
+ kfree(vector->container);
+ vector->count = 0;
+ vector->capacity = 0;
+}
+
+void dal_vector_destroy(
+ struct vector **vector)
+{
+ if (vector == NULL || *vector == NULL)
+ return;
+ dal_vector_destruct(*vector);
+ kfree(*vector);
+ *vector = NULL;
+}
+
+uint32_t dal_vector_get_count(
+ const struct vector *vector)
+{
+ return vector->count;
+}
+
+void *dal_vector_at_index(
+ const struct vector *vector,
+ uint32_t index)
+{
+ if (vector->container == NULL || index >= vector->count)
+ return NULL;
+ return vector->container + (index * vector->struct_size);
+}
+
+bool dal_vector_remove_at_index(
+ struct vector *vector,
+ uint32_t index)
+{
+ if (index >= vector->count)
+ return false;
+
+ if (index != vector->count - 1)
+ memmove(
+ vector->container + (index * vector->struct_size),
+ vector->container + ((index + 1) * vector->struct_size),
+ (vector->count - index - 1) * vector->struct_size);
+ vector->count -= 1;
+
+ return true;
+}
+
+void dal_vector_set_at_index(
+ const struct vector *vector,
+ const void *what,
+ uint32_t index)
+{
+ void *where = dal_vector_at_index(vector, index);
+
+ if (!where) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ memmove(
+ where,
+ what,
+ vector->struct_size);
+}
+
+static inline uint32_t calc_increased_capacity(
+ uint32_t old_capacity)
+{
+ return old_capacity * 2;
+}
+
+bool dal_vector_insert_at(
+ struct vector *vector,
+ const void *what,
+ uint32_t position)
+{
+ uint8_t *insert_address;
+
+ if (vector->count == vector->capacity) {
+ if (!dal_vector_reserve(
+ vector,
+ calc_increased_capacity(vector->capacity)))
+ return false;
+ }
+
+ insert_address = vector->container + (vector->struct_size * position);
+
+ if (vector->count && position < vector->count)
+ memmove(
+ insert_address + vector->struct_size,
+ insert_address,
+ vector->struct_size * (vector->count - position));
+
+ memmove(
+ insert_address,
+ what,
+ vector->struct_size);
+
+ vector->count++;
+
+ return true;
+}
+
+bool dal_vector_append(
+ struct vector *vector,
+ const void *item)
+{
+ return dal_vector_insert_at(vector, item, vector->count);
+}
+
+struct vector *dal_vector_clone(
+ const struct vector *vector)
+{
+ struct vector *vec_cloned;
+ uint32_t count;
+
+ /* create new vector */
+ count = dal_vector_get_count(vector);
+
+ if (count == 0)
+ /* when count is 0 we still want to create clone of the vector
+ */
+ vec_cloned = dal_vector_create(
+ vector->ctx,
+ vector->capacity,
+ vector->struct_size);
+ else
+ /* Call "presized create" version, independently of how the
+ * original vector was created.
+ * The owner of original vector must know how to treat the new
+ * vector - as "presized" or as "regular".
+ * But from vector point of view it doesn't matter. */
+ vec_cloned = dal_vector_presized_create(vector->ctx, count,
+ NULL,/* no initial value */
+ vector->struct_size);
+
+ if (NULL == vec_cloned) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ /* copy vector's data */
+ memmove(vec_cloned->container, vector->container,
+ vec_cloned->struct_size * vec_cloned->capacity);
+
+ return vec_cloned;
+}
+
+uint32_t dal_vector_capacity(const struct vector *vector)
+{
+ return vector->capacity;
+}
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity)
+{
+ void *new_container;
+
+ if (capacity <= vector->capacity)
+ return true;
+
+ new_container = krealloc(vector->container,
+ capacity * vector->struct_size, GFP_KERNEL);
+
+ if (new_container) {
+ vector->container = new_container;
+ vector->capacity = capacity;
+ return true;
+ }
+
+ return false;
+}
+
+void dal_vector_clear(struct vector *vector)
+{
+ vector->count = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
new file mode 100644
index 000000000000..239e86bbec5a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
@@ -0,0 +1,48 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'bios' sub-component of DAL.
+# It provides the parsing and executing controls for atom bios image.
+
+BIOS = bios_parser.o bios_parser_interface.o bios_parser_helper.o command_table.o command_table_helper.o bios_parser_common.o
+
+BIOS += command_table2.o command_table_helper2.o bios_parser2.o
+
+AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
+# DCE8.x is compiled.
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce80/command_table_helper_dce80.o
+
+###############################################################################
+# DCE 11x
+###############################################################################
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce110/command_table_helper_dce110.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper_dce112.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper2_dce112.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
new file mode 100644
index 000000000000..86e6438c5cf3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -0,0 +1,3871 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "dc_bios_types.h"
+#include "include/gpio_service_interface.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "bios_parser.h"
+#include "bios_parser_types_internal.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+/* TODO remove - only needed for default i2c speed */
+#include "dc.h"
+
+#define THREE_PERCENT_OF_10000 300
+
+#define LAST_RECORD_TYPE 0xff
+
+/* GUID to validate external display connection info table (aka OPM module) */
+static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
+ 0x91, 0x6E, 0x57, 0x09,
+ 0x3F, 0x6D, 0xD2, 0x11,
+ 0x39, 0x8E, 0x00, 0xA0,
+ 0xC9, 0x69, 0x72, 0x3B};
+
+#define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
+
+static void get_atom_data_table_revision(
+ ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+ struct atom_data_revision *tbl_revision);
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+ ATOM_OBJECT *object);
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list);
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+ ATOM_OBJECT *object, uint16_t **id_list);
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ struct graphics_object_id id);
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ ATOM_I2C_RECORD *record,
+ struct graphics_object_i2c_info *info);
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ ATOM_OBJECT *object);
+static struct device_id device_type_from_device_id(uint16_t device_id);
+static uint32_t signal_to_ss_id(enum as_signal_type signal);
+static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+/*****************************************************************************/
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+static uint8_t bios_parser_get_connectors_number(
+ struct dc_bios *dcb);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info);
+
+/*****************************************************************************/
+
+struct dc_bios *bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct bios_parser *bp = NULL;
+
+ bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+ if (!bp)
+ return NULL;
+
+ if (bios_parser_construct(bp, init, dce_version))
+ return &bp->base;
+
+ kfree(bp);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void destruct(struct bios_parser *bp)
+{
+ kfree(bp->base.bios_local_image);
+ kfree(bp->base.integrated_info);
+}
+
+static void bios_parser_destroy(struct dc_bios **dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+ if (!bp) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ destruct(bp);
+
+ kfree(bp);
+ *dcb = NULL;
+}
+
+static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
+{
+ ATOM_OBJECT_TABLE *table;
+
+ uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
+
+ table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
+
+ if (!table)
+ return 0;
+ else
+ return table->ucNumberOfObjects;
+}
+
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ return get_number_of_objects(bp,
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
+}
+
+static struct graphics_object_id bios_parser_get_encoder_id(
+ struct dc_bios *dcb,
+ uint32_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ uint32_t encoder_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+
+ ATOM_OBJECT_TABLE *tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+ if (tbl && tbl->ucNumberOfObjects > i) {
+ const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+ object_id = object_id_from_bios_object_id(id);
+ }
+
+ return object_id;
+}
+
+static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ uint32_t connector_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+ ATOM_OBJECT_TABLE *tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
+
+ if (tbl && tbl->ucNumberOfObjects > i) {
+ const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+ object_id = object_id_from_bios_object_id(id);
+ }
+
+ return object_id;
+}
+
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+ struct graphics_object_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object = get_bios_object(bp, id);
+
+ return get_dst_number_from_object(bp, object);
+}
+
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+{
+ uint32_t number;
+ uint16_t *id;
+ ATOM_OBJECT *object;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!src_object_id)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ number = get_src_obj_list(bp, object, &id);
+
+ if (number <= index)
+ return BP_RESULT_BADINPUT;
+
+ *src_object_id = object_id_from_bios_object_id(id[index]);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id)
+{
+ uint32_t number;
+ uint16_t *id = NULL;
+ ATOM_OBJECT *object;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!dest_object_id)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ number = get_dest_obj_list(bp, object, &id);
+
+ if (number <= index || !id)
+ return BP_RESULT_BADINPUT;
+
+ *dest_object_id = object_id_from_bios_object_id(id[index]);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ ATOM_OBJECT *object;
+ ATOM_COMMON_RECORD_HEADER *header;
+ ATOM_I2C_RECORD *record;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_I2C_RECORD) <= header->ucRecordSize) {
+ /* get the I2C info */
+ record = (ATOM_I2C_RECORD *) header;
+
+ if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+
+ offset += header->ucRecordSize;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line,
+ ATOM_COMMON_TABLE_HEADER *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ ATOM_VOLTAGE_OBJECT_INFO *info =
+ (ATOM_VOLTAGE_OBJECT_INFO *) address;
+
+ uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0];
+
+ while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+ ATOM_VOLTAGE_OBJECT *object =
+ (ATOM_VOLTAGE_OBJECT *) voltage_current_object;
+
+ if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) &&
+ (object->ucVoltageType &
+ VOLTAGE_CONTROLLED_BY_I2C_MASK)) {
+
+ *i2c_line = object->asControl.ucVoltageControlI2cLine
+ ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ voltage_current_object += object->ucSize;
+ }
+ return result;
+}
+
+static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line,
+ uint32_t index,
+ ATOM_COMMON_TABLE_HEADER *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info =
+ (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address;
+
+ uint8_t *voltage_current_object =
+ (uint8_t *) (&(info->asVoltageObj[0]));
+
+ while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+ ATOM_I2C_VOLTAGE_OBJECT_V3 *object =
+ (ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object;
+
+ if (object->sHeader.ucVoltageMode ==
+ ATOM_INIT_VOLTAGE_REGULATOR) {
+ if (object->sHeader.ucVoltageType == index) {
+ *i2c_line = object->ucVoltageControlI2cLine
+ ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+ }
+
+ voltage_current_object += le16_to_cpu(object->sHeader.usSize);
+ }
+ return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+ struct dc_bios *dcb,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_I2C_ID_CONFIG_ACCESS *config;
+ ATOM_I2C_RECORD record;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id;
+
+ record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable;
+ record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux;
+ record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID;
+
+ return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+ uint32_t index,
+ struct graphics_object_i2c_info *info)
+{
+ uint8_t i2c_line = 0;
+ enum bp_result result = BP_RESULT_NORECORD;
+ uint8_t *voltage_info_address;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision = {0};
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!DATA_TABLES(VoltageObjectInfo))
+ return result;
+
+ voltage_info_address = bios_get_image(&bp->base, DATA_TABLES(VoltageObjectInfo), sizeof(ATOM_COMMON_TABLE_HEADER));
+
+ header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address;
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 1:
+ case 2:
+ result = get_voltage_ddc_info_v1(&i2c_line, header,
+ voltage_info_address);
+ break;
+ case 3:
+ if (revision.minor != 1)
+ break;
+ result = get_voltage_ddc_info_v3(&i2c_line, index, header,
+ voltage_info_address);
+ break;
+ }
+
+ if (result == BP_RESULT_OK)
+ result = bios_parser_get_thermal_ddc_info(dcb,
+ i2c_line, info);
+
+ return result;
+}
+
+/* TODO: temporary commented out to suppress 'defined but not used' warning */
+#if 0
+static enum bp_result bios_parser_get_ddc_info_for_i2c_line(
+ struct bios_parser *bp,
+ uint8_t i2c_line, struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ ATOM_OBJECT *object;
+ ATOM_OBJECT_TABLE *table;
+ uint32_t i;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+ offset += bp->object_info_tbl_offset;
+
+ table = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+
+ if (!table)
+ return BP_RESULT_BADBIOSTABLE;
+
+ for (i = 0; i < table->ucNumberOfObjects; i++) {
+ object = &table->asObjects[i];
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ ATOM_COMMON_RECORD_HEADER *header =
+ GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_I2C_RECORD) <=
+ header->ucRecordSize) {
+ ATOM_I2C_RECORD *record =
+ (ATOM_I2C_RECORD *) header;
+
+ if (i2c_line != record->sucI2cId.bfI2C_LineMux)
+ continue;
+
+ /* get the I2C info */
+ if (get_gpio_i2c_info(bp, record, info) ==
+ BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+ }
+ }
+
+ return BP_RESULT_NORECORD;
+}
+#endif
+
+static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_HPD_INT_RECORD *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_hpd_record(bp, object);
+
+ if (record != NULL) {
+ info->hpd_int_gpio_uid = record->ucHPDIntGPIOID;
+ info->hpd_active = record->ucPlugged_PinState;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_get_device_tag_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object,
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD **record)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE !=
+ header->ucRecordType)
+ continue;
+
+ if (sizeof(ATOM_CONNECTOR_DEVICE_TAG) > header->ucRecordSize)
+ continue;
+
+ *record = (ATOM_CONNECTOR_DEVICE_TAG_RECORD *) header;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+ struct dc_bios *dcb,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *record = NULL;
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* getBiosObject will return MXM object */
+ object = get_bios_object(bp, connector_object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (bios_parser_get_device_tag_record(bp, object, &record)
+ != BP_RESULT_OK)
+ return BP_RESULT_NORECORD;
+
+ if (device_tag_index >= record->ucNumberOfDevice)
+ return BP_RESULT_NORECORD;
+
+ device_tag = &record->asDeviceTag[device_tag_index];
+
+ info->acpi_device = le32_to_cpu(device_tag->ulACPIDeviceEnum);
+ info->dev_id =
+ device_type_from_device_id(le16_to_cpu(device_tag->usDeviceID));
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(FirmwareInfo)) {
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(FirmwareInfo));
+ get_atom_data_table_revision(header, &revision);
+ switch (revision.major) {
+ case 1:
+ switch (revision.minor) {
+ case 4:
+ result = get_firmware_info_v1_4(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case 2:
+ switch (revision.minor) {
+ case 1:
+ result = get_firmware_info_v2_1(bp, info);
+ break;
+ case 2:
+ result = get_firmware_info_v2_2(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return result;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V1_4 *firmware_info =
+ GET_IMAGE(ATOM_FIRMWARE_INFO_V1_4,
+ DATA_TABLES(FirmwareInfo));
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmware_info->usReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+
+ if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information on the SS, report conservative
+ * value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+ if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information on the SS,report conservative
+ * value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v3_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info);
+
+static enum bp_result get_firmware_info_v2_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V2_1 *firmwareInfo =
+ GET_IMAGE(ATOM_FIRMWARE_INFO_V2_1, DATA_TABLES(FirmwareInfo));
+ struct spread_spectrum_info internalSS;
+ uint32_t index;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!firmwareInfo)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmwareInfo->usCoreReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmwareInfo->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmwareInfo->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulMaxPixelClockPLL_Output) * 10;
+ info->default_display_engine_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulDefaultDispEngineClkFreq) * 10;
+ info->external_clock_source_frequency_for_dp =
+ le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
+ info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+
+ /* There should be only one entry in the SS info table for Memory Clock
+ */
+ index = 0;
+ if (firmwareInfo->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_MEMORY_SS, index, &internalSS) == BP_RESULT_OK) {
+ if (internalSS.spread_spectrum_percentage) {
+ info->feature.memory_clk_ss_percentage =
+ internalSS.spread_spectrum_percentage;
+ if (internalSS.type.CENTER_MODE) {
+ /* if it is centermode, the exact SS Percentage
+ * will be round up of half of the percentage
+ * reported in the SS table */
+ ++info->feature.memory_clk_ss_percentage;
+ info->feature.memory_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* There should be only one entry in the SS info table for Engine Clock
+ */
+ index = 1;
+ if (firmwareInfo->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_ENGINE_SS, index, &internalSS) == BP_RESULT_OK) {
+ if (internalSS.spread_spectrum_percentage) {
+ info->feature.engine_clk_ss_percentage =
+ internalSS.spread_spectrum_percentage;
+ if (internalSS.type.CENTER_MODE) {
+ /* if it is centermode, the exact SS Percentage
+ * will be round up of half of the percentage
+ * reported in the SS table */
+ ++info->feature.engine_clk_ss_percentage;
+ info->feature.engine_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v2_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+ struct spread_spectrum_info internal_ss;
+ uint32_t index;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(ATOM_FIRMWARE_INFO_V2_2,
+ DATA_TABLES(FirmwareInfo));
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmware_info->usCoreReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+ info->default_display_engine_pll_frequency =
+ le32_to_cpu(firmware_info->ulDefaultDispEngineClkFreq) * 10;
+ info->external_clock_source_frequency_for_dp =
+ le16_to_cpu(firmware_info->usUniphyDPModeExtClkFreq) * 10;
+
+ /* There should be only one entry in the SS info table for Memory Clock
+ */
+ index = 0;
+ if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_MEMORY_SS, index, &internal_ss) == BP_RESULT_OK) {
+ if (internal_ss.spread_spectrum_percentage) {
+ info->feature.memory_clk_ss_percentage =
+ internal_ss.spread_spectrum_percentage;
+ if (internal_ss.type.CENTER_MODE) {
+ /* if it is centermode, the exact SS Percentage
+ * will be round up of half of the percentage
+ * reported in the SS table */
+ ++info->feature.memory_clk_ss_percentage;
+ info->feature.memory_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* There should be only one entry in the SS info table for Engine Clock
+ */
+ index = 1;
+ if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_ENGINE_SS, index, &internal_ss) == BP_RESULT_OK) {
+ if (internal_ss.spread_spectrum_percentage) {
+ info->feature.engine_clk_ss_percentage =
+ internal_ss.spread_spectrum_percentage;
+ if (internal_ss.type.CENTER_MODE) {
+ /* if it is centermode, the exact SS Percentage
+ * will be round up of half of the percentage
+ * reported in the SS table */
+ ++info->feature.engine_clk_ss_percentage;
+ info->feature.engine_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* Remote Display */
+ info->remote_display_config = firmware_info->ucRemoteDisplayConfig;
+
+ /* Is allowed minimum BL level */
+ info->min_allowed_bl_level = firmware_info->ucMinAllowedBL_Level;
+ /* Used starting from CI */
+ info->smu_gpu_pll_output_freq =
+ (uint32_t) (le32_to_cpu(firmware_info->ulGPUPLL_OutputFreq) * 10);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v3_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ ATOM_ASIC_INTERNAL_SS_INFO_V3 *ss_table_header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t table_index = 0;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ table_size =
+ (le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+ &ss_table_header_include->asSpreadSpectrum[0];
+
+ memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+ for (i = 0; i < table_size; i++) {
+ if (tbl[i].ucClockIndication != (uint8_t) id)
+ continue;
+
+ if (table_index != index) {
+ table_index++;
+ continue;
+ }
+ /* VBIOS introduced new defines for Version 3, same values as
+ * before, so now use these new ones for Version 3.
+ * Shouldn't affect field VBIOS's V3 as define values are still
+ * same.
+ * #define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
+ * #define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
+
+ * Old VBIOS defines:
+ * #define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+ * #define ATOM_EXTERNAL_SS_MASK 0x00000002
+ */
+
+ if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
+ ss_info->type.EXTERNAL = true;
+
+ if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
+ ss_info->type.CENTER_MODE = true;
+
+ /* Older VBIOS (in field) always provides SS percentage in 0.01%
+ * units set Divider to 100 */
+ ss_info->spread_percentage_divider = 100;
+
+ /* #define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 */
+ if (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK
+ & tbl[i].ucSpreadSpectrumMode)
+ ss_info->spread_percentage_divider = 1000;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ /* convert [10KHz] into [KHz] */
+ ss_info->target_clock_range =
+ le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+ ss_info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+ ss_info->spread_spectrum_range =
+ (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+
+ return BP_RESULT_OK;
+ }
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_transmitter_control(
+ struct dc_bios *dcb,
+ struct bp_transmitter_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.transmitter_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+ struct dc_bios *dcb,
+ struct bp_encoder_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.dig_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_adjust_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.adjust_display_pll)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.adjust_display_pll(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_pixel_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+ struct dc_bios *dcb,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_dce_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_spread_spectrum_on_ppll(
+ struct dc_bios *dcb,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_spread_spectrum_on_ppll)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_spread_spectrum_on_ppll(
+ bp, bp_params, enable);
+
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+ struct dc_bios *dcb,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_crtc_timing)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_program_display_engine_pll(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.program_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.program_clock(bp, bp_params);
+
+}
+
+
+static enum bp_result bios_parser_enable_crtc(
+ struct dc_bios *dcb,
+ enum controller_id id,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_crtc)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+ struct dc_bios *dcb,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_disp_power_gating)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+ action);
+}
+
+static bool bios_parser_is_device_id_supported(
+ struct dc_bios *dcb,
+ struct device_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ uint32_t mask = get_support_mask_for_device_id(id);
+
+ return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
+}
+
+static enum bp_result bios_parser_crt_control(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ bool enable,
+ uint32_t pixel_clock)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ uint8_t standard;
+
+ if (!bp->cmd_tbl.dac1_encoder_control &&
+ engine_id == ENGINE_ID_DACA)
+ return BP_RESULT_FAILURE;
+ if (!bp->cmd_tbl.dac2_encoder_control &&
+ engine_id == ENGINE_ID_DACB)
+ return BP_RESULT_FAILURE;
+ /* validate params */
+ switch (engine_id) {
+ case ENGINE_ID_DACA:
+ case ENGINE_ID_DACB:
+ break;
+ default:
+ /* unsupported engine */
+ return BP_RESULT_FAILURE;
+ }
+
+ standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */
+
+ if (enable) {
+ if (engine_id == ENGINE_ID_DACA) {
+ bp->cmd_tbl.dac1_encoder_control(bp, enable,
+ pixel_clock, standard);
+ if (bp->cmd_tbl.dac1_output_control != NULL)
+ bp->cmd_tbl.dac1_output_control(bp, enable);
+ } else {
+ bp->cmd_tbl.dac2_encoder_control(bp, enable,
+ pixel_clock, standard);
+ if (bp->cmd_tbl.dac2_output_control != NULL)
+ bp->cmd_tbl.dac2_output_control(bp, enable);
+ }
+ } else {
+ if (engine_id == ENGINE_ID_DACA) {
+ if (bp->cmd_tbl.dac1_output_control != NULL)
+ bp->cmd_tbl.dac1_output_control(bp, enable);
+ bp->cmd_tbl.dac1_encoder_control(bp, enable,
+ pixel_clock, standard);
+ } else {
+ if (bp->cmd_tbl.dac2_output_control != NULL)
+ bp->cmd_tbl.dac2_output_control(bp, enable);
+ bp->cmd_tbl.dac2_encoder_control(bp, enable,
+ pixel_clock, standard);
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_HPD_INT_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_HPD_INT_RECORD) <= header->ucRecordSize)
+ return (ATOM_HPD_INT_RECORD *) header;
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/**
+ * Get I2C information of input object id
+ *
+ * search all records to find the ATOM_I2C_RECORD_TYPE record IR
+ */
+static ATOM_I2C_RECORD *get_i2c_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *record_header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!record_header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == record_header->ucRecordType ||
+ 0 == record_header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType &&
+ sizeof(ATOM_I2C_RECORD) <=
+ record_header->ucRecordSize) {
+ return (ATOM_I2C_RECORD *)record_header;
+ }
+
+ offset += record_header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+static enum bp_result get_ss_info_from_ss_info_table(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info);
+static enum bp_result get_ss_info_from_tbl(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info);
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info(ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with SS_Info table. Expect ASIC_InternalSS_Info ver 3.1,
+ * there is only one entry for each signal /ss id. However, there is
+ * no planning of supporting multiple spread Sprectum entry for EverGreen
+ * @param [in] this
+ * @param [in] signal, ASSignalType to be converted to info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, sprectrum information structure,
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+ struct dc_bios *dcb,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ uint32_t clk_id_ss = 0;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!ss_info) /* check for bad input */
+ return BP_RESULT_BADINPUT;
+ /* signal translation */
+ clk_id_ss = signal_to_ss_id(signal);
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ if (!index)
+ return get_ss_info_from_ss_info_table(bp, clk_id_ss,
+ ss_info);
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ switch (tbl_revision.major) {
+ case 2:
+ switch (tbl_revision.minor) {
+ case 1:
+ /* there can not be more then one entry for Internal
+ * SS Info table version 2.1 */
+ if (!index)
+ return get_ss_info_from_tbl(bp, clk_id_ss,
+ ss_info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case 3:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_ss_info_v3_1(bp, clk_id_ss, index, ss_info);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ /* there can not be more then one entry for SS Info table */
+ return result;
+}
+
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *info);
+
+/**
+ * get_ss_info_from_table
+ * Get spread sprectrum information from the ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info table from the VBIOS
+ * There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param this
+ * @param id, spread sprectrum info index
+ * @param pSSinfo, sprectrum information structure,
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_tbl(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info)
+{
+ if (!ss_info) /* check for bad input, if ss_info is not NULL */
+ return BP_RESULT_BADINPUT;
+ /* for SS_Info table only support DP and LVDS */
+ if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+ return get_ss_info_from_ss_info_table(bp, id, ss_info);
+ else
+ return get_ss_info_from_internal_ss_info_tbl_V2_1(bp, id,
+ ss_info);
+}
+
+/**
+ * get_ss_info_from_internal_ss_info_tbl_V2_1
+ * Get spread sprectrum information from the ASIC_InternalSS_Info table Ver 2.1
+ * from the VBIOS
+ * There will not be multiple entry for Ver 2.1
+ *
+ * @param id, spread sprectrum info index
+ * @param pSSinfo, sprectrum information structure,
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *info)
+{
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ ATOM_ASIC_INTERNAL_SS_INFO_V2 *header;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+ uint32_t tbl_size, i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return result;
+
+ header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+ DATA_TABLES(ASIC_InternalSS_Info));
+
+ memset(info, 0, sizeof(struct spread_spectrum_info));
+
+ tbl_size = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+ &(header->asSpreadSpectrum[0]);
+ for (i = 0; i < tbl_size; i++) {
+ result = BP_RESULT_NORECORD;
+
+ if (tbl[i].ucClockIndication != (uint8_t)id)
+ continue;
+
+ if (ATOM_EXTERNAL_SS_MASK
+ & tbl[i].ucSpreadSpectrumMode) {
+ info->type.EXTERNAL = true;
+ }
+ if (ATOM_SS_CENTRE_SPREAD_MODE_MASK
+ & tbl[i].ucSpreadSpectrumMode) {
+ info->type.CENTER_MODE = true;
+ }
+ info->type.STEP_AND_DELAY_INFO = false;
+ /* convert [10KHz] into [KHz] */
+ info->target_clock_range =
+ le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+ info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+ info->spread_spectrum_range =
+ (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ return result;
+
+}
+
+/**
+ * get_ss_info_from_ss_info_table
+ * Get spread sprectrum information from the SS_Info table from the VBIOS
+ * if the pointer to info is NULL, indicate the caller what to know the number
+ * of entries that matches the id
+ * for, the SS_Info table, there should not be more than 1 entry match.
+ *
+ * @param [in] id, spread sprectrum id
+ * @param [out] pSSinfo, sprectrum information structure,
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_ss_info_table(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ ATOM_SPREAD_SPECTRUM_INFO *tbl;
+ ATOM_COMMON_TABLE_HEADER *header;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t id_local = SS_ID_UNKNOWN;
+ struct atom_data_revision revision;
+
+ /* exist of the SS_Info table */
+ /* check for bad input, pSSinfo can not be NULL */
+ if (!DATA_TABLES(SS_Info) || !ss_info)
+ return result;
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(SS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
+
+ if (1 != revision.major || 2 > revision.minor)
+ return result;
+
+ /* have to convert from Internal_SS format to SS_Info format */
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_DP:
+ id_local = SS_ID_DP1;
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ {
+ struct embedded_panel_info panel_info;
+
+ if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+ == BP_RESULT_OK)
+ id_local = panel_info.ss_id;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (id_local == SS_ID_UNKNOWN)
+ return result;
+
+ table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+ for (i = 0; i < table_size; i++) {
+ if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id)
+ continue;
+
+ memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+ if (ATOM_EXTERNAL_SS_MASK &
+ tbl->asSS_Info[i].ucSpreadSpectrumType)
+ ss_info->type.EXTERNAL = true;
+
+ if (ATOM_SS_CENTRE_SPREAD_MODE_MASK &
+ tbl->asSS_Info[i].ucSpreadSpectrumType)
+ ss_info->type.CENTER_MODE = true;
+
+ ss_info->type.STEP_AND_DELAY_INFO = true;
+ ss_info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step;
+ ss_info->step_and_delay_info.delay =
+ tbl->asSS_Info[i].ucSS_Delay;
+ ss_info->step_and_delay_info.recommended_ref_div =
+ tbl->asSS_Info[i].ucRecommendedRef_Div;
+ ss_info->spread_spectrum_range =
+ (uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000;
+
+ /* there will be only one entry for each display type in SS_info
+ * table */
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ return result;
+}
+static enum bp_result get_embedded_panel_info_v1_2(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info);
+static enum bp_result get_embedded_panel_info_v1_3(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_COMMON_TABLE_HEADER *hdr;
+
+ if (!DATA_TABLES(LCD_Info))
+ return BP_RESULT_FAILURE;
+
+ hdr = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(LCD_Info));
+
+ if (!hdr)
+ return BP_RESULT_BADBIOSTABLE;
+
+ switch (hdr->ucTableFormatRevision) {
+ case 1:
+ switch (hdr->ucTableContentRevision) {
+ case 0:
+ case 1:
+ case 2:
+ return get_embedded_panel_info_v1_2(bp, info);
+ case 3:
+ return get_embedded_panel_info_v1_3(bp, info);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
+static enum bp_result get_embedded_panel_info_v1_2(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ ATOM_LVDS_INFO_V12 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(LVDS_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds =
+ GET_IMAGE(ATOM_LVDS_INFO_V12, DATA_TABLES(LVDS_Info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != lvds->sHeader.ucTableFormatRevision
+ || 2 > lvds->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units*/
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+ /* usHActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usHActive);
+ /* usHBlanking_Time includes borders, so we should really be subtracting
+ * borders duing this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usVActive);
+ /* usVBlanking_Time includes borders, so we should really be subtracting
+ * borders duing this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+ info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+ info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+ info->lcd_timing.misc_info.INTERLACE =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+ info->lcd_timing.misc_info.DOUBLE_CLOCK =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+ info->ss_id = lvds->ucSS_Id;
+
+ {
+ uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
+ /* Get minimum supported refresh rate*/
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ }
+
+ /*Drr panel support can be reported by VBIOS*/
+ if (LCDPANEL_CAP_DRR_SUPPORTED
+ & lvds->ucLCDPanel_SpecialHandlingCap)
+ info->drr_enabled = 1;
+
+ if (ATOM_PANEL_MISC_DUAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+ if (ATOM_PANEL_MISC_888RGB & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.RGB888 = true;
+
+ info->lcd_timing.misc_info.GREY_LEVEL =
+ (uint32_t) (ATOM_PANEL_MISC_GREY_LEVEL &
+ lvds->ucLVDS_Misc) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;
+
+ if (ATOM_PANEL_MISC_SPATIAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.SPATIAL = true;
+
+ if (ATOM_PANEL_MISC_TEMPORAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.TEMPORAL = true;
+
+ if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.API_ENABLED = true;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_embedded_panel_info_v1_3(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ ATOM_LCD_INFO_V13 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(LCD_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds = GET_IMAGE(ATOM_LCD_INFO_V13, DATA_TABLES(LCD_Info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (!((1 == lvds->sHeader.ucTableFormatRevision)
+ && (3 <= lvds->sHeader.ucTableContentRevision)))
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units */
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+ /* usHActive does not include borders, according to VBIOS team */
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usHActive);
+ /* usHBlanking_Time includes borders, so we should really be subtracting
+ * borders duing this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usVActive);
+ /* usVBlanking_Time includes borders, so we should really be subtracting
+ * borders duing this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+ info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+ info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+ info->lcd_timing.misc_info.INTERLACE =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+ info->lcd_timing.misc_info.DOUBLE_CLOCK =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+ info->ss_id = lvds->ucSS_Id;
+
+ /* Drr panel support can be reported by VBIOS*/
+ if (LCDPANEL_CAP_V13_DRR_SUPPORTED
+ & lvds->ucLCDPanel_SpecialHandlingCap)
+ info->drr_enabled = 1;
+
+ /* Get supported refresh rate*/
+ if (info->drr_enabled == 1) {
+ uint8_t min_rr =
+ lvds->sRefreshRateSupport.ucMinRefreshRateForDRR;
+ uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;
+
+ if (min_rr != 0) {
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ } else {
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ }
+ }
+
+ if (ATOM_PANEL_MISC_V13_DUAL & lvds->ucLCD_Misc)
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+ if (ATOM_PANEL_MISC_V13_8BIT_PER_COLOR & lvds->ucLCD_Misc)
+ info->lcd_timing.misc_info.RGB888 = true;
+
+ info->lcd_timing.misc_info.GREY_LEVEL =
+ (uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL &
+ lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;
+
+ return BP_RESULT_OK;
+}
+
+/**
+ * bios_parser_get_encoder_cap_info
+ *
+ * @brief
+ * Get encoder capability information of input object id
+ *
+ * @param object_id, Object id
+ * @param object_id, encoder cap information structure
+ *
+ * @return Bios parser result code
+ *
+ */
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_ENCODER_CAP_RECORD_V2 *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_encoder_cap_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->DP_HBR2_EN = record->usHBR2En;
+ info->DP_HBR3_EN = record->usHBR3En;
+ info->HDMI_6GB_EN = record->usHDMI6GEn;
+ return BP_RESULT_OK;
+}
+
+/**
+ * get_encoder_cap_record
+ *
+ * @brief
+ * Get encoder cap record for the object
+ *
+ * @param object, ATOM object
+ *
+ * @return atom encoder cap record
+ *
+ * @note
+ * search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
+ */
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_ENCODER_CAP_RECORD_TYPE != header->ucRecordType)
+ continue;
+
+ if (sizeof(ATOM_ENCODER_CAP_RECORD_V2) <= header->ucRecordSize)
+ return (ATOM_ENCODER_CAP_RECORD_V2 *)header;
+ }
+
+ return NULL;
+}
+
+static uint32_t get_ss_entry_number(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+ struct bios_parser *bp,
+ uint32_t id);
+
+/**
+ * BiosParserObject::GetNumberofSpreadSpectrumEntry
+ * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table from
+ * the VBIOS that match the SSid (to be converted from signal)
+ *
+ * @param[in] signal, ASSignalType to be converted to SSid
+ * @return number of SS Entry that match the signal
+ */
+static uint32_t bios_parser_get_ss_entry_number(
+ struct dc_bios *dcb,
+ enum as_signal_type signal)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ uint32_t ss_id = 0;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ ss_id = signal_to_ss_id(signal);
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return get_ss_entry_number_from_ss_info_tbl(bp, ss_id);
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 2:
+ switch (revision.minor) {
+ case 1:
+ return get_ss_entry_number(bp, ss_id);
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (revision.minor) {
+ case 1:
+ return
+ get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ bp, ss_id);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * get_ss_entry_number_from_ss_info_tbl
+ * Get Number of spread spectrum entry from the SS_Info table from the VBIOS.
+ *
+ * @note There can only be one entry for each id for SS_Info Table
+ *
+ * @param [in] id, spread spectrum id
+ * @return number of SS Entry that match the id
+ */
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ ATOM_SPREAD_SPECTRUM_INFO *tbl;
+ ATOM_COMMON_TABLE_HEADER *header;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t number = 0;
+ uint32_t id_local = SS_ID_UNKNOWN;
+ struct atom_data_revision revision;
+
+ /* SS_Info table exist */
+ if (!DATA_TABLES(SS_Info))
+ return number;
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(SS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
+ DATA_TABLES(SS_Info));
+
+ if (1 != revision.major || 2 > revision.minor)
+ return number;
+
+ /* have to convert from Internal_SS format to SS_Info format */
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_DP:
+ id_local = SS_ID_DP1;
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS: {
+ struct embedded_panel_info panel_info;
+
+ if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+ == BP_RESULT_OK)
+ id_local = panel_info.ss_id;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (id_local == SS_ID_UNKNOWN)
+ return number;
+
+ table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+ for (i = 0; i < table_size; i++)
+ if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
+ number = 1;
+ break;
+ }
+
+ return number;
+}
+
+/**
+ * get_ss_entry_number
+ * Get spread sprectrum information from the ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info table from the VBIOS
+ * There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param id, spread sprectrum info index
+ * @return Bios parser result code
+ */
+static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
+{
+ if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+ return get_ss_entry_number_from_ss_info_tbl(bp, id);
+
+ return get_ss_entry_number_from_internal_ss_info_tbl_v2_1(bp, id);
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_v2_1
+ * Get NUmber of spread sprectrum entry from the ASIC_InternalSS_Info table
+ * Ver 2.1 from the VBIOS
+ * There will not be multiple entry for Ver 2.1
+ *
+ * @param id, spread sprectrum info index
+ * @return number of SS Entry that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ ATOM_ASIC_INTERNAL_SS_INFO_V2 *header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+ uint32_t size;
+ uint32_t i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return 0;
+
+ header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+ DATA_TABLES(ASIC_InternalSS_Info));
+
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+ &header_include->asSpreadSpectrum[0];
+ for (i = 0; i < size; i++)
+ if (tbl[i].ucClockIndication == (uint8_t)id)
+ return 1;
+
+ return 0;
+}
+/**
+ * get_ss_entry_number_from_internal_ss_info_table_V3_1
+ * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
+ * the VBIOS that matches id
+ *
+ * @param[in] id, spread sprectrum id
+ * @return number of SS Entry that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ uint32_t number = 0;
+ ATOM_ASIC_INTERNAL_SS_INFO_V3 *header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+ uint32_t size;
+ uint32_t i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return number;
+
+ header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+ &header_include->asSpreadSpectrum[0];
+
+ for (i = 0; i < size; i++)
+ if (tbl[i].ucClockIndication == (uint8_t)id)
+ number++;
+
+ return number;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GpioPin information of input gpio id
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure
+ * @return Bios parser result code
+ * @note
+ * to get the GPIO PIN INFO, we need:
+ * 1. get the GPIO_ID from other object table, see GetHPDInfo()
+ * 2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
+ * offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+ struct dc_bios *dcb,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_GPIO_PIN_LUT *header;
+ uint32_t count = 0;
+ uint32_t i = 0;
+
+ if (!DATA_TABLES(GPIO_Pin_LUT))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
+ > le16_to_cpu(header->sHeader.usStructureSize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != header->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ count = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+ for (i = 0; i < count; ++i) {
+ if (header->asGPIO_Pin[i].ucGPIO_ID != gpio_id)
+ continue;
+
+ info->offset =
+ (uint32_t) le16_to_cpu(header->asGPIO_Pin[i].usGpioPin_AIndex);
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask = (uint32_t) (1 <<
+ header->asGPIO_Pin[i].ucGpioPinBitShift);
+ info->mask_y = info->mask + 2;
+ info->mask_en = info->mask + 1;
+ info->mask_mask = info->mask - 1;
+
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ ATOM_I2C_RECORD *record,
+ struct graphics_object_i2c_info *info)
+{
+ ATOM_GPIO_I2C_INFO *header;
+ uint32_t count = 0;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* get the GPIO_I2C info */
+ if (!DATA_TABLES(GPIO_I2C_Info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(ATOM_GPIO_I2C_INFO, DATA_TABLES(GPIO_I2C_Info));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_I2C_ASSIGMENT)
+ > le16_to_cpu(header->sHeader.usStructureSize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != header->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* get data count */
+ count = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ if (count < record->sucI2cId.bfI2C_LineMux)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* get the GPIO_I2C_INFO */
+ info->i2c_hw_assist = record->sucI2cId.bfHW_Capable;
+ info->i2c_line = record->sucI2cId.bfI2C_LineMux;
+ info->i2c_engine_id = record->sucI2cId.bfHW_EngineID;
+ info->i2c_slave_address = record->ucI2CAddr;
+
+ info->gpio_info.clk_mask_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkMaskRegisterIndex);
+ info->gpio_info.clk_en_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkEnRegisterIndex);
+ info->gpio_info.clk_y_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkY_RegisterIndex);
+ info->gpio_info.clk_a_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkA_RegisterIndex);
+ info->gpio_info.data_mask_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataMaskRegisterIndex);
+ info->gpio_info.data_en_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataEnRegisterIndex);
+ info->gpio_info.data_y_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataY_RegisterIndex);
+ info->gpio_info.data_a_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataA_RegisterIndex);
+
+ info->gpio_info.clk_mask_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkMaskShift;
+ info->gpio_info.clk_en_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkEnShift;
+ info->gpio_info.clk_y_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkY_Shift;
+ info->gpio_info.clk_a_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkA_Shift;
+ info->gpio_info.data_mask_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataMaskShift;
+ info->gpio_info.data_en_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataEnShift;
+ info->gpio_info.data_y_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataY_Shift;
+ info->gpio_info.data_a_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataA_Shift;
+
+ return BP_RESULT_OK;
+}
+
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ struct graphics_object_id id)
+{
+ uint32_t offset;
+ ATOM_OBJECT_TABLE *tbl;
+ uint32_t i;
+
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_CONNECTOR:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_ROUTER:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usRouterObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_GENERIC:
+ if (bp->object_info_tbl.revision.minor < 3)
+ return NULL;
+ offset = le16_to_cpu(bp->object_info_tbl.v1_3->usMiscObjectTableOffset);
+ break;
+
+ default:
+ return NULL;
+ }
+
+ offset += bp->object_info_tbl_offset;
+
+ tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+ if (!tbl)
+ return NULL;
+
+ for (i = 0; i < tbl->ucNumberOfObjects; i++)
+ if (dal_graphics_object_id_is_equal(id,
+ object_id_from_bios_object_id(
+ le16_to_cpu(tbl->asObjects[i].usObjectID))))
+ return &tbl->asObjects[i];
+
+ return NULL;
+}
+
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+ ATOM_OBJECT *object, uint16_t **id_list)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ offset += sizeof(uint16_t) * (*number);
+
+ number = GET_IMAGE(uint8_t, offset);
+ if ((!number) || (!*number))
+ return 0;
+
+ offset += sizeof(uint8_t);
+ *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+ if (!*id_list)
+ return 0;
+
+ return *number;
+}
+
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+ if (!*id_list)
+ return 0;
+
+ return *number;
+}
+
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ offset += sizeof(uint16_t) * (*number);
+
+ number = GET_IMAGE(uint8_t, offset);
+
+ if (!number)
+ return 0;
+
+ return *number;
+}
+
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+
+ struct device_id result_device_id;
+
+ switch (device_id) {
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_LCD2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_CRT;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_CRT2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_CRT;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 3;
+ break;
+
+ case ATOM_DEVICE_DFP4_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 4;
+ break;
+
+ case ATOM_DEVICE_DFP5_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 5;
+ break;
+
+ case ATOM_DEVICE_DFP6_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 6;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid device Id */
+ result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+ result_device_id.enum_id = 0;
+ }
+ return result_device_id;
+}
+
+static void get_atom_data_table_revision(
+ ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+ struct atom_data_revision *tbl_revision)
+{
+ if (!tbl_revision)
+ return;
+
+ /* initialize the revision to 0 which is invalid revision */
+ tbl_revision->major = 0;
+ tbl_revision->minor = 0;
+
+ if (!atom_data_tbl)
+ return;
+
+ tbl_revision->major =
+ (uint32_t) GET_DATA_TABLE_MAJOR_REVISION(atom_data_tbl);
+ tbl_revision->minor =
+ (uint32_t) GET_DATA_TABLE_MINOR_REVISION(atom_data_tbl);
+}
+
+static uint32_t signal_to_ss_id(enum as_signal_type signal)
+{
+ uint32_t clk_id_ss = 0;
+
+ switch (signal) {
+ case AS_SIGNAL_TYPE_DVI:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_TMDS;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_HDMI;
+ break;
+ case AS_SIGNAL_TYPE_LVDS:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_LVDS;
+ break;
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_DP;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+ clk_id_ss = ASIC_INTERNAL_GPUPLL_SS;
+ break;
+ default:
+ break;
+ }
+ return clk_id_ss;
+}
+
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+ enum dal_device_type device_type = device_id.device_type;
+ uint32_t enum_id = device_id.enum_id;
+
+ switch (device_type) {
+ case DEVICE_TYPE_LCD:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_LCD1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_LCD2_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_CRT:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_CRT1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_CRT2_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_DFP:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_DFP1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_DFP2_SUPPORT;
+ case 3:
+ return ATOM_DEVICE_DFP3_SUPPORT;
+ case 4:
+ return ATOM_DEVICE_DFP4_SUPPORT;
+ case 5:
+ return ATOM_DEVICE_DFP5_SUPPORT;
+ case 6:
+ return ATOM_DEVICE_DFP6_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_CV:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_CV_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_TV:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_TV1_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ };
+
+ /* Unidentified device ID, return empty support mask. */
+ return 0;
+}
+
+/**
+ * HwContext interface for writing MM registers
+ */
+
+static bool i2c_read(
+ struct bios_parser *bp,
+ struct graphics_object_i2c_info *i2c_info,
+ uint8_t *buffer,
+ uint32_t length)
+{
+ struct ddc *ddc;
+ uint8_t offset[2] = { 0, 0 };
+ bool result = false;
+ struct i2c_command cmd;
+ struct gpio_ddc_hw_info hw_info = {
+ i2c_info->i2c_hw_assist,
+ i2c_info->i2c_line };
+
+ ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service,
+ i2c_info->gpio_info.clk_a_register_index,
+ (1 << i2c_info->gpio_info.clk_a_shift), &hw_info);
+
+ if (!ddc)
+ return result;
+
+ /*Using SW engine */
+ cmd.engine = I2C_COMMAND_ENGINE_SW;
+ cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ {
+ struct i2c_payload payloads[] = {
+ {
+ .address = i2c_info->i2c_slave_address >> 1,
+ .data = offset,
+ .length = sizeof(offset),
+ .write = true
+ },
+ {
+ .address = i2c_info->i2c_slave_address >> 1,
+ .data = buffer,
+ .length = length,
+ .write = false
+ }
+ };
+
+ cmd.payloads = payloads;
+ cmd.number_of_payloads = ARRAY_SIZE(payloads);
+
+ /* TODO route this through drm i2c_adapter */
+ result = dal_i2caux_submit_i2c_command(
+ ddc->ctx->i2caux,
+ ddc,
+ &cmd);
+ }
+
+ dal_gpio_destroy_ddc(&ddc);
+
+ return result;
+}
+
+/**
+ * Read external display connection info table through i2c.
+ * validate the GUID and checksum.
+ *
+ * @return enum bp_result whether all data was sucessfully read
+ */
+static enum bp_result get_ext_display_connection_info(
+ struct bios_parser *bp,
+ ATOM_OBJECT *opm_object,
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl)
+{
+ bool config_tbl_present = false;
+ ATOM_I2C_RECORD *i2c_record = NULL;
+ uint32_t i = 0;
+
+ if (opm_object == NULL)
+ return BP_RESULT_BADINPUT;
+
+ i2c_record = get_i2c_record(bp, opm_object);
+
+ if (i2c_record != NULL) {
+ ATOM_GPIO_I2C_INFO *gpio_i2c_header;
+ struct graphics_object_i2c_info i2c_info;
+
+ gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO,
+ bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info);
+
+ if (NULL == gpio_i2c_header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) !=
+ BP_RESULT_OK)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (i2c_read(bp,
+ &i2c_info,
+ (uint8_t *)ext_display_connection_info_tbl,
+ sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) {
+ config_tbl_present = true;
+ }
+ }
+
+ /* Validate GUID */
+ if (config_tbl_present)
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) {
+ if (ext_display_connection_info_tbl->ucGuid[i]
+ != ext_display_connection_guid[i]) {
+ config_tbl_present = false;
+ break;
+ }
+ }
+
+ /* Validate checksum */
+ if (config_tbl_present) {
+ uint8_t check_sum = 0;
+ uint8_t *buf =
+ (uint8_t *)ext_display_connection_info_tbl;
+
+ for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO);
+ i++) {
+ check_sum += buf[i];
+ }
+
+ if (check_sum != 0)
+ config_tbl_present = false;
+ }
+
+ if (config_tbl_present)
+ return BP_RESULT_OK;
+ else
+ return BP_RESULT_FAILURE;
+}
+
+/*
+ * Gets the first device ID in the same group as the given ID for enumerating.
+ * For instance, if any DFP device ID is passed, returns the device ID for DFP1.
+ *
+ * The first device ID in the same group as the passed device ID, or 0 if no
+ * matching device group found.
+ */
+static uint32_t enum_first_device_id(uint32_t dev_id)
+{
+ /* Return the first in the group that this ID belongs to. */
+ if (dev_id & ATOM_DEVICE_CRT_SUPPORT)
+ return ATOM_DEVICE_CRT1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_DFP_SUPPORT)
+ return ATOM_DEVICE_DFP1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_LCD_SUPPORT)
+ return ATOM_DEVICE_LCD1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_TV_SUPPORT)
+ return ATOM_DEVICE_TV1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_CV_SUPPORT)
+ return ATOM_DEVICE_CV_SUPPORT;
+
+ /* No group found for this device ID. */
+
+ dm_error("%s: incorrect input %d\n", __func__, dev_id);
+ /* No matching support flag for given device ID */
+ return 0;
+}
+
+/*
+ * Gets the next device ID in the group for a given device ID.
+ *
+ * The current device ID being enumerated on.
+ *
+ * The next device ID in the group, or 0 if no device exists.
+ */
+static uint32_t enum_next_dev_id(uint32_t dev_id)
+{
+ /* Get next device ID in the group. */
+ switch (dev_id) {
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ return ATOM_DEVICE_CRT2_SUPPORT;
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ return ATOM_DEVICE_LCD2_SUPPORT;
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ return ATOM_DEVICE_DFP2_SUPPORT;
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ return ATOM_DEVICE_DFP3_SUPPORT;
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ return ATOM_DEVICE_DFP4_SUPPORT;
+ case ATOM_DEVICE_DFP4_SUPPORT:
+ return ATOM_DEVICE_DFP5_SUPPORT;
+ case ATOM_DEVICE_DFP5_SUPPORT:
+ return ATOM_DEVICE_DFP6_SUPPORT;
+ }
+
+ /* Done enumerating through devices. */
+ return 0;
+}
+
+/*
+ * Returns the new device tag record for patched BIOS object.
+ *
+ * [IN] pExtDisplayPath - External display path to copy device tag from.
+ * [IN] deviceSupport - Bit vector for device ID support flags.
+ * [OUT] pDeviceTag - Device tag structure to fill with patched data.
+ *
+ * True if a compatible device ID was found, false otherwise.
+ */
+static bool get_patched_device_tag(
+ struct bios_parser *bp,
+ EXT_DISPLAY_PATH *ext_display_path,
+ uint32_t device_support,
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag)
+{
+ uint32_t dev_id;
+ /* Use fallback behaviour if not supported. */
+ if (!bp->remap_device_tags) {
+ device_tag->ulACPIDeviceEnum =
+ cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+ device_tag->usDeviceID =
+ cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag));
+ return true;
+ }
+
+ /* Find the first unused in the same group. */
+ dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag));
+ while (dev_id != 0) {
+ /* Assign this device ID if supported. */
+ if ((device_support & dev_id) != 0) {
+ device_tag->ulACPIDeviceEnum =
+ cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+ device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id);
+ return true;
+ }
+
+ dev_id = enum_next_dev_id(dev_id);
+ }
+
+ /* No compatible device ID found. */
+ return false;
+}
+
+/*
+ * Adds a device tag to a BIOS object's device tag record if there is
+ * matching device ID supported.
+ *
+ * pObject - Pointer to the BIOS object to add the device tag to.
+ * pExtDisplayPath - Display path to retrieve base device ID from.
+ * pDeviceSupport - Pointer to bit vector for supported device IDs.
+ */
+static void add_device_tag_from_ext_display_path(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object,
+ EXT_DISPLAY_PATH *ext_display_path,
+ uint32_t *device_support)
+{
+ /* Get device tag record for object. */
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL;
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL;
+ enum bp_result result =
+ bios_parser_get_device_tag_record(
+ bp, object, &device_tag_record);
+
+ if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE)
+ && (result == BP_RESULT_OK)) {
+ uint8_t index;
+
+ if ((device_tag_record->ucNumberOfDevice == 1) &&
+ (le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) {
+ /*Workaround bug in current VBIOS releases where
+ * ucNumberOfDevice = 1 but there is no actual device
+ * tag data. This w/a is temporary until the updated
+ * VBIOS is distributed. */
+ device_tag_record->ucNumberOfDevice =
+ device_tag_record->ucNumberOfDevice - 1;
+ }
+
+ /* Attempt to find a matching device ID. */
+ index = device_tag_record->ucNumberOfDevice;
+ device_tag = &device_tag_record->asDeviceTag[index];
+ if (get_patched_device_tag(
+ bp,
+ ext_display_path,
+ *device_support,
+ device_tag)) {
+ /* Update cached device support to remove assigned ID.
+ */
+ *device_support &= ~le16_to_cpu(device_tag->usDeviceID);
+ device_tag_record->ucNumberOfDevice++;
+ }
+ }
+}
+
+/*
+ * Read out a single EXT_DISPLAY_PATH from the external display connection info
+ * table. The specific entry in the table is determined by the enum_id passed
+ * in.
+ *
+ * EXT_DISPLAY_PATH describing a single Configuration table entry
+ */
+
+#define INVALID_CONNECTOR 0xffff
+
+static EXT_DISPLAY_PATH *get_ext_display_path_entry(
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table,
+ uint32_t bios_object_id)
+{
+ EXT_DISPLAY_PATH *ext_display_path;
+ uint32_t ext_display_path_index =
+ ((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1;
+
+ if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH)
+ return NULL;
+
+ ext_display_path = &config_table->sPath[ext_display_path_index];
+
+ if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR)
+ ext_display_path->usDeviceConnector = cpu_to_le16(0);
+
+ return ext_display_path;
+}
+
+/*
+ * Get AUX/DDC information of input object id
+ *
+ * search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE record
+ * IR
+ */
+static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ 0 == header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ==
+ header->ucRecordType &&
+ sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <=
+ header->ucRecordSize)
+ return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header);
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/*
+ * Get AUX/DDC information of input object id
+ *
+ * search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE record
+ * IR
+ */
+static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ 0 == header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ==
+ header->ucRecordType &&
+ sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <=
+ header->ucRecordSize)
+ return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header;
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/*
+ * Check whether we need to patch the VBIOS connector info table with
+ * data from an external display connection info table. This is
+ * necessary to support MXM boards with an OPM (output personality
+ * module). With these designs, the VBIOS connector info table
+ * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
+ * the external connection info table through i2c and then looks up the
+ * connector ID to find the real connector type (e.g. DFP1).
+ *
+ */
+static enum bp_result patch_bios_image_from_ext_display_connection_info(
+ struct bios_parser *bp)
+{
+ ATOM_OBJECT_TABLE *connector_tbl;
+ uint32_t connector_tbl_offset;
+ struct graphics_object_id object_id;
+ ATOM_OBJECT *object;
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl;
+ EXT_DISPLAY_PATH *ext_display_path;
+ ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL;
+ ATOM_I2C_RECORD *i2c_record = NULL;
+ ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL;
+ ATOM_HPD_INT_RECORD *hpd_record = NULL;
+ ATOM_OBJECT_TABLE *encoder_table;
+ uint32_t encoder_table_offset;
+ ATOM_OBJECT *opm_object = NULL;
+ uint32_t i = 0;
+ struct graphics_object_id opm_object_id =
+ dal_graphics_object_id_init(
+ GENERIC_ID_MXM_OPM,
+ ENUM_ID_1,
+ OBJECT_TYPE_GENERIC);
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record;
+ uint32_t cached_device_support =
+ le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport);
+
+ uint32_t dst_number;
+ uint16_t *dst_object_id_list;
+
+ opm_object = get_bios_object(bp, opm_object_id);
+ if (!opm_object)
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(&ext_display_connection_info_tbl, 0,
+ sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO));
+
+ connector_tbl_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Read Connector info table from EEPROM through i2c */
+ if (get_ext_display_connection_info(bp,
+ opm_object,
+ &ext_display_connection_info_tbl) != BP_RESULT_OK) {
+
+ dm_logger_write(bp->base.ctx->logger, LOG_WARNING,
+ "%s: Failed to read Connection Info Table", __func__);
+ return BP_RESULT_UNSUPPORTED;
+ }
+
+ /* Get pointer to AUX/DDC and HPD LUTs */
+ aux_ddc_lut_record =
+ get_ext_connector_aux_ddc_lut_record(bp, opm_object);
+ hpd_pin_lut_record =
+ get_ext_connector_hpd_pin_lut_record(bp, opm_object);
+
+ if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL))
+ return BP_RESULT_UNSUPPORTED;
+
+ /* Cache support bits for currently unmapped device types. */
+ if (bp->remap_device_tags) {
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) {
+ uint32_t j;
+ /* Remove support for all non-MXM connectors. */
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(
+ le16_to_cpu(object->usObjectID));
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM == object_id.id))
+ continue;
+
+ /* Remove support for all device tags. */
+ if (bios_parser_get_device_tag_record(
+ bp, object, &dev_tag_record) != BP_RESULT_OK)
+ continue;
+
+ for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) {
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag =
+ &dev_tag_record->asDeviceTag[j];
+ cached_device_support &=
+ ~le16_to_cpu(device_tag->usDeviceID);
+ }
+ }
+ }
+
+ /* Find all MXM connector objects and patch them with connector info
+ * from the external display connection info table. */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ uint32_t j;
+
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM != object_id.id))
+ continue;
+
+ /* Get the correct connection info table entry based on the enum
+ * id. */
+ ext_display_path = get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ le16_to_cpu(object->usObjectID));
+ if (!ext_display_path)
+ return BP_RESULT_FAILURE;
+
+ /* Patch device connector ID */
+ object->usObjectID =
+ cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector));
+
+ /* Patch device tag, ulACPIDeviceEnum. */
+ add_device_tag_from_ext_display_path(
+ bp,
+ object,
+ ext_display_path,
+ &cached_device_support);
+
+ /* Patch HPD info */
+ if (ext_display_path->ucExtHPDPINLutIndex <
+ MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) {
+ hpd_record = get_hpd_record(bp, object);
+ if (hpd_record) {
+ uint8_t index =
+ ext_display_path->ucExtHPDPINLutIndex;
+ hpd_record->ucHPDIntGPIOID =
+ hpd_pin_lut_record->ucHPDPINMap[index];
+ } else {
+ BREAK_TO_DEBUGGER();
+ /* Invalid hpd record */
+ return BP_RESULT_FAILURE;
+ }
+ }
+
+ /* Patch I2C/AUX info */
+ if (ext_display_path->ucExtHPDPINLutIndex <
+ MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) {
+ i2c_record = get_i2c_record(bp, object);
+ if (i2c_record) {
+ uint8_t index =
+ ext_display_path->ucExtAUXDDCLutIndex;
+ i2c_record->sucI2cId =
+ aux_ddc_lut_record->ucAUXDDCMap[index];
+ } else {
+ BREAK_TO_DEBUGGER();
+ /* Invalid I2C record */
+ return BP_RESULT_FAILURE;
+ }
+ }
+
+ /* Merge with other MXM connectors that map to the same physical
+ * connector. */
+ for (j = i + 1;
+ j < connector_tbl->ucNumberOfObjects; j++) {
+ ATOM_OBJECT *next_object;
+ struct graphics_object_id next_object_id;
+ EXT_DISPLAY_PATH *next_ext_display_path;
+
+ next_object = &connector_tbl->asObjects[j];
+ next_object_id = object_id_from_bios_object_id(
+ le16_to_cpu(next_object->usObjectID));
+
+ if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) &&
+ (CONNECTOR_ID_MXM == next_object_id.id))
+ continue;
+
+ next_ext_display_path = get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ le16_to_cpu(next_object->usObjectID));
+
+ if (next_ext_display_path == NULL)
+ return BP_RESULT_FAILURE;
+
+ /* Merge if using same connector. */
+ if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) ==
+ le16_to_cpu(ext_display_path->usDeviceConnector)) &&
+ (le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) {
+ /* Clear duplicate connector from table. */
+ next_object->usObjectID = cpu_to_le16(0);
+ add_device_tag_from_ext_display_path(
+ bp,
+ object,
+ ext_display_path,
+ &cached_device_support);
+ }
+ }
+ }
+
+ /* Find all encoders which have an MXM object as their destination.
+ * Replace the MXM object with the real connector Id from the external
+ * display connection info table */
+
+ encoder_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+ encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+ for (i = 0; i < encoder_table->ucNumberOfObjects; i++) {
+ uint32_t j;
+
+ object = &encoder_table->asObjects[i];
+
+ dst_number = get_dest_obj_list(bp, object, &dst_object_id_list);
+
+ for (j = 0; j < dst_number; j++) {
+ object_id = object_id_from_bios_object_id(
+ dst_object_id_list[j]);
+
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM != object_id.id))
+ continue;
+
+ /* Get the correct connection info table entry based on
+ * the enum id. */
+ ext_display_path =
+ get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ dst_object_id_list[j]);
+
+ if (ext_display_path == NULL)
+ return BP_RESULT_FAILURE;
+
+ dst_object_id_list[j] =
+ le16_to_cpu(ext_display_path->usDeviceConnector);
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * Check whether we need to patch the VBIOS connector info table with
+ * data from an external display connection info table. This is
+ * necessary to support MXM boards with an OPM (output personality
+ * module). With these designs, the VBIOS connector info table
+ * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
+ * the external connection info table through i2c and then looks up the
+ * connector ID to find the real connector type (e.g. DFP1).
+ *
+ */
+
+static void process_ext_display_connection_info(struct bios_parser *bp)
+{
+ ATOM_OBJECT_TABLE *connector_tbl;
+ uint32_t connector_tbl_offset;
+ struct graphics_object_id object_id;
+ ATOM_OBJECT *object;
+ bool mxm_connector_found = false;
+ bool null_entry_found = false;
+ uint32_t i = 0;
+
+ connector_tbl_offset = bp->object_info_tbl_offset +
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Look for MXM connectors to determine whether we need patch the VBIOS
+ * connector info table. Look for null entries to determine whether we
+ * need to compact connector table. */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+
+ if ((OBJECT_TYPE_CONNECTOR == object_id.type) &&
+ (CONNECTOR_ID_MXM == object_id.id)) {
+ /* Once we found MXM connector - we can break */
+ mxm_connector_found = true;
+ break;
+ } else if (OBJECT_TYPE_CONNECTOR != object_id.type) {
+ /* We need to continue looping - to check if MXM
+ * connector present */
+ null_entry_found = true;
+ }
+ }
+
+ /* Patch BIOS image */
+ if (mxm_connector_found || null_entry_found) {
+ uint32_t connectors_num = 0;
+ uint8_t *original_bios;
+ /* Step 1: Replace bios image with the new copy which will be
+ * patched */
+ bp->base.bios_local_image = kzalloc(bp->base.bios_size,
+ GFP_KERNEL);
+ if (bp->base.bios_local_image == NULL) {
+ BREAK_TO_DEBUGGER();
+ /* Failed to alloc bp->base.bios_local_image */
+ return;
+ }
+
+ memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size);
+ original_bios = bp->base.bios;
+ bp->base.bios = bp->base.bios_local_image;
+ connector_tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Step 2: (only if MXM connector found) Patch BIOS image with
+ * info from external module */
+ if (mxm_connector_found &&
+ patch_bios_image_from_ext_display_connection_info(bp) !=
+ BP_RESULT_OK) {
+ /* Patching the bios image has failed. We will copy
+ * again original image provided and afterwards
+ * only remove null entries */
+ memmove(
+ bp->base.bios_local_image,
+ original_bios,
+ bp->base.bios_size);
+ }
+
+ /* Step 3: Compact connector table (remove null entries, valid
+ * entries moved to beginning) */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(
+ le16_to_cpu(object->usObjectID));
+
+ if (OBJECT_TYPE_CONNECTOR != object_id.type)
+ continue;
+
+ if (i != connectors_num) {
+ memmove(
+ &connector_tbl->
+ asObjects[connectors_num],
+ object,
+ sizeof(ATOM_OBJECT));
+ }
+ ++connectors_num;
+ }
+ connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num;
+ }
+}
+
+static void bios_parser_post_init(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ process_ext_display_connection_info(bp);
+}
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ * update critical state bit in VBIOS scratch register
+ *
+ * @param
+ * bool - to set or reset state
+ */
+static void bios_parser_set_scratch_critical_state(
+ struct dc_bios *dcb,
+ bool state)
+{
+ bios_set_scratch_critical_state(dcb, state);
+}
+
+/*
+ * get_integrated_info_v8
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v8(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info_v8;
+ uint32_t i;
+
+ info_v8 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_8,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ if (info_v8 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+ info->boot_up_engine_clock = le32_to_cpu(info_v8->ulBootUpEngineClock) * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v8->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v8->sDISPCLK_Voltage[i].
+ ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v8->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v8->ulBootUpReqDisplayVector);
+ info->gpu_cap_info =
+ le32_to_cpu(info_v8->ulGPUCapInfo);
+
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v8->ulSystemConfig);
+ info->cpu_cap_info = le32_to_cpu(info_v8->ulCPUCapInfo);
+ info->boot_up_nb_voltage =
+ le16_to_cpu(info_v8->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset =
+ le16_to_cpu(info_v8->usExtDispConnInfoOffset);
+ info->memory_type = info_v8->ucMemoryType;
+ info->ma_channel_number = info_v8->ucUMAChannelNumber;
+ info->gmc_restore_reset_time =
+ le32_to_cpu(info_v8->ulGMCRestoreResetTime);
+
+ info->minimum_n_clk =
+ le32_to_cpu(info_v8->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk < le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v8->ulIdleNClk);
+ info->ddr_dll_power_up_time =
+ le32_to_cpu(info_v8->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time =
+ le32_to_cpu(info_v8->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v8->usPCIEClkSSType);
+ info->lvds_ss_percentage =
+ le16_to_cpu(info_v8->usLvdsSSPercentage);
+ info->lvds_sspread_rate_in_10hz =
+ le16_to_cpu(info_v8->usLvdsSSpreadRateIn10Hz);
+ info->hdmi_ss_percentage =
+ le16_to_cpu(info_v8->usHDMISSPercentage);
+ info->hdmi_sspread_rate_in_10hz =
+ le16_to_cpu(info_v8->usHDMISSpreadRateIn10Hz);
+ info->dvi_ss_percentage =
+ le16_to_cpu(info_v8->usDVISSPercentage);
+ info->dvi_sspread_rate_in_10_hz =
+ le16_to_cpu(info_v8->usDVISSpreadRateIn10Hz);
+
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v8->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_misc = info_v8->ucLvdsMisc;
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v8->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v8->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v8->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v8->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v8->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v8->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v8->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v8->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v8->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageID);
+ }
+
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v8->sExtDispConnInfo.ucGuid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceTag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v8->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v8->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v8->sExtDispConnInfo.sPath[i].ucChannelMapping;
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v8->sExtDispConnInfo.ucChecksum;
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * get_integrated_info_v8
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v9(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info_v9;
+ uint32_t i;
+
+ info_v9 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_9,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ if (!info_v9)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->boot_up_engine_clock = le32_to_cpu(info_v9->ulBootUpEngineClock) * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v9->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v9->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v9->ulBootUpReqDisplayVector);
+ info->gpu_cap_info = le32_to_cpu(info_v9->ulGPUCapInfo);
+
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v9->ulSystemConfig);
+ info->cpu_cap_info = le32_to_cpu(info_v9->ulCPUCapInfo);
+ info->boot_up_nb_voltage = le16_to_cpu(info_v9->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset = le16_to_cpu(info_v9->usExtDispConnInfoOffset);
+ info->memory_type = info_v9->ucMemoryType;
+ info->ma_channel_number = info_v9->ucUMAChannelNumber;
+ info->gmc_restore_reset_time = le32_to_cpu(info_v9->ulGMCRestoreResetTime);
+
+ info->minimum_n_clk = le32_to_cpu(info_v9->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk < le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v9->ulIdleNClk);
+ info->ddr_dll_power_up_time = le32_to_cpu(info_v9->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time = le32_to_cpu(info_v9->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v9->usPCIEClkSSType);
+ info->lvds_ss_percentage = le16_to_cpu(info_v9->usLvdsSSPercentage);
+ info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v9->usLvdsSSpreadRateIn10Hz);
+ info->hdmi_ss_percentage = le16_to_cpu(info_v9->usHDMISSPercentage);
+ info->hdmi_sspread_rate_in_10hz = le16_to_cpu(info_v9->usHDMISSpreadRateIn10Hz);
+ info->dvi_ss_percentage = le16_to_cpu(info_v9->usDVISSPercentage);
+ info->dvi_sspread_rate_in_10_hz = le16_to_cpu(info_v9->usDVISSpreadRateIn10Hz);
+
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v9->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_misc = info_v9->ucLvdsMisc;
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v9->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v9->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v9->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v9->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v9->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v9->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v9->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v9->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v9->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageID);
+ }
+
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v9->sExtDispConnInfo.ucGuid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceTag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v9->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v9->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v9->sExtDispConnInfo.sPath[i].ucChannelMapping;
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v9->sExtDispConnInfo.ucChecksum;
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ if (bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo) {
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ get_atom_data_table_revision(header, &revision);
+
+ /* Don't need to check major revision as they are all 1 */
+ switch (revision.minor) {
+ case 8:
+ result = get_integrated_info_v8(bp, info);
+ break;
+ case 9:
+ result = get_integrated_info_v9(bp, info);
+ break;
+ default:
+ return result;
+
+ }
+ }
+
+ /* Sort voltage table from low to high*/
+ if (result == BP_RESULT_OK) {
+ struct clock_voltage_caps temp = {0, 0};
+ uint32_t i;
+ uint32_t j;
+
+ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ for (j = i; j > 0; --j) {
+ if (
+ info->disp_clk_voltage[j].max_supported_clk <
+ info->disp_clk_voltage[j-1].max_supported_clk) {
+ /* swap j and j - 1*/
+ temp = info->disp_clk_voltage[j-1];
+ info->disp_clk_voltage[j-1] =
+ info->disp_clk_voltage[j];
+ info->disp_clk_voltage[j] = temp;
+ }
+ }
+ }
+
+ }
+
+ return result;
+}
+
+static struct integrated_info *bios_parser_create_integrated_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct integrated_info *info = NULL;
+
+ info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+ if (info == NULL) {
+ ASSERT_CRITICAL(0);
+ return NULL;
+ }
+
+ if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+ return info;
+
+ kfree(info);
+
+ return NULL;
+}
+
+/******************************************************************************/
+
+static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+ .get_encoder_id = bios_parser_get_encoder_id,
+
+ .get_connector_id = bios_parser_get_connector_id,
+
+ .get_dst_number = bios_parser_get_dst_number,
+
+ .get_src_obj = bios_parser_get_src_obj,
+
+ .get_dst_obj = bios_parser_get_dst_obj,
+
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+ .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+ .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+
+ .get_firmware_info = bios_parser_get_firmware_info,
+
+ .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+ .get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+ .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+ .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+ .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+ .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+ .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+ /* bios scratch register communication */
+ .is_accelerated_mode = bios_is_accelerated_mode,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+ .is_device_id_supported = bios_parser_is_device_id_supported,
+
+ /* COMMANDS */
+ .encoder_control = bios_parser_encoder_control,
+
+ .transmitter_control = bios_parser_transmitter_control,
+
+ .crt_control = bios_parser_crt_control, /* not used in DAL3. keep for now in case we need to support VGA on Bonaire */
+
+ .enable_crtc = bios_parser_enable_crtc,
+
+ .adjust_pixel_clock = bios_parser_adjust_pixel_clock,
+
+ .set_pixel_clock = bios_parser_set_pixel_clock,
+
+ .set_dce_clock = bios_parser_set_dce_clock,
+
+ .enable_spread_spectrum_on_ppll = bios_parser_enable_spread_spectrum_on_ppll,
+
+ .program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */
+
+ .crtc_source_select = bios_parser_crtc_source_select, /* still use. should probably retire and program directly */
+
+ .program_display_engine_pll = bios_parser_program_display_engine_pll,
+
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+ /* SW init and patch */
+ .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
+
+ .bios_parser_destroy = bios_parser_destroy,
+};
+
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ uint16_t *rom_header_offset = NULL;
+ ATOM_ROM_HEADER *rom_header = NULL;
+ ATOM_OBJECT_HEADER *object_info_tbl;
+ struct atom_data_revision tbl_rev = {0};
+
+ if (!init)
+ return false;
+
+ if (!init->bios)
+ return false;
+
+ bp->base.funcs = &vbios_funcs;
+ bp->base.bios = init->bios;
+ bp->base.bios_size = bp->base.bios[BIOS_IMAGE_SIZE_OFFSET] * BIOS_IMAGE_SIZE_UNIT;
+
+ bp->base.ctx = init->ctx;
+ bp->base.bios_local_image = NULL;
+
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);
+
+ if (!rom_header_offset)
+ return false;
+
+ rom_header = GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);
+
+ if (!rom_header)
+ return false;
+
+ get_atom_data_table_revision(&rom_header->sHeader, &tbl_rev);
+ if (tbl_rev.major >= 2 && tbl_rev.minor >= 2)
+ return false;
+
+ bp->master_data_tbl =
+ GET_IMAGE(ATOM_MASTER_DATA_TABLE,
+ rom_header->usMasterDataTableOffset);
+
+ if (!bp->master_data_tbl)
+ return false;
+
+ bp->object_info_tbl_offset = DATA_TABLES(Object_Header);
+
+ if (!bp->object_info_tbl_offset)
+ return false;
+
+ object_info_tbl =
+ GET_IMAGE(ATOM_OBJECT_HEADER, bp->object_info_tbl_offset);
+
+ if (!object_info_tbl)
+ return false;
+
+ get_atom_data_table_revision(&object_info_tbl->sHeader,
+ &bp->object_info_tbl.revision);
+
+ if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 3) {
+ ATOM_OBJECT_HEADER_V3 *tbl_v3;
+
+ tbl_v3 = GET_IMAGE(ATOM_OBJECT_HEADER_V3,
+ bp->object_info_tbl_offset);
+ if (!tbl_v3)
+ return false;
+
+ bp->object_info_tbl.v1_3 = tbl_v3;
+ } else if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 1)
+ bp->object_info_tbl.v1_1 = object_info_tbl;
+ else
+ return false;
+
+ dal_bios_parser_init_cmd_tbl(bp);
+ dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);
+
+ bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+ return true;
+}
+
+/******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
new file mode 100644
index 000000000000..d6f16275048f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_H__
+#define __DAL_BIOS_PARSER_H__
+
+struct dc_bios *bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
new file mode 100644
index 000000000000..1ee1717f2e6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -0,0 +1,1934 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "dc_bios_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table2.h"
+
+#include "bios_parser_helper.h"
+#include "command_table_helper2.h"
+#include "bios_parser2.h"
+#include "bios_parser_types_internal2.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+#define LAST_RECORD_TYPE 0xff
+
+
+struct i2c_id_config_access {
+ uint8_t bfI2C_LineMux:4;
+ uint8_t bfHW_EngineID:3;
+ uint8_t bfHW_Capable:1;
+ uint8_t ucAccess;
+};
+
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ struct atom_i2c_record *record,
+ struct graphics_object_i2c_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info);
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info);
+
+static enum bp_result get_firmware_info_v3_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
+static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object);
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
+
+
+static void destruct(struct bios_parser *bp)
+{
+ kfree(bp->base.bios_local_image);
+ kfree(bp->base.integrated_info);
+}
+
+static void firmware_parser_destroy(struct dc_bios **dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+ if (!bp) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ destruct(bp);
+
+ kfree(bp);
+ *dcb = NULL;
+}
+
+static void get_atom_data_table_revision(
+ struct atom_common_table_header *atom_data_tbl,
+ struct atom_data_revision *tbl_revision)
+{
+ if (!tbl_revision)
+ return;
+
+ /* initialize the revision to 0 which is invalid revision */
+ tbl_revision->major = 0;
+ tbl_revision->minor = 0;
+
+ if (!atom_data_tbl)
+ return;
+
+ tbl_revision->major =
+ (uint32_t) atom_data_tbl->format_revision & 0x3f;
+ tbl_revision->minor =
+ (uint32_t) atom_data_tbl->content_revision & 0x3f;
+}
+
+/* BIOS oject table displaypath is per connector.
+ * There is extra path not for connector. BIOS fill its encoderid as 0
+ */
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int count = 0;
+ unsigned int i;
+
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ if (bp->object_info_tbl.v1_4->display_path[i].encoderobjid != 0)
+ count++;
+ }
+ return count;
+}
+
+static struct graphics_object_id bios_parser_get_encoder_id(
+ struct dc_bios *dcb,
+ uint32_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ if (bp->object_info_tbl.v1_4->number_of_path > i)
+ object_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+
+ return object_id;
+}
+
+static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+ struct object_info_table *tbl = &bp->object_info_tbl;
+ struct display_object_info_table_v1_4 *v1_4 = tbl->v1_4;
+
+ if (v1_4->number_of_path > i) {
+ /* If display_objid is generic object id, the encoderObj
+ * /extencoderobjId should be 0
+ */
+ if (v1_4->display_path[i].encoderobjid != 0 &&
+ v1_4->display_path[i].display_objid != 0)
+ object_id = object_id_from_bios_object_id(
+ v1_4->display_path[i].display_objid);
+ }
+
+ return object_id;
+}
+
+
+/* TODO: GetNumberOfSrc*/
+
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+ struct graphics_object_id id)
+{
+ /* connector has 1 Dest, encoder has 0 Dest */
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ return 0;
+ case OBJECT_TYPE_CONNECTOR:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* removed getSrcObjList, getDestObjList*/
+
+
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int i;
+ enum bp_result bp_result = BP_RESULT_BADINPUT;
+ struct graphics_object_id obj_id = {0};
+ struct object_info_table *tbl = &bp->object_info_tbl;
+
+ if (!src_object_id)
+ return bp_result;
+
+ switch (object_id.type) {
+ /* Encoder's Source is GPU. BIOS does not provide GPU, since all
+ * displaypaths point to same GPU (0x1100). Hardcode GPU object type
+ */
+ case OBJECT_TYPE_ENCODER:
+ /* TODO: since num of src must be less than 2.
+ * If found in for loop, should break.
+ * DAL2 implementation may be changed too
+ */
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id ==
+ obj_id.enum_id) {
+ *src_object_id =
+ object_id_from_bios_object_id(0x1100);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ case OBJECT_TYPE_CONNECTOR:
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].display_objid);
+
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id == obj_id.enum_id) {
+ *src_object_id =
+ object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ default:
+ break;
+ }
+
+ return bp_result;
+}
+
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int i;
+ enum bp_result bp_result = BP_RESULT_BADINPUT;
+ struct graphics_object_id obj_id = {0};
+ struct object_info_table *tbl = &bp->object_info_tbl;
+
+ if (!dest_object_id)
+ return BP_RESULT_BADINPUT;
+
+ switch (object_id.type) {
+ case OBJECT_TYPE_ENCODER:
+ /* TODO: since num of src must be less than 2.
+ * If found in for loop, should break.
+ * DAL2 implementation may be changed too
+ */
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id ==
+ obj_id.enum_id) {
+ *dest_object_id =
+ object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].display_objid);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ default:
+ break;
+ }
+
+ return bp_result;
+}
+
+
+/* from graphics_object_id, find display path which includes the object_id */
+static struct atom_display_object_path_v2 *get_bios_object(
+ struct bios_parser *bp,
+ struct graphics_object_id id)
+{
+ unsigned int i;
+ struct graphics_object_id obj_id = {0};
+
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+ if (id.type == obj_id.type &&
+ id.id == obj_id.id &&
+ id.enum_id == obj_id.enum_id)
+ return
+ &bp->object_info_tbl.v1_4->display_path[i];
+ }
+ case OBJECT_TYPE_CONNECTOR:
+ case OBJECT_TYPE_GENERIC:
+ /* Both Generic and Connector Object ID
+ * will be stored on display_objid
+ */
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].display_objid
+ );
+ if (id.type == obj_id.type &&
+ id.id == obj_id.id &&
+ id.enum_id == obj_id.enum_id)
+ return
+ &bp->object_info_tbl.v1_4->display_path[i];
+ }
+ default:
+ return NULL;
+ }
+}
+
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ struct atom_display_object_path_v2 *object;
+ struct atom_common_record_header *header;
+ struct atom_i2c_record *record;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ offset = object->disp_recordoffset + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type == ATOM_I2C_RECORD_TYPE
+ && sizeof(struct atom_i2c_record) <=
+ header->record_size) {
+ /* get the I2C info */
+ record = (struct atom_i2c_record *) header;
+
+ if (get_gpio_i2c_info(bp, record, info) ==
+ BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+
+ offset += header->record_size;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_gpio_i2c_info(
+ struct bios_parser *bp,
+ struct atom_i2c_record *record,
+ struct graphics_object_i2c_info *info)
+{
+ struct atom_gpio_pin_lut_v2_1 *header;
+ uint32_t count = 0;
+ unsigned int table_index = 0;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* get the GPIO_I2C info */
+ if (!DATA_TABLES(gpio_pin_lut))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
+ DATA_TABLES(gpio_pin_lut));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(struct atom_common_table_header) +
+ sizeof(struct atom_gpio_pin_assignment) >
+ le16_to_cpu(header->table_header.structuresize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* TODO: is version change? */
+ if (header->table_header.content_revision != 1)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* get data count */
+ count = (le16_to_cpu(header->table_header.structuresize)
+ - sizeof(struct atom_common_table_header))
+ / sizeof(struct atom_gpio_pin_assignment);
+
+ table_index = record->i2c_id & I2C_HW_LANE_MUX;
+
+ if (count < table_index) {
+ bool find_valid = false;
+
+ for (table_index = 0; table_index < count; table_index++) {
+ if (((record->i2c_id & I2C_HW_CAP) == (
+ header->gpio_pin[table_index].gpio_id &
+ I2C_HW_CAP)) &&
+ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_ENGINE_ID_MASK)) &&
+ ((record->i2c_id & I2C_HW_LANE_MUX) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_LANE_MUX))) {
+ /* still valid */
+ find_valid = true;
+ break;
+ }
+ }
+ /* If we don't find the entry that we are looking for then
+ * we will return BP_Result_BadBiosTable.
+ */
+ if (find_valid == false)
+ return BP_RESULT_BADBIOSTABLE;
+ }
+
+ /* get the GPIO_I2C_INFO */
+ info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
+ info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
+ info->i2c_engine_id = (record->i2c_id & I2C_HW_ENGINE_ID_MASK) >> 4;
+ info->i2c_slave_address = record->i2c_slave_addr;
+
+ /* TODO: check how to get register offset for en, Y, etc. */
+ info->gpio_info.clk_a_register_index =
+ le16_to_cpu(
+ header->gpio_pin[table_index].data_a_reg_index);
+ info->gpio_info.clk_a_shift =
+ header->gpio_pin[table_index].gpio_bitshift;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_voltage_ddc_info_v4(
+ uint8_t *i2c_line,
+ uint32_t index,
+ struct atom_common_table_header *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ struct atom_voltage_objects_info_v4_1 *info =
+ (struct atom_voltage_objects_info_v4_1 *) address;
+
+ uint8_t *voltage_current_object =
+ (uint8_t *) (&(info->voltage_object[0]));
+
+ while ((address + le16_to_cpu(header->structuresize)) >
+ voltage_current_object) {
+ struct atom_i2c_voltage_object_v4 *object =
+ (struct atom_i2c_voltage_object_v4 *)
+ voltage_current_object;
+
+ if (object->header.voltage_mode ==
+ ATOM_INIT_VOLTAGE_REGULATOR) {
+ if (object->header.voltage_type == index) {
+ *i2c_line = object->i2c_id ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+ }
+
+ voltage_current_object +=
+ le16_to_cpu(object->header.object_size);
+ }
+ return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+ struct dc_bios *dcb,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct i2c_id_config_access *config;
+ struct atom_i2c_record record;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ config = (struct i2c_id_config_access *) &i2c_channel_id;
+
+ record.i2c_id = config->bfHW_Capable;
+ record.i2c_id |= config->bfI2C_LineMux;
+ record.i2c_id |= config->bfHW_EngineID;
+
+ return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+ uint32_t index,
+ struct graphics_object_i2c_info *info)
+{
+ uint8_t i2c_line = 0;
+ enum bp_result result = BP_RESULT_NORECORD;
+ uint8_t *voltage_info_address;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision = {0};
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!DATA_TABLES(voltageobject_info))
+ return result;
+
+ voltage_info_address = bios_get_image(&bp->base,
+ DATA_TABLES(voltageobject_info),
+ sizeof(struct atom_common_table_header));
+
+ header = (struct atom_common_table_header *) voltage_info_address;
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ if (revision.minor != 1)
+ break;
+ result = get_voltage_ddc_info_v4(&i2c_line, index, header,
+ voltage_info_address);
+ break;
+ }
+
+ if (result == BP_RESULT_OK)
+ result = bios_parser_get_thermal_ddc_info(dcb,
+ i2c_line, info);
+
+ return result;
+}
+
+static enum bp_result bios_parser_get_hpd_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+ struct atom_hpd_int_record *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_hpd_record(bp, object);
+
+ if (record != NULL) {
+ info->hpd_int_gpio_uid = record->pin_id;
+ info->hpd_active = record->plugin_pin_state;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static struct atom_hpd_int_record *get_hpd_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object)
+{
+ struct atom_common_record_header *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->disp_recordoffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return NULL;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type == ATOM_HPD_INT_RECORD_TYPE
+ && sizeof(struct atom_hpd_int_record) <=
+ header->record_size)
+ return (struct atom_hpd_int_record *) header;
+
+ offset += header->record_size;
+ }
+
+ return NULL;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GpioPin information of input gpio id
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure
+ * @return Bios parser result code
+ * @note
+ * to get the GPIO PIN INFO, we need:
+ * 1. get the GPIO_ID from other object table, see GetHPDInfo()
+ * 2. in DATA_TABLE.GPIO_Pin_LUT, search all records,
+ * to get the registerA offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+ struct dc_bios *dcb,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_gpio_pin_lut_v2_1 *header;
+ uint32_t count = 0;
+ uint32_t i = 0;
+
+ if (!DATA_TABLES(gpio_pin_lut))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
+ DATA_TABLES(gpio_pin_lut));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(struct atom_common_table_header) +
+ sizeof(struct atom_gpio_pin_lut_v2_1)
+ > le16_to_cpu(header->table_header.structuresize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (header->table_header.content_revision != 1)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* Temporary hard code gpio pin info */
+#if defined(FOR_SIMNOW_BOOT)
+ {
+ struct atom_gpio_pin_assignment gpio_pin[8] = {
+ {0x5db5, 0, 0, 1, 0},
+ {0x5db5, 8, 8, 2, 0},
+ {0x5db5, 0x10, 0x10, 3, 0},
+ {0x5db5, 0x18, 0x14, 4, 0},
+ {0x5db5, 0x1A, 0x18, 5, 0},
+ {0x5db5, 0x1C, 0x1C, 6, 0},
+ };
+
+ count = 6;
+ memmove(header->gpio_pin, gpio_pin, sizeof(gpio_pin));
+ }
+#else
+ count = (le16_to_cpu(header->table_header.structuresize)
+ - sizeof(struct atom_common_table_header))
+ / sizeof(struct atom_gpio_pin_assignment);
+#endif
+ for (i = 0; i < count; ++i) {
+ if (header->gpio_pin[i].gpio_id != gpio_id)
+ continue;
+
+ info->offset =
+ (uint32_t) le16_to_cpu(
+ header->gpio_pin[i].data_a_reg_index);
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask = (uint32_t) (1 <<
+ header->gpio_pin[i].gpio_bitshift);
+ info->mask_y = info->mask + 2;
+ info->mask_en = info->mask + 1;
+ info->mask_mask = info->mask - 1;
+
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+
+ struct device_id result_device_id;
+
+ result_device_id.raw_device_tag = device_id;
+
+ switch (device_id) {
+ case ATOM_DISPLAY_LCD1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DISPLAY_DFP1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DISPLAY_DFP2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DISPLAY_DFP3_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 3;
+ break;
+
+ case ATOM_DISPLAY_DFP4_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 4;
+ break;
+
+ case ATOM_DISPLAY_DFP5_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 5;
+ break;
+
+ case ATOM_DISPLAY_DFP6_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 6;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid device Id */
+ result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+ result_device_id.enum_id = 0;
+ }
+ return result_device_id;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+ struct dc_bios *dcb,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* getBiosObject will return MXM object */
+ object = get_bios_object(bp, connector_object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ info->acpi_device = 0; /* BIOS no longer provides this */
+ info->dev_id = device_type_from_device_id(object->device_tag);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v4_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ ss_info->spread_percentage_divider = 1000;
+ /* BIOS no longer uses target clock. Always enable for now */
+ ss_info->target_clock_range = 0xffffffff;
+
+ switch (id) {
+ case AS_SIGNAL_TYPE_DVI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dvi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->hdmi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ /* TODO LVDS not support anymore? */
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dp_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dp_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+ /* atom_firmware: DAL only get data from dce_info table.
+ * if data within smu_info is needed for DAL, VBIOS should
+ * copy it into dce_info
+ */
+ result = BP_RESULT_UNSUPPORTED;
+ break;
+ default:
+ result = BP_RESULT_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+static enum bp_result get_ss_info_v4_2(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
+ struct atom_smu_info_v3_1 *smu_info = NULL;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (!DATA_TABLES(smu_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ smu_info = GET_IMAGE(struct atom_smu_info_v3_1, DATA_TABLES(smu_info));
+ if (!smu_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ ss_info->spread_percentage_divider = 1000;
+ /* BIOS no longer uses target clock. Always enable for now */
+ ss_info->target_clock_range = 0xffffffff;
+
+ switch (id) {
+ case AS_SIGNAL_TYPE_DVI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dvi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->hdmi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ /* TODO LVDS not support anymore? */
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ ss_info->spread_spectrum_percentage =
+ smu_info->gpuclk_ss_percentage;
+ ss_info->spread_spectrum_range =
+ smu_info->gpuclk_ss_rate_10hz * 10;
+ if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+ /* atom_firmware: DAL only get data from dce_info table.
+ * if data within smu_info is needed for DAL, VBIOS should
+ * copy it into dce_info
+ */
+ result = BP_RESULT_UNSUPPORTED;
+ break;
+ default:
+ result = BP_RESULT_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info(ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with SS_Info table. Expect ASIC_InternalSS_Info
+ * ver 3.1,
+ * there is only one entry for each signal /ss id. However, there is
+ * no planning of supporting multiple spread Sprectum entry for EverGreen
+ * @param [in] this
+ * @param [in] signal, ASSignalType to be converted to info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, sprectrum information structure,
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+ struct dc_bios *dcb,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!ss_info) /* check for bad input */
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ switch (tbl_revision.major) {
+ case 4:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_ss_info_v4_1(bp, signal, index, ss_info);
+ case 2:
+ return get_ss_info_v4_2(bp, signal, index, ss_info);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ /* there can not be more then one entry for SS Info table */
+ return result;
+}
+
+static enum bp_result get_embedded_panel_info_v2_1(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ struct lcd_info_v2_1 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(lcd_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds = GET_IMAGE(struct lcd_info_v2_1, DATA_TABLES(lcd_info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* TODO: previous vv1_3, should v2_1 */
+ if (!((lvds->table_header.format_revision == 2)
+ && (lvds->table_header.content_revision >= 1)))
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units */
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->lcd_timing.pixclk) * 10;
+ /* usHActive does not include borders, according to VBIOS team */
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->lcd_timing.h_active);
+ /* usHBlanking_Time includes borders, so we should really be
+ * subtractingborders duing this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->lcd_timing.h_blanking_time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->lcd_timing.v_active);
+ /* usVBlanking_Time includes borders, so we should really be
+ * subtracting borders duing this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->lcd_timing.v_blanking_time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->lcd_timing.h_sync_offset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->lcd_timing.h_sync_width);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->lcd_timing.v_sync_offset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->lcd_timing.v_syncwidth);
+ info->lcd_timing.horizontal_border = lvds->lcd_timing.h_border;
+ info->lcd_timing.vertical_border = lvds->lcd_timing.v_border;
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
+
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ (lvds->lcd_timing.miscinfo & ATOM_HSYNC_POLARITY);
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ (lvds->lcd_timing.miscinfo & ATOM_VSYNC_POLARITY);
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
+
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ !!(lvds->lcd_timing.miscinfo & ATOM_H_REPLICATIONBY2);
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ !!(lvds->lcd_timing.miscinfo & ATOM_V_REPLICATIONBY2);
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ !!(lvds->lcd_timing.miscinfo & ATOM_COMPOSITESYNC);
+ info->lcd_timing.misc_info.INTERLACE =
+ !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE);
+
+ /* not provided by VBIOS*/
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = 0;
+ /* not provided by VBIOS*/
+ info->ss_id = 0;
+
+ info->realtek_eDPToLVDS =
+ !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!DATA_TABLES(lcd_info))
+ return BP_RESULT_FAILURE;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(lcd_info));
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ get_atom_data_table_revision(header, &tbl_revision);
+
+
+ switch (tbl_revision.major) {
+ case 2:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_embedded_panel_info_v2_1(bp, info);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+ enum dal_device_type device_type = device_id.device_type;
+ uint32_t enum_id = device_id.enum_id;
+
+ switch (device_type) {
+ case DEVICE_TYPE_LCD:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DISPLAY_LCD1_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_DFP:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DISPLAY_DFP1_SUPPORT;
+ case 2:
+ return ATOM_DISPLAY_DFP2_SUPPORT;
+ case 3:
+ return ATOM_DISPLAY_DFP3_SUPPORT;
+ case 4:
+ return ATOM_DISPLAY_DFP4_SUPPORT;
+ case 5:
+ return ATOM_DISPLAY_DFP5_SUPPORT;
+ case 6:
+ return ATOM_DISPLAY_DFP6_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ };
+
+ /* Unidentified device ID, return empty support mask. */
+ return 0;
+}
+
+static bool bios_parser_is_device_id_supported(
+ struct dc_bios *dcb,
+ struct device_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ uint32_t mask = get_support_mask_for_device_id(id);
+
+ return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) &
+ mask) != 0;
+}
+
+static void bios_parser_post_init(
+ struct dc_bios *dcb)
+{
+ /* TODO for OPM module. Need implement later */
+}
+
+static uint32_t bios_parser_get_ss_entry_number(
+ struct dc_bios *dcb,
+ enum as_signal_type signal)
+{
+ /* TODO: DAL2 atomfirmware implementation does not need this.
+ * why DAL3 need this?
+ */
+ return 1;
+}
+
+static enum bp_result bios_parser_transmitter_control(
+ struct dc_bios *dcb,
+ struct bp_transmitter_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.transmitter_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+ struct dc_bios *dcb,
+ struct bp_encoder_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.dig_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_pixel_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+ struct dc_bios *dcb,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_dce_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static unsigned int bios_parser_get_smu_clock_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.get_smu_clock_info)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.get_smu_clock_info(bp);
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+ struct dc_bios *dcb,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_crtc_timing)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_crtc(
+ struct dc_bios *dcb,
+ enum controller_id id,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_crtc)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+ struct dc_bios *dcb,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_disp_power_gating)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+ action);
+}
+
+static bool bios_parser_is_accelerated_mode(
+ struct dc_bios *dcb)
+{
+ return bios_is_accelerated_mode(dcb);
+}
+
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ * update critical state bit in VBIOS scratch register
+ *
+ * @param
+ * bool - to set or reset state
+ */
+static void bios_parser_set_scratch_critical_state(
+ struct dc_bios *dcb,
+ bool state)
+{
+ bios_set_scratch_critical_state(dcb, state);
+}
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ struct atom_common_table_header *header;
+
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(firmwareinfo)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(firmwareinfo));
+ get_atom_data_table_revision(header, &revision);
+ switch (revision.major) {
+ case 3:
+ switch (revision.minor) {
+ case 1:
+ result = get_firmware_info_v3_1(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return result;
+}
+
+static enum bp_result get_firmware_info_v3_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_1 *firmware_info;
+ struct atom_display_controller_info_v4_1 *dce_info = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_1,
+ DATA_TABLES(firmwareinfo));
+
+ dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!firmware_info || !dce_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. */
+ /* We need to convert from 10KHz units into KHz units */
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+ info->default_engine_clk = firmware_info->bootup_sclk_in10khz * 10;
+
+ /* 27MHz for Vega10: */
+ info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+
+ /* Hardcode frequency if BIOS gives no DCE Ref Clk */
+ if (info->pll_info.crystal_frequency == 0)
+ info->pll_info.crystal_frequency = 27000;
+ /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
+ info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+ /* Get GPU PLL VCO Clock */
+
+ if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+ /* VBIOS gives in 10KHz */
+ info->smu_gpu_pll_output_freq =
+ bp->cmd_tbl.get_smu_clock_info(bp) * 10;
+ }
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+ struct atom_encoder_caps_record *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_encoder_cap_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->DP_HBR2_CAP = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR2) ? 1 : 0;
+ info->DP_HBR2_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR2_EN) ? 1 : 0;
+ info->DP_HBR3_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
+ info->HDMI_6GB_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+
+ return BP_RESULT_OK;
+}
+
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object)
+{
+ struct atom_common_record_header *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = object->encoder_recordoffset + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return NULL;
+
+ offset += header->record_size;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type != ATOM_ENCODER_CAP_RECORD_TYPE)
+ continue;
+
+ if (sizeof(struct atom_encoder_caps_record) <=
+ header->record_size)
+ return (struct atom_encoder_caps_record *)header;
+ }
+
+ return NULL;
+}
+
+/*
+ * get_integrated_info_v11
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v11(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ struct atom_integrated_system_info_v1_11 *info_v11;
+ uint32_t i;
+
+ info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
+ DATA_TABLES(integratedsysteminfo));
+
+ if (info_v11 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->gpu_cap_info =
+ le32_to_cpu(info_v11->gpucapinfo);
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v11->system_config);
+ info->cpu_cap_info = le32_to_cpu(info_v11->cpucapinfo);
+ info->memory_type = info_v11->memorytype;
+ info->ma_channel_number = info_v11->umachannelnumber;
+ info->lvds_ss_percentage =
+ le16_to_cpu(info_v11->lvds_ss_percentage);
+ info->lvds_sspread_rate_in_10hz =
+ le16_to_cpu(info_v11->lvds_ss_rate_10hz);
+ info->hdmi_ss_percentage =
+ le16_to_cpu(info_v11->hdmi_ss_percentage);
+ info->hdmi_sspread_rate_in_10hz =
+ le16_to_cpu(info_v11->hdmi_ss_rate_10hz);
+ info->dvi_ss_percentage =
+ le16_to_cpu(info_v11->dvi_ss_percentage);
+ info->dvi_sspread_rate_in_10_hz =
+ le16_to_cpu(info_v11->dvi_ss_rate_10hz);
+ info->lvds_misc = info_v11->lvds_misc;
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v11->extdispconninfo.guid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v11->extdispconninfo.path[i].connectorobjid));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].ext_encoder_objid));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].device_tag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].device_acpi_enum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v11->extdispconninfo.path[i].auxddclut_index;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v11->extdispconninfo.path[i].hpdlut_index;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v11->extdispconninfo.path[i].channelmapping;
+ info->ext_disp_conn_info.path[i].caps =
+ le16_to_cpu(info_v11->extdispconninfo.path[i].caps);
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v11->extdispconninfo.checksum;
+
+ info->dp0_ext_hdmi_slv_addr = info_v11->dp0_retimer_set.HdmiSlvAddr;
+ info->dp0_ext_hdmi_reg_num = info_v11->dp0_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp0_ext_hdmi_reg_num; i++) {
+ info->dp0_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp0_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp0_ext_hdmi_6g_reg_num = info_v11->dp0_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp0_ext_hdmi_6g_reg_num; i++) {
+ info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp1_ext_hdmi_slv_addr = info_v11->dp1_retimer_set.HdmiSlvAddr;
+ info->dp1_ext_hdmi_reg_num = info_v11->dp1_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp1_ext_hdmi_reg_num; i++) {
+ info->dp1_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp1_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp1_ext_hdmi_6g_reg_num = info_v11->dp1_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp1_ext_hdmi_6g_reg_num; i++) {
+ info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp2_ext_hdmi_slv_addr = info_v11->dp2_retimer_set.HdmiSlvAddr;
+ info->dp2_ext_hdmi_reg_num = info_v11->dp2_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp2_ext_hdmi_reg_num; i++) {
+ info->dp2_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp2_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp2_ext_hdmi_6g_reg_num = info_v11->dp2_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp2_ext_hdmi_6g_reg_num; i++) {
+ info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp3_ext_hdmi_slv_addr = info_v11->dp3_retimer_set.HdmiSlvAddr;
+ info->dp3_ext_hdmi_reg_num = info_v11->dp3_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp3_ext_hdmi_reg_num; i++) {
+ info->dp3_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp3_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp3_ext_hdmi_6g_reg_num = info_v11->dp3_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp3_ext_hdmi_6g_reg_num; i++) {
+ info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+
+ /** TODO - review **/
+ #if 0
+ info->boot_up_engine_clock = le32_to_cpu(info_v11->ulBootUpEngineClock)
+ * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v11->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v11->sDISPCLK_Voltage[i].
+ ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v11->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v11->ulBootUpReqDisplayVector);
+ info->boot_up_nb_voltage =
+ le16_to_cpu(info_v11->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset =
+ le16_to_cpu(info_v11->usExtDispConnInfoOffset);
+ info->gmc_restore_reset_time =
+ le32_to_cpu(info_v11->ulGMCRestoreResetTime);
+ info->minimum_n_clk =
+ le32_to_cpu(info_v11->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk <
+ le32_to_cpu(info_v11->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(
+ info_v11->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v11->ulIdleNClk);
+ info->ddr_dll_power_up_time =
+ le32_to_cpu(info_v11->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time =
+ le32_to_cpu(info_v11->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v11->usPCIEClkSSType);
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v11->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v11->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v11->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v11->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v11->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v11->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v11->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v11->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v11->sAvail_SCLK[i].ulSupportedSCLK)
+ * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageID);
+ }
+ #endif /* TODO*/
+
+ return BP_RESULT_OK;
+}
+
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+
+ struct clock_voltage_caps temp = {0, 0};
+ uint32_t i;
+ uint32_t j;
+
+ if (info && DATA_TABLES(integratedsysteminfo)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(integratedsysteminfo));
+
+ get_atom_data_table_revision(header, &revision);
+
+ /* Don't need to check major revision as they are all 1 */
+ switch (revision.minor) {
+ case 11:
+ result = get_integrated_info_v11(bp, info);
+ break;
+ default:
+ return result;
+ }
+ }
+
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* Sort voltage table from low to high*/
+ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ for (j = i; j > 0; --j) {
+ if (info->disp_clk_voltage[j].max_supported_clk <
+ info->disp_clk_voltage[j-1].max_supported_clk
+ ) {
+ /* swap j and j - 1*/
+ temp = info->disp_clk_voltage[j-1];
+ info->disp_clk_voltage[j-1] =
+ info->disp_clk_voltage[j];
+ info->disp_clk_voltage[j] = temp;
+ }
+ }
+ }
+
+ return result;
+}
+
+static struct integrated_info *bios_parser_create_integrated_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct integrated_info *info = NULL;
+
+ info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+ if (info == NULL) {
+ ASSERT_CRITICAL(0);
+ return NULL;
+ }
+
+ if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+ return info;
+
+ kfree(info);
+
+ return NULL;
+}
+
+static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+ .get_encoder_id = bios_parser_get_encoder_id,
+
+ .get_connector_id = bios_parser_get_connector_id,
+
+ .get_dst_number = bios_parser_get_dst_number,
+
+ .get_src_obj = bios_parser_get_src_obj,
+
+ .get_dst_obj = bios_parser_get_dst_obj,
+
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+ .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+ .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+
+ .get_firmware_info = bios_parser_get_firmware_info,
+
+ .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+ .get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+ .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+ .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+ .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+ .is_device_id_supported = bios_parser_is_device_id_supported,
+
+
+
+ .is_accelerated_mode = bios_parser_is_accelerated_mode,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+
+/* COMMANDS */
+ .encoder_control = bios_parser_encoder_control,
+
+ .transmitter_control = bios_parser_transmitter_control,
+
+ .enable_crtc = bios_parser_enable_crtc,
+
+ .set_pixel_clock = bios_parser_set_pixel_clock,
+
+ .set_dce_clock = bios_parser_set_dce_clock,
+
+ .program_crtc_timing = bios_parser_program_crtc_timing,
+
+ /* .blank_crtc = bios_parser_blank_crtc, */
+
+ .crtc_source_select = bios_parser_crtc_source_select,
+
+ /* .external_encoder_control = bios_parser_external_encoder_control, */
+
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+ .post_init = bios_parser_post_init,
+
+ .bios_parser_destroy = firmware_parser_destroy,
+
+ .get_smu_clock_info = bios_parser_get_smu_clock_info,
+};
+
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ uint16_t *rom_header_offset = NULL;
+ struct atom_rom_header_v2_2 *rom_header = NULL;
+ struct display_object_info_table_v1_4 *object_info_tbl;
+ struct atom_data_revision tbl_rev = {0};
+
+ if (!init)
+ return false;
+
+ if (!init->bios)
+ return false;
+
+ bp->base.funcs = &vbios_funcs;
+ bp->base.bios = init->bios;
+ bp->base.bios_size = bp->base.bios[OFFSET_TO_ATOM_ROM_IMAGE_SIZE] * BIOS_IMAGE_SIZE_UNIT;
+
+ bp->base.ctx = init->ctx;
+
+ bp->base.bios_local_image = NULL;
+
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ if (!rom_header_offset)
+ return false;
+
+ rom_header = GET_IMAGE(struct atom_rom_header_v2_2, *rom_header_offset);
+
+ if (!rom_header)
+ return false;
+
+ get_atom_data_table_revision(&rom_header->table_header, &tbl_rev);
+ if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 2))
+ return false;
+
+ bp->master_data_tbl =
+ GET_IMAGE(struct atom_master_data_table_v2_1,
+ rom_header->masterdatatable_offset);
+
+ if (!bp->master_data_tbl)
+ return false;
+
+ bp->object_info_tbl_offset = DATA_TABLES(displayobjectinfo);
+
+ if (!bp->object_info_tbl_offset)
+ return false;
+
+ object_info_tbl =
+ GET_IMAGE(struct display_object_info_table_v1_4,
+ bp->object_info_tbl_offset);
+
+ if (!object_info_tbl)
+ return false;
+
+ get_atom_data_table_revision(&object_info_tbl->table_header,
+ &bp->object_info_tbl.revision);
+
+ if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 4) {
+ struct display_object_info_table_v1_4 *tbl_v1_4;
+
+ tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
+ bp->object_info_tbl_offset);
+ if (!tbl_v1_4)
+ return false;
+
+ bp->object_info_tbl.v1_4 = tbl_v1_4;
+ } else
+ return false;
+
+ dal_firmware_parser_init_cmd_tbl(bp);
+ dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
+
+ bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+ return true;
+}
+
+struct dc_bios *firmware_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct bios_parser *bp = NULL;
+
+ bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+ if (!bp)
+ return NULL;
+
+ if (bios_parser_construct(bp, init, dce_version))
+ return &bp->base;
+
+ kfree(bp);
+ return NULL;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h
new file mode 100644
index 000000000000..cb40546cdafe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER2_H__
+#define __DAL_BIOS_PARSER2_H__
+
+struct dc_bios *firmware_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
new file mode 100644
index 000000000000..a8cb039d2572
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "bios_parser_common.h"
+#include "include/grph_object_ctrl_defs.h"
+
+static enum object_type object_type_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_object_type = (bios_object_id & OBJECT_TYPE_MASK)
+ >> OBJECT_TYPE_SHIFT;
+ enum object_type object_type;
+
+ switch (bios_object_type) {
+ case GRAPH_OBJECT_TYPE_GPU:
+ object_type = OBJECT_TYPE_GPU;
+ break;
+ case GRAPH_OBJECT_TYPE_ENCODER:
+ object_type = OBJECT_TYPE_ENCODER;
+ break;
+ case GRAPH_OBJECT_TYPE_CONNECTOR:
+ object_type = OBJECT_TYPE_CONNECTOR;
+ break;
+ case GRAPH_OBJECT_TYPE_ROUTER:
+ object_type = OBJECT_TYPE_ROUTER;
+ break;
+ case GRAPH_OBJECT_TYPE_GENERIC:
+ object_type = OBJECT_TYPE_GENERIC;
+ break;
+ default:
+ object_type = OBJECT_TYPE_UNKNOWN;
+ break;
+ }
+
+ return object_type;
+}
+
+static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_enum_id =
+ (bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ enum object_enum_id id;
+
+ switch (bios_enum_id) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ id = ENUM_ID_1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ id = ENUM_ID_2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ id = ENUM_ID_3;
+ break;
+ case GRAPH_OBJECT_ENUM_ID4:
+ id = ENUM_ID_4;
+ break;
+ case GRAPH_OBJECT_ENUM_ID5:
+ id = ENUM_ID_5;
+ break;
+ case GRAPH_OBJECT_ENUM_ID6:
+ id = ENUM_ID_6;
+ break;
+ case GRAPH_OBJECT_ENUM_ID7:
+ id = ENUM_ID_7;
+ break;
+ default:
+ id = ENUM_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ return (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+}
+
+static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_encoder_id = gpu_id_from_bios_object_id(bios_object_id);
+ enum encoder_id id;
+
+ switch (bios_encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ id = ENCODER_ID_INTERNAL_LVDS;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ id = ENCODER_ID_INTERNAL_TMDS1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS2:
+ id = ENCODER_ID_INTERNAL_TMDS2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ id = ENCODER_ID_INTERNAL_DAC1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ id = ENCODER_ID_INTERNAL_DAC2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ id = ENCODER_ID_INTERNAL_LVTM1;
+ break;
+ case ENCODER_OBJECT_ID_HDMI_INTERNAL:
+ id = ENCODER_ID_INTERNAL_HDMI;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ id = ENCODER_ID_INTERNAL_KLDSCP_TMDS1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ id = ENCODER_ID_INTERNAL_KLDSCP_DAC1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ id = ENCODER_ID_INTERNAL_KLDSCP_DAC2;
+ break;
+ case ENCODER_OBJECT_ID_MVPU_FPGA:
+ id = ENCODER_ID_EXTERNAL_MVPU_FPGA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ id = ENCODER_ID_INTERNAL_DDI;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ id = ENCODER_ID_INTERNAL_UNIPHY;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ id = ENCODER_ID_INTERNAL_KLDSCP_LVTMA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ id = ENCODER_ID_INTERNAL_UNIPHY1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ id = ENCODER_ID_INTERNAL_UNIPHY2;
+ break;
+ case ENCODER_OBJECT_ID_ALMOND: /* ENCODER_OBJECT_ID_NUTMEG */
+ id = ENCODER_ID_EXTERNAL_NUTMEG;
+ break;
+ case ENCODER_OBJECT_ID_TRAVIS:
+ id = ENCODER_ID_EXTERNAL_TRAVIS;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ id = ENCODER_ID_INTERNAL_UNIPHY3;
+ break;
+ default:
+ id = ENCODER_ID_UNKNOWN;
+ ASSERT(0);
+ break;
+ }
+
+ return id;
+}
+
+static enum connector_id connector_id_from_bios_object_id(
+ uint32_t bios_object_id)
+{
+ uint32_t bios_connector_id = gpu_id_from_bios_object_id(bios_object_id);
+
+ enum connector_id id;
+
+ switch (bios_connector_id) {
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ id = CONNECTOR_ID_SINGLE_LINK_DVII;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ id = CONNECTOR_ID_DUAL_LINK_DVII;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ id = CONNECTOR_ID_SINGLE_LINK_DVID;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ id = CONNECTOR_ID_DUAL_LINK_DVID;
+ break;
+ case CONNECTOR_OBJECT_ID_VGA:
+ id = CONNECTOR_ID_VGA;
+ break;
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ id = CONNECTOR_ID_HDMI_TYPE_A;
+ break;
+ case CONNECTOR_OBJECT_ID_LVDS:
+ id = CONNECTOR_ID_LVDS;
+ break;
+ case CONNECTOR_OBJECT_ID_PCIE_CONNECTOR:
+ id = CONNECTOR_ID_PCIE;
+ break;
+ case CONNECTOR_OBJECT_ID_HARDCODE_DVI:
+ id = CONNECTOR_ID_HARDCODE_DVI;
+ break;
+ case CONNECTOR_OBJECT_ID_DISPLAYPORT:
+ id = CONNECTOR_ID_DISPLAY_PORT;
+ break;
+ case CONNECTOR_OBJECT_ID_eDP:
+ id = CONNECTOR_ID_EDP;
+ break;
+ case CONNECTOR_OBJECT_ID_MXM:
+ id = CONNECTOR_ID_MXM;
+ break;
+ default:
+ id = CONNECTOR_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_generic_id = gpu_id_from_bios_object_id(bios_object_id);
+
+ enum generic_id id;
+
+ switch (bios_generic_id) {
+ case GENERIC_OBJECT_ID_MXM_OPM:
+ id = GENERIC_ID_MXM_OPM;
+ break;
+ case GENERIC_OBJECT_ID_GLSYNC:
+ id = GENERIC_ID_GLSYNC;
+ break;
+ case GENERIC_OBJECT_ID_STEREO_PIN:
+ id = GENERIC_ID_STEREO;
+ break;
+ default:
+ id = GENERIC_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static uint32_t id_from_bios_object_id(enum object_type type,
+ uint32_t bios_object_id)
+{
+ switch (type) {
+ case OBJECT_TYPE_GPU:
+ return gpu_id_from_bios_object_id(bios_object_id);
+ case OBJECT_TYPE_ENCODER:
+ return (uint32_t)encoder_id_from_bios_object_id(bios_object_id);
+ case OBJECT_TYPE_CONNECTOR:
+ return (uint32_t)connector_id_from_bios_object_id(
+ bios_object_id);
+ case OBJECT_TYPE_GENERIC:
+ return generic_id_from_bios_object_id(bios_object_id);
+ default:
+ return 0;
+ }
+}
+
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ enum object_type type;
+ enum object_enum_id enum_id;
+ struct graphics_object_id go_id = { 0 };
+
+ type = object_type_from_bios_object_id(bios_object_id);
+
+ if (OBJECT_TYPE_UNKNOWN == type)
+ return go_id;
+
+ enum_id = enum_id_from_bios_object_id(bios_object_id);
+
+ if (ENUM_ID_UNKNOWN == enum_id)
+ return go_id;
+
+ go_id = dal_graphics_object_id_init(
+ id_from_bios_object_id(type, bios_object_id), enum_id, type);
+
+ return go_id;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h
new file mode 100644
index 000000000000..a076c61dfae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __BIOS_PARSER_COMMON_H__
+#define __BIOS_PARSER_COMMON_H__
+
+#include "dm_services.h"
+#include "ObjectID.h"
+
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
new file mode 100644
index 000000000000..5c9e5108c32c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "command_table.h"
+#include "bios_parser_types_internal.h"
+
+uint8_t *bios_get_image(struct dc_bios *bp,
+ uint32_t offset,
+ uint32_t size)
+{
+ if (bp->bios && offset + size < bp->bios_size)
+ return bp->bios + offset;
+ else
+ return NULL;
+}
+
+#include "reg_helper.h"
+
+#define CTX \
+ bios->ctx
+#define REG(reg)\
+ (bios->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name
+
+bool bios_is_accelerated_mode(
+ struct dc_bios *bios)
+{
+ uint32_t acc_mode;
+ REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode);
+ return (acc_mode == 1);
+}
+
+
+void bios_set_scratch_acc_mode_change(
+ struct dc_bios *bios)
+{
+ REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 1);
+}
+
+
+void bios_set_scratch_critical_state(
+ struct dc_bios *bios,
+ bool state)
+{
+ uint32_t critial_state = state ? 1 : 0;
+ REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state);
+}
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
new file mode 100644
index 000000000000..c0047efeb006
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_HELPER_H__
+#define __DAL_BIOS_PARSER_HELPER_H__
+
+struct bios_parser;
+
+uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
+ uint32_t size);
+
+bool bios_is_accelerated_mode(struct dc_bios *bios);
+void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
+void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
+
+#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
new file mode 100644
index 000000000000..0079a1e26efd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+
+#include "bios_parser_interface.h"
+#include "bios_parser.h"
+
+#include "bios_parser2.h"
+
+
+struct dc_bios *dal_bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct dc_bios *bios = NULL;
+
+ bios = firmware_parser_create(init, dce_version);
+
+ /* Fall back to old bios parser for older asics */
+ if (bios == NULL)
+ bios = bios_parser_create(init, dce_version);
+
+ return bios;
+}
+
+void dal_bios_parser_destroy(struct dc_bios **dcb)
+{
+ struct dc_bios *bios = *dcb;
+
+ bios->funcs->bios_parser_destroy(dcb);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
new file mode 100644
index 000000000000..5918923bfb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+struct atom_data_revision {
+ uint32_t major;
+ uint32_t minor;
+};
+
+struct object_info_table {
+ struct atom_data_revision revision;
+ union {
+ ATOM_OBJECT_HEADER *v1_1;
+ ATOM_OBJECT_HEADER_V3 *v1_3;
+ };
+};
+
+enum spread_spectrum_id {
+ SS_ID_UNKNOWN = 0,
+ SS_ID_DP1 = 0xf1,
+ SS_ID_DP2 = 0xf2,
+ SS_ID_LVLINK_2700MHZ = 0xf3,
+ SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+struct bios_parser {
+ struct dc_bios base;
+
+ struct object_info_table object_info_tbl;
+ uint32_t object_info_tbl_offset;
+ ATOM_MASTER_DATA_TABLE *master_data_tbl;
+
+ const struct bios_parser_helper *bios_helper;
+
+ const struct command_table_helper *cmd_helper;
+ struct cmd_tbl cmd_tbl;
+
+ bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+ container_of(dc_bios, struct bios_parser, base)
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h
new file mode 100644
index 000000000000..bf1f5c86e65c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+/* use atomfirmware_bringup.h only. Not atombios.h anymore */
+
+struct atom_data_revision {
+ uint32_t major;
+ uint32_t minor;
+};
+
+struct object_info_table {
+ struct atom_data_revision revision;
+ union {
+ struct display_object_info_table_v1_4 *v1_4;
+ };
+};
+
+enum spread_spectrum_id {
+ SS_ID_UNKNOWN = 0,
+ SS_ID_DP1 = 0xf1,
+ SS_ID_DP2 = 0xf2,
+ SS_ID_LVLINK_2700MHZ = 0xf3,
+ SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+struct bios_parser {
+ struct dc_bios base;
+
+ struct object_info_table object_info_tbl;
+ uint32_t object_info_tbl_offset;
+ struct atom_master_data_table_v2_1 *master_data_tbl;
+
+
+ const struct bios_parser_helper *bios_helper;
+
+ const struct command_table_helper *cmd_helper;
+ struct cmd_tbl cmd_tbl;
+
+ bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+ container_of(dc_bios, struct bios_parser, base)
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
new file mode 100644
index 000000000000..3f7b2dabc2b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -0,0 +1,2424 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table.h"
+#include "command_table_helper.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal.h"
+
+#define EXEC_BIOS_CMD_TABLE(command, params)\
+ (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command), \
+ &params) == 0)
+
+#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
+ cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
+
+#define BIOS_CMD_TABLE_PARA_REVISION(command)\
+ bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
+static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_output_control(struct bios_parser *bp);
+static void init_set_crtc_timing(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+static void init_enable_crtc_mem_req(struct bios_parser *bp);
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_program_clock(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+ init_enable_spread_spectrum_on_ppll(bp);
+ init_adjust_display_pll(bp);
+ init_dac_encoder_control(bp);
+ init_dac_output_control(bp);
+ init_set_crtc_timing(bp);
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+ init_enable_crtc_mem_req(bp);
+ init_program_clock(bp);
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+}
+
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+ uint32_t index)
+{
+ uint8_t frev, crev;
+
+ if (cgs_atom_get_cmd_table_revs(cgs_device,
+ index,
+ &frev, &crev) != 0)
+ return 0;
+ return crev;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** D I G E N C O D E R C O N T R O L
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result encoder_control_digx_v3(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v4(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp);
+
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+ uint32_t version =
+ BIOS_CMD_TABLE_PARA_REVISION(DIGxEncoderControl);
+
+ switch (version) {
+ case 2:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v3;
+ break;
+ case 4:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v4;
+ break;
+
+ case 5:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v5;
+ break;
+
+ default:
+ init_encoder_control_dig_v1(bp);
+ break;
+ }
+}
+
+static enum bp_result encoder_control_dig_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig1_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig2_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp)
+{
+ struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+ if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG1EncoderControl))
+ cmd_tbl->encoder_control_dig1 = encoder_control_dig1_v1;
+ else
+ cmd_tbl->encoder_control_dig1 = NULL;
+
+ if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG2EncoderControl))
+ cmd_tbl->encoder_control_dig2 = encoder_control_dig2_v1;
+ else
+ cmd_tbl->encoder_control_dig2 = NULL;
+
+ cmd_tbl->dig_encoder_control = encoder_control_dig_v1;
+}
+
+static enum bp_result encoder_control_dig_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+ if (cntl != NULL)
+ switch (cntl->engine_id) {
+ case ENGINE_ID_DIGA:
+ if (cmd_tbl->encoder_control_dig1 != NULL)
+ result =
+ cmd_tbl->encoder_control_dig1(bp, cntl);
+ break;
+ case ENGINE_ID_DIGB:
+ if (cmd_tbl->encoder_control_dig2 != NULL)
+ result =
+ cmd_tbl->encoder_control_dig2(bp, cntl);
+ break;
+
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum bp_result encoder_control_dig1_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+ bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+ if (EXEC_BIOS_CMD_TABLE(DIG1EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_dig2_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+ bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+ if (EXEC_BIOS_CMD_TABLE(DIG2EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v3(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 params = {0};
+
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+ else
+ params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+ params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+ params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ params.ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio);
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v4(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 params = {0};
+
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+ else
+ params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+ params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+ params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ params.ucEncoderMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENCODER_STREAM_SETUP_PARAMETERS_V5 params = {0};
+
+ params.ucDigId = (uint8_t)(cntl->engine_id);
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+ params.ulPixelClock = cntl->pixel_clock / 10;
+ params.ucDigMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.ulPixelClock =
+ (params.ulPixelClock * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ulPixelClock =
+ (params.ulPixelClock * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ulPixelClock =
+ (params.ulPixelClock * 48) / 24;
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** TRANSMITTER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result transmitter_control_v2(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v3(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v4(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_5(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
+static void init_transmitter_control(struct bios_parser *bp)
+{
+ uint8_t frev;
+ uint8_t crev;
+
+ if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
+ frev, crev) != 0)
+ BREAK_TO_DEBUGGER();
+ switch (crev) {
+ case 2:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v3;
+ break;
+ case 4:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v4;
+ break;
+ case 5:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_5;
+ break;
+ case 6:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+ break;
+ default:
+ bp->cmd_tbl.transmitter_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result transmitter_control_v2(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 params;
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
+ /* on INIT this bit should be set according to the
+ * phisycal connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+ /* votage swing and pre-emphsis */
+ params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+ params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+ break;
+ default:
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number) {
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ } else
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
+ /* Bit4: DP connector flag
+ * =0 connector is none-DP connector
+ * =1 connector is DP connector
+ */
+ params.acConfig.fDPConnector = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)bp->cmd_helper->transmitter_bp_to_atom(
+ cntl->transmitter);
+
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v3(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 params;
+ uint32_t pll_id;
+ enum connector_id conn_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ bool dual_link_conn = (CONNECTOR_ID_DUAL_LINK_DVII == conn_id)
+ || (CONNECTOR_ID_DUAL_LINK_DVID == conn_id);
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (!cmd->clock_source_id_to_atom(cntl->pll_id, &pll_id))
+ return BP_RESULT_BADINPUT;
+
+ /* fill information based on the action */
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ if (dual_link_conn) {
+ /* on INIT this bit should be set according to the
+ * phisycal connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+ }
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+ /* votage swing and pre-emphsis */
+ params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+ params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+ break;
+ default:
+ if (dual_link_conn && cntl->multi_path)
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number) {
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ } else {
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ }
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)cmd->transmitter_bp_to_atom(cntl->transmitter);
+
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+
+ params.acConfig.ucRefClkSource = (uint8_t)pll_id;
+
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v4(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 params;
+ uint32_t ref_clk_src_id;
+ enum connector_id conn_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (!cmd->clock_source_id_to_ref_clk_src(cntl->pll_id, &ref_clk_src_id))
+ return BP_RESULT_BADINPUT;
+
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ {
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+ /* on INIT this bit should be set according to the
+ * phisycal connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+ }
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+ /* votage swing and pre-emphsis */
+ params.asMode.ucLaneSel = (uint8_t)(cntl->lane_select);
+ params.asMode.ucLaneSet = (uint8_t)(cntl->lane_settings);
+ break;
+ default:
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ else {
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ }
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)(cmd->transmitter_bp_to_atom(cntl->transmitter));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ params.acConfig.ucRefClkSource = (uint8_t)(ref_clk_src_id);
+ params.ucAction = (uint8_t)(cntl->action);
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v1_5(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 params;
+
+ memset(&params, 0, sizeof(params));
+ params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+ params.ucAction = (uint8_t)cntl->action;
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+ params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+
+ params.ucDigMode =
+ cmd->signal_type_to_atom_dig_mode(cntl->signal);
+ params.asConfig.ucPhyClkSrcId =
+ cmd->clock_source_id_to_atom_phy_clk_src_id(cntl->pll_id);
+ /* 00 - coherent mode */
+ params.asConfig.ucCoherentMode = cntl->coherent;
+ params.asConfig.ucHPDSel =
+ cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ params.ucDigEncoderSel =
+ cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ params.ucDPLaneSet = (uint8_t) cntl->lane_settings;
+ params.usSymClock = cpu_to_le16((uint16_t) (cntl->pixel_clock / 10));
+ /*
+ * In SI/TN case, caller have to set usPixelClock as following:
+ * DP mode: usPixelClock = DP_LINK_CLOCK/10
+ * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+ * DVI single link mode: usPixelClock = pixel clock
+ * DVI dual link mode: usPixelClock = pixel clock
+ * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ * LVDS mode: usPixelClock = pixel clock
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 params;
+
+ memset(&params, 0, sizeof(params));
+ params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+ params.ucDPLaneSet = (uint8_t)cntl->lane_settings;
+ else
+ params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+ params.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+ params.ulSymClock = cntl->pixel_clock/10;
+
+ /*
+ * In SI/TN case, caller have to set usPixelClock as following:
+ * DP mode: usPixelClock = DP_LINK_CLOCK/10
+ * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+ * DVI single link mode: usPixelClock = pixel clock
+ * DVI dual link mode: usPixelClock = pixel clock
+ * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ * LVDS mode: usPixelClock = pixel clock
+ */
+ switch (cntl->signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.ulSymClock =
+ cpu_to_le16((le16_to_cpu(params.ulSymClock) * 30) / 24);
+ break;
+ case COLOR_DEPTH_121212:
+ params.ulSymClock =
+ cpu_to_le16((le16_to_cpu(params.ulSymClock) * 36) / 24);
+ break;
+ case COLOR_DEPTH_161616:
+ params.ulSymClock =
+ cpu_to_le16((le16_to_cpu(params.ulSymClock) * 48) / 24);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET PIXEL CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_pixel_clock_v3(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+ case 3:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v3;
+ break;
+ case 5:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v5;
+ break;
+ case 6:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v6;
+ break;
+ case 7:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+ break;
+ default:
+ bp->cmd_tbl.set_pixel_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_pixel_clock_v3(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ PIXEL_CLOCK_PARAMETERS_V3 *params;
+ SET_PIXEL_CLOCK_PS_ALLOCATION allocation;
+
+ memset(&allocation, 0, sizeof(allocation));
+
+ if (CLOCK_SOURCE_ID_PLL1 == bp_params->pll_id)
+ allocation.sPCLKInput.ucPpll = ATOM_PPLL1;
+ else if (CLOCK_SOURCE_ID_PLL2 == bp_params->pll_id)
+ allocation.sPCLKInput.ucPpll = ATOM_PPLL2;
+ else
+ return BP_RESULT_BADINPUT;
+
+ allocation.sPCLKInput.usRefDiv =
+ cpu_to_le16((uint16_t)bp_params->reference_divider);
+ allocation.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t)bp_params->feedback_divider);
+ allocation.sPCLKInput.ucFracFbDiv =
+ (uint8_t)bp_params->fractional_feedback_divider;
+ allocation.sPCLKInput.ucPostDiv =
+ (uint8_t)bp_params->pixel_clock_post_divider;
+
+ /* We need to convert from KHz units into 10KHz units */
+ allocation.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+ params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
+ params->ucTransmitterId =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params->ucEncoderMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false));
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK;
+
+ if (CONTROLLER_ID_D1 != bp_params->controller_id)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, allocation))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V5
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V5 {
+ PIXEL_CLOCK_PARAMETERS_V5 sPCLKInput;
+ /* Caller doesn't need to init this portion */
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V5;
+#endif
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V6
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V6 {
+ PIXEL_CLOCK_PARAMETERS_V6 sPCLKInput;
+ /* Caller doesn't need to init this portion */
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V6;
+#endif
+
+static enum bp_result set_pixel_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V5 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &controller_id)) {
+ clk.sPCLKInput.ucCRTC = controller_id;
+ clk.sPCLKInput.ucPpll = (uint8_t)pll_id;
+ clk.sPCLKInput.ucRefDiv =
+ (uint8_t)(bp_params->reference_divider);
+ clk.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t)(bp_params->feedback_divider));
+ clk.sPCLKInput.ulFbDivDecFrac =
+ cpu_to_le32(bp_params->fractional_feedback_divider);
+ clk.sPCLKInput.ucPostDiv =
+ (uint8_t)(bp_params->pixel_clock_post_divider);
+ clk.sPCLKInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ clk.sPCLKInput.ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ /* clkV5.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0: 24bpp
+ * =1:30bpp, =2:32bpp
+ * driver choose program it itself, i.e. here we program it
+ * to 888 by default.
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+static enum bp_result set_pixel_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V6 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use ucCRTC name which is now
+ * 1 byte in ULONG
+ *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+ *{
+ * target the pixel clock to drive the CRTC timing.
+ * ULONG ulPixelClock:24;
+ * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+ * previous version.
+ * ATOM_CRTC1~6, indicate the CRTC controller to
+ * ULONG ucCRTC:8;
+ * drive the pixel clock. not used for DCPLL case.
+ *}CRTC_PIXEL_CLOCK_FREQ;
+ *union
+ *{
+ * pixel clock and CRTC id frequency
+ * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+ * ULONG ulDispEngClkFreq; dispclk frequency
+ *};
+ */
+ clk.sPCLKInput.ulCrtcPclkFreq.ucCRTC = controller_id;
+ clk.sPCLKInput.ucPpll = (uint8_t) pll_id;
+ clk.sPCLKInput.ucRefDiv =
+ (uint8_t) bp_params->reference_divider;
+ clk.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t) bp_params->feedback_divider);
+ clk.sPCLKInput.ulFbDivDecFrac =
+ cpu_to_le32(bp_params->fractional_feedback_divider);
+ clk.sPCLKInput.ucPostDiv =
+ (uint8_t) bp_params->pixel_clock_post_divider;
+ clk.sPCLKInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ clk.sPCLKInput.ucEncoderMode =
+ (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
+ cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
+ }
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) {
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+ }
+
+ /* clkV6.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0:
+ * 24bpp =1:30bpp, =2:32bpp
+ * driver choose program it itself, i.e. here we pass required
+ * target rate that includes deep color.
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ PIXEL_CLOCK_PARAMETERS_V7 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use ucCRTC name which is now
+ * 1 byte in ULONG
+ *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+ *{
+ * target the pixel clock to drive the CRTC timing.
+ * ULONG ulPixelClock:24;
+ * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+ * previous version.
+ * ATOM_CRTC1~6, indicate the CRTC controller to
+ * ULONG ucCRTC:8;
+ * drive the pixel clock. not used for DCPLL case.
+ *}CRTC_PIXEL_CLOCK_FREQ;
+ *union
+ *{
+ * pixel clock and CRTC id frequency
+ * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+ * ULONG ulDispEngClkFreq; dispclk frequency
+ *};
+ */
+ clk.ucCRTC = controller_id;
+ clk.ucPpll = (uint8_t) pll_id;
+ clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
+ clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10);
+
+ clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC;
+
+ if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+ if (bp_params->flags.SUPPORT_YUV_420)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+ if (bp_params->flags.SET_XTALIN_REF_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+ if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE PIXEL CLOCK SS
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL)) {
+ case 1:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v3;
+ break;
+ default:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL params;
+
+ memset(&params, 0, sizeof(params));
+
+ if ((enable == true) && (bp_params->percentage > 0))
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ params.usSpreadSpectrumPercentage =
+ cpu_to_le16((uint16_t)bp_params->percentage);
+ params.ucSpreadSpectrumStep =
+ (uint8_t)bp_params->ver1.step;
+ params.ucSpreadSpectrumDelay =
+ (uint8_t)bp_params->ver1.delay;
+ /* convert back to unit of 10KHz */
+ params.ucSpreadSpectrumRange =
+ (uint8_t)(bp_params->ver1.range / 10000);
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |= ATOM_EXTERNAL_SS_MASK;
+
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |= ATOM_SS_CENTRE_SPREAD_MODE;
+
+ if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+ params.ucPpll = ATOM_PPLL1;
+ else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+ params.ucPpll = ATOM_PPLL2;
+ else
+ BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 params;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P1PLL;
+ else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P2PLL;
+ else
+ BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+ if ((enable == true) && (bp_params->percentage > 0)) {
+ params.ucEnable = ATOM_ENABLE;
+
+ params.usSpreadSpectrumPercentage =
+ cpu_to_le16((uint16_t)(bp_params->percentage));
+ params.usSpreadSpectrumStep =
+ cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD;
+
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD;
+
+ /* Both amounts need to be left shifted first before bit
+ * comparison. Otherwise, the result will always be zero here
+ */
+ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+ ((bp_params->ds.feedback_amount <<
+ ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
+ ((bp_params->ds.nfrac_amount <<
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK)));
+ } else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (bp_params->pll_id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ /* ATOM_PPLL_SS_TYPE_V3_P0PLL; this is pixel clock only,
+ * not for SI display clock.
+ */
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P1PLL;
+ break;
+
+ case CLOCK_SOURCE_ID_PLL2:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P2PLL;
+ break;
+
+ case CLOCK_SOURCE_ID_DCPLL:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ /* Unexpected PLL value!! */
+ return result;
+ }
+
+ if (enable == true) {
+ params.ucEnable = ATOM_ENABLE;
+
+ params.usSpreadSpectrumAmountFrac =
+ cpu_to_le16((uint16_t)(bp_params->ds_frac_amount));
+ params.usSpreadSpectrumStep =
+ cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD;
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD;
+
+ /* Both amounts need to be left shifted first before bit
+ * comparison. Otherwise, the result will always be zero here
+ */
+ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+ ((bp_params->ds.feedback_amount <<
+ ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) |
+ ((bp_params->ds.nfrac_amount <<
+ ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK)));
+ } else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ADJUST DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result adjust_display_pll_v2(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+static enum bp_result adjust_display_pll_v3(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+
+static void init_adjust_display_pll(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll)) {
+ case 2:
+ bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
+ break;
+ default:
+ bp->cmd_tbl.adjust_display_pll = NULL;
+ break;
+ }
+}
+
+static enum bp_result adjust_display_pll_v2(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION params = { 0 };
+
+ /* We need to convert from KHz units into 10KHz units and then convert
+ * output pixel clock back 10KHz-->KHz */
+ uint32_t pixel_clock_10KHz_in = bp_params->pixel_clock / 10;
+
+ params.usPixelClock = cpu_to_le16((uint16_t)(pixel_clock_10KHz_in));
+ params.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+ return result;
+}
+
+static enum bp_result adjust_display_pll_v3(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 params;
+ uint32_t pixel_clk_10_kHz_in = bp_params->pixel_clock / 10;
+
+ memset(&params, 0, sizeof(params));
+
+ /* We need to convert from KHz units into 10KHz units and then convert
+ * output pixel clock back 10KHz-->KHz */
+ params.sInput.usPixelClock = cpu_to_le16((uint16_t)pixel_clk_10_kHz_in);
+ params.sInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params.sInput.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ if (bp_params->ss_enable == true)
+ params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK;
+
+ if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
+ /* Convert output pixel clock back 10KHz-->KHz: multiply
+ * original pixel clock in KHz by ratio
+ * [output pxlClk/input pxlClk] */
+ uint64_t pixel_clk_10_khz_out =
+ (uint64_t)le32_to_cpu(params.sOutput.ulDispPllFreq);
+ uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
+
+ if (pixel_clk_10_kHz_in != 0) {
+ bp_params->adjusted_pixel_clock =
+ div_u64(pixel_clk * pixel_clk_10_khz_out,
+ pixel_clk_10_kHz_in);
+ } else {
+ bp_params->adjusted_pixel_clock = 0;
+ BREAK_TO_DEBUGGER();
+ }
+
+ bp_params->reference_divider = params.sOutput.ucRefDiv;
+ bp_params->pixel_clock_post_divider = params.sOutput.ucPostDiv;
+
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DAC ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac1_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+static enum bp_result dac2_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+
+static void init_dac_encoder_control(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1EncoderControl)) {
+ case 1:
+ bp->cmd_tbl.dac1_encoder_control = dac1_encoder_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac1_encoder_control = NULL;
+ break;
+ }
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2EncoderControl)) {
+ case 1:
+ bp->cmd_tbl.dac2_encoder_control = dac2_encoder_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac2_encoder_control = NULL;
+ break;
+ }
+}
+
+static void dac_encoder_control_prepare_params(
+ DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ params->ucDacStandard = dac_standard;
+ if (enable)
+ params->ucAction = ATOM_ENABLE;
+ else
+ params->ucAction = ATOM_DISABLE;
+
+ /* We need to convert from KHz units into 10KHz units
+ * it looks as if the TvControl do not care about pixel clock
+ */
+ params->usPixelClock = cpu_to_le16((uint16_t)(pixel_clock / 10));
+}
+
+static enum bp_result dac1_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+ dac_encoder_control_prepare_params(
+ &params,
+ enable,
+ pixel_clock,
+ dac_standard);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC1EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac2_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+ dac_encoder_control_prepare_params(
+ &params,
+ enable,
+ pixel_clock,
+ dac_standard);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC2EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DAC OUTPUT CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result dac1_output_control_v1(
+ struct bios_parser *bp,
+ bool enable);
+static enum bp_result dac2_output_control_v1(
+ struct bios_parser *bp,
+ bool enable);
+
+static void init_dac_output_control(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1OutputControl)) {
+ case 1:
+ bp->cmd_tbl.dac1_output_control = dac1_output_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac1_output_control = NULL;
+ break;
+ }
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2OutputControl)) {
+ case 1:
+ bp->cmd_tbl.dac2_output_control = dac2_output_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac2_output_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result dac1_output_control_v1(
+ struct bios_parser *bp, bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+ if (enable)
+ params.ucAction = ATOM_ENABLE;
+ else
+ params.ucAction = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(DAC1OutputControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac2_output_control_v1(
+ struct bios_parser *bp, bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+ if (enable)
+ params.ucAction = ATOM_ENABLE;
+ else
+ params.ucAction = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(DAC2OutputControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET CRTC TIMING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+static enum bp_result set_crtc_timing_v1(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+ uint32_t dtd_version =
+ BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_UsingDTDTiming);
+ if (dtd_version > 2)
+ switch (dtd_version) {
+ case 3:
+ bp->cmd_tbl.set_crtc_timing =
+ set_crtc_using_dtd_timing_v3;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+ else
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing)) {
+ case 1:
+ bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_crtc_timing_v1(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+
+ params.usH_Total = cpu_to_le16((uint16_t)(bp_params->h_total));
+ params.usH_Disp = cpu_to_le16((uint16_t)(bp_params->h_addressable));
+ params.usH_SyncStart = cpu_to_le16((uint16_t)(bp_params->h_sync_start));
+ params.usH_SyncWidth = cpu_to_le16((uint16_t)(bp_params->h_sync_width));
+ params.usV_Total = cpu_to_le16((uint16_t)(bp_params->v_total));
+ params.usV_Disp = cpu_to_le16((uint16_t)(bp_params->v_addressable));
+ params.usV_SyncStart =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start));
+ params.usV_SyncWidth =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_width));
+
+ /* VBIOS does not expect any value except zero into this call, for
+ * underscan use another entry ProgramOverscan call but when mode
+ * 1776x1000 with the overscan 72x44 .e.i. 1920x1080 @30 DAL2 is ok,
+ * but when same ,but 60 Hz there is corruption
+ * DAL1 does not allow the mode 1776x1000@60
+ */
+ params.ucOverscanRight = (uint8_t)bp_params->h_overscan_right;
+ params.ucOverscanLeft = (uint8_t)bp_params->h_overscan_left;
+ params.ucOverscanBottom = (uint8_t)bp_params->v_overscan_bottom;
+ params.ucOverscanTop = (uint8_t)bp_params->v_overscan_top;
+
+ if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+ if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply tis for
+ * non-TV/CV only due to complex MV testing for possible
+ * impact
+ * if (pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+ /* HW will deduct 0.5 line from 2nd feild.
+ * i.e. for 1080i, it is 2 lines for 1st field, 2.5
+ * lines for the 2nd feild. we need input as 5 instead
+ * of 4, but it is 4 either from Edid data
+ * (spec CEA 861) or CEA timing table.
+ */
+ params.usV_SyncStart =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start + 1));
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+ if (EXEC_BIOS_CMD_TABLE(SetCRTC_Timing, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+
+ /* bios usH_Size wants h addressable size */
+ params.usH_Size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+ /* bios usH_Blanking_Time wants borders included in blanking */
+ params.usH_Blanking_Time =
+ cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable));
+ /* bios usV_Size wants v addressable size */
+ params.usV_Size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+ /* bios usV_Blanking_Time wants borders included in blanking */
+ params.usV_Blanking_Time =
+ cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable));
+ /* bios usHSyncOffset is the offset from the end of h addressable,
+ * our horizontalSyncStart is the offset from the beginning
+ * of h addressable */
+ params.usH_SyncOffset =
+ cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable));
+ params.usH_SyncWidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+ /* bios usHSyncOffset is the offset from the end of v addressable,
+ * our verticalSyncStart is the offset from the beginning of
+ * v addressable */
+ params.usV_SyncOffset =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable));
+ params.usV_SyncWidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
+
+ /* we assume that overscan from original timing does not get bigger
+ * than 255
+ * we will program all the borders in the Set CRTC Overscan call below
+ */
+
+ if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+ if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply this
+ * for non-TV/CV only
+ * due to complex MV testing for possible impact
+ * if ( pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+ {
+ /* HW will deduct 0.5 line from 2nd feild.
+ * i.e. for 1080i, it is 2 lines for 1st field,
+ * 2.5 lines for the 2nd feild. we need input as 5
+ * instead of 4.
+ * but it is 4 either from Edid data (spec CEA 861)
+ * or CEA timing table.
+ */
+ params.usV_SyncOffset =
+ cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1);
+
+ }
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+ if (EXEC_BIOS_CMD_TABLE(SetCRTC_UsingDTDTiming, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ /* set controller id */
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+ else
+ return BP_RESULT_FAILURE;
+
+ /* set encoder id */
+ if (bp->cmd_helper->engine_bp_to_atom(
+ bp_params->engine_id, &atom_engine_id))
+ params.ucEncoderID = (uint8_t)atom_engine_id;
+ else
+ return BP_RESULT_FAILURE;
+
+ if (SIGNAL_TYPE_EDP == s ||
+ (SIGNAL_TYPE_DISPLAY_PORT == s &&
+ SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ bool result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+ &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+ else
+ return result;
+
+ if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+ &atom_engine_id))
+ params.ucEncoderID = (uint8_t)atom_engine_id;
+ else
+ return result;
+
+ if (SIGNAL_TYPE_EDP == s ||
+ (SIGNAL_TYPE_DISPLAY_PORT == s &&
+ SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.ucEncodeMode =
+ bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+ /* Needed for VBIOS Random Spatial Dithering feature */
+ params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE CRTC
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+ bool result = BP_RESULT_FAILURE;
+ ENABLE_CRTC_PARAMETERS params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+ params.ucCRTC = id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ if (enable)
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableCRTC, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE CRTC MEM REQ
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_mem_req_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc_mem_req(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTCMemReq)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc_mem_req = enable_crtc_mem_req_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc_mem_req = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_mem_req_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+ bool result = BP_RESULT_BADINPUT;
+ ENABLE_CRTC_PARAMETERS params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) {
+ params.ucCRTC = id;
+
+ if (enable)
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableCRTCMemReq, params))
+ result = BP_RESULT_OK;
+ else
+ result = BP_RESULT_FAILURE;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result program_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result program_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_program_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+ case 5:
+ bp->cmd_tbl.program_clock = program_clock_v5;
+ break;
+ case 6:
+ bp->cmd_tbl.program_clock = program_clock_v6;
+ break;
+ default:
+ bp->cmd_tbl.program_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result program_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V5 params;
+ uint32_t atom_pll_id;
+
+ memset(&params, 0, sizeof(params));
+ if (!bp->cmd_helper->clock_source_id_to_atom(
+ bp_params->pll_id, &atom_pll_id)) {
+ BREAK_TO_DEBUGGER(); /* Invalid Inpute!! */
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id;
+ params.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10));
+ params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result program_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V6 params;
+ uint32_t atom_pll_id;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!bp->cmd_helper->clock_source_id_to_atom(
+ bp_params->pll_id, &atom_pll_id)) {
+ BREAK_TO_DEBUGGER(); /*Invalid Input!!*/
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id;
+ params.sPCLKInput.ulDispEngClkFreq =
+ cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) {
+ /* True display clock is returned by VBIOS if DFS bypass
+ * is enabled. */
+ bp_params->dfs_bypass_display_clock =
+ (uint32_t)(le32_to_cpu(params.sPCLKInput.ulDispEngClkFreq) * 10);
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** EXTERNAL ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(ExternalEncoderControl)) {
+ case 3:
+ bp->cmd_tbl.external_encoder_control =
+ external_encoder_control_v3;
+ break;
+ default:
+ bp->cmd_tbl.external_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ /* we need use _PS_Alloc struct */
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 params;
+ EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *cntl_params;
+ struct graphics_object_id encoder;
+ bool is_input_signal_dp = false;
+
+ memset(&params, 0, sizeof(params));
+
+ cntl_params = &params.sExtEncoder;
+
+ encoder = cntl->encoder_id;
+
+ /* check if encoder supports external encoder control table */
+ switch (dal_graphics_object_id_get_encoder_id(encoder)) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ is_input_signal_dp = true;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* Fill information based on the action
+ *
+ * Bit[6:4]: indicate external encoder, applied to all functions.
+ * =0: external encoder1, mapped to external encoder enum id1
+ * =1: external encoder2, mapped to external encoder enum id2
+ *
+ * enum ObjectEnumId
+ * {
+ * EnumId_Unknown = 0,
+ * EnumId_1,
+ * EnumId_2,
+ * };
+ */
+ cntl_params->ucConfig = (uint8_t)((encoder.enum_id - 1) << 4);
+
+ switch (cntl->action) {
+ case EXTERNAL_ENCODER_CONTROL_INIT:
+ /* output display connector type. Only valid in encoder
+ * initialization */
+ cntl_params->usConnectorId =
+ cpu_to_le16((uint16_t)cntl->connector_obj_id.id);
+ break;
+ case EXTERNAL_ENCODER_CONTROL_SETUP:
+ /* EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 pixel clock unit in
+ * 10KHz
+ * output display device pixel clock frequency in unit of 10KHz.
+ * Only valid in setup and enableoutput
+ */
+ cntl_params->usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ /* Indicate display output signal type drive by external
+ * encoder, only valid in setup and enableoutput */
+ cntl_params->ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal, false);
+
+ if (is_input_signal_dp) {
+ /* Bit[0]: indicate link rate, =1: 2.7Ghz, =0: 1.62Ghz,
+ * only valid in encoder setup with DP mode. */
+ if (LINK_RATE_HIGH == cntl->link_rate)
+ cntl_params->ucConfig |= 1;
+ /* output color depth Indicate encoder data bpc format
+ * in DP mode, only valid in encoder setup in DP mode.
+ */
+ cntl_params->ucBitPerColor =
+ (uint8_t)(cntl->color_depth);
+ }
+ /* Indicate how many lanes used by external encoder, only valid
+ * in encoder setup and enableoutput. */
+ cntl_params->ucLaneNum = (uint8_t)(cntl->lanes_number);
+ break;
+ case EXTERNAL_ENCODER_CONTROL_ENABLE:
+ cntl_params->usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ cntl_params->ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal, false);
+ cntl_params->ucLaneNum = (uint8_t)cntl->lanes_number;
+ break;
+ default:
+ break;
+ }
+
+ cntl_params->ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(ExternalEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE DISPLAY POWER GATING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)) {
+ case 1:
+ bp->cmd_tbl.enable_disp_power_gating =
+ enable_disp_power_gating_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.enable_disp_power_gating = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION params = {0};
+ uint8_t atom_crtc_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+ params.ucDispPipeId = atom_crtc_id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ params.ucEnable =
+ bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+ if (EXEC_BIOS_CMD_TABLE(EnableDispPowerGating, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET DCE CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock)) {
+ case 1:
+ bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.set_dce_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_DCE_CLOCK_PS_ALLOCATION_V2_1 params;
+ uint32_t atom_pll_id;
+ uint32_t atom_clock_type;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+ !cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type))
+ return BP_RESULT_BADINPUT;
+
+ params.asParam.ucDCEClkSrc = atom_pll_id;
+ params.asParam.ucDCEClkType = atom_clock_type;
+
+ if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+ if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+ if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+ if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+ if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+ }
+ else
+ /* only program clock frequency if display clock is used; VBIOS will program DPREFCLK */
+ /* We need to convert from KHz units into 10KHz units */
+ params.asParam.ulDCEClkFreq = cpu_to_le32(bp_params->target_clock_frequency / 10);
+
+ if (EXEC_BIOS_CMD_TABLE(SetDCEClock, params)) {
+ /* Convert from 10KHz units back to KHz */
+ bp_params->target_clock_frequency = le32_to_cpu(params.asParam.ulDCEClkFreq) * 10;
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
new file mode 100644
index 000000000000..94f3d43a7471
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_H__
+#define __DAL_COMMAND_TABLE_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+struct cmd_tbl {
+ enum bp_result (*dig_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig1)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig2)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*transmitter_control)(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *control);
+ enum bp_result (*set_pixel_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*adjust_display_pll)(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*dac1_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac2_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac1_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*dac2_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*set_crtc_timing)(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*enable_crtc)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*enable_crtc_mem_req)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*program_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*external_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+ enum bp_result (*enable_disp_power_gating)(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+ enum bp_result (*set_dce_clock)(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+};
+
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
new file mode 100644
index 000000000000..ba68693758a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table2.h"
+#include "command_table_helper2.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal2.h"
+
+#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
+ (((char *)(&((\
+ struct atom_master_list_of_##MasterOrData##_functions_v2_1 *)0)\
+ ->FieldName)-(char *)0)/sizeof(uint16_t))
+
+#define EXEC_BIOS_CMD_TABLE(fname, params)\
+ (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname), \
+ &params) == 0)
+
+#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
+ cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
+
+#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
+ bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+
+static void init_set_crtc_timing(struct bios_parser *bp);
+
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+static void init_get_smu_clock_info(struct bios_parser *bp);
+
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+
+ init_set_crtc_timing(bp);
+
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+ init_get_smu_clock_info(bp);
+}
+
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+ uint32_t index)
+{
+ uint8_t frev, crev;
+
+ if (cgs_atom_get_cmd_table_revs(cgs_device,
+ index,
+ &frev, &crev) != 0)
+ return 0;
+ return crev;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** D I G E N C O D E R C O N T R O L
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result encoder_control_digx_v1_5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+ uint32_t version =
+ BIOS_CMD_TABLE_PARA_REVISION(digxencodercontrol);
+
+ switch (version) {
+ case 5:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
+ break;
+ default:
+ bp->cmd_tbl.dig_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result encoder_control_digx_v1_5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct dig_encoder_stream_setup_parameters_v1_5 params = {0};
+
+ params.digid = (uint8_t)(cntl->engine_id);
+ params.action = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+ params.pclk_10khz = cntl->pixel_clock / 10;
+ params.digmode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.lanenum = (uint8_t)(cntl->lanes_number);
+
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.bitpercolor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.bitpercolor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.bitpercolor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.bitpercolor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.pclk_10khz =
+ (params.pclk_10khz * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ params.pclk_10khz =
+ (params.pclk_10khz * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ params.pclk_10khz =
+ (params.pclk_10khz * 48) / 24;
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*****************************************************************************
+ ******************************************************************************
+ **
+ ** TRANSMITTER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
+static void init_transmitter_control(struct bios_parser *bp)
+{
+ uint8_t frev;
+ uint8_t crev;
+
+ if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0)
+ BREAK_TO_DEBUGGER();
+ switch (crev) {
+ case 6:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+ break;
+ default:
+ bp->cmd_tbl.transmitter_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ struct dig_transmitter_control_ps_allocation_v1_6 ps = { { 0 } };
+
+ ps.param.phyid = cmd->phy_id_to_atom(cntl->transmitter);
+ ps.param.action = (uint8_t)cntl->action;
+
+ if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+ ps.param.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings;
+ else
+ ps.param.mode_laneset.digmode =
+ cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+ ps.param.lanenum = (uint8_t)cntl->lanes_number;
+ ps.param.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ ps.param.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ ps.param.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+ ps.param.symclk_10khz = cntl->pixel_clock/10;
+
+
+ if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
+ cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
+ cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
+ dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
+ "%s:ps.param.symclk_10khz = %d\n",\
+ __func__, ps.param.symclk_10khz);
+ }
+
+
+/*color_depth not used any more, driver has deep color factor in the Phyclk*/
+ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
+ result = BP_RESULT_OK;
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET PIXEL CLOCK
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
+ case 7:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+ break;
+ default:
+ bp->cmd_tbl.set_pixel_clock = NULL;
+ break;
+ }
+}
+
+
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct set_pixel_clock_parameter_v1_7 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(bp_params->
+ controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use ucCRTC name which is now
+ * 1 byte in ULONG
+ *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+ *{
+ * target the pixel clock to drive the CRTC timing.
+ * ULONG ulPixelClock:24;
+ * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+ * previous version.
+ * ATOM_CRTC1~6, indicate the CRTC controller to
+ * ULONG ucCRTC:8;
+ * drive the pixel clock. not used for DCPLL case.
+ *}CRTC_PIXEL_CLOCK_FREQ;
+ *union
+ *{
+ * pixel clock and CRTC id frequency
+ * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+ * ULONG ulDispEngClkFreq; dispclk frequency
+ *};
+ */
+ clk.crtc_id = controller_id;
+ clk.pll_id = (uint8_t) pll_id;
+ clk.encoderobjid =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+
+ clk.encoder_mode = (uint8_t) bp->
+ cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock *
+ 10);
+
+ clk.deep_color_ratio =
+ (uint8_t) bp->cmd_helper->
+ transmitter_color_depth_to_atom(
+ bp_params->color_depth);
+ dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
+ "%s:program display clock = %d"\
+ "colorDepth = %d\n", __func__,\
+ bp_params->target_pixel_clock, bp_params->color_depth);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+ if (bp_params->flags.SUPPORT_YUV_420)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+ if (bp_params->flags.SET_XTALIN_REF_SRC)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+ if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+ if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
+ result = BP_RESULT_OK;
+ }
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET CRTC TIMING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+ uint32_t dtd_version =
+ BIOS_CMD_TABLE_PARA_REVISION(setcrtc_usingdtdtiming);
+
+ switch (dtd_version) {
+ case 3:
+ bp->cmd_tbl.set_crtc_timing =
+ set_crtc_using_dtd_timing_v3;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct set_crtc_using_dtd_timing_parameters params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.crtc_id = atom_controller_id;
+
+ /* bios usH_Size wants h addressable size */
+ params.h_size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+ /* bios usH_Blanking_Time wants borders included in blanking */
+ params.h_blanking_time =
+ cpu_to_le16((uint16_t)(bp_params->h_total -
+ bp_params->h_addressable));
+ /* bios usV_Size wants v addressable size */
+ params.v_size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+ /* bios usV_Blanking_Time wants borders included in blanking */
+ params.v_blanking_time =
+ cpu_to_le16((uint16_t)(bp_params->v_total -
+ bp_params->v_addressable));
+ /* bios usHSyncOffset is the offset from the end of h addressable,
+ * our horizontalSyncStart is the offset from the beginning
+ * of h addressable
+ */
+ params.h_syncoffset =
+ cpu_to_le16((uint16_t)(bp_params->h_sync_start -
+ bp_params->h_addressable));
+ params.h_syncwidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+ /* bios usHSyncOffset is the offset from the end of v addressable,
+ * our verticalSyncStart is the offset from the beginning of
+ * v addressable
+ */
+ params.v_syncoffset =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start -
+ bp_params->v_addressable));
+ params.v_syncwidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
+
+ /* we assume that overscan from original timing does not get bigger
+ * than 255
+ * we will program all the borders in the Set CRTC Overscan call below
+ */
+
+ if (bp_params->flags.HSYNC_POSITIVE_POLARITY == 0)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_HSYNC_POLARITY);
+
+ if (bp_params->flags.VSYNC_POSITIVE_POLARITY == 0)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply this
+ * for non-TV/CV only
+ * due to complex MV testing for possible impact
+ * if ( pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+ {
+ /* HW will deduct 0.5 line from 2nd feild.
+ * i.e. for 1080i, it is 2 lines for 1st field,
+ * 2.5 lines for the 2nd feild. we need input as 5
+ * instead of 4.
+ * but it is 4 either from Edid data (spec CEA 861)
+ * or CEA timing table.
+ */
+ params.v_syncoffset =
+ cpu_to_le16(le16_to_cpu(params.v_syncoffset) +
+ 1);
+
+ }
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ 0x100); /* ATOM_DOUBLE_CLOCK_MODE */
+
+ if (EXEC_BIOS_CMD_TABLE(setcrtc_usingdtdtiming, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SELECT CRTC SOURCE
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source)) {
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ bool result = BP_RESULT_FAILURE;
+ struct select_crtc_source_parameters_v2_3 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+ &atom_controller_id))
+ params.crtc_id = atom_controller_id;
+ else
+ return result;
+
+ if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+ &atom_engine_id))
+ params.encoder_id = (uint8_t)atom_engine_id;
+ else
+ return result;
+
+ if (s == SIGNAL_TYPE_EDP ||
+ (s == SIGNAL_TYPE_DISPLAY_PORT && bp_params->sink_signal ==
+ SIGNAL_TYPE_LVDS))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.encode_mode =
+ bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+ /* Needed for VBIOS Random Spatial Dithering feature */
+ params.dst_bpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+ if (EXEC_BIOS_CMD_TABLE(selectcrtc_source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** ENABLE CRTC
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(enablecrtc)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+ bool result = BP_RESULT_FAILURE;
+ struct enable_crtc_parameters params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+ params.crtc_id = id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ if (enable)
+ params.enable = ATOM_ENABLE;
+ else
+ params.enable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(enablecrtc, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** DISPLAY PLL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** EXTERNAL ENCODER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(externalencodercontrol)) {
+ case 3:
+ bp->cmd_tbl.external_encoder_control =
+ external_encoder_control_v3;
+ break;
+ default:
+ bp->cmd_tbl.external_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl)
+{
+ /* TODO */
+ return BP_RESULT_OK;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** ENABLE DISPLAY POWER GATING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)) {
+ case 1:
+ bp->cmd_tbl.enable_disp_power_gating =
+ enable_disp_power_gating_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.enable_disp_power_gating = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+
+ struct enable_disp_power_gating_ps_allocation ps = { { 0 } };
+ uint8_t atom_crtc_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+ ps.param.disp_pipe_id = atom_crtc_id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ ps.param.enable =
+ bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+ if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+*******************************************************************************
+ **
+ ** SET DCE CLOCK
+ **
+*******************************************************************************
+*******************************************************************************/
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(setdceclock)) {
+ case 1:
+ bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.set_dce_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ struct set_dce_clock_ps_allocation_v2_1 params;
+ uint32_t atom_pll_id;
+ uint32_t atom_clock_type;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+ !cmd->dc_clock_type_to_atom(bp_params->clock_type,
+ &atom_clock_type))
+ return BP_RESULT_BADINPUT;
+
+ params.param.dceclksrc = atom_pll_id;
+ params.param.dceclktype = atom_clock_type;
+
+ if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+ if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+ if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+ if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+ if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+ } else
+ /* only program clock frequency if display clock is used;
+ * VBIOS will program DPREFCLK
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.param.dceclk_10khz = cpu_to_le32(
+ bp_params->target_clock_frequency / 10);
+ dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+ "%s:target_clock_frequency = %d"\
+ "clock_type = %d \n", __func__,\
+ bp_params->target_clock_frequency,\
+ bp_params->clock_type);
+
+ if (EXEC_BIOS_CMD_TABLE(setdceclock, params)) {
+ /* Convert from 10KHz units back to KHz */
+ bp_params->target_clock_frequency = le32_to_cpu(
+ params.param.dceclk_10khz) * 10;
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** GET SMU CLOCK INFO
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp);
+
+static void init_get_smu_clock_info(struct bios_parser *bp)
+{
+ /* TODO add switch for table vrsion */
+ bp->cmd_tbl.get_smu_clock_info = get_smu_clock_info_v3_1;
+
+}
+
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp)
+{
+ struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
+
+ smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
+
+ /* Get Specific Clock */
+ if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
+ memmove(&smu_output, &smu_input, sizeof(
+ struct atom_get_smu_clock_info_parameters_v3_1));
+ return smu_output.atom_smu_outputclkfreq.syspllvcofreq_10khz;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
new file mode 100644
index 000000000000..59061b806df5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE2_H__
+#define __DAL_COMMAND_TABLE2_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+struct cmd_tbl {
+ enum bp_result (*dig_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig1)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig2)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*transmitter_control)(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *control);
+ enum bp_result (*set_pixel_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*adjust_display_pll)(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*dac1_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac2_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac1_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*dac2_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*set_crtc_timing)(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*enable_crtc)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*enable_crtc_mem_req)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*program_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*external_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+ enum bp_result (*enable_disp_power_gating)(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+ enum bp_result (*set_dce_clock)(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+ unsigned int (*get_smu_clock_info)(
+ struct bios_parser *bp);
+
+};
+
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
new file mode 100644
index 000000000000..2979358c6a55
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper.h"
+
+bool dal_bios_parser_init_cmd_tbl_helper(
+ const struct command_table_helper **h,
+ enum dce_version dce)
+{
+ switch (dce) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ *h = dal_cmd_tbl_helper_dce80_get_table();
+ return true;
+
+ case DCE_VERSION_10_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_2:
+ *h = dal_cmd_tbl_helper_dce112_get_table();
+ return true;
+
+ default:
+ /* Unsupported DCE */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/* real implementations */
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+ enum controller_id id,
+ uint8_t *atom_id)
+{
+ if (atom_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CONTROLLER_ID_D0:
+ *atom_id = ATOM_CRTC1;
+ return true;
+ case CONTROLLER_ID_D1:
+ *atom_id = ATOM_CRTC2;
+ return true;
+ case CONTROLLER_ID_D2:
+ *atom_id = ATOM_CRTC3;
+ return true;
+ case CONTROLLER_ID_D3:
+ *atom_id = ATOM_CRTC4;
+ return true;
+ case CONTROLLER_ID_D4:
+ *atom_id = ATOM_CRTC5;
+ return true;
+ case CONTROLLER_ID_D5:
+ *atom_id = ATOM_CRTC6;
+ return true;
+ case CONTROLLER_ID_UNDERLAY0:
+ *atom_id = ATOM_UNDERLAY_PIPE0;
+ return true;
+ case CONTROLLER_ID_UNDEFINED:
+ *atom_id = ATOM_CRTC_INVALID;
+ return true;
+ default:
+ /* Wrong controller id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/**
+* translate_transmitter_bp_to_atom
+*
+* @brief
+* Translate the Transmitter to the corresponding ATOM BIOS value
+*
+* @param
+* input transmitter
+* output digitalTransmitter
+* // =00: Digital Transmitter1 ( UNIPHY linkAB )
+* // =01: Digital Transmitter2 ( UNIPHY linkCD )
+* // =02: Digital Transmitter3 ( UNIPHY linkEF )
+*/
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+ enum transmitter t)
+{
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_TRAVIS_LCD:
+ return 0;
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ return 1;
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ return 2;
+ default:
+ /* Invalid Transmitter Type! */
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+}
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+ enum signal_type s,
+ bool enable_dp_audio)
+{
+ switch (s) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return ATOM_ENCODER_MODE_DVI;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return ATOM_ENCODER_MODE_HDMI;
+ case SIGNAL_TYPE_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_VIRTUAL:
+ if (enable_dp_audio)
+ return ATOM_ENCODER_MODE_DP_AUDIO;
+ else
+ return ATOM_ENCODER_MODE_DP;
+ case SIGNAL_TYPE_RGB:
+ return ATOM_ENCODER_MODE_CRT;
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+}
+
+void dal_cmd_table_helper_assign_control_parameter(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param)
+{
+ /* there are three transmitter blocks, each one has two links 4-lanes
+ * each, A+B, C+D, E+F, Uniphy A, C and E are enumerated as link 0 in
+ * each transmitter block B, D and F as link 1, third transmitter block
+ * has non splitable links (UniphyE and UniphyF can not be configured
+ * separately to drive two different streams)
+ */
+ if ((control->transmitter == TRANSMITTER_UNIPHY_B) ||
+ (control->transmitter == TRANSMITTER_UNIPHY_D) ||
+ (control->transmitter == TRANSMITTER_UNIPHY_F)) {
+ /* Bit2: Link Select
+ * =0: PHY linkA/C/E
+ * =1: PHY linkB/D/F
+ */
+ ctrl_param->acConfig.ucLinkSel = 1;
+ }
+
+ /* Bit[4:3]: Transmitter Selection
+ * =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * =02: Digital Transmitter3 ( UNIPHY linkEF )
+ * =03: Reserved
+ */
+ ctrl_param->acConfig.ucTransmitterSel =
+ (uint8_t)(h->transmitter_bp_to_atom(control->transmitter));
+
+ /* We need to convert from KHz units into 10KHz units */
+ ctrl_param->ucAction = h->encoder_action_to_atom(control->action);
+ ctrl_param->usPixelClock = cpu_to_le16((uint16_t)(control->pixel_clock / 10));
+ ctrl_param->ucEncoderMode =
+ (uint8_t)(h->encoder_mode_bp_to_atom(
+ control->signal, control->enable_dp_audio));
+ ctrl_param->ucLaneNum = (uint8_t)(control->lanes_number);
+}
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id)
+{
+ if (ref_clk_src_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL1:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+ return true;
+ case CLOCK_SOURCE_ID_PLL2:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+ return true;
+ case CLOCK_SOURCE_ID_DCPLL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+ return true;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+ return true;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+ return true;
+ default:
+ /* Unsupported clock source id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+ enum encoder_id id)
+{
+ switch (id) {
+ case ENCODER_ID_INTERNAL_LVDS:
+ return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ case ENCODER_ID_INTERNAL_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ case ENCODER_ID_INTERNAL_TMDS2:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+ case ENCODER_ID_INTERNAL_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ case ENCODER_ID_INTERNAL_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ case ENCODER_ID_INTERNAL_LVTM1:
+ return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ case ENCODER_ID_INTERNAL_HDMI:
+ return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return ENCODER_OBJECT_ID_TRAVIS;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ return ENCODER_OBJECT_ID_NUTMEG;
+ case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+ return ENCODER_OBJECT_ID_MVPU_FPGA;
+ case ENCODER_ID_INTERNAL_DDI:
+ return ENCODER_OBJECT_ID_INTERNAL_DDI;
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+ case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+ case ENCODER_ID_INTERNAL_WIRELESS:
+ return ENCODER_OBJECT_ID_INTERNAL_VCE;
+ case ENCODER_ID_UNKNOWN:
+ return ENCODER_OBJECT_ID_NONE;
+ default:
+ /* Invalid encoder id */
+ BREAK_TO_DEBUGGER();
+ return ENCODER_OBJECT_ID_NONE;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
new file mode 100644
index 000000000000..1fab634b66be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_H__
+#define __DAL_COMMAND_TABLE_HELPER_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper_dce112.h"
+
+struct command_table_helper {
+ bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+ uint8_t (*encoder_action_to_atom)(
+ enum bp_encoder_control_action action);
+ uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+ bool enable_dp_audio);
+ bool (*engine_bp_to_atom)(enum engine_id engine_id,
+ uint32_t *atom_engine_id);
+ void (*assign_control_parameter)(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+ bool (*clock_source_id_to_atom)(enum clock_source_id id,
+ uint32_t *atom_pll_id);
+ bool (*clock_source_id_to_ref_clk_src)(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+ uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+ uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+ uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+ enum clock_source_id id);
+ uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+ uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+ uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+ uint8_t (*phy_id_to_atom)(enum transmitter t);
+ uint8_t (*disp_power_gating_action_to_atom)(
+ enum bp_pipe_control_action action);
+ bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type);
+ uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
+ enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+ enum controller_id id,
+ uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+ enum signal_type s,
+ bool enable_dp_audio);
+
+void dal_cmd_table_helper_assign_control_parameter(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+ enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+ enum encoder_id id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
new file mode 100644
index 000000000000..9a4d30dd4969
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper2.h"
+
+bool dal_bios_parser_init_cmd_tbl_helper2(
+ const struct command_table_helper **h,
+ enum dce_version dce)
+{
+ switch (dce) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ *h = dal_cmd_tbl_helper_dce80_get_table();
+ return true;
+
+ case DCE_VERSION_10_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_2:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#endif
+
+ case DCE_VERSION_12_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+
+ default:
+ /* Unsupported DCE */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/* real implementations */
+
+bool dal_cmd_table_helper_controller_id_to_atom2(
+ enum controller_id id,
+ uint8_t *atom_id)
+{
+ if (atom_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CONTROLLER_ID_D0:
+ *atom_id = ATOM_CRTC1;
+ return true;
+ case CONTROLLER_ID_D1:
+ *atom_id = ATOM_CRTC2;
+ return true;
+ case CONTROLLER_ID_D2:
+ *atom_id = ATOM_CRTC3;
+ return true;
+ case CONTROLLER_ID_D3:
+ *atom_id = ATOM_CRTC4;
+ return true;
+ case CONTROLLER_ID_D4:
+ *atom_id = ATOM_CRTC5;
+ return true;
+ case CONTROLLER_ID_D5:
+ *atom_id = ATOM_CRTC6;
+ return true;
+ /* TODO :case CONTROLLER_ID_UNDERLAY0:
+ *atom_id = ATOM_UNDERLAY_PIPE0;
+ return true;
+ */
+ case CONTROLLER_ID_UNDEFINED:
+ *atom_id = ATOM_CRTC_INVALID;
+ return true;
+ default:
+ /* Wrong controller id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/**
+* translate_transmitter_bp_to_atom
+*
+* @brief
+* Translate the Transmitter to the corresponding ATOM BIOS value
+*
+* @param
+* input transmitter
+* output digitalTransmitter
+* // =00: Digital Transmitter1 ( UNIPHY linkAB )
+* // =01: Digital Transmitter2 ( UNIPHY linkCD )
+* // =02: Digital Transmitter3 ( UNIPHY linkEF )
+*/
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+ enum transmitter t)
+{
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_TRAVIS_LCD:
+ return 0;
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ return 1;
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ return 2;
+ default:
+ /* Invalid Transmitter Type! */
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+}
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+ enum signal_type s,
+ bool enable_dp_audio)
+{
+ switch (s) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return ATOM_ENCODER_MODE_DVI;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return ATOM_ENCODER_MODE_HDMI;
+ case SIGNAL_TYPE_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_VIRTUAL:
+ if (enable_dp_audio)
+ return ATOM_ENCODER_MODE_DP_AUDIO;
+ else
+ return ATOM_ENCODER_MODE_DP;
+ case SIGNAL_TYPE_RGB:
+ return ATOM_ENCODER_MODE_CRT;
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+}
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id)
+{
+ if (ref_clk_src_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL1:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+ return true;
+ case CLOCK_SOURCE_ID_PLL2:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+ return true;
+ /*TODO:case CLOCK_SOURCE_ID_DCPLL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+ return true;
+ */
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+ return true;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+ return true;
+ default:
+ /* Unsupported clock source id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+ enum encoder_id id)
+{
+ switch (id) {
+ case ENCODER_ID_INTERNAL_LVDS:
+ return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ case ENCODER_ID_INTERNAL_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ case ENCODER_ID_INTERNAL_TMDS2:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+ case ENCODER_ID_INTERNAL_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ case ENCODER_ID_INTERNAL_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ case ENCODER_ID_INTERNAL_LVTM1:
+ return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ case ENCODER_ID_INTERNAL_HDMI:
+ return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return ENCODER_OBJECT_ID_TRAVIS;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ return ENCODER_OBJECT_ID_NUTMEG;
+ case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+ return ENCODER_OBJECT_ID_MVPU_FPGA;
+ case ENCODER_ID_INTERNAL_DDI:
+ return ENCODER_OBJECT_ID_INTERNAL_DDI;
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+ case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+ case ENCODER_ID_INTERNAL_WIRELESS:
+ return ENCODER_OBJECT_ID_INTERNAL_VCE;
+ case ENCODER_ID_INTERNAL_VIRTUAL:
+ return ENCODER_OBJECT_ID_NONE;
+ case ENCODER_ID_UNKNOWN:
+ return ENCODER_OBJECT_ID_NONE;
+ default:
+ /* Invalid encoder id */
+ BREAK_TO_DEBUGGER();
+ return ENCODER_OBJECT_ID_NONE;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
new file mode 100644
index 000000000000..9f587c91d843
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_H__
+#define __DAL_COMMAND_TABLE_HELPER2_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper2_dce112.h"
+
+struct command_table_helper {
+ bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+ uint8_t (*encoder_action_to_atom)(
+ enum bp_encoder_control_action action);
+ uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+ bool enable_dp_audio);
+ bool (*engine_bp_to_atom)(enum engine_id engine_id,
+ uint32_t *atom_engine_id);
+ bool (*clock_source_id_to_atom)(enum clock_source_id id,
+ uint32_t *atom_pll_id);
+ bool (*clock_source_id_to_ref_clk_src)(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+ uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+ uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+ uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+ enum clock_source_id id);
+ uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+ uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+ uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+ uint8_t (*phy_id_to_atom)(enum transmitter t);
+ uint8_t (*disp_power_gating_action_to_atom)(
+ enum bp_pipe_control_action action);
+ bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type);
+ uint8_t (*transmitter_color_depth_to_atom)(
+ enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h,
+ enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom2(
+ enum controller_id id,
+ uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+ enum signal_type s,
+ bool enable_dp_audio);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+ enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+ enum encoder_id id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
new file mode 100644
index 000000000000..ca24154468c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated to DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ }
+
+ return 0;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ *atom_pll_id = ATOM_PPLL1;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ *atom_pll_id = ATOM_PPLL2;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_EXT_PLL1;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ /* for VCE encoding,
+ * we need to pass in ATOM_PPLL_INVALID
+ */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ /* When programming DP DTO PLL ID should be invalid */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter = NULL,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Initialize command table helper functions
+ *
+ * @param
+ * const struct command_table_helper **h - [out] struct of functions
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
index 9df1fea8e971..eb60c2ead992 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,30 +18,17 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-/*
- * radeon_kfd.h defines the private interface between the
- * AMD kernel graphics drivers and the AMD KFD.
- */
-
-#ifndef RADEON_KFD_H_INCLUDED
-#define RADEON_KFD_H_INCLUDED
-
-#include <linux/types.h>
-#include "kgd_kfd_interface.h"
-
-struct radeon_device;
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE110_H__
-int radeon_kfd_init(void);
-void radeon_kfd_fini(void);
+struct command_table_helper;
-void radeon_kfd_suspend(struct radeon_device *rdev);
-int radeon_kfd_resume(struct radeon_device *rdev);
-void radeon_kfd_interrupt(struct radeon_device *rdev,
- const void *ih_ring_entry);
-void radeon_kfd_device_probe(struct radeon_device *rdev);
-void radeon_kfd_device_init(struct radeon_device *rdev);
-void radeon_kfd_device_fini(struct radeon_device *rdev);
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void);
-#endif /* RADEON_KFD_H_INCLUDED */
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
new file mode 100644
index 000000000000..0237ae575068
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper2.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated to DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ }
+
+ return 0;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_COMBO_PHY_PLL0:
+ *atom_pll_id = ATOM_COMBOPHY_PLL0;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL1:
+ *atom_pll_id = ATOM_COMBOPHY_PLL1;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL2:
+ *atom_pll_id = ATOM_COMBOPHY_PLL2;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL3:
+ *atom_pll_id = ATOM_COMBOPHY_PLL3;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL4:
+ *atom_pll_id = ATOM_COMBOPHY_PLL4;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL5:
+ *atom_pll_id = ATOM_COMBOPHY_PLL5;
+ break;
+ case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_GCK_DFS;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static bool dc_clock_type_to_atom(
+ enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type)
+{
+ bool retCode = true;
+
+ if (atom_clock_type != NULL) {
+ switch (id) {
+ case DCECLOCK_TYPE_DISPLAY_CLOCK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+ break;
+
+ case DCECLOCK_TYPE_DPREFCLK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+ break;
+
+ default:
+ ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+ break;
+ }
+ }
+
+ return retCode;
+}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+ uint8_t atomColorDepth = 0;
+
+ switch (id) {
+ case TRANSMITTER_COLOR_DEPTH_24:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_30:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+ break;
+ }
+
+ return atomColorDepth;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom2,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom2,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom2,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Initialize command table helper functions
+ *
+ * @param
+ * const struct command_table_helper **h - [out] struct of functions
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
new file mode 100644
index 000000000000..abf28a06f5bc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
new file mode 100644
index 000000000000..452034f83e4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated to DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ }
+
+ return 0;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_COMBO_PHY_PLL0:
+ *atom_pll_id = ATOM_COMBOPHY_PLL0;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL1:
+ *atom_pll_id = ATOM_COMBOPHY_PLL1;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL2:
+ *atom_pll_id = ATOM_COMBOPHY_PLL2;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL3:
+ *atom_pll_id = ATOM_COMBOPHY_PLL3;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL4:
+ *atom_pll_id = ATOM_COMBOPHY_PLL4;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL5:
+ *atom_pll_id = ATOM_COMBOPHY_PLL5;
+ break;
+ case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_GCK_DFS;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static bool dc_clock_type_to_atom(
+ enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type)
+{
+ bool retCode = true;
+
+ if (atom_clock_type != NULL) {
+ switch (id) {
+ case DCECLOCK_TYPE_DISPLAY_CLOCK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+ break;
+
+ case DCECLOCK_TYPE_DPREFCLK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+ break;
+
+ default:
+ ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+ break;
+ }
+ }
+
+ return retCode;
+}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+ uint8_t atomColorDepth = 0;
+
+ switch (id) {
+ case TRANSMITTER_COLOR_DEPTH_24:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_30:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+ break;
+ }
+
+ return atomColorDepth;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter = NULL,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Initialize command table helper functions
+ *
+ * @param
+ * const struct command_table_helper **h - [out] struct of functions
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
new file mode 100644
index 000000000000..dc3660951355
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
new file mode 100644
index 000000000000..8b30b558cf1f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/grph_object_id.h"
+#include "include/grph_object_defs.h"
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ *atom_pll_id = ATOM_PPLL1;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ *atom_pll_id = ATOM_PPLL2;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_EXT_PLL1;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ /* for VCE encoding,
+ * we need to pass in ATOM_PPLL_INVALID
+ */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ /* When programming DP DTO PLL ID should be invalid */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ BREAK_TO_DEBUGGER(); /* check when this will happen! */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ }
+
+ return atom_dig_encoder_sel;
+}
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandle action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter =
+ dal_cmd_table_helper_assign_control_parameter,
+ .clock_source_id_to_ref_clk_src =
+ dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
+ .transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
new file mode 100644
index 000000000000..e675c359e306
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+
+struct command_table_helper;
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
new file mode 100644
index 000000000000..7959e382ed28
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -0,0 +1,39 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'calcs' sub-component of DAL.
+# It calculates Bandwidth and Watermarks values for HW programming
+#
+
+CFLAGS_dcn_calcs.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dcn_calc_auto.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dcn_calc_math.o := -mhard-float -msse -mpreferred-stack-boundary=4 -Wno-tautological-compare
+
+BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
+
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
+endif
+
+AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
new file mode 100644
index 000000000000..6ca288fb5fb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "bw_fixed.h"
+
+
+#define MIN_I64 \
+ (int64_t)(-(1LL << 63))
+
+#define MAX_I64 \
+ (int64_t)((1ULL << 63) - 1)
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+static uint64_t abs_i64(int64_t arg)
+{
+ if (arg >= 0)
+ return (uint64_t)(arg);
+ else
+ return (uint64_t)(-arg);
+}
+
+struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
+{
+ struct bw_fixed res;
+ ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
+ res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
+ return res;
+}
+
+struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
+{
+ struct bw_fixed res;
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+ uint64_t arg1_value;
+ uint64_t arg2_value;
+ uint64_t remainder;
+
+ /* determine integer part */
+ uint64_t res_value;
+
+ ASSERT(denominator != 0);
+
+ arg1_value = abs_i64(numerator);
+ arg2_value = abs_i64(denominator);
+ res_value = div64_u64_rem(arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= BW_FIXED_MAX_I32);
+
+ /* determine fractional part */
+ {
+ uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;
+
+ do
+ {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value)
+ {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint64_t summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= MAX_I64 - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (int64_t)(res_value);
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+ return res;
+}
+
+struct bw_fixed bw_floor2(
+ const struct bw_fixed arg,
+ const struct bw_fixed significance)
+{
+ struct bw_fixed result;
+ int64_t multiplicand;
+
+ multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+ result.value = abs_i64(significance.value) * multiplicand;
+ ASSERT(abs_i64(result.value) <= abs_i64(arg.value));
+ return result;
+}
+
+struct bw_fixed bw_ceil2(
+ const struct bw_fixed arg,
+ const struct bw_fixed significance)
+{
+ struct bw_fixed result;
+ int64_t multiplicand;
+
+ multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+ result.value = abs_i64(significance.value) * multiplicand;
+ if (abs_i64(result.value) < abs_i64(arg.value)) {
+ if (arg.value < 0)
+ result.value -= abs_i64(significance.value);
+ else
+ result.value += abs_i64(significance.value);
+ }
+ return result;
+}
+
+struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ uint64_t arg1_value = abs_i64(arg1.value);
+ uint64_t arg2_value = abs_i64(arg2.value);
+
+ uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value);
+ uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value);
+
+ uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ uint64_t tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= BW_FIXED_MAX_I32);
+
+ res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value));
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+ return res;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
new file mode 100644
index 000000000000..7243c37f569e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "custom_float.h"
+
+
+static bool build_custom_float(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ bool *negative,
+ uint32_t *mantissa,
+ uint32_t *exponenta)
+{
+ uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+ const struct fixed31_32 mantissa_constant_plus_max_fraction =
+ dal_fixed31_32_from_fraction(
+ (1LL << (format->mantissa_bits + 1)) - 1,
+ 1LL << format->mantissa_bits);
+
+ struct fixed31_32 mantiss;
+
+ if (dal_fixed31_32_eq(
+ value,
+ dal_fixed31_32_zero)) {
+ *negative = false;
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ if (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_zero)) {
+ *negative = format->sign;
+ value = dal_fixed31_32_neg(value);
+ } else {
+ *negative = false;
+ }
+
+ if (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_one)) {
+ uint32_t i = 1;
+
+ do {
+ value = dal_fixed31_32_shl(value, 1);
+ ++i;
+ } while (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_one));
+
+ --i;
+
+ if (exp_offset <= i) {
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ *exponenta = exp_offset - i;
+ } else if (dal_fixed31_32_le(
+ mantissa_constant_plus_max_fraction,
+ value)) {
+ uint32_t i = 1;
+
+ do {
+ value = dal_fixed31_32_shr(value, 1);
+ ++i;
+ } while (dal_fixed31_32_lt(
+ mantissa_constant_plus_max_fraction,
+ value));
+
+ *exponenta = exp_offset + i - 1;
+ } else {
+ *exponenta = exp_offset;
+ }
+
+ mantiss = dal_fixed31_32_sub(
+ value,
+ dal_fixed31_32_one);
+
+ if (dal_fixed31_32_lt(
+ mantiss,
+ dal_fixed31_32_zero) ||
+ dal_fixed31_32_lt(
+ dal_fixed31_32_one,
+ mantiss))
+ mantiss = dal_fixed31_32_zero;
+ else
+ mantiss = dal_fixed31_32_shl(
+ mantiss,
+ format->mantissa_bits);
+
+ *mantissa = dal_fixed31_32_floor(mantiss);
+
+ return true;
+}
+
+static bool setup_custom_float(
+ const struct custom_float_format *format,
+ bool negative,
+ uint32_t mantissa,
+ uint32_t exponenta,
+ uint32_t *result)
+{
+ uint32_t i = 0;
+ uint32_t j = 0;
+
+ uint32_t value = 0;
+
+ /* verification code:
+ * once calculation is ok we can remove it
+ */
+
+ const uint32_t mantissa_mask =
+ (1 << (format->mantissa_bits + 1)) - 1;
+
+ const uint32_t exponenta_mask =
+ (1 << (format->exponenta_bits + 1)) - 1;
+
+ if (mantissa & ~mantissa_mask) {
+ BREAK_TO_DEBUGGER();
+ mantissa = mantissa_mask;
+ }
+
+ if (exponenta & ~exponenta_mask) {
+ BREAK_TO_DEBUGGER();
+ exponenta = exponenta_mask;
+ }
+
+ /* end of verification code */
+
+ while (i < format->mantissa_bits) {
+ uint32_t mask = 1 << i;
+
+ if (mantissa & mask)
+ value |= mask;
+
+ ++i;
+ }
+
+ while (j < format->exponenta_bits) {
+ uint32_t mask = 1 << j;
+
+ if (exponenta & mask)
+ value |= mask << i;
+
+ ++j;
+ }
+
+ if (negative && format->sign)
+ value |= 1 << (i + j);
+
+ *result = value;
+
+ return true;
+}
+
+bool convert_to_custom_float_format(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ uint32_t *result)
+{
+ uint32_t mantissa;
+ uint32_t exponenta;
+ bool negative;
+
+ return build_custom_float(
+ value, format, &negative, &mantissa, &exponenta) &&
+ setup_custom_float(
+ format, negative, mantissa, exponenta, result);
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
new file mode 100644
index 000000000000..6347712db834
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -0,0 +1,3257 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+/*******************************************************************************
+ * Private Functions
+ ******************************************************************************/
+
+static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id)
+{
+ switch (asic_id.chip_family) {
+
+ case FAMILY_CZ:
+ if (ASIC_REV_IS_STONEY(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_STONEY;
+ return BW_CALCS_VERSION_CARRIZO;
+
+ case FAMILY_VI:
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS10;
+ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS11;
+ return BW_CALCS_VERSION_INVALID;
+
+ case FAMILY_AI:
+ return BW_CALCS_VERSION_VEGA10;
+
+ default:
+ return BW_CALCS_VERSION_INVALID;
+ }
+}
+
+static void calculate_bandwidth(
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ struct bw_calcs_data *data)
+
+{
+ const int32_t pixels_per_chunk = 512;
+ const int32_t high = 2;
+ const int32_t mid = 1;
+ const int32_t low = 0;
+ const uint32_t s_low = 0;
+ const uint32_t s_mid1 = 1;
+ const uint32_t s_mid2 = 2;
+ const uint32_t s_mid3 = 3;
+ const uint32_t s_mid4 = 4;
+ const uint32_t s_mid5 = 5;
+ const uint32_t s_mid6 = 6;
+ const uint32_t s_high = 7;
+ const uint32_t bus_efficiency = 1;
+ const uint32_t dmif_chunk_buff_margin = 1;
+
+ uint32_t max_chunks_fbc_mode;
+ int32_t num_cursor_lines;
+
+ int32_t i, j, k;
+ struct bw_fixed yclk[3];
+ struct bw_fixed sclk[8];
+ bool d0_underlay_enable;
+ bool d1_underlay_enable;
+ bool fbc_enabled;
+ bool lpt_enabled;
+ enum bw_defines sclk_message;
+ enum bw_defines yclk_message;
+ enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
+ enum bw_defines tiling_mode[maximum_number_of_surfaces];
+ enum bw_defines surface_type[maximum_number_of_surfaces];
+ enum bw_defines voltage;
+ enum bw_defines pipe_check;
+ enum bw_defines hsr_check;
+ enum bw_defines vsr_check;
+ enum bw_defines lb_size_check;
+ enum bw_defines fbc_check;
+ enum bw_defines rotation_check;
+ enum bw_defines mode_check;
+ enum bw_defines nbp_state_change_enable_blank;
+ /*initialize variables*/
+ int32_t number_of_displays_enabled = 0;
+ int32_t number_of_displays_enabled_with_margin = 0;
+ int32_t number_of_aligned_displays_with_no_margin = 0;
+
+ yclk[low] = vbios->low_yclk;
+ yclk[mid] = vbios->mid_yclk;
+ yclk[high] = vbios->high_yclk;
+ sclk[s_low] = vbios->low_sclk;
+ sclk[s_mid1] = vbios->mid1_sclk;
+ sclk[s_mid2] = vbios->mid2_sclk;
+ sclk[s_mid3] = vbios->mid3_sclk;
+ sclk[s_mid4] = vbios->mid4_sclk;
+ sclk[s_mid5] = vbios->mid5_sclk;
+ sclk[s_mid6] = vbios->mid6_sclk;
+ sclk[s_high] = vbios->high_sclk;
+ /*''''''''''''''''''*/
+ /* surface assignment:*/
+ /* 0: d0 underlay or underlay luma*/
+ /* 1: d0 underlay chroma*/
+ /* 2: d1 underlay or underlay luma*/
+ /* 3: d1 underlay chroma*/
+ /* 4: d0 graphics*/
+ /* 5: d1 graphics*/
+ /* 6: d2 graphics*/
+ /* 7: d3 graphics, same mode as d2*/
+ /* 8: d4 graphics, same mode as d2*/
+ /* 9: d5 graphics, same mode as d2*/
+ /* ...*/
+ /* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/
+ /* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/
+ /* underlay luma and chroma surface parameters from spreadsheet*/
+
+
+
+
+ if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
+ else {
+ d0_underlay_enable = 1;
+ }
+ if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
+ else {
+ d1_underlay_enable = 1;
+ }
+ data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ surface_type[0] = bw_def_underlay420_luma;
+ surface_type[2] = bw_def_underlay420_luma;
+ data->bytes_per_pixel[0] = 1;
+ data->bytes_per_pixel[2] = 1;
+ surface_type[1] = bw_def_underlay420_chroma;
+ surface_type[3] = bw_def_underlay420_chroma;
+ data->bytes_per_pixel[1] = 2;
+ data->bytes_per_pixel[3] = 2;
+ data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component;
+ data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component;
+ break;
+ case bw_def_422:
+ surface_type[0] = bw_def_underlay422;
+ surface_type[2] = bw_def_underlay422;
+ data->bytes_per_pixel[0] = 2;
+ data->bytes_per_pixel[2] = 2;
+ data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component;
+ data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component;
+ break;
+ default:
+ surface_type[0] = bw_def_underlay444;
+ surface_type[2] = bw_def_underlay444;
+ data->bytes_per_pixel[0] = 4;
+ data->bytes_per_pixel[2] = 4;
+ data->lb_size_per_component[0] = dceip->lb_size_per_component444;
+ data->lb_size_per_component[2] = dceip->lb_size_per_component444;
+ break;
+ }
+ if (d0_underlay_enable) {
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ data->enable[0] = 1;
+ data->enable[1] = 1;
+ break;
+ default:
+ data->enable[0] = 1;
+ data->enable[1] = 0;
+ break;
+ }
+ }
+ else {
+ data->enable[0] = 0;
+ data->enable[1] = 0;
+ }
+ if (d1_underlay_enable) {
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ data->enable[2] = 1;
+ data->enable[3] = 1;
+ break;
+ default:
+ data->enable[2] = 1;
+ data->enable[3] = 0;
+ break;
+ }
+ }
+ else {
+ data->enable[2] = 0;
+ data->enable[3] = 0;
+ }
+ data->use_alpha[0] = 0;
+ data->use_alpha[1] = 0;
+ data->use_alpha[2] = 0;
+ data->use_alpha[3] = 0;
+ data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable;
+ /*underlay0 same and graphics display pipe0*/
+ data->interlace_mode[0] = data->interlace_mode[4];
+ data->interlace_mode[1] = data->interlace_mode[4];
+ /*underlay1 same and graphics display pipe1*/
+ data->interlace_mode[2] = data->interlace_mode[5];
+ data->interlace_mode[3] = data->interlace_mode[5];
+ /*underlay0 same and graphics display pipe0*/
+ data->h_total[0] = data->h_total[4];
+ data->v_total[0] = data->v_total[4];
+ data->h_total[1] = data->h_total[4];
+ data->v_total[1] = data->v_total[4];
+ /*underlay1 same and graphics display pipe1*/
+ data->h_total[2] = data->h_total[5];
+ data->v_total[2] = data->v_total[5];
+ data->h_total[3] = data->h_total[5];
+ data->v_total[3] = data->v_total[5];
+ /*underlay0 same and graphics display pipe0*/
+ data->pixel_rate[0] = data->pixel_rate[4];
+ data->pixel_rate[1] = data->pixel_rate[4];
+ /*underlay1 same and graphics display pipe1*/
+ data->pixel_rate[2] = data->pixel_rate[5];
+ data->pixel_rate[3] = data->pixel_rate[5];
+ if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) {
+ tiling_mode[0] = bw_def_linear;
+ tiling_mode[1] = bw_def_linear;
+ tiling_mode[2] = bw_def_linear;
+ tiling_mode[3] = bw_def_linear;
+ }
+ else {
+ tiling_mode[0] = bw_def_landscape;
+ tiling_mode[1] = bw_def_landscape;
+ tiling_mode[2] = bw_def_landscape;
+ tiling_mode[3] = bw_def_landscape;
+ }
+ data->lb_bpc[0] = data->underlay_lb_bpc;
+ data->lb_bpc[1] = data->underlay_lb_bpc;
+ data->lb_bpc[2] = data->underlay_lb_bpc;
+ data->lb_bpc[3] = data->underlay_lb_bpc;
+ data->compression_rate[0] = bw_int_to_fixed(1);
+ data->compression_rate[1] = bw_int_to_fixed(1);
+ data->compression_rate[2] = bw_int_to_fixed(1);
+ data->compression_rate[3] = bw_int_to_fixed(1);
+ data->access_one_channel_only[0] = 0;
+ data->access_one_channel_only[1] = 0;
+ data->access_one_channel_only[2] = 0;
+ data->access_one_channel_only[3] = 0;
+ data->cursor_width_pixels[0] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[1] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[2] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[3] = bw_int_to_fixed(0);
+ /* graphics surface parameters from spreadsheet*/
+ fbc_enabled = 0;
+ lpt_enabled = 0;
+ for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
+ if (i < data->number_of_displays + 4) {
+ if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 4 && data->d0_underlay_mode == bw_def_blend) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 1;
+ }
+ else if (i == 4) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 5 && data->d1_underlay_mode == bw_def_underlay_only) {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 5 && data->d1_underlay_mode == bw_def_blend) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 1;
+ }
+ else {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 0;
+ }
+ }
+ else {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable;
+ surface_type[i] = bw_def_graphics;
+ data->lb_size_per_component[i] = dceip->lb_size_per_component444;
+ if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) {
+ tiling_mode[i] = bw_def_linear;
+ }
+ else {
+ tiling_mode[i] = bw_def_tiled;
+ }
+ data->lb_bpc[i] = data->graphics_lb_bpc;
+ if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) {
+ data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate);
+ data->access_one_channel_only[i] = data->lpt_en[i];
+ }
+ else {
+ data->compression_rate[i] = bw_int_to_fixed(1);
+ data->access_one_channel_only[i] = 0;
+ }
+ if (data->fbc_en[i] == 1) {
+ fbc_enabled = 1;
+ if (data->lpt_en[i] == 1) {
+ lpt_enabled = 1;
+ }
+ }
+ data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
+ }
+ /* display_write_back420*/
+ data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0;
+ data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0;
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->enable[maximum_number_of_surfaces - 2] = 1;
+ data->enable[maximum_number_of_surfaces - 1] = 1;
+ }
+ else {
+ data->enable[maximum_number_of_surfaces - 2] = 0;
+ data->enable[maximum_number_of_surfaces - 1] = 0;
+ }
+ surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma;
+ surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma;
+ data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component;
+ data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1;
+ data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2;
+ data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5];
+ data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5];
+ data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+ data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+ tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear;
+ tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear;
+ data->lb_bpc[maximum_number_of_surfaces - 2] = 8;
+ data->lb_bpc[maximum_number_of_surfaces - 1] = 8;
+ data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0;
+ data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0;
+ /*assume display pipe1 has dwb enabled*/
+ data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5];
+ data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5];
+ data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5];
+ data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5];
+ data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5];
+ data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5];
+ data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5];
+ data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5];
+ data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5];
+ data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5];
+ data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5];
+ data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5];
+ data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono;
+ data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono;
+ data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+ data->use_alpha[maximum_number_of_surfaces - 2] = 0;
+ data->use_alpha[maximum_number_of_surfaces - 1] = 0;
+ /*mode check calculations:*/
+ /* mode within dce ip capabilities*/
+ /* fbc*/
+ /* hsr*/
+ /* vsr*/
+ /* lb size*/
+ /*effective scaling source and ratios:*/
+ /*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/
+ /*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/
+ /*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/
+ /*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/
+ /*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/
+ /*in interlace mode there is 2:1 vertical downscaling for each field*/
+ /*in panning or bezel adjustment mode the source width has an extra 128 pixels*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) {
+ data->h_taps[i] = bw_int_to_fixed(1);
+ data->v_taps[i] = bw_int_to_fixed(1);
+ }
+ if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) {
+ data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2));
+ data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2));
+ data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2));
+ data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2));
+ data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2));
+ }
+ else {
+ data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i];
+ data->src_width_after_surface_type = data->src_width[i];
+ data->src_height_after_surface_type = data->src_height[i];
+ data->hsr_after_surface_type = data->h_scale_ratio[i];
+ data->vsr_after_surface_type = data->v_scale_ratio[i];
+ }
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->src_width_after_rotation = data->src_height_after_surface_type;
+ data->src_height_after_rotation = data->src_width_after_surface_type;
+ data->hsr_after_rotation = data->vsr_after_surface_type;
+ data->vsr_after_rotation = data->hsr_after_surface_type;
+ }
+ else {
+ data->src_width_after_rotation = data->src_width_after_surface_type;
+ data->src_height_after_rotation = data->src_height_after_surface_type;
+ data->hsr_after_rotation = data->hsr_after_surface_type;
+ data->vsr_after_rotation = data->vsr_after_surface_type;
+ }
+ switch (data->stereo_mode[i]) {
+ case bw_def_top_bottom:
+ data->source_width_pixels[i] = data->src_width_after_rotation;
+ data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation);
+ data->hsr_after_stereo = data->hsr_after_rotation;
+ data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation);
+ break;
+ case bw_def_side_by_side:
+ data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation);
+ data->source_height_pixels = data->src_height_after_rotation;
+ data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation);
+ data->vsr_after_stereo = data->vsr_after_rotation;
+ break;
+ default:
+ data->source_width_pixels[i] = data->src_width_after_rotation;
+ data->source_height_pixels = data->src_height_after_rotation;
+ data->hsr_after_stereo = data->hsr_after_rotation;
+ data->vsr_after_stereo = data->vsr_after_rotation;
+ break;
+ }
+ data->hsr[i] = data->hsr_after_stereo;
+ if (data->interlace_mode[i]) {
+ data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2));
+ }
+ else {
+ data->vsr[i] = data->vsr_after_stereo;
+ }
+ if (data->panning_and_bezel_adjustment != bw_def_none) {
+ data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256));
+ }
+ else {
+ data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128));
+ }
+ data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels;
+ }
+ }
+ /*mode support checks:*/
+ /*the number of graphics and underlay pipes is limited by the ip support*/
+ /*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/
+ /*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/
+ /*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/
+ /*the number of lines in the line buffer has to exceed the number of vertical taps*/
+ /*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/
+ /*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
+ /*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
+ /*frame buffer compression is not supported with stereo mode, rotation, or non- 888 formats*/
+ /*rotation is not supported with linear of stereo modes*/
+ if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) {
+ pipe_check = bw_def_ok;
+ }
+ else {
+ pipe_check = bw_def_notok;
+ }
+ hsr_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
+ if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
+ hsr_check = bw_def_hsr_mtn_4;
+ }
+ else {
+ if (bw_mtn(data->hsr[i], data->h_taps[i])) {
+ hsr_check = bw_def_hsr_mtn_h_taps;
+ }
+ else {
+ if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
+ hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr;
+ }
+ }
+ }
+ }
+ }
+ }
+ vsr_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) {
+ if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) {
+ vsr_check = bw_def_vsr_mtn_4;
+ }
+ else {
+ if (bw_mtn(data->vsr[i], data->v_taps[i])) {
+ vsr_check = bw_def_vsr_mtn_v_taps;
+ }
+ }
+ }
+ }
+ }
+ lb_size_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
+ data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
+ }
+ else {
+ data->source_width_in_lb = data->source_width_pixels[i];
+ }
+ switch (data->lb_bpc[i]) {
+ case 8:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+ break;
+ case 10:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+ break;
+ default:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48));
+ break;
+ }
+ data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
+ /*clamp the partitions to the maxium number supported by the lb*/
+ if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+ data->lb_partitions_max[i] = bw_int_to_fixed(10);
+ }
+ else {
+ data->lb_partitions_max[i] = bw_int_to_fixed(7);
+ }
+ data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]);
+ if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) {
+ lb_size_check = bw_def_notok;
+ }
+ }
+ }
+ fbc_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) {
+ fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo;
+ }
+ }
+ rotation_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && (tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) {
+ rotation_check = bw_def_invalid_linear_or_stereo_mode;
+ }
+ }
+ }
+ if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) {
+ mode_check = bw_def_ok;
+ }
+ else {
+ mode_check = bw_def_notok;
+ }
+ /*number of memory channels for write-back client*/
+ data->number_of_dram_wrchannels = vbios->number_of_dram_channels;
+ data->number_of_dram_channels = vbios->number_of_dram_channels;
+ /*modify number of memory channels if lpt mode is enabled*/
+ /* low power tiling mode register*/
+ /* 0 = use channel 0*/
+ /* 1 = use channel 0 and 1*/
+ /* 2 = use channel 0,1,2,3*/
+ if ((fbc_enabled == 1 && lpt_enabled == 1)) {
+ data->dram_efficiency = bw_int_to_fixed(1);
+ if (dceip->low_power_tiling_mode == 0) {
+ data->number_of_dram_channels = 1;
+ }
+ else if (dceip->low_power_tiling_mode == 1) {
+ data->number_of_dram_channels = 2;
+ }
+ else if (dceip->low_power_tiling_mode == 2) {
+ data->number_of_dram_channels = 4;
+ }
+ else {
+ data->number_of_dram_channels = 1;
+ }
+ }
+ else {
+ data->dram_efficiency = bw_frc_to_fixed(8, 10);
+ }
+ /*memory request size and latency hiding:*/
+ /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
+ /*the display write-back requests are single line*/
+ /*for tiled graphics surfaces, or undelay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. only half is useful of the bytes in the request size in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/
+ /*for undelay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) {
+ if ((i < 4)) {
+ /*underlay portrait tiling mode is not supported*/
+ data->orthogonal_rotation[i] = 1;
+ }
+ else {
+ /*graphics portrait tiling mode*/
+ if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ }
+ else {
+ if ((i < 4)) {
+ /*underlay landscape tiling mode is only supported*/
+ if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ else {
+ /*graphics landscape tiling mode*/
+ if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ }
+ if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) {
+ data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling;
+ }
+ else {
+ data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling;
+ }
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(1);
+ }
+ else if (tiling_mode[i] == bw_def_linear) {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ }
+ else {
+ if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) {
+ switch (data->bytes_per_pixel[i]) {
+ case 8:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ if (data->orthogonal_rotation[i]) {
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+ }
+ else {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ }
+ break;
+ case 4:
+ if (data->orthogonal_rotation[i]) {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+ }
+ else {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ }
+ break;
+ case 2:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+ break;
+ default:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+ break;
+ }
+ }
+ else {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ if (data->orthogonal_rotation[i]) {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ }
+ else {
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ break;
+ case 2:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ break;
+ default:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ /*requested peak bandwidth:*/
+ /*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/
+ /*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/
+ /*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/
+ /**/
+ /*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/
+ /*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/
+ /**/
+ /*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/
+ /*rounded up to even and divided by the line times for initialization, which is normally three.*/
+ /*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/
+ /*rounded up to line pairs if not doing line buffer prefetching.*/
+ /**/
+ /*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/
+ /*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/
+ /**/
+ /*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/
+ /*vertical scale ratio and the number of vertical taps increased by one. add one more for possible odd line*/
+ /*panning/bezel adjustment mode.*/
+ /**/
+ /*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/
+ /*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/
+ /*furthermore, there is only one line time for initialization.*/
+ /**/
+ /*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/
+ /*the ceiling of the vertical scale ratio.*/
+ /**/
+ /*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.'*/
+ /**/
+ /*the horizontal blank and chunk granularity factor is indirectly used indicate the interval of time required to transfer the source pixels.*/
+ /*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/
+ /*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1));
+ if (data->panning_and_bezel_adjustment == bw_def_any_lines) {
+ data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
+ }
+ if (data->stereo_mode[i] == bw_def_top_bottom) {
+ v_filter_init_mode[i] = bw_def_manual;
+ data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
+ }
+ else {
+ v_filter_init_mode[i] = bw_def_auto;
+ }
+ if (data->stereo_mode[i] == bw_def_top_bottom) {
+ data->num_lines_at_frame_start = bw_int_to_fixed(1);
+ }
+ else {
+ data->num_lines_at_frame_start = bw_int_to_fixed(3);
+ }
+ if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) {
+ data->line_buffer_prefetch[i] = 0;
+ }
+ else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) {
+ data->line_buffer_prefetch[i] = 1;
+ }
+ else {
+ data->line_buffer_prefetch[i] = 0;
+ }
+ data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start);
+ if (data->line_buffer_prefetch[i] == 1) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]);
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1);
+ } else if (bw_leq(data->vsr[i],
+ bw_frc_to_fixed(4, 3))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3));
+ } else if (bw_leq(data->vsr[i],
+ bw_frc_to_fixed(6, 4))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4));
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2);
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3);
+ }
+ else {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4);
+ }
+ if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) {
+ data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1);
+ }
+ else {
+ data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
+ }
+ data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]);
+ data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]);
+ }
+ }
+ /*outstanding chunk request limit*/
+ /*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/
+ /*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/
+ /**/
+ /*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/
+ /**/
+ /*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/
+ /*and underlay.*/
+ /**/
+ /*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/
+ /*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/
+ /*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/
+ /*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/
+ /*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/
+ /*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) {
+ data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin;
+ }
+ else {
+ data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin;
+ }
+ }
+ if (data->fbc_en[i] == 1) {
+ max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin;
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ switch (surface_type[i]) {
+ case bw_def_display_write_back420_luma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size);
+ break;
+ case bw_def_display_write_back420_chroma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size);
+ break;
+ case bw_def_underlay420_luma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+ break;
+ case bw_def_underlay420_chroma:
+ data->data_buffer_size[i] = bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2));
+ break;
+ case bw_def_underlay422:case bw_def_underlay444:
+ if (data->orthogonal_rotation[i] == 0) {
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+ }
+ else {
+ data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size));
+ }
+ break;
+ default:
+ if (data->fbc_en[i] == 1) {
+ /*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/
+ if (data->number_of_displays == 1) {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+ }
+ else {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+ }
+ }
+ else {
+ /*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/
+ if (data->number_of_displays == 1) {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+ }
+ else {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+ }
+ }
+ break;
+ }
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+ data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+ }
+ else {
+ data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ }
+ }
+ data->min_dmif_size_in_time = bw_int_to_fixed(9999);
+ data->min_mcifwr_size_in_time = bw_int_to_fixed(9999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) {
+ data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+ }
+ }
+ else {
+ if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) {
+ data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+ }
+ }
+ }
+ }
+ data->total_requests_for_dmif_size = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i]));
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) {
+ data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i]));
+ }
+ else {
+ data->adjusted_data_buffer_size[i] = data->data_buffer_size[i];
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) {
+ /*set maximum chunk limit if only one graphic pipe is enabled*/
+ data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
+ }
+ else {
+ data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
+ /*clamp maximum chunk limit in the graphic display pipe*/
+ if ((i >= 4)) {
+ data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
+ }
+ }
+ }
+ }
+ /*outstanding pte request limit*/
+ /*in tiling mode with no rotation the sg pte requests are 8 useful pt_es, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/
+ /*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/
+ /*in linear mode the pte requests are 8 useful pt_es, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/
+ /*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/
+ /*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/
+ /*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/
+ /*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/
+ /*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful pt_es per pte request*/
+ if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) {
+ data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+ }
+ else {
+ data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+ if (tiling_mode[i] == bw_def_linear) {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+ data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(1);
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(1);
+ data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
+ }
+ else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ case 2:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ default:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ }
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+ data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+ }
+ else {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(1);
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ case 2:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ default:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ }
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+ data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+ }
+ data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request);
+ data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]);
+ data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]);
+ if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) {
+ data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank;
+ }
+ else {
+ data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1))));
+ }
+ }
+ }
+ /*pitch padding recommended for efficiency in linear mode*/
+ /*in linear mode graphics or underlay with scatter gather, a pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/
+ /*if that is the case it is recommended to pad the pitch by at least 256 pixels*/
+ data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels));
+
+ /*pixel transfer time*/
+ /*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/
+ /*for dmif, pte and cursor requests have to be included.*/
+ /*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/
+ /*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/
+ /*the page close-open time is determined by trc and the number of page close-opens*/
+ /*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/
+ /*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/
+ /*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/
+ /*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/
+ /*cursor requests outstanding are limited to a group of two source lines. each group costs a page close-open time for dmif reads*/
+ /*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/
+ /*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/
+ /*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/
+ /*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/
+ /*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the fist pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/
+ /*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/
+ /*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/
+ /*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/
+ data->cursor_total_data = bw_int_to_fixed(0);
+ data->cursor_total_request_groups = bw_int_to_fixed(0);
+ data->scatter_gather_total_pte_requests = bw_int_to_fixed(0);
+ data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4)));
+ if (dceip->large_cursor == 1) {
+ data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1)));
+ }
+ else {
+ data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1)));
+ }
+ if (data->scatter_gather_enable_for_pipe[i]) {
+ data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]);
+ data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1)));
+ }
+ }
+ }
+ data->tile_width_in_pixels = bw_int_to_fixed(8);
+ data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+ data->mcifwr_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) {
+ data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i])));
+ }
+ else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) {
+ data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice;
+ }
+ else {
+ data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i];
+ }
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+ }
+ else {
+ data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+ }
+ }
+ }
+ data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000));
+ data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ }
+ }
+ data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i]));
+ }
+ }
+ }
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
+ data->total_display_reads_required_data = bw_int_to_fixed(0);
+ data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
+ data->total_display_writes_required_data = bw_int_to_fixed(0);
+ data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i];
+ /*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/
+ /*pseudo-channel may be read independently of one another.*/
+ /*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/
+ /*the 64 or 32 byte sized data is stored in one pseudo-channel.*/
+ /*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/
+ /*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/
+ /*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/
+ /*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/
+ /*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/
+ /*the memory efficiency will be 50% for the 32 byte sized data.*/
+ if (vbios->memory_type == bw_def_hbm) {
+ data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i];
+ }
+ else {
+ data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1)));
+ }
+ data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data);
+ data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data);
+ }
+ else {
+ data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]);
+ data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1))));
+ }
+ }
+ }
+ data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+ data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) {
+ data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]);
+ }
+ else {
+ if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) {
+ data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512);
+ }
+ else {
+ data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0);
+ }
+ }
+ data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i])));
+ data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
+ }
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ for (j = 0; j <= 2; j++) {
+ for (k = 0; k <= 7; k++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ /*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/
+ /*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/
+ data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor)))));
+ data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i]));
+ /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
+ /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
+ /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+ /*immediately serviced without a gap in the urgent requests.*/
+ /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+ if (surface_type[i] == bw_def_graphics) {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ if (data->use_alpha[i] == 1) {
+ data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+ }
+ }
+ else {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = bw_int_to_fixed(3);
+ break;
+ }
+ }
+ if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+ data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+ }
+ else {
+ data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+ }
+ data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))));
+ }
+ else {
+ data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]));
+ /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
+ /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
+ /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+ /*immediately serviced without a gap in the urgent requests.*/
+ /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+ data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))));
+ }
+ }
+ }
+ }
+ }
+ /*cpu c-state and p-state change enable*/
+ /*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/
+ /*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/
+ /*condition for the blackout duration:*/
+ /* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/
+ /*condition for the blackout recovery:*/
+ /* recovery time > dmif burst time + 2 * urgent latency*/
+ /* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/
+ /* / (dispclk - display bw)*/
+ /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
+ /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
+ if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) {
+ data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+ }
+ else {
+ data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+ }
+ }
+ else {
+ data->cursor_latency_hiding[i] = bw_int_to_fixed(9999);
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ }
+ else {
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ }
+ data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]);
+ }
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999);
+ data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0);
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+ data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k]))));
+ if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+ }
+ else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+ }
+ }
+ else {
+ data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+ data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+ }
+ else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) {
+ data->cpup_state_change_enable = bw_def_yes;
+ if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) {
+ data->cpuc_state_change_enable = bw_def_yes;
+ }
+ else {
+ data->cpuc_state_change_enable = bw_def_no;
+ }
+ }
+ else {
+ data->cpup_state_change_enable = bw_def_no;
+ data->cpuc_state_change_enable = bw_def_no;
+ }
+ /*nb p-state change enable*/
+ /*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/
+ /*below the maximum.*/
+ /*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/
+ /*minus the dmif burst time, minus the source line transfer time*/
+ /*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/
+ /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ }
+ else {
+ /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ }
+ data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
+ }
+ }
+ /*initialize variables*/
+ number_of_displays_enabled = 0;
+ number_of_displays_enabled_with_margin = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ number_of_displays_enabled = number_of_displays_enabled + 1;
+ }
+ data->display_pstate_change_enable[k] = 0;
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
+ data->dram_speed_change_margin = bw_int_to_fixed(9999);
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0);
+ data->num_displays_with_margin[i][j] = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+ if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+ /*determine the minimum dram clock change margin for each set of clock frequencies*/
+ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+ /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->display_pstate_change_enable[k] = 1;
+ data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ }
+ }
+ }
+ else {
+ data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+ if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+ /*determine the minimum dram clock change margin for each display pipe*/
+ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+ /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->display_pstate_change_enable[k] = 1;
+ data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ /*determine the number of displays with margin to switch in the v_active region*/
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) {
+ number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
+ }
+ }
+ /*determine the number of displays that don't have any dram clock change margin, but*/
+ /*have the same resolution. these displays can switch in a common vblank region if*/
+ /*their frames are aligned.*/
+ data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+ data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+ }
+ else {
+ data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+ data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ data->displays_with_same_mode[i] = bw_int_to_fixed(0);
+ if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
+ for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
+ if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+ data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
+ }
+ }
+ }
+ }
+ /*compute the maximum number of aligned displays with no margin*/
+ number_of_aligned_displays_with_no_margin = 0;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i]));
+ }
+ /*dram clock change is possible, if all displays have positive margin except for one display or a group of*/
+ /*aligned displays with the same timing.*/
+ /*the display(s) with the negative margin can be switched in the v_blank region while the other*/
+ /*displays are in v_blank or v_active.*/
+ if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+ data->nbp_state_change_enable = bw_def_yes;
+ }
+ else {
+ data->nbp_state_change_enable = bw_def_no;
+ }
+ /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
+ if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) {
+ nbp_state_change_enable_blank = bw_def_yes;
+ }
+ else {
+ nbp_state_change_enable_blank = bw_def_no;
+ }
+ /*required yclk(pclk)*/
+ /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queque size*/
+ /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
+ /*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/
+ data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999);
+ /* number of cursor lines stored in the cursor data return buffer*/
+ num_cursor_lines = 0;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) {
+ /*compute number of cursor lines stored in data return buffer*/
+ if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) {
+ num_cursor_lines = 4;
+ }
+ else {
+ num_cursor_lines = 2;
+ }
+ data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i]));
+ }
+ }
+ }
+ /*compute minimum time to read one chunk from the dmif buffer*/
+ if ((number_of_displays_enabled > 2)) {
+ data->chunk_request_delay = 0;
+ }
+ else {
+ data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk));
+ }
+ data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time);
+ data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay));
+ data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency);
+ data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer);
+ data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer);
+ data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips);
+ data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time);
+ if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+ data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+ yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+ data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+ yclk_message = bw_def_exceeded_allowed_page_close_open;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else {
+ data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
+ if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
+ yclk_message = bw_fixed_to_int(vbios->low_yclk);
+ data->y_clk_level = low;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
+ yclk_message = bw_fixed_to_int(vbios->mid_yclk);
+ data->y_clk_level = mid;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
+ yclk_message = bw_fixed_to_int(vbios->high_yclk);
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else {
+ yclk_message = bw_def_exceeded_allowed_maximum_bw;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ }
+ /*required sclk*/
+ /*sclk requirement only makes sense if the total pte requests fit in the scatter-gather saw queque size*/
+ /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in foresaking dram speed/nb p-state change.*/
+ /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/
+ /*for dmif, pte and cursor requests have to be included.*/
+ data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+ data->required_sclk = bw_int_to_fixed(9999);
+ sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+ data->sclk_level = s_high;
+ }
+ else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+ data->required_sclk = bw_int_to_fixed(9999);
+ sclk_message = bw_def_exceeded_allowed_page_close_open;
+ data->sclk_level = s_high;
+ }
+ else {
+ data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
+ if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_low;
+ data->sclk_level = s_low;
+ data->required_sclk = vbios->low_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid1;
+ data->required_sclk = vbios->mid1_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid2;
+ data->required_sclk = vbios->mid2_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid3;
+ data->required_sclk = vbios->mid3_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid4;
+ data->required_sclk = vbios->mid4_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid5;
+ data->required_sclk = vbios->mid5_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid6;
+ data->required_sclk = vbios->mid6_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_high])) {
+ sclk_message = bw_def_high;
+ data->sclk_level = s_high;
+ data->required_sclk = vbios->high_sclk;
+ }
+ else {
+ sclk_message = bw_def_exceeded_allowed_maximum_sclk;
+ data->sclk_level = s_high;
+ /*required_sclk = high_sclk*/
+ }
+ }
+ /*dispclk*/
+ /*if dispclk is set to the maximum, ramping is not required. dispclk required without ramping is less than the dispclk required with ramping.*/
+ /*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
+ /*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
+ /*if that does not happen either, dispclk required is the dispclk required with ramping.*/
+ /*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request thrrougput and for dram/np p-state change if enabled.*/
+ /*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
+ /*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out is not 2 or 4.*/
+ /*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
+ /*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
+ /*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
+ /*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
+ /*the scaling limits factor itself it also clamped to at least 1*/
+ /*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
+ data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] == bw_def_graphics) {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ if (data->use_alpha[i] == 1) {
+ data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+ }
+ }
+ else {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ }
+ if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+ data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+ }
+ else {
+ data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+ }
+ data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk);
+ data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput)));
+ data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput));
+ }
+ }
+ data->total_dispclk_required_with_ramping = bw_int_to_fixed(0);
+ data->total_dispclk_required_without_ramping = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) {
+ data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i];
+ }
+ if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) {
+ data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i];
+ }
+ }
+ }
+ data->total_read_request_bandwidth = bw_int_to_fixed(0);
+ data->total_write_request_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]);
+ }
+ else {
+ data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]);
+ }
+ }
+ }
+ data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency);
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+ if (data->cpuc_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+ }
+ if (data->cpup_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+ }
+ if (data->nbp_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+ }
+ if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+ data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth;
+ }
+ else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+ data->dispclk = vbios->high_voltage_max_dispclk;
+ }
+ else {
+ data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth;
+ }
+ /* required core voltage*/
+ /* the core voltage required is low if sclk, yclk(pclk)and dispclk are within the low limits*/
+ /* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/
+ /* otherwise, the core voltage required is high if the three clocks are within the high limits*/
+ /* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/
+ if (pipe_check == bw_def_notok) {
+ voltage = bw_def_na;
+ }
+ else if (mode_check == bw_def_notok) {
+ voltage = bw_def_notok;
+ }
+ else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) {
+ voltage = bw_def_0_72;
+ }
+ else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) {
+ voltage = bw_def_0_8;
+ }
+ else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) {
+ if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) {
+ voltage = bw_def_high_no_nbp_state_change;
+ }
+ else {
+ voltage = bw_def_0_9;
+ }
+ }
+ else {
+ voltage = bw_def_notok;
+ }
+ if (voltage == bw_def_0_72) {
+ data->max_phyclk = vbios->low_voltage_max_phyclk;
+ }
+ else if (voltage == bw_def_0_8) {
+ data->max_phyclk = vbios->mid_voltage_max_phyclk;
+ }
+ else {
+ data->max_phyclk = vbios->high_voltage_max_phyclk;
+ }
+ /*required blackout recovery time*/
+ data->blackout_recovery_time = bw_int_to_fixed(0);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]));
+ if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+ }
+ }
+ else {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]));
+ if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+ }
+ }
+ }
+ }
+ /*sclk deep sleep*/
+ /*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but shoudl not be set to less than the request bandwidth.*/
+ /*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the parallel rotation underlay,*/
+ /*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
+ /*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8bbp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
+ /*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+ }
+ else if (surface_type[i] == bw_def_graphics) {
+ data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ else if (data->orthogonal_rotation[i] == 0) {
+ data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+ }
+ else {
+ data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ }
+ }
+ data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
+ data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
+ }
+ }
+ }
+ data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
+ /*urgent, stutter and nb-p_state watermark*/
+ /*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
+ /*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel. it does not apply to the writeback.*/
+ /*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/
+ /*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/
+ /*blackout_duration is added to the urgent watermark*/
+ data->chunk_request_time = bw_int_to_fixed(0);
+ data->cursor_request_time = bw_int_to_fixed(0);
+ /*compute total time to request one chunk from each active display pipe*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2))))));
+ }
+ }
+ /*compute total time to request cursor data*/
+ data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level]))));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i]));
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+ data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+ data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+ /*unconditionally remove black out time from the nb p_state watermark*/
+ if ((data->display_pstate_change_enable[i] == 1)) {
+ data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+ }
+ else {
+ /*maximize the watermark to force the switch in the vb_lank region of the frame*/
+ data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+ }
+ }
+ else {
+ data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+ data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
+ data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
+ if ((data->display_pstate_change_enable[i] == 1)) {
+ data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+ }
+ else {
+ /*maximize the watermark to force the switch in the vb_lank region of the frame*/
+ data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+ }
+ }
+ }
+ }
+ /*stutter mode enable*/
+ /*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/
+ /*display pipe.*/
+ data->stutter_mode_enable = data->cpuc_state_change_enable;
+ if (data->number_of_displays > 1) {
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
+ data->stutter_mode_enable = bw_def_no;
+ }
+ }
+ }
+ }
+ /*performance metrics*/
+ /* display read access efficiency (%)*/
+ /* display write back access efficiency (%)*/
+ /* stutter efficiency (%)*/
+ /* extra underlay pitch recommended for efficiency (pixels)*/
+ /* immediate flip time (us)*/
+ /* latency for other clients due to urgent display read (us)*/
+ /* latency for other clients due to urgent display write (us)*/
+ /* average bandwidth consumed by display (no compression) (gb/s)*/
+ /* required dram bandwidth (gb/s)*/
+ /* required sclk (m_hz)*/
+ /* required rd urgent latency (us)*/
+ /* nb p-state change margin (us)*/
+ /*dmif and mcifwr dram access efficiency*/
+ /*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/
+ data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
+ if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
+ data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
+ }
+ else {
+ data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
+ }
+ /*average bandwidth*/
+ /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
+ /*the average bandwidth with compression is the same, divided by the compression ratio*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
+ }
+ }
+ data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
+ data->total_average_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
+ data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
+ }
+ }
+ /*stutter efficiency*/
+ /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
+ /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
+ /*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/
+ /*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/
+ /*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/
+ /*compute the time read all the data from the dmif buffer to the lb (dram refresh period)*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i]))));
+ data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]);
+ }
+ }
+ data->min_stutter_refresh_duration = bw_int_to_fixed(9999);
+ data->total_stutter_dmif_buffer_size = 0;
+ data->total_bytes_requested = 0;
+ data->min_stutter_dmif_buffer_size = 9999;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) {
+ data->min_stutter_refresh_duration = data->stutter_refresh_duration[i];
+ data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i])))));
+ data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]);
+ }
+ data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
+ }
+ }
+ data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
+ data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
+ data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
+ data->time_in_self_refresh = data->min_stutter_refresh_duration;
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->stutter_efficiency = bw_int_to_fixed(0);
+ }
+ else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) {
+ data->stutter_efficiency = bw_int_to_fixed(0);
+ }
+ else {
+ /*compute stutter efficiency assuming 60 hz refresh rate*/
+ data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100)));
+ }
+ /*immediate flip time*/
+ /*if scatter gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
+ /*otherwise, it may take just one urgenr memory trip*/
+ data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+ data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
+ if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
+ data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
+ }
+ }
+ }
+ data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
+ /*worst latency for other clients*/
+ /*it is the urgent latency plus the urgent burst time*/
+ data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
+ }
+ else {
+ data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
+ }
+ /*dmif mc urgent latency suppported in high sclk and yclk*/
+ data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
+ /*dram speed/p-state change margin*/
+ /*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
+ data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+ data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+ data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+ }
+ }
+ /*sclk required vs urgent latency*/
+ for (i = 1; i <= 5; i++) {
+ data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
+ if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ }
+ else {
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
+ }
+ }
+ /*output link bit per pixel supported*/
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ data->output_bpphdmi[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr2[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr3[k] = bw_def_na;
+ if (data->enable[k]) {
+ data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
+ data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) {
+ data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) {
+ data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ }
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
+ struct bw_calcs_vbios *bw_vbios,
+ struct hw_asic_id asic_id)
+{
+ struct bw_calcs_dceip dceip = { 0 };
+ struct bw_calcs_vbios vbios = { 0 };
+
+ enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
+
+ dceip.version = version;
+
+ switch (version) {
+ case BW_CALCS_VERSION_CARRIZO:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 64;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(1600);
+ vbios.mid_yclk = bw_int_to_fixed(1600);
+ vbios.low_yclk = bw_frc_to_fixed(66666, 100);
+ vbios.low_sclk = bw_int_to_fixed(200);
+ vbios.mid1_sclk = bw_int_to_fixed(300);
+ vbios.mid2_sclk = bw_int_to_fixed(300);
+ vbios.mid3_sclk = bw_int_to_fixed(300);
+ vbios.mid4_sclk = bw_int_to_fixed(300);
+ vbios.mid5_sclk = bw_int_to_fixed(300);
+ vbios.mid6_sclk = bw_int_to_fixed(300);
+ vbios.high_sclk = bw_frc_to_fixed(62609, 100);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(50);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 3;
+ dceip.number_of_underlay_pipes = 1;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = false;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 2;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(0);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+ break;
+ case BW_CALCS_VERSION_POLARIS10:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(6000);
+ vbios.mid_yclk = bw_int_to_fixed(3200);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(400);
+ vbios.mid2_sclk = bw_int_to_fixed(500);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(700);
+ vbios.mid5_sclk = bw_int_to_fixed(800);
+ vbios.mid6_sclk = bw_int_to_fixed(974);
+ vbios.high_sclk = bw_int_to_fixed(1154);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 6;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_POLARIS11:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(6000);
+ vbios.mid_yclk = bw_int_to_fixed(3200);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(400);
+ vbios.mid2_sclk = bw_int_to_fixed(500);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(700);
+ vbios.mid5_sclk = bw_int_to_fixed(800);
+ vbios.mid6_sclk = bw_int_to_fixed(974);
+ vbios.high_sclk = bw_int_to_fixed(1154);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ if (vbios.number_of_dram_channels == 2) // 64-bit
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ else
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 5;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_STONEY:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 64;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(1866);
+ vbios.mid_yclk = bw_int_to_fixed(1866);
+ vbios.low_yclk = bw_int_to_fixed(1333);
+ vbios.low_sclk = bw_int_to_fixed(200);
+ vbios.mid1_sclk = bw_int_to_fixed(600);
+ vbios.mid2_sclk = bw_int_to_fixed(600);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(600);
+ vbios.mid5_sclk = bw_int_to_fixed(600);
+ vbios.mid6_sclk = bw_int_to_fixed(600);
+ vbios.high_sclk = bw_int_to_fixed(800);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(50);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 2;
+ dceip.number_of_underlay_pipes = 1;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 2;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(0);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_VEGA10:
+ vbios.memory_type = bw_def_hbm;
+ vbios.dram_channel_width_in_bits = 128;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 16;
+ vbios.high_yclk = bw_int_to_fixed(2400);
+ vbios.mid_yclk = bw_int_to_fixed(1700);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(350);
+ vbios.mid2_sclk = bw_int_to_fixed(400);
+ vbios.mid3_sclk = bw_int_to_fixed(500);
+ vbios.mid4_sclk = bw_int_to_fixed(600);
+ vbios.mid5_sclk = bw_int_to_fixed(700);
+ vbios.mid6_sclk = bw_int_to_fixed(760);
+ vbios.high_sclk = bw_int_to_fixed(776);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(460);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(670);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1133);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(39);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = false;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = true;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 6;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = true;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 24576;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = false;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ default:
+ break;
+ }
+ *bw_dceip = dceip;
+ *bw_vbios = vbios;
+
+}
+
+/**
+ * Compare calculated (required) clocks against the clocks available at
+ * maximum voltage (max Performance Level).
+ */
+static bool is_display_configuration_supported(
+ const struct bw_calcs_vbios *vbios,
+ const struct dce_bw_output *calcs_output)
+{
+ uint32_t int_max_clk;
+
+ int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk);
+ int_max_clk *= 1000; /* MHz to kHz */
+ if (calcs_output->dispclk_khz > int_max_clk)
+ return false;
+
+ int_max_clk = bw_fixed_to_int(vbios->high_sclk);
+ int_max_clk *= 1000; /* MHz to kHz */
+ if (calcs_output->sclk_khz > int_max_clk)
+ return false;
+
+ return true;
+}
+
+static void populate_initial_data(
+ const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data)
+{
+ int i, j;
+ int num_displays = 0;
+
+ data->underlay_surface_type = bw_def_420;
+ data->panning_and_bezel_adjustment = bw_def_none;
+ data->graphics_lb_bpc = 10;
+ data->underlay_lb_bpc = 8;
+ data->underlay_tiling_mode = bw_def_tiled;
+ data->graphics_tiling_mode = bw_def_tiled;
+ data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
+ data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
+
+ /* Pipes with underlay first */
+ for (i = 0; i < pipe_count; i++) {
+ if (!pipe[i].stream || !pipe[i].bottom_pipe)
+ continue;
+
+ ASSERT(pipe[i].plane_state);
+
+ if (num_displays == 0) {
+ if (!pipe[i].plane_state->visible)
+ data->d0_underlay_mode = bw_def_underlay_only;
+ else
+ data->d0_underlay_mode = bw_def_blend;
+ } else {
+ if (!pipe[i].plane_state->visible)
+ data->d1_underlay_mode = bw_def_underlay_only;
+ else
+ data->d1_underlay_mode = bw_def_blend;
+ }
+
+ data->fbc_en[num_displays + 4] = false;
+ data->lpt_en[num_displays + 4] = false;
+ data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+ data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+ data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ switch (pipe[i].plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ data->bytes_per_pixel[num_displays + 4] = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ data->bytes_per_pixel[num_displays + 4] = 8;
+ break;
+ default:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ }
+ data->interlace_mode[num_displays + 4] = false;
+ data->stereo_mode[num_displays + 4] = bw_def_mono;
+
+
+ for (j = 0; j < 2; j++) {
+ data->fbc_en[num_displays * 2 + j] = false;
+ data->lpt_en[num_displays * 2 + j] = false;
+
+ data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height);
+ data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
+ pipe[i].bottom_pipe->plane_state->plane_size.grph.surface_pitch);
+ data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+ pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+ pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].bottom_pipe->plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ data->stereo_mode[num_displays * 2 + j] = bw_def_mono;
+ }
+
+ num_displays++;
+ }
+
+ /* Pipes without underlay after */
+ for (i = 0; i < pipe_count; i++) {
+ if (!pipe[i].stream || pipe[i].bottom_pipe)
+ continue;
+
+
+ data->fbc_en[num_displays + 4] = false;
+ data->lpt_en[num_displays + 4] = false;
+ data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+ data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+ data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+ if (pipe[i].plane_state) {
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ switch (pipe[i].plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ data->bytes_per_pixel[num_displays + 4] = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ data->bytes_per_pixel[num_displays + 4] = 8;
+ break;
+ default:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ }
+ } else {
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
+ data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+ data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ }
+
+ data->interlace_mode[num_displays + 4] = false;
+ data->stereo_mode[num_displays + 4] = bw_def_mono;
+ num_displays++;
+ }
+
+ data->number_of_displays = num_displays;
+}
+
+/**
+ * Return:
+ * true - Display(s) configuration supported.
+ * In this case 'calcs_output' contains data for HW programming
+ * false - Display(s) configuration not supported (not enough bandwidth).
+ */
+
+bool bw_calcs(struct dc_context *ctx,
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ const struct pipe_ctx pipe[],
+ int pipe_count,
+ struct dce_bw_output *calcs_output)
+{
+ struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data),
+ GFP_KERNEL);
+ if (!data)
+ return false;
+
+ populate_initial_data(pipe, pipe_count, data);
+
+ /*TODO: this should be taken out calcs output and assigned during timing sync for pplib use*/
+ calcs_output->all_displays_in_sync = false;
+
+ if (data->number_of_displays != 0) {
+ uint8_t yclk_lvl, sclk_lvl;
+ struct bw_fixed high_sclk = vbios->high_sclk;
+ struct bw_fixed mid1_sclk = vbios->mid1_sclk;
+ struct bw_fixed mid2_sclk = vbios->mid2_sclk;
+ struct bw_fixed mid3_sclk = vbios->mid3_sclk;
+ struct bw_fixed mid4_sclk = vbios->mid4_sclk;
+ struct bw_fixed mid5_sclk = vbios->mid5_sclk;
+ struct bw_fixed mid6_sclk = vbios->mid6_sclk;
+ struct bw_fixed low_sclk = vbios->low_sclk;
+ struct bw_fixed high_yclk = vbios->high_yclk;
+ struct bw_fixed mid_yclk = vbios->mid_yclk;
+ struct bw_fixed low_yclk = vbios->low_yclk;
+
+ calculate_bandwidth(dceip, vbios, data);
+
+ yclk_lvl = data->y_clk_level;
+ sclk_lvl = data->sclk_level;
+
+ calcs_output->nbp_state_change_enable =
+ data->nbp_state_change_enable;
+ calcs_output->cpuc_state_change_enable =
+ data->cpuc_state_change_enable;
+ calcs_output->cpup_state_change_enable =
+ data->cpup_state_change_enable;
+ calcs_output->stutter_mode_enable =
+ data->stutter_mode_enable;
+ calcs_output->dispclk_khz =
+ bw_fixed_to_int(bw_mul(data->dispclk,
+ bw_int_to_fixed(1000)));
+ calcs_output->blackout_recovery_time_us =
+ bw_fixed_to_int(data->blackout_recovery_time);
+ calcs_output->sclk_khz =
+ bw_fixed_to_int(bw_mul(data->required_sclk,
+ bw_int_to_fixed(1000)));
+ calcs_output->sclk_deep_sleep_khz =
+ bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
+ bw_int_to_fixed(1000)));
+ if (yclk_lvl == 0)
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(low_yclk, bw_int_to_fixed(1000)));
+ else if (yclk_lvl == 1)
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(mid_yclk, bw_int_to_fixed(1000)));
+ else
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(high_yclk, bw_int_to_fixed(1000)));
+
+ /* units: nanosecond, 16bit storage. */
+
+ calcs_output->nbp_state_change_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->stutter_exit_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->urgent_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ if (dceip->version != BW_CALCS_VERSION_CARRIZO) {
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4],bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->stutter_exit_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->urgent_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+ calcs_output->stutter_exit_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->urgent_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+ }
+
+ if (dceip->version == BW_CALCS_VERSION_CARRIZO) {
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk;
+ ((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk;
+ } else {
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+ }
+
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->stutter_exit_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+ calcs_output->urgent_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk;
+ ((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk;
+ ((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk;
+ } else {
+ calcs_output->nbp_state_change_enable = true;
+ calcs_output->cpuc_state_change_enable = true;
+ calcs_output->cpup_state_change_enable = true;
+ calcs_output->stutter_mode_enable = true;
+ calcs_output->dispclk_khz = 0;
+ calcs_output->sclk_khz = 0;
+ }
+
+ kfree(data);
+
+ return is_display_configuration_supported(vbios, calcs_output);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
new file mode 100644
index 000000000000..626f9cf8aad2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -0,0 +1,1899 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calc_auto.h"
+#include "dcn_calc_math.h"
+
+/*REVISION#250*/
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
+{
+ int k;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->allow_different_hratio_vratio == dcn_bw_yes) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k];
+ v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k];
+ }
+ else {
+ v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k];
+ v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k];
+ }
+ }
+ else {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]);
+ }
+ else {
+ v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]);
+ }
+ v->v_ratio[k] = v->h_ratio[k];
+ }
+ if (v->interlace_output[k] == 1.0) {
+ v->v_ratio[k] = 2.0 * v->v_ratio[k];
+ }
+ if ((v->underscan_output[k] == 1.0)) {
+ v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor;
+ v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor;
+ }
+ }
+ /*scaler taps calculation*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0));
+ }
+ else if (v->h_ratio[k] < 1.0) {
+ v->acceptable_quality_hta_ps = 4.0;
+ }
+ else {
+ v->acceptable_quality_hta_ps = 1.0;
+ }
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->htaps[k] = v->override_hta_ps[k];
+ }
+ else {
+ v->htaps[k] = v->acceptable_quality_hta_ps;
+ }
+ if (v->v_ratio[k] > 1.0) {
+ v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0));
+ }
+ else if (v->v_ratio[k] < 1.0) {
+ v->acceptable_quality_vta_ps = 4.0;
+ }
+ else {
+ v->acceptable_quality_vta_ps = 1.0;
+ }
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->vtaps[k] = v->override_vta_ps[k];
+ }
+ else {
+ v->vtaps[k] = v->acceptable_quality_vta_ps;
+ }
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->vta_pschroma[k] = 0.0;
+ v->hta_pschroma[k] = 0.0;
+ }
+ else {
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->vta_pschroma[k] = v->override_vta_pschroma[k];
+ v->hta_pschroma[k] = v->override_hta_pschroma[k];
+ }
+ else {
+ v->vta_pschroma[k] = v->acceptable_quality_vta_ps;
+ v->hta_pschroma[k] = v->acceptable_quality_hta_ps;
+ }
+ }
+ }
+}
+
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
+{
+ int i;
+ int j;
+ int k;
+ /*mode support, voltage state and soc configuration*/
+
+ /*scale ratio support check*/
+
+ v->scale_ratio_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) {
+ v->scale_ratio_support = dcn_bw_no;
+ }
+ }
+ /*source format, pixel format and scan support check*/
+
+ v->source_format_pixel_and_scan_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) {
+ v->source_format_pixel_and_scan_support = dcn_bw_no;
+ }
+ }
+ /*bandwidth support check*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width_ysingle_dpp[k] = v->viewport_width[k];
+ }
+ else {
+ v->swath_width_ysingle_dpp[k] = v->viewport_height[k];
+ }
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pixel_in_dety[k] = 8.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pixel_in_dety[k] = 4.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pixel_in_dety[k] = 2.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pixel_in_dety[k] = 1.0;
+ v->byte_per_pixel_in_detc[k] = 2.0;
+ }
+ else {
+ v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f;
+ v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f;
+ }
+ }
+ v->total_read_bandwidth_consumed_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]);
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256);
+ }
+ if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) {
+ v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 64);
+ }
+ else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) {
+ v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256);
+ }
+ else if (v->pte_enable == dcn_bw_yes) {
+ v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 512);
+ }
+ v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0;
+ }
+ v->total_write_bandwidth_consumed_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+ v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+ }
+ else if (v->output[k] == dcn_bw_writeback) {
+ v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+ }
+ else {
+ v->write_bandwidth[k] = 0.0;
+ }
+ v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0;
+ }
+ v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second;
+ v->dcc_enabled_in_any_plane = dcn_bw_no;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->dcc_enabled_in_any_plane = dcn_bw_yes;
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+ v->return_bw_per_state[i] = v->return_bw_todcn_per_state;
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) {
+ v->bandwidth_support[i] = dcn_bw_yes;
+ }
+ else {
+ v->bandwidth_support[i] = dcn_bw_no;
+ }
+ }
+ /*writeback latency support check*/
+
+ v->writeback_latency_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+ v->writeback_latency_support = dcn_bw_no;
+ }
+ else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+ v->writeback_latency_support = dcn_bw_no;
+ }
+ }
+ /*re-ordering buffer support check*/
+
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i];
+ if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) {
+ v->rob_support[i] = dcn_bw_yes;
+ }
+ else {
+ v->rob_support[i] = dcn_bw_no;
+ }
+ }
+ /*display io support check*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) {
+ if (v->output_format[k] == dcn_bw_420) {
+ v->required_output_bw = v->pixel_clock[k] / 2.0;
+ }
+ else {
+ v->required_output_bw = v->pixel_clock[k];
+ }
+ }
+ else if (v->output_format[k] == dcn_bw_420) {
+ v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0;
+ }
+ else {
+ v->required_output_bw = v->pixel_clock[k] * 3.0;
+ }
+ if (v->output[k] == dcn_bw_hdmi) {
+ v->required_phyclk[k] = v->required_output_bw;
+ switch (v->output_deep_color[k]) {
+ case dcn_bw_encoder_10bpc:
+ v->required_phyclk[k] = v->required_phyclk[k] * 5.0 / 4;
+ break;
+ case dcn_bw_encoder_12bpc:
+ v->required_phyclk[k] = v->required_phyclk[k] * 3.0 / 2;
+ break;
+ default:
+ break;
+ }
+ v->required_phyclk[k] = v->required_phyclk[k] / 3.0;
+ }
+ else if (v->output[k] == dcn_bw_dp) {
+ v->required_phyclk[k] = v->required_output_bw / 4.0;
+ }
+ else {
+ v->required_phyclk[k] = 0.0;
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->dio_support[i] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) {
+ v->dio_support[i] = dcn_bw_no;
+ }
+ }
+ }
+ /*total available writeback support check*/
+
+ v->total_number_of_active_writeback = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback) {
+ v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0;
+ }
+ }
+ if (v->total_number_of_active_writeback <= v->max_num_writeback) {
+ v->total_available_writeback_support = dcn_bw_yes;
+ }
+ else {
+ v->total_available_writeback_support = dcn_bw_no;
+ }
+ /*maximum dispclk/dppclk support check*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->pscl_factor_chroma[k] = 0.0;
+ v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0);
+ }
+ else {
+ if (v->h_ratio[k] / 2.0 > 1.0) {
+ v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0);
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_block_height_y[k] = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->read256_block_height_y[k] = 4.0;
+ }
+ else {
+ v->read256_block_height_y[k] = 8.0;
+ }
+ v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+ v->read256_block_height_c[k] = 0.0;
+ v->read256_block_width_c[k] = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_block_height_y[k] = 1.0;
+ v->read256_block_height_c[k] = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->read256_block_height_y[k] = 16.0;
+ v->read256_block_height_c[k] = 8.0;
+ }
+ else {
+ v->read256_block_height_y[k] = 8.0;
+ v->read256_block_height_c[k] = 8.0;
+ }
+ v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+ v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k];
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->max_swath_height_y[k] = v->read256_block_height_y[k];
+ v->max_swath_height_c[k] = v->read256_block_height_c[k];
+ }
+ else {
+ v->max_swath_height_y[k] = v->read256_block_width_y[k];
+ v->max_swath_height_c[k] = v->read256_block_width_c[k];
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ }
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+ }
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ }
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ }
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->maximum_swath_width = 8192.0;
+ }
+ else {
+ v->maximum_swath_width = 5120.0;
+ }
+ v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0);
+ }
+ else {
+ v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0));
+ }
+ v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size);
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_active_dpp[i][j] = 0.0;
+ v->required_dispclk[i][j] = 0.0;
+ v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ if (v->odm_capability == dcn_bw_yes) {
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ }
+ else {
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ }
+ if (i < number_of_states) {
+ v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ }
+ if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+ v->no_of_dpp[i][j][k] = 1.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+ }
+ else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ }
+ else {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) {
+ v->total_number_of_active_dpp[i][j] = 0.0;
+ v->required_dispclk[i][j] = 0.0;
+ v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ if (i < number_of_states) {
+ v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ }
+ if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+ v->no_of_dpp[i][j][k] = 1.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+ if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ }
+ else {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ }
+ v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ }
+ }
+ }
+ /*viewport size check*/
+
+ v->viewport_size_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) {
+ v->viewport_size_support = dcn_bw_no;
+ }
+ }
+ /*total available pipes support check*/
+
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ if (v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) {
+ v->total_available_pipes_support[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->total_available_pipes_support[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ /*urgent latency support check*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k];
+ v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k];
+ v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+ }
+ if (v->max_swath_height_c[k] > 0.0) {
+ v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
+ }
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
+ if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+ v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
+ v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k];
+ }
+ else {
+ v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k];
+ v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k];
+ }
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = 0.0;
+ }
+ else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+ }
+ else {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+ }
+ v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+ v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
+ v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->urgent_latency_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency / 1.0) {
+ v->urgent_latency_support[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ /*prefetch check*/
+
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_dcc_active_dpp[i][j] = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->projected_dcfclk_deep_sleep = 8.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ if (v->v_ratio[k] <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ }
+ else {
+ if (v->v_ratio[k] <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ if (v->v_ratio[k] / 2.0 <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_req_height_y = 8.0 * v->read256_block_height_y[k];
+ v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y;
+ v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y;
+ v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_per_frame_y = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+ }
+ else {
+ v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_per_frame_y = 0.0;
+ v->meta_row_bytes_y = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_block_size_bytes_y = 256.0;
+ v->macro_tile_block_height_y = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_block_size_bytes_y = 4096.0;
+ v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k];
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_block_size_bytes_y = 64.0 * 1024;
+ v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k];
+ }
+ else {
+ v->macro_tile_block_size_bytes_y = 256.0 * 1024;
+ v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k];
+ }
+ if (v->macro_tile_block_size_bytes_y <= 65536.0) {
+ v->data_pte_req_height_y = v->macro_tile_block_height_y;
+ }
+ else {
+ v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k];
+ }
+ v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+ }
+ else {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) + 1);
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_y = 0.0;
+ }
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_req_height_c = 8.0 * v->read256_block_height_c[k];
+ v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c;
+ v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c;
+ v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+ }
+ else {
+ v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ v->meta_row_bytes_c = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_block_size_bytes_c = 256.0;
+ v->macro_tile_block_height_c = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_block_size_bytes_c = 4096.0;
+ v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k];
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_block_size_bytes_c = 64.0 * 1024;
+ v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k];
+ }
+ else {
+ v->macro_tile_block_size_bytes_c = 256.0 * 1024;
+ v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k];
+ }
+ v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c;
+ if (v->macro_tile_block_size_bytes_c <= 65536.0) {
+ v->data_pte_req_height_c = v->macro_tile_block_height_c;
+ }
+ else {
+ v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k];
+ }
+ v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+ }
+ else {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1);
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_c = 0.0;
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_c = 0.0;
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ v->meta_row_bytes_c = 0.0;
+ }
+ v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c;
+ v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c;
+ v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c;
+ v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0;
+ v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0);
+ v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1;
+ if (v->prefill_y[k] > 1.0) {
+ v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]);
+ }
+ else {
+ v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]);
+ }
+ v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y);
+ v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y;
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0;
+ v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0);
+ v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1;
+ if (v->prefill_c[k] > 1.0) {
+ v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]);
+ }
+ else {
+ v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]);
+ }
+ v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c);
+ v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c;
+ }
+ else {
+ v->prefetch_lines_c[k] = 0.0;
+ }
+ v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j];
+ if (v->no_of_dpp[i][j][k] > 1.0) {
+ v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+ }
+ if (v->output_format[k] == dcn_bw_420) {
+ v->dst_y_after_scaler = 1.0;
+ }
+ else {
+ v->dst_y_after_scaler = 0.0;
+ }
+ v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
+ v->v_update_offset[k] =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
+ v->v_update_width[k] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
+ v->v_ready_offset[k] =dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
+ v->time_setup = (v->v_update_offset[k] + v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+ v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
+ if (v->pte_enable == dcn_bw_yes) {
+ v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
+ }
+ if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+ v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0;
+ }
+ else {
+ v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0;
+ }
+ v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]);
+ v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4;
+ v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+ }
+ v->bw_available_for_immediate_flip = v->return_bw_per_state[i];
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]);
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_immediate_flip_bytes[k] = 0.0;
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k];
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+ v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ v->time_for_meta_pte_without_immediate_flip =dcn_bw_max3(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ else {
+ v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ }
+ if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
+ v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency);
+ v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max3((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency);
+ }
+ else {
+ v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip);
+ v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency - v->time_for_meta_pte_without_immediate_flip);
+ }
+ v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k];
+ v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k];
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+ v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0;
+ }
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+ v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]));
+ }
+ else {
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+ }
+ }
+ v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+ }
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+ if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) {
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) {
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+ if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) {
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) {
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) {
+ v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) {
+ v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ /*mode support, voltage state and soc configuration*/
+
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ for (j = 0; j <= 1; j++) {
+ if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) {
+ if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) {
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ else {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+ v->voltage_level_with_immediate_flip = i;
+ }
+ }
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+ v->voltage_level_without_immediate_flip = i;
+ }
+ }
+ if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) {
+ v->immediate_flip_supported = dcn_bw_no;
+ v->voltage_level = v->voltage_level_without_immediate_flip;
+ }
+ else {
+ v->immediate_flip_supported = dcn_bw_yes;
+ v->voltage_level = v->voltage_level_with_immediate_flip;
+ }
+ v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+ for (j = 0; j <= 1; j++) {
+ v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j];
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k];
+ }
+ v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j];
+ }
+ v->max_phyclk = v->phyclk_per_state[v->voltage_level];
+}
+void display_pipe_configuration(struct dcn_bw_internal_vars *v)
+{
+ int j;
+ int k;
+ /*display pipe configuration*/
+
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_active_dpp_per_ratio[j] = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k];
+ }
+ }
+ if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) {
+ v->dispclk_dppclk_ratio = 1;
+ v->final_error_message = v->error_message[0];
+ }
+ else {
+ v->dispclk_dppclk_ratio = 2;
+ v->final_error_message = v->error_message[1];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pix_dety = 8.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pix_dety = 4.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pix_dety = 2.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pix_dety = 1.0;
+ v->byte_per_pix_detc = 2.0;
+ }
+ else {
+ v->byte_per_pix_dety = 4.0f / 3.0f;
+ v->byte_per_pix_detc = 8.0f / 3.0f;
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_bytes_block_height_y = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->read256_bytes_block_height_y = 4.0;
+ }
+ else {
+ v->read256_bytes_block_height_y = 8.0;
+ }
+ v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+ v->read256_bytes_block_height_c = 0.0;
+ v->read256_bytes_block_width_c = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_bytes_block_height_y = 1.0;
+ v->read256_bytes_block_height_c = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->read256_bytes_block_height_y = 16.0;
+ v->read256_bytes_block_height_c = 8.0;
+ }
+ else {
+ v->read256_bytes_block_height_y = 8.0;
+ v->read256_bytes_block_height_c = 8.0;
+ }
+ v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+ v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->maximum_swath_height_y = v->read256_bytes_block_height_y;
+ v->maximum_swath_height_c = v->read256_bytes_block_height_c;
+ }
+ else {
+ v->maximum_swath_height_y = v->read256_bytes_block_width_y;
+ v->maximum_swath_height_c = v->read256_bytes_block_width_c;
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ }
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else {
+ v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+ }
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+ v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ }
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k];
+ }
+ else {
+ v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k];
+ }
+ v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y;
+ v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+ }
+ if (v->maximum_swath_height_c > 0.0) {
+ v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
+ }
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
+ if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+ v->swath_height_y[k] = v->maximum_swath_height_y;
+ v->swath_height_c[k] = v->maximum_swath_height_c;
+ }
+ else {
+ v->swath_height_y[k] = v->minimum_swath_height_y;
+ v->swath_height_c[k] = v->minimum_swath_height_c;
+ }
+ if (v->swath_height_c[k] == 0.0) {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0;
+ v->det_buffer_size_c[k] = 0.0;
+ }
+ else if (v->swath_height_y[k] <= v->swath_height_c[k]) {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+ v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+ }
+ else {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0;
+ v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0;
+ }
+ }
+}
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v)
+{
+ int k;
+ /*dispclk and dppclk calculation*/
+
+ v->dispclk_with_ramping = 0.0;
+ v->dispclk_without_ramping = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0);
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->pscl_throughput_chroma[k] = 0.0;
+ v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma;
+ }
+ else {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0);
+ v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma);
+ }
+ if (v->odm_capable == dcn_bw_yes) {
+ v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+ v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0));
+ }
+ else {
+ v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+ v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0));
+ }
+ }
+ if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) {
+ v->dispclk = v->dispclk_without_ramping;
+ }
+ else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) {
+ v->dispclk = v->max_dispclk[number_of_states];
+ }
+ else {
+ v->dispclk = v->dispclk_with_ramping;
+ }
+ v->dppclk = v->dispclk / v->dispclk_dppclk_ratio;
+ /*urgent watermark*/
+
+ v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+ v->dcc_enabled_any_plane = dcn_bw_no;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->dcc_enabled_any_plane = dcn_bw_yes;
+ }
+ }
+ v->return_bw = v->return_bandwidth_to_dcn;
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
+ }
+ else {
+ v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pixel_dety[k] = 8.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pixel_dety[k] = 4.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pixel_dety[k] = 2.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pixel_dety[k] = 1.0;
+ v->byte_per_pixel_detc[k] = 2.0;
+ }
+ else {
+ v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
+ v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
+ }
+ }
+ v->total_data_read_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
+ v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
+ v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
+ }
+ v->total_active_dpp = 0.0;
+ v->total_dcc_active_dpp = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k];
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k];
+ }
+ }
+ v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw;
+ v->last_pixel_of_line_extra_watermark = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->v_ratio[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+ v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]);
+ if (v->byte_per_pixel_detc[k] == 0.0) {
+ v->display_pipe_line_delivery_time_chroma[k] = 0.0;
+ }
+ else {
+ if (v->v_ratio[k] / 2.0 <= 1.0) {
+ v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk;
+ }
+ v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+ v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]);
+ }
+ }
+ v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw;
+ }
+ v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+ v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency;
+ /*nb p-state/dram clock change watermark*/
+
+ v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark;
+ v->total_active_writeback = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback) {
+ v->total_active_writeback = v->total_active_writeback + 1.0;
+ }
+ }
+ if (v->total_active_writeback <= 1.0) {
+ v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency;
+ }
+ else {
+ v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk;
+ }
+ /*stutter efficiency*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k];
+ v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]);
+ v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k];
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0);
+ v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]);
+ v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0);
+ }
+ else {
+ v->lines_in_detc[k] = 0.0;
+ v->lines_in_detc_rounded_down_to_swath[k] = 0.0;
+ v->full_det_buffering_time_c[k] = 999999.0;
+ }
+ }
+ v->min_full_det_buffering_time = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) {
+ v->min_full_det_buffering_time = v->full_det_buffering_time_y[k];
+ v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+ }
+ if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) {
+ v->min_full_det_buffering_time = v->full_det_buffering_time_c[k];
+ v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+ }
+ }
+ v->average_read_bandwidth_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0;
+ }
+ else {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0;
+ }
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0;
+ }
+ }
+ v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0));
+ v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0);
+ if (v->total_active_writeback == 0.0) {
+ v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0;
+ }
+ else {
+ v->stutter_efficiency_not_including_vblank = 0.0;
+ }
+ v->smallest_vblank = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+ v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k];
+ }
+ else {
+ v->v_blank_time = 0.0;
+ }
+ v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time);
+ }
+ v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0;
+ /*dcfclk deep sleep*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]);
+ }
+ else {
+ v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k];
+ }
+ v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0);
+ }
+ v->dcf_clk_deep_sleep = 8.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]);
+ }
+ /*stutter watermark*/
+
+ v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / v->dcf_clk_deep_sleep;
+ v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+ /*urgent latency supported*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]);
+ v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]);
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]);
+ v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]);
+ v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma);
+ }
+ else {
+ v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma;
+ }
+ }
+ v->min_urgent_latency_support_us = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]);
+ }
+ /*non-urgent latency tolerance*/
+
+ v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark;
+ /*prefetch*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->block_height256_bytes_y = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->block_height256_bytes_y = 4.0;
+ }
+ else {
+ v->block_height256_bytes_y = 8.0;
+ }
+ v->block_height256_bytes_c = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->block_height256_bytes_y = 1.0;
+ v->block_height256_bytes_c = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->block_height256_bytes_y = 16.0;
+ v->block_height256_bytes_c = 8.0;
+ }
+ else {
+ v->block_height256_bytes_y = 8.0;
+ v->block_height256_bytes_c = 8.0;
+ }
+ }
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y);
+ v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y;
+ v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_frame_y = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+ }
+ else {
+ v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_frame_y = 0.0;
+ v->meta_row_byte_y = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_size_byte_y = 256.0;
+ v->macro_tile_height_y = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_size_byte_y = 4096.0;
+ v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_size_byte_y = 64.0 * 1024;
+ v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y;
+ }
+ else {
+ v->macro_tile_size_byte_y = 256.0 * 1024;
+ v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y;
+ }
+ if (v->macro_tile_size_byte_y <= 65536.0) {
+ v->pixel_pte_req_height_y = v->macro_tile_height_y;
+ }
+ else {
+ v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y;
+ }
+ v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+ }
+ else {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1);
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_y = 0.0;
+ }
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c);
+ v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c;
+ v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_frame_c = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+ }
+ else {
+ v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_frame_c = 0.0;
+ v->meta_row_byte_c = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_size_bytes_c = 256.0;
+ v->macro_tile_height_c = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_size_bytes_c = 4096.0;
+ v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_size_bytes_c = 64.0 * 1024;
+ v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c;
+ }
+ else {
+ v->macro_tile_size_bytes_c = 256.0 * 1024;
+ v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c;
+ }
+ if (v->macro_tile_size_bytes_c <= 65536.0) {
+ v->pixel_pte_req_height_c = v->macro_tile_height_c;
+ }
+ else {
+ v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c;
+ }
+ v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1);
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 0.0;
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 0.0;
+ v->meta_pte_bytes_frame_c = 0.0;
+ v->meta_row_byte_c = 0.0;
+ }
+ v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c;
+ v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c;
+ v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c;
+ v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0);
+ v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1;
+ if (v->v_init_pre_fill_y[k] > 1.0) {
+ v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]);
+ }
+ else {
+ v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]);
+ }
+ v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y);
+ v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y;
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0);
+ v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1;
+ if (v->v_init_pre_fill_c[k] > 1.0) {
+ v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]);
+ }
+ else {
+ v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]);
+ }
+ v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c);
+ }
+ else {
+ v->max_num_swath_c[k] = 0.0;
+ v->max_partial_swath_c = 0.0;
+ }
+ v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c;
+ }
+ v->t_calc = 24.0 / v->dcf_clk_deep_sleep;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+ v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0;
+ }
+ else {
+ v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0;
+ }
+ }
+ v->next_prefetch_mode = 0.0;
+ do {
+ v->v_startup_lines = 13.0;
+ do {
+ v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes;
+ v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no;
+ v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+ v->v_ratio_prefetch_more_than4 = dcn_bw_no;
+ v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+ v->prefetch_mode = v->next_prefetch_mode;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk;
+ if (v->dpp_per_plane[k] > 1.0) {
+ v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+ }
+ if (v->output_format[k] == dcn_bw_420) {
+ v->dsty_after_scaler = 1.0;
+ }
+ else {
+ v->dsty_after_scaler = 0.0;
+ }
+ v->v_update_offset_pix =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk);
+ v->v_update_width_pix = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
+ v->v_ready_offset_pix =dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
+ v->t_setup = (v->v_update_offset_pix + v->v_update_width_pix + v->v_ready_offset_pix) / v->pixel_clock[k];
+ v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]);
+ if (v->prefetch_mode == 0.0) {
+ v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency);
+ }
+ else if (v->prefetch_mode == 1.0) {
+ v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency);
+ }
+ else {
+ v->t_wait = v->urgent_latency;
+ }
+ v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4;
+ if (v->destination_lines_for_prefetch[k] > 0.0) {
+ v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->prefetch_bandwidth[k] = 999999.0;
+ }
+ }
+ v->bandwidth_available_for_immediate_flip = v->return_bw;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]);
+ }
+ v->tot_immediate_flip_bytes = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k];
+ }
+ }
+ v->max_rd_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ else {
+ v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ }
+ else {
+ v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ }
+ v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ else {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ }
+ else {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k];
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data;
+ if ((v->swath_height_y[k] > 4.0)) {
+ if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) {
+ v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_prefetch_y[k] = 999999.0;
+ }
+ }
+ }
+ else {
+ v->v_ratio_prefetch_y[k] = 999999.0;
+ }
+ v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0);
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data;
+ if ((v->swath_height_c[k] > 4.0)) {
+ if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) {
+ v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_prefetch_c[k] = 999999.0;
+ }
+ }
+ }
+ else {
+ v->v_ratio_prefetch_c[k] = 999999.0;
+ }
+ v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0);
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->required_prefetch_pix_data_bw = 999999.0;
+ }
+ v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw);
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k]));
+ }
+ if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+ v->v_ratio_prefetch_more_than4 = dcn_bw_yes;
+ }
+ if (v->destination_lines_for_prefetch[k] < 2.0) {
+ v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+ }
+ if (v->max_vstartup_lines[k] > v->v_startup_lines) {
+ if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) {
+ v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no;
+ }
+ if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+ v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes;
+ }
+ if (v->destination_lines_for_prefetch[k] < 2.0) {
+ v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+ }
+ }
+ }
+ if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) {
+ v->prefetch_mode_supported = dcn_bw_yes;
+ }
+ else {
+ v->prefetch_mode_supported = dcn_bw_no;
+ }
+ v->v_startup_lines = v->v_startup_lines + 1.0;
+ } while (!(v->prefetch_mode_supported == dcn_bw_yes || (v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no)));
+ v->next_prefetch_mode = v->next_prefetch_mode + 1.0;
+ } while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0));
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->v_ratio_prefetch_y[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ if (v->byte_per_pixel_detc[k] == 0.0) {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0;
+ }
+ else {
+ if (v->v_ratio_prefetch_c[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ }
+ }
+ /*min ttuv_blank*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->prefetch_mode == 0.0) {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+ v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+ }
+ else if (v->prefetch_mode == 1.0) {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+ v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+ }
+ else {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no;
+ v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark;
+ }
+ }
+ /*nb p-state/dram clock change support*/
+
+ v->active_dp_ps = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+ v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]);
+ if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k];
+ }
+ else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_y = 0.5;
+ }
+ else {
+ v->dpp_output_buffer_lines_y = 1.0;
+ }
+ if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0);
+ }
+ else if (v->swath_width_y[k] / 2.0 > v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_c = 0.5;
+ }
+ else {
+ v->dpp_output_buffer_lines_c = 1.0;
+ }
+ v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines);
+ v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark;
+ if (v->active_dp_ps > 1.0) {
+ v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+ }
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines);
+ v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark;
+ if (v->active_dp_ps > 1.0) {
+ v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+ }
+ v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c);
+ }
+ else {
+ v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y;
+ }
+ if (v->output_format[k] == dcn_bw_444) {
+ v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark;
+ }
+ else {
+ v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark;
+ }
+ if (v->output[k] == dcn_bw_writeback) {
+ v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin);
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) {
+ v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark);
+ }
+ else {
+ v->v_blank_dram_clock_change_latency_margin[k] = 0.0;
+ }
+ }
+ v->min_active_dram_clock_change_margin = 999999.0;
+ v->v_blank_of_min_active_dram_clock_change_margin = 999999.0;
+ v->second_min_active_dram_clock_change_margin = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) {
+ v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+ v->min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+ v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+ }
+ else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) {
+ v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+ }
+ }
+ v->min_vblank_dram_clock_change_margin = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) {
+ v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+ }
+ }
+ if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+ v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin);
+ }
+ else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) {
+ v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin);
+ }
+ else {
+ v->dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+ }
+ if (v->min_active_dram_clock_change_margin > 0.0) {
+ v->dram_clock_change_support = dcn_bw_supported_in_v_active;
+ }
+ else if (v->dram_clock_change_margin > 0.0) {
+ v->dram_clock_change_support = dcn_bw_supported_in_v_blank;
+ }
+ else {
+ v->dram_clock_change_support = dcn_bw_not_supported;
+ }
+ /*maximum bandwidth used*/
+
+ v->wr_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+ v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+ }
+ else if (v->output[k] == dcn_bw_writeback) {
+ v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+ }
+ }
+ v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
new file mode 100644
index 000000000000..03f06f682ead
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN_CALC_AUTO_H_
+#define _DCN_CALC_AUTO_H_
+
+#include "dcn_calcs.h"
+
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v);
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v);
+void display_pipe_configuration(struct dcn_bw_internal_vars *v);
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(
+ struct dcn_bw_internal_vars *v);
+
+#endif /* _DCN_CALC_AUTO_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
new file mode 100644
index 000000000000..b6abe0f3bb15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn_calc_math.h"
+
+float dcn_bw_mod(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 - arg1 * ((int) (arg1 / arg2));
+}
+
+float dcn_bw_min2(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 < arg2 ? arg1 : arg2;
+}
+
+unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 > arg2 ? arg1 : arg2;
+}
+float dcn_bw_max2(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 > arg2 ? arg1 : arg2;
+}
+
+float dcn_bw_floor2(const float arg, const float significance)
+{
+ if (significance == 0)
+ return 0;
+ return ((int) (arg / significance)) * significance;
+}
+
+float dcn_bw_ceil2(const float arg, const float significance)
+{
+ float flr = dcn_bw_floor2(arg, significance);
+ if (significance == 0)
+ return 0;
+ return flr + 0.00001 >= arg ? arg : flr + significance;
+}
+
+float dcn_bw_max3(float v1, float v2, float v3)
+{
+ return v3 > dcn_bw_max2(v1, v2) ? v3 : dcn_bw_max2(v1, v2);
+}
+
+float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5)
+{
+ return dcn_bw_max3(v1, v2, v3) > dcn_bw_max2(v4, v5) ? dcn_bw_max3(v1, v2, v3) : dcn_bw_max2(v4, v5);
+}
+
+float dcn_bw_pow(float a, float exp)
+{
+ float temp;
+ /*ASSERT(exp == (int)exp);*/
+ if ((int)exp == 0)
+ return 1;
+ temp = dcn_bw_pow(a, (int)(exp / 2));
+ if (((int)exp % 2) == 0) {
+ return temp * temp;
+ } else {
+ if ((int)exp > 0)
+ return a * temp * temp;
+ else
+ return (temp * temp) / a;
+ }
+}
+
+float dcn_bw_log(float a, float b)
+{
+ int * const exp_ptr = (int *)(&a);
+ int x = *exp_ptr;
+ const int log_2 = ((x >> 23) & 255) - 128;
+ x &= ~(255 << 23);
+ x += 127 << 23;
+ *exp_ptr = x;
+
+ a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;
+
+ if (b > 2.00001 || b < 1.99999)
+ return (a + log_2) / dcn_bw_log(b, 2);
+ else
+ return (a + log_2);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
new file mode 100644
index 000000000000..f46ab0e24ca1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN_CALC_MATH_H_
+#define _DCN_CALC_MATH_H_
+
+float dcn_bw_mod(const float arg1, const float arg2);
+float dcn_bw_min2(const float arg1, const float arg2);
+unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
+float dcn_bw_max2(const float arg1, const float arg2);
+float dcn_bw_floor2(const float arg, const float significance);
+float dcn_bw_ceil2(const float arg, const float significance);
+float dcn_bw_max3(float v1, float v2, float v3);
+float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
+float dcn_bw_pow(float a, float exp);
+float dcn_bw_log(float a, float b);
+
+#endif /* _DCN_CALC_MATH_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
new file mode 100644
index 000000000000..3dce35e66b09
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -0,0 +1,1626 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calcs.h"
+#include "dcn_calc_auto.h"
+#include "dc.h"
+#include "dal_asic_id.h"
+
+#include "resource.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn_calc_math.h"
+
+/* Defaults from spreadsheet rev#247 */
+const struct dcn_soc_bounding_box dcn10_soc_defaults = {
+ /* latencies */
+ .sr_exit_time = 17, /*us*/
+ .sr_enter_plus_exit_time = 19, /*us*/
+ .urgent_latency = 4, /*us*/
+ .dram_clock_change_latency = 17, /*us*/
+ .write_back_latency = 12, /*us*/
+ .percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/
+
+ /* below default clocks derived from STA target base on
+ * slow-slow corner + 10% margin with voltages aligned to FCLK.
+ *
+ * Use these value if fused value doesn't make sense as earlier
+ * part don't have correct value fused */
+ /* default DCF CLK DPM on RV*/
+ .dcfclkv_max0p9 = 655, /* MHz, = 3600/5.5 */
+ .dcfclkv_nom0p8 = 626, /* MHz, = 3600/5.75 */
+ .dcfclkv_mid0p72 = 600, /* MHz, = 3600/6, bypass */
+ .dcfclkv_min0p65 = 300, /* MHz, = 3600/12, bypass */
+
+ /* default DISP CLK voltage state on RV */
+ .max_dispclk_vmax0p9 = 1108, /* MHz, = 3600/3.25 */
+ .max_dispclk_vnom0p8 = 1029, /* MHz, = 3600/3.5 */
+ .max_dispclk_vmid0p72 = 960, /* MHz, = 3600/3.75 */
+ .max_dispclk_vmin0p65 = 626, /* MHz, = 3600/5.75 */
+
+ /* default DPP CLK voltage state on RV */
+ .max_dppclk_vmax0p9 = 720, /* MHz, = 3600/5 */
+ .max_dppclk_vnom0p8 = 686, /* MHz, = 3600/5.25 */
+ .max_dppclk_vmid0p72 = 626, /* MHz, = 3600/5.75 */
+ .max_dppclk_vmin0p65 = 400, /* MHz, = 3600/9 */
+
+ /* default PHY CLK voltage state on RV */
+ .phyclkv_max0p9 = 900, /*MHz*/
+ .phyclkv_nom0p8 = 847, /*MHz*/
+ .phyclkv_mid0p72 = 800, /*MHz*/
+ .phyclkv_min0p65 = 600, /*MHz*/
+
+ /* BW depend on FCLK, MCLK, # of channels */
+ /* dual channel BW */
+ .fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/
+ /* single channel BW
+ .fabric_and_dram_bandwidth_vmax0p9 = 19.2f,
+ .fabric_and_dram_bandwidth_vnom0p8 = 17.066f,
+ .fabric_and_dram_bandwidth_vmid0p72 = 14.933f,
+ .fabric_and_dram_bandwidth_vmin0p65 = 12.8f,
+ */
+
+ .number_of_channels = 2,
+
+ .socclk = 208, /*MHz*/
+ .downspreading = 0.5f, /*%*/
+ .round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/
+ .urgent_out_of_order_return_per_channel = 256, /*bytes*/
+ .vmm_page_size = 4096, /*bytes*/
+ .return_bus_width = 64, /*bytes*/
+ .max_request_size = 256, /*bytes*/
+
+ /* Depends on user class (client vs embedded, workstation, etc) */
+ .percent_disp_bw_limit = 0.3f /*%*/
+};
+
+const struct dcn_ip_params dcn10_ip_defaults = {
+ .rob_buffer_size_in_kbyte = 64,
+ .det_buffer_size_in_kbyte = 164,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_in_kbyte = 8,
+ .pte_enable = dcn_bw_yes,
+ .pte_chunk_size = 2, /*kbytes*/
+ .meta_chunk_size = 2, /*kbytes*/
+ .writeback_chunk_size = 2, /*kbytes*/
+ .odm_capability = dcn_bw_no,
+ .dsc_capability = dcn_bw_no,
+ .line_buffer_size = 589824, /*bit*/
+ .max_line_buffer_lines = 12,
+ .is_line_buffer_bpp_fixed = dcn_bw_no,
+ .line_buffer_fixed_bpp = dcn_bw_na,
+ .writeback_luma_buffer_size = 12, /*kbytes*/
+ .writeback_chroma_buffer_size = 8, /*kbytes*/
+ .max_num_dpp = 4,
+ .max_num_writeback = 2,
+ .max_dchub_topscl_throughput = 4, /*pixels/dppclk*/
+ .max_pscl_tolb_throughput = 2, /*pixels/dppclk*/
+ .max_lb_tovscl_throughput = 4, /*pixels/dppclk*/
+ .max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .pte_buffer_size_in_requests = 42,
+ .dispclk_ramping_margin = 1, /*%*/
+ .under_scan_factor = 1.11f,
+ .max_inter_dcn_tile_repeaters = 8,
+ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no,
+ .bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no,
+ .dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/
+};
+
+static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode)
+{
+ switch (sw_mode) {
+ case DC_SW_LINEAR:
+ return dcn_bw_sw_linear;
+ case DC_SW_4KB_S:
+ return dcn_bw_sw_4_kb_s;
+ case DC_SW_4KB_D:
+ return dcn_bw_sw_4_kb_d;
+ case DC_SW_64KB_S:
+ return dcn_bw_sw_64_kb_s;
+ case DC_SW_64KB_D:
+ return dcn_bw_sw_64_kb_d;
+ case DC_SW_VAR_S:
+ return dcn_bw_sw_var_s;
+ case DC_SW_VAR_D:
+ return dcn_bw_sw_var_d;
+ case DC_SW_64KB_S_T:
+ return dcn_bw_sw_64_kb_s_t;
+ case DC_SW_64KB_D_T:
+ return dcn_bw_sw_64_kb_d_t;
+ case DC_SW_4KB_S_X:
+ return dcn_bw_sw_4_kb_s_x;
+ case DC_SW_4KB_D_X:
+ return dcn_bw_sw_4_kb_d_x;
+ case DC_SW_64KB_S_X:
+ return dcn_bw_sw_64_kb_s_x;
+ case DC_SW_64KB_D_X:
+ return dcn_bw_sw_64_kb_d_x;
+ case DC_SW_VAR_S_X:
+ return dcn_bw_sw_var_s_x;
+ case DC_SW_VAR_D_X:
+ return dcn_bw_sw_var_d_x;
+ case DC_SW_256B_S:
+ case DC_SW_256_D:
+ case DC_SW_256_R:
+ case DC_SW_4KB_R:
+ case DC_SW_64KB_R:
+ case DC_SW_VAR_R:
+ case DC_SW_4KB_R_X:
+ case DC_SW_64KB_R_X:
+ case DC_SW_VAR_R_X:
+ default:
+ BREAK_TO_DEBUGGER(); /*not in formula*/
+ return dcn_bw_sw_4_kb_s;
+ }
+}
+
+static int tl_lb_bpp_to_int(enum lb_pixel_depth depth)
+{
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ return 18;
+ case LB_PIXEL_DEPTH_24BPP:
+ return 24;
+ case LB_PIXEL_DEPTH_30BPP:
+ return 30;
+ case LB_PIXEL_DEPTH_36BPP:
+ return 36;
+ default:
+ return 30;
+ }
+}
+
+static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format)
+{
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ return dcn_bw_rgb_sub_16;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ return dcn_bw_rgb_sub_32;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ return dcn_bw_rgb_sub_64;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ return dcn_bw_yuv420_sub_8;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ return dcn_bw_yuv420_sub_10;
+ default:
+ return dcn_bw_rgb_sub_32;
+ }
+}
+
+static void pipe_ctx_to_e2e_pipe_params (
+ const struct pipe_ctx *pipe,
+ struct _vcs_dpi_display_pipe_params_st *input)
+{
+ input->src.is_hsplit = false;
+ if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state)
+ input->src.is_hsplit = true;
+ else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
+ input->src.is_hsplit = true;
+
+ input->src.dcc = pipe->plane_state->dcc.enable;
+ input->src.dcc_rate = 1;
+ input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
+ input->src.source_scan = dm_horz;
+ input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle;
+
+ input->src.viewport_width = pipe->plane_res.scl_data.viewport.width;
+ input->src.viewport_height = pipe->plane_res.scl_data.viewport.height;
+ input->src.data_pitch = pipe->plane_res.scl_data.viewport.width;
+ input->src.data_pitch_c = pipe->plane_res.scl_data.viewport.width;
+ input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */
+ input->src.cur0_bpp = 32;
+
+ switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
+ /* for 4/8/16 high tiles */
+ case DC_SW_LINEAR:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_X:
+ case DC_SW_64KB_S_T:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_64k_tile;
+ break;
+ case DC_SW_VAR_S:
+ case DC_SW_VAR_S_X:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_256k_tile;
+ break;
+
+ /* For 64bpp 2 high tiles */
+ case DC_SW_4KB_D:
+ case DC_SW_4KB_D_X:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_64KB_D:
+ case DC_SW_64KB_D_X:
+ case DC_SW_64KB_D_T:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_64k_tile;
+ break;
+ case DC_SW_VAR_D:
+ case DC_SW_VAR_D_X:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_256k_tile;
+ break;
+
+ /* Unsupported swizzle modes for dcn */
+ case DC_SW_256B_S:
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+
+ switch (pipe->plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ case ROTATION_ANGLE_180:
+ input->src.source_scan = dm_horz;
+ break;
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ input->src.source_scan = dm_vert;
+ break;
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+
+ /* TODO: Fix pixel format mappings */
+ switch (pipe->plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ input->src.source_format = dm_420_8;
+ input->src.viewport_width_c = input->src.viewport_width / 2;
+ input->src.viewport_height_c = input->src.viewport_height / 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ input->src.source_format = dm_420_10;
+ input->src.viewport_width_c = input->src.viewport_width / 2;
+ input->src.viewport_height_c = input->src.viewport_height / 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ input->src.source_format = dm_444_64;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+ default:
+ input->src.source_format = dm_444_32;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+ }
+
+ input->scale_taps.htaps = pipe->plane_res.scl_data.taps.h_taps;
+ input->scale_ratio_depth.hscl_ratio = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0;
+ input->scale_ratio_depth.vscl_ratio = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0;
+ input->scale_ratio_depth.vinit = pipe->plane_res.scl_data.inits.v.value/4294967296.0;
+ if (input->scale_ratio_depth.vinit < 1.0)
+ input->scale_ratio_depth.vinit = 1;
+ input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps;
+ input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c;
+ input->scale_taps.htaps_c = pipe->plane_res.scl_data.taps.h_taps_c;
+ input->scale_ratio_depth.hscl_ratio_c = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0;
+ input->scale_ratio_depth.vscl_ratio_c = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0;
+ input->scale_ratio_depth.vinit_c = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0;
+ if (input->scale_ratio_depth.vinit_c < 1.0)
+ input->scale_ratio_depth.vinit_c = 1;
+ switch (pipe->plane_res.scl_data.lb_params.depth) {
+ case LB_PIXEL_DEPTH_30BPP:
+ input->scale_ratio_depth.lb_depth = 30; break;
+ case LB_PIXEL_DEPTH_36BPP:
+ input->scale_ratio_depth.lb_depth = 36; break;
+ default:
+ input->scale_ratio_depth.lb_depth = 24; break;
+ }
+
+
+ input->dest.vactive = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top
+ + pipe->stream->timing.v_border_bottom;
+
+ input->dest.recout_width = pipe->plane_res.scl_data.recout.width;
+ input->dest.recout_height = pipe->plane_res.scl_data.recout.height;
+
+ input->dest.full_recout_width = pipe->plane_res.scl_data.recout.width;
+ input->dest.full_recout_height = pipe->plane_res.scl_data.recout.height;
+
+ input->dest.htotal = pipe->stream->timing.h_total;
+ input->dest.hblank_start = input->dest.htotal - pipe->stream->timing.h_front_porch;
+ input->dest.hblank_end = input->dest.hblank_start
+ - pipe->stream->timing.h_addressable
+ - pipe->stream->timing.h_border_left
+ - pipe->stream->timing.h_border_right;
+
+ input->dest.vtotal = pipe->stream->timing.v_total;
+ input->dest.vblank_start = input->dest.vtotal - pipe->stream->timing.v_front_porch;
+ input->dest.vblank_end = input->dest.vblank_start
+ - pipe->stream->timing.v_addressable
+ - pipe->stream->timing.v_border_bottom
+ - pipe->stream->timing.v_border_top;
+ input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_khz/1000.0;
+ input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start;
+ input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
+ input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
+ input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width;
+
+}
+
+static void dcn_bw_calc_rq_dlg_ttu(
+ const struct dc *dc,
+ const struct dcn_bw_internal_vars *v,
+ struct pipe_ctx *pipe,
+ int in_idx)
+{
+ struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml);
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
+ struct _vcs_dpi_display_rq_params_st rq_param = {0};
+ struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
+ struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
+ float total_active_bw = 0;
+ float total_prefetch_bw = 0;
+ int total_flip_bytes = 0;
+ int i;
+
+ for (i = 0; i < number_of_planes; i++) {
+ total_active_bw += v->read_bandwidth[i];
+ total_prefetch_bw += v->prefetch_bandwidth[i];
+ total_flip_bytes += v->total_immediate_flip_bytes[i];
+ }
+ dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
+ if (dlg_sys_param.total_flip_bw < 0.0)
+ dlg_sys_param.total_flip_bw = 0;
+
+ dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
+ dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
+ dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
+ dlg_sys_param.t_extra_us = v->urgent_extra_latency;
+ dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
+ dlg_sys_param.total_flip_bytes = total_flip_bytes;
+
+ pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
+ input.clks_cfg.dcfclk_mhz = v->dcfclk;
+ input.clks_cfg.dispclk_mhz = v->dispclk;
+ input.clks_cfg.dppclk_mhz = v->dppclk;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+ input.clks_cfg.socclk_mhz = v->socclk;
+ input.clks_cfg.voltage = v->voltage_level;
+// dc->dml.logger = pool->base.logger;
+ input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
+ input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
+ //input[in_idx].dout.output_standard;
+ switch (v->output_deep_color[in_idx]) {
+ case dcn_bw_encoder_12bpc:
+ input.dout.output_bpc = dm_out_12;
+ break;
+ case dcn_bw_encoder_10bpc:
+ input.dout.output_bpc = dm_out_10;
+ break;
+ case dcn_bw_encoder_8bpc:
+ default:
+ input.dout.output_bpc = dm_out_8;
+ break;
+ }
+
+ /*todo: soc->sr_enter_plus_exit_time??*/
+ dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
+
+ dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
+ dml1_extract_rq_regs(dml, rq_regs, rq_param);
+ dml1_rq_dlg_get_dlg_params(
+ dml,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ input,
+ true,
+ true,
+ v->pte_enable == dcn_bw_yes,
+ pipe->plane_state->flip_immediate);
+}
+
+static void split_stream_across_pipes(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe)
+{
+ int pipe_idx = secondary_pipe->pipe_idx;
+
+ if (!primary_pipe->plane_state)
+ return;
+
+ *secondary_pipe = *primary_pipe;
+
+ secondary_pipe->pipe_idx = pipe_idx;
+ secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
+ if (primary_pipe->bottom_pipe) {
+ ASSERT(primary_pipe->bottom_pipe != secondary_pipe);
+ secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
+ secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
+ }
+ primary_pipe->bottom_pipe = secondary_pipe;
+ secondary_pipe->top_pipe = primary_pipe;
+
+ resource_build_scaling_params(primary_pipe);
+ resource_build_scaling_params(secondary_pipe);
+}
+
+static void calc_wm_sets_and_perf_params(
+ struct dc_state *context,
+ struct dcn_bw_internal_vars *v)
+{
+ /* Calculate set A last to keep internal var state consistent for required config */
+ if (v->voltage_level < 2) {
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+
+ v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
+ v->dcfclk = v->dcfclkv_nom0p8;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+ }
+
+ if (v->voltage_level < 3) {
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->dcfclk_per_state[2] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[1] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[0] = v->dcfclkv_max0p9;
+ v->dcfclk = v->dcfclkv_max0p9;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+ }
+
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+ v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+ v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+ v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ if (v->voltage_level >= 2) {
+ context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ }
+ if (v->voltage_level >= 3)
+ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+}
+
+static bool dcn_bw_apply_registry_override(struct dc *dc)
+{
+ bool updated = false;
+
+ kernel_fpu_begin();
+ if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
+ && dc->debug.sr_exit_time_ns) {
+ updated = true;
+ dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000)
+ != dc->debug.sr_enter_plus_exit_time_ns
+ && dc->debug.sr_enter_plus_exit_time_ns) {
+ updated = true;
+ dc->dcn_soc->sr_enter_plus_exit_time =
+ dc->debug.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns
+ && dc->debug.urgent_latency_ns) {
+ updated = true;
+ dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000)
+ != dc->debug.percent_of_ideal_drambw
+ && dc->debug.percent_of_ideal_drambw) {
+ updated = true;
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency =
+ dc->debug.percent_of_ideal_drambw;
+ }
+
+ if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ updated = true;
+ dc->dcn_soc->dram_clock_change_latency =
+ dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
+ kernel_fpu_end();
+
+ return updated;
+}
+
+void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
+{
+ /*
+ * disable optional pipe split by lower dispclk bounding box
+ * at DPM0
+ */
+ v->max_dispclk[0] = v->max_dppclk_vmin0p65;
+}
+
+void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
+ unsigned int pixel_rate_khz)
+{
+ float pixel_rate_mhz = pixel_rate_khz / 1000;
+
+ /*
+ * force enabling pipe split by lower dpp clock for DPM0 to just
+ * below the specify pixel_rate, so bw calc would split pipe.
+ */
+ if (pixel_rate_mhz < v->max_dppclk[0])
+ v->max_dppclk[0] = pixel_rate_mhz;
+}
+
+void hack_bounding_box(struct dcn_bw_internal_vars *v,
+ struct dc_debug *dbg,
+ struct dc_state *context)
+{
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) {
+ hack_disable_optional_pipe_split(v);
+ }
+
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP &&
+ context->stream_count >= 2) {
+ hack_disable_optional_pipe_split(v);
+ }
+
+ if (context->stream_count == 1 &&
+ dbg->force_single_disp_pipe_split) {
+ struct dc_stream_state *stream0 = context->streams[0];
+
+ hack_force_pipe_split(v, stream0->timing.pix_clk_khz);
+ }
+}
+
+bool dcn_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const struct resource_pool *pool = dc->res_pool;
+ struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
+ int i, input_idx;
+ int vesa_sync_start, asic_blank_end, asic_blank_start;
+ bool bw_limit_pass;
+ float bw_limit;
+
+ PERFORMANCE_TRACE_START();
+ if (dcn_bw_apply_registry_override(dc))
+ dcn_bw_sync_calcs_and_dml(dc);
+
+ memset(v, 0, sizeof(*v));
+ kernel_fpu_begin();
+ v->sr_exit_time = dc->dcn_soc->sr_exit_time;
+ v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
+ v->urgent_latency = dc->dcn_soc->urgent_latency;
+ v->write_back_latency = dc->dcn_soc->write_back_latency;
+ v->percent_of_ideal_drambw_received_after_urg_latency =
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+
+ v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65;
+ v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72;
+ v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8;
+ v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9;
+
+ v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65;
+ v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72;
+ v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8;
+ v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9;
+
+ v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65;
+ v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72;
+ v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8;
+ v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9;
+
+ v->socclk = dc->dcn_soc->socclk;
+
+ v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+ v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+ v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65;
+ v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72;
+ v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8;
+ v->phyclkv_max0p9 = dc->dcn_soc->phyclkv_max0p9;
+
+ v->downspreading = dc->dcn_soc->downspreading;
+ v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles;
+ v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel;
+ v->number_of_channels = dc->dcn_soc->number_of_channels;
+ v->vmm_page_size = dc->dcn_soc->vmm_page_size;
+ v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency;
+ v->return_bus_width = dc->dcn_soc->return_bus_width;
+
+ v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte;
+ v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte;
+ v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+ v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+ v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+ v->pte_enable = dc->dcn_ip->pte_enable;
+ v->pte_chunk_size = dc->dcn_ip->pte_chunk_size;
+ v->meta_chunk_size = dc->dcn_ip->meta_chunk_size;
+ v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size;
+ v->odm_capability = dc->dcn_ip->odm_capability;
+ v->dsc_capability = dc->dcn_ip->dsc_capability;
+ v->line_buffer_size = dc->dcn_ip->line_buffer_size;
+ v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed;
+ v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp;
+ v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+ v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size;
+ v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size;
+ v->max_num_dpp = dc->dcn_ip->max_num_dpp;
+ v->max_num_writeback = dc->dcn_ip->max_num_writeback;
+ v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput;
+ v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput;
+ v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput;
+ v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput;
+ v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+ v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+ v->max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+ v->max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+ v->under_scan_factor = dc->dcn_ip->under_scan_factor;
+ v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests;
+ v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin;
+ v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+ v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ v->bug_forcing_luma_and_chroma_request_to_same_size_fixed =
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+
+ v->voltage[5] = dcn_bw_no_support;
+ v->voltage[4] = dcn_bw_v_max0p9;
+ v->voltage[3] = dcn_bw_v_max0p9;
+ v->voltage[2] = dcn_bw_v_nom0p8;
+ v->voltage[1] = dcn_bw_v_mid0p72;
+ v->voltage[0] = dcn_bw_v_min0p65;
+ v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+ v->dcfclk_per_state[5] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[4] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[3] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+ v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+ v->max_dispclk[5] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[4] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[3] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[2] = v->max_dispclk_vnom0p8;
+ v->max_dispclk[1] = v->max_dispclk_vmid0p72;
+ v->max_dispclk[0] = v->max_dispclk_vmin0p65;
+ v->max_dppclk[5] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[4] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[3] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[2] = v->max_dppclk_vnom0p8;
+ v->max_dppclk[1] = v->max_dppclk_vmid0p72;
+ v->max_dppclk[0] = v->max_dppclk_vmin0p65;
+ v->phyclk_per_state[5] = v->phyclkv_max0p9;
+ v->phyclk_per_state[4] = v->phyclkv_max0p9;
+ v->phyclk_per_state[3] = v->phyclkv_max0p9;
+ v->phyclk_per_state[2] = v->phyclkv_nom0p8;
+ v->phyclk_per_state[1] = v->phyclkv_mid0p72;
+ v->phyclk_per_state[0] = v->phyclkv_min0p65;
+
+ hack_bounding_box(v, &dc->debug, context);
+
+ if (v->voltage_override == dcn_bw_v_max0p9) {
+ v->voltage_override_level = number_of_states - 1;
+ } else if (v->voltage_override == dcn_bw_v_nom0p8) {
+ v->voltage_override_level = number_of_states - 2;
+ } else if (v->voltage_override == dcn_bw_v_mid0p72) {
+ v->voltage_override_level = number_of_states - 3;
+ } else {
+ v->voltage_override_level = 0;
+ }
+ v->synchronized_vblank = dcn_bw_no;
+ v->ta_pscalculation = dcn_bw_override;
+ v->allow_different_hratio_vratio = dcn_bw_yes;
+
+
+ for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+ /* skip all but first of split pipes */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ continue;
+
+ v->underscan_output[input_idx] = false; /* taken care of in recout already*/
+ v->interlace_output[input_idx] = false;
+
+ v->htotal[input_idx] = pipe->stream->timing.h_total;
+ v->vtotal[input_idx] = pipe->stream->timing.v_total;
+ v->vactive[input_idx] = pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom;
+ v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total
+ - v->vactive[input_idx]
+ - pipe->stream->timing.v_front_porch;
+ v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz / 1000.0f;
+
+ if (!pipe->plane_state) {
+ v->dcc_enable[input_idx] = dcn_bw_yes;
+ v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32;
+ v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s;
+ v->lb_bit_per_pixel[input_idx] = 30;
+ v->viewport_width[input_idx] = pipe->stream->timing.h_addressable;
+ v->viewport_height[input_idx] = pipe->stream->timing.v_addressable;
+ v->scaler_rec_out_width[input_idx] = pipe->stream->timing.h_addressable;
+ v->scaler_recout_height[input_idx] = pipe->stream->timing.v_addressable;
+ v->override_hta_ps[input_idx] = 1;
+ v->override_vta_ps[input_idx] = 1;
+ v->override_hta_pschroma[input_idx] = 1;
+ v->override_vta_pschroma[input_idx] = 1;
+ v->source_scan[input_idx] = dcn_bw_hor;
+
+ } else {
+ v->viewport_height[input_idx] = pipe->plane_res.scl_data.viewport.height;
+ v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width;
+ v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width;
+ v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height;
+ if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) {
+ if (pipe->plane_state->rotation % 2 == 0) {
+ int viewport_end = pipe->plane_res.scl_data.viewport.width
+ + pipe->plane_res.scl_data.viewport.x;
+ int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width
+ + pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+
+ if (viewport_end > viewport_b_end)
+ v->viewport_width[input_idx] = viewport_end
+ - pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+ else
+ v->viewport_width[input_idx] = viewport_b_end
+ - pipe->plane_res.scl_data.viewport.x;
+ } else {
+ int viewport_end = pipe->plane_res.scl_data.viewport.height
+ + pipe->plane_res.scl_data.viewport.y;
+ int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height
+ + pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+
+ if (viewport_end > viewport_b_end)
+ v->viewport_height[input_idx] = viewport_end
+ - pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+ else
+ v->viewport_height[input_idx] = viewport_b_end
+ - pipe->plane_res.scl_data.viewport.y;
+ }
+ v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width
+ + pipe->bottom_pipe->plane_res.scl_data.recout.width;
+ }
+
+ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+ v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
+ pipe->plane_state->format);
+ v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
+ pipe->plane_state->tiling_info.gfx9.swizzle);
+ v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth);
+ v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps;
+ v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
+ v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
+ v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+ v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
+ }
+ if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
+ v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp;
+ v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/
+ v->output_format[input_idx] = pipe->stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444;
+ v->output[input_idx] = pipe->stream->sink->sink_signal ==
+ SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp;
+ v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc;
+ if (v->output[input_idx] == dcn_bw_hdmi) {
+ switch (pipe->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_101010:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc;
+ break;
+ case COLOR_DEPTH_121212:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_12bpc;
+ break;
+ case COLOR_DEPTH_161616:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_16bpc;
+ break;
+ default:
+ break;
+ }
+ }
+
+ input_idx++;
+ }
+ v->number_of_active_planes = input_idx;
+
+ scaler_settings_calculation(v);
+ mode_support_and_system_configuration(v);
+
+ if (v->voltage_level == 0 &&
+ (dc->debug.sr_exit_time_dpm0_ns
+ || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
+
+ if (dc->debug.sr_enter_plus_exit_time_dpm0_ns)
+ v->sr_enter_plus_exit_time =
+ dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
+ if (dc->debug.sr_exit_time_dpm0_ns)
+ v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
+ dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+ dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ mode_support_and_system_configuration(v);
+ }
+
+ if (v->voltage_level != 5) {
+ float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+ if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
+ bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
+ else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
+ bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72;
+ else if (bw_consumed < v->fabric_and_dram_bandwidth_vnom0p8)
+ bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8;
+ else
+ bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9;
+
+ if (bw_consumed < v->fabric_and_dram_bandwidth)
+ if (dc->debug.voltage_align_fclk)
+ bw_consumed = v->fabric_and_dram_bandwidth;
+
+ display_pipe_configuration(v);
+ calc_wm_sets_and_perf_params(context, v);
+ context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ (ddr4_dram_factor_single_Channel * v->number_of_channels));
+ if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
+ context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ }
+
+ context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+
+ context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+ if (dc->debug.max_disp_clk == true)
+ context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+
+ if (context->bw.dcn.calc_clk.dispclk_khz <
+ dc->debug.min_disp_clk_khz) {
+ context->bw.dcn.calc_clk.dispclk_khz =
+ dc->debug.min_disp_clk_khz;
+ }
+
+ context->bw.dcn.calc_clk.dppclk_div = (int)(v->dispclk_dppclk_ratio) == 2;
+
+ for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /* skip inactive pipe */
+ if (!pipe->stream)
+ continue;
+ /* skip all but first of split pipes */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ continue;
+
+ pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+ pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+ vesa_sync_start = pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_bottom +
+ pipe->stream->timing.v_front_porch;
+
+ asic_blank_end = (pipe->stream->timing.v_total -
+ vesa_sync_start -
+ pipe->stream->timing.v_border_top)
+ * (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+ asic_blank_start = asic_blank_end +
+ (pipe->stream->timing.v_border_top +
+ pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_bottom)
+ * (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+ pipe->pipe_dlg_param.vblank_start = asic_blank_start;
+ pipe->pipe_dlg_param.vblank_end = asic_blank_end;
+
+ if (pipe->plane_state) {
+ struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
+
+ if (v->dpp_per_plane[input_idx] == 2 ||
+ ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
+ if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* update previously split pipe */
+ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+ hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+ hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start;
+ hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
+ } else {
+ /* pipe not split previously needs split */
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+ ASSERT(hsplit_pipe);
+ split_stream_across_pipes(
+ &context->res_ctx, pool,
+ pipe, hsplit_pipe);
+ }
+
+ dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx);
+ } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* merge previously split pipe */
+ pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
+ if (hsplit_pipe->bottom_pipe)
+ hsplit_pipe->bottom_pipe->top_pipe = pipe;
+ hsplit_pipe->plane_state = NULL;
+ hsplit_pipe->stream = NULL;
+ hsplit_pipe->top_pipe = NULL;
+ hsplit_pipe->bottom_pipe = NULL;
+ resource_build_scaling_params(pipe);
+ }
+ /* for now important to do this after pipe split for building e2e params */
+ dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx);
+ }
+
+ input_idx++;
+ }
+ }
+
+ if (v->voltage_level == 0) {
+
+ dc->dml.soc.sr_enter_plus_exit_time_us =
+ dc->dcn_soc->sr_enter_plus_exit_time;
+ dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ }
+
+ /*
+ * BW limit is set to prevent display from impacting other system functions
+ */
+
+ bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
+ bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
+
+ kernel_fpu_end();
+
+ PERFORMANCE_TRACE_END();
+
+ if (bw_limit_pass && v->voltage_level != 5)
+ return true;
+ else
+ return false;
+}
+
+static unsigned int dcn_find_normalized_clock_vdd_Level(
+ const struct dc *dc,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz)
+{
+ int vdd_level = dcn_bw_v_min0p65;
+
+ if (clocks_in_khz == 0)/*todo some clock not in the considerations*/
+ return vdd_level;
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ {
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+ if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ }
+ break;
+
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+ if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ default:
+ break;
+ }
+ return vdd_level;
+}
+
+unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+ struct clocks_value *clocks)
+{
+ unsigned vdd_level, vdd_level_temp;
+ unsigned dcf_clk;
+
+ /*find a common supported voltage level*/
+ vdd_level = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+
+ /*find that level conresponding dcfclk*/
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ if (vdd_level == dcn_bw_v_max0p91) {
+ BREAK_TO_DEBUGGER();
+ dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
+ } else if (vdd_level == dcn_bw_v_max0p9)
+ dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
+ else if (vdd_level == dcn_bw_v_nom0p8)
+ dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000;
+ else if (vdd_level == dcn_bw_v_mid0p72)
+ dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000;
+ else
+ dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
+
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\tdcf_clk for voltage = %d\n", dcf_clk);
+ return dcf_clk;
+}
+
+void dcn_bw_update_from_pplib(struct dc *dc)
+{
+ struct dc_context *ctx = dc->ctx;
+ struct dm_pp_clock_levels_with_voltage clks = {0};
+
+ kernel_fpu_begin();
+
+ /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
+
+ if (dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &clks) &&
+ clks.num_levels != 0) {
+ ASSERT(clks.num_levels >= 3);
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (clks.data[0].clocks_in_khz / 1000.0) / 1000.0;
+ if (clks.num_levels > 2) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ } else {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ }
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ } else
+ BREAK_TO_DEBUGGER();
+ if (dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &clks) &&
+ clks.num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = clks.data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0;
+ } else
+ BREAK_TO_DEBUGGER();
+
+ kernel_fpu_end();
+}
+
+void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+{
+ struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_wm_range_sets ranges = {0};
+ int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
+ int max_dcfclk_khz, min_dcfclk_khz;
+ int socclk_khz;
+ const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+ if (!pp->set_wm_ranges)
+ return;
+
+ kernel_fpu_begin();
+ max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
+ nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
+ mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
+ min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
+ min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ socclk_khz = dc->dcn_soc->socclk * 1000;
+ kernel_fpu_end();
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+ * depending on DPM state they are in. And update BW MGR GFX Engine and
+ * Memory clock member variables for Watermarks calculations for each
+ * Watermark Set
+ */
+ /* SOCCLK does not affect anytihng but writeback for DCN so for now we dont
+ * care what the value is, hence min to overdrive level
+ */
+ ranges.num_reader_wm_sets = WM_COUNT;
+ ranges.num_writer_wm_sets = WM_COUNT;
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
+
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+ ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
+ ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
+ ranges.writer_wm_sets[1].wm_inst = WM_B;
+ ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
+ ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
+
+
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+ ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
+ ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
+ ranges.writer_wm_sets[2].wm_inst = WM_C;
+ ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
+ ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
+
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+ ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
+ ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[3].wm_inst = WM_D;
+ ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
+
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+ ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
+ ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
+ ranges.writer_wm_sets[1].wm_inst = WM_B;
+ ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
+ ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
+
+
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+ ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
+ ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
+ ranges.writer_wm_sets[2].wm_inst = WM_C;
+ ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
+ ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
+
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+ ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
+ ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
+ ranges.writer_wm_sets[3].wm_inst = WM_D;
+ ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
+ ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+ }
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ pp->set_wm_ranges(&pp->pp_smu, &ranges);
+}
+
+void dcn_bw_sync_calcs_and_dml(struct dc *dc)
+{
+ kernel_fpu_begin();
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "sr_exit_time: %d ns\n"
+ "sr_enter_plus_exit_time: %d ns\n"
+ "urgent_latency: %d ns\n"
+ "write_back_latency: %d ns\n"
+ "percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
+ "max_request_size: %d bytes\n"
+ "dcfclkv_max0p9: %d kHz\n"
+ "dcfclkv_nom0p8: %d kHz\n"
+ "dcfclkv_mid0p72: %d kHz\n"
+ "dcfclkv_min0p65: %d kHz\n"
+ "max_dispclk_vmax0p9: %d kHz\n"
+ "max_dispclk_vnom0p8: %d kHz\n"
+ "max_dispclk_vmid0p72: %d kHz\n"
+ "max_dispclk_vmin0p65: %d kHz\n"
+ "max_dppclk_vmax0p9: %d kHz\n"
+ "max_dppclk_vnom0p8: %d kHz\n"
+ "max_dppclk_vmid0p72: %d kHz\n"
+ "max_dppclk_vmin0p65: %d kHz\n"
+ "socclk: %d kHz\n"
+ "fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
+ "phyclkv_max0p9: %d kHz\n"
+ "phyclkv_nom0p8: %d kHz\n"
+ "phyclkv_mid0p72: %d kHz\n"
+ "phyclkv_min0p65: %d kHz\n"
+ "downspreading: %d %\n"
+ "round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
+ "urgent_out_of_order_return_per_channel: %d Bytes\n"
+ "number_of_channels: %d\n"
+ "vmm_page_size: %d Bytes\n"
+ "dram_clock_change_latency: %d ns\n"
+ "return_bus_width: %d Bytes\n",
+ dc->dcn_soc->sr_exit_time * 1000,
+ dc->dcn_soc->sr_enter_plus_exit_time * 1000,
+ dc->dcn_soc->urgent_latency * 1000,
+ dc->dcn_soc->write_back_latency * 1000,
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency,
+ dc->dcn_soc->max_request_size,
+ dc->dcn_soc->dcfclkv_max0p9 * 1000,
+ dc->dcn_soc->dcfclkv_nom0p8 * 1000,
+ dc->dcn_soc->dcfclkv_mid0p72 * 1000,
+ dc->dcn_soc->dcfclkv_min0p65 * 1000,
+ dc->dcn_soc->max_dispclk_vmax0p9 * 1000,
+ dc->dcn_soc->max_dispclk_vnom0p8 * 1000,
+ dc->dcn_soc->max_dispclk_vmid0p72 * 1000,
+ dc->dcn_soc->max_dispclk_vmin0p65 * 1000,
+ dc->dcn_soc->max_dppclk_vmax0p9 * 1000,
+ dc->dcn_soc->max_dppclk_vnom0p8 * 1000,
+ dc->dcn_soc->max_dppclk_vmid0p72 * 1000,
+ dc->dcn_soc->max_dppclk_vmin0p65 * 1000,
+ dc->dcn_soc->socclk * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000,
+ dc->dcn_soc->phyclkv_max0p9 * 1000,
+ dc->dcn_soc->phyclkv_nom0p8 * 1000,
+ dc->dcn_soc->phyclkv_mid0p72 * 1000,
+ dc->dcn_soc->phyclkv_min0p65 * 1000,
+ dc->dcn_soc->downspreading * 100,
+ dc->dcn_soc->round_trip_ping_latency_cycles,
+ dc->dcn_soc->urgent_out_of_order_return_per_channel,
+ dc->dcn_soc->number_of_channels,
+ dc->dcn_soc->vmm_page_size,
+ dc->dcn_soc->dram_clock_change_latency * 1000,
+ dc->dcn_soc->return_bus_width);
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "rob_buffer_size_in_kbyte: %d\n"
+ "det_buffer_size_in_kbyte: %d\n"
+ "dpp_output_buffer_pixels: %d\n"
+ "opp_output_buffer_lines: %d\n"
+ "pixel_chunk_size_in_kbyte: %d\n"
+ "pte_enable: %d\n"
+ "pte_chunk_size: %d kbytes\n"
+ "meta_chunk_size: %d kbytes\n"
+ "writeback_chunk_size: %d kbytes\n"
+ "odm_capability: %d\n"
+ "dsc_capability: %d\n"
+ "line_buffer_size: %d bits\n"
+ "max_line_buffer_lines: %d\n"
+ "is_line_buffer_bpp_fixed: %d\n"
+ "line_buffer_fixed_bpp: %d\n"
+ "writeback_luma_buffer_size: %d kbytes\n"
+ "writeback_chroma_buffer_size: %d kbytes\n"
+ "max_num_dpp: %d\n"
+ "max_num_writeback: %d\n"
+ "max_dchub_topscl_throughput: %d pixels/dppclk\n"
+ "max_pscl_tolb_throughput: %d pixels/dppclk\n"
+ "max_lb_tovscl_throughput: %d pixels/dppclk\n"
+ "max_vscl_tohscl_throughput: %d pixels/dppclk\n"
+ "max_hscl_ratio: %d\n"
+ "max_vscl_ratio: %d\n"
+ "max_hscl_taps: %d\n"
+ "max_vscl_taps: %d\n"
+ "pte_buffer_size_in_requests: %d\n"
+ "dispclk_ramping_margin: %d %\n"
+ "under_scan_factor: %d %\n"
+ "max_inter_dcn_tile_repeaters: %d\n"
+ "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
+ "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
+ "dcfclk_cstate_latency: %d\n",
+ dc->dcn_ip->rob_buffer_size_in_kbyte,
+ dc->dcn_ip->det_buffer_size_in_kbyte,
+ dc->dcn_ip->dpp_output_buffer_pixels,
+ dc->dcn_ip->opp_output_buffer_lines,
+ dc->dcn_ip->pixel_chunk_size_in_kbyte,
+ dc->dcn_ip->pte_enable,
+ dc->dcn_ip->pte_chunk_size,
+ dc->dcn_ip->meta_chunk_size,
+ dc->dcn_ip->writeback_chunk_size,
+ dc->dcn_ip->odm_capability,
+ dc->dcn_ip->dsc_capability,
+ dc->dcn_ip->line_buffer_size,
+ dc->dcn_ip->max_line_buffer_lines,
+ dc->dcn_ip->is_line_buffer_bpp_fixed,
+ dc->dcn_ip->line_buffer_fixed_bpp,
+ dc->dcn_ip->writeback_luma_buffer_size,
+ dc->dcn_ip->writeback_chroma_buffer_size,
+ dc->dcn_ip->max_num_dpp,
+ dc->dcn_ip->max_num_writeback,
+ dc->dcn_ip->max_dchub_topscl_throughput,
+ dc->dcn_ip->max_pscl_tolb_throughput,
+ dc->dcn_ip->max_lb_tovscl_throughput,
+ dc->dcn_ip->max_vscl_tohscl_throughput,
+ dc->dcn_ip->max_hscl_ratio,
+ dc->dcn_ip->max_vscl_ratio,
+ dc->dcn_ip->max_hscl_taps,
+ dc->dcn_ip->max_vscl_taps,
+ dc->dcn_ip->pte_buffer_size_in_requests,
+ dc->dcn_ip->dispclk_ramping_margin,
+ dc->dcn_ip->under_scan_factor * 100,
+ dc->dcn_ip->max_inter_dcn_tile_repeaters,
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
+ dc->dcn_ip->dcfclk_cstate_latency);
+ dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
+
+ dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
+ dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
+ dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
+ dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
+
+ dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
+ dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
+ dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
+ dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
+
+ dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
+ dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
+ dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
+ dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
+
+ dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
+ dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
+ dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
+ dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
+
+ dc->dml.soc.vmin.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+ dc->dml.soc.vmid.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+ dc->dml.soc.vnom.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+ dc->dml.soc.vmax.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+ dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
+ dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency;
+ dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency;
+ dc->dml.soc.ideal_dram_bw_after_urgent_percent =
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+ dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size;
+ dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading;
+ dc->dml.soc.round_trip_ping_latency_dcfclk_cycles =
+ dc->dcn_soc->round_trip_ping_latency_cycles;
+ dc->dml.soc.urgent_out_of_order_return_per_channel_bytes =
+ dc->dcn_soc->urgent_out_of_order_return_per_channel;
+ dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels;
+ dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size;
+ dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency;
+ dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width;
+
+ dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte;
+ dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte;
+ dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+ dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+ dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+ dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes;
+ dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size;
+ dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size;
+ dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size;
+ dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size;
+ dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+ dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes;
+ dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp;
+ dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size;
+ dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size;
+ dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp;
+ dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback;
+ dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput;
+ dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput;
+ dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput;
+ dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput;
+ dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+ dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+ dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+ dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+ /*pte_buffer_size_in_requests missing in dml*/
+ dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin;
+ dc->dml.ip.underscan_factor = dc->dcn_ip->under_scan_factor;
+ dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+ dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes;
+ dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
+ dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
+ kernel_fpu_end();
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
new file mode 100644
index 000000000000..7240db2e6f09
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -0,0 +1,1684 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bios_parser_interface.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "dpp.h"
+#include "timing_generator.h"
+#include "virtual/virtual_link_encoder.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "mem_input.h"
+#include "hubp.h"
+
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destroy_links(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (NULL != dc->links[i])
+ link_destroy(&dc->links[i]);
+ }
+}
+
+static bool create_links(
+ struct dc *dc,
+ uint32_t num_virtual_links)
+{
+ int i;
+ int connectors_num;
+ struct dc_bios *bios = dc->ctx->dc_bios;
+
+ dc->link_count = 0;
+
+ connectors_num = bios->funcs->get_connectors_number(bios);
+
+ if (connectors_num > ENUM_ID_COUNT) {
+ dm_error(
+ "DC: Number of connectors %d exceeds maximum of %d!\n",
+ connectors_num,
+ ENUM_ID_COUNT);
+ return false;
+ }
+
+ if (connectors_num == 0 && num_virtual_links == 0) {
+ dm_error("DC: Number of connectors is zero!\n");
+ }
+
+ dm_output_to_console(
+ "DC: %s: connectors_num: physical:%d, virtual:%d\n",
+ __func__,
+ connectors_num,
+ num_virtual_links);
+
+ for (i = 0; i < connectors_num; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ link_init_params.ctx = dc->ctx;
+ /* next BIOS object table connector */
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link = link_create(&link_init_params);
+
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
+ for (i = 0; i < num_virtual_links; i++) {
+ struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
+ struct encoder_init_data enc_init = {0};
+
+ if (link == NULL) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_index = dc->link_count;
+ dc->links[dc->link_count] = link;
+ dc->link_count++;
+
+ link->ctx = dc->ctx;
+ link->dc = dc;
+ link->connector_signal = SIGNAL_TYPE_VIRTUAL;
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_VIRTUAL;
+ link->link_id.enum_id = ENUM_ID_1;
+ link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+
+ if (!link->link_enc) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ enc_init.ctx = dc->ctx;
+ enc_init.channel = CHANNEL_ID_UNKNOWN;
+ enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc_init.transmitter = TRANSMITTER_UNKNOWN;
+ enc_init.connector = link->link_id;
+ enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+ enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+ enc_init.encoder.enum_id = ENUM_ID_1;
+ virtual_link_encoder_construct(link->link_enc, &enc_init);
+ }
+
+ return true;
+
+failed_alloc:
+ return false;
+}
+
+static bool stream_adjust_vmin_vmax(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ int vmin, int vmax)
+{
+ /* TODO: Support multiple streams */
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.set_drr(&pipe, 1, vmin, vmax);
+
+ /* build and update the info frame */
+ resource_build_info_frame(pipe);
+ dc->hwss.update_info_frame(pipe);
+
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+static bool stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_pos, unsigned int *nom_v_pos)
+{
+ /* TODO: Support multiple streams */
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+ struct crtc_position position;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.get_position(&pipe, 1, &position);
+
+ *v_pos = position.vertical_count;
+ *nom_v_pos = position.nominal_vcount;
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+{
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_gamut_remap(pipes);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_csc_matrix(pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static void set_static_screen_events(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+ const struct dc_static_screen_events *events)
+{
+ int i = 0;
+ int j = 0;
+ struct pipe_ctx *pipes_affected[MAX_PIPES];
+ int num_pipes_affected = 0;
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream = streams[i];
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].stream
+ == stream) {
+ pipes_affected[num_pipes_affected++] =
+ &dc->current_state->res_ctx.pipe_ctx[j];
+ }
+ }
+ }
+
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+}
+
+static void set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+static void perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+static void set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ link->preferred_link_setting = *link_setting;
+ dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+static void enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+static void disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+static void set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
+static void set_dither_option(struct dc_stream_state *stream,
+ enum dc_dither_option option)
+{
+ struct bit_depth_reduction_params params;
+ struct dc_link *link = stream->status.link;
+ struct pipe_ctx *pipes = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
+ stream) {
+ pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ memset(&params, 0, sizeof(params));
+ if (!pipes)
+ return;
+ if (option > DITHER_OPTION_MAX)
+ return;
+
+ stream->dither_option = option;
+
+ resource_build_bit_depth_reduction_params(stream,
+ &params);
+ stream->bit_depth_params = params;
+ pipes->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+}
+
+void set_dpms(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ bool dpms_off)
+{
+ struct pipe_ctx *pipe_ctx = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ if (!pipe_ctx) {
+ ASSERT(0);
+ return;
+ }
+
+ if (stream->dpms_off != dpms_off) {
+ stream->dpms_off = dpms_off;
+ if (dpms_off)
+ core_link_disable_stream(pipe_ctx,
+ KEEP_ACQUIRED_RESOURCE);
+ else
+ core_link_enable_stream(dc->current_state, pipe_ctx);
+ }
+}
+
+static void allocate_dc_stream_funcs(struct dc *dc)
+{
+ if (dc->hwss.set_drr != NULL) {
+ dc->stream_funcs.adjust_vmin_vmax =
+ stream_adjust_vmin_vmax;
+ }
+
+ dc->stream_funcs.set_static_screen_events =
+ set_static_screen_events;
+
+ dc->stream_funcs.get_crtc_position =
+ stream_get_crtc_position;
+
+ dc->stream_funcs.set_gamut_remap =
+ set_gamut_remap;
+
+ dc->stream_funcs.program_csc_matrix =
+ program_csc_matrix;
+
+ dc->stream_funcs.set_dither_option =
+ set_dither_option;
+
+ dc->stream_funcs.set_dpms =
+ set_dpms;
+
+ dc->link_funcs.set_drive_settings =
+ set_drive_settings;
+
+ dc->link_funcs.perform_link_training =
+ perform_link_training;
+
+ dc->link_funcs.set_preferred_link_settings =
+ set_preferred_link_settings;
+
+ dc->link_funcs.enable_hpd =
+ enable_hpd;
+
+ dc->link_funcs.disable_hpd =
+ disable_hpd;
+
+ dc->link_funcs.set_test_pattern =
+ set_test_pattern;
+}
+
+static void destruct(struct dc *dc)
+{
+ dc_release_state(dc->current_state);
+ dc->current_state = NULL;
+
+ destroy_links(dc);
+
+ dc_destroy_resource_pool(dc);
+
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
+
+ if (dc->ctx->i2caux)
+ dal_i2caux_destroy(&dc->ctx->i2caux);
+
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+ if (dc->ctx->logger)
+ dal_logger_destroy(&dc->ctx->logger);
+
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+
+ kfree(dc->bw_vbios);
+ dc->bw_vbios = NULL;
+
+ kfree(dc->bw_dceip);
+ dc->bw_dceip = NULL;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ kfree(dc->dcn_soc);
+ dc->dcn_soc = NULL;
+
+ kfree(dc->dcn_ip);
+ dc->dcn_ip = NULL;
+
+#endif
+}
+
+static bool construct(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dal_logger *logger;
+ struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
+ GFP_KERNEL);
+ struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
+ GFP_KERNEL);
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
+ GFP_KERNEL);
+ struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+#endif
+
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+ if (!dc_dceip) {
+ dm_error("%s: failed to create dceip\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_dceip = dc_dceip;
+
+ if (!dc_vbios) {
+ dm_error("%s: failed to create vbios\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_vbios = dc_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (!dcn_soc) {
+ dm_error("%s: failed to create dcn_soc\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_soc = dcn_soc;
+
+ if (!dcn_ip) {
+ dm_error("%s: failed to create dcn_ip\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_ip = dcn_ip;
+#endif
+
+ if (!dc_ctx) {
+ dm_error("%s: failed to create ctx\n", __func__);
+ goto fail;
+ }
+
+ dc->current_state = dc_create_state();
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+
+ /* Create logger */
+ logger = dal_logger_create(dc_ctx, init_params->log_mask);
+
+ if (!logger) {
+ /* can *not* call logger. call base driver 'print error' */
+ dm_error("%s: failed to create Logger!\n", __func__);
+ goto fail;
+ }
+ dc_ctx->logger = logger;
+ dc->ctx = dc_ctx;
+ dc->ctx->dce_environment = init_params->dce_environment;
+
+ dc_version = resource_parse_asic_id(init_params->asic_id);
+ dc->ctx->dce_version = dc_version;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
+#endif
+ /* Resource should construct all asic specific resources.
+ * This should be the only place where we need to parse the asic id
+ */
+ if (init_params->vbios_override)
+ dc_ctx->dc_bios = init_params->vbios_override;
+ else {
+ /* Create BIOS parser */
+ struct bp_init_data bp_init_data;
+
+ bp_init_data.ctx = dc_ctx;
+ bp_init_data.bios = init_params->asic_id.atombios_base_address;
+
+ dc_ctx->dc_bios = dal_bios_parser_create(
+ &bp_init_data, dc_version);
+
+ if (!dc_ctx->dc_bios) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc_ctx->created_bios = true;
+ }
+
+ /* Create I2C AUX */
+ dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
+
+ if (!dc_ctx->i2caux) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ /* Create GPIO service */
+ dc_ctx->gpio_service = dal_gpio_service_create(
+ dc_version,
+ dc_ctx->dce_environment,
+ dc_ctx);
+
+ if (!dc_ctx->gpio_service) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc->res_pool = dc_create_resource_pool(
+ dc,
+ init_params->num_virtual_links,
+ dc_version,
+ init_params->asic_id);
+ if (!dc->res_pool)
+ goto fail;
+
+ dc_resource_state_construct(dc, dc->current_state);
+
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ allocate_dc_stream_funcs(dc);
+
+ return true;
+
+fail:
+
+ destruct(dc);
+ return false;
+}
+
+static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+{
+ int i, j;
+ struct dc_state *dangling_context = dc_create_state();
+ struct dc_state *current_ctx;
+
+ if (dangling_context == NULL)
+ return;
+
+ dc_resource_state_copy_construct(dc->current_state, dangling_context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *old_stream =
+ dc->current_state->res_ctx.pipe_ctx[i].stream;
+ bool should_disable = true;
+
+ for (j = 0; j < context->stream_count; j++) {
+ if (old_stream == context->streams[j]) {
+ should_disable = false;
+ break;
+ }
+ }
+ if (should_disable && old_stream) {
+ dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ }
+ }
+
+ current_ctx = dc->current_state;
+ dc->current_state = dangling_context;
+ dc_release_state(current_ctx);
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+struct dc *dc_create(const struct dc_init_data *init_params)
+ {
+ struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ unsigned int full_pipe_count;
+
+ if (NULL == dc)
+ goto alloc_fail;
+
+ if (false == construct(dc, init_params))
+ goto construct_fail;
+
+ /*TODO: separate HW and SW initialization*/
+ dc->hwss.init_hw(dc);
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
+
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+
+ dc->config = init_params->flags;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Display Core initialized\n");
+
+
+ /* TODO: missing feature to be enabled */
+ dc->debug.disable_dfs_bypass = true;
+
+ return dc;
+
+construct_fail:
+ kfree(dc);
+
+alloc_fail:
+ return NULL;
+}
+
+void dc_destroy(struct dc **dc)
+{
+ destruct(*dc);
+ kfree(*dc);
+ *dc = NULL;
+}
+
+static void program_timing_sync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i, j;
+ int group_index = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
+ continue;
+
+ unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
+ }
+
+ for (i = 0; i < pipe_count; i++) {
+ int group_size = 1;
+ struct pipe_ctx *pipe_set[MAX_PIPES];
+
+ if (!unsynced_pipes[i])
+ continue;
+
+ pipe_set[0] = unsynced_pipes[i];
+ unsynced_pipes[i] = NULL;
+
+ /* Add tg to the set, search rest of the tg's for ones with
+ * same timing, add all tgs with same timing to the group
+ */
+ for (j = i + 1; j < pipe_count; j++) {
+ if (!unsynced_pipes[j])
+ continue;
+
+ if (resource_are_streams_timing_synchronizable(
+ unsynced_pipes[j]->stream,
+ pipe_set[0]->stream)) {
+ pipe_set[group_size] = unsynced_pipes[j];
+ unsynced_pipes[j] = NULL;
+ group_size++;
+ }
+ }
+
+ /* set first unblanked pipe as master */
+ for (j = 0; j < group_size; j++) {
+ struct pipe_ctx *temp;
+
+ if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (j == 0)
+ break;
+
+ temp = pipe_set[0];
+ pipe_set[0] = pipe_set[j];
+ pipe_set[j] = temp;
+ break;
+ }
+ }
+
+ /* remove any other unblanked pipes as they have already been synced */
+ for (j = j + 1; j < group_size; j++) {
+ if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
+ }
+
+ if (group_size > 1) {
+ dc->hwss.enable_timing_synchronization(
+ dc, group_index, group_size, pipe_set);
+ group_index++;
+ }
+ }
+}
+
+static bool context_changed(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ uint8_t i;
+
+ if (context->stream_count != dc->current_state->stream_count)
+ return true;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i] != context->streams[i])
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ bool ret = true;
+ int i, j;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context != NULL)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ for (j = 0 ; pipe && j < stream_count; j++) {
+ if (streams[j] && streams[j] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ return ret;
+}
+
+
+/*
+ * Applies given context to HW and copy it into current context.
+ * It's up to the user to release the src context afterwards.
+ */
+static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ struct pipe_ctx *pipe;
+ int i, j, k, l;
+ struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
+
+ disable_dangling_plane(dc, context);
+
+ for (i = 0; i < context->stream_count; i++)
+ dc_streams[i] = context->streams[i];
+
+ if (!dcb->funcs->is_accelerated_mode(dcb))
+ dc->hwss.enable_accelerated_mode(dc);
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_sink *sink = context->streams[i]->sink;
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
+
+ /*
+ * enable stereo
+ * TODO rework dc_enable_stereo call to work with validation sets?
+ */
+ for (k = 0; k < MAX_PIPES; k++) {
+ pipe = &context->res_ctx.pipe_ctx[k];
+
+ for (l = 0 ; pipe && l < context->stream_count; l++) {
+ if (context->streams[l] &&
+ context->streams[l] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
+ context->streams[i]->timing.h_addressable,
+ context->streams[i]->timing.v_addressable,
+ context->streams[i]->timing.h_total,
+ context->streams[i]->timing.v_total,
+ context->streams[i]->timing.pix_clk_khz);
+ }
+
+ dc->hwss.ready_shared_resources(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+ program_timing_sync(dc, context);
+
+ dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count; i++) {
+ for (j = 0; j < MAX_PIPES; j++) {
+ pipe = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe->top_pipe && pipe->stream == context->streams[i])
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+ }
+
+ dc_release_state(dc->current_state);
+
+ dc->current_state = context;
+
+ dc_retain_state(dc->current_state);
+
+ dc->hwss.optimize_shared_resources(dc);
+
+ return result;
+}
+
+bool dc_commit_state(struct dc *dc, struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ int i;
+
+ if (false == context_changed(dc, context))
+ return DC_OK;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
+ __func__, context->stream_count);
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ dc_stream_log(stream,
+ dc->ctx->logger,
+ LOG_DC);
+ }
+
+ result = dc_commit_state_no_check(dc, context);
+
+ return (result == DC_OK);
+}
+
+
+bool dc_post_update_surfaces_to_stream(struct dc *dc)
+{
+ int i;
+ struct dc_state *context = dc->current_state;
+
+ post_surface_trace(dc);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL
+ || context->res_ctx.pipe_ctx[i].plane_state == NULL)
+ dc->hwss.power_down_front_end(dc, i);
+
+ /* 3rd param should be true, temp w/a for RV*/
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
+#else
+ dc->hwss.set_bandwidth(dc, context, true);
+#endif
+ return true;
+}
+
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+bool dc_commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *state)
+{
+ /* no need to dynamically allocate this. it's pretty small */
+ struct dc_surface_update updates[MAX_SURFACES];
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ int i;
+ struct dc_stream_update *stream_update =
+ kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+
+ if (!stream_update) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+ GFP_KERNEL);
+ plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+ GFP_KERNEL);
+ scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+ GFP_KERNEL);
+
+ if (!flip_addr || !plane_info || !scaling_info) {
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return false;
+ }
+
+ memset(updates, 0, sizeof(updates));
+
+ stream_update->src = dc_stream->src;
+ stream_update->dst = dc_stream->dst;
+ stream_update->out_transfer_func = dc_stream->out_transfer_func;
+
+ for (i = 0; i < new_plane_count; i++) {
+ updates[i].surface = plane_states[i];
+ updates[i].gamma =
+ (struct dc_gamma *)plane_states[i]->gamma_correction;
+ updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+ flip_addr[i].address = plane_states[i]->address;
+ flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+ plane_info[i].color_space = plane_states[i]->color_space;
+ plane_info[i].format = plane_states[i]->format;
+ plane_info[i].plane_size = plane_states[i]->plane_size;
+ plane_info[i].rotation = plane_states[i]->rotation;
+ plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+ plane_info[i].stereo_format = plane_states[i]->stereo_format;
+ plane_info[i].tiling_info = plane_states[i]->tiling_info;
+ plane_info[i].visible = plane_states[i]->visible;
+ plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+ plane_info[i].dcc = plane_states[i]->dcc;
+ scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+ scaling_info[i].src_rect = plane_states[i]->src_rect;
+ scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+ scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+ updates[i].flip_addr = &flip_addr[i];
+ updates[i].plane_info = &plane_info[i];
+ updates[i].scaling_info = &scaling_info[i];
+ }
+
+ dc_commit_updates_for_stream(
+ dc,
+ updates,
+ new_plane_count,
+ dc_stream, stream_update, plane_states, state);
+
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return true;
+}
+
+struct dc_state *dc_create_state(void)
+{
+ struct dc_state *context = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!context)
+ return NULL;
+
+ kref_init(&context->refcount);
+ return context;
+}
+
+void dc_retain_state(struct dc_state *context)
+{
+ kref_get(&context->refcount);
+}
+
+static void dc_state_free(struct kref *kref)
+{
+ struct dc_state *context = container_of(kref, struct dc_state, refcount);
+ dc_resource_state_destruct(context);
+ kfree(context);
+}
+
+void dc_release_state(struct dc_state *context)
+{
+ kref_put(&context->refcount, dc_state_free);
+}
+
+static bool is_surface_in_context(
+ const struct dc_state *context,
+ const struct dc_plane_state *plane_state)
+{
+ int j;
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (plane_state == pipe_ctx->plane_state) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
+{
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ return 12;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ return 16;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ return 32;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ return 64;
+ default:
+ ASSERT_CRITICAL(false);
+ return -1;
+ }
+}
+
+static enum surface_update_type get_plane_info_update_type(
+ const struct dc_surface_update *u,
+ int surface_index)
+{
+ struct dc_plane_info temp_plane_info;
+ memset(&temp_plane_info, 0, sizeof(temp_plane_info));
+
+ if (!u->plane_info)
+ return UPDATE_TYPE_FAST;
+
+ temp_plane_info = *u->plane_info;
+
+ /* Copy all parameters that will cause a full update
+ * from current surface, the rest of the parameters
+ * from provided plane configuration.
+ * Perform memory compare and special validation
+ * for those that can cause fast/medium updates
+ */
+
+ /* Full update parameters */
+ temp_plane_info.color_space = u->surface->color_space;
+ temp_plane_info.dcc = u->surface->dcc;
+ temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
+ temp_plane_info.plane_size = u->surface->plane_size;
+ temp_plane_info.rotation = u->surface->rotation;
+ temp_plane_info.stereo_format = u->surface->stereo_format;
+
+ if (surface_index == 0)
+ temp_plane_info.visible = u->plane_info->visible;
+ else
+ temp_plane_info.visible = u->surface->visible;
+
+ if (memcmp(u->plane_info, &temp_plane_info,
+ sizeof(struct dc_plane_info)) != 0)
+ return UPDATE_TYPE_FULL;
+
+ if (pixel_format_to_bpp(u->plane_info->format) !=
+ pixel_format_to_bpp(u->surface->format)) {
+ /* different bytes per element will require full bandwidth
+ * and DML calculation
+ */
+ return UPDATE_TYPE_FULL;
+ }
+
+ if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
+ sizeof(union dc_tiling_info)) != 0) {
+ /* todo: below are HW dependent, we should add a hook to
+ * DCE/N resource and validated there.
+ */
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ /* swizzled mode requires RQ to be setup properly,
+ * thus need to run DML to calculate RQ settings
+ */
+ return UPDATE_TYPE_FULL;
+ }
+ }
+
+ return UPDATE_TYPE_MED;
+}
+
+static enum surface_update_type get_scaling_info_update_type(
+ const struct dc_surface_update *u)
+{
+ if (!u->scaling_info)
+ return UPDATE_TYPE_FAST;
+
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height
+ || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+ || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
+ || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
+ return UPDATE_TYPE_FULL;
+
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+ || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
+ || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ return UPDATE_TYPE_MED;
+
+ return UPDATE_TYPE_FAST;
+}
+
+static enum surface_update_type det_surface_update(
+ const struct dc *dc,
+ const struct dc_surface_update *u,
+ int surface_index)
+{
+ const struct dc_state *context = dc->current_state;
+ enum surface_update_type type = UPDATE_TYPE_FAST;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (!is_surface_in_context(context, u->surface))
+ return UPDATE_TYPE_FULL;
+
+ type = get_plane_info_update_type(u, surface_index);
+ if (overall_type < type)
+ overall_type = type;
+
+ type = get_scaling_info_update_type(u);
+ if (overall_type < type)
+ overall_type = type;
+
+ if (u->in_transfer_func ||
+ u->hdr_static_metadata) {
+ if (overall_type < UPDATE_TYPE_MED)
+ overall_type = UPDATE_TYPE_MED;
+ }
+
+ return overall_type;
+}
+
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return UPDATE_TYPE_FULL;
+
+ if (stream_update)
+ return UPDATE_TYPE_FULL;
+
+ for (i = 0 ; i < surface_count; i++) {
+ enum surface_update_type type =
+ det_surface_update(dc, &updates[i], i);
+
+ if (type == UPDATE_TYPE_FULL)
+ return type;
+
+ if (overall_type < type)
+ overall_type = type;
+ }
+
+ return overall_type;
+}
+
+static struct dc_stream_status *stream_get_status(
+ struct dc_state *ctx,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < ctx->stream_count; i++) {
+ if (stream == ctx->streams[i]) {
+ return &ctx->stream_status[i];
+ }
+ }
+
+ return NULL;
+}
+
+static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+
+static void commit_planes_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+
+ if (update_type == UPDATE_TYPE_FULL) {
+ dc->hwss.set_bandwidth(dc, context, false);
+ context_clock_trace(dc, context);
+ }
+
+ if (update_type > UPDATE_TYPE_FAST) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
+ }
+ }
+
+ if (surface_count == 0) {
+ /*
+ * In case of turning off screen, no need to program front end a second time.
+ * just return after program front end.
+ */
+ dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
+ return;
+ }
+
+ /* Lock pipes for provided surfaces, or all active if full update*/
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
+ continue;
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+ dc->hwss.pipe_control_lock(
+ dc,
+ pipe_ctx,
+ true);
+ }
+ if (update_type == UPDATE_TYPE_FULL)
+ break;
+ }
+
+ /* Full fe update*/
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
+ continue;
+
+ if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
+ struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, pipe_ctx->stream, stream_status->plane_count, context);
+ }
+ }
+
+ if (update_type > UPDATE_TYPE_FAST)
+ context_timing_trace(dc, &context->res_ctx);
+
+ /* Perform requested Updates */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ if (update_type == UPDATE_TYPE_MED)
+ dc->hwss.apply_ctx_for_surface(
+ dc, stream, surface_count, context);
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ if (srf_updates[i].flip_addr)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ /* work around to program degamma regs for split pipe after set mode. */
+ if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+
+ if (stream_update != NULL &&
+ stream_update->out_transfer_func != NULL) {
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+
+ if (srf_updates[i].hdr_static_metadata) {
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+ }
+ }
+ }
+
+ /* Unlock pipes */
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ for (j = 0; j < surface_count; j++) {
+ if (update_type != UPDATE_TYPE_FULL &&
+ srf_updates[j].surface != pipe_ctx->plane_state)
+ continue;
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+ dc->hwss.pipe_control_lock(
+ dc,
+ pipe_ctx,
+ false);
+
+ break;
+ }
+ }
+}
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_plane_state **plane_states,
+ struct dc_state *state)
+{
+ const struct dc_stream_status *stream_status;
+ enum surface_update_type update_type;
+ struct dc_state *context;
+ struct dc_context *dc_ctx = dc->ctx;
+ int i, j;
+
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
+ if (update_type >= update_surface_trace_level)
+ update_surface_trace(dc, srf_updates, surface_count);
+
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+
+ /* initialize scratch memory for building context */
+ context = dc_create_state();
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return;
+ }
+
+ dc_resource_state_copy_construct(state, context);
+ }
+
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ /* TODO: On flip we don't build the state, so it still has the
+ * old address. Which is why we are updating the address here
+ */
+ if (srf_updates[i].flip_addr) {
+ surface->address = srf_updates[i].flip_addr->address;
+ surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
+
+ }
+
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+
+ if (update_type >= UPDATE_TYPE_FULL)
+ dc_post_update_surfaces_to_stream(dc);
+
+ if (dc->current_state != context) {
+
+ struct dc_state *old = dc->current_state;
+
+ dc->current_state = context;
+ dc_release_state(old);
+
+ }
+
+ return;
+
+}
+
+uint8_t dc_get_current_stream_count(struct dc *dc)
+{
+ return dc->current_state->stream_count;
+}
+
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
+{
+ if (i < dc->current_state->stream_count)
+ return dc->current_state->streams[i];
+ return NULL;
+}
+
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
+}
+
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+{
+
+ if (dc == NULL)
+ return;
+
+ dal_irq_service_set(dc->res_pool->irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+ dal_irq_service_ack(dc->res_pool->irqs, src);
+}
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state)
+{
+ struct kref refcount;
+
+ switch (power_state) {
+ case DC_ACPI_CM_POWER_STATE_D0:
+ dc_resource_state_construct(dc, dc->current_state);
+
+ dc->hwss.init_hw(dc);
+ break;
+ default:
+
+ dc->hwss.power_down(dc);
+
+ /* Zero out the current context so that on resume we start with
+ * clean state, and dc hw programming optimizations will not
+ * cause any trouble.
+ */
+
+ /* Preserve refcount */
+ refcount = dc->current_state->refcount;
+ dc_resource_state_destruct(dc->current_state);
+ memset(dc->current_state, 0,
+ sizeof(*dc->current_state));
+
+ dc->current_state->refcount = refcount;
+
+ break;
+ }
+
+}
+
+void dc_resume(struct dc *dc)
+{
+
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++)
+ core_link_resume(dc->links[i]);
+}
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd)
+{
+
+ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+
+ return dal_i2caux_submit_i2c_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ cmd);
+}
+
+static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
+{
+ if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ dc_sink_retain(sink);
+
+ dc_link->remote_sinks[dc_link->sink_count] = sink;
+ dc_link->sink_count++;
+
+ return true;
+}
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ struct dc_sink *dc_sink;
+ enum dc_edid_status edid_status;
+
+ if (len > MAX_EDID_BUFFER_SIZE) {
+ dm_error("Max EDID buffer size breached!\n");
+ return NULL;
+ }
+
+ if (!init_data) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!init_data->link) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_sink = dc_sink_create(init_data);
+
+ if (!dc_sink)
+ return NULL;
+
+ memmove(dc_sink->dc_edid.raw_edid, edid, len);
+ dc_sink->dc_edid.length = len;
+
+ if (!link_add_remote_sink_helper(
+ link,
+ dc_sink))
+ goto fail_add_sink;
+
+ edid_status = dm_helpers_parse_edid_caps(
+ link->ctx,
+ &dc_sink->dc_edid,
+ &dc_sink->edid_caps);
+
+ if (edid_status != EDID_OK)
+ goto fail;
+
+ return dc_sink;
+fail:
+ dc_link_remove_remote_sink(link, dc_sink);
+fail_add_sink:
+ dc_sink_release(dc_sink);
+ return NULL;
+}
+
+void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ int i;
+
+ if (!link->sink_count) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == sink) {
+ dc_sink_release(sink);
+ link->remote_sinks[i] = NULL;
+
+ /* shrink array to remove empty place */
+ while (i < link->sink_count - 1) {
+ link->remote_sinks[i] = link->remote_sinks[i+1];
+ i++;
+ }
+ link->remote_sinks[i] = NULL;
+ link->sink_count--;
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
new file mode 100644
index 000000000000..43c7a7fddb83
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * dc_debug.c
+ *
+ * Created on: Nov 3, 2016
+ * Author: yonsun
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#define SURFACE_TRACE(...) do {\
+ if (dc->debug.surface_trace) \
+ dm_logger_write(logger, \
+ LOG_IF_TRACE, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define TIMING_TRACE(...) do {\
+ if (dc->debug.timing_trace) \
+ dm_logger_write(logger, \
+ LOG_SYNC, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define CLOCK_TRACE(...) do {\
+ if (dc->debug.clock_trace) \
+ dm_logger_write(logger, \
+ LOG_BANDWIDTH_CALCS, \
+ ##__VA_ARGS__); \
+} while (0)
+
+void pre_surface_trace(
+ struct dc *dc,
+ const struct dc_plane_state *const *plane_states,
+ int surface_count)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ for (i = 0; i < surface_count; i++) {
+ const struct dc_plane_state *plane_state = plane_states[i];
+
+ SURFACE_TRACE("Planes %d:\n", i);
+
+ SURFACE_TRACE(
+ "plane_state->visible = %d;\n"
+ "plane_state->flip_immediate = %d;\n"
+ "plane_state->address.type = %d;\n"
+ "plane_state->address.grph.addr.quad_part = 0x%X;\n"
+ "plane_state->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "plane_state->scaling_quality.h_taps = %d;\n"
+ "plane_state->scaling_quality.v_taps = %d;\n"
+ "plane_state->scaling_quality.h_taps_c = %d;\n"
+ "plane_state->scaling_quality.v_taps_c = %d;\n",
+ plane_state->visible,
+ plane_state->flip_immediate,
+ plane_state->address.type,
+ plane_state->address.grph.addr.quad_part,
+ plane_state->address.grph.meta_addr.quad_part,
+ plane_state->scaling_quality.h_taps,
+ plane_state->scaling_quality.v_taps,
+ plane_state->scaling_quality.h_taps_c,
+ plane_state->scaling_quality.v_taps_c);
+
+ SURFACE_TRACE(
+ "plane_state->src_rect.x = %d;\n"
+ "plane_state->src_rect.y = %d;\n"
+ "plane_state->src_rect.width = %d;\n"
+ "plane_state->src_rect.height = %d;\n"
+ "plane_state->dst_rect.x = %d;\n"
+ "plane_state->dst_rect.y = %d;\n"
+ "plane_state->dst_rect.width = %d;\n"
+ "plane_state->dst_rect.height = %d;\n"
+ "plane_state->clip_rect.x = %d;\n"
+ "plane_state->clip_rect.y = %d;\n"
+ "plane_state->clip_rect.width = %d;\n"
+ "plane_state->clip_rect.height = %d;\n",
+ plane_state->src_rect.x,
+ plane_state->src_rect.y,
+ plane_state->src_rect.width,
+ plane_state->src_rect.height,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.height,
+ plane_state->clip_rect.x,
+ plane_state->clip_rect.y,
+ plane_state->clip_rect.width,
+ plane_state->clip_rect.height);
+
+ SURFACE_TRACE(
+ "plane_state->plane_size.grph.surface_size.x = %d;\n"
+ "plane_state->plane_size.grph.surface_size.y = %d;\n"
+ "plane_state->plane_size.grph.surface_size.width = %d;\n"
+ "plane_state->plane_size.grph.surface_size.height = %d;\n"
+ "plane_state->plane_size.grph.surface_pitch = %d;\n",
+ plane_state->plane_size.grph.surface_size.x,
+ plane_state->plane_size.grph.surface_size.y,
+ plane_state->plane_size.grph.surface_size.width,
+ plane_state->plane_size.grph.surface_size.height,
+ plane_state->plane_size.grph.surface_pitch);
+
+
+ SURFACE_TRACE(
+ "plane_state->tiling_info.gfx8.num_banks = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_width = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_width_c = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_height = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_height_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_aspect = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_split = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_split_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_mode = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_mode_c = %d;\n",
+ plane_state->tiling_info.gfx8.num_banks,
+ plane_state->tiling_info.gfx8.bank_width,
+ plane_state->tiling_info.gfx8.bank_width_c,
+ plane_state->tiling_info.gfx8.bank_height,
+ plane_state->tiling_info.gfx8.bank_height_c,
+ plane_state->tiling_info.gfx8.tile_aspect,
+ plane_state->tiling_info.gfx8.tile_aspect_c,
+ plane_state->tiling_info.gfx8.tile_split,
+ plane_state->tiling_info.gfx8.tile_split_c,
+ plane_state->tiling_info.gfx8.tile_mode,
+ plane_state->tiling_info.gfx8.tile_mode_c);
+
+ SURFACE_TRACE(
+ "plane_state->tiling_info.gfx8.pipe_config = %d;\n"
+ "plane_state->tiling_info.gfx8.array_mode = %d;\n"
+ "plane_state->color_space = %d;\n"
+ "plane_state->dcc.enable = %d;\n"
+ "plane_state->format = %d;\n"
+ "plane_state->rotation = %d;\n"
+ "plane_state->stereo_format = %d;\n",
+ plane_state->tiling_info.gfx8.pipe_config,
+ plane_state->tiling_info.gfx8.array_mode,
+ plane_state->color_space,
+ plane_state->dcc.enable,
+ plane_state->format,
+ plane_state->rotation,
+ plane_state->stereo_format);
+
+ SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n",
+ plane_state->tiling_info.gfx9.swizzle);
+
+ SURFACE_TRACE("\n");
+ }
+ SURFACE_TRACE("\n");
+}
+
+void update_surface_trace(
+ struct dc *dc,
+ const struct dc_surface_update *updates,
+ int surface_count)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ for (i = 0; i < surface_count; i++) {
+ const struct dc_surface_update *update = &updates[i];
+
+ SURFACE_TRACE("Update %d\n", i);
+ if (update->flip_addr) {
+ SURFACE_TRACE("flip_addr->address.type = %d;\n"
+ "flip_addr->address.grph.addr.quad_part = 0x%X;\n"
+ "flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "flip_addr->flip_immediate = %d;\n",
+ update->flip_addr->address.type,
+ update->flip_addr->address.grph.addr.quad_part,
+ update->flip_addr->address.grph.meta_addr.quad_part,
+ update->flip_addr->flip_immediate);
+ }
+
+ if (update->plane_info) {
+ SURFACE_TRACE(
+ "plane_info->color_space = %d;\n"
+ "plane_info->format = %d;\n"
+ "plane_info->plane_size.grph.surface_pitch = %d;\n"
+ "plane_info->plane_size.grph.surface_size.height = %d;\n"
+ "plane_info->plane_size.grph.surface_size.width = %d;\n"
+ "plane_info->plane_size.grph.surface_size.x = %d;\n"
+ "plane_info->plane_size.grph.surface_size.y = %d;\n"
+ "plane_info->rotation = %d;\n",
+ update->plane_info->color_space,
+ update->plane_info->format,
+ update->plane_info->plane_size.grph.surface_pitch,
+ update->plane_info->plane_size.grph.surface_size.height,
+ update->plane_info->plane_size.grph.surface_size.width,
+ update->plane_info->plane_size.grph.surface_size.x,
+ update->plane_info->plane_size.grph.surface_size.y,
+ update->plane_info->rotation,
+ update->plane_info->stereo_format);
+
+ SURFACE_TRACE(
+ "plane_info->tiling_info.gfx8.num_banks = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_width = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_width_c = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_height = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_height_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_aspect = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_aspect_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_split = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_split_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_mode = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_mode_c = %d;\n",
+ update->plane_info->tiling_info.gfx8.num_banks,
+ update->plane_info->tiling_info.gfx8.bank_width,
+ update->plane_info->tiling_info.gfx8.bank_width_c,
+ update->plane_info->tiling_info.gfx8.bank_height,
+ update->plane_info->tiling_info.gfx8.bank_height_c,
+ update->plane_info->tiling_info.gfx8.tile_aspect,
+ update->plane_info->tiling_info.gfx8.tile_aspect_c,
+ update->plane_info->tiling_info.gfx8.tile_split,
+ update->plane_info->tiling_info.gfx8.tile_split_c,
+ update->plane_info->tiling_info.gfx8.tile_mode,
+ update->plane_info->tiling_info.gfx8.tile_mode_c);
+
+ SURFACE_TRACE(
+ "plane_info->tiling_info.gfx8.pipe_config = %d;\n"
+ "plane_info->tiling_info.gfx8.array_mode = %d;\n"
+ "plane_info->visible = %d;\n"
+ "plane_info->per_pixel_alpha = %d;\n",
+ update->plane_info->tiling_info.gfx8.pipe_config,
+ update->plane_info->tiling_info.gfx8.array_mode,
+ update->plane_info->visible,
+ update->plane_info->per_pixel_alpha);
+
+ SURFACE_TRACE("surface->tiling_info.gfx9.swizzle = %d;\n",
+ update->plane_info->tiling_info.gfx9.swizzle);
+ }
+
+ if (update->scaling_info) {
+ SURFACE_TRACE(
+ "scaling_info->src_rect.x = %d;\n"
+ "scaling_info->src_rect.y = %d;\n"
+ "scaling_info->src_rect.width = %d;\n"
+ "scaling_info->src_rect.height = %d;\n"
+ "scaling_info->dst_rect.x = %d;\n"
+ "scaling_info->dst_rect.y = %d;\n"
+ "scaling_info->dst_rect.width = %d;\n"
+ "scaling_info->dst_rect.height = %d;\n"
+ "scaling_info->clip_rect.x = %d;\n"
+ "scaling_info->clip_rect.y = %d;\n"
+ "scaling_info->clip_rect.width = %d;\n"
+ "scaling_info->clip_rect.height = %d;\n"
+ "scaling_info->scaling_quality.h_taps = %d;\n"
+ "scaling_info->scaling_quality.v_taps = %d;\n"
+ "scaling_info->scaling_quality.h_taps_c = %d;\n"
+ "scaling_info->scaling_quality.v_taps_c = %d;\n",
+ update->scaling_info->src_rect.x,
+ update->scaling_info->src_rect.y,
+ update->scaling_info->src_rect.width,
+ update->scaling_info->src_rect.height,
+ update->scaling_info->dst_rect.x,
+ update->scaling_info->dst_rect.y,
+ update->scaling_info->dst_rect.width,
+ update->scaling_info->dst_rect.height,
+ update->scaling_info->clip_rect.x,
+ update->scaling_info->clip_rect.y,
+ update->scaling_info->clip_rect.width,
+ update->scaling_info->clip_rect.height,
+ update->scaling_info->scaling_quality.h_taps,
+ update->scaling_info->scaling_quality.v_taps,
+ update->scaling_info->scaling_quality.h_taps_c,
+ update->scaling_info->scaling_quality.v_taps_c);
+ }
+ SURFACE_TRACE("\n");
+ }
+ SURFACE_TRACE("\n");
+}
+
+void post_surface_trace(struct dc *dc)
+{
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ SURFACE_TRACE("post surface process.\n");
+
+}
+
+void context_timing_trace(
+ struct dc *dc,
+ struct resource_context *res_ctx)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+ int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+ struct crtc_position position;
+ unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+
+
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ /* get_position() returns CRTC vertical/horizontal counter
+ * hence not applicable for underlay pipe
+ */
+ if (pipe_ctx->stream == NULL
+ || pipe_ctx->pipe_idx == underlay_idx)
+ continue;
+
+ pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
+ h_pos[i] = position.horizontal_count;
+ v_pos[i] = position.vertical_count;
+ }
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n",
+ pipe_ctx->stream_res.tg->inst,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ h_pos[i], v_pos[i]);
+ }
+}
+
+void context_clock_trace(
+ struct dc *dc,
+ struct dc_state *context)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ CLOCK_TRACE("Current: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
+ "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ context->bw.dcn.calc_clk.dispclk_khz,
+ context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.calc_clk.fclk_khz,
+ context->bw.dcn.calc_clk.dram_ccm_us,
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+ CLOCK_TRACE("Calculated: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
+ "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ context->bw.dcn.calc_clk.dispclk_khz,
+ context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.calc_clk.fclk_khz,
+ context->bw.dcn.calc_clk.dram_ccm_us,
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
new file mode 100644
index 000000000000..71993d5983bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "timing_generator.h"
+#include "hw_sequencer.h"
+
+/* used as index in array of black_color_format */
+enum black_color_format {
+ BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
+ BLACK_COLOR_FORMAT_RGB_LIMITED,
+ BLACK_COLOR_FORMAT_YUV_TV,
+ BLACK_COLOR_FORMAT_YUV_CV,
+ BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+ BLACK_COLOR_FORMAT_DEBUG,
+};
+
+static const struct tg_color black_color_format[] = {
+ /* BlackColorFormat_RGB_FullRange */
+ {0, 0, 0},
+ /* BlackColorFormat_RGB_Limited */
+ {0x40, 0x40, 0x40},
+ /* BlackColorFormat_YUV_TV */
+ {0x200, 0x40, 0x200},
+ /* BlackColorFormat_YUV_CV */
+ {0x1f4, 0x40, 0x1f4},
+ /* BlackColorFormat_YUV_SuperAA */
+ {0x1a2, 0x20, 0x1a2},
+ /* visual confirm debug */
+ {0xff, 0xff, 0},
+};
+
+void color_space_to_black_color(
+ const struct dc *dc,
+ enum dc_color_space colorspace,
+ struct tg_color *black_color)
+{
+ switch (colorspace) {
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
+ break;
+
+ case COLOR_SPACE_SRGB_LIMITED:
+ *black_color =
+ black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
+ break;
+
+ default:
+ /* fefault is sRGB black (full range). */
+ *black_color =
+ black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
+ /* default is sRGB black 0. */
+ break;
+ }
+}
+
+bool hwss_wait_for_blank_complete(
+ struct timing_generator *tg)
+{
+ int counter;
+
+ for (counter = 0; counter < 100; counter++) {
+ if (tg->funcs->is_blanked(tg))
+ break;
+
+ msleep(1);
+ }
+
+ if (counter == 100) {
+ dm_error("DC: failed to blank crtc!\n");
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
new file mode 100644
index 000000000000..e27ed4a45265
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -0,0 +1,2435 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "atom.h"
+#include "dm_helpers.h"
+#include "dc.h"
+#include "grph_object_id.h"
+#include "gpio_service_interface.h"
+#include "core_status.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#include "link_encoder.h"
+#include "hw_sequencer.h"
+#include "resource.h"
+#include "abm.h"
+#include "fixed31_32.h"
+#include "dpcd_defs.h"
+#include "dmcu.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_enum.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define LINK_INFO(...) \
+ dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
+ __VA_ARGS__)
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+enum {
+ LINK_RATE_REF_FREQ_IN_MHZ = 27,
+ PEAK_FACTOR_X1000 = 1006
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destruct(struct dc_link *link)
+{
+ int i;
+
+ if (link->ddc)
+ dal_ddc_service_destroy(&link->ddc);
+
+ if(link->link_enc)
+ link->link_enc->funcs->destroy(&link->link_enc);
+
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ for (i = 0; i < link->sink_count; ++i)
+ dc_sink_release(link->remote_sinks[i]);
+}
+
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
+{
+ enum bp_result bp_result;
+ struct graphics_object_hpd_info hpd_info;
+ struct gpio_pin_info pin_info;
+
+ if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
+ return NULL;
+
+ bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+ hpd_info.hpd_int_gpio_uid, &pin_info);
+
+ if (bp_result != BP_RESULT_OK) {
+ ASSERT(bp_result == BP_RESULT_NORECORD);
+ return NULL;
+ }
+
+ return dal_gpio_service_create_irq(
+ gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+}
+
+/*
+ * Function: program_hpd_filter
+ *
+ * @brief
+ * Programs HPD filter on associated HPD line
+ *
+ * @param [in] delay_on_connect_in_ms: Connect filter timeout
+ * @param [in] delay_on_disconnect_in_ms: Disconnect filter timeout
+ *
+ * @return
+ * true on success, false otherwise
+ */
+static bool program_hpd_filter(
+ const struct dc_link *link)
+{
+ bool result = false;
+
+ struct gpio *hpd;
+
+ int delay_on_connect_in_ms = 0;
+ int delay_on_disconnect_in_ms = 0;
+
+ /* Verify feature is supported */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* Program hpd filter */
+ delay_on_connect_in_ms = 500;
+ delay_on_disconnect_in_ms = 100;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* Program hpd filter to allow DP signal to settle */
+ /* 500: not able to detect MST <-> SST switch as HPD is low for
+ * only 100ms on DELL U2413
+ * 0: some passive dongle still show aux mode instead of i2c
+ * 20-50:not enough to hide bouncing HPD with passive dongle.
+ * also see intermittent i2c read issues.
+ */
+ delay_on_connect_in_ms = 80;
+ delay_on_disconnect_in_ms = 0;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ default:
+ /* Don't program hpd filter */
+ return false;
+ }
+
+ /* Obtain HPD handle */
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (!hpd)
+ return result;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = delay_on_connect_in_ms;
+ config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+
+ result = true;
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+
+ return result;
+}
+
+static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+{
+ uint32_t is_hpd_high = 0;
+ struct gpio *hpd_pin;
+
+ /* todo: may need to lock gpio access */
+ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ if (hpd_pin == NULL)
+ goto hpd_gpio_failure;
+
+ dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
+ dal_gpio_get_value(hpd_pin, &is_hpd_high);
+ dal_gpio_close(hpd_pin);
+ dal_gpio_destroy_irq(&hpd_pin);
+
+ if (is_hpd_high) {
+ *type = dc_connection_single;
+ /* TODO: need to do the actual detection */
+ } else {
+ *type = dc_connection_none;
+ }
+
+ return true;
+
+hpd_gpio_failure:
+ return false;
+}
+
+static enum ddc_transaction_type get_ddc_transaction_type(
+ enum signal_type sink_signal)
+{
+ enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+ switch (sink_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_RGB:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* MST does not use I2COverAux, but there is the
+ * SPECIAL use case for "immediate dwnstrm device
+ * access" (EPR#370830). */
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ default:
+ break;
+ }
+
+ return transaction_type;
+}
+
+static enum signal_type get_basic_signal_type(
+ struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
+{
+ if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+ switch (downstream.id) {
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ {
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ }
+ }
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ case CONNECTOR_ID_VGA:
+ return SIGNAL_TYPE_RGB;
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ return SIGNAL_TYPE_HDMI_TYPE_A;
+ case CONNECTOR_ID_LVDS:
+ return SIGNAL_TYPE_LVDS;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ case CONNECTOR_ID_EDP:
+ return SIGNAL_TYPE_EDP;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ } else if (downstream.type == OBJECT_TYPE_ENCODER) {
+ switch (downstream.id) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ }
+
+ return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Check whether there is a dongle on DP connector
+ */
+static bool is_dp_sink_present(struct dc_link *link)
+{
+ enum gpio_result gpio_result;
+ uint32_t clock_pin = 0;
+
+ struct ddc *ddc;
+
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(link->link_id);
+
+ bool present =
+ ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP));
+
+ ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return present;
+ }
+
+ /* Open GPIO and set it to I2C mode */
+ /* Note: this GpioMode_Input will be converted
+ * to GpioConfigType_I2cAuxDualMode in GPIO component,
+ * which indicates we need additional delay */
+
+ if (GPIO_RESULT_OK != dal_ddc_open(
+ ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+ dal_gpio_destroy_ddc(&ddc);
+
+ return present;
+ }
+
+ /* Read GPIO: DP sink is present if both clock and data pins are zero */
+ /* [anaumov] in DAL2, there was no check for GPIO failure */
+
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+
+ present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+ dal_ddc_close(ddc);
+
+ return present;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ */
+static enum signal_type link_detect_sink(
+ struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ enum signal_type result = get_basic_signal_type(
+ link->link_enc->id, link->link_id);
+
+ /* Internal digital encoder will detect only dongles
+ * that require digital signal */
+
+ /* Detection mechanism is different
+ * for different native connectors.
+ * LVDS connector supports only LVDS signal;
+ * PCIE is a bus slot, the actual connector needs to be detected first;
+ * eDP connector supports only eDP signal;
+ * HDMI should check straps for audio */
+
+ /* PCIE detects the actual connector on add-on board */
+
+ if (link->link_id.id == CONNECTOR_ID_PCIE) {
+ /* ZAZTODO implement PCIE add-on card detection */
+ }
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A: {
+ /* check audio support:
+ * if native HDMI is not supported, switch to DVI */
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+
+ if (!aud_support->hdmi_audio_native)
+ if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT: {
+ /* DP HPD short pulse. Passive DP dongle will not
+ * have short pulse
+ */
+ if (reason != DETECT_REASON_HPDRX) {
+ /* Check whether DP signal detected: if not -
+ * we assume signal is DVI; it could be corrected
+ * to HDMI after dongle detection
+ */
+ if (!is_dp_sink_present(link))
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum signal_type decide_signal_from_strap_and_dongle_type(
+ enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ switch (dongle_type) {
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ if (audio_support->hdmi_audio_on_dongle)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ if (audio_support->hdmi_audio_native)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ default:
+ signal = SIGNAL_TYPE_NONE;
+ break;
+ }
+
+ return signal;
+}
+
+static enum signal_type dp_passive_dongle_detection(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
+{
+ dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ ddc, sink_cap);
+ return decide_signal_from_strap_and_dongle_type(
+ sink_cap->dongle_type,
+ audio_support);
+}
+
+static void link_disconnect_sink(struct dc_link *link)
+{
+ if (link->local_sink) {
+ dc_sink_release(link->local_sink);
+ link->local_sink = NULL;
+ }
+
+ link->dpcd_sink_count = 0;
+}
+
+static void detect_dp(
+ struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ bool *converter_disable_audio,
+ struct audio_support *audio_support,
+ enum dc_detect_reason reason)
+{
+ bool boot = false;
+ sink_caps->signal = link_detect_sink(link, reason);
+ sink_caps->transaction_type =
+ get_ddc_transaction_type(sink_caps->signal);
+
+ if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ detect_dp_sink_caps(link);
+
+ if (is_mst_supported(link)) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ link->type = dc_connection_mst_branch;
+
+ /*
+ * This call will initiate MST topology discovery. Which
+ * will detect MST ports and add new DRM connector DRM
+ * framework. Then read EDID via remote i2c over aux. In
+ * the end, will notify DRM detect result and save EDID
+ * into DRM framework.
+ *
+ * .detect is called by .fill_modes.
+ * .fill_modes is called by user mode ioctl
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * .get_modes is called by .fill_modes.
+ *
+ * call .get_modes, AMDGPU DM implementation will create
+ * new dc_sink and add to dc_link. For long HPD plug
+ * in/out, MST has its own handle.
+ *
+ * Therefore, just after dc_create, link->sink is not
+ * created for MST until user mode app calls
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * Need check ->sink usages in case ->sink = NULL
+ * TODO: s3 resume check
+ */
+ if (reason == DETECT_REASON_BOOT)
+ boot = true;
+
+ if (!dm_helpers_dp_mst_start_top_mgr(
+ link->ctx,
+ link, boot)) {
+ /* MST not supported */
+ link->type = dc_connection_single;
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ }
+ }
+
+ if (link->type != dc_connection_mst_branch &&
+ is_dp_active_dongle(link)) {
+ /* DP active dongles */
+ link->type = dc_connection_active_dongle;
+ if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+ /*
+ * active dongle unplug processing for short irq
+ */
+ link_disconnect_sink(link);
+ return;
+ }
+
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ *converter_disable_audio = true;
+ }
+ } else {
+ /* DP passive dongles */
+ sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+ sink_caps,
+ audio_support);
+ }
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ uint8_t i;
+ bool converter_disable_audio = false;
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc_sink *sink = NULL;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+ if (false == detect_sink(link, &new_connection_type)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP &&
+ link->local_sink)
+ return true;
+
+ link_disconnect_sink(link);
+
+ if (new_connection_type != dc_connection_none) {
+ link->type = new_connection_type;
+
+ /* From Disconnected-to-Connected. */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ if (aud_support->hdmi_audio_native)
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ detect_edp_sink_caps(link);
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ detect_dp(
+ link,
+ &sink_caps,
+ &converter_disable_audio,
+ aud_support, reason);
+
+ /* Active dongle downstream unplug */
+ if (link->type == dc_connection_active_dongle
+ && link->dpcd_caps.sink_count.
+ bits.SINK_COUNT == 0)
+ return true;
+
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Connected\n",
+ link->link_index);
+ /* Need to setup mst link_cap struct here
+ * otherwise dc_link_detect() will leave mst link_cap
+ * empty which leads to allocate_mst_payload() has "0"
+ * pbn_per_slot value leading to exception on dal_fixed31_32_div()
+ */
+ link->verified_link_cap = link->reported_link_cap;
+ return false;
+ }
+
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return false;
+ } /* switch() */
+
+ if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+ link->dpcd_sink_count = link->dpcd_caps.sink_count.
+ bits.SINK_COUNT;
+ else
+ link->dpcd_sink_count = 1;
+
+ dal_ddc_service_set_transaction_type(
+ link->ddc,
+ sink_caps.transaction_type);
+
+ link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
+ link->ddc);
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ return false;
+ }
+
+ sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
+ sink->converter_disable_audio = converter_disable_audio;
+
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ switch (edid_status) {
+ case EDID_BAD_CHECKSUM:
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "EDID checksum invalid.\n");
+ break;
+ case EDID_NO_RESPONSE:
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "No EDID read.\n");
+ return false;
+
+ default:
+ break;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ /*
+ * TODO debug why Dell 2413 doesn't like
+ * two link trainings
+ */
+
+ /* deal with non-mst cases */
+ dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+ }
+
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+ /* Connectivity log: detection */
+ for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+ CONN_DATA_DETECT(link,
+ &sink->dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
+ EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ }
+
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+ "%s: "
+ "manufacturer_id = %X, "
+ "product_id = %X, "
+ "serial_number = %X, "
+ "manufacture_week = %d, "
+ "manufacture_year = %d, "
+ "display_name = %s, "
+ "speaker_flag = %d, "
+ "audio_mode_count = %d\n",
+ __func__,
+ sink->edid_caps.manufacturer_id,
+ sink->edid_caps.product_id,
+ sink->edid_caps.serial_number,
+ sink->edid_caps.manufacture_week,
+ sink->edid_caps.manufacture_year,
+ sink->edid_caps.display_name,
+ sink->edid_caps.speaker_flags,
+ sink->edid_caps.audio_mode_count);
+
+ for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+ "%s: mode number = %d, "
+ "format_code = %d, "
+ "channel_count = %d, "
+ "sample_rate = %d, "
+ "sample_size = %d\n",
+ __func__,
+ i,
+ sink->edid_caps.audio_modes[i].format_code,
+ sink->edid_caps.audio_modes[i].channel_count,
+ sink->edid_caps.audio_modes[i].sample_rate,
+ sink->edid_caps.audio_modes[i].sample_size);
+ }
+
+ } else {
+ /* From Connected-to-Disconnected. */
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Disconnected\n",
+ link->link_index);
+
+ dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+
+ link->mst_stream_alloc_table.stream_count = 0;
+ memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+ }
+
+ link->type = dc_connection_none;
+ sink_caps.signal = SIGNAL_TYPE_NONE;
+ }
+
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
+ link->link_index, sink,
+ (sink_caps.signal == SIGNAL_TYPE_NONE ?
+ "Disconnected":"Connected"));
+
+ return true;
+}
+
+static enum hpd_source_id get_hpd_line(
+ struct dc_link *link)
+{
+ struct gpio *hpd;
+ enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
+
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (hpd) {
+ switch (dal_irq_get_source(hpd)) {
+ case DC_IRQ_SOURCE_HPD1:
+ hpd_id = HPD_SOURCEID1;
+ break;
+ case DC_IRQ_SOURCE_HPD2:
+ hpd_id = HPD_SOURCEID2;
+ break;
+ case DC_IRQ_SOURCE_HPD3:
+ hpd_id = HPD_SOURCEID3;
+ break;
+ case DC_IRQ_SOURCE_HPD4:
+ hpd_id = HPD_SOURCEID4;
+ break;
+ case DC_IRQ_SOURCE_HPD5:
+ hpd_id = HPD_SOURCEID5;
+ break;
+ case DC_IRQ_SOURCE_HPD6:
+ hpd_id = HPD_SOURCEID6;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ dal_gpio_destroy_irq(&hpd);
+ }
+
+ return hpd_id;
+}
+
+static enum channel_id get_ddc_line(struct dc_link *link)
+{
+ struct ddc *ddc;
+ enum channel_id channel = CHANNEL_ID_UNKNOWN;
+
+ ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+ if (ddc) {
+ switch (dal_ddc_get_line(ddc)) {
+ case GPIO_DDC_LINE_DDC1:
+ channel = CHANNEL_ID_DDC1;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ channel = CHANNEL_ID_DDC2;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ channel = CHANNEL_ID_DDC3;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ channel = CHANNEL_ID_DDC4;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ channel = CHANNEL_ID_DDC5;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ channel = CHANNEL_ID_DDC6;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ channel = CHANNEL_ID_DDC_VGA;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ channel = CHANNEL_ID_I2C_PAD;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+ struct graphics_object_id encoder)
+{
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_A;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_B;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_C;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_D;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_E;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_F;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_G;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_NUTMEG_CRT;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_TRAVIS_CRT;
+ case ENUM_ID_2:
+ return TRANSMITTER_TRAVIS_LCD;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+}
+
+static bool construct(
+ struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ uint8_t i;
+ struct gpio *hpd_gpio = NULL;
+ struct ddc_service_init_data ddc_service_init_data = { { 0 } };
+ struct dc_context *dc_ctx = init_params->ctx;
+ struct encoder_init_data enc_init_data = { 0 };
+ struct integrated_info info = {{{ 0 }}};
+ struct dc_bios *bios = init_params->dc->ctx->dc_bios;
+ const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+
+ if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
+ __func__, init_params->connector_index);
+ goto create_fail;
+ }
+
+ hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (hpd_gpio != NULL)
+ link->irq_source_hpd = dal_irq_get_source(hpd_gpio);
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ if (hpd_gpio != NULL)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(hpd_gpio);
+
+ break;
+ case CONNECTOR_ID_EDP:
+ link->connector_signal = SIGNAL_TYPE_EDP;
+
+ if (hpd_gpio != NULL) {
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(hpd_gpio);
+ }
+ break;
+ default:
+ dm_logger_write(dc_ctx->logger, LOG_WARNING,
+ "Unsupported Connector type:%d!\n", link->link_id.id);
+ goto create_fail;
+ }
+
+ if (hpd_gpio != NULL) {
+ dal_gpio_destroy_irq(&hpd_gpio);
+ hpd_gpio = NULL;
+ }
+
+ /* TODO: #DAL3 Implement id to str function.*/
+ LINK_INFO("Connector[%d] description:"
+ "signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+
+ if (link->ddc == NULL) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ link->ddc_hw_inst =
+ dal_ddc_get_line(
+ dal_ddc_service_get_ddc_pin(link->ddc));
+
+ enc_init_data.ctx = dc_ctx;
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.connector = link->link_id;
+ enc_init_data.channel = get_ddc_line(link);
+ enc_init_data.hpd_source = get_hpd_line(link);
+
+ link->hpd_src = enc_init_data.hpd_source;
+
+ enc_init_data.transmitter =
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc = link->dc->res_pool->funcs->link_enc_create(
+ &enc_init_data);
+
+ if( link->link_enc == NULL) {
+ DC_ERROR("Failed to create link encoder!\n");
+ goto link_enc_create_fail;
+ }
+
+ link->link_enc_hw_inst = link->link_enc->transmitter;
+
+ for (i = 0; i < 4; i++) {
+ if (BP_RESULT_OK !=
+ bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+ DC_ERROR("Failed to find device tag!\n");
+ goto device_tag_fail;
+ }
+
+ /* Look for device tag that matches connector signal,
+ * CRT for rgb, LCD for other supported signal tyes
+ */
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
+ && link->connector_signal != SIGNAL_TYPE_RGB)
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
+ && link->connector_signal == SIGNAL_TYPE_RGB)
+ continue;
+ break;
+ }
+
+ if (bios->integrated_info)
+ info = *bios->integrated_info;
+
+ /* Look for channel mapping corresponding to connector and device tag */
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+ struct external_display_path *path =
+ &info.ext_disp_conn_info.path[i];
+ if (path->device_connector_id.enum_id == link->link_id.enum_id
+ && path->device_connector_id.id == link->link_id.id
+ && path->device_connector_id.type == link->link_id.type) {
+
+ if (link->device_tag.acpi_device != 0
+ && path->device_acpi_enum == link->device_tag.acpi_device) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ } else if (path->device_tag ==
+ link->device_tag.dev_id.raw_device_tag) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ }
+ break;
+ }
+ }
+
+ /*
+ * TODO check if GPIO programmed correctly
+ *
+ * If GPIO isn't programmed correctly HPD might not rise or drain
+ * fast enough, leading to bounces.
+ */
+ program_hpd_filter(link);
+
+ return true;
+device_tag_fail:
+ link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+ dal_ddc_service_destroy(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+ if (hpd_gpio != NULL) {
+ dal_gpio_destroy_irq(&hpd_gpio);
+ }
+
+ return false;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+struct dc_link *link_create(const struct link_init_data *init_params)
+{
+ struct dc_link *link =
+ kzalloc(sizeof(*link), GFP_KERNEL);
+
+ if (NULL == link)
+ goto alloc_fail;
+
+ if (false == construct(link, init_params))
+ goto construct_fail;
+
+ return link;
+
+construct_fail:
+ kfree(link);
+
+alloc_fail:
+ return NULL;
+}
+
+void link_destroy(struct dc_link **link)
+{
+ destruct(*link);
+ kfree(*link);
+ *link = NULL;
+}
+
+static void dpcd_configure_panel_mode(
+ struct dc_link *link,
+ enum dp_panel_mode panel_mode)
+{
+ union dpcd_edp_config edp_config_set;
+ bool panel_mode_edp = false;
+
+ memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+ if (DP_PANEL_MODE_DEFAULT != panel_mode) {
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
+
+ default:
+ break;
+ }
+
+ /*set edp panel mode in receiver*/
+ core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ if (edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+ enum ddc_result result = DDC_RESULT_UNKNOWN;
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ ASSERT(result == DDC_RESULT_SUCESSFULL);
+ }
+ }
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
+ "Link: %d eDP panel mode supported: %d "
+ "eDP panel mode enabled: %d \n",
+ link->link_index,
+ link->dpcd_caps.panel_mode_edp,
+ panel_mode_edp);
+}
+
+static void enable_stream_features(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ union down_spread_ctrl downspread;
+
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+}
+
+static enum dc_status enable_link_dp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_status status;
+ bool skip_video_pattern;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link_settings link_settings = {0};
+ enum dp_panel_mode panel_mode;
+ enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
+
+ /* get link settings for video mode timing */
+ decide_link_settings(stream, &link_settings);
+
+ /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
+ * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
+ */
+ if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_rate = LINK_RATE_HIGH3;
+
+ if (link_settings.link_rate == max_link_rate) {
+ if (state->dis_clk->funcs->set_min_clocks_state) {
+ if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
+ state->dis_clk->funcs->set_min_clocks_state(
+ state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
+ } else {
+ uint32_t dp_phyclk_in_khz;
+ const struct clocks_value clocks_value =
+ state->dis_clk->cur_clocks_value;
+
+ /* 27mhz = 27000000hz= 27000khz */
+ dp_phyclk_in_khz = link_settings.link_rate * 27000;
+
+ if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
+ (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
+ (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
+ state->dis_clk->funcs->apply_clock_voltage_request(
+ state->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ dp_phyclk_in_khz,
+ false,
+ true);
+ }
+ }
+ }
+
+ dp_enable_link_phy(
+ link,
+ pipe_ctx->stream->signal,
+ pipe_ctx->clock_source->id,
+ &link_settings);
+
+ panel_mode = dp_get_panel_mode(link);
+ dpcd_configure_panel_mode(link, panel_mode);
+
+ skip_video_pattern = true;
+
+ if (link_settings.link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(
+ link,
+ &link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS)) {
+ link->cur_link_settings = link_settings;
+ status = DC_OK;
+ }
+ else
+ status = DC_FAIL_DP_LINK_TRAINING;
+
+ enable_stream_features(pipe_ctx);
+
+ return status;
+}
+
+static enum dc_status enable_link_dp_mst(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* sink signal type after MST branch is MST. Multiple MST sinks
+ * share one link. Link DP PHY is enable or training only once.
+ */
+ if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
+ return DC_OK;
+
+ /* set the sink to MST mode before enabling the link */
+ dp_enable_mst_on_sink(link, true);
+
+ return enable_link_dp(state, pipe_ctx);
+}
+
+static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
+ enum engine_id eng_id,
+ struct ext_hdmi_settings *settings)
+{
+ bool result = false;
+ int i = 0;
+ struct integrated_info *integrated_info =
+ pipe_ctx->stream->ctx->dc_bios->integrated_info;
+
+ if (integrated_info == NULL)
+ return false;
+
+ /*
+ * Get retimer settings from sbios for passing SI eye test for DCE11
+ * The setting values are varied based on board revision and port id
+ * Therefore the setting values of each ports is passed by sbios.
+ */
+
+ // Check if current bios contains ext Hdmi settings
+ if (integrated_info->gpu_cap_info & 0x20) {
+ switch (eng_id) {
+ case ENGINE_ID_DIGA:
+ settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp0_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp0_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp1_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp1_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp2_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp2_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp3_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp3_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ if (result == true) {
+ // Validate settings from bios integrated info table
+ if (settings->slv_addr == 0)
+ return false;
+ if (settings->reg_num > 9)
+ return false;
+ if (settings->reg_num_6g > 3)
+ return false;
+
+ for (i = 0; i < settings->reg_num; i++) {
+ if (settings->reg_settings[i].i2c_reg_index > 0x20)
+ return false;
+ }
+
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
+ return false;
+ }
+ }
+ }
+
+ return result;
+}
+
+static bool i2c_write(struct pipe_ctx *pipe_ctx,
+ uint8_t address, uint8_t *buffer, uint32_t length)
+{
+ struct i2c_command cmd = {0};
+ struct i2c_payload payload = {0};
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.number_of_payloads = 1;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
+
+ payload.address = address;
+ payload.data = buffer;
+ payload.length = length;
+ payload.write = true;
+ cmd.payloads = &payload;
+
+ if (dc_submit_i2c(pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream->sink->link->link_index, &cmd))
+ return true;
+
+ return false;
+}
+
+static void write_i2c_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz,
+ struct ext_hdmi_settings *settings)
+{
+ uint8_t slave_address = (settings->slv_addr >> 1);
+ uint8_t buffer[2];
+ const uint8_t apply_rx_tx_change = 0x4;
+ uint8_t offset = 0xA;
+ uint8_t value = 0;
+ int i = 0;
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Start Ext-Hdmi programming*/
+
+ for (i = 0; i < settings->reg_num; i++) {
+ /* Apply 3G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA ||
+ settings->reg_settings[i].i2c_reg_index == 0xB ||
+ settings->reg_settings[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings[i].i2c_reg_val;
+ else {
+ i2c_success =
+ dal_ddc_service_query_ddc_data(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+ }
+ }
+
+ /* Apply 3G settings */
+ if (is_over_340mhz) {
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ /* Apply 3G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings_6g[i].i2c_reg_val;
+ else {
+ i2c_success =
+ dal_ddc_service_query_ddc_data(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+ }
+ }
+ }
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ }
+}
+
+static void write_i2c_default_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xBA >> 1);
+ uint8_t buffer[2];
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Program Slave Address for tuning single integrity */
+ /* Write offset 0x0A to 0x13 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x13;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0B to 0xDA or 0xD8 */
+ buffer[0] = 0x0B;
+ buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0C to 0x1D or 0x91 */
+ buffer[0] = 0x0C;
+ buffer[1] = is_over_340mhz ? 0x1D : 0x91;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+}
+
+static void write_i2c_redriver_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xF0 >> 1);
+ uint8_t buffer[16];
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ // Program Slave Address for tuning single integrity
+ buffer[3] = 0x4E;
+ buffer[4] = 0x4E;
+ buffer[5] = 0x4E;
+ buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
+
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+}
+
+static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ enum dc_color_depth display_color_depth;
+ enum engine_id eng_id;
+ struct ext_hdmi_settings settings = {0};
+ bool is_over_340mhz = false;
+ bool is_vga_mode = (stream->timing.h_addressable == 640)
+ && (stream->timing.v_addressable == 480);
+
+ if (stream->phy_pix_clk > 340000)
+ is_over_340mhz = true;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ unsigned short masked_chip_caps = pipe_ctx->stream->sink->link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
+ write_i2c_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz, &settings);
+ } else {
+ write_i2c_default_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz);
+ }
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
+ }
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ dal_ddc_service_write_scdc_data(
+ stream->sink->link->ddc,
+ stream->phy_pix_clk,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
+ memset(&stream->sink->link->cur_link_settings, 0,
+ sizeof(struct dc_link_settings));
+
+ display_color_depth = stream->timing.display_color_depth;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ display_color_depth = COLOR_DEPTH_888;
+
+ link->link_enc->funcs->enable_tmds_output(
+ link->link_enc,
+ pipe_ctx->clock_source->id,
+ display_color_depth,
+ pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+ stream->phy_pix_clk);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ dal_ddc_service_read_scdc_data(link->ddc);
+}
+
+/****************************enable_link***********************************/
+static enum dc_status enable_link(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ switch (pipe_ctx->stream->signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_dp(state, pipe_ctx);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ status = enable_link_dp_mst(state, pipe_ctx);
+ msleep(200);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ enable_link_hdmi(pipe_ctx);
+ status = DC_OK;
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ status = DC_OK;
+ break;
+ default:
+ break;
+ }
+
+ if (pipe_ctx->stream_res.audio && status == DC_OK) {
+ /* notify audio driver for audio modes of monitor */
+ pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than per link */
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+ }
+
+ return status;
+}
+
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+ /*
+ * TODO: implement call for dp_set_hw_test_pattern
+ * it is needed for compliance testing
+ */
+
+ /* here we need to specify that encoder output settings
+ * need to be calculated as for the set mode,
+ * it will lead to querying dynamic link capabilities
+ * which should be done before enable output */
+
+ if (dc_is_dp_signal(signal)) {
+ /* SST DP, eDP */
+ if (dc_is_dp_sst_signal(signal))
+ dp_disable_link_phy(link, signal);
+ else
+ dp_disable_link_phy_mst(link, signal);
+ } else
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+}
+
+bool dp_active_dongle_validate_timing(
+ const struct dc_crtc_timing *timing,
+ const struct dc_dongle_caps *dongle_caps)
+{
+ unsigned int required_pix_clk = timing->pix_clk_khz;
+
+ if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ dongle_caps->extendedCapValid == false)
+ return true;
+
+ /* Check Pixel Encoding */
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+ return false;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+ return false;
+ break;
+ default:
+ /* Invalid Pixel Encoding*/
+ return false;
+ }
+
+
+ /* Check Color Depth and Pixel Clock */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ required_pix_clk /= 2;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ /*888 and 666 should always be supported*/
+ break;
+ case COLOR_DEPTH_101010:
+ if (dongle_caps->dp_hdmi_max_bpc < 10)
+ return false;
+ required_pix_clk = required_pix_clk * 10 / 8;
+ break;
+ case COLOR_DEPTH_121212:
+ if (dongle_caps->dp_hdmi_max_bpc < 12)
+ return false;
+ required_pix_clk = required_pix_clk * 12 / 8;
+ break;
+
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* These color depths are currently not supported */
+ return false;
+ }
+
+ if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+ return false;
+
+ return true;
+}
+
+enum dc_status dc_link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+ struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
+
+ /* A hack to avoid failing any modes for EDID override feature on
+ * topology change such as lower quality cable for DP or different dongle
+ */
+ if (link->remote_sinks[0])
+ return DC_OK;
+
+ /* Passive Dongle */
+ if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
+ return DC_EXCEED_DONGLE_CAP;
+
+ /* Active Dongle*/
+ if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+ return DC_EXCEED_DONGLE_CAP;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (!dp_validate_mode_timing(
+ link,
+ timing))
+ return DC_NO_DP_LINK_BANDWIDTH;
+ break;
+
+ default:
+ break;
+ }
+
+ return DC_OK;
+}
+
+
+bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+ unsigned int controller_id = 0;
+ int i;
+
+ if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ return false;
+
+ dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
+ "New Backlight level: %d (0x%X)\n", level, level);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ if (stream != NULL) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream
+ == stream)
+ /* DMCU -1 for all controller id values,
+ * therefore +1 here
+ */
+ controller_id =
+ core_dc->current_state->
+ res_ctx.pipe_ctx[i].stream_res.tg->inst +
+ 1;
+ }
+ }
+ abm->funcs->set_backlight_level(
+ abm,
+ level,
+ frame_ramp,
+ controller_id);
+ }
+
+ return true;
+}
+
+bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+ return true;
+}
+
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_enabled)
+ dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+ return true;
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int i;
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (link != NULL &&
+ dmcu != NULL) {
+ /* updateSinkPsrDpcdConfig*/
+ union dpcd_psr_configuration psr_configuration;
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2*/
+ if (psr_config->psr_version == 0x2) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /*For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu -1 for all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /*PhyId is associated with the transmitter id*/
+ psr_context->smuPhyId = link->link_enc->transmitter;
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+ timing.pix_clk_khz * 1000),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->res_cap->num_timing_generator;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* 2 frames before enter PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /*skip power down the single pipe since it blocks the cstate*/
+ if (ASIC_REV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ link->psr_enabled = true;
+ dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ return true;
+ } else
+ return false;
+
+}
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
+{
+ return &link->link_status;
+}
+
+void core_link_resume(struct dc_link *link)
+{
+ if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
+ program_hpd_filter(link);
+}
+
+static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
+{
+ struct dc_link_settings *link_settings =
+ &stream->sink->link->cur_link_settings;
+ uint32_t link_rate_in_mbps =
+ link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
+ struct fixed31_32 mbps = dal_fixed31_32_from_int(
+ link_rate_in_mbps * link_settings->lane_count);
+
+ return dal_fixed31_32_div_int(mbps, 54);
+}
+
+static int get_color_depth(enum dc_color_depth color_depth)
+{
+ switch (color_depth) {
+ case COLOR_DEPTH_666: return 6;
+ case COLOR_DEPTH_888: return 8;
+ case COLOR_DEPTH_101010: return 10;
+ case COLOR_DEPTH_121212: return 12;
+ case COLOR_DEPTH_141414: return 14;
+ case COLOR_DEPTH_161616: return 16;
+ default: return 0;
+ }
+}
+
+static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t bpc;
+ uint64_t kbps;
+ struct fixed31_32 peak_kbps;
+ uint32_t numerator;
+ uint32_t denominator;
+
+ bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
+ kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk * bpc * 3;
+
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+ * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+ * common multiplier to render an integer PBN for all link rate/lane
+ * counts combinations
+ * calculate
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps *= 8 convert to bytes
+ */
+
+ numerator = 64 * PEAK_FACTOR_X1000;
+ denominator = 54 * 8 * 1000 * 1000;
+ kbps *= numerator;
+ peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
+
+ return peak_kbps;
+}
+
+static void update_mst_stream_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *stream_enc,
+ const struct dp_mst_stream_allocation_table *proposed_table)
+{
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
+ { 0 } };
+ struct link_mst_stream_allocation *dc_alloc;
+
+ int i;
+ int j;
+
+ /* if DRM proposed_table has more than one new payload */
+ ASSERT(proposed_table->stream_count -
+ link->mst_stream_alloc_table.stream_count < 2);
+
+ /* copy proposed_table to link, add stream encoder */
+ for (i = 0; i < proposed_table->stream_count; i++) {
+
+ for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+ dc_alloc =
+ &link->mst_stream_alloc_table.stream_allocations[j];
+
+ if (dc_alloc->vcp_id ==
+ proposed_table->stream_allocations[i].vcp_id) {
+
+ work_table[i] = *dc_alloc;
+ break; /* exit j loop */
+ }
+ }
+
+ /* new vcp_id */
+ if (j == link->mst_stream_alloc_table.stream_count) {
+ work_table[i].vcp_id =
+ proposed_table->stream_allocations[i].vcp_id;
+ work_table[i].slot_count =
+ proposed_table->stream_allocations[i].slot_count;
+ work_table[i].stream_enc = stream_enc;
+ }
+ }
+
+ /* update link->mst_stream_alloc_table with work_table */
+ link->mst_stream_alloc_table.stream_count =
+ proposed_table->stream_count;
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+ link->mst_stream_alloc_table.stream_allocations[i] =
+ work_table[i];
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ uint8_t i;
+
+ /* enable_link_dp_mst already check link->enabled_stream_count
+ * and stream is in link->stream[]. This is called during set mode,
+ * stream_enc is available.
+ */
+
+ /* get calculate VC payload for stream: stream_alloc */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ update_mst_stream_alloc_table(
+ link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+ }
+ else
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "stream_enc[%d]: 0x%x "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* program DP source TX for payload */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* send down message */
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* slot X.Y for only current stream */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_timing(pipe_ctx);
+ avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+
+}
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
+ uint8_t i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+
+ /* deallocate_mst_payload is called before disable link. When mode or
+ * disable/enable monitor, new stream is created which is not in link
+ * stream[] yet. For this, payload is not allocated yet, so de-alloc
+ * should not done. For new mode set, map_resources will get engine
+ * for new stream, so stream_enc->id should be validated until here.
+ */
+
+ /* slot X.Y */
+ stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ /* TODO: which component is responsible for remove payload table? */
+ if (mst_mode) {
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ false)) {
+
+ update_mst_stream_alloc_table(
+ link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+ }
+ else {
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ }
+ }
+
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "%s"
+ "stream_count: %d: ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "stream_enc[%d]: 0x%x "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ if (mst_mode) {
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+ }
+
+ return DC_OK;
+}
+
+void core_link_enable_stream(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ enum dc_status status = enable_link(state, pipe_ctx);
+
+ if (status != DC_OK) {
+ dm_logger_write(pipe_ctx->stream->ctx->logger,
+ LOG_WARNING, "enabling link %u failed: %d\n",
+ pipe_ctx->stream->sink->link->link_index,
+ status);
+
+ /* Abort stream enable *unless* the failure was due to
+ * DP link training - some DP monitors will recover and
+ * show the stream anyway. But MST displays can't proceed
+ * without link training.
+ */
+ if (status != DC_FAIL_DP_LINK_TRAINING ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ }
+
+ /* turn off otg test pattern if enable */
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
+
+ core_dc->hwss.enable_stream(pipe_ctx);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ core_dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->sink->link->cur_link_settings);
+}
+
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+
+ core_dc->hwss.disable_stream(pipe_ctx, option);
+
+ disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
+}
+
+void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+ return;
+
+ core_dc->hwss.set_avmute(pipe_ctx, enable);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
new file mode 100644
index 000000000000..d5294798b0a5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -0,0 +1,775 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "gpio_service_interface.h"
+#include "include/ddc_service_types.h"
+#include "include/grph_object_id.h"
+#include "include/dpcd_defs.h"
+#include "include/logger_interface.h"
+#include "include/vector.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+
+#define AUX_POWER_UP_WA_DELAY 500
+#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+
+/* CV smart dongle slave address for retrieving supported HDTV modes*/
+#define CV_SMART_DONGLE_ADDRESS 0x20
+/* DVI-HDMI dongle slave address for retrieving dongle signature*/
+#define DVI_HDMI_DONGLE_ADDRESS 0x68
+static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
+struct dvi_hdmi_dongle_signature_data {
+ int8_t vendor[3];/* "AMD" */
+ uint8_t version[2];
+ uint8_t size;
+ int8_t id[11];/* "6140063500G"*/
+};
+/* DP-HDMI dongle slave address for retrieving dongle signature*/
+#define DP_HDMI_DONGLE_ADDRESS 0x40
+static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
+#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
+
+struct dp_hdmi_dongle_signature_data {
+ int8_t id[15];/* "DP-HDMI ADAPTOR"*/
+ uint8_t eot;/* end of transmition '\x4' */
+};
+
+/* SCDC Address defines (HDMI 2.0)*/
+#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
+#define HDMI_SCDC_ADDRESS 0x54
+#define HDMI_SCDC_SINK_VERSION 0x01
+#define HDMI_SCDC_SOURCE_VERSION 0x02
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS 0x40
+#define HDMI_SCDC_ERR_DETECT 0x50
+#define HDMI_SCDC_TEST_CONFIG 0xC0
+
+union hdmi_scdc_update_read_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t STATUS_UPDATE:1;
+ uint8_t CED_UPDATE:1;
+ uint8_t RR_TEST:1;
+ uint8_t RESERVED:5;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_status_flags_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t CLOCK_DETECTED:1;
+ uint8_t CH0_LOCKED:1;
+ uint8_t CH1_LOCKED:1;
+ uint8_t CH2_LOCKED:1;
+ uint8_t RESERVED:4;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_ced_data {
+ uint8_t byte[7];
+ struct {
+ uint8_t CH0_8LOW:8;
+ uint8_t CH0_7HIGH:7;
+ uint8_t CH0_VALID:1;
+ uint8_t CH1_8LOW:8;
+ uint8_t CH1_7HIGH:7;
+ uint8_t CH1_VALID:1;
+ uint8_t CH2_8LOW:8;
+ uint8_t CH2_7HIGH:7;
+ uint8_t CH2_VALID:1;
+ uint8_t CHECKSUM:8;
+ } fields;
+};
+
+union hdmi_scdc_test_config_Data {
+ uint8_t byte;
+ struct {
+ uint8_t TEST_READ_REQUEST_DELAY:7;
+ uint8_t TEST_READ_REQUEST: 1;
+ } fields;
+};
+
+struct i2c_payloads {
+ struct vector payloads;
+};
+
+struct aux_payloads {
+ struct vector payloads;
+};
+
+static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+ struct i2c_payloads *payloads;
+
+ payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
+
+ if (!payloads)
+ return NULL;
+
+ if (dal_vector_construct(
+ &payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+ return payloads;
+
+ kfree(payloads);
+ return NULL;
+
+}
+
+static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+{
+ return (struct i2c_payload *)p->payloads.container;
+}
+
+static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+{
+ return p->payloads.count;
+}
+
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+{
+ if (!p || !*p)
+ return;
+ dal_vector_destruct(&(*p)->payloads);
+ kfree(*p);
+ *p = NULL;
+
+}
+
+static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+ struct aux_payloads *payloads;
+
+ payloads = kzalloc(sizeof(struct aux_payloads), GFP_KERNEL);
+
+ if (!payloads)
+ return NULL;
+
+ if (dal_vector_construct(
+ &payloads->payloads, ctx, count, sizeof(struct aux_payload)))
+ return payloads;
+
+ kfree(payloads);
+ return NULL;
+}
+
+static struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
+{
+ return (struct aux_payload *)p->payloads.container;
+}
+
+static uint32_t dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
+{
+ return p->payloads.count;
+}
+
+static void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
+{
+ if (!p || !*p)
+ return;
+
+ dal_vector_destruct(&(*p)->payloads);
+ kfree(*p);
+ *p = NULL;
+}
+
+#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+void dal_ddc_i2c_payloads_add(
+ struct i2c_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write)
+{
+ uint32_t payload_size = EDID_SEGMENT_SIZE;
+ uint32_t pos;
+
+ for (pos = 0; pos < len; pos += payload_size) {
+ struct i2c_payload payload = {
+ .write = write,
+ .address = address,
+ .length = DDC_MIN(payload_size, len - pos),
+ .data = data + pos };
+ dal_vector_append(&payloads->payloads, &payload);
+ }
+
+}
+
+void dal_ddc_aux_payloads_add(
+ struct aux_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write)
+{
+ uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
+ uint32_t pos;
+
+ for (pos = 0; pos < len; pos += payload_size) {
+ struct aux_payload payload = {
+ .i2c_over_aux = true,
+ .write = write,
+ .address = address,
+ .length = DDC_MIN(payload_size, len - pos),
+ .data = data + pos };
+ dal_vector_append(&payloads->payloads, &payload);
+ }
+}
+
+static void construct(
+ struct ddc_service *ddc_service,
+ struct ddc_service_init_data *init_data)
+{
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(init_data->id);
+
+ struct gpio_service *gpio_service = init_data->ctx->gpio_service;
+ struct graphics_object_i2c_info i2c_info;
+ struct gpio_ddc_hw_info hw_info;
+ struct dc_bios *dcb = init_data->ctx->dc_bios;
+
+ ddc_service->link = init_data->link;
+ ddc_service->ctx = init_data->ctx;
+
+ if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
+ ddc_service->ddc_pin = NULL;
+ } else {
+ hw_info.ddc_channel = i2c_info.i2c_line;
+ hw_info.hw_supported = i2c_info.i2c_hw_assist;
+
+ ddc_service->ddc_pin = dal_gpio_create_ddc(
+ gpio_service,
+ i2c_info.gpio_info.clk_a_register_index,
+ 1 << i2c_info.gpio_info.clk_a_shift,
+ &hw_info);
+ }
+
+ ddc_service->flags.EDID_QUERY_DONE_ONCE = false;
+ ddc_service->flags.FORCE_READ_REPEATED_START = false;
+ ddc_service->flags.EDID_STRESS_READ = false;
+
+ ddc_service->flags.IS_INTERNAL_DISPLAY =
+ connector_id == CONNECTOR_ID_EDP ||
+ connector_id == CONNECTOR_ID_LVDS;
+
+ ddc_service->wa.raw = 0;
+}
+
+struct ddc_service *dal_ddc_service_create(
+ struct ddc_service_init_data *init_data)
+{
+ struct ddc_service *ddc_service;
+
+ ddc_service = kzalloc(sizeof(struct ddc_service), GFP_KERNEL);
+
+ if (!ddc_service)
+ return NULL;
+
+ construct(ddc_service, init_data);
+ return ddc_service;
+}
+
+static void destruct(struct ddc_service *ddc)
+{
+ if (ddc->ddc_pin)
+ dal_gpio_destroy_ddc(&ddc->ddc_pin);
+}
+
+void dal_ddc_service_destroy(struct ddc_service **ddc)
+{
+ if (!ddc || !*ddc) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ destruct(*ddc);
+ kfree(*ddc);
+ *ddc = NULL;
+}
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
+{
+ return DDC_SERVICE_TYPE_CONNECTOR;
+}
+
+void dal_ddc_service_set_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type)
+{
+ ddc->transaction_type = type;
+}
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+{
+ switch (ddc->transaction_type) {
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type)
+{
+ ddc->dongle_type = dongle_type;
+}
+
+static uint32_t defer_delay_converter_wa(
+ struct ddc_service *ddc,
+ uint32_t defer_delay)
+{
+ struct dc_link *link = ddc->link;
+
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
+ !memcmp(link->dpcd_caps.branch_dev_name,
+ DP_DVI_CONVERTER_ID_4,
+ sizeof(link->dpcd_caps.branch_dev_name)))
+ return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ?
+ defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY;
+
+ return defer_delay;
+}
+
+#define DP_TRANSLATOR_DELAY 5
+
+uint32_t get_defer_delay(struct ddc_service *ddc)
+{
+ uint32_t defer_delay = 0;
+
+ switch (ddc->transaction_type) {
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+ if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) ||
+ (DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) ||
+ (DISPLAY_DONGLE_DP_HDMI_CONVERTER ==
+ ddc->dongle_type)) {
+
+ defer_delay = DP_TRANSLATOR_DELAY;
+
+ defer_delay =
+ defer_delay_converter_wa(ddc, defer_delay);
+
+ } else /*sink has a delay different from an Active Converter*/
+ defer_delay = 0;
+ break;
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+ defer_delay = DP_TRANSLATOR_DELAY;
+ break;
+ default:
+ break;
+ }
+ return defer_delay;
+}
+
+static bool i2c_read(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *buffer,
+ uint32_t len)
+{
+ uint8_t offs_data = 0;
+ struct i2c_payload payloads[2] = {
+ {
+ .write = true,
+ .address = address,
+ .length = 1,
+ .data = &offs_data },
+ {
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = buffer } };
+
+ struct i2c_command command = {
+ .payloads = payloads,
+ .number_of_payloads = 2,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ return dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+}
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap)
+{
+ uint8_t i;
+ bool is_valid_hdmi_signature;
+ enum display_dongle_type *dongle = &sink_cap->dongle_type;
+ uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
+ bool is_type2_dongle = false;
+ struct dp_hdmi_dongle_signature_data *dongle_signature;
+
+ /* Assume we have no valid DP passive dongle connected */
+ *dongle = DISPLAY_DONGLE_NONE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+
+ /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
+ if (!i2c_read(
+ ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf))) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
+
+ /* Check if Type 2 dongle.*/
+ if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
+ is_type2_dongle = true;
+
+ dongle_signature =
+ (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+
+ is_valid_hdmi_signature = true;
+
+ /* Check EOT */
+ if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
+ is_valid_hdmi_signature = false;
+ }
+
+ /* Check signature */
+ for (i = 0; i < sizeof(dongle_signature->id); ++i) {
+ /* If its not the right signature,
+ * skip mismatch in subversion byte.*/
+ if (dongle_signature->id[i] !=
+ dp_hdmi_dongle_signature_str[i] && i != 3) {
+
+ if (is_type2_dongle) {
+ is_valid_hdmi_signature = false;
+ break;
+ }
+
+ }
+ }
+
+ if (is_type2_dongle) {
+ uint32_t max_tmds_clk =
+ type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
+
+ max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
+
+ if (0 == max_tmds_clk ||
+ max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
+ max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle %dMhz: ",
+ max_tmds_clk);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
+ max_tmds_clk);
+
+ }
+
+ /* Multiply by 1000 to convert to kHz. */
+ sink_cap->max_hdmi_pixel_clock =
+ max_tmds_clk * 1000;
+ }
+
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ }
+ }
+
+ return;
+}
+
+enum {
+ DP_SINK_CAP_SIZE =
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
+};
+
+bool dal_ddc_service_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size)
+{
+ bool ret;
+ uint32_t payload_size =
+ dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+ DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
+
+ uint32_t write_payloads =
+ (write_size + payload_size - 1) / payload_size;
+
+ uint32_t read_payloads =
+ (read_size + payload_size - 1) / payload_size;
+
+ uint32_t payloads_num = write_payloads + read_payloads;
+
+ if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+ return false;
+
+ /*TODO: len of payload data for i2c and aux is uint8!!!!,
+ * but we want to read 256 over i2c!!!!*/
+ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+
+ struct aux_payloads *payloads =
+ dal_ddc_aux_payloads_create(ddc->ctx, payloads_num);
+
+ struct aux_command command = {
+ .payloads = dal_ddc_aux_payloads_get(payloads),
+ .number_of_payloads = 0,
+ .defer_delay = get_defer_delay(ddc),
+ .max_defer_write_retry = 0 };
+
+ dal_ddc_aux_payloads_add(
+ payloads, address, write_size, write_buf, true);
+
+ dal_ddc_aux_payloads_add(
+ payloads, address, read_size, read_buf, false);
+
+ command.number_of_payloads =
+ dal_ddc_aux_payloads_get_count(payloads);
+
+ ret = dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command);
+
+ dal_ddc_aux_payloads_destroy(&payloads);
+
+ } else {
+ struct i2c_payloads *payloads =
+ dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+
+ struct i2c_command command = {
+ .payloads = dal_ddc_i2c_payloads_get(payloads),
+ .number_of_payloads = 0,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ dal_ddc_i2c_payloads_add(
+ payloads, address, write_size, write_buf, true);
+
+ dal_ddc_i2c_payloads_add(
+ payloads, address, read_size, read_buf, false);
+
+ command.number_of_payloads =
+ dal_ddc_i2c_payloads_get_count(payloads);
+
+ ret = dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+
+ dal_ddc_i2c_payloads_destroy(&payloads);
+ }
+
+ return ret;
+}
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t len)
+{
+ struct aux_payload read_payload = {
+ .i2c_over_aux = i2c,
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = data,
+ };
+ struct aux_command command = {
+ .payloads = &read_payload,
+ .number_of_payloads = 1,
+ .defer_delay = 0,
+ .max_defer_write_retry = 0,
+ .mot = mot
+ };
+
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+ }
+
+ if (dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command))
+ return DDC_RESULT_SUCESSFULL;
+
+ return DDC_RESULT_FAILED_OPERATION;
+}
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t len)
+{
+ struct aux_payload write_payload = {
+ .i2c_over_aux = i2c,
+ .write = true,
+ .address = address,
+ .length = len,
+ .data = (uint8_t *)data,
+ };
+ struct aux_command command = {
+ .payloads = &write_payload,
+ .number_of_payloads = 1,
+ .defer_delay = 0,
+ .max_defer_write_retry = 0,
+ .mot = mot
+ };
+
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+ }
+
+ if (dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command))
+ return DDC_RESULT_SUCESSFULL;
+
+ return DDC_RESULT_FAILED_OPERATION;
+}
+
+/*test only function*/
+void dal_ddc_service_set_ddc_pin(
+ struct ddc_service *ddc_service,
+ struct ddc *ddc)
+{
+ ddc_service->ddc_pin = ddc;
+}
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+{
+ return ddc_service->ddc_pin;
+}
+
+void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble)
+{
+ bool over_340_mhz = pix_clk > 340000 ? 1 : 0;
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_SINK_VERSION;
+ uint8_t sink_version = 0;
+ uint8_t write_buffer[2] = {0};
+ /*Lower than 340 Scramble bit from SCDC caps*/
+
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &sink_version, sizeof(sink_version));
+ if (sink_version == 1) {
+ /*Source Version = 1*/
+ write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
+ write_buffer[1] = 1;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ write_buffer, sizeof(write_buffer), NULL, 0);
+ /*Read Request from SCDC caps*/
+ }
+ write_buffer[0] = HDMI_SCDC_TMDS_CONFIG;
+
+ if (over_340_mhz) {
+ write_buffer[1] = 3;
+ } else if (lte_340_scramble) {
+ write_buffer[1] = 1;
+ } else {
+ write_buffer[1] = 0;
+ }
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+ sizeof(write_buffer), NULL, 0);
+}
+
+void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+{
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
+ uint8_t tmds_config = 0;
+
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &tmds_config, sizeof(tmds_config));
+ if (tmds_config & 0x1) {
+ union hdmi_scdc_status_flags_data status_data = { {0} };
+ uint8_t scramble_status = 0;
+
+ offset = HDMI_SCDC_SCRAMBLER_STATUS;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ &offset, sizeof(offset), &scramble_status,
+ sizeof(scramble_status));
+ offset = HDMI_SCDC_STATUS_FLAGS;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ &offset, sizeof(offset), status_data.byte,
+ sizeof(status_data.byte));
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
new file mode 100644
index 000000000000..e6bf05d76a94
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -0,0 +1,2601 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_link_dp.h"
+#include "dm_helpers.h"
+
+#include "inc/core_types.h"
+#include "link_hwss.h"
+#include "dc_link_ddc.h"
+#include "core_status.h"
+#include "dpcd_defs.h"
+
+#include "resource.h"
+
+/* maximum pre emphasis level allowed for each voltage swing level*/
+static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_DISABLED };
+
+enum {
+ POST_LT_ADJ_REQ_LIMIT = 6,
+ POST_LT_ADJ_REQ_TIMEOUT = 200
+};
+
+enum {
+ LINK_TRAINING_MAX_RETRY_COUNT = 5,
+ /* to avoid infinite loop where-in the receiver
+ * switches between different VS
+ */
+ LINK_TRAINING_MAX_CR_RETRY = 100
+};
+
+static bool decide_fallback_link_setting(
+ struct dc_link_settings initial_link_settings,
+ struct dc_link_settings *current_link_setting,
+ enum link_training_result training_result);
+static struct dc_link_settings get_common_supported_link_settings (
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b);
+
+static void wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t default_wait_in_micro_secs)
+{
+ union training_aux_rd_interval training_rd_interval;
+
+ /* overwrite the delay if rev > 1.1*/
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ /* DP 1.2 or later - retrieve delay through
+ * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ default_wait_in_micro_secs =
+ training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
+
+ udelay(default_wait_in_micro_secs);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n wait = %d\n",
+ __func__,
+ default_wait_in_micro_secs);
+}
+
+static void dpcd_set_training_pattern(
+ struct dc_link *link,
+ union dpcd_training_pattern dpcd_pattern)
+{
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ 1);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+static void dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
+{
+ uint8_t rate = (uint8_t)
+ (lt_settings->link_settings.link_rate);
+
+ union down_spread_ctrl downspread = {{0}};
+ union lane_count_set lane_count_set = {{0}};
+ uint8_t link_set_buffer[2];
+
+ downspread.raw = (uint8_t)
+ (lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = 1;
+
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+
+ link_set_buffer[0] = rate;
+ link_set_buffer[1] = lane_count_set.raw;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET,
+ link_set_buffer, 2);
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+}
+
+static enum dpcd_training_patterns
+ hw_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern)
+{
+ enum dpcd_training_patterns dpcd_tr_pattern =
+ DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ switch (pattern) {
+ case HW_DP_TRAINING_PATTERN_1:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+ break;
+ case HW_DP_TRAINING_PATTERN_2:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+ break;
+ case HW_DP_TRAINING_PATTERN_3:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+ break;
+ case HW_DP_TRAINING_PATTERN_4:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
+ break;
+ default:
+ ASSERT(0);
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+
+ return dpcd_tr_pattern;
+
+}
+
+static void dpcd_set_lt_pattern_and_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum hw_dp_training_pattern pattern)
+{
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET;
+ uint8_t dpcd_lt_buffer[5] = {0};
+ union dpcd_training_pattern dpcd_pattern = {{0}};
+ uint32_t lane;
+ uint32_t size_in_bytes;
+ bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+
+ /*****************************************************************
+ * DpcdAddress_TrainingPatternSet
+ *****************************************************************/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ hw_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+ dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+ = dpcd_pattern.raw;
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+
+ /*****************************************************************
+ * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
+ *****************************************************************/
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count); lane++) {
+
+ dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
+
+ dpcd_lane[lane].bits.MAX_SWING_REACHED =
+ (lt_settings->lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (lt_settings->lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+
+ /* concatinate everything into one buffer*/
+
+ size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+
+ // 0x00103 - 0x00102
+ memmove(
+ &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset],
+ dpcd_lane,
+ size_in_bytes);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n %x VS set = %x PE set = %x \
+ max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ if (edp_workaround) {
+ /* for eDP write in 2 parts because the 5-byte burst is
+ * causing issues on some eDP panels (EPR#366724)
+ */
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw) );
+
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(dpcd_lane),
+ size_in_bytes);
+
+ } else
+ /* write it all in (1 + number-of-lanes)-byte burst*/
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ size_in_bytes + sizeof(dpcd_pattern.raw) );
+
+ link->cur_lane_setting = lt_settings->lane_settings[0];
+}
+
+static bool is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool done = true;
+ uint32_t lane;
+ /*LANEx_CR_DONE bits All 1's?*/
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+ done = false;
+ }
+ return done;
+
+}
+
+static bool is_ch_eq_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status,
+ union lane_align_status_updated *lane_status_updated)
+{
+ bool done = true;
+ uint32_t lane;
+ if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
+ done = false;
+ else {
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
+ !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+ done = false;
+ }
+ }
+ return done;
+
+}
+
+static void update_drive_settings(
+ struct link_training_settings *dest,
+ struct link_training_settings src)
+{
+ uint32_t lane;
+ for (lane = 0; lane < src.link_settings.lane_count; lane++) {
+ dest->lane_settings[lane].VOLTAGE_SWING =
+ src.lane_settings[lane].VOLTAGE_SWING;
+ dest->lane_settings[lane].PRE_EMPHASIS =
+ src.lane_settings[lane].PRE_EMPHASIS;
+ dest->lane_settings[lane].POST_CURSOR2 =
+ src.lane_settings[lane].POST_CURSOR2;
+ }
+}
+
+static uint8_t get_nibble_at_index(const uint8_t *buf,
+ uint32_t index)
+{
+ uint8_t nibble;
+ nibble = buf[index / 2];
+
+ if (index % 2)
+ nibble >>= 4;
+ else
+ nibble &= 0x0F;
+
+ return nibble;
+}
+
+static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+ enum dc_voltage_swing voltage)
+{
+ enum dc_pre_emphasis pre_emphasis;
+ pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+ if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+ pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+ return pre_emphasis;
+
+}
+
+static void find_max_drive_settings(
+ const struct link_training_settings *link_training_setting,
+ struct link_training_settings *max_lt_setting)
+{
+ uint32_t lane;
+ struct dc_lane_settings max_requested;
+
+ max_requested.VOLTAGE_SWING =
+ link_training_setting->
+ lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS =
+ link_training_setting->
+ lane_settings[0].PRE_EMPHASIS;
+ /*max_requested.postCursor2 =
+ * link_training_setting->laneSettings[0].postCursor2;*/
+
+ /* Determine what the maximum of the requested settings are*/
+ for (lane = 1; lane < link_training_setting->link_settings.lane_count;
+ lane++) {
+ if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
+ max_requested.VOLTAGE_SWING)
+
+ max_requested.VOLTAGE_SWING =
+ link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING;
+
+ if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
+ max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS =
+ link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS;
+
+ /*
+ if (link_training_setting->laneSettings[lane].postCursor2 >
+ max_requested.postCursor2)
+ {
+ max_requested.postCursor2 =
+ link_training_setting->laneSettings[lane].postCursor2;
+ }
+ */
+ }
+
+ /* make sure the requested settings are
+ * not higher than maximum settings*/
+ if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
+ max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
+
+ if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
+ max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
+ /*
+ if (max_requested.postCursor2 > PostCursor2_MaxLevel)
+ max_requested.postCursor2 = PostCursor2_MaxLevel;
+ */
+
+ /* make sure the pre-emphasis matches the voltage swing*/
+ if (max_requested.PRE_EMPHASIS >
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING))
+ max_requested.PRE_EMPHASIS =
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING);
+
+ /*
+ * Post Cursor2 levels are completely independent from
+ * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
+ * can only be applied to each allowable combination of voltage
+ * swing and pre-emphasis levels */
+ /* if ( max_requested.postCursor2 >
+ * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
+ * max_requested.postCursor2 =
+ * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
+ */
+
+ max_lt_setting->link_settings.link_rate =
+ link_training_setting->link_settings.link_rate;
+ max_lt_setting->link_settings.lane_count =
+ link_training_setting->link_settings.lane_count;
+ max_lt_setting->link_settings.link_spread =
+ link_training_setting->link_settings.link_spread;
+
+ for (lane = 0; lane <
+ link_training_setting->link_settings.lane_count;
+ lane++) {
+ max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
+ max_requested.VOLTAGE_SWING;
+ max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
+ max_requested.PRE_EMPHASIS;
+ /*max_lt_setting->laneSettings[lane].postCursor2 =
+ * max_requested.postCursor2;
+ */
+ }
+
+}
+
+static void get_lane_status_and_drive_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status *ln_status,
+ union lane_align_status_updated *ln_status_updated,
+ struct link_training_settings *req_settings)
+{
+ uint8_t dpcd_buf[6] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
+ struct link_training_settings request_settings = {{0}};
+ uint32_t lane;
+
+ memset(req_settings, '\0', sizeof(struct link_training_settings));
+
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ ln_status[lane].raw =
+ get_nibble_at_index(&dpcd_buf[0], lane);
+ dpcd_lane_adjust[lane].raw =
+ get_nibble_at_index(&dpcd_buf[4], lane);
+ }
+
+ ln_status_updated->raw = dpcd_buf[2];
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
+ __func__,
+ DP_LANE0_1_STATUS, dpcd_buf[0],
+ DP_LANE2_3_STATUS, dpcd_buf[1]);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
+ __func__,
+ DP_ADJUST_REQUEST_LANE0_1,
+ dpcd_buf[4],
+ DP_ADJUST_REQUEST_LANE2_3,
+ dpcd_buf[5]);
+
+ /*copy to req_settings*/
+ request_settings.link_settings.lane_count =
+ link_training_setting->link_settings.lane_count;
+ request_settings.link_settings.link_rate =
+ link_training_setting->link_settings.link_rate;
+ request_settings.link_settings.link_spread =
+ link_training_setting->link_settings.link_spread;
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ request_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ request_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ }
+
+ /*Note: for postcursor2, read adjusted
+ * postcursor2 settings from*/
+ /*DpcdAddress_AdjustRequestPostCursor2 =
+ *0x020C (not implemented yet)*/
+
+ /* we find the maximum of the requested settings across all lanes*/
+ /* and set this maximum for all lanes*/
+ find_max_drive_settings(&request_settings, req_settings);
+
+ /* if post cursor 2 is needed in the future,
+ * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+ */
+
+}
+
+static void dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ uint32_t lane;
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->
+ link_settings.lane_count);
+ lane++) {
+ dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane[lane].bits.MAX_SWING_REACHED =
+ (link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+
+ core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(dpcd_lane),
+ link_training_setting->link_settings.lane_count);
+
+ /*
+ if (LTSettings.link.rate == LinkRate_High2)
+ {
+ DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
+ for ( uint32_t lane = 0;
+ lane < lane_count_DPMax; lane++)
+ {
+ dpcd_lane2[lane].bits.post_cursor2_set =
+ static_cast<unsigned char>(
+ LTSettings.laneSettings[lane].postCursor2);
+ dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
+ }
+ m_pDpcdAccessSrv->WriteDpcdData(
+ DpcdAddress_Lane0Set2,
+ reinterpret_cast<unsigned char*>(dpcd_lane2),
+ LTSettings.link.lanes);
+ }
+ */
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x VS set = %x PE set = %x \
+ max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ link->cur_lane_setting = link_training_setting->lane_settings[0];
+
+}
+
+static bool is_max_vs_reached(
+ const struct link_training_settings *lt_settings)
+{
+ uint32_t lane;
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count);
+ lane++) {
+ if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+ == VOLTAGE_SWING_MAX_LEVEL)
+ return true;
+ }
+ return false;
+
+}
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ /* program ASIC PHY settings*/
+ dp_set_hw_lane_settings(link, lt_settings);
+
+ /* Notify DP sink the PHY settings from source */
+ dpcd_set_lane_settings(link, lt_settings);
+}
+
+static bool perform_post_lt_adj_req_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+
+ uint32_t adj_req_count;
+ uint32_t adj_req_timer;
+ bool req_drv_setting_changed;
+ uint32_t lane;
+
+ req_drv_setting_changed = false;
+ for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+ adj_req_count++) {
+
+ req_drv_setting_changed = false;
+
+ for (adj_req_timer = 0;
+ adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+ adj_req_timer++) {
+
+ struct link_training_settings req_settings;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated
+ dpcd_lane_status_updated;
+
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ if (dpcd_lane_status_updated.bits.
+ POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+ return true;
+
+ if (!is_cr_done(lane_count, dpcd_lane_status))
+ return false;
+
+ if (!is_ch_eq_done(
+ lane_count,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated))
+ return false;
+
+ for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+ if (lt_settings->
+ lane_settings[lane].VOLTAGE_SWING !=
+ req_settings.lane_settings[lane].
+ VOLTAGE_SWING ||
+ lt_settings->lane_settings[lane].PRE_EMPHASIS !=
+ req_settings.lane_settings[lane].PRE_EMPHASIS) {
+
+ req_drv_setting_changed = true;
+ break;
+ }
+ }
+
+ if (req_drv_setting_changed) {
+ update_drive_settings(
+ lt_settings,req_settings);
+
+ dc_link_dp_set_drive_settings(link,
+ lt_settings);
+ break;
+ }
+
+ msleep(1);
+ }
+
+ if (!req_drv_setting_changed) {
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "%s: Post Link Training Adjust Request Timed out\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+ }
+ }
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "%s: Post Link Training Adjust Request limit reached\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+
+}
+
+static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
+{
+ enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
+ struct encoder_feature_support *features = &link->link_enc->features;
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+ if (features->flags.bits.IS_TPS3_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_3;
+
+ if (features->flags.bits.IS_TPS4_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_4)
+ return HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_3)
+ return HW_DP_TRAINING_PATTERN_3;
+
+ return HW_DP_TRAINING_PATTERN_2;
+}
+
+static enum link_training_result perform_channel_equalization_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ struct link_training_settings req_settings;
+ enum hw_dp_training_pattern hw_tr_pattern;
+ uint32_t retries_ch_eq;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {{0}};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};;
+
+ hw_tr_pattern = get_supported_tp(link);
+
+ dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, lt_settings);
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration*/
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ hw_tr_pattern);
+ else
+ dpcd_set_lane_settings(link, lt_settings);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_for_training_aux_rd_interval(link, 400);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink*/
+
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ /* 5. check CR done*/
+ if (!is_cr_done(lane_count, dpcd_lane_status))
+ return LINK_TRAINING_EQ_FAIL_CR;
+
+ /* 6. check CHEQ done*/
+ if (is_ch_eq_done(lane_count,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated))
+ return LINK_TRAINING_SUCCESS;
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ update_drive_settings(lt_settings, req_settings);
+ }
+
+ return LINK_TRAINING_EQ_FAIL_EQ;
+
+}
+
+static bool perform_clock_recovery_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t lane;
+ struct link_training_settings req_settings;
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+ enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+
+ retries_cr = 0;
+ retry_count = 0;
+ /* initial drive setting (VS/PE/PC2)*/
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->lane_settings[lane].VOLTAGE_SWING =
+ VOLTAGE_SWING_LEVEL0;
+ lt_settings->lane_settings[lane].PRE_EMPHASIS =
+ PRE_EMPHASIS_DISABLED;
+ lt_settings->lane_settings[lane].POST_CURSOR2 =
+ POST_CURSOR2_DISABLED;
+ }
+
+ dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+ /* najeeb - The synaptics MST hub can put the LT in
+ * infinite loop by switching the VS
+ */
+ /* between level 0 and level 1 continuously, here
+ * we try for CR lock for LinkTrainingMaxCRRetry count*/
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ /* 1. call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(
+ link,
+ lt_settings);
+
+ /* 2. update DPCD of the receiver*/
+ if (!retries_cr)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration.*/
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ hw_tr_pattern);
+ else
+ dpcd_set_lane_settings(
+ link,
+ lt_settings);
+
+ /* 3. wait receiver to lock-on*/
+ wait_for_training_aux_rd_interval(
+ link,
+ 100);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ /* 5. check CR done*/
+ if (is_cr_done(lane_count, dpcd_lane_status))
+ return true;
+
+ /* 6. max VS reached*/
+ if (is_max_vs_reached(lt_settings))
+ return false;
+
+ /* 7. same voltage*/
+ /* Note: VS same for all lanes,
+ * so comparing first lane is sufficient*/
+ if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+ req_settings.lane_settings[0].VOLTAGE_SWING)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ update_drive_settings(lt_settings, req_settings);
+
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "%s: Link Training Error, could not \
+ get CR after %d tries. \
+ Possibly voltage swing issue", __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ return false;
+}
+
+static inline bool perform_link_training_int(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ bool status)
+{
+ union lane_count_set lane_count_set = { {0} };
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+
+ /* 3. set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ dpcd_set_training_pattern(link, dpcd_pattern);
+
+ /* 4. mainlink output idle pattern*/
+ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ /*
+ * 5. post training adjust if required
+ * If the upstream DPTX and downstream DPRX both support TPS4,
+ * TPS4 must be used instead of POST_LT_ADJ_REQ.
+ */
+ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+ get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
+ return status;
+
+ if (status &&
+ perform_post_lt_adj_req_sequence(link, lt_settings) == false)
+ status = false;
+
+ lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
+ lane_count_set.bits.ENHANCED_FRAMING = 1;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+ core_link_write_dpcd(
+ link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+
+ return status;
+}
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+
+ char *link_rate = "Unknown";
+ struct link_training_settings lt_settings;
+
+ memset(&lt_settings, '\0', sizeof(lt_settings));
+
+ lt_settings.link_settings.link_rate = link_setting->link_rate;
+ lt_settings.link_settings.lane_count = link_setting->lane_count;
+
+ /*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
+
+ /* TODO hard coded to SS for now
+ * lt_settings.link_settings.link_spread =
+ * dal_display_path_is_ss_supported(
+ * path_mode->display_path) ?
+ * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+ * LINK_SPREAD_DISABLED;
+ */
+ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ /* 1. set link rate, lane count and spread*/
+ dpcd_set_link_settings(link, &lt_settings);
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)*/
+ if (!perform_clock_recovery_sequence(link, &lt_settings)) {
+ status = LINK_TRAINING_CR_FAIL;
+ } else {
+ status = perform_channel_equalization_sequence(link,
+ &lt_settings);
+ }
+
+ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
+ if (!perform_link_training_int(link,
+ &lt_settings,
+ status == LINK_TRAINING_SUCCESS)) {
+ /* the next link training setting in this case
+ * would be the same as CR failure case.
+ */
+ status = LINK_TRAINING_CR_FAIL;
+ }
+ }
+
+ /* 6. print status message*/
+ switch (lt_settings.link_settings.link_rate) {
+
+ case LINK_RATE_LOW:
+ link_rate = "RBR";
+ break;
+ case LINK_RATE_HIGH:
+ link_rate = "HBR";
+ break;
+ case LINK_RATE_HIGH2:
+ link_rate = "HBR2";
+ break;
+ case LINK_RATE_RBR2:
+ link_rate = "RBR2";
+ break;
+ case LINK_RATE_HIGH3:
+ link_rate = "HBR3";
+ break;
+ default:
+ break;
+ }
+
+ /* Connectivity log: link training */
+ CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
+ link_rate,
+ lt_settings.link_settings.lane_count,
+ (status == LINK_TRAINING_SUCCESS) ? "pass" :
+ ((status == LINK_TRAINING_CR_FAIL) ? "CR failed" :
+ "EQ failed"),
+ lt_settings.lane_settings[0].VOLTAGE_SWING,
+ lt_settings.lane_settings[0].PRE_EMPHASIS);
+
+ return status;
+}
+
+
+bool perform_link_training_with_retries(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts)
+{
+ uint8_t j;
+ uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+
+ for (j = 0; j < attempts; ++j) {
+
+ if (dc_link_dp_perform_link_training(
+ link,
+ link_setting,
+ skip_video_pattern) == LINK_TRAINING_SUCCESS)
+ return true;
+
+ msleep(delay_between_attempts);
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ }
+
+ return false;
+}
+
+static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+
+ /* Higher link settings based on feature supported */
+ if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ /* Lower link settings based on sink's link cap */
+ if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count =
+ link->reported_link_cap.lane_count;
+ if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate =
+ link->reported_link_cap.link_rate;
+ if (link->reported_link_cap.link_spread <
+ max_link_cap.link_spread)
+ max_link_cap.link_spread =
+ link->reported_link_cap.link_spread;
+ return max_link_cap;
+}
+
+bool dp_hbr_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting)
+{
+ struct dc_link_settings max_link_cap = {0};
+ struct dc_link_settings cur_link_setting = {0};
+ struct dc_link_settings *cur = &cur_link_setting;
+ struct dc_link_settings initial_link_settings = {0};
+ bool success;
+ bool skip_link_training;
+ bool skip_video_pattern;
+ struct clock_source *dp_cs;
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
+ enum link_training_result status;
+
+ success = false;
+ skip_link_training = false;
+
+ max_link_cap = get_max_link_cap(link);
+
+ /* TODO implement override and monitor patch later */
+
+ /* try to train the link from high to low to
+ * find the physical link capability
+ */
+ /* disable PHY done possible by BIOS, will be done by driver itself */
+ dp_disable_link_phy(link, link->connector_signal);
+
+ dp_cs = link->dc->res_pool->dp_clock_source;
+
+ if (dp_cs)
+ dp_cs_id = dp_cs->id;
+ else {
+ /*
+ * dp clock source is not initialized for some reason.
+ * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
+ */
+ ASSERT(dp_cs);
+ }
+
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+ cur_link_setting = initial_link_settings;
+ do {
+ skip_video_pattern = true;
+
+ if (cur->link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ dp_enable_link_phy(
+ link,
+ link->connector_signal,
+ dp_cs_id,
+ cur);
+
+ if (skip_link_training)
+ success = true;
+ else {
+ status = dc_link_dp_perform_link_training(
+ link,
+ cur,
+ skip_video_pattern);
+ if (status == LINK_TRAINING_SUCCESS)
+ success = true;
+ }
+
+ if (success)
+ link->verified_link_cap = *cur;
+
+ /* always disable the link before trying another
+ * setting or before returning we'll enable it later
+ * based on the actual mode we're driving
+ */
+ dp_disable_link_phy(link, link->connector_signal);
+ } while (!success && decide_fallback_link_setting(
+ initial_link_settings, cur, status));
+
+ /* Link Training failed for all Link Settings
+ * (Lane Count is still unknown)
+ */
+ if (!success) {
+ /* If all LT fails for all settings,
+ * set verified = failed safe (1 lane low)
+ */
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+
+ link->verified_link_cap.link_spread =
+ LINK_SPREAD_DISABLED;
+ }
+
+
+ return success;
+}
+
+static struct dc_link_settings get_common_supported_link_settings (
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b)
+{
+ struct dc_link_settings link_settings = {0};
+
+ link_settings.lane_count =
+ (link_setting_a.lane_count <=
+ link_setting_b.lane_count) ?
+ link_setting_a.lane_count :
+ link_setting_b.lane_count;
+ link_settings.link_rate =
+ (link_setting_a.link_rate <=
+ link_setting_b.link_rate) ?
+ link_setting_a.link_rate :
+ link_setting_b.link_rate;
+ link_settings.link_spread = LINK_SPREAD_DISABLED;
+
+ /* in DP compliance test, DPR-120 may have
+ * a random value in its MAX_LINK_BW dpcd field.
+ * We map it to the maximum supported link rate that
+ * is smaller than MAX_LINK_BW in this case.
+ */
+ if (link_settings.link_rate > LINK_RATE_HIGH3) {
+ link_settings.link_rate = LINK_RATE_HIGH3;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH3
+ && link_settings.link_rate > LINK_RATE_HIGH2) {
+ link_settings.link_rate = LINK_RATE_HIGH2;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH2
+ && link_settings.link_rate > LINK_RATE_HIGH) {
+ link_settings.link_rate = LINK_RATE_HIGH;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH
+ && link_settings.link_rate > LINK_RATE_LOW) {
+ link_settings.link_rate = LINK_RATE_LOW;
+ } else if (link_settings.link_rate < LINK_RATE_LOW) {
+ link_settings.link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ return link_settings;
+}
+
+static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
+{
+ return lane_count <= LANE_COUNT_ONE;
+}
+
+static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
+{
+ return link_rate <= LINK_RATE_LOW;
+}
+
+static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_FOUR:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_ONE;
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_UNKNOWN;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_LOW;
+ case LINK_RATE_LOW:
+ return LINK_RATE_UNKNOWN;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_FOUR;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH3;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+/*
+ * function: set link rate and lane count fallback based
+ * on current link setting and last link training result
+ * return value:
+ * true - link setting could be set
+ * false - has reached minimum setting
+ * and no further fallback could be done
+ */
+static bool decide_fallback_link_setting(
+ struct dc_link_settings initial_link_settings,
+ struct dc_link_settings *current_link_setting,
+ enum link_training_result training_result)
+{
+ if (!current_link_setting)
+ return false;
+
+ switch (training_result) {
+ case LINK_TRAINING_CR_FAIL:
+ {
+ if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else if (!reached_minimum_lane_count
+ (current_link_setting->lane_count)) {
+ current_link_setting->link_rate =
+ initial_link_settings.link_rate;
+ current_link_setting->lane_count =
+ reduce_lane_count(
+ current_link_setting->lane_count);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ {
+ if (!reached_minimum_lane_count
+ (current_link_setting->lane_count)) {
+ current_link_setting->lane_count =
+ reduce_lane_count(
+ current_link_setting->lane_count);
+ } else if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_CR:
+ {
+ if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else {
+ return false;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
+static uint32_t bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+ switch (timing->display_color_depth) {
+
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_khz;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1)
+ /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
+ kbps *= 3;
+
+ return kbps;
+
+}
+
+static uint32_t bandwidth_in_kbps_from_link_settings(
+ const struct dc_link_settings *link_setting)
+{
+ uint32_t link_rate_in_kbps = link_setting->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+
+ uint32_t lane_count = link_setting->lane_count;
+ uint32_t kbps = link_rate_in_kbps;
+ kbps *= lane_count;
+ kbps *= 8; /* 8 bits per byte*/
+
+ return kbps;
+
+}
+
+bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t req_bw;
+ uint32_t max_bw;
+
+ const struct dc_link_settings *link_setting;
+
+ /*always DP fail safe mode*/
+ if (timing->pix_clk_khz == (uint32_t)25175 &&
+ timing->h_addressable == (uint32_t)640 &&
+ timing->v_addressable == (uint32_t)480)
+ return true;
+
+ /* We always use verified link settings */
+ link_setting = &link->verified_link_cap;
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /*if (flags.DYNAMIC_VALIDATION == 1 &&
+ link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+ link_setting = &link->verified_link_cap;
+ */
+
+ req_bw = bandwidth_in_kbps_from_timing(timing);
+ max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+
+ if (req_bw <= max_bw) {
+ /* remember the biggest mode here, during
+ * initial link training (to get
+ * verified_link_cap), LS sends event about
+ * cannot train at reported cap to upper
+ * layer and upper layer will re-enumerate modes.
+ * this is not necessary if the lower
+ * verified_link_cap is enough to drive
+ * all the modes */
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /* if (flags.DYNAMIC_VALIDATION == 1)
+ dpsst->max_req_bw_for_verified_linkcap = dal_max(
+ dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+ return true;
+ } else
+ return false;
+}
+
+void decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+
+ struct dc_link_settings initial_link_setting = {
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+ struct dc_link_settings current_link_setting =
+ initial_link_setting;
+ struct dc_link *link;
+ uint32_t req_bw;
+ uint32_t link_bw;
+
+ req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->sink->link;
+
+ /* if preferred is specified through AMDDP, use it, if it's enough
+ * to drive the mode
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (is_mst_supported(link)) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = bandwidth_in_kbps_from_link_settings(
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return;
+ }
+
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+
+ BREAK_TO_DEBUGGER();
+ ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
+
+ *link_setting = link->verified_link_cap;
+}
+
+/*************************Short Pulse IRQ***************************/
+
+static bool hpd_rx_irq_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state = 0;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+
+ /*1. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if one of the channel equalization, clock
+ * recovery or symbol lock is dropped
+ * consider it as (link has been
+ * dropped) dp sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Link Status changed.\n", __func__);
+
+ return_code = true;
+
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
+ }
+ }
+
+ return return_code;
+}
+
+static enum dc_status read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes which is
+ * the req from the above mentioned test cases.
+ */
+ return core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+}
+
+static bool allow_hpd_rx_irq(const struct dc_link *link)
+{
+ /*
+ * Don't handle RX IRQ unless one of following is met:
+ * 1) The link is established (cur_link_settings != unknown)
+ * 2) We kicked off MST detection
+ * 3) We know we're dealing with an active dongle
+ */
+
+ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ (link->type == dc_connection_mst_branch) ||
+ is_dp_active_dongle(link))
+ return true;
+
+ return false;
+}
+
+static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+{
+ union dpcd_psr_configuration psr_configuration;
+
+ if (!link->psr_enabled)
+ return false;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 368,/*DpcdAddress_PSR_Enable_Cfg*/
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+
+ if (psr_configuration.bits.ENABLE) {
+ unsigned char dpcdbuf[3] = {0};
+ union psr_error_status psr_error_status;
+ union psr_sink_psr_status psr_sink_psr_status;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 0x2006, /*DpcdAddress_PSR_Error_Status*/
+ (unsigned char *) dpcdbuf,
+ sizeof(dpcdbuf));
+
+ /*DPCD 2006h ERROR STATUS*/
+ psr_error_status.raw = dpcdbuf[0];
+ /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
+ psr_sink_psr_status.raw = dpcdbuf[2];
+
+ if (psr_error_status.bits.LINK_CRC_ERROR ||
+ psr_error_status.bits.RFB_STORAGE_ERROR) {
+ /* Acknowledge and clear error bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 8198,/*DpcdAddress_PSR_Error_Status*/
+ &psr_error_status.raw,
+ sizeof(psr_error_status.raw));
+
+ /* PSR error, disable and re-enable PSR */
+ dc_link_set_psr_enable(link, false, true);
+ dc_link_set_psr_enable(link, true, true);
+
+ return true;
+ } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
+ /* No error is detect, PSR is active.
+ * We should return with IRQ_HPD handled without
+ * checking for loss of sync since PSR would have
+ * powered down main link.
+ */
+ return true;
+ }
+ }
+ return false;
+}
+
+static void dp_test_send_link_training(struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LANE_COUNT,
+ (unsigned char *)(&link_settings.lane_count),
+ 1);
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LINK_RATE,
+ (unsigned char *)(&link_settings.link_rate),
+ 1);
+
+ /* Set preferred link settings */
+ link->verified_link_cap.lane_count = link_settings.lane_count;
+ link->verified_link_cap.link_rate = link_settings.link_rate;
+
+ dp_retrain_link_dp_test(link, &link_settings, false);
+}
+
+/* TODO hbr2 compliance eye output is unstable
+ * (toggling on and off) with debugger break
+ * This caueses intermittent PHY automation failure
+ * Need to look into the root cause */
+static uint8_t force_tps4_for_cp2520 = 1;
+
+static void dp_test_send_phy_test_pattern(struct dc_link *link)
+{
+ union phy_test_pattern dpcd_test_pattern;
+ union lane_adjust dpcd_lane_adjustment[2];
+ unsigned char dpcd_post_cursor_2_adjustment = 0;
+ unsigned char test_80_bit_pattern[
+ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+ enum dp_test_pattern test_pattern;
+ struct dc_link_training_settings link_settings;
+ union lane_adjust dpcd_lane_adjust;
+ unsigned int lane;
+ struct link_training_settings link_training_settings;
+ int i = 0;
+
+ dpcd_test_pattern.raw = 0;
+ memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
+ memset(&link_settings, 0, sizeof(link_settings));
+
+ /* get phy test pattern and pattern parameters from DP receiver */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PHY_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_LANE0_1,
+ &dpcd_lane_adjustment[0].raw,
+ sizeof(dpcd_lane_adjustment));
+
+ /*get post cursor 2 parameters
+ * For DP 1.1a or eariler, this DPCD register's value is 0
+ * For DP 1.2 or later:
+ * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
+ * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
+ */
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_POST_CURSOR2,
+ &dpcd_post_cursor_2_adjustment,
+ sizeof(dpcd_post_cursor_2_adjustment));
+
+ /* translate request */
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case PHY_TEST_PATTERN_D10_2:
+ test_pattern = DP_TEST_PATTERN_D102;
+ break;
+ case PHY_TEST_PATTERN_SYMBOL_ERROR:
+ test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case PHY_TEST_PATTERN_PRBS7:
+ test_pattern = DP_TEST_PATTERN_PRBS7;
+ break;
+ case PHY_TEST_PATTERN_80BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_CP2520_1:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (force_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_2:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (force_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
+ core_link_read_dpcd(
+ link,
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ test_80_bit_pattern,
+ sizeof(test_80_bit_pattern));
+
+ /* prepare link training settings */
+ link_settings.link = link->cur_link_settings;
+
+ for (lane = 0; lane <
+ (unsigned int)(link->cur_link_settings.lane_count);
+ lane++) {
+ dpcd_lane_adjust.raw =
+ get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+ link_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_settings.lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ }
+
+ for (i = 0; i < 4; i++)
+ link_training_settings.lane_settings[i] =
+ link_settings.lane_settings[i];
+ link_training_settings.link_settings = link_settings.link;
+ link_training_settings.allow_invalid_msa_timing_param = false;
+ /*Usage: Measure DP physical lane signal
+ * by DP SI test equipment automatically.
+ * PHY test pattern request is generated by equipment via HPD interrupt.
+ * HPD needs to be active all the time. HPD should be active
+ * all the time. Do not touch it.
+ * forward request to DS
+ */
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ &link_training_settings,
+ test_80_bit_pattern,
+ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
+}
+
+static void dp_test_send_link_test_pattern(struct dc_link *link)
+{
+ union link_test_pattern dpcd_test_pattern;
+ union test_misc dpcd_test_params;
+ enum dp_test_pattern test_pattern;
+
+ memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+ memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+
+ /* get link test pattern and pattern parameters */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_TEST_MISC0,
+ &dpcd_test_params.raw,
+ sizeof(dpcd_test_params));
+
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case LINK_TEST_PATTERN_COLOR_RAMP:
+ test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+ break;
+ case LINK_TEST_PATTERN_VERTICAL_BARS:
+ test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+ break; /* black and white */
+ case LINK_TEST_PATTERN_COLOR_SQUARES:
+ test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
+ TEST_DYN_RANGE_VESA ?
+ DP_TEST_PATTERN_COLOR_SQUARES :
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA);
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ NULL,
+ NULL,
+ 0);
+}
+
+static void handle_automated_test(struct dc_link *link)
+{
+ union test_request test_request;
+ union test_response test_response;
+
+ memset(&test_request, 0, sizeof(test_request));
+ memset(&test_response, 0, sizeof(test_response));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+ if (test_request.bits.LINK_TRAINING) {
+ /* ACK first to let DP RX test box monitor LT sequence */
+ test_response.bits.ACK = 1;
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ dp_test_send_link_training(link);
+ /* no acknowledge request is needed again */
+ test_response.bits.ACK = 0;
+ }
+ if (test_request.bits.LINK_TEST_PATTRN) {
+ dp_test_send_link_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+ if (test_request.bits.PHY_TEST_PATTERN) {
+ dp_test_send_phy_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+ if (!test_request.raw)
+ /* no requests, revert all test signals
+ * TODO: revert all test signals
+ */
+ test_response.bits.ACK = 1;
+ /* send request acknowledgment */
+ if (test_response.bits.ACK)
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+{
+ union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union device_service_irq device_service_clear = { { 0 } };
+ enum dc_status result = DDC_RESULT_UNKNOWN;
+ bool status = false;
+ /* For use cases related to down stream connection status change,
+ * PSR and device auto test, refer to function handle_sst_hpd_irq
+ * in DAL2.1*/
+
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Got short pulse HPD on link %d\n",
+ __func__, link->link_index);
+
+
+ /* All the "handle_hpd_irq_xxx()" methods
+ * should be called only after
+ * dal_dpsst_ls_read_hpd_irq_data
+ * Order of calls is important too
+ */
+ result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+ if (out_hpd_irq_dpcd_data)
+ *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
+
+ if (result != DC_OK) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: DPCD read failed to obtain irq data\n",
+ __func__);
+ return false;
+ }
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ device_service_clear.bits.AUTOMATED_TEST = 1;
+ core_link_write_dpcd(
+ link,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &device_service_clear.raw,
+ sizeof(device_service_clear.raw));
+ device_service_clear.raw = 0;
+ handle_automated_test(link);
+ return false;
+ }
+
+ if (!allow_hpd_rx_irq(link)) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: skipping HPD handling on %d\n",
+ __func__, link->link_index);
+ return false;
+ }
+
+ if (handle_hpd_irq_psr_sink(link))
+ /* PSR-related error was detected and handled */
+ return true;
+
+ /* If PSR-related error handled, Main link may be off,
+ * so do not handle as a normal sink status change interrupt.
+ */
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+ return true;
+
+ /* check if we have MST msg and return since we poll for it */
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+ return false;
+
+ /* For now we only handle 'Downstream port status' case.
+ * If we got sink count changed it means
+ * Downstream port status changed,
+ * then DM should call DC to do the detection. */
+ if (hpd_rx_irq_check_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
+ /* Connectivity log: link loss */
+ CONN_DATA_LINK_LOSS(link,
+ hpd_irq_dpcd_data.raw,
+ sizeof(hpd_irq_dpcd_data),
+ "Status: ");
+
+ perform_link_training_with_retries(link,
+ &link->cur_link_settings,
+ true, LINK_TRAINING_ATTEMPTS);
+
+ status = false;
+ }
+
+ if (link->type == dc_connection_active_dongle &&
+ hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+ != link->dpcd_sink_count)
+ status = true;
+
+ /* reasons for HPD RX:
+ * 1. Link Loss - ie Re-train the Link
+ * 2. MST sideband message
+ * 3. Automated Test - ie. Internal Commit
+ * 4. CP (copy protection) - (not interesting for DM???)
+ * 5. DRR
+ * 6. Downstream Port status changed
+ * -ie. Detect - this the only one
+ * which is interesting for DM because
+ * it must call dc_link_detect.
+ */
+ return status;
+}
+
+/*query dpcd for version and mst cap addresses*/
+bool is_mst_supported(struct dc_link *link)
+{
+ bool mst = false;
+ enum dc_status st = DC_OK;
+ union dpcd_rev rev;
+ union mstm_cap cap;
+
+ rev.raw = 0;
+ cap.raw = 0;
+
+ st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
+ sizeof(rev));
+
+ if (st == DC_OK && rev.raw >= DPCD_REV_12) {
+
+ st = core_link_read_dpcd(link, DP_MSTM_CAP,
+ &cap.raw, sizeof(cap));
+ if (st == DC_OK && cap.bits.MST_CAP == 1)
+ mst = true;
+ }
+ return mst;
+
+}
+
+bool is_dp_active_dongle(const struct dc_link *link)
+{
+ enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
+
+ return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+}
+
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+ switch (bpc) {
+ case DOWN_STREAM_MAX_8BPC:
+ return 8;
+ case DOWN_STREAM_MAX_10BPC:
+ return 10;
+ case DOWN_STREAM_MAX_12BPC:
+ return 12;
+ case DOWN_STREAM_MAX_16BPC:
+ return 16;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static void get_active_converter_info(
+ uint8_t data, struct dc_link *link)
+{
+ union dp_downstream_port_present ds_port = { .byte = data };
+
+ /* decode converter info*/
+ if (!ds_port.fields.PORT_PRESENT) {
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ ddc_service_set_dongle_type(link->ddc,
+ link->dpcd_caps.dongle_type);
+ return;
+ }
+
+ switch (ds_port.fields.PORT_TYPE) {
+ case DOWNSTREAM_VGA:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWNSTREAM_DVI_HDMI:
+ /* At this point we don't know is it DVI or HDMI,
+ * assume DVI.*/
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ default:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
+ }
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
+ uint8_t det_caps[4];
+ union dwnstream_port_caps_byte0 *port_caps =
+ (union dwnstream_port_caps_byte0 *)det_caps;
+ core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
+ det_caps, sizeof(det_caps));
+
+ switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+ case DOWN_STREAM_DETAILED_VGA:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_DVI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_HDMI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+ link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
+ if (ds_port.fields.DETAILED_CAPS) {
+
+ union dwnstream_port_caps_byte3_hdmi
+ hdmi_caps = {.raw = det_caps[3] };
+ union dwnstream_port_caps_byte1
+ hdmi_color_caps = {.raw = det_caps[2] };
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
+ det_caps[1] * 25000;
+
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
+ hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+ hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+ hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+ hdmi_caps.bits.YCrCr422_CONVERSION;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+ hdmi_caps.bits.YCrCr420_CONVERSION;
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
+ translate_dpcd_max_bpc(
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
+
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ break;
+ }
+ }
+
+ ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
+
+ {
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+ }
+
+ {
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.branch_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+ }
+}
+
+static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+ int length)
+{
+ int retry = 0;
+ union dp_downstream_port_present ds_port = { 0 };
+
+ if (!link->dpcd_caps.dpcd_rev.raw) {
+ do {
+ dp_receiver_power_ctrl(link, true);
+ core_link_read_dpcd(link, DP_DPCD_REV,
+ dpcd_data, length);
+ link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+ DP_DPCD_REV -
+ DP_DPCD_REV];
+ } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+ }
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+ switch (link->dpcd_caps.branch_dev_id) {
+ /* Some active dongles (DP-VGA, DP-DLDVI converters) power down
+ * all internal circuits including AUX communication preventing
+ * reading DPCD table and EDID (spec violation).
+ * Encoder will skip DP RX power down on disable_output to
+ * keep receiver powered all the time.*/
+ case DP_BRANCH_DEVICE_ID_1:
+ case DP_BRANCH_DEVICE_ID_4:
+ link->wa_flags.dp_keep_receiver_powered = true;
+ break;
+
+ /* TODO: May need work around for other dongles. */
+ default:
+ link->wa_flags.dp_keep_receiver_powered = false;
+ break;
+ }
+ } else
+ link->wa_flags.dp_keep_receiver_powered = false;
+}
+
+static void retrieve_link_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
+
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+ union dp_downstream_port_present ds_port = { 0 };
+
+ memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(&down_strm_port_count,
+ '\0', sizeof(union down_stream_port_count));
+ memset(&edp_config_cap, '\0',
+ sizeof(union edp_configuration_cap));
+
+ core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+
+ {
+ union training_aux_rd_interval aux_rd_interval;
+
+ aux_rd_interval.raw =
+ dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
+
+ if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+ core_link_read_dpcd(
+ link,
+ DP_DP13_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ }
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ link->test_pattern_enabled = false;
+ link->compliance_test_state.raw = 0;
+
+ /* read sink count */
+ core_link_read_dpcd(link,
+ DP_SINK_COUNT,
+ &link->dpcd_caps.sink_count.raw,
+ sizeof(link->dpcd_caps.sink_count.raw));
+
+ /* Connectivity log: detection */
+ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+}
+
+void detect_dp_sink_caps(struct dc_link *link)
+{
+ retrieve_link_cap(link);
+
+ /* dc init_hw has power encoder using default
+ * signal for connector. For native DP, no
+ * need to power up encoder again. If not native
+ * DP, hw_init may need check signal or power up
+ * encoder here.
+ */
+ /* TODO save sink caps in link->sink */
+}
+
+void detect_edp_sink_caps(struct dc_link *link)
+{
+ retrieve_link_cap(link);
+ link->verified_link_cap = link->reported_link_cap;
+}
+
+void dc_link_dp_enable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->enable_hpd(encoder);
+}
+
+void dc_link_dp_disable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->disable_hpd(encoder);
+}
+
+static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
+{
+ if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+ return true;
+ else
+ return false;
+}
+
+static void set_crtc_test_pattern(struct dc_link *link,
+ struct pipe_ctx *pipe_ctx,
+ enum dp_test_pattern test_pattern)
+{
+ enum controller_dp_test_pattern controller_test_pattern;
+ enum dc_color_depth color_depth = pipe_ctx->
+ stream->timing.display_color_depth;
+ struct bit_depth_reduction_params params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ break;
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+ break;
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+ break;
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+ break;
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+ break;
+ default:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ break;
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ {
+ /* disable bit depth reduction */
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ controller_test_pattern, color_depth);
+ }
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ {
+ /* restore bitdepth reduction */
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &params);
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &pipes[0];
+ unsigned int lane;
+ unsigned int i;
+ unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
+ union dpcd_training_pattern training_pattern;
+ enum dpcd_phy_test_patterns pattern;
+
+ memset(&training_pattern, 0, sizeof(training_pattern));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream->sink->link == link) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ /* Reset CRTC Test Pattern if it is currently running and request
+ * is VideoMode Reset DP Phy Test Pattern if it is currently running
+ * and request is VideoMode
+ */
+ if (link->test_pattern_enabled && test_pattern ==
+ DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set CRTC Test Pattern */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ dp_set_hw_test_pattern(link, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ /* Unblank Stream */
+ link->dc->hwss.unblank_stream(
+ pipe_ctx,
+ &link->verified_link_cap);
+ /* TODO:m_pHwss->MuteAudioEndpoint
+ * (pPathMode->pDisplayPath, false);
+ */
+
+ /* Reset Test Pattern state */
+ link->test_pattern_enabled = false;
+
+ return true;
+ }
+
+ /* Check for PHY Test Patterns */
+ if (is_dp_phy_pattern(test_pattern)) {
+ /* Set DPCD Lane Settings before running test pattern */
+ if (p_link_settings != NULL) {
+ dp_set_hw_lane_settings(link, p_link_settings);
+ dpcd_set_lane_settings(link, p_link_settings);
+ }
+
+ /* Blank stream if running test pattern */
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /*TODO:
+ * m_pHwss->
+ * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
+ */
+ /* Blank stream */
+ pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ }
+
+ dp_set_hw_test_pattern(link, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ if (p_link_settings != NULL)
+ dpcd_set_link_settings(link,
+ p_link_settings);
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ pattern = PHY_TEST_PATTERN_NONE;
+ break;
+ case DP_TEST_PATTERN_D102:
+ pattern = PHY_TEST_PATTERN_D10_2;
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ pattern = PHY_TEST_PATTERN_PRBS7;
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ pattern = PHY_TEST_PATTERN_CP2520_1;
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ pattern = PHY_TEST_PATTERN_CP2520_2;
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ pattern = PHY_TEST_PATTERN_CP2520_3;
+ break;
+ default:
+ return false;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
+ /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
+ return false;
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.2 or later - DP receiver's link quality
+ * pattern is set using DPCD LINK_QUAL_LANEx_SET
+ * register (0x10B~0x10E)\
+ */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
+ link_qual_pattern[lane] =
+ (unsigned char)(pattern);
+
+ core_link_write_dpcd(link,
+ DP_LINK_QUAL_LANE0_SET,
+ link_qual_pattern,
+ sizeof(link_qual_pattern));
+ } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
+ link->dpcd_caps.dpcd_rev.raw == 0) {
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.1a or earlier - DP receiver's link
+ * quality pattern is set using
+ * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
+ * register (0x102). We will use v_1.3 when we are
+ * setting test pattern for DP 1.1.
+ */
+ core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ }
+ } else {
+ /* CRTC Patterns */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ }
+
+ return true;
+}
+
+void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
+{
+ unsigned char mstmCntl;
+
+ core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+ if (enable)
+ mstmCntl |= DP_MST_EN;
+ else
+ mstmCntl &= (~DP_MST_EN);
+
+ core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
new file mode 100644
index 000000000000..9a33b471270a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -0,0 +1,331 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+
+
+#include "dm_services.h"
+#include "dc.h"
+#include "inc/core_types.h"
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+#include "link_hwss.h"
+#include "hw_sequencer.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dpcd_defs.h"
+
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+ if (!dm_helpers_dp_read_dpcd(link->ctx,
+ link,
+ address, data, size))
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ if (!dm_helpers_dp_write_dpcd(link->ctx,
+ link,
+ address, data, size))
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+ uint8_t state;
+
+ state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+ core_link_write_dpcd(link, DP_SET_POWER, &state,
+ sizeof(state));
+}
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc = link->link_enc;
+
+ struct pipe_ctx *pipes =
+ link->dc->current_state->res_ctx.pipe_ctx;
+ struct clock_source *dp_cs =
+ link->dc->res_pool->dp_clock_source;
+ unsigned int i;
+ /* If the current pixel clock source is not DTO(happens after
+ * switching from HDMI passive dongle to DP on the same connector),
+ * switch the pixel clock source to DTO.
+ */
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->sink != NULL &&
+ pipes[i].stream->sink->link == link) {
+ if (pipes[i].clock_source != NULL &&
+ pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ pipes[i].clock_source = dp_cs;
+ pipes[i].stream_res.pix_clk_params.requested_pix_clk =
+ pipes[i].stream->timing.pix_clk_khz;
+ pipes[i].clock_source->funcs->program_pix_clk(
+ pipes[i].clock_source,
+ &pipes[i].stream_res.pix_clk_params,
+ &pipes[i].pll_settings);
+ }
+ }
+ }
+
+ if (dc_is_dp_sst_signal(signal)) {
+ if (signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_power_control(link->link_enc, true);
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ link->dc->hwss.edp_backlight_control(link, true);
+ } else
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ } else {
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+
+ dp_receiver_power_ctrl(link, true);
+}
+
+static bool edp_receiver_ready_T9(struct dc_link *link)
+{
+ unsigned int tries = 0;
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+ if (edpRev < DP_EDP_12)
+ return true;
+ /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); //MAx T9
+ } while (++tries < 50);
+ return result;
+}
+
+void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
+{
+ if (!link->wa_flags.dp_keep_receiver_powered)
+ dp_receiver_power_ctrl(link, false);
+
+ if (signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, false);
+ edp_receiver_ready_T9(link);
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+ link->dc->hwss.edp_power_control(link->link_enc, false);
+ } else
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+
+ /* Clear current link setting.*/
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+}
+
+void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
+{
+ /* MST disable link only when no stream use the link */
+ if (link->mst_stream_alloc_table.stream_count > 0)
+ return;
+
+ dp_disable_link_phy(link, signal);
+
+ /* set the sink to SST mode after disabling the link */
+ dp_enable_mst_on_sink(link, false);
+}
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern)
+{
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+
+ switch (pattern) {
+ case HW_DP_TRAINING_PATTERN_1:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+ break;
+ case HW_DP_TRAINING_PATTERN_2:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+ break;
+ case HW_DP_TRAINING_PATTERN_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+ break;
+ case HW_DP_TRAINING_PATTERN_4:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ default:
+ break;
+ }
+
+ dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
+
+ return true;
+}
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_settings)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ /* call Encoder to set lane settings */
+ encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
+{
+ /* We need to explicitly check that connector
+ * is not DP. Some Travis_VGA get reported
+ * by video bios as DP.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+ switch (link->dpcd_caps.branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_2:
+ if (strncmp(
+ link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_2,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ case DP_BRANCH_DEVICE_ID_3:
+ if (strncmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_3,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (link->dpcd_caps.panel_mode_edp) {
+ return DP_PANEL_MODE_EDP;
+ }
+
+ return DP_PANEL_MODE_DEFAULT;
+}
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size)
+{
+ struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+ struct link_encoder *encoder = link->link_enc;
+
+ pattern_param.dp_phy_pattern = test_pattern;
+ pattern_param.custom_pattern = custom_pattern;
+ pattern_param.custom_pattern_size = custom_pattern_size;
+ pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+ encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+}
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ struct pipe_ctx *pipes =
+ &link->dc->current_state->res_ctx.pipe_ctx[0];
+ unsigned int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->sink != NULL &&
+ pipes[i].stream->sink->link != NULL &&
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->sink->link == link) {
+ udelay(100);
+
+ pipes[i].stream_res.stream_enc->funcs->dp_blank(
+ pipes[i].stream_res.stream_enc);
+
+ /* disable any test pattern that might be active */
+ dp_set_hw_test_pattern(link,
+ DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ dp_receiver_power_ctrl(link, false);
+
+ link->dc->hwss.disable_stream(&pipes[i], KEEP_ACQUIRED_RESOURCE);
+
+ link->link_enc->funcs->disable_output(
+ link->link_enc,
+ SIGNAL_TYPE_DISPLAY_PORT,
+ link);
+
+ /* Clear current link setting. */
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+
+ link->link_enc->funcs->enable_dp_output(
+ link->link_enc,
+ link_setting,
+ pipes[i].clock_source->id);
+
+ dp_receiver_power_ctrl(link, true);
+
+ perform_link_training_with_retries(
+ link,
+ link_setting,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS);
+
+ link->cur_link_settings = *link_setting;
+
+ link->dc->hwss.enable_stream(&pipes[i]);
+
+ link->dc->hwss.unblank_stream(&pipes[i],
+ link_setting);
+
+ if (pipes[i].stream_res.audio) {
+ /* notify audio driver for
+ * audio modes of monitor */
+ pipes[i].stream_res.audio->funcs->az_enable(
+ pipes[i].stream_res.audio);
+
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than
+ * per link */
+ pipes[i].stream_res.stream_enc->funcs->
+ audio_mute_control(
+ pipes[i].stream_res.stream_enc, false);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
new file mode 100644
index 000000000000..928895809867
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -0,0 +1,2807 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "opp.h"
+#include "timing_generator.h"
+#include "transform.h"
+#include "dpp.h"
+#include "core_types.h"
+#include "set_mode_types.h"
+#include "virtual/virtual_stream_encoder.h"
+
+#include "dce80/dce80_resource.h"
+#include "dce100/dce100_resource.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_resource.h"
+#endif
+#include "dce120/dce120_resource.h"
+
+enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+{
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ switch (asic_id.chip_family) {
+
+ case FAMILY_CI:
+ dc_version = DCE_VERSION_8_0;
+ break;
+ case FAMILY_KV:
+ if (ASIC_REV_IS_KALINDI(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_BHAVANI(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_GODAVARI(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_8_3;
+ else
+ dc_version = DCE_VERSION_8_1;
+ break;
+ case FAMILY_CZ:
+ dc_version = DCE_VERSION_11_0;
+ break;
+
+ case FAMILY_VI:
+ if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_10_0;
+ break;
+ }
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_11_2;
+ }
+ break;
+ case FAMILY_AI:
+ dc_version = DCE_VERSION_12_0;
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case FAMILY_RV:
+ dc_version = DCN_VERSION_1_0;
+ break;
+#endif
+ default:
+ dc_version = DCE_VERSION_UNKNOWN;
+ break;
+ }
+ return dc_version;
+}
+
+struct resource_pool *dc_create_resource_pool(
+ struct dc *dc,
+ int num_virtual_links,
+ enum dce_version dc_version,
+ struct hw_asic_id asic_id)
+{
+ struct resource_pool *res_pool = NULL;
+
+ switch (dc_version) {
+ case DCE_VERSION_8_0:
+ res_pool = dce80_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_8_1:
+ res_pool = dce81_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_8_3:
+ res_pool = dce83_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_10_0:
+ res_pool = dce100_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_11_0:
+ res_pool = dce110_create_resource_pool(
+ num_virtual_links, dc, asic_id);
+ break;
+ case DCE_VERSION_11_2:
+ res_pool = dce112_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_12_0:
+ res_pool = dce120_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ res_pool = dcn10_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+#endif
+
+
+ default:
+ break;
+ }
+ if (res_pool != NULL) {
+ struct dc_firmware_info fw_info = { { 0 } };
+
+ if (dc->ctx->dc_bios->funcs->get_firmware_info(
+ dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
+ res_pool->ref_clock_inKhz = fw_info.pll_info.crystal_frequency;
+ } else
+ ASSERT_CRITICAL(false);
+ }
+
+ return res_pool;
+}
+
+void dc_destroy_resource_pool(struct dc *dc)
+{
+ if (dc) {
+ if (dc->res_pool)
+ dc->res_pool->funcs->destroy(&dc->res_pool);
+
+ kfree(dc->hwseq);
+ }
+}
+
+static void update_num_audio(
+ const struct resource_straps *straps,
+ unsigned int *num_audio,
+ struct audio_support *aud_support)
+{
+ aud_support->dp_audio = true;
+ aud_support->hdmi_audio_native = false;
+ aud_support->hdmi_audio_on_dongle = false;
+
+ if (straps->hdmi_disable == 0) {
+ if (straps->dc_pinstraps_audio & 0x2) {
+ aud_support->hdmi_audio_on_dongle = true;
+ aud_support->hdmi_audio_native = true;
+ }
+ }
+
+ switch (straps->audio_stream_number) {
+ case 0: /* multi streams supported */
+ break;
+ case 1: /* multi streams not supported */
+ *num_audio = 1;
+ break;
+ default:
+ DC_ERR("DC: unexpected audio fuse!\n");
+ }
+}
+
+bool resource_construct(
+ unsigned int num_virtual_links,
+ struct dc *dc,
+ struct resource_pool *pool,
+ const struct resource_create_funcs *create_funcs)
+{
+ struct dc_context *ctx = dc->ctx;
+ const struct resource_caps *caps = pool->res_cap;
+ int i;
+ unsigned int num_audio = caps->num_audio;
+ struct resource_straps straps = {0};
+
+ if (create_funcs->read_dce_straps)
+ create_funcs->read_dce_straps(dc->ctx, &straps);
+
+ pool->audio_count = 0;
+ if (create_funcs->create_audio) {
+ /* find the total number of streams available via the
+ * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+ * registers (one for each pin) starting from pin 1
+ * up to the max number of audio pins.
+ * We stop on the first pin where
+ * PORT_CONNECTIVITY == 1 (as instructed by HW team).
+ */
+ update_num_audio(&straps, &num_audio, &pool->audio_support);
+ for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+ struct audio *aud = create_funcs->create_audio(ctx, i);
+
+ if (aud == NULL) {
+ DC_ERR("DC: failed to create audio!\n");
+ return false;
+ }
+
+ if (!aud->funcs->endpoint_valid(aud)) {
+ aud->funcs->destroy(&aud);
+ break;
+ }
+
+ pool->audios[i] = aud;
+ pool->audio_count++;
+ }
+ }
+
+ pool->stream_enc_count = 0;
+ if (create_funcs->create_stream_encoder) {
+ for (i = 0; i < caps->num_stream_encoder; i++) {
+ pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
+ if (pool->stream_enc[i] == NULL)
+ DC_ERR("DC: failed to create stream_encoder!\n");
+ pool->stream_enc_count++;
+ }
+ }
+ dc->caps.dynamic_audio = false;
+ if (pool->audio_count < pool->stream_enc_count) {
+ dc->caps.dynamic_audio = true;
+ }
+ for (i = 0; i < num_virtual_links; i++) {
+ pool->stream_enc[pool->stream_enc_count] =
+ virtual_stream_encoder_create(
+ ctx, ctx->dc_bios);
+ if (pool->stream_enc[pool->stream_enc_count] == NULL) {
+ DC_ERR("DC: failed to create stream_encoder!\n");
+ return false;
+ }
+ pool->stream_enc_count++;
+ }
+
+ dc->hwseq = create_funcs->create_hwseq(ctx);
+
+ return true;
+}
+
+
+void resource_unreference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != clock_source)
+ continue;
+
+ res_ctx->clock_source_ref_count[i]--;
+
+ break;
+ }
+
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count--;
+}
+
+void resource_reference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != clock_source)
+ continue;
+
+ res_ctx->clock_source_ref_count[i]++;
+ break;
+ }
+
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count++;
+}
+
+bool resource_are_streams_timing_synchronizable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+{
+ if (stream1->timing.h_total != stream2->timing.h_total)
+ return false;
+
+ if (stream1->timing.v_total != stream2->timing.v_total)
+ return false;
+
+ if (stream1->timing.h_addressable
+ != stream2->timing.h_addressable)
+ return false;
+
+ if (stream1->timing.v_addressable
+ != stream2->timing.v_addressable)
+ return false;
+
+ if (stream1->timing.pix_clk_khz
+ != stream2->timing.pix_clk_khz)
+ return false;
+
+ if (stream1->phy_pix_clk != stream2->phy_pix_clk
+ && (!dc_is_dp_signal(stream1->signal)
+ || !dc_is_dp_signal(stream2->signal)))
+ return false;
+
+ return true;
+}
+
+static bool is_sharable_clk_src(
+ const struct pipe_ctx *pipe_with_clk_src,
+ const struct pipe_ctx *pipe)
+{
+ if (pipe_with_clk_src->clock_source == NULL)
+ return false;
+
+ if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
+ && dc_is_dvi_signal(pipe->stream->signal))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe->stream->signal)
+ && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+ return false;
+
+ if (!resource_are_streams_timing_synchronizable(
+ pipe_with_clk_src->stream, pipe->stream))
+ return false;
+
+ return true;
+}
+
+struct clock_source *resource_find_used_clk_src_for_sharing(
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx))
+ return res_ctx->pipe_ctx[i].clock_source;
+ }
+
+ return NULL;
+}
+
+static enum pixel_format convert_pixel_format_to_dalsurface(
+ enum surface_pixel_format surface_pixel_format)
+{
+ enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+
+ switch (surface_pixel_format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ dal_pixel_format = PIXEL_FORMAT_INDEX8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ dal_pixel_format = PIXEL_FORMAT_RGB565;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ dal_pixel_format = PIXEL_FORMAT_RGB565;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ dal_pixel_format = PIXEL_FORMAT_FP16;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ dal_pixel_format = PIXEL_FORMAT_420BPP8;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ dal_pixel_format = PIXEL_FORMAT_420BPP10;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ default:
+ dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+ break;
+ }
+ return dal_pixel_format;
+}
+
+static void rect_swap_helper(struct rect *rect)
+{
+ uint32_t temp = 0;
+
+ temp = rect->height;
+ rect->height = rect->width;
+ rect->width = temp;
+
+ temp = rect->x;
+ rect->x = rect->y;
+ rect->y = temp;
+}
+
+static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect surf_src = plane_state->src_rect;
+ struct rect clip = { 0 };
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool pri_split = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ pri_split = false;
+ sec_split = false;
+ }
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ /* The actual clip is an intersection between stream
+ * source and surface clip
+ */
+ clip.x = stream->src.x > plane_state->clip_rect.x ?
+ stream->src.x : plane_state->clip_rect.x;
+
+ clip.width = stream->src.x + stream->src.width <
+ plane_state->clip_rect.x + plane_state->clip_rect.width ?
+ stream->src.x + stream->src.width - clip.x :
+ plane_state->clip_rect.x + plane_state->clip_rect.width - clip.x ;
+
+ clip.y = stream->src.y > plane_state->clip_rect.y ?
+ stream->src.y : plane_state->clip_rect.y;
+
+ clip.height = stream->src.y + stream->src.height <
+ plane_state->clip_rect.y + plane_state->clip_rect.height ?
+ stream->src.y + stream->src.height - clip.y :
+ plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
+
+ /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+ * num_pixels = clip.num_pix * scl_ratio
+ */
+ data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
+ surf_src.width / plane_state->dst_rect.width;
+ data->viewport.width = clip.width *
+ surf_src.width / plane_state->dst_rect.width;
+
+ data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
+ surf_src.height / plane_state->dst_rect.height;
+ data->viewport.height = clip.height *
+ surf_src.height / plane_state->dst_rect.height;
+
+ /* Round down, compensate in init */
+ data->viewport_c.x = data->viewport.x / vpc_div;
+ data->viewport_c.y = data->viewport.y / vpc_div;
+ data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
+ dal_fixed31_32_half : dal_fixed31_32_zero;
+ data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
+ dal_fixed31_32_half : dal_fixed31_32_zero;
+ /* Round up, assume original video size always even dimensions */
+ data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
+ data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+ /* Handle hsplit */
+ if (pri_split || sec_split) {
+ /* HMirror XOR Secondary_pipe XOR Rotation_180 */
+ bool right_view = (sec_split != plane_state->horizontal_mirror) !=
+ (plane_state->rotation == ROTATION_ANGLE_180);
+
+ if (plane_state->rotation == ROTATION_ANGLE_90
+ || plane_state->rotation == ROTATION_ANGLE_270)
+ /* Secondary_pipe XOR Rotation_270 */
+ right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
+
+ if (right_view) {
+ data->viewport.x += data->viewport.width / 2;
+ data->viewport_c.x += data->viewport_c.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
+ } else {
+ data->viewport.width /= 2;
+ data->viewport_c.width /= 2;
+ }
+ }
+
+ if (plane_state->rotation == ROTATION_ANGLE_90 ||
+ plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+}
+
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect surf_src = plane_state->src_rect;
+ struct rect surf_clip = plane_state->clip_rect;
+ int recout_full_x, recout_full_y;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+ if (stream->src.x < surf_clip.x)
+ pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
+ - stream->src.x) * stream->dst.width
+ / stream->src.width;
+
+ pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
+ stream->dst.width / stream->src.width;
+ if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
+ stream->dst.x + stream->dst.width)
+ pipe_ctx->plane_res.scl_data.recout.width =
+ stream->dst.x + stream->dst.width
+ - pipe_ctx->plane_res.scl_data.recout.x;
+
+ pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+ if (stream->src.y < surf_clip.y)
+ pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
+ - stream->src.y) * stream->dst.height
+ / stream->src.height;
+
+ pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
+ stream->dst.height / stream->src.height;
+ if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
+ stream->dst.y + stream->dst.height)
+ pipe_ctx->plane_res.scl_data.recout.height =
+ stream->dst.y + stream->dst.height
+ - pipe_ctx->plane_res.scl_data.recout.y;
+
+ /* Handle h & vsplit */
+ if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
+ pipe_ctx->plane_state) {
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
+ /* Floor primary pipe, ceil 2ndary pipe */
+ pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ } else {
+ pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+ pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+ }
+ } else if (pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+ pipe_ctx->plane_res.scl_data.recout.height /= 2;
+ else
+ pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ }
+
+ /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
+ * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
+ * ratio)
+ */
+ recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width -
+ surf_src.x * plane_state->dst_rect.width / surf_src.width
+ * stream->dst.width / stream->src.width;
+ recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height -
+ surf_src.y * plane_state->dst_rect.height / surf_src.height
+ * stream->dst.height / stream->src.height;
+
+ recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
+ recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+}
+
+static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect surf_src = plane_state->src_rect;
+ const int in_w = stream->src.width;
+ const int in_h = stream->src.height;
+ const int out_w = stream->dst.width;
+ const int out_h = stream->dst.height;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction(
+ surf_src.width,
+ plane_state->dst_rect.width);
+ pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction(
+ surf_src.height,
+ plane_state->dst_rect.height);
+
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+ pipe_ctx->plane_res.scl_data.ratios.horz.value *= 2;
+ else if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+ pipe_ctx->plane_res.scl_data.ratios.vert.value *= 2;
+
+ pipe_ctx->plane_res.scl_data.ratios.vert.value = div64_s64(
+ pipe_ctx->plane_res.scl_data.ratios.vert.value * in_h, out_h);
+ pipe_ctx->plane_res.scl_data.ratios.horz.value = div64_s64(
+ pipe_ctx->plane_res.scl_data.ratios.horz.value * in_w, out_w);
+
+ pipe_ctx->plane_res.scl_data.ratios.horz_c = pipe_ctx->plane_res.scl_data.ratios.horz;
+ pipe_ctx->plane_res.scl_data.ratios.vert_c = pipe_ctx->plane_res.scl_data.ratios.vert;
+
+ if (pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP8
+ || pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP10) {
+ pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
+ pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
+ }
+}
+
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect src = pipe_ctx->plane_state->src_rect;
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&src);
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+
+ /*
+ * Init calculated according to formula:
+ * init = (scaling_ratio + number_of_taps + 1) / 2
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
+ data->inits.h = dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
+
+ data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
+
+ data->inits.v = dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
+
+ data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
+
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int vp_clip = (src.x + src.width) / vpc_div -
+ data->viewport_c.width - data->viewport_c.x;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int vp_clip = (src.y + src.height) / vpc_div -
+ data->viewport_c.height - data->viewport_c.y;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part;
+
+ data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
+ data->ratios.horz, recout_skip->width));
+ int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : data->viewport.x;
+ data->viewport.x -= int_adj;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.x += int_part - data->taps.h_taps;
+ data->viewport.width -= int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
+ }
+
+ if (data->viewport_c.x) {
+ int int_part;
+
+ data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
+ data->ratios.horz_c, recout_skip->width));
+ int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+ data->viewport_c.x -= int_adj;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.x += int_part - data->taps.h_taps_c;
+ data->viewport_c.width -= int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
+ }
+
+ if (data->viewport.y) {
+ int int_part;
+
+ data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
+ data->ratios.vert, recout_skip->height));
+ int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : data->viewport.y;
+ data->viewport.y -= int_adj;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.y += int_part - data->taps.v_taps;
+ data->viewport.height -= int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
+ }
+
+ if (data->viewport_c.y) {
+ int int_part;
+
+ data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
+ data->ratios.vert_c, recout_skip->height));
+ int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+ data->viewport_c.y -= int_adj;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.y += int_part - data->taps.v_taps_c;
+ data->viewport_c.height -= int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part);
+ }
+
+ /* Interlaced inits based on final vert inits */
+ data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert);
+ data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c);
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+}
+
+bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct view recout_skip = { 0 };
+ bool res = false;
+
+ /* Important: scaling ratio calculation requires pixel format,
+ * lb depth calculation requires recout and taps require scaling ratios.
+ * Inits require viewport, taps, ratios and recout of split pipe
+ */
+ pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+ pipe_ctx->plane_state->format);
+
+ calculate_scaling_ratios(pipe_ctx);
+
+ calculate_viewport(pipe_ctx);
+
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+ return false;
+
+ calculate_recout(pipe_ctx, &recout_skip);
+
+ /**
+ * Setting line buffer pixel depth to 24bpp yields banding
+ * on certain displays, such as the Sharp 4k
+ */
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+
+ pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
+ pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
+
+ pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+ pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
+
+ /* Taps calculations */
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+ if (!res) {
+ /* Try 24 bpp linebuffer */
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
+
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
+ }
+
+ if (res)
+ /* May need to re-check lb size after this in some obscure scenario */
+ calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+
+ dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
+ "%s: Viewport:\nheight:%d width:%d x:%d "
+ "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
+ "y:%d\n",
+ __func__,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ plane_state->dst_rect.height,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y);
+
+ return res;
+}
+
+
+enum dc_status resource_build_scaling_params_for_context(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state != NULL &&
+ context->res_ctx.pipe_ctx[i].stream != NULL)
+ if (!resource_build_scaling_params(&context->res_ctx.pipe_ctx[i]))
+ return DC_FAIL_SCALING;
+ }
+
+ return DC_OK;
+}
+
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+ struct pipe_ctx *secondary_pipe = NULL;
+
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+
+ return secondary_pipe;
+}
+
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ !res_ctx->pipe_ctx[i].top_pipe) {
+ return &res_ctx->pipe_ctx[i];
+ break;
+ }
+ }
+ return NULL;
+}
+
+static struct pipe_ctx *resource_get_tail_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *head_pipe, *tail_pipe;
+ head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+ if (!head_pipe)
+ return NULL;
+
+ tail_pipe = head_pipe->bottom_pipe;
+
+ while (tail_pipe) {
+ head_pipe = tail_pipe;
+ tail_pipe = tail_pipe->bottom_pipe;
+ }
+
+ return head_pipe;
+}
+
+/*
+ * A free_pipe for a stream is defined here as a pipe
+ * that has no surface attached yet
+ */
+static struct pipe_ctx *acquire_free_pipe_for_stream(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ struct pipe_ctx *head_pipe = NULL;
+
+ /* Find head pipe, which has the back end set up*/
+
+ head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+ if (!head_pipe) {
+ ASSERT(0);
+ return NULL;
+ }
+
+ if (!head_pipe->plane_state)
+ return head_pipe;
+
+ /* Re-use pipe already acquired for this stream if available*/
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ !res_ctx->pipe_ctx[i].plane_state) {
+ return &res_ctx->pipe_ctx[i];
+ }
+ }
+
+ /*
+ * At this point we have no re-useable pipe for this stream and we need
+ * to acquire an idle one to satisfy the request
+ */
+
+ if (!pool->funcs->acquire_idle_pipe_for_layer)
+ return NULL;
+
+ return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream);
+
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+static int acquire_first_split_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state) {
+ pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+ if (pipe_ctx->bottom_pipe)
+ pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+ memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+ pipe_ctx->stream_res.tg = pool->timing_generators[i];
+ pipe_ctx->plane_res.hubp = pool->hubps[i];
+ pipe_ctx->plane_res.ipp = pool->ipps[i];
+ pipe_ctx->plane_res.dpp = pool->dpps[i];
+ pipe_ctx->stream_res.opp = pool->opps[i];
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->stream = stream;
+ return i;
+ }
+ }
+ return -1;
+}
+#endif
+
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ int i;
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe;
+ struct dc_stream_status *stream_status = NULL;
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ return false;
+ }
+
+
+ if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ return false;
+ }
+
+ head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!head_pipe) {
+ dm_error("Head pipe not found for stream_state %p !\n", stream);
+ return false;
+ }
+
+ free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (!free_pipe) {
+ int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+ if (pipe_idx >= 0)
+ free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+ }
+#endif
+ if (!free_pipe)
+ return false;
+
+ /* retain new surfaces */
+ dc_plane_state_retain(plane_state);
+ free_pipe->plane_state = plane_state;
+
+ if (head_pipe != free_pipe) {
+
+ tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+ ASSERT(tail_pipe);
+
+ free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
+ free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
+ free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
+ free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
+ free_pipe->clock_source = tail_pipe->clock_source;
+ free_pipe->top_pipe = tail_pipe;
+ tail_pipe->bottom_pipe = free_pipe;
+ }
+
+ /* assign new surfaces*/
+ stream_status->plane_states[stream_status->plane_count] = plane_state;
+
+ stream_status->plane_count++;
+
+ return true;
+}
+
+bool dc_remove_plane_from_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ int i;
+ struct dc_stream_status *stream_status = NULL;
+ struct resource_pool *pool = dc->res_pool;
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to remove plane.\n");
+ return false;
+ }
+
+ /* release pipe for plane*/
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *pipe_ctx;
+
+ if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->top_pipe)
+ pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+
+ /* Second condition is to avoid setting NULL to top pipe
+ * of tail pipe making it look like head pipe in subsequent
+ * deletes
+ */
+ if (pipe_ctx->bottom_pipe && pipe_ctx->top_pipe)
+ pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+ /*
+ * For head pipe detach surfaces from pipe for tail
+ * pipe just zero it out
+ */
+ if (!pipe_ctx->top_pipe) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ } else {
+ memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+ }
+ }
+ }
+
+
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i] == plane_state) {
+
+ dc_plane_state_release(stream_status->plane_states[i]);
+ break;
+ }
+ }
+
+ if (i == stream_status->plane_count) {
+ dm_error("Existing plane_state not found; failed to detach it!\n");
+ return false;
+ }
+
+ stream_status->plane_count--;
+
+ /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+ for (; i < stream_status->plane_count; i++)
+ stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+ stream_status->plane_states[stream_status->plane_count] = NULL;
+
+ return true;
+}
+
+bool dc_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++)
+ if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
+ return false;
+
+ return true;
+}
+
+static bool add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+ break;
+
+ if (i == set_count) {
+ dm_error("Stream %p not found in set!\n", stream);
+ return false;
+ }
+
+ for (j = 0; j < set[i].plane_count; j++)
+ if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
+ return false;
+
+ return true;
+}
+
+bool dc_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *context)
+{
+ struct dc_validation_set set;
+ int i;
+
+ set.stream = stream;
+ set.plane_count = plane_count;
+
+ for (i = 0; i < plane_count; i++)
+ set.plane_states[i] = plane_states[i];
+
+ return add_all_planes_for_stream(dc, stream, &set, 1, context);
+}
+
+
+
+static bool is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream)
+{
+ if (cur_stream == NULL)
+ return true;
+
+ /* If sink pointer changed, it means this is a hotplug, we should do
+ * full hw setting.
+ */
+ if (cur_stream->sink != new_stream->sink)
+ return true;
+
+ /* If output color space is changed, need to reprogram info frames */
+ if (cur_stream->output_color_space != new_stream->output_color_space)
+ return true;
+
+ return memcmp(
+ &cur_stream->timing,
+ &new_stream->timing,
+ sizeof(struct dc_crtc_timing)) != 0;
+}
+
+static bool are_stream_backends_same(
+ struct dc_stream_state *stream_a, struct dc_stream_state *stream_b)
+{
+ if (stream_a == stream_b)
+ return true;
+
+ if (stream_a == NULL || stream_b == NULL)
+ return false;
+
+ if (is_timing_changed(stream_a, stream_b))
+ return false;
+
+ return true;
+}
+
+bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+
+ if (!are_stream_backends_same(old_stream, stream))
+ return false;
+
+ return true;
+}
+
+bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+ if (old_stream == stream)
+ return true;
+
+ if (old_stream == NULL || stream == NULL)
+ return false;
+
+ if (memcmp(&old_stream->src,
+ &stream->src,
+ sizeof(struct rect)) != 0)
+ return false;
+
+ if (memcmp(&old_stream->dst,
+ &stream->dst,
+ sizeof(struct rect)) != 0)
+ return false;
+
+ return true;
+}
+
+/* Maximum TMDS single link pixel clock 165MHz */
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
+
+static void update_stream_engine_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct stream_encoder *stream_enc,
+ bool acquired)
+{
+ int i;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i] == stream_enc)
+ res_ctx->is_stream_enc_acquired[i] = acquired;
+ }
+}
+
+/* TODO: release audio object */
+void update_audio_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct audio *audio,
+ bool acquired)
+{
+ int i;
+ for (i = 0; i < pool->audio_count; i++) {
+ if (pool->audios[i] == audio)
+ res_ctx->is_audio_acquired[i] = acquired;
+ }
+}
+
+static int acquire_first_free_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[i];
+ pipe_ctx->plane_res.mi = pool->mis[i];
+ pipe_ctx->plane_res.hubp = pool->hubps[i];
+ pipe_ctx->plane_res.ipp = pool->ipps[i];
+ pipe_ctx->plane_res.xfm = pool->transforms[i];
+ pipe_ctx->plane_res.dpp = pool->dpps[i];
+ pipe_ctx->stream_res.opp = pool->opps[i];
+ pipe_ctx->pipe_idx = i;
+
+
+ pipe_ctx->stream = stream;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static struct stream_encoder *find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ int j = -1;
+ struct dc_link *link = stream->sink->link;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (!res_ctx->is_stream_enc_acquired[i] &&
+ pool->stream_enc[i]) {
+ /* Store first available for MST second display
+ * in daisy chain use case */
+ j = i;
+ if (pool->stream_enc[i]->id ==
+ link->link_enc->preferred_engine)
+ return pool->stream_enc[i];
+ }
+ }
+
+ /*
+ * below can happen in cases when stream encoder is acquired:
+ * 1) for second MST display in chain, so preferred engine already
+ * acquired;
+ * 2) for another link, which preferred engine already acquired by any
+ * MST configuration.
+ *
+ * If signal is of DP type and preferred engine not found, return last available
+ *
+ * TODO - This is just a patch up and a generic solution is
+ * required for non DP connectors.
+ */
+
+ if (j >= 0 && dc_is_dp_signal(stream->signal))
+ return pool->stream_enc[j];
+
+ return NULL;
+}
+
+static struct audio *find_first_free_audio(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ enum engine_id id)
+{
+ int i;
+ for (i = 0; i < pool->audio_count; i++) {
+ if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+ /*we have enough audio endpoint, find the matching inst*/
+ if (id != i)
+ continue;
+
+ return pool->audios[i];
+ }
+ }
+ /*not found the matching one, first come first serve*/
+ for (i = 0; i < pool->audio_count; i++) {
+ if (res_ctx->is_audio_acquired[i] == false) {
+ return pool->audios[i];
+ }
+ }
+ return 0;
+}
+
+bool resource_is_stream_unchanged(
+ struct dc_state *old_context, struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < old_context->stream_count; i++) {
+ struct dc_stream_state *old_stream = old_context->streams[i];
+
+ if (are_stream_backends_same(old_stream, stream))
+ return true;
+ }
+
+ return false;
+}
+
+enum dc_status dc_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ enum dc_status res;
+
+ if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
+ DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ new_ctx->streams[new_ctx->stream_count] = stream;
+ dc_stream_retain(stream);
+ new_ctx->stream_count++;
+
+ res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
+ if (res != DC_OK)
+ DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+
+ return res;
+}
+
+enum dc_status dc_remove_stream_from_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_context *dc_ctx = dc->ctx;
+ struct pipe_ctx *del_pipe = NULL;
+
+ /* Release primary pipe */
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
+ !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+ ASSERT(del_pipe->stream_res.stream_enc);
+ update_stream_engine_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.stream_enc,
+ false);
+
+ if (del_pipe->stream_res.audio)
+ update_audio_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.audio,
+ false);
+
+ resource_unreference_clock_source(&new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->clock_source);
+
+ memset(del_pipe, 0, sizeof(*del_pipe));
+
+ break;
+ }
+ }
+
+ if (!del_pipe) {
+ DC_ERROR("Pipe not found for stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++)
+ if (new_ctx->streams[i] == stream)
+ break;
+
+ if (new_ctx->streams[i] != stream) {
+ DC_ERROR("Context doesn't have stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ dc_stream_release(new_ctx->streams[i]);
+ new_ctx->stream_count--;
+
+ /* Trim back arrays */
+ for (; i < new_ctx->stream_count; i++) {
+ new_ctx->streams[i] = new_ctx->streams[i + 1];
+ new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
+ }
+
+ new_ctx->streams[new_ctx->stream_count] = NULL;
+ memset(
+ &new_ctx->stream_status[new_ctx->stream_count],
+ 0,
+ sizeof(new_ctx->stream_status[0]));
+
+ return DC_OK;
+}
+
+static void copy_pipe_ctx(
+ const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
+{
+ struct dc_plane_state *plane_state = to_pipe_ctx->plane_state;
+ struct dc_stream_state *stream = to_pipe_ctx->stream;
+
+ *to_pipe_ctx = *from_pipe_ctx;
+ to_pipe_ctx->stream = stream;
+ if (plane_state != NULL)
+ to_pipe_ctx->plane_state = plane_state;
+}
+
+static struct dc_stream_state *find_pll_sharable_stream(
+ struct dc_stream_state *stream_needs_pll,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream_has_pll = context->streams[i];
+
+ /* We are looking for non dp, non virtual stream */
+ if (resource_are_streams_timing_synchronizable(
+ stream_needs_pll, stream_has_pll)
+ && !dc_is_dp_signal(stream_has_pll->signal)
+ && stream_has_pll->sink->link->connector_signal
+ != SIGNAL_TYPE_VIRTUAL)
+ return stream_has_pll;
+
+ }
+
+ return NULL;
+}
+
+static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+{
+ uint32_t pix_clk = timing->pix_clk_khz;
+ uint32_t normalized_pix_clk = pix_clk;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pix_clk /= 2;
+ if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ normalized_pix_clk = pix_clk;
+ break;
+ case COLOR_DEPTH_101010:
+ normalized_pix_clk = (pix_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_pix_clk = (pix_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_pix_clk = (pix_clk * 48) / 24;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ return normalized_pix_clk;
+}
+
+static void calculate_phy_pix_clks(struct dc_stream_state *stream)
+{
+ /* update actual pixel clock on all streams */
+ if (dc_is_hdmi_signal(stream->signal))
+ stream->phy_pix_clk = get_norm_pix_clk(
+ &stream->timing);
+ else
+ stream->phy_pix_clk =
+ stream->timing.pix_clk_khz;
+}
+
+enum dc_status resource_map_pool_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ const struct resource_pool *pool = dc->res_pool;
+ int i;
+ struct dc_context *dc_ctx = dc->ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ int pipe_idx = -1;
+
+ /* TODO Check if this is needed */
+ /*if (!resource_is_stream_unchanged(old_context, stream)) {
+ if (stream != NULL && old_context->streams[i] != NULL) {
+ stream->bit_depth_params =
+ old_context->streams[i]->bit_depth_params;
+ stream->clamping = old_context->streams[i]->clamping;
+ continue;
+ }
+ }
+ */
+
+ /* acquire new resources */
+ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (pipe_idx < 0)
+ pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+#endif
+
+ if (pipe_idx < 0)
+ return DC_NO_CONTROLLER_RESOURCE;
+
+ pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+ pipe_ctx->stream_res.stream_enc =
+ find_first_free_match_stream_enc_for_link(
+ &context->res_ctx, pool, stream);
+
+ if (!pipe_ctx->stream_res.stream_enc)
+ return DC_NO_STREAM_ENG_RESOURCE;
+
+ update_stream_engine_usage(
+ &context->res_ctx, pool,
+ pipe_ctx->stream_res.stream_enc,
+ true);
+
+ /* TODO: Add check if ASIC support and EDID audio */
+ if (!stream->sink->converter_disable_audio &&
+ dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
+ stream->audio_info.mode_count) {
+ pipe_ctx->stream_res.audio = find_first_free_audio(
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+
+ /*
+ * Audio assigned in order first come first get.
+ * There are asics which has number of audio
+ * resources less then number of pipes
+ */
+ if (pipe_ctx->stream_res.audio)
+ update_audio_usage(&context->res_ctx, pool,
+ pipe_ctx->stream_res.audio, true);
+ }
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ return DC_OK;
+ }
+
+ DC_ERROR("Stream %p not found in new ctx!\n", stream);
+ return DC_ERROR_UNEXPECTED;
+}
+
+/* first stream in the context is used to populate the rest */
+void validate_guaranteed_copy_streams(
+ struct dc_state *context,
+ int max_streams)
+{
+ int i;
+
+ for (i = 1; i < max_streams; i++) {
+ context->streams[i] = context->streams[0];
+
+ copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
+ &context->res_ctx.pipe_ctx[i]);
+ context->res_ctx.pipe_ctx[i].stream =
+ context->res_ctx.pipe_ctx[0].stream;
+
+ dc_stream_retain(context->streams[i]);
+ context->stream_count++;
+ }
+}
+
+void dc_resource_state_copy_construct_current(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+{
+ dc_resource_state_copy_construct(dc->current_state, dst_ctx);
+}
+
+
+void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+{
+ dst_ctx->dis_clk = dc->res_pool->display_clock;
+}
+
+enum dc_status dc_validate_global_state(
+ struct dc *dc,
+ struct dc_state *new_ctx)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ int i, j;
+
+ if (!new_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc->res_pool->funcs->validate_global) {
+ result = dc->res_pool->funcs->validate_global(dc, new_ctx);
+ if (result != DC_OK)
+ return result;
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++) {
+ struct dc_stream_state *stream = new_ctx->streams[i];
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* Switch to dp clock source only if there is
+ * no non dp stream that shares the same timing
+ * with the dp stream.
+ */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ !find_pll_sharable_stream(stream, new_ctx)) {
+
+ resource_unreference_clock_source(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+
+ pipe_ctx->clock_source = dc->res_pool->dp_clock_source;
+ resource_reference_clock_source(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+ }
+ }
+ }
+
+ result = resource_build_scaling_params_for_context(dc, new_ctx);
+
+ if (result == DC_OK)
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static void patch_gamut_packet_checksum(
+ struct encoder_info_packet *gamut_packet)
+{
+ /* For gamut we recalc checksum */
+ if (gamut_packet->valid) {
+ uint8_t chk_sum = 0;
+ uint8_t *ptr;
+ uint8_t i;
+
+ /*start of the Gamut data. */
+ ptr = &gamut_packet->sb[3];
+
+ for (i = 0; i <= gamut_packet->sb[1]; i++)
+ chk_sum += ptr[i];
+
+ gamut_packet->sb[2] = (uint8_t) (0x100 - chk_sum);
+ }
+}
+
+static void set_avi_info_frame(
+ struct encoder_info_packet *info_packet,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+ struct info_frame info_frame = { {0} };
+ uint32_t pixel_encoding = 0;
+ enum scanning_type scan_type = SCANNING_TYPE_NODATA;
+ enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
+ bool itc = false;
+ uint8_t itc_value = 0;
+ uint8_t cn0_cn1 = 0;
+ unsigned int cn0_cn1_value = 0;
+ uint8_t *check_sum = NULL;
+ uint8_t byte_index = 0;
+ union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi;
+ union display_content_support support = {0};
+ unsigned int vic = pipe_ctx->stream->timing.vic;
+ enum dc_timing_3d_format format;
+
+ color_space = pipe_ctx->stream->output_color_space;
+ if (color_space == COLOR_SPACE_UNKNOWN)
+ color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
+ COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;
+
+ /* Initialize header */
+ hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
+ /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
+ * not be used in HDMI 2.0 (Section 10.1) */
+ hdmi_info->bits.header.version = 2;
+ hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
+
+ /*
+ * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
+ * according to HDMI 2.0 spec (Section 10.1)
+ */
+
+ switch (stream->timing.pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ pixel_encoding = 1;
+ break;
+
+ case PIXEL_ENCODING_YCBCR444:
+ pixel_encoding = 2;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ pixel_encoding = 3;
+ break;
+
+ case PIXEL_ENCODING_RGB:
+ default:
+ pixel_encoding = 0;
+ }
+
+ /* Y0_Y1_Y2 : The pixel encoding */
+ /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
+ hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding;
+
+ /* A0 = 1 Active Format Information valid */
+ hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID;
+
+ /* B0, B1 = 3; Bar info data is valid */
+ hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID;
+
+ hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
+
+ /* S0, S1 : Underscan / Overscan */
+ /* TODO: un-hardcode scan type */
+ scan_type = SCANNING_TYPE_UNDERSCAN;
+ hdmi_info->bits.S0_S1 = scan_type;
+
+ /* C0, C1 : Colorimetry */
+ if (color_space == COLOR_SPACE_YCBCR709 ||
+ color_space == COLOR_SPACE_YCBCR709_LIMITED)
+ hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709;
+ else if (color_space == COLOR_SPACE_YCBCR601 ||
+ color_space == COLOR_SPACE_YCBCR601_LIMITED)
+ hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601;
+ else {
+ hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA;
+ }
+ if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
+ color_space == COLOR_SPACE_2020_YCBCR) {
+ hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
+ hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ } else if (color_space == COLOR_SPACE_ADOBERGB) {
+ hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
+ hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ }
+
+ /* TODO: un-hardcode aspect ratio */
+ aspect = stream->timing.aspect_ratio;
+
+ switch (aspect) {
+ case ASPECT_RATIO_4_3:
+ case ASPECT_RATIO_16_9:
+ hdmi_info->bits.M0_M1 = aspect;
+ break;
+
+ case ASPECT_RATIO_NO_DATA:
+ case ASPECT_RATIO_64_27:
+ case ASPECT_RATIO_256_135:
+ default:
+ hdmi_info->bits.M0_M1 = 0;
+ }
+
+ /* Active Format Aspect ratio - same as Picture Aspect Ratio. */
+ hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+
+ /* TODO: un-hardcode cn0_cn1 and itc */
+
+ cn0_cn1 = 0;
+ cn0_cn1_value = 0;
+
+ itc = true;
+ itc_value = 1;
+
+ support = stream->sink->edid_caps.content_support;
+
+ if (itc) {
+ if (!support.bits.valid_content_type) {
+ cn0_cn1_value = 0;
+ } else {
+ if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GRAPHICS) {
+ if (support.bits.graphics_content == 1) {
+ cn0_cn1_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_PHOTO) {
+ if (support.bits.photo_content == 1) {
+ cn0_cn1_value = 1;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_CINEMA) {
+ if (support.bits.cinema_content == 1) {
+ cn0_cn1_value = 2;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GAME) {
+ if (support.bits.game_content == 1) {
+ cn0_cn1_value = 3;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ }
+ }
+ hdmi_info->bits.CN0_CN1 = cn0_cn1_value;
+ hdmi_info->bits.ITC = itc_value;
+ }
+
+ /* TODO : We should handle YCC quantization */
+ /* but we do not have matrix calculation */
+ if (stream->sink->edid_caps.qs_bit == 1 &&
+ stream->sink->edid_caps.qy_bit == 1) {
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+ } else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ } else {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ }
+ } else {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ }
+
+ ///VIC
+ format = stream->timing.timing_3d_format;
+ /*todo, add 3DStereo support*/
+ if (format != TIMING_3D_FORMAT_NONE) {
+ // Based on HDMI specs hdmi vic needs to be converted to cea vic when 3D is enabled
+ switch (pipe_ctx->stream->timing.hdmi_vic) {
+ case 1:
+ vic = 95;
+ break;
+ case 2:
+ vic = 94;
+ break;
+ case 3:
+ vic = 93;
+ break;
+ case 4:
+ vic = 98;
+ break;
+ default:
+ break;
+ }
+ }
+ hdmi_info->bits.VIC0_VIC7 = vic;
+
+ /* pixel repetition
+ * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
+ * repetition start from 1 */
+ hdmi_info->bits.PR0_PR3 = 0;
+
+ /* Bar Info
+ * barTop: Line Number of End of Top Bar.
+ * barBottom: Line Number of Start of Bottom Bar.
+ * barLeft: Pixel Number of End of Left Bar.
+ * barRight: Pixel Number of Start of Right Bar. */
+ hdmi_info->bits.bar_top = stream->timing.v_border_top;
+ hdmi_info->bits.bar_bottom = (stream->timing.v_total
+ - stream->timing.v_border_bottom + 1);
+ hdmi_info->bits.bar_left = stream->timing.h_border_left;
+ hdmi_info->bits.bar_right = (stream->timing.h_total
+ - stream->timing.h_border_right + 1);
+
+ /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
+ check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+
+ *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
+
+ for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
+ *check_sum += hdmi_info->packet_raw_data.sb[byte_index];
+
+ /* one byte complement */
+ *check_sum = (uint8_t) (0x100 - *check_sum);
+
+ /* Store in hw_path_mode */
+ info_packet->hb0 = hdmi_info->packet_raw_data.hb0;
+ info_packet->hb1 = hdmi_info->packet_raw_data.hb1;
+ info_packet->hb2 = hdmi_info->packet_raw_data.hb2;
+
+ for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet.
+ info_packet_hdmi.packet_raw_data.sb); byte_index++)
+ info_packet->sb[byte_index] = info_frame.avi_info_packet.
+ info_packet_hdmi.packet_raw_data.sb[byte_index];
+
+ info_packet->valid = true;
+}
+
+static void set_vendor_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ uint32_t length = 0;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ // Can be different depending on packet content /*todo*/
+ // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
+
+ info_packet->valid = false;
+
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ /* Can be different depending on packet content */
+ length = 5;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160)
+ hdmi_vic_mode = true;
+
+ /* According to HDMI 1.4a CTS, VSIF should be sent
+ * for both 3D stereo and HDMI VIC modes.
+ * For all other modes, there is no VSIF sent. */
+
+ if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+ return;
+
+ /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+
+ /*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
+ * The value for HDMI_Video_Format are:
+ * 0x0 (0b000) - No additional HDMI video format is presented in this
+ * packet
+ * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
+ * parameter follows
+ * 0x2 (0b010) - 3D format indication present. 3D_Structure and
+ * potentially 3D_Ext_Data follows
+ * 0x3..0x7 (0b011..0b111) - reserved for future use */
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
+ * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure.
+ * The value for 3D_Structure are:
+ * 0x0 - Frame Packing
+ * 0x1 - Field Alternative
+ * 0x2 - Line Alternative
+ * 0x3 - Side-by-Side (full)
+ * 0x4 - L + depth
+ * 0x5 - L + depth + graphics + graphics-depth
+ * 0x6 - Top-and-Bottom
+ * 0x7 - Reserved for future use
+ * 0x8 - Side-by-Side (Half)
+ * 0x9..0xE - Reserved for future use
+ * 0xF - Not used */
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ /*PB5: If PB4 is set to 0x1 (extended resolution format)
+ * fill PB5 with the correct HDMI VIC code */
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ /* Header */
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
+ info_packet->hb1 = 0x01; /* Version */
+
+ /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
+ info_packet->hb2 = (uint8_t) (length);
+
+ /* Calculate checksum */
+ checksum = 0;
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
+
+static void set_spd_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ /* SPD info packet for FreeSync */
+
+ unsigned char checksum = 0;
+ unsigned int idx, payload_size = 0;
+
+ /* Check if Freesync is supported. Return if false. If true,
+ * set the corresponding bit in the info packet
+ */
+ if (stream->freesync_ctx.supported == false)
+ return;
+
+ if (dc_is_hdmi_signal(stream->signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB1 = Version = 0x01 */
+ info_packet->hb1 = 0x01;
+
+ /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
+ info_packet->hb2 = 0x08;
+
+ payload_size = 0x08;
+
+ } else if (dc_is_dp_signal(stream->signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Secondary-data Packet ID = 0 - Only non-zero
+ * when used to associate audio related info packets
+ */
+ info_packet->hb0 = 0x00;
+
+ /* HB1 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ info_packet->hb1 = HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB2 = [Bits 7:0 = Least significant eight bits -
+ * For INFOFRAME, the value must be 1Bh]
+ */
+ info_packet->hb2 = 0x1B;
+
+ /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
+ * [Bits 1:0 = Most significant two bits = 0x00]
+ */
+ info_packet->hb3 = 0x04;
+
+ payload_size = 0x1B;
+ }
+
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ info_packet->sb[1] = 0x1A;
+
+ /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+ info_packet->sb[2] = 0x00;
+
+ /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+ info_packet->sb[3] = 0x00;
+
+ /* PB4 = Reserved */
+ info_packet->sb[4] = 0x00;
+
+ /* PB5 = Reserved */
+ info_packet->sb[5] = 0x00;
+
+ /* PB6 = [Bits 7:3 = Reserved] */
+ info_packet->sb[6] = 0x00;
+
+ if (stream->freesync_ctx.supported == true)
+ /* PB6 = [Bit 0 = FreeSync Supported] */
+ info_packet->sb[6] |= 0x01;
+
+ if (stream->freesync_ctx.enabled == true)
+ /* PB6 = [Bit 1 = FreeSync Enabled] */
+ info_packet->sb[6] |= 0x02;
+
+ if (stream->freesync_ctx.active == true)
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ info_packet->sb[6] |= 0x04;
+
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ info_packet->sb[7] = (unsigned char) (stream->freesync_ctx.
+ min_refresh_in_micro_hz / 1000000);
+
+ /* PB8 = FreeSync Maximum refresh rate (Hz)
+ *
+ * Note: We do not use the maximum capable refresh rate
+ * of the panel, because we should never go above the field
+ * rate of the mode timing set.
+ */
+ info_packet->sb[8] = (unsigned char) (stream->freesync_ctx.
+ nominal_refresh_in_micro_hz / 1000000);
+
+ /* PB9 - PB27 = Reserved */
+ for (idx = 9; idx <= 27; idx++)
+ info_packet->sb[idx] = 0x00;
+
+ /* Calculate checksum */
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+ checksum += info_packet->hb3;
+
+ for (idx = 1; idx <= payload_size; idx++)
+ checksum += info_packet->sb[idx];
+
+ /* PB0 = Checksum (one byte complement) */
+ info_packet->sb[0] = (unsigned char) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
+
+static void set_hdr_static_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_plane_state *plane_state,
+ struct dc_stream_state *stream)
+{
+ uint16_t i = 0;
+ enum signal_type signal = stream->signal;
+ struct dc_hdr_static_metadata hdr_metadata;
+ uint32_t data;
+
+ if (!plane_state)
+ return;
+
+ hdr_metadata = plane_state->hdr_static_ctx;
+
+ if (!hdr_metadata.hdr_supported)
+ return;
+
+ if (dc_is_hdmi_signal(signal)) {
+ info_packet->valid = true;
+
+ info_packet->hb0 = 0x87;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = 0x1A;
+ i = 1;
+ } else if (dc_is_dp_signal(signal)) {
+ info_packet->valid = true;
+
+ info_packet->hb0 = 0x00;
+ info_packet->hb1 = 0x87;
+ info_packet->hb2 = 0x1D;
+ info_packet->hb3 = (0x13 << 2);
+ i = 2;
+ }
+
+ data = hdr_metadata.is_hdr;
+ info_packet->sb[i++] = data ? 0x02 : 0x00;
+ info_packet->sb[i++] = 0x00;
+
+ data = hdr_metadata.chromaticity_green_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_green_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_blue_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_blue_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_red_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_red_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_white_point_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_white_point_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.max_luminance;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.min_luminance;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.maximum_content_light_level;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.maximum_frame_average_light_level;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ if (dc_is_hdmi_signal(signal)) {
+ uint32_t checksum = 0;
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= info_packet->hb2; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = 0x100 - checksum;
+ } else if (dc_is_dp_signal(signal)) {
+ info_packet->sb[0] = 0x01;
+ info_packet->sb[1] = 0x1A;
+ }
+}
+
+static void set_vsc_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ unsigned int vscPacketRevision = 0;
+ unsigned int i;
+
+ if (stream->sink->link->psr_enabled) {
+ vscPacketRevision = 2;
+ }
+
+ /* VSC packet not needed based on the features
+ * supported by this DP display
+ */
+ if (vscPacketRevision == 0)
+ return;
+
+ if (vscPacketRevision == 0x2) {
+ /* Secondary-data Packet ID = 0*/
+ info_packet->hb0 = 0x00;
+ /* 07h - Packet Type Value indicating Video
+ * Stream Configuration packet
+ */
+ info_packet->hb1 = 0x07;
+ /* 02h = VSC SDP supporting 3D stereo and PSR
+ * (applies to eDP v1.3 or higher).
+ */
+ info_packet->hb2 = 0x02;
+ /* 08h = VSC packet supporting 3D stereo + PSR
+ * (HB2 = 02h).
+ */
+ info_packet->hb3 = 0x08;
+
+ for (i = 0; i < 28; i++)
+ info_packet->sb[i] = 0;
+
+ info_packet->valid = true;
+ }
+
+ /*TODO: stereo 3D support and extend pixel encoding colorimetry*/
+}
+
+void dc_resource_state_destruct(struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < context->stream_count; i++) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++)
+ dc_plane_state_release(
+ context->stream_status[i].plane_states[j]);
+
+ context->stream_status[i].plane_count = 0;
+ dc_stream_release(context->streams[i]);
+ context->streams[i] = NULL;
+ }
+}
+
+/*
+ * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
+ * by the src_ctx
+ */
+void dc_resource_state_copy_construct(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx)
+{
+ int i, j;
+ struct kref refcount = dst_ctx->refcount;
+
+ *dst_ctx = *src_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ }
+
+ for (i = 0; i < dst_ctx->stream_count; i++) {
+ dc_stream_retain(dst_ctx->streams[i]);
+ for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ dst_ctx->stream_status[i].plane_states[j]);
+ }
+
+ /* context refcount should not be overridden */
+ dst_ctx->refcount = refcount;
+
+}
+
+struct clock_source *dc_resource_find_first_free_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; ++i) {
+ if (res_ctx->clock_source_ref_count[i] == 0)
+ return pool->clock_sources[i];
+ }
+
+ return NULL;
+}
+
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* default all packets to invalid */
+ info->avi.valid = false;
+ info->gamut.valid = false;
+ info->vendor.valid = false;
+ info->spd.valid = false;
+ info->hdrsmd.valid = false;
+ info->vsc.valid = false;
+
+ signal = pipe_ctx->stream->signal;
+
+ /* HDMi and DP have different info packets*/
+ if (dc_is_hdmi_signal(signal)) {
+ set_avi_info_frame(&info->avi, pipe_ctx);
+
+ set_vendor_info_packet(&info->vendor, pipe_ctx->stream);
+
+ set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+ set_hdr_static_info_packet(&info->hdrsmd,
+ pipe_ctx->plane_state, pipe_ctx->stream);
+
+ } else if (dc_is_dp_signal(signal)) {
+ set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
+
+ set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+ set_hdr_static_info_packet(&info->hdrsmd,
+ pipe_ctx->plane_state, pipe_ctx->stream);
+ }
+
+ patch_gamut_packet_checksum(&info->gamut);
+}
+
+enum dc_status resource_map_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ /* acquire new resources */
+ const struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ &context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)
+ || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->clock_source = pool->dp_clock_source;
+ else {
+ pipe_ctx->clock_source = NULL;
+
+ if (!dc->config.disable_disp_pll_sharing)
+ pipe_ctx->clock_source = resource_find_used_clk_src_for_sharing(
+ &context->res_ctx,
+ pipe_ctx);
+
+ if (pipe_ctx->clock_source == NULL)
+ pipe_ctx->clock_source =
+ dc_resource_find_first_free_pll(
+ &context->res_ctx,
+ pool);
+ }
+
+ if (pipe_ctx->clock_source == NULL)
+ return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+ resource_reference_clock_source(
+ &context->res_ctx, pool,
+ pipe_ctx->clock_source);
+
+ return DC_OK;
+}
+
+/*
+ * Note: We need to disable output if clock sources change,
+ * since bios does optimization and doesn't apply if changing
+ * PHY when not already disabled.
+ */
+bool pipe_need_reprogram(
+ struct pipe_ctx *pipe_ctx_old,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx_old->stream)
+ return false;
+
+ if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
+ return true;
+
+ if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
+ return true;
+
+ if (pipe_ctx_old->stream_res.audio != pipe_ctx->stream_res.audio)
+ return true;
+
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
+ && pipe_ctx_old->stream != pipe_ctx->stream)
+ return true;
+
+ if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
+ return true;
+
+ if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ return true;
+
+
+ return false;
+}
+
+void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth)
+{
+ enum dc_dither_option option = stream->dither_option;
+ enum dc_pixel_encoding pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
+ if (option == DITHER_OPTION_DEFAULT) {
+ switch (stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ option = DITHER_OPTION_SPATIAL6;
+ break;
+ case COLOR_DEPTH_888:
+ option = DITHER_OPTION_SPATIAL8;
+ break;
+ case COLOR_DEPTH_101010:
+ option = DITHER_OPTION_SPATIAL10;
+ break;
+ default:
+ option = DITHER_OPTION_DISABLE;
+ }
+ }
+
+ if (option == DITHER_OPTION_DISABLE)
+ return;
+
+ if (option == DITHER_OPTION_TRUN6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
+ } else if (option == DITHER_OPTION_TRUN8 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
+ } else if (option == DITHER_OPTION_TRUN10 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ }
+
+ /* special case - Formatter can only reduce by 4 bits at most.
+ * When reducing from 12 to 6 bits,
+ * HW recommends we use trunc with round mode
+ * (if we did nothing, trunc to 10 bits would be used)
+ * note that any 12->10 bit reduction is ignored prior to DCE8,
+ * as the input was 10 bits.
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
+ }
+
+ /* spatial dither
+ * note that spatial modes 1-3 are never used
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL10 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ }
+
+ if (option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL10) {
+ fmt_bit_depth->flags.FRAME_RANDOM = 0;
+ } else {
+ fmt_bit_depth->flags.FRAME_RANDOM = 1;
+ }
+
+ //////////////////////
+ //// temporal dither
+ //////////////////////
+ if (option == DITHER_OPTION_FM6 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_SPATIAL10_FM6 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
+ } else if (option == DITHER_OPTION_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM8) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
+ } else if (option == DITHER_OPTION_FM10) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
+ }
+
+ fmt_bit_depth->pixel_encoding = pixel_encoding;
+}
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+{
+ struct dc *core_dc = dc;
+ struct dc_link *link = stream->sink->link;
+ struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
+ enum dc_status res = DC_OK;
+
+ calculate_phy_pix_clks(stream);
+
+ if (!tg->funcs->validate_timing(tg, &stream->timing))
+ res = DC_FAIL_CONTROLLER_VALIDATE;
+
+ if (res == DC_OK)
+ if (!link->link_enc->funcs->validate_output_with_stream(
+ link->link_enc, stream))
+ res = DC_FAIL_ENC_VALIDATE;
+
+ /* TODO: validate audio ASIC caps, encoder */
+
+ if (res == DC_OK)
+ res = dc_link_validate_mode_timing(stream,
+ link,
+ &stream->timing);
+
+ return res;
+}
+
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state)
+{
+ enum dc_status res = DC_OK;
+
+ /* TODO For now validates pixel format only */
+ if (dc->res_pool->funcs->validate_plane)
+ return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
+
+ return res;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
new file mode 100644
index 000000000000..25fae38409ab
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+
+static void destruct(struct dc_sink *sink)
+{
+ if (sink->dc_container_id) {
+ kfree(sink->dc_container_id);
+ sink->dc_container_id = NULL;
+ }
+}
+
+static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
+{
+
+ struct dc_link *link = init_params->link;
+
+ if (!link)
+ return false;
+
+ sink->sink_signal = init_params->sink_signal;
+ sink->link = link;
+ sink->ctx = link->ctx;
+ sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
+ sink->converter_disable_audio = init_params->converter_disable_audio;
+ sink->dc_container_id = NULL;
+
+ return true;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+void dc_sink_retain(struct dc_sink *sink)
+{
+ kref_get(&sink->refcount);
+}
+
+static void dc_sink_free(struct kref *kref)
+{
+ struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
+ destruct(sink);
+ kfree(sink);
+}
+
+void dc_sink_release(struct dc_sink *sink)
+{
+ kref_put(&sink->refcount, dc_sink_free);
+}
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
+{
+ struct dc_sink *sink = kzalloc(sizeof(*sink), GFP_KERNEL);
+
+ if (NULL == sink)
+ goto alloc_fail;
+
+ if (false == construct(sink, init_params))
+ goto construct_fail;
+
+ kref_init(&sink->refcount);
+
+ return sink;
+
+construct_fail:
+ kfree(sink);
+
+alloc_fail:
+ return NULL;
+}
+
+/*******************************************************************************
+ * Protected functions - visible only inside of DC (not visible in DM)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
new file mode 100644
index 000000000000..e230cc44a0a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "resource.h"
+#include "ipp.h"
+#include "timing_generator.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
+static void update_stream_signal(struct dc_stream_state *stream)
+{
+ if (stream->output_signal == SIGNAL_TYPE_NONE) {
+ struct dc_sink *dc_sink = stream->sink;
+
+ if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
+ stream->signal = stream->sink->link->connector_signal;
+ else
+ stream->signal = dc_sink->sink_signal;
+ } else {
+ stream->signal = stream->output_signal;
+ }
+
+ if (dc_is_dvi_signal(stream->signal)) {
+ if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
+ stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ else
+ stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+}
+
+static void construct(struct dc_stream_state *stream,
+ struct dc_sink *dc_sink_data)
+{
+ uint32_t i = 0;
+
+ stream->sink = dc_sink_data;
+ stream->ctx = stream->sink->ctx;
+
+ dc_sink_retain(dc_sink_data);
+
+ /* Copy audio modes */
+ /* TODO - Remove this translation */
+ for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
+ {
+ stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
+ stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
+ stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
+ stream->audio_info.modes[i].sample_size = dc_sink_data->edid_caps.audio_modes[i].sample_size;
+ }
+ stream->audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count;
+ stream->audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency;
+ stream->audio_info.video_latency = dc_sink_data->edid_caps.video_latency;
+ memmove(
+ stream->audio_info.display_name,
+ dc_sink_data->edid_caps.display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+ stream->audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id;
+ stream->audio_info.product_id = dc_sink_data->edid_caps.product_id;
+ stream->audio_info.flags.all = dc_sink_data->edid_caps.speaker_flags;
+
+ if (dc_sink_data->dc_container_id != NULL) {
+ struct dc_container_id *dc_container_id = dc_sink_data->dc_container_id;
+
+ stream->audio_info.port_id[0] = dc_container_id->portId[0];
+ stream->audio_info.port_id[1] = dc_container_id->portId[1];
+ } else {
+ /* TODO - WindowDM has implemented,
+ other DMs need Unhardcode port_id */
+ stream->audio_info.port_id[0] = 0x5558859e;
+ stream->audio_info.port_id[1] = 0xd989449;
+ }
+
+ /* EDID CAP translation for HDMI 2.0 */
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
+
+ stream->status.link = stream->sink->link;
+
+ update_stream_signal(stream);
+}
+
+static void destruct(struct dc_stream_state *stream)
+{
+ dc_sink_release(stream->sink);
+ if (stream->out_transfer_func != NULL) {
+ dc_transfer_func_release(
+ stream->out_transfer_func);
+ stream->out_transfer_func = NULL;
+ }
+}
+
+void dc_stream_retain(struct dc_stream_state *stream)
+{
+ kref_get(&stream->refcount);
+}
+
+static void dc_stream_free(struct kref *kref)
+{
+ struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount);
+
+ destruct(stream);
+ kfree(stream);
+}
+
+void dc_stream_release(struct dc_stream_state *stream)
+{
+ if (stream != NULL) {
+ kref_put(&stream->refcount, dc_stream_free);
+ }
+}
+
+struct dc_stream_state *dc_create_stream_for_sink(
+ struct dc_sink *sink)
+{
+ struct dc_stream_state *stream;
+
+ if (sink == NULL)
+ return NULL;
+
+ stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ if (stream == NULL)
+ return NULL;
+
+ construct(stream, sink);
+
+ kref_init(&stream->refcount);
+
+ return stream;
+}
+
+struct dc_stream_status *dc_stream_get_status(
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (stream == dc->current_state->streams[i])
+ return &dc->current_state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * Update the cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (NULL == stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+ if (NULL == attributes) {
+ dm_error("DC: attributes is NULL!\n");
+ return false;
+ }
+
+ if (attributes->address.quad_part == 0) {
+ dm_output_to_console("DC: Cursor address is 0!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ continue;
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ continue;
+
+
+ if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
+ pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+ pipe_ctx->plane_res.ipp, attributes);
+
+ if (pipe_ctx->plane_res.hubp != NULL &&
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.hubp, attributes);
+
+ if (pipe_ctx->plane_res.mi != NULL &&
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.mi, attributes);
+
+
+ if (pipe_ctx->plane_res.xfm != NULL &&
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.xfm, attributes);
+
+ if (pipe_ctx->plane_res.dpp != NULL &&
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.dpp, attributes);
+ }
+
+ stream->cursor_attributes = *attributes;
+
+ return true;
+}
+
+bool dc_stream_set_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (NULL == stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ if (NULL == position) {
+ dm_error("DC: cursor position is NULL!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_cursor_position pos_cpy = *position;
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = stream->timing.pix_clk_khz,
+ .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
+ .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+ .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ };
+
+ if (pipe_ctx->stream != stream ||
+ (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
+ !pipe_ctx->plane_state ||
+ (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ continue;
+
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ pos_cpy.enable = false;
+
+
+ if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
+ ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+
+ if (mi != NULL && mi->funcs->set_cursor_position != NULL)
+ mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+
+ if (!hubp)
+ continue;
+
+ if (hubp->funcs->set_cursor_position != NULL)
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+
+ if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
+ dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+
+ }
+
+ return true;
+}
+
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+{
+ uint8_t i;
+ struct dc *core_dc = stream->ctx->dc;
+ struct resource_context *res_ctx =
+ &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+ if (res_ctx->pipe_ctx[i].stream != stream)
+ continue;
+
+ return tg->funcs->get_frame_count(tg);
+ }
+
+ return 0;
+}
+
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ uint8_t i;
+ bool ret = false;
+ struct dc *core_dc = stream->ctx->dc;
+ struct resource_context *res_ctx =
+ &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+ if (res_ctx->pipe_ctx[i].stream != stream)
+ continue;
+
+ tg->funcs->get_scanoutpos(tg,
+ v_blank_start,
+ v_blank_end,
+ h_position,
+ v_position);
+
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+
+void dc_stream_log(
+ const struct dc_stream_state *stream,
+ struct dal_logger *dm_logger,
+ enum dc_log_type log_type)
+{
+
+ dm_logger_write(dm_logger,
+ log_type,
+ "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+ stream,
+ stream->src.x,
+ stream->src.y,
+ stream->src.width,
+ stream->src.height,
+ stream->dst.x,
+ stream->dst.y,
+ stream->dst.width,
+ stream->dst.height,
+ stream->output_color_space);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
+ stream->timing.pix_clk_khz,
+ stream->timing.h_total,
+ stream->timing.v_total,
+ stream->timing.pixel_encoding,
+ stream->timing.display_color_depth);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tsink name: %s, serial: %d\n",
+ stream->sink->edid_caps.display_name,
+ stream->sink->edid_caps.serial_number);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tlink: %d\n",
+ stream->sink->link->link_index);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
new file mode 100644
index 000000000000..ade5b8ee9c3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* DC interface (public) */
+#include "dm_services.h"
+#include "dc.h"
+
+/* DC core (private) */
+#include "core_types.h"
+#include "transform.h"
+#include "dpp.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+{
+ plane_state->ctx = ctx;
+}
+
+static void destruct(struct dc_plane_state *plane_state)
+{
+ if (plane_state->gamma_correction != NULL) {
+ dc_gamma_release(&plane_state->gamma_correction);
+ }
+ if (plane_state->in_transfer_func != NULL) {
+ dc_transfer_func_release(
+ plane_state->in_transfer_func);
+ plane_state->in_transfer_func = NULL;
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
+ uint32_t controller_id)
+{
+ plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
+ /*register_flip_interrupt(surface);*/
+}
+
+struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+{
+ struct dc *core_dc = dc;
+
+ struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
+ GFP_KERNEL);
+
+ if (NULL == plane_state)
+ return NULL;
+
+ kref_init(&plane_state->refcount);
+ construct(core_dc->ctx, plane_state);
+
+ return plane_state;
+}
+
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state)
+{
+ const struct dc_plane_status *plane_status;
+ struct dc *core_dc;
+ int i;
+
+ if (!plane_state ||
+ !plane_state->ctx ||
+ !plane_state->ctx->dc) {
+ ASSERT(0);
+ return NULL; /* remove this if above assert never hit */
+ }
+
+ plane_status = &plane_state->status;
+ core_dc = plane_state->ctx->dc;
+
+ if (core_dc->current_state == NULL)
+ return NULL;
+
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx =
+ &core_dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ core_dc->hwss.update_pending_status(pipe_ctx);
+ }
+
+ return plane_status;
+}
+
+void dc_plane_state_retain(struct dc_plane_state *plane_state)
+{
+ kref_get(&plane_state->refcount);
+}
+
+static void dc_plane_state_free(struct kref *kref)
+{
+ struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
+ destruct(plane_state);
+ kfree(plane_state);
+}
+
+void dc_plane_state_release(struct dc_plane_state *plane_state)
+{
+ kref_put(&plane_state->refcount, dc_plane_state_free);
+}
+
+void dc_gamma_retain(struct dc_gamma *gamma)
+{
+ kref_get(&gamma->refcount);
+}
+
+static void dc_gamma_free(struct kref *kref)
+{
+ struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
+ kfree(gamma);
+}
+
+void dc_gamma_release(struct dc_gamma **gamma)
+{
+ kref_put(&(*gamma)->refcount, dc_gamma_free);
+ *gamma = NULL;
+}
+
+struct dc_gamma *dc_create_gamma(void)
+{
+ struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+
+ if (gamma == NULL)
+ goto alloc_fail;
+
+ kref_init(&gamma->refcount);
+ return gamma;
+
+alloc_fail:
+ return NULL;
+}
+
+void dc_transfer_func_retain(struct dc_transfer_func *tf)
+{
+ kref_get(&tf->refcount);
+}
+
+static void dc_transfer_func_free(struct kref *kref)
+{
+ struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
+ kfree(tf);
+}
+
+void dc_transfer_func_release(struct dc_transfer_func *tf)
+{
+ kref_put(&tf->refcount, dc_transfer_func_free);
+}
+
+struct dc_transfer_func *dc_create_transfer_func(void)
+{
+ struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+
+ if (tf == NULL)
+ goto alloc_fail;
+
+ kref_init(&tf->refcount);
+
+ return tf;
+
+alloc_fail:
+ return NULL;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
new file mode 100644
index 000000000000..9d8f4a55c74e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -0,0 +1,1103 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INTERFACE_H_
+#define DC_INTERFACE_H_
+
+#include "dc_types.h"
+#include "grph_object_defs.h"
+#include "logger_types.h"
+#include "gpio_types.h"
+#include "link_service_types.h"
+#include "grph_object_ctrl_defs.h"
+#include <inc/hw/opp.h>
+
+#include "inc/hw_sequencer.h"
+#include "inc/compressor.h"
+#include "dml/display_mode_lib.h"
+
+#define DC_VER "3.1.07"
+
+#define MAX_SURFACES 3
+#define MAX_STREAMS 6
+#define MAX_SINKS_PER_LINK 4
+
+
+/*******************************************************************************
+ * Display Core Interfaces
+ ******************************************************************************/
+struct dc_caps {
+ uint32_t max_streams;
+ uint32_t max_links;
+ uint32_t max_audios;
+ uint32_t max_slave_planes;
+ uint32_t max_planes;
+ uint32_t max_downscale_ratio;
+ uint32_t i2c_speed_in_khz;
+ unsigned int max_cursor_size;
+ unsigned int max_video_width;
+ bool dcc_const_color;
+ bool dynamic_audio;
+};
+
+struct dc_dcc_surface_param {
+ struct dc_size surface_size;
+ enum surface_pixel_format format;
+ enum swizzle_mode_values swizzle_mode;
+ enum dc_scan_direction scan;
+};
+
+struct dc_dcc_setting {
+ unsigned int max_compressed_blk_size;
+ unsigned int max_uncompressed_blk_size;
+ bool independent_64b_blks;
+};
+
+struct dc_surface_dcc_cap {
+ union {
+ struct {
+ struct dc_dcc_setting rgb;
+ } grph;
+
+ struct {
+ struct dc_dcc_setting luma;
+ struct dc_dcc_setting chroma;
+ } video;
+ };
+
+ bool capable;
+ bool const_color_support;
+};
+
+struct dc_static_screen_events {
+ bool cursor_update;
+ bool surface_update;
+ bool overlay_update;
+};
+
+/* Forward declaration*/
+struct dc;
+struct dc_plane_state;
+struct dc_state;
+
+struct dc_cap_funcs {
+ bool (*get_dcc_compression_cap)(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+};
+
+struct dc_stream_state_funcs {
+ bool (*adjust_vmin_vmax)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ int vmin,
+ int vmax);
+ bool (*get_crtc_position)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ unsigned int *v_pos,
+ unsigned int *nom_v_pos);
+
+ bool (*set_gamut_remap)(struct dc *dc,
+ const struct dc_stream_state *stream);
+
+ bool (*program_csc_matrix)(struct dc *dc,
+ struct dc_stream_state *stream);
+
+ void (*set_static_screen_events)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ const struct dc_static_screen_events *events);
+
+ void (*set_dither_option)(struct dc_stream_state *stream,
+ enum dc_dither_option option);
+
+ void (*set_dpms)(struct dc *dc,
+ struct dc_stream_state *stream,
+ bool dpms_off);
+};
+
+struct link_training_settings;
+
+struct dc_link_funcs {
+ void (*set_drive_settings)(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link);
+ void (*perform_link_training)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+ void (*set_preferred_link_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+ void (*enable_hpd)(const struct dc_link *link);
+ void (*disable_hpd)(const struct dc_link *link);
+ void (*set_test_pattern)(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+};
+
+/* Structure to hold configuration flags set by dm at dc creation. */
+struct dc_config {
+ bool gpu_vm_support;
+ bool disable_disp_pll_sharing;
+};
+
+enum dcc_option {
+ DCC_ENABLE = 0,
+ DCC_DISABLE = 1,
+ DCC_HALF_REQ_DISALBE = 2,
+};
+
+enum pipe_split_policy {
+ MPC_SPLIT_DYNAMIC = 0,
+ MPC_SPLIT_AVOID = 1,
+ MPC_SPLIT_AVOID_MULT_DISP = 2,
+};
+
+enum wm_report_mode {
+ WM_REPORT_DEFAULT = 0,
+ WM_REPORT_OVERRIDE = 1,
+};
+
+struct dc_debug {
+ bool surface_visual_confirm;
+ bool sanity_checks;
+ bool max_disp_clk;
+ bool surface_trace;
+ bool timing_trace;
+ bool clock_trace;
+ bool validation_trace;
+
+ /* stutter efficiency related */
+ bool disable_stutter;
+ bool use_max_lb;
+ enum dcc_option disable_dcc;
+ enum pipe_split_policy pipe_split_policy;
+ bool force_single_disp_pipe_split;
+ bool voltage_align_fclk;
+
+ bool disable_dfs_bypass;
+ bool disable_dpp_power_gate;
+ bool disable_hubp_power_gate;
+ bool disable_pplib_wm_range;
+ enum wm_report_mode pplib_wm_report_mode;
+ unsigned int min_disp_clk_khz;
+ int sr_exit_time_dpm0_ns;
+ int sr_enter_plus_exit_time_dpm0_ns;
+ int sr_exit_time_ns;
+ int sr_enter_plus_exit_time_ns;
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
+ int always_scale;
+ bool disable_pplib_clock_request;
+ bool disable_clock_gate;
+ bool disable_dmcu;
+ bool disable_psr;
+ bool force_abm_enable;
+ bool disable_hbup_pg;
+ bool disable_dpp_pg;
+ bool disable_stereo_support;
+ bool vsr_support;
+ bool performance_trace;
+};
+struct dc_state;
+struct resource_pool;
+struct dce_hwseq;
+struct dc {
+ struct dc_caps caps;
+ struct dc_cap_funcs cap_funcs;
+ struct dc_stream_state_funcs stream_funcs;
+ struct dc_link_funcs link_funcs;
+ struct dc_config config;
+ struct dc_debug debug;
+
+ struct dc_context *ctx;
+
+ uint8_t link_count;
+ struct dc_link *links[MAX_PIPES * 2];
+
+ struct dc_state *current_state;
+ struct resource_pool *res_pool;
+
+ /* Display Engine Clock levels */
+ struct dm_pp_clock_levels sclk_lvls;
+
+ /* Inputs into BW and WM calculations. */
+ struct bw_calcs_dceip *bw_dceip;
+ struct bw_calcs_vbios *bw_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ struct display_mode_lib dml;
+#endif
+
+ /* HW functions */
+ struct hw_sequencer_funcs hwss;
+ struct dce_hwseq *hwseq;
+
+ /* temp store of dm_pp_display_configuration
+ * to compare to see if display config changed
+ */
+ struct dm_pp_display_configuration prev_display_config;
+
+ /* FBC compressor */
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct compressor *fbc_compressor;
+#endif
+};
+
+enum frame_buffer_mode {
+ FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
+ FRAME_BUFFER_MODE_ZFB_ONLY,
+ FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL,
+} ;
+
+struct dchub_init_data {
+ int64_t zfb_phys_addr_base;
+ int64_t zfb_mc_base_addr;
+ uint64_t zfb_size_in_byte;
+ enum frame_buffer_mode fb_mode;
+ bool dchub_initialzied;
+ bool dchub_info_valid;
+};
+
+struct dc_init_data {
+ struct hw_asic_id asic_id;
+ void *driver; /* ctx */
+ struct cgs_device *cgs_device;
+
+ int num_virtual_links;
+ /*
+ * If 'vbios_override' not NULL, it will be called instead
+ * of the real VBIOS. Intended use is Diagnostics on FPGA.
+ */
+ struct dc_bios *vbios_override;
+ enum dce_environment dce_environment;
+
+ struct dc_config flags;
+ uint32_t log_mask;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint64_t fbc_gpu_addr;
+#endif
+};
+
+struct dc *dc_create(const struct dc_init_data *init_params);
+
+void dc_destroy(struct dc **dc);
+
+/*******************************************************************************
+ * Surface Interfaces
+ ******************************************************************************/
+
+enum {
+ TRANSFER_FUNC_POINTS = 1025
+};
+
+// Moved here from color module for linux
+enum color_transfer_func {
+ transfer_func_unknown,
+ transfer_func_srgb,
+ transfer_func_bt709,
+ transfer_func_pq2084,
+ transfer_func_pq2084_interim,
+ transfer_func_linear_0_1,
+ transfer_func_linear_0_125,
+ transfer_func_dolbyvision,
+ transfer_func_gamma_22,
+ transfer_func_gamma_26
+};
+
+enum color_color_space {
+ color_space_unsupported,
+ color_space_srgb,
+ color_space_bt601,
+ color_space_bt709,
+ color_space_xv_ycc_bt601,
+ color_space_xv_ycc_bt709,
+ color_space_xr_rgb,
+ color_space_bt2020,
+ color_space_adobe,
+ color_space_dci_p3,
+ color_space_sc_rgb_ms_ref,
+ color_space_display_native,
+ color_space_app_ctrl,
+ color_space_dolby_vision,
+ color_space_custom_coordinates
+};
+
+struct dc_hdr_static_metadata {
+ /* display chromaticities and white point in units of 0.00001 */
+ unsigned int chromaticity_green_x;
+ unsigned int chromaticity_green_y;
+ unsigned int chromaticity_blue_x;
+ unsigned int chromaticity_blue_y;
+ unsigned int chromaticity_red_x;
+ unsigned int chromaticity_red_y;
+ unsigned int chromaticity_white_point_x;
+ unsigned int chromaticity_white_point_y;
+
+ uint32_t min_luminance;
+ uint32_t max_luminance;
+ uint32_t maximum_content_light_level;
+ uint32_t maximum_frame_average_light_level;
+
+ bool hdr_supported;
+ bool is_hdr;
+};
+
+enum dc_transfer_func_type {
+ TF_TYPE_PREDEFINED,
+ TF_TYPE_DISTRIBUTED_POINTS,
+ TF_TYPE_BYPASS
+};
+
+struct dc_transfer_func_distributed_points {
+ struct fixed31_32 red[TRANSFER_FUNC_POINTS];
+ struct fixed31_32 green[TRANSFER_FUNC_POINTS];
+ struct fixed31_32 blue[TRANSFER_FUNC_POINTS];
+
+ uint16_t end_exponent;
+ uint16_t x_point_at_y1_red;
+ uint16_t x_point_at_y1_green;
+ uint16_t x_point_at_y1_blue;
+};
+
+enum dc_transfer_func_predefined {
+ TRANSFER_FUNCTION_SRGB,
+ TRANSFER_FUNCTION_BT709,
+ TRANSFER_FUNCTION_PQ,
+ TRANSFER_FUNCTION_LINEAR,
+};
+
+struct dc_transfer_func {
+ struct kref refcount;
+ struct dc_transfer_func_distributed_points tf_pts;
+ enum dc_transfer_func_type type;
+ enum dc_transfer_func_predefined tf;
+ struct dc_context *ctx;
+};
+
+/*
+ * This structure is filled in by dc_surface_get_status and contains
+ * the last requested address and the currently active address so the called
+ * can determine if there are any outstanding flips
+ */
+struct dc_plane_status {
+ struct dc_plane_address requested_address;
+ struct dc_plane_address current_address;
+ bool is_flip_pending;
+ bool is_right_eye;
+};
+
+struct dc_plane_state {
+ struct dc_plane_address address;
+ struct scaling_taps scaling_quality;
+ struct rect src_rect;
+ struct rect dst_rect;
+ struct rect clip_rect;
+
+ union plane_size plane_size;
+ union dc_tiling_info tiling_info;
+
+ struct dc_plane_dcc_param dcc;
+ struct dc_hdr_static_metadata hdr_static_ctx;
+
+ struct dc_gamma *gamma_correction;
+ struct dc_transfer_func *in_transfer_func;
+
+ // sourceContentAttribute cache
+ bool is_source_input_valid;
+ struct dc_hdr_static_metadata source_input_mastering_info;
+ enum color_color_space source_input_color_space;
+ enum color_transfer_func source_input_tf;
+
+ enum dc_color_space color_space;
+ enum surface_pixel_format format;
+ enum dc_rotation_angle rotation;
+ enum plane_stereo_format stereo_format;
+
+ bool per_pixel_alpha;
+ bool visible;
+ bool flip_immediate;
+ bool horizontal_mirror;
+
+ /* private to DC core */
+ struct dc_plane_status status;
+ struct dc_context *ctx;
+
+ /* private to dc_surface.c */
+ enum dc_irq_source irq_source;
+ struct kref refcount;
+};
+
+struct dc_plane_info {
+ union plane_size plane_size;
+ union dc_tiling_info tiling_info;
+ struct dc_plane_dcc_param dcc;
+ enum surface_pixel_format format;
+ enum dc_rotation_angle rotation;
+ enum plane_stereo_format stereo_format;
+ enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
+ bool horizontal_mirror;
+ bool visible;
+ bool per_pixel_alpha;
+};
+
+struct dc_scaling_info {
+ struct rect src_rect;
+ struct rect dst_rect;
+ struct rect clip_rect;
+ struct scaling_taps scaling_quality;
+};
+
+struct dc_surface_update {
+ struct dc_plane_state *surface;
+
+ /* isr safe update parameters. null means no updates */
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ /* following updates require alloc/sleep/spin that is not isr safe,
+ * null means no updates
+ */
+ /* gamma TO BE REMOVED */
+ struct dc_gamma *gamma;
+ struct dc_transfer_func *in_transfer_func;
+ struct dc_hdr_static_metadata *hdr_static_metadata;
+};
+
+/*
+ * Create a new surface with default parameters;
+ */
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state);
+
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+void dc_gamma_retain(struct dc_gamma *dc_gamma);
+void dc_gamma_release(struct dc_gamma **dc_gamma);
+struct dc_gamma *dc_create_gamma(void);
+
+void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
+void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
+struct dc_transfer_func *dc_create_transfer_func(void);
+
+/*
+ * This structure holds a surface address. There could be multiple addresses
+ * in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
+ * as frame durations and DCC format can also be set.
+ */
+struct dc_flip_addrs {
+ struct dc_plane_address address;
+ bool flip_immediate;
+ /* TODO: add flip duration for FreeSync */
+};
+
+bool dc_post_update_surfaces_to_stream(
+ struct dc *dc);
+
+/* Surface update type is used by dc_update_surfaces_and_stream
+ * The update type is determined at the very beginning of the function based
+ * on parameters passed in and decides how much programming (or updating) is
+ * going to be done during the call.
+ *
+ * UPDATE_TYPE_FAST is used for really fast updates that do not require much
+ * logical calculations or hardware register programming. This update MUST be
+ * ISR safe on windows. Currently fast update will only be used to flip surface
+ * address.
+ *
+ * UPDATE_TYPE_MED is used for slower updates which require significant hw
+ * re-programming however do not affect bandwidth consumption or clock
+ * requirements. At present, this is the level at which front end updates
+ * that do not require us to run bw_calcs happen. These are in/out transfer func
+ * updates, viewport offset changes, recout size changes and pixel depth changes.
+ * This update can be done at ISR, but we want to minimize how often this happens.
+ *
+ * UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
+ * bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
+ * end related. Any time viewport dimensions, recout dimensions, scaling ratios or
+ * gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
+ * a full update. This cannot be done at ISR level and should be a rare event.
+ * Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
+ * underscan we don't expect to see this call at all.
+ */
+
+enum surface_update_type {
+ UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
+ UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
+ UPDATE_TYPE_FULL, /* may need to shuffle resources */
+};
+
+/*******************************************************************************
+ * Stream Interfaces
+ ******************************************************************************/
+
+struct dc_stream_status {
+ int primary_otg_inst;
+ int stream_enc_inst;
+ int plane_count;
+ struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+
+ /*
+ * link this stream passes through
+ */
+ struct dc_link *link;
+};
+
+struct dc_stream_state {
+ struct dc_sink *sink;
+ struct dc_crtc_timing timing;
+
+ struct rect src; /* composition area */
+ struct rect dst; /* stream addressable area */
+
+ struct audio_info audio_info;
+
+ struct freesync_context freesync_ctx;
+
+ struct dc_transfer_func *out_transfer_func;
+ struct colorspace_transform gamut_remap_matrix;
+ struct csc_transform csc_color_matrix;
+
+ enum signal_type output_signal;
+
+ enum dc_color_space output_color_space;
+ enum dc_dither_option dither_option;
+
+ enum view_3d_format view_format;
+
+ bool ignore_msa_timing_param;
+ /* TODO: custom INFO packets */
+ /* TODO: ABM info (DMCU) */
+ /* TODO: PSR info */
+ /* TODO: CEA VIC */
+
+ /* from core_stream struct */
+ struct dc_context *ctx;
+
+ /* used by DCP and FMT */
+ struct bit_depth_reduction_params bit_depth_params;
+ struct clamping_and_pixel_encoding_params clamping;
+
+ int phy_pix_clk;
+ enum signal_type signal;
+ bool dpms_off;
+
+ struct dc_stream_status status;
+
+ struct dc_cursor_attributes cursor_attributes;
+
+ /* from stream struct */
+ struct kref refcount;
+};
+
+struct dc_stream_update {
+ struct rect src;
+ struct rect dst;
+ struct dc_transfer_func *out_transfer_func;
+};
+
+bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+
+/*
+ * Set up surface attributes and associate to a stream
+ * The surfaces parameter is an absolute set of all surface active for the stream.
+ * If no surfaces are provided, the stream will be blanked; no memory read.
+ * Any flip related attribute changes must be done through this interface.
+ *
+ * After this call:
+ * Surfaces attributes are programmed and configured to be composed into stream.
+ * This does not trigger a flip. No surface address is programmed.
+ */
+
+bool dc_commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *state);
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_plane_state **plane_states,
+ struct dc_state *state);
+/*
+ * Log the current stream state.
+ */
+void dc_stream_log(
+ const struct dc_stream_state *stream,
+ struct dal_logger *dc_logger,
+ enum dc_log_type log_type);
+
+uint8_t dc_get_current_stream_count(struct dc *dc);
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
+
+/*
+ * Return the current frame counter.
+ */
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+
+/* TODO: Return parsed values rather than direct register read
+ * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
+ * being refactored properly to be dce-specific
+ */
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+enum dc_status dc_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+enum dc_status dc_remove_stream_from_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_remove_plane_from_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context);
+
+bool dc_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *context);
+
+/*
+ * Structure to store surface/stream associations for validation
+ */
+struct dc_validation_set {
+ struct dc_stream_state *stream;
+ struct dc_plane_state *plane_states[MAX_SURFACES];
+ uint8_t plane_count;
+};
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
+
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
+
+enum dc_status dc_validate_global_state(
+ struct dc *dc,
+ struct dc_state *new_ctx);
+
+/*
+ * This function takes a stream and checks if it is guaranteed to be supported.
+ * Guaranteed means that MAX_COFUNC similar streams are supported.
+ *
+ * After this call:
+ * No hardware is programmed for call. Only validation is done.
+ */
+
+
+void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct_current(
+ const struct dc *dc,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_destruct(struct dc_state *context);
+
+/*
+ * TODO update to make it about validation sets
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder, Timing Generator are programmed and enabled.
+ * New streams are enabled with blank stream; no memory read.
+ */
+bool dc_commit_state(struct dc *dc, struct dc_state *context);
+
+/*
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder, Timing Generator are programmed and enabled.
+ * New streams are enabled with blank stream; no memory read.
+ */
+/*
+ * Enable stereo when commit_streams is not required,
+ * for example, frame alternate.
+ */
+bool dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count);
+
+/**
+ * Create a new default stream for the requested sink
+ */
+struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+
+void dc_stream_retain(struct dc_stream_state *dc_stream);
+void dc_stream_release(struct dc_stream_state *dc_stream);
+
+struct dc_stream_status *dc_stream_get_status(
+ struct dc_stream_state *dc_stream);
+
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status);
+
+
+struct dc_state *dc_create_state(void);
+void dc_retain_state(struct dc_state *context);
+void dc_release_state(struct dc_state *context);
+
+/*******************************************************************************
+ * Link Interfaces
+ ******************************************************************************/
+
+struct dpcd_caps {
+ union dpcd_rev dpcd_rev;
+ union max_lane_count max_ln_count;
+ union max_down_spread max_down_spread;
+
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ /* Dongle's downstream count. */
+ union sink_count sink_count;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
+ struct dc_dongle_caps dongle_caps;
+
+ uint32_t sink_dev_id;
+ uint32_t branch_dev_id;
+ int8_t branch_dev_name[6];
+ int8_t branch_hw_revision;
+
+ bool allow_invalid_MSA_timing_param;
+ bool panel_mode_edp;
+ bool dpcd_display_control_capable;
+};
+
+struct dc_link_status {
+ struct dpcd_caps *dpcd_caps;
+};
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+ /* DIG front */
+ const struct stream_encoder *stream_enc;
+ /* associate DRM payload table with DC stream encoder */
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in transport packet */
+ uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ /* caps is the same as reported_link_cap. link_traing use
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting;
+ struct dc_link_settings preferred_link_setting;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+
+ bool test_pattern_enabled;
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ enum edp_revision edp_revision;
+ bool psr_enabled;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ } wa_flags;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+
+};
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
+
+/*
+ * Return an enumerated dc_link. dc_link order is constant and determined at
+ * boot time. They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+ return dc->links[link_index];
+}
+
+/* Set backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream);
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+
+/* Request DC to detect if there is a Panel connected.
+ * boot - If this call is during initial boot.
+ * Return false for any type of detection failure or MST detection
+ * true otherwise. True meaning further action is required (status update
+ * and OS notification).
+ */
+enum dc_detect_reason {
+ DETECT_REASON_BOOT,
+ DETECT_REASON_HPD,
+ DETECT_REASON_HPDRX,
+};
+
+bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
+
+/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+
+struct dc_sink_init_data;
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *dc_link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+
+void dc_link_remove_remote_sink(
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+/* Used by diagnostics for virtual link at the moment */
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+void dc_link_dp_enable_hpd(const struct dc_link *link);
+
+void dc_link_dp_disable_hpd(const struct dc_link *link);
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
+/*******************************************************************************
+ * Sink Interfaces - A sink corresponds to a display output device
+ ******************************************************************************/
+
+struct dc_container_id {
+ // 128bit GUID in binary form
+ unsigned char guid[16];
+ // 8 byte port ID -> ELD.PortID
+ unsigned int portId[2];
+ // 128bit GUID in binary formufacturer name -> ELD.ManufacturerName
+ unsigned short manufacturerName;
+ // 2 byte product code -> ELD.ProductCode
+ unsigned short productCode;
+};
+
+
+
+/*
+ * The sink structure contains EDID and other display device properties
+ */
+struct dc_sink {
+ enum signal_type sink_signal;
+ struct dc_edid dc_edid; /* raw edid */
+ struct dc_edid_caps edid_caps; /* parse display caps */
+ struct dc_container_id *dc_container_id;
+ uint32_t dongle_max_pix_clk;
+ void *priv;
+ struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
+ bool converter_disable_audio;
+
+ /* private to DC core */
+ struct dc_link *link;
+ struct dc_context *ctx;
+
+ /* private to dc_sink.c */
+ struct kref refcount;
+};
+
+void dc_sink_retain(struct dc_sink *sink);
+void dc_sink_release(struct dc_sink *sink);
+
+struct dc_sink_init_data {
+ enum signal_type sink_signal;
+ struct dc_link *link;
+ uint32_t dongle_max_pix_clk;
+ bool converter_disable_audio;
+};
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
+
+/*******************************************************************************
+ * Cursor interfaces - To manages the cursor within a stream
+ ******************************************************************************/
+/* TODO: Deprecated once we switch to dc_set_cursor_position */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes);
+
+bool dc_stream_set_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position);
+
+/* Newer interfaces */
+struct dc_cursor {
+ struct dc_plane_address address;
+ struct dc_cursor_attributes attributes;
+};
+
+/*******************************************************************************
+ * Interrupt interfaces
+ ******************************************************************************/
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id);
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
+enum dc_irq_source dc_get_hpd_irq_source_at_index(
+ struct dc *dc, uint32_t link_index);
+
+/*******************************************************************************
+ * Power Interfaces
+ ******************************************************************************/
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state);
+void dc_resume(struct dc *dc);
+
+/*
+ * DPCD access interfaces
+ */
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd);
+
+
+#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
new file mode 100644
index 000000000000..273d80a4ebce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_BIOS_TYPES_H
+#define DC_BIOS_TYPES_H
+
+/******************************************************************************
+ * Interface file for VBIOS implementations.
+ *
+ * The default implementation is inside DC.
+ * Display Manager (which instantiates DC) has the option to supply it's own
+ * (external to DC) implementation of VBIOS, which will be called by DC, using
+ * this interface.
+ * (The intended use is Diagnostics, but other uses may appear.)
+ *****************************************************************************/
+
+#include "include/bios_parser_types.h"
+
+struct dc_vbios_funcs {
+ uint8_t (*get_connectors_number)(struct dc_bios *bios);
+
+ struct graphics_object_id (*get_encoder_id)(
+ struct dc_bios *bios,
+ uint32_t i);
+ struct graphics_object_id (*get_connector_id)(
+ struct dc_bios *bios,
+ uint8_t connector_index);
+ uint32_t (*get_dst_number)(
+ struct dc_bios *bios,
+ struct graphics_object_id id);
+
+ enum bp_result (*get_src_obj)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id);
+ enum bp_result (*get_dst_obj)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id);
+
+ enum bp_result (*get_i2c_info)(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info);
+
+ enum bp_result (*get_voltage_ddc_info)(
+ struct dc_bios *bios,
+ uint32_t index,
+ struct graphics_object_i2c_info *info);
+ enum bp_result (*get_thermal_ddc_info)(
+ struct dc_bios *bios,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info);
+ enum bp_result (*get_hpd_info)(
+ struct dc_bios *bios,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info);
+ enum bp_result (*get_device_tag)(
+ struct dc_bios *bios,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info);
+ enum bp_result (*get_firmware_info)(
+ struct dc_bios *bios,
+ struct dc_firmware_info *info);
+ enum bp_result (*get_spread_spectrum_info)(
+ struct dc_bios *bios,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info);
+ uint32_t (*get_ss_entry_number)(
+ struct dc_bios *bios,
+ enum as_signal_type signal);
+ enum bp_result (*get_embedded_panel_info)(
+ struct dc_bios *bios,
+ struct embedded_panel_info *info);
+ enum bp_result (*get_gpio_pin_info)(
+ struct dc_bios *bios,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info);
+ enum bp_result (*get_encoder_cap_info)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info);
+
+ bool (*is_lid_status_changed)(
+ struct dc_bios *bios);
+ bool (*is_display_config_changed)(
+ struct dc_bios *bios);
+ bool (*is_accelerated_mode)(
+ struct dc_bios *bios);
+ void (*get_bios_event_info)(
+ struct dc_bios *bios,
+ struct bios_event_info *info);
+ void (*update_requested_backlight_level)(
+ struct dc_bios *bios,
+ uint32_t backlight_8bit);
+ uint32_t (*get_requested_backlight_level)(
+ struct dc_bios *bios);
+ void (*take_backlight_control)(
+ struct dc_bios *bios,
+ bool cntl);
+
+ bool (*is_active_display)(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag);
+ enum controller_id (*get_embedded_display_controller_id)(
+ struct dc_bios *bios);
+ uint32_t (*get_embedded_display_refresh_rate)(
+ struct dc_bios *bios);
+
+ void (*set_scratch_critical_state)(
+ struct dc_bios *bios,
+ bool state);
+ bool (*is_device_id_supported)(
+ struct dc_bios *bios,
+ struct device_id id);
+
+ /* COMMANDS */
+
+ enum bp_result (*encoder_control)(
+ struct dc_bios *bios,
+ struct bp_encoder_control *cntl);
+ enum bp_result (*transmitter_control)(
+ struct dc_bios *bios,
+ struct bp_transmitter_control *cntl);
+ enum bp_result (*crt_control)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ bool enable,
+ uint32_t pixel_clock);
+ enum bp_result (*enable_crtc)(
+ struct dc_bios *bios,
+ enum controller_id id,
+ bool enable);
+ enum bp_result (*adjust_pixel_clock)(
+ struct dc_bios *bios,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*set_pixel_clock)(
+ struct dc_bios *bios,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*set_dce_clock)(
+ struct dc_bios *bios,
+ struct bp_set_dce_clock_parameters *bp_params);
+ unsigned int (*get_smu_clock_info)(
+ struct dc_bios *bios);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct dc_bios *bios,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*program_crtc_timing)(
+ struct dc_bios *bios,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+ enum bp_result (*crtc_source_select)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*program_display_engine_pll)(
+ struct dc_bios *bios,
+ struct bp_pixel_clock_parameters *bp_params);
+
+ enum signal_type (*dac_load_detect)(
+ struct dc_bios *bios,
+ struct graphics_object_id encoder,
+ struct graphics_object_id connector,
+ enum signal_type display_signal);
+
+ enum bp_result (*enable_disp_power_gating)(
+ struct dc_bios *bios,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action);
+
+ void (*post_init)(struct dc_bios *bios);
+
+ void (*bios_parser_destroy)(struct dc_bios **dcb);
+};
+
+struct bios_registers {
+ uint32_t BIOS_SCRATCH_6;
+};
+
+struct dc_bios {
+ const struct dc_vbios_funcs *funcs;
+
+ uint8_t *bios;
+ uint32_t bios_size;
+
+ uint8_t *bios_local_image;
+
+ struct dc_context *ctx;
+ const struct bios_registers *regs;
+ struct integrated_info *integrated_info;
+};
+
+#endif /* DC_BIOS_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
new file mode 100644
index 000000000000..e1affeb5cc51
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_DDC_TYPES_H_
+#define DC_DDC_TYPES_H_
+
+struct i2c_payload {
+ bool write;
+ uint8_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2c_command_engine {
+ I2C_COMMAND_ENGINE_DEFAULT,
+ I2C_COMMAND_ENGINE_SW,
+ I2C_COMMAND_ENGINE_HW
+};
+
+struct i2c_command {
+ struct i2c_payload *payloads;
+ uint8_t number_of_payloads;
+
+ enum i2c_command_engine engine;
+
+ /* expressed in KHz
+ * zero means "use default value" */
+ uint32_t speed;
+};
+
+struct gpio_ddc_hw_info {
+ bool hw_supported;
+ uint32_t ddc_channel;
+};
+
+struct ddc {
+ struct gpio *pin_data;
+ struct gpio *pin_clock;
+ struct gpio_ddc_hw_info hw_info;
+ struct dc_context *ctx;
+};
+
+union ddc_wa {
+ struct {
+ uint32_t DP_SKIP_POWER_OFF:1;
+ uint32_t DP_AUX_POWER_UP_WA_DELAY:1;
+ } bits;
+ uint32_t raw;
+};
+
+struct ddc_flags {
+ uint8_t EDID_QUERY_DONE_ONCE:1;
+ uint8_t IS_INTERNAL_DISPLAY:1;
+ uint8_t FORCE_READ_REPEATED_START:1;
+ uint8_t EDID_STRESS_READ:1;
+
+};
+
+enum ddc_transaction_type {
+ DDC_TRANSACTION_TYPE_NONE = 0,
+ DDC_TRANSACTION_TYPE_I2C,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER
+};
+
+enum display_dongle_type {
+ DISPLAY_DONGLE_NONE = 0,
+ /* Active converter types*/
+ DISPLAY_DONGLE_DP_VGA_CONVERTER,
+ DISPLAY_DONGLE_DP_DVI_CONVERTER,
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ /* DP-HDMI/DVI passive dongles (Type 1 and Type 2)*/
+ DISPLAY_DONGLE_DP_DVI_DONGLE,
+ DISPLAY_DONGLE_DP_HDMI_DONGLE,
+ /* Other types of dongle*/
+ DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
+};
+
+struct ddc_service {
+ struct ddc *ddc_pin;
+ struct ddc_flags flags;
+ union ddc_wa wa;
+ enum ddc_transaction_type transaction_type;
+ enum display_dongle_type dongle_type;
+ struct dc_context *ctx;
+ struct dc_link *link;
+
+ uint32_t address;
+ uint32_t edid_buf_len;
+ uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+};
+
+#endif /* DC_DDC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
new file mode 100644
index 000000000000..77e2de69cca3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_DP_TYPES_H
+#define DC_DP_TYPES_H
+
+enum dc_lane_count {
+ LANE_COUNT_UNKNOWN = 0,
+ LANE_COUNT_ONE = 1,
+ LANE_COUNT_TWO = 2,
+ LANE_COUNT_FOUR = 4,
+ LANE_COUNT_EIGHT = 8,
+ LANE_COUNT_DP_MAX = LANE_COUNT_FOUR
+};
+
+/* This is actually a reference clock (27MHz) multiplier
+ * 162MBps bandwidth for 1.62GHz like rate,
+ * 270MBps for 2.70GHz,
+ * 324MBps for 3.24Ghz,
+ * 540MBps for 5.40GHz
+ * 810MBps for 8.10GHz
+ */
+enum dc_link_rate {
+ LINK_RATE_UNKNOWN = 0,
+ LINK_RATE_LOW = 0x06,
+ LINK_RATE_HIGH = 0x0A,
+ LINK_RATE_RBR2 = 0x0C,
+ LINK_RATE_HIGH2 = 0x14,
+ LINK_RATE_HIGH3 = 0x1E
+};
+
+enum dc_link_spread {
+ LINK_SPREAD_DISABLED = 0x00,
+ /* 0.5 % downspread 30 kHz */
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ = 0x10,
+ /* 0.5 % downspread 33 kHz */
+ LINK_SPREAD_05_DOWNSPREAD_33KHZ = 0x11
+};
+
+enum dc_voltage_swing {
+ VOLTAGE_SWING_LEVEL0 = 0, /* direct HW translation! */
+ VOLTAGE_SWING_LEVEL1,
+ VOLTAGE_SWING_LEVEL2,
+ VOLTAGE_SWING_LEVEL3,
+ VOLTAGE_SWING_MAX_LEVEL = VOLTAGE_SWING_LEVEL3
+};
+
+enum dc_pre_emphasis {
+ PRE_EMPHASIS_DISABLED = 0, /* direct HW translation! */
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_MAX_LEVEL = PRE_EMPHASIS_LEVEL3
+};
+/* Post Cursor 2 is optional for transmitter
+ * and it applies only to the main link operating at HBR2
+ */
+enum dc_post_cursor2 {
+ POST_CURSOR2_DISABLED = 0, /* direct HW translation! */
+ POST_CURSOR2_LEVEL1,
+ POST_CURSOR2_LEVEL2,
+ POST_CURSOR2_LEVEL3,
+ POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
+};
+
+struct dc_link_settings {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+ enum dc_link_spread link_spread;
+};
+
+struct dc_lane_settings {
+ enum dc_voltage_swing VOLTAGE_SWING;
+ enum dc_pre_emphasis PRE_EMPHASIS;
+ enum dc_post_cursor2 POST_CURSOR2;
+};
+
+struct dc_link_training_settings {
+ struct dc_link_settings link;
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+};
+
+
+union dpcd_rev {
+ struct {
+ uint8_t MINOR:4;
+ uint8_t MAJOR:4;
+ } bits;
+ uint8_t raw;
+};
+
+union max_lane_count {
+ struct {
+ uint8_t MAX_LANE_COUNT:5;
+ uint8_t POST_LT_ADJ_REQ_SUPPORTED:1;
+ uint8_t TPS3_SUPPORTED:1;
+ uint8_t ENHANCED_FRAME_CAP:1;
+ } bits;
+ uint8_t raw;
+};
+
+union max_down_spread {
+ struct {
+ uint8_t MAX_DOWN_SPREAD:1;
+ uint8_t RESERVED:5;
+ uint8_t NO_AUX_HANDSHAKE_LINK_TRAINING:1;
+ uint8_t TPS4_SUPPORTED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union mstm_cap {
+ struct {
+ uint8_t MST_CAP:1;
+ uint8_t RESERVED:7;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_count_set {
+ struct {
+ uint8_t LANE_COUNT_SET:5;
+ uint8_t POST_LT_ADJ_REQ_GRANTED:1;
+ uint8_t RESERVED:1;
+ uint8_t ENHANCED_FRAMING:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_status {
+ struct {
+ uint8_t CR_DONE_0:1;
+ uint8_t CHANNEL_EQ_DONE_0:1;
+ uint8_t SYMBOL_LOCKED_0:1;
+ uint8_t RESERVED0:1;
+ uint8_t CR_DONE_1:1;
+ uint8_t CHANNEL_EQ_DONE_1:1;
+ uint8_t SYMBOL_LOCKED_1:1;
+ uint8_t RESERVED_1:1;
+ } bits;
+ uint8_t raw;
+};
+
+union device_service_irq {
+ struct {
+ uint8_t REMOTE_CONTROL_CMD_PENDING:1;
+ uint8_t AUTOMATED_TEST:1;
+ uint8_t CP_IRQ:1;
+ uint8_t MCCS_IRQ:1;
+ uint8_t DOWN_REP_MSG_RDY:1;
+ uint8_t UP_REQ_MSG_RDY:1;
+ uint8_t SINK_SPECIFIC:1;
+ uint8_t reserved:1;
+ } bits;
+ uint8_t raw;
+};
+
+union sink_count {
+ struct {
+ uint8_t SINK_COUNT:6;
+ uint8_t CPREADY:1;
+ uint8_t RESERVED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_align_status_updated {
+ struct {
+ uint8_t INTERLANE_ALIGN_DONE:1;
+ uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
+ uint8_t RESERVED:4;
+ uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
+ uint8_t LINK_STATUS_UPDATED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_adjust {
+ struct {
+ uint8_t VOLTAGE_SWING_LANE:2;
+ uint8_t PRE_EMPHASIS_LANE:2;
+ uint8_t RESERVED:4;
+ } bits;
+ uint8_t raw;
+};
+
+union dpcd_training_pattern {
+ struct {
+ uint8_t TRAINING_PATTERN_SET:4;
+ uint8_t RECOVERED_CLOCK_OUT_EN:1;
+ uint8_t SCRAMBLING_DISABLE:1;
+ uint8_t SYMBOL_ERROR_COUNT_SEL:2;
+ } v1_4;
+ struct {
+ uint8_t TRAINING_PATTERN_SET:2;
+ uint8_t LINK_QUAL_PATTERN_SET:2;
+ uint8_t RESERVED:4;
+ } v1_3;
+ uint8_t raw;
+};
+
+/* Training Lane is used to configure downstream DP device's voltage swing
+and pre-emphasis levels*/
+/* The DPCD addresses are from 0x103 to 0x106*/
+union dpcd_training_lane {
+ struct {
+ uint8_t VOLTAGE_SWING_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ uint8_t RESERVED:2;
+ } bits;
+ uint8_t raw;
+};
+
+/* TMDS-converter related */
+union dwnstream_port_caps_byte0 {
+ struct {
+ uint8_t DWN_STRM_PORTX_TYPE:3;
+ uint8_t DWN_STRM_PORTX_HPD:1;
+ uint8_t RESERVERD:4;
+ } bits;
+ uint8_t raw;
+};
+
+/* these are the detailed types stored at DWN_STRM_PORTX_CAP (00080h)*/
+enum dpcd_downstream_port_detailed_type {
+ DOWN_STREAM_DETAILED_DP = 0,
+ DOWN_STREAM_DETAILED_VGA,
+ DOWN_STREAM_DETAILED_DVI,
+ DOWN_STREAM_DETAILED_HDMI,
+ DOWN_STREAM_DETAILED_NONDDC,/* has no EDID (TV,CV)*/
+ DOWN_STREAM_DETAILED_DP_PLUS_PLUS
+};
+
+union dwnstream_port_caps_byte1 {
+ struct {
+ uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_downstream_port_present {
+ uint8_t byte;
+ struct {
+ uint8_t PORT_PRESENT:1;
+ uint8_t PORT_TYPE:2;
+ uint8_t FMT_CONVERSION:1;
+ uint8_t DETAILED_CAPS:1;
+ uint8_t RESERVED:3;
+ } fields;
+};
+
+union dwnstream_port_caps_byte3_dvi {
+ struct {
+ uint8_t RESERVED1:1;
+ uint8_t DUAL_LINK:1;
+ uint8_t HIGH_COLOR_DEPTH:1;
+ uint8_t RESERVED2:5;
+ } bits;
+ uint8_t raw;
+};
+
+union dwnstream_port_caps_byte3_hdmi {
+ struct {
+ uint8_t FRAME_SEQ_TO_FRAME_PACK:1;
+ uint8_t YCrCr422_PASS_THROUGH:1;
+ uint8_t YCrCr420_PASS_THROUGH:1;
+ uint8_t YCrCr422_CONVERSION:1;
+ uint8_t YCrCr420_CONVERSION:1;
+ uint8_t RESERVED:3;
+ } bits;
+ uint8_t raw;
+};
+
+/*4-byte structure for detailed capabilities of a down-stream port
+(DP-to-TMDS converter).*/
+
+union sink_status {
+ struct {
+ uint8_t RX_PORT0_STATUS:1;
+ uint8_t RX_PORT1_STATUS:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+/*6-byte structure corresponding to 6 registers (200h-205h)
+read during handling of HPD-IRQ*/
+union hpd_irq_data {
+ struct {
+ union sink_count sink_cnt;/* 200h */
+ union device_service_irq device_service_irq;/* 201h */
+ union lane_status lane01_status;/* 202h */
+ union lane_status lane23_status;/* 203h */
+ union lane_align_status_updated lane_status_updated;/* 204h */
+ union sink_status sink_status;
+ } bytes;
+ uint8_t raw[6];
+};
+
+union down_stream_port_count {
+ struct {
+ uint8_t DOWN_STR_PORT_COUNT:4;
+ uint8_t RESERVED:2; /*Bits 5:4 = RESERVED. Read all 0s.*/
+ /*Bit 6 = MSA_TIMING_PAR_IGNORED
+ 0 = Sink device requires the MSA timing parameters
+ 1 = Sink device is capable of rendering incoming video
+ stream without MSA timing parameters*/
+ uint8_t IGNORE_MSA_TIMING_PARAM:1;
+ /*Bit 7 = OUI Support
+ 0 = OUI not supported
+ 1 = OUI supported
+ (OUI and Device Identification mandatory for DP 1.2)*/
+ uint8_t OUI_SUPPORT:1;
+ } bits;
+ uint8_t raw;
+};
+
+union down_spread_ctrl {
+ struct {
+ uint8_t RESERVED1:4;/* Bit 3:0 = RESERVED. Read all 0s*/
+ /* Bits 4 = SPREAD_AMP. Spreading amplitude
+ 0 = Main link signal is not downspread
+ 1 = Main link signal is downspread <= 0.5%
+ with frequency in the range of 30kHz ~ 33kHz*/
+ uint8_t SPREAD_AMP:1;
+ uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/
+ /*Bit 7 = MSA_TIMING_PAR_IGNORE_EN
+ 0 = Source device will send valid data for the MSA Timing Params
+ 1 = Source device may send invalid data for these MSA Timing Params*/
+ uint8_t IGNORE_MSA_TIMING_PARAM:1;
+ } bits;
+ uint8_t raw;
+};
+
+union dpcd_edp_config {
+ struct {
+ uint8_t PANEL_MODE_EDP:1;
+ uint8_t FRAMING_CHANGE_ENABLE:1;
+ uint8_t RESERVED:5;
+ uint8_t PANEL_SELF_TEST_ENABLE:1;
+ } bits;
+ uint8_t raw;
+};
+
+struct dp_device_vendor_id {
+ uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
+ uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
+};
+
+struct dp_sink_hw_fw_revision {
+ uint8_t ieee_hw_rev;
+ uint8_t ieee_fw_rev[2];
+};
+
+/*DPCD register of DP receiver capability field bits-*/
+union edp_configuration_cap {
+ struct {
+ uint8_t ALT_SCRAMBLER_RESET:1;
+ uint8_t FRAMING_CHANGE:1;
+ uint8_t RESERVED:1;
+ uint8_t DPCD_DISPLAY_CONTROL_CAPABLE:1;
+ uint8_t RESERVED2:4;
+ } bits;
+ uint8_t raw;
+};
+
+union training_aux_rd_interval {
+ struct {
+ uint8_t TRAINIG_AUX_RD_INTERVAL:7;
+ uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+ } bits;
+ uint8_t raw;
+};
+
+/* Automated test structures */
+union test_request {
+ struct {
+ uint8_t LINK_TRAINING :1;
+ uint8_t LINK_TEST_PATTRN :1;
+ uint8_t EDID_REAT :1;
+ uint8_t PHY_TEST_PATTERN :1;
+ uint8_t AUDIO_TEST_PATTERN :1;
+ uint8_t RESERVED :1;
+ uint8_t TEST_STEREO_3D :1;
+ } bits;
+ uint8_t raw;
+};
+
+union test_response {
+ struct {
+ uint8_t ACK :1;
+ uint8_t NO_ACK :1;
+ uint8_t RESERVED :6;
+ } bits;
+ uint8_t raw;
+};
+
+union phy_test_pattern {
+ struct {
+ /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
+ * and 3 bits for DP1.2.
+ */
+ uint8_t PATTERN :3;
+ /* BY speci, bit7:2 is 0 for DP1.1. */
+ uint8_t RESERVED :5;
+ } bits;
+ uint8_t raw;
+};
+
+/* States of Compliance Test Specification (CTS DP1.2). */
+union compliance_test_state {
+ struct {
+ unsigned char STEREO_3D_RUNNING : 1;
+ unsigned char RESERVED : 7;
+ } bits;
+ unsigned char raw;
+};
+
+union link_test_pattern {
+ struct {
+ /* dpcd_link_test_patterns */
+ unsigned char PATTERN :2;
+ unsigned char RESERVED:6;
+ } bits;
+ unsigned char raw;
+};
+
+union test_misc {
+ struct dpcd_test_misc_bits {
+ unsigned char SYNC_CLOCK :1;
+ /* dpcd_test_color_format */
+ unsigned char CLR_FORMAT :2;
+ /* dpcd_test_dyn_range */
+ unsigned char DYN_RANGE :1;
+ unsigned char YCBCR :1;
+ /* dpcd_test_bit_depth */
+ unsigned char BPC :3;
+ } bits;
+ unsigned char raw;
+};
+
+#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
new file mode 100644
index 000000000000..90e81f7ba919
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * dc_helper.c
+ *
+ * Created on: Aug 30, 2016
+ * Author: agrodzov
+ */
+#include "dm_services.h"
+#include <stdarg.h>
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ va_list ap;
+ va_start(ap, field_value1);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+ i++;
+ }
+
+ dm_write_reg(ctx, addr, reg_val);
+ va_end(ap);
+
+ return reg_val;
+}
+
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift, uint32_t mask, uint32_t *field_value)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+ return reg_val;
+}
+
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ return reg_val;
+}
+
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ return reg_val;
+}
+
+uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ return reg_val;
+}
+
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+ return reg_val;
+}
+
+/* note: va version of this is pretty bad idea, since there is a output parameter pass by pointer
+ * compiler won't be able to check for size match and is prone to stack corruption type of bugs
+
+uint32_t generic_reg_get(const struct dc_context *ctx,
+ uint32_t addr, int n, ...)
+{
+ uint32_t shift, mask;
+ uint32_t *field_value;
+ uint32_t reg_val;
+ int i = 0;
+
+ reg_val = dm_read_reg(ctx, addr);
+
+ va_list ap;
+ va_start(ap, n);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t *);
+
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+ i++;
+ }
+
+ va_end(ap);
+
+ return reg_val;
+}
+*/
+
+uint32_t generic_reg_wait(const struct dc_context *ctx,
+ uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
+ unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+ const char *func_name, int line)
+{
+ uint32_t field_value;
+ uint32_t reg_val;
+ int i;
+
+ /* something is terribly wrong if time out is > 200ms. (5Hz) */
+ ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ /* 35 seconds */
+ delay_between_poll_us = 35000;
+ time_out_num_tries = 1000;
+ }
+
+ for (i = 0; i <= time_out_num_tries; i++) {
+ if (i) {
+ if (delay_between_poll_us >= 1000)
+ msleep(delay_between_poll_us/1000);
+ else if (delay_between_poll_us > 0)
+ udelay(delay_between_poll_us);
+ }
+
+ reg_val = dm_read_reg(ctx, addr);
+
+ field_value = get_reg_field_value_ex(reg_val, mask, shift);
+
+ if (field_value == condition_value)
+ return reg_val;
+ }
+
+ dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
+ delay_between_poll_us, time_out_num_tries,
+ func_name, line);
+
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ BREAK_TO_DEBUGGER();
+
+ return reg_val;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
new file mode 100644
index 000000000000..1a9f57fb0838
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -0,0 +1,706 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_HW_TYPES_H
+#define DC_HW_TYPES_H
+
+#include "os_types.h"
+#include "fixed31_32.h"
+#include "signal_types.h"
+
+/******************************************************************************
+ * Data types for Virtual HW Layer of DAL3.
+ * (see DAL3 design documents for HW Layer definition)
+ *
+ * The intended uses are:
+ * 1. Generation pseudocode sequences for HW programming.
+ * 2. Implementation of real HW programming by HW Sequencer of DAL3.
+ *
+ * Note: do *not* add any types which are *not* used for HW programming - this
+ * will ensure separation of Logic layer from HW layer.
+ ******************************************************************************/
+
+union large_integer {
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ };
+
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ } u;
+
+ int64_t quad_part;
+};
+
+#define PHYSICAL_ADDRESS_LOC union large_integer
+
+enum dc_plane_addr_type {
+ PLN_ADDR_TYPE_GRAPHICS = 0,
+ PLN_ADDR_TYPE_GRPH_STEREO,
+ PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
+};
+
+struct dc_plane_address {
+ enum dc_plane_addr_type type;
+ bool tmz_surface;
+ union {
+ struct{
+ PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC meta_addr;
+ union large_integer dcc_const_color;
+ } grph;
+
+ /*stereo*/
+ struct {
+ PHYSICAL_ADDRESS_LOC left_addr;
+ PHYSICAL_ADDRESS_LOC left_meta_addr;
+ union large_integer left_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC right_addr;
+ PHYSICAL_ADDRESS_LOC right_meta_addr;
+ union large_integer right_dcc_const_color;
+
+ } grph_stereo;
+
+ /*video progressive*/
+ struct {
+ PHYSICAL_ADDRESS_LOC luma_addr;
+ PHYSICAL_ADDRESS_LOC luma_meta_addr;
+ union large_integer luma_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC chroma_addr;
+ PHYSICAL_ADDRESS_LOC chroma_meta_addr;
+ union large_integer chroma_dcc_const_color;
+ } video_progressive;
+ };
+};
+
+struct dc_size {
+ int width;
+ int height;
+};
+
+struct rect {
+ int x;
+ int y;
+ int width;
+ int height;
+};
+
+union plane_size {
+ /* Grph or Video will be selected
+ * based on format above:
+ * Use Video structure if
+ * format >= DalPixelFormat_VideoBegin
+ * else use Grph structure
+ */
+ struct {
+ struct rect surface_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch
+ * is 32 pixel aligned.
+ */
+ int surface_pitch;
+ } grph;
+
+ struct {
+ struct rect luma_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch is
+ * 32 pixel aligned.
+ */
+ int luma_pitch;
+
+ struct rect chroma_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch is
+ * 32 pixel aligned.
+ */
+ int chroma_pitch;
+ } video;
+};
+
+struct dc_plane_dcc_param {
+ bool enable;
+
+ union {
+ struct {
+ int meta_pitch;
+ bool independent_64b_blks;
+ } grph;
+
+ struct {
+ int meta_pitch_l;
+ bool independent_64b_blks_l;
+
+ int meta_pitch_c;
+ bool independent_64b_blks_c;
+ } video;
+ };
+};
+
+/*Displayable pixel format in fb*/
+enum surface_pixel_format {
+ SURFACE_PIXEL_FORMAT_GRPH_BEGIN = 0,
+ /*TOBE REMOVED paletta 256 colors*/
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS =
+ SURFACE_PIXEL_FORMAT_GRPH_BEGIN,
+ /*16 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB1555,
+ /*16 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_RGB565,
+ /*32 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
+ /*32 bpp swaped*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR8888,
+
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010,
+ /*swaped*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010,
+ /*TOBE REMOVED swaped, XR_BIAS has no differance
+ * for pixel layout than previous and we can
+ * delete this after discusion*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
+ /*64 bpp */
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
+ /*float*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
+ /*swaped & float*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
+ /*grow graphics here if necessary */
+
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+ SURFACE_PIXEL_FORMAT_INVALID
+
+ /*grow 444 video here if necessary */
+};
+
+
+
+/* Pixel format */
+enum pixel_format {
+ /*graph*/
+ PIXEL_FORMAT_UNINITIALIZED,
+ PIXEL_FORMAT_INDEX8,
+ PIXEL_FORMAT_RGB565,
+ PIXEL_FORMAT_ARGB8888,
+ PIXEL_FORMAT_ARGB2101010,
+ PIXEL_FORMAT_ARGB2101010_XRBIAS,
+ PIXEL_FORMAT_FP16,
+ /*video*/
+ PIXEL_FORMAT_420BPP8,
+ PIXEL_FORMAT_420BPP10,
+ /*end of pixel format definition*/
+ PIXEL_FORMAT_INVALID,
+
+ PIXEL_FORMAT_GRPH_BEGIN = PIXEL_FORMAT_INDEX8,
+ PIXEL_FORMAT_GRPH_END = PIXEL_FORMAT_FP16,
+ PIXEL_FORMAT_VIDEO_BEGIN = PIXEL_FORMAT_420BPP8,
+ PIXEL_FORMAT_VIDEO_END = PIXEL_FORMAT_420BPP10,
+ PIXEL_FORMAT_UNKNOWN
+};
+
+enum tile_split_values {
+ DC_DISPLAY_MICRO_TILING = 0x0,
+ DC_THIN_MICRO_TILING = 0x1,
+ DC_DEPTH_MICRO_TILING = 0x2,
+ DC_ROTATED_MICRO_TILING = 0x3,
+};
+
+/* TODO: These values come from hardware spec. We need to readdress this
+ * if they ever change.
+ */
+enum array_mode_values {
+ DC_ARRAY_LINEAR_GENERAL = 0,
+ DC_ARRAY_LINEAR_ALLIGNED,
+ DC_ARRAY_1D_TILED_THIN1,
+ DC_ARRAY_1D_TILED_THICK,
+ DC_ARRAY_2D_TILED_THIN1,
+ DC_ARRAY_PRT_TILED_THIN1,
+ DC_ARRAY_PRT_2D_TILED_THIN1,
+ DC_ARRAY_2D_TILED_THICK,
+ DC_ARRAY_2D_TILED_X_THICK,
+ DC_ARRAY_PRT_TILED_THICK,
+ DC_ARRAY_PRT_2D_TILED_THICK,
+ DC_ARRAY_PRT_3D_TILED_THIN1,
+ DC_ARRAY_3D_TILED_THIN1,
+ DC_ARRAY_3D_TILED_THICK,
+ DC_ARRAY_3D_TILED_X_THICK,
+ DC_ARRAY_PRT_3D_TILED_THICK,
+};
+
+enum tile_mode_values {
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+ DC_ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+};
+
+enum swizzle_mode_values {
+ DC_SW_LINEAR = 0,
+ DC_SW_256B_S = 1,
+ DC_SW_256_D = 2,
+ DC_SW_256_R = 3,
+ DC_SW_4KB_S = 5,
+ DC_SW_4KB_D = 6,
+ DC_SW_4KB_R = 7,
+ DC_SW_64KB_S = 9,
+ DC_SW_64KB_D = 10,
+ DC_SW_64KB_R = 11,
+ DC_SW_VAR_S = 13,
+ DC_SW_VAR_D = 14,
+ DC_SW_VAR_R = 15,
+ DC_SW_64KB_S_T = 17,
+ DC_SW_64KB_D_T = 18,
+ DC_SW_4KB_S_X = 21,
+ DC_SW_4KB_D_X = 22,
+ DC_SW_4KB_R_X = 23,
+ DC_SW_64KB_S_X = 25,
+ DC_SW_64KB_D_X = 26,
+ DC_SW_64KB_R_X = 27,
+ DC_SW_VAR_S_X = 29,
+ DC_SW_VAR_D_X = 30,
+ DC_SW_VAR_R_X = 31,
+ DC_SW_MAX
+};
+
+union dc_tiling_info {
+
+ struct {
+ /* Specifies the number of memory banks for tiling
+ * purposes.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 2,4,8,16
+ */
+ unsigned int num_banks;
+ /* Specifies the number of tiles in the x direction
+ * to be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_width;
+ unsigned int bank_width_c;
+ /* Specifies the number of tiles in the y direction to
+ * be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_height;
+ unsigned int bank_height_c;
+ /* Specifies the macro tile aspect ratio. Only applies
+ * to 2D and 3D tiling modes.
+ */
+ unsigned int tile_aspect;
+ unsigned int tile_aspect_c;
+ /* Specifies the number of bytes that will be stored
+ * contiguously for each tile.
+ * If the tile data requires more storage than this
+ * amount, it is split into multiple slices.
+ * This field must not be larger than
+ * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+ * Only applies to 2D and 3D tiling modes.
+ * For color render targets, TILE_SPLIT >= 256B.
+ */
+ enum tile_split_values tile_split;
+ enum tile_split_values tile_split_c;
+ /* Specifies the addressing within a tile.
+ * 0x0 - DISPLAY_MICRO_TILING
+ * 0x1 - THIN_MICRO_TILING
+ * 0x2 - DEPTH_MICRO_TILING
+ * 0x3 - ROTATED_MICRO_TILING
+ */
+ enum tile_mode_values tile_mode;
+ enum tile_mode_values tile_mode_c;
+ /* Specifies the number of pipes and how they are
+ * interleaved in the surface.
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ unsigned int pipe_config;
+ /* Specifies the tiling mode of the surface.
+ * THIN tiles use an 8x8x1 tile size.
+ * THICK tiles use an 8x8x4 tile size.
+ * 2D tiling modes rotate banks for successive Z slices
+ * 3D tiling modes rotate pipes and banks for Z slices
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ enum array_mode_values array_mode;
+ } gfx8;
+
+ struct {
+ unsigned int num_pipes;
+ unsigned int num_banks;
+ unsigned int pipe_interleave;
+ unsigned int num_shader_engines;
+ unsigned int num_rb_per_se;
+ unsigned int max_compressed_frags;
+ bool shaderEnable;
+
+ enum swizzle_mode_values swizzle;
+ bool meta_linear;
+ bool rb_aligned;
+ bool pipe_aligned;
+ } gfx9;
+};
+
+/* Rotation angle */
+enum dc_rotation_angle {
+ ROTATION_ANGLE_0 = 0,
+ ROTATION_ANGLE_90,
+ ROTATION_ANGLE_180,
+ ROTATION_ANGLE_270,
+ ROTATION_ANGLE_COUNT
+};
+
+enum dc_scan_direction {
+ SCAN_DIRECTION_UNKNOWN = 0,
+ SCAN_DIRECTION_HORIZONTAL = 1, /* 0, 180 rotation */
+ SCAN_DIRECTION_VERTICAL = 2, /* 90, 270 rotation */
+};
+
+struct dc_cursor_position {
+ uint32_t x;
+ uint32_t y;
+
+ uint32_t x_hotspot;
+ uint32_t y_hotspot;
+
+ /*
+ * This parameter indicates whether HW cursor should be enabled
+ */
+ bool enable;
+
+};
+
+struct dc_cursor_mi_param {
+ unsigned int pixel_clk_khz;
+ unsigned int ref_clk_khz;
+ unsigned int viewport_x_start;
+ unsigned int viewport_width;
+ struct fixed31_32 h_scale_ratio;
+};
+
+/* IPP related types */
+
+enum {
+ GAMMA_RGB_256_ENTRIES = 256,
+ GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
+ GAMMA_MAX_ENTRIES = 1024
+};
+
+enum dc_gamma_type {
+ GAMMA_RGB_256 = 1,
+ GAMMA_RGB_FLOAT_1024 = 2
+};
+
+struct dc_gamma {
+ struct kref refcount;
+ enum dc_gamma_type type;
+ unsigned int num_entries;
+
+ struct dc_gamma_entries {
+ struct fixed31_32 red[GAMMA_MAX_ENTRIES];
+ struct fixed31_32 green[GAMMA_MAX_ENTRIES];
+ struct fixed31_32 blue[GAMMA_MAX_ENTRIES];
+ } entries;
+
+ /* private to DC core */
+ struct dc_context *ctx;
+};
+
+/* Used by both ipp amd opp functions*/
+/* TODO: to be consolidated with enum color_space */
+
+/*
+ * This enum is for programming CURSOR_MODE register field. What this register
+ * should be programmed to depends on OS requested cursor shape flags and what
+ * we stored in the cursor surface.
+ */
+enum dc_cursor_color_format {
+ CURSOR_MODE_MONO,
+ CURSOR_MODE_COLOR_1BIT_AND,
+ CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
+ CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
+};
+
+/*
+ * This is all the parameters required by DAL in order to update the cursor
+ * attributes, including the new cursor image surface address, size, hotspot
+ * location, color format, etc.
+ */
+
+union dc_cursor_attribute_flags {
+ struct {
+ uint32_t ENABLE_MAGNIFICATION:1;
+ uint32_t INVERSE_TRANSPARENT_CLAMPING:1;
+ uint32_t HORIZONTAL_MIRROR:1;
+ uint32_t VERTICAL_MIRROR:1;
+ uint32_t INVERT_PIXEL_DATA:1;
+ uint32_t ZERO_EXPANSION:1;
+ uint32_t MIN_MAX_INVERT:1;
+ uint32_t RESERVED:25;
+ } bits;
+ uint32_t value;
+};
+
+struct dc_cursor_attributes {
+ PHYSICAL_ADDRESS_LOC address;
+ uint32_t pitch;
+
+ /* Width and height should correspond to cursor surface width x heigh */
+ uint32_t width;
+ uint32_t height;
+
+ enum dc_cursor_color_format color_format;
+
+ /* In case we support HW Cursor rotation in the future */
+ enum dc_rotation_angle rotation_angle;
+
+ union dc_cursor_attribute_flags attribute_flags;
+};
+
+/* OPP */
+
+enum dc_color_space {
+ COLOR_SPACE_UNKNOWN,
+ COLOR_SPACE_SRGB,
+ COLOR_SPACE_SRGB_LIMITED,
+ COLOR_SPACE_YCBCR601,
+ COLOR_SPACE_YCBCR709,
+ COLOR_SPACE_YCBCR601_LIMITED,
+ COLOR_SPACE_YCBCR709_LIMITED,
+ COLOR_SPACE_2020_RGB_FULLRANGE,
+ COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+ COLOR_SPACE_2020_YCBCR,
+ COLOR_SPACE_ADOBERGB,
+};
+
+enum dc_dither_option {
+ DITHER_OPTION_DEFAULT,
+ DITHER_OPTION_DISABLE,
+ DITHER_OPTION_FM6,
+ DITHER_OPTION_FM8,
+ DITHER_OPTION_FM10,
+ DITHER_OPTION_SPATIAL6_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL8_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL10_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL6,
+ DITHER_OPTION_SPATIAL8,
+ DITHER_OPTION_SPATIAL10,
+ DITHER_OPTION_TRUN6,
+ DITHER_OPTION_TRUN8,
+ DITHER_OPTION_TRUN10,
+ DITHER_OPTION_TRUN10_SPATIAL8,
+ DITHER_OPTION_TRUN10_SPATIAL6,
+ DITHER_OPTION_TRUN10_FM8,
+ DITHER_OPTION_TRUN10_FM6,
+ DITHER_OPTION_TRUN10_SPATIAL8_FM6,
+ DITHER_OPTION_SPATIAL10_FM8,
+ DITHER_OPTION_SPATIAL10_FM6,
+ DITHER_OPTION_TRUN8_SPATIAL6,
+ DITHER_OPTION_TRUN8_FM6,
+ DITHER_OPTION_SPATIAL8_FM6,
+ DITHER_OPTION_MAX = DITHER_OPTION_SPATIAL8_FM6,
+ DITHER_OPTION_INVALID
+};
+
+enum dc_quantization_range {
+ QUANTIZATION_RANGE_UNKNOWN,
+ QUANTIZATION_RANGE_FULL,
+ QUANTIZATION_RANGE_LIMITED
+};
+
+/* XFM */
+
+/* used in struct dc_plane_state */
+struct scaling_taps {
+ uint32_t v_taps;
+ uint32_t h_taps;
+ uint32_t v_taps_c;
+ uint32_t h_taps_c;
+};
+
+enum dc_timing_standard {
+ TIMING_STANDARD_UNDEFINED,
+ TIMING_STANDARD_DMT,
+ TIMING_STANDARD_GTF,
+ TIMING_STANDARD_CVT,
+ TIMING_STANDARD_CVT_RB,
+ TIMING_STANDARD_CEA770,
+ TIMING_STANDARD_CEA861,
+ TIMING_STANDARD_HDMI,
+ TIMING_STANDARD_TV_NTSC,
+ TIMING_STANDARD_TV_NTSC_J,
+ TIMING_STANDARD_TV_PAL,
+ TIMING_STANDARD_TV_PAL_M,
+ TIMING_STANDARD_TV_PAL_CN,
+ TIMING_STANDARD_TV_SECAM,
+ TIMING_STANDARD_EXPLICIT,
+ /*!< For explicit timings from EDID, VBIOS, etc.*/
+ TIMING_STANDARD_USER_OVERRIDE,
+ /*!< For mode timing override by user*/
+ TIMING_STANDARD_MAX
+};
+
+
+
+enum dc_color_depth {
+ COLOR_DEPTH_UNDEFINED,
+ COLOR_DEPTH_666,
+ COLOR_DEPTH_888,
+ COLOR_DEPTH_101010,
+ COLOR_DEPTH_121212,
+ COLOR_DEPTH_141414,
+ COLOR_DEPTH_161616,
+ COLOR_DEPTH_COUNT
+};
+
+enum dc_pixel_encoding {
+ PIXEL_ENCODING_UNDEFINED,
+ PIXEL_ENCODING_RGB,
+ PIXEL_ENCODING_YCBCR422,
+ PIXEL_ENCODING_YCBCR444,
+ PIXEL_ENCODING_YCBCR420,
+ PIXEL_ENCODING_COUNT
+};
+
+enum dc_aspect_ratio {
+ ASPECT_RATIO_NO_DATA,
+ ASPECT_RATIO_4_3,
+ ASPECT_RATIO_16_9,
+ ASPECT_RATIO_64_27,
+ ASPECT_RATIO_256_135,
+ ASPECT_RATIO_FUTURE
+};
+
+enum scanning_type {
+ SCANNING_TYPE_NODATA = 0,
+ SCANNING_TYPE_OVERSCAN,
+ SCANNING_TYPE_UNDERSCAN,
+ SCANNING_TYPE_FUTURE,
+ SCANNING_TYPE_UNDEFINED
+};
+
+struct dc_crtc_timing_flags {
+ uint32_t INTERLACE :1;
+ uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
+ it is positive polarity --reversed with dal1 or video bios define*/
+ uint32_t VSYNC_POSITIVE_POLARITY :1; /* when set to 1,
+ it is positive polarity --reversed with dal1 or video bios define*/
+
+ uint32_t HORZ_COUNT_BY_TWO:1;
+
+ uint32_t EXCLUSIVE_3D :1; /* if this bit set,
+ timing can be driven in 3D format only
+ and there is no corresponding 2D timing*/
+ uint32_t RIGHT_EYE_3D_POLARITY :1; /* 1 - means right eye polarity
+ (right eye = '1', left eye = '0') */
+ uint32_t SUB_SAMPLE_3D :1; /* 1 - means left/right images subsampled
+ when mixed into 3D image. 0 - means summation (3D timing is doubled)*/
+ uint32_t USE_IN_3D_VIEW_ONLY :1; /* Do not use this timing in 2D View,
+ because corresponding 2D timing also present in the list*/
+ uint32_t STEREO_3D_PREFERENCE :1; /* Means this is 2D timing
+ and we want to match priority of corresponding 3D timing*/
+ uint32_t Y_ONLY :1;
+
+ uint32_t YCBCR420 :1; /* TODO: shouldn't need this flag, should be a separate pixel format */
+ uint32_t DTD_COUNTER :5; /* values 1 to 16 */
+
+ uint32_t FORCE_HDR :1;
+
+ /* HDMI 2.0 - Support scrambling for TMDS character
+ * rates less than or equal to 340Mcsc */
+ uint32_t LTE_340MCSC_SCRAMBLE:1;
+
+};
+
+enum dc_timing_3d_format {
+ TIMING_3D_FORMAT_NONE,
+ TIMING_3D_FORMAT_FRAME_ALTERNATE, /* No stereosync at all*/
+ TIMING_3D_FORMAT_INBAND_FA, /* Inband Frame Alternate (DVI/DP)*/
+ TIMING_3D_FORMAT_DP_HDMI_INBAND_FA, /* Inband FA to HDMI Frame Pack*/
+ /* for active DP-HDMI dongle*/
+ TIMING_3D_FORMAT_SIDEBAND_FA, /* Sideband Frame Alternate (eDP)*/
+ TIMING_3D_FORMAT_HW_FRAME_PACKING,
+ TIMING_3D_FORMAT_SW_FRAME_PACKING,
+ TIMING_3D_FORMAT_ROW_INTERLEAVE,
+ TIMING_3D_FORMAT_COLUMN_INTERLEAVE,
+ TIMING_3D_FORMAT_PIXEL_INTERLEAVE,
+ TIMING_3D_FORMAT_SIDE_BY_SIDE,
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM,
+ TIMING_3D_FORMAT_SBS_SW_PACKED,
+ /* Side-by-side, packed by application/driver into 2D frame*/
+ TIMING_3D_FORMAT_TB_SW_PACKED,
+ /* Top-and-bottom, packed by application/driver into 2D frame*/
+
+ TIMING_3D_FORMAT_MAX,
+};
+
+
+struct dc_crtc_timing {
+
+ uint32_t h_total;
+ uint32_t h_border_left;
+ uint32_t h_addressable;
+ uint32_t h_border_right;
+ uint32_t h_front_porch;
+ uint32_t h_sync_width;
+
+ uint32_t v_total;
+ uint32_t v_border_top;
+ uint32_t v_addressable;
+ uint32_t v_border_bottom;
+ uint32_t v_front_porch;
+ uint32_t v_sync_width;
+
+ uint32_t pix_clk_khz;
+
+ uint32_t vic;
+ uint32_t hdmi_vic;
+ enum dc_timing_3d_format timing_3d_format;
+ enum dc_color_depth display_color_depth;
+ enum dc_pixel_encoding pixel_encoding;
+ enum dc_aspect_ratio aspect_ratio;
+ enum scanning_type scan_type;
+
+ struct dc_crtc_timing_flags flags;
+};
+
+#define MAX_TG_COLOR_VALUE 0x3FF
+struct tg_color {
+ /* Maximum 10 bits color value */
+ uint16_t color_r_cr;
+ uint16_t color_g_y;
+ uint16_t color_b_cb;
+};
+
+#endif /* DC_HW_TYPES_H */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
new file mode 100644
index 000000000000..a8698e399111
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -0,0 +1,652 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_TYPES_H_
+#define DC_TYPES_H_
+
+#include "fixed32_32.h"
+#include "fixed31_32.h"
+#include "irq_types.h"
+#include "dc_dp_types.h"
+#include "dc_hw_types.h"
+#include "dal_types.h"
+#include "grph_object_defs.h"
+
+/* forward declarations */
+struct dc_plane_state;
+struct dc_stream_state;
+struct dc_link;
+struct dc_sink;
+struct dal;
+
+/********************************
+ * Environment definitions
+ ********************************/
+enum dce_environment {
+ DCE_ENV_PRODUCTION_DRV = 0,
+ /* Emulation on FPGA, in "Maximus" System.
+ * This environment enforces that *only* DC registers accessed.
+ * (access to non-DC registers will hang FPGA) */
+ DCE_ENV_FPGA_MAXIMUS,
+ /* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
+ * requirements of Diagnostics team. */
+ DCE_ENV_DIAG
+};
+
+/* Note: use these macro definitions instead of direct comparison! */
+#define IS_FPGA_MAXIMUS_DC(dce_environment) \
+ (dce_environment == DCE_ENV_FPGA_MAXIMUS)
+
+#define IS_DIAG_DC(dce_environment) \
+ (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
+
+struct hw_asic_id {
+ uint32_t chip_id;
+ uint32_t chip_family;
+ uint32_t pci_revision_id;
+ uint32_t hw_internal_rev;
+ uint32_t vram_type;
+ uint32_t vram_width;
+ uint32_t feature_flags;
+ uint32_t fake_paths_num;
+ void *atombios_base_address;
+};
+
+struct dc_context {
+ struct dc *dc;
+
+ void *driver_context; /* e.g. amdgpu_device */
+
+ struct dal_logger *logger;
+ void *cgs_device;
+
+ enum dce_environment dce_environment;
+ struct hw_asic_id asic_id;
+
+ /* todo: below should probably move to dc. to facilitate removal
+ * of AS we will store these here
+ */
+ enum dce_version dce_version;
+ struct dc_bios *dc_bios;
+ bool created_bios;
+ struct gpio_service *gpio_service;
+ struct i2caux *i2caux;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint64_t fbc_gpu_addr;
+#endif
+};
+
+
+#define MAX_EDID_BUFFER_SIZE 512
+#define EDID_BLOCK_SIZE 128
+#define MAX_SURFACE_NUM 4
+#define NUM_PIXEL_FORMATS 10
+
+#include "dc_ddc_types.h"
+
+enum tiling_mode {
+ TILING_MODE_INVALID,
+ TILING_MODE_LINEAR,
+ TILING_MODE_TILED,
+ TILING_MODE_COUNT
+};
+
+enum view_3d_format {
+ VIEW_3D_FORMAT_NONE = 0,
+ VIEW_3D_FORMAT_FRAME_SEQUENTIAL,
+ VIEW_3D_FORMAT_SIDE_BY_SIDE,
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM,
+ VIEW_3D_FORMAT_COUNT,
+ VIEW_3D_FORMAT_FIRST = VIEW_3D_FORMAT_FRAME_SEQUENTIAL
+};
+
+enum plane_stereo_format {
+ PLANE_STEREO_FORMAT_NONE = 0,
+ PLANE_STEREO_FORMAT_SIDE_BY_SIDE = 1,
+ PLANE_STEREO_FORMAT_TOP_AND_BOTTOM = 2,
+ PLANE_STEREO_FORMAT_FRAME_ALTERNATE = 3,
+ PLANE_STEREO_FORMAT_ROW_INTERLEAVED = 5,
+ PLANE_STEREO_FORMAT_COLUMN_INTERLEAVED = 6,
+ PLANE_STEREO_FORMAT_CHECKER_BOARD = 7
+};
+
+/* TODO: Find way to calculate number of bits
+ * Please increase if pixel_format enum increases
+ * num from PIXEL_FORMAT_INDEX8 to PIXEL_FORMAT_444BPP32
+ */
+
+enum dc_edid_connector_type {
+ EDID_CONNECTOR_UNKNOWN = 0,
+ EDID_CONNECTOR_ANALOG = 1,
+ EDID_CONNECTOR_DIGITAL = 10,
+ EDID_CONNECTOR_DVI = 11,
+ EDID_CONNECTOR_HDMIA = 12,
+ EDID_CONNECTOR_MDDI = 14,
+ EDID_CONNECTOR_DISPLAYPORT = 15
+};
+
+enum dc_edid_status {
+ EDID_OK,
+ EDID_BAD_INPUT,
+ EDID_NO_RESPONSE,
+ EDID_BAD_CHECKSUM,
+ EDID_THE_SAME,
+};
+
+/* audio capability from EDID*/
+struct dc_cea_audio_mode {
+ uint8_t format_code; /* ucData[0] [6:3]*/
+ uint8_t channel_count; /* ucData[0] [2:0]*/
+ uint8_t sample_rate; /* ucData[1]*/
+ union {
+ uint8_t sample_size; /* for LPCM*/
+ /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz)*/
+ uint8_t max_bit_rate;
+ uint8_t audio_codec_vendor_specific; /* for Audio Formats 9-15*/
+ };
+};
+
+struct dc_edid {
+ uint32_t length;
+ uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+};
+
+/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
+ * is used. In this case we assume speaker location are: front left, front
+ * right and front center. */
+#define DEFAULT_SPEAKER_LOCATION 5
+
+#define DC_MAX_AUDIO_DESC_COUNT 16
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+
+union display_content_support {
+ unsigned int raw;
+ struct {
+ unsigned int valid_content_type :1;
+ unsigned int game_content :1;
+ unsigned int cinema_content :1;
+ unsigned int photo_content :1;
+ unsigned int graphics_content :1;
+ unsigned int reserved :27;
+ } bits;
+};
+
+struct dc_edid_caps {
+ /* sink identification */
+ uint16_t manufacturer_id;
+ uint16_t product_id;
+ uint32_t serial_number;
+ uint8_t manufacture_week;
+ uint8_t manufacture_year;
+ uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+
+ /* audio caps */
+ uint8_t speaker_flags;
+ uint32_t audio_mode_count;
+ struct dc_cea_audio_mode audio_modes[DC_MAX_AUDIO_DESC_COUNT];
+ uint32_t audio_latency;
+ uint32_t video_latency;
+
+ union display_content_support content_support;
+
+ uint8_t qs_bit;
+ uint8_t qy_bit;
+
+ /*HDMI 2.0 caps*/
+ bool lte_340mcsc_scramble;
+
+ bool edid_hdmi;
+};
+
+struct view {
+ uint32_t width;
+ uint32_t height;
+};
+
+struct dc_mode_flags {
+ /* note: part of refresh rate flag*/
+ uint32_t INTERLACE :1;
+ /* native display timing*/
+ uint32_t NATIVE :1;
+ /* preferred is the recommended mode, one per display */
+ uint32_t PREFERRED :1;
+ /* true if this mode should use reduced blanking timings
+ *_not_ related to the Reduced Blanking adjustment*/
+ uint32_t REDUCED_BLANKING :1;
+ /* note: part of refreshrate flag*/
+ uint32_t VIDEO_OPTIMIZED_RATE :1;
+ /* should be reported to upper layers as mode_flags*/
+ uint32_t PACKED_PIXEL_FORMAT :1;
+ /*< preferred view*/
+ uint32_t PREFERRED_VIEW :1;
+ /* this timing should be used only in tiled mode*/
+ uint32_t TILED_MODE :1;
+ uint32_t DSE_MODE :1;
+ /* Refresh rate divider when Miracast sink is using a
+ different rate than the output display device
+ Must be zero for wired displays and non-zero for
+ Miracast displays*/
+ uint32_t MIRACAST_REFRESH_DIVIDER;
+};
+
+
+enum dc_timing_source {
+ TIMING_SOURCE_UNDEFINED,
+
+ /* explicitly specifed by user, most important*/
+ TIMING_SOURCE_USER_FORCED,
+ TIMING_SOURCE_USER_OVERRIDE,
+ TIMING_SOURCE_CUSTOM,
+ TIMING_SOURCE_EXPLICIT,
+
+ /* explicitly specified by the display device, more important*/
+ TIMING_SOURCE_EDID_CEA_SVD_3D,
+ TIMING_SOURCE_EDID_CEA_SVD_PREFERRED,
+ TIMING_SOURCE_EDID_CEA_SVD_420,
+ TIMING_SOURCE_EDID_DETAILED,
+ TIMING_SOURCE_EDID_ESTABLISHED,
+ TIMING_SOURCE_EDID_STANDARD,
+ TIMING_SOURCE_EDID_CEA_SVD,
+ TIMING_SOURCE_EDID_CVT_3BYTE,
+ TIMING_SOURCE_EDID_4BYTE,
+ TIMING_SOURCE_VBIOS,
+ TIMING_SOURCE_CV,
+ TIMING_SOURCE_TV,
+ TIMING_SOURCE_HDMI_VIC,
+
+ /* implicitly specified by display device, still safe but less important*/
+ TIMING_SOURCE_DEFAULT,
+
+ /* only used for custom base modes */
+ TIMING_SOURCE_CUSTOM_BASE,
+
+ /* these timing might not work, least important*/
+ TIMING_SOURCE_RANGELIMIT,
+ TIMING_SOURCE_OS_FORCED,
+ TIMING_SOURCE_IMPLICIT,
+
+ /* only used by default mode list*/
+ TIMING_SOURCE_BASICMODE,
+
+ TIMING_SOURCE_COUNT
+};
+
+
+struct stereo_3d_features {
+ bool supported ;
+ bool allTimings ;
+ bool cloneMode ;
+ bool scaling ;
+ bool singleFrameSWPacked;
+};
+
+enum dc_timing_support_method {
+ TIMING_SUPPORT_METHOD_UNDEFINED,
+ TIMING_SUPPORT_METHOD_EXPLICIT,
+ TIMING_SUPPORT_METHOD_IMPLICIT,
+ TIMING_SUPPORT_METHOD_NATIVE
+};
+
+struct dc_mode_info {
+ uint32_t pixel_width;
+ uint32_t pixel_height;
+ uint32_t field_rate;
+ /* Vertical refresh rate for progressive modes.
+ * Field rate for interlaced modes.*/
+
+ enum dc_timing_standard timing_standard;
+ enum dc_timing_source timing_source;
+ struct dc_mode_flags flags;
+};
+
+enum dc_power_state {
+ DC_POWER_STATE_ON = 1,
+ DC_POWER_STATE_STANDBY,
+ DC_POWER_STATE_SUSPEND,
+ DC_POWER_STATE_OFF
+};
+
+/* DC PowerStates */
+enum dc_video_power_state {
+ DC_VIDEO_POWER_UNSPECIFIED = 0,
+ DC_VIDEO_POWER_ON = 1,
+ DC_VIDEO_POWER_STANDBY,
+ DC_VIDEO_POWER_SUSPEND,
+ DC_VIDEO_POWER_OFF,
+ DC_VIDEO_POWER_HIBERNATE,
+ DC_VIDEO_POWER_SHUTDOWN,
+ DC_VIDEO_POWER_ULPS, /* BACO or Ultra-Light-Power-State */
+ DC_VIDEO_POWER_AFTER_RESET,
+ DC_VIDEO_POWER_MAXIMUM
+};
+
+enum dc_acpi_cm_power_state {
+ DC_ACPI_CM_POWER_STATE_D0 = 1,
+ DC_ACPI_CM_POWER_STATE_D1 = 2,
+ DC_ACPI_CM_POWER_STATE_D2 = 4,
+ DC_ACPI_CM_POWER_STATE_D3 = 8
+};
+
+enum dc_connection_type {
+ dc_connection_none,
+ dc_connection_single,
+ dc_connection_mst_branch,
+ dc_connection_active_dongle
+};
+
+struct dc_csc_adjustments {
+ struct fixed31_32 contrast;
+ struct fixed31_32 saturation;
+ struct fixed31_32 brightness;
+ struct fixed31_32 hue;
+};
+
+enum {
+ MAX_LANES = 2,
+ MAX_COFUNC_PATH = 6,
+ LAYER_INDEX_PRIMARY = -1,
+};
+
+enum dpcd_downstream_port_max_bpc {
+ DOWN_STREAM_MAX_8BPC = 0,
+ DOWN_STREAM_MAX_10BPC,
+ DOWN_STREAM_MAX_12BPC,
+ DOWN_STREAM_MAX_16BPC
+};
+struct dc_dongle_caps {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ bool extendedCapValid;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
+ bool is_dp_hdmi_s3d_converter;
+ bool is_dp_hdmi_ycbcr422_pass_through;
+ bool is_dp_hdmi_ycbcr420_pass_through;
+ bool is_dp_hdmi_ycbcr422_converter;
+ bool is_dp_hdmi_ycbcr420_converter;
+ uint32_t dp_hdmi_max_bpc;
+ uint32_t dp_hdmi_max_pixel_clk;
+};
+/* Scaling format */
+enum scaling_transformation {
+ SCALING_TRANSFORMATION_UNINITIALIZED,
+ SCALING_TRANSFORMATION_IDENTITY = 0x0001,
+ SCALING_TRANSFORMATION_CENTER_TIMING = 0x0002,
+ SCALING_TRANSFORMATION_FULL_SCREEN_SCALE = 0x0004,
+ SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE = 0x0008,
+ SCALING_TRANSFORMATION_DAL_DECIDE = 0x0010,
+ SCALING_TRANSFORMATION_INVALID = 0x80000000,
+
+ /* Flag the first and last */
+ SCALING_TRANSFORMATION_BEGING = SCALING_TRANSFORMATION_IDENTITY,
+ SCALING_TRANSFORMATION_END =
+ SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE
+};
+
+enum display_content_type {
+ DISPLAY_CONTENT_TYPE_NO_DATA = 0,
+ DISPLAY_CONTENT_TYPE_GRAPHICS = 1,
+ DISPLAY_CONTENT_TYPE_PHOTO = 2,
+ DISPLAY_CONTENT_TYPE_CINEMA = 4,
+ DISPLAY_CONTENT_TYPE_GAME = 8
+};
+
+/* audio*/
+
+union audio_sample_rates {
+ struct sample_rates {
+ uint8_t RATE_32:1;
+ uint8_t RATE_44_1:1;
+ uint8_t RATE_48:1;
+ uint8_t RATE_88_2:1;
+ uint8_t RATE_96:1;
+ uint8_t RATE_176_4:1;
+ uint8_t RATE_192:1;
+ } rate;
+
+ uint8_t all;
+};
+
+struct audio_speaker_flags {
+ uint32_t FL_FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RR:1;
+ uint32_t RC:1;
+ uint32_t FLC_FRC:1;
+ uint32_t RLC_RRC:1;
+ uint32_t SUPPORT_AI:1;
+};
+
+struct audio_speaker_info {
+ uint32_t ALLSPEAKERS:7;
+ uint32_t SUPPORT_AI:1;
+};
+
+
+struct audio_info_flags {
+
+ union {
+
+ struct audio_speaker_flags speaker_flags;
+ struct audio_speaker_info info;
+
+ uint8_t all;
+ };
+};
+
+enum audio_format_code {
+ AUDIO_FORMAT_CODE_FIRST = 1,
+ AUDIO_FORMAT_CODE_LINEARPCM = AUDIO_FORMAT_CODE_FIRST,
+
+ AUDIO_FORMAT_CODE_AC3,
+ /*Layers 1 & 2 */
+ AUDIO_FORMAT_CODE_MPEG1,
+ /*MPEG1 Layer 3 */
+ AUDIO_FORMAT_CODE_MP3,
+ /*multichannel */
+ AUDIO_FORMAT_CODE_MPEG2,
+ AUDIO_FORMAT_CODE_AAC,
+ AUDIO_FORMAT_CODE_DTS,
+ AUDIO_FORMAT_CODE_ATRAC,
+ AUDIO_FORMAT_CODE_1BITAUDIO,
+ AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS,
+ AUDIO_FORMAT_CODE_DTS_HD,
+ AUDIO_FORMAT_CODE_MAT_MLP,
+ AUDIO_FORMAT_CODE_DST,
+ AUDIO_FORMAT_CODE_WMAPRO,
+ AUDIO_FORMAT_CODE_LAST,
+ AUDIO_FORMAT_CODE_COUNT =
+ AUDIO_FORMAT_CODE_LAST - AUDIO_FORMAT_CODE_FIRST
+};
+
+struct audio_mode {
+ /* ucData[0] [6:3] */
+ enum audio_format_code format_code;
+ /* ucData[0] [2:0] */
+ uint8_t channel_count;
+ /* ucData[1] */
+ union audio_sample_rates sample_rates;
+ union {
+ /* for LPCM */
+ uint8_t sample_size;
+ /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz) */
+ uint8_t max_bit_rate;
+ /* for Audio Formats 9-15 */
+ uint8_t vendor_specific;
+ };
+};
+
+struct audio_info {
+ struct audio_info_flags flags;
+ uint32_t video_latency;
+ uint32_t audio_latency;
+ uint32_t display_index;
+ uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+ uint32_t manufacture_id;
+ uint32_t product_id;
+ /* PortID used for ContainerID when defined */
+ uint32_t port_id[2];
+ uint32_t mode_count;
+ /* this field must be last in this struct */
+ struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
+};
+
+struct freesync_context {
+ bool supported;
+ bool enabled;
+ bool active;
+
+ unsigned int min_refresh_in_micro_hz;
+ unsigned int nominal_refresh_in_micro_hz;
+};
+
+struct psr_config {
+ unsigned char psr_version;
+ unsigned int psr_rfb_setup_time;
+ bool psr_exit_link_training_required;
+
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
+union dmcu_psr_level {
+ struct {
+ unsigned int SKIP_CRC:1;
+ unsigned int SKIP_DP_VID_STREAM_DISABLE:1;
+ unsigned int SKIP_PHY_POWER_DOWN:1;
+ unsigned int SKIP_AUX_ACK_CHECK:1;
+ unsigned int SKIP_CRTC_DISABLE:1;
+ unsigned int SKIP_AUX_RFB_CAPTURE_CHECK:1;
+ unsigned int SKIP_SMU_NOTIFICATION:1;
+ unsigned int SKIP_AUTO_STATE_ADVANCE:1;
+ unsigned int DISABLE_PSR_ENTRY_ABORT:1;
+ unsigned int SKIP_SINGLE_OTG_DISABLE:1;
+ unsigned int RESERVED:22;
+ } bits;
+ unsigned int u32all;
+};
+
+enum physical_phy_id {
+ PHYLD_0,
+ PHYLD_1,
+ PHYLD_2,
+ PHYLD_3,
+ PHYLD_4,
+ PHYLD_5,
+ PHYLD_6,
+ PHYLD_7,
+ PHYLD_8,
+ PHYLD_9,
+ PHYLD_COUNT,
+ PHYLD_UNKNOWN = (-1L)
+};
+
+enum phy_type {
+ PHY_TYPE_UNKNOWN = 1,
+ PHY_TYPE_PCIE_PHY = 2,
+ PHY_TYPE_UNIPHY = 3,
+};
+
+struct psr_context {
+ /* ddc line */
+ enum channel_id channel;
+ /* Transmitter id */
+ enum transmitter transmitterId;
+ /* Engine Id is used for Dig Be source select */
+ enum engine_id engineId;
+ /* Controller Id used for Dig Fe source select */
+ enum controller_id controllerId;
+ /* Pcie or Uniphy */
+ enum phy_type phyType;
+ /* Physical PHY Id used by SMU interpretation */
+ enum physical_phy_id smuPhyId;
+ /* Vertical total pixels from crtc timing.
+ * This is used for static screen detection.
+ * ie. If we want to detect half a frame,
+ * we use this to determine the hyst lines.
+ */
+ unsigned int crtcTimingVerticalTotal;
+ /* PSR supported from panel capabilities and
+ * current display configuration
+ */
+ bool psrSupportedDisplayConfig;
+ /* Whether fast link training is supported by the panel */
+ bool psrExitLinkTrainingRequired;
+ /* If RFB setup time is greater than the total VBLANK time,
+ * it is not possible for the sink to capture the video frame
+ * in the same frame the SDP is sent. In this case,
+ * the frame capture indication bit should be set and an extra
+ * static frame should be transmitted to the sink.
+ */
+ bool psrFrameCaptureIndicationReq;
+ /* Set the last possible line SDP may be transmitted without violating
+ * the RFB setup time or entering the active video frame.
+ */
+ unsigned int sdpTransmitLineNumDeadline;
+ /* The VSync rate in Hz used to calculate the
+ * step size for smooth brightness feature
+ */
+ unsigned int vsyncRateHz;
+ unsigned int skipPsrWaitForPllLock;
+ unsigned int numberOfControllers;
+ /* Unused, for future use. To indicate that first changed frame from
+ * state3 shouldn't result in psr_inactive, but rather to perform
+ * an automatic single frame rfb_update.
+ */
+ bool rfb_update_auto_en;
+ /* Number of frame before entering static screen */
+ unsigned int timehyst_frames;
+ /* Partial frames before entering static screen */
+ unsigned int hyst_lines;
+ /* # of repeated AUX transaction attempts to make before
+ * indicating failure to the driver
+ */
+ unsigned int aux_repeats;
+ /* Controls hw blocks to power down during PSR active state */
+ union dmcu_psr_level psr_level;
+ /* Controls additional delay after remote frame capture before
+ * continuing powerd own
+ */
+ unsigned int frame_delay;
+};
+
+struct colorspace_transform {
+ struct fixed31_32 matrix[12];
+ bool enable_remap;
+};
+
+struct csc_transform {
+ uint16_t matrix[12];
+ bool enable_adjustment;
+};
+
+enum i2c_mot_mode {
+ I2C_MOT_UNDEF,
+ I2C_MOT_TRUE,
+ I2C_MOT_FALSE
+};
+
+#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
new file mode 100644
index 000000000000..11401fd8e535
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -0,0 +1,36 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for common 'dce' logic
+# HW object file under this folder follow similar pattern for HW programming
+# - register offset and/or shift + mask stored in the dec_hw struct
+# - register programming through common macros that look up register
+# offset/shift/mask stored in dce_hw struct
+
+DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+
+
+AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
new file mode 100644
index 000000000000..0e0336c5af4e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_abm.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "dc.h"
+
+#include "atom.h"
+
+
+#define TO_DCE_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (abm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name
+
+#define CTX \
+ abm_dce->base.ctx
+
+#define MCP_ABM_LEVEL_SET 0x65
+#define MCP_ABM_PIPE_SET 0x66
+#define MCP_BL_SET 0x67
+
+#define MCP_DISABLE_ABM_IMMEDIATELY 255
+
+struct abm_backlight_registers {
+ unsigned int BL_PWM_CNTL;
+ unsigned int BL_PWM_CNTL2;
+ unsigned int BL_PWM_PERIOD_CNTL;
+ unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+/* registers setting needs to be save and restored used at InitBacklight */
+static struct abm_backlight_registers stored_backlight_registers = {0};
+
+
+static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+{
+ uint64_t current_backlight;
+ uint32_t round_result;
+ uint32_t pwm_period_cntl, bl_period, bl_int_count;
+ uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period_mask, bl_pwm_mask;
+
+ pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+ bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+ REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+ if (bl_int_count == 0)
+ bl_int_count = 16;
+
+ bl_period_mask = (1 << bl_int_count) - 1;
+ bl_period &= bl_period_mask;
+
+ bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+ if (fractional_duty_cycle_en == 0)
+ bl_pwm &= bl_pwm_mask;
+ else
+ bl_pwm &= 0xFFFF;
+
+ current_backlight = bl_pwm << (1 + bl_int_count);
+
+ if (bl_period == 0)
+ bl_period = 0xFFFF;
+
+ current_backlight = div_u64(current_backlight, bl_period);
+ current_backlight = (current_backlight + 1) >> 1;
+
+ current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+ round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+ round_result = (round_result >> (bl_int_count-1)) & 1;
+
+ current_backlight >>= bl_int_count;
+ current_backlight += round_result;
+
+ return (uint32_t)(current_backlight);
+}
+
+static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+{
+ uint32_t backlight_24bit;
+ uint32_t backlight_17bit;
+ uint32_t backlight_16bit;
+ uint32_t masked_pwm_period;
+ uint8_t rounding_bit;
+ uint8_t bit_count;
+ uint64_t active_duty_cycle;
+ uint32_t pwm_period_bitcnt;
+
+ /*
+ * 1. Convert 8-bit value to 17 bit U1.16 format
+ * (1 integer, 16 fractional bits)
+ */
+
+ /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
+ * effectively multiplying value by 256/255
+ * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
+ */
+ backlight_24bit = level * 0x10101;
+
+ /* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
+ * used for rounding, take most significant bit of fraction for
+ * rounding, e.g. for 0xEFEFEF, rounding bit is 1
+ */
+ rounding_bit = (backlight_24bit >> 7) & 1;
+
+ /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
+ * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
+ */
+ backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
+
+ /*
+ * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * active duty cycle <= backlight period
+ */
+
+ /* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+ */
+ REG_GET_2(BL_PWM_PERIOD_CNTL,
+ BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+ BL_PWM_PERIOD, &masked_pwm_period);
+
+ if (pwm_period_bitcnt == 0)
+ bit_count = 16;
+ else
+ bit_count = pwm_period_bitcnt;
+
+ /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+ /* 2.2 Calculate integer active duty cycle required upper 16 bits
+ * contain integer component, lower 16 bits contain fractional component
+ * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
+ */
+ active_duty_cycle = backlight_17bit * masked_pwm_period;
+
+ /* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+ * components shift by bitCount then mask 16 bits and add rounding bit
+ * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
+ */
+ backlight_16bit = active_duty_cycle >> bit_count;
+ backlight_16bit &= 0xFFFF;
+ backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+ /*
+ * 3. Program register with updated value
+ */
+
+ /* 3.1 Lock group 2 backlight registers */
+
+ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+ BL_PWM_GRP1_REG_LOCK, 1);
+
+ // 3.2 Write new active duty cycle
+ REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+ /* 3.3 Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ /* 5.4.4 Wait for pending bit to be cleared */
+ REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+ 1, 10000);
+}
+
+static void dmcu_set_backlight_level(
+ struct dce_abm *abm_dce,
+ uint32_t level,
+ uint32_t frame_ramp,
+ uint32_t controller_id)
+{
+ unsigned int backlight_16_bit = (level * 0x10101) >> 8;
+ unsigned int backlight_17_bit = backlight_16_bit +
+ (((backlight_16_bit & 0x80) >> 7) & 1);
+ uint32_t rampingBoundary = 0xFFFF;
+ uint32_t s2;
+
+ /* set ramping boundary */
+ REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+ /* setDMCUParam_Pipe */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+ MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+ 0, 1, 80000);
+
+ /* setDMCUParam_BL */
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+
+ /* write ramp */
+ if (controller_id == 0)
+ frame_ramp = 0;
+ REG_WRITE(MASTER_COMM_DATA_REG1, frame_ramp);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* UpdateRequestedBacklightLevel */
+ s2 = REG_READ(BIOS_SCRATCH_2);
+
+ s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+static void dce_abm_init(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+}
+
+static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+ return (backlight >> 8);
+}
+
+static bool dce_abm_set_level(struct abm *abm, uint32_t level)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* setDMCUParam_ABMLevel */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+ MASTER_COMM_CMD_REG_BYTE2, level);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
+
+static bool dce_abm_immediate_disable(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* setDMCUParam_ABMLevel */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+ MASTER_COMM_CMD_REG_BYTE2, MCP_DISABLE_ABM_IMMEDIATELY);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
+
+static bool dce_abm_init_backlight(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ uint32_t value;
+
+ /* It must not be 0, so we have to restore them
+ * Bios bug w/a - period resets to zero,
+ * restoring to cache values which is always correct
+ */
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+ if (value == 0 || value == 1) {
+ if (stored_backlight_registers.BL_PWM_CNTL != 0) {
+ REG_WRITE(BL_PWM_CNTL,
+ stored_backlight_registers.BL_PWM_CNTL);
+ REG_WRITE(BL_PWM_CNTL2,
+ stored_backlight_registers.BL_PWM_CNTL2);
+ REG_WRITE(BL_PWM_PERIOD_CNTL,
+ stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+ REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
+ BL_PWM_REF_DIV,
+ stored_backlight_registers.
+ LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ } else {
+ /* TODO: Note: This should not really happen since VBIOS
+ * should have initialized PWM registers on boot.
+ */
+ REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+ REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+ }
+ } else {
+ stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &stored_backlight_registers.
+ LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ }
+
+ /* Have driver take backlight control
+ * TakeBacklightControl(true)
+ */
+ value = REG_READ(BIOS_SCRATCH_2);
+ value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+ REG_WRITE(BIOS_SCRATCH_2, value);
+
+ /* Enable the backlight output */
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+ /* Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ return true;
+}
+
+static bool is_dmcu_initialized(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int dmcu_uc_reset;
+
+ REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
+
+ return !dmcu_uc_reset;
+}
+
+static bool dce_abm_set_backlight_level(
+ struct abm *abm,
+ unsigned int backlight_level,
+ unsigned int frame_ramp,
+ unsigned int controller_id)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT,
+ "New Backlight level: %d (0x%X)\n",
+ backlight_level, backlight_level);
+
+ /* If DMCU is in reset state, DMCU is uninitialized */
+ if (is_dmcu_initialized(abm))
+ dmcu_set_backlight_level(abm_dce,
+ backlight_level,
+ frame_ramp,
+ controller_id);
+ else
+ driver_set_backlight_level(abm_dce, backlight_level);
+
+ return true;
+}
+
+static const struct abm_funcs dce_funcs = {
+ .abm_init = dce_abm_init,
+ .set_abm_level = dce_abm_set_level,
+ .init_backlight = dce_abm_init_backlight,
+ .set_backlight_level = dce_abm_set_backlight_level,
+ .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+ .set_abm_immediate_disable = dce_abm_immediate_disable,
+ .is_dmcu_initialized = is_dmcu_initialized
+};
+
+static void dce_abm_construct(
+ struct dce_abm *abm_dce,
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct abm *base = &abm_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ abm_dce->regs = regs;
+ abm_dce->abm_shift = abm_shift;
+ abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dce_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+ abm_dce->base.funcs = &dce_funcs;
+
+ return &abm_dce->base;
+}
+
+void dce_abm_destroy(struct abm **abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
+
+ kfree(abm_dce);
+ *abm = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
new file mode 100644
index 000000000000..59e909ec88f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_ABM_H_
+#define _DCE_ABM_H_
+
+#include "abm.h"
+
+#define ABM_COMMON_REG_LIST_DCE_BASE() \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(LVTMA_PWRSEQ_REF_DIV), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(DMCU_STATUS)
+
+#define ABM_DCE110_COMMON_REG_LIST() \
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DC_ABM1_HG_SAMPLE_RATE), \
+ SR(DC_ABM1_LS_SAMPLE_RATE), \
+ SR(BL1_PWM_BL_UPDATE_SAMPLE_RATE), \
+ SR(DC_ABM1_HG_MISC_CTRL), \
+ SR(DC_ABM1_IPCSC_COEFF_SEL), \
+ SR(BL1_PWM_CURRENT_ABM_LEVEL), \
+ SR(BL1_PWM_TARGET_ABM_LEVEL), \
+ SR(BL1_PWM_USER_LEVEL), \
+ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
+ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
+ SR(BIOS_SCRATCH_2)
+
+#define ABM_DCN10_REG_LIST(id)\
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ NBIO_SR(BIOS_SCRATCH_2)
+
+#define ABM_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+ ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
+ ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+ ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
+ ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+
+#define ABM_MASK_SH_LIST_DCE110(mask_sh) \
+ ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_VMAX_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+ ABM_SF(BL1_PWM_CURRENT_ABM_LEVEL, \
+ BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+ ABM_SF(BL1_PWM_TARGET_ABM_LEVEL, \
+ BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+ ABM_SF(BL1_PWM_USER_LEVEL, \
+ BL1_PWM_USER_LEVEL, mask_sh), \
+ ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+#define ABM_MASK_SH_LIST_DCN10(mask_sh) \
+ ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_VMAX_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_CURRENT_ABM_LEVEL, \
+ BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_TARGET_ABM_LEVEL, \
+ BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_USER_LEVEL, \
+ BL1_PWM_USER_LEVEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+#define ABM_REG_FIELD_LIST(type) \
+ type ABM1_HG_NUM_OF_BINS_SEL; \
+ type ABM1_HG_VMAX_SEL; \
+ type ABM1_HG_BIN_BITWIDTH_SIZE_SEL; \
+ type ABM1_IPCSC_COEFF_SEL_R; \
+ type ABM1_IPCSC_COEFF_SEL_G; \
+ type ABM1_IPCSC_COEFF_SEL_B; \
+ type BL1_PWM_CURRENT_ABM_LEVEL; \
+ type BL1_PWM_TARGET_ABM_LEVEL; \
+ type BL1_PWM_USER_LEVEL; \
+ type ABM1_LS_MIN_PIXEL_VALUE_THRES; \
+ type ABM1_LS_MAX_PIXEL_VALUE_THRES; \
+ type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
+ type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
+ type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
+ type BL_PWM_PERIOD; \
+ type BL_PWM_PERIOD_BITCNT; \
+ type BL_ACTIVE_INT_FRAC_CNT; \
+ type BL_PWM_FRACTIONAL_EN; \
+ type MASTER_COMM_INTERRUPT; \
+ type MASTER_COMM_CMD_REG_BYTE0; \
+ type MASTER_COMM_CMD_REG_BYTE1; \
+ type MASTER_COMM_CMD_REG_BYTE2; \
+ type BL_PWM_REF_DIV; \
+ type BL_PWM_EN; \
+ type UC_IN_RESET; \
+ type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+ type BL_PWM_GRP1_REG_LOCK; \
+ type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_abm_shift {
+ ABM_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_abm_mask {
+ ABM_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_abm_registers {
+ uint32_t BL_PWM_PERIOD_CNTL;
+ uint32_t BL_PWM_CNTL;
+ uint32_t BL_PWM_CNTL2;
+ uint32_t LVTMA_PWRSEQ_REF_DIV;
+ uint32_t DC_ABM1_HG_SAMPLE_RATE;
+ uint32_t DC_ABM1_LS_SAMPLE_RATE;
+ uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
+ uint32_t DC_ABM1_HG_MISC_CTRL;
+ uint32_t DC_ABM1_IPCSC_COEFF_SEL;
+ uint32_t BL1_PWM_CURRENT_ABM_LEVEL;
+ uint32_t BL1_PWM_TARGET_ABM_LEVEL;
+ uint32_t BL1_PWM_USER_LEVEL;
+ uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES;
+ uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t BIOS_SCRATCH_2;
+ uint32_t DMCU_STATUS;
+ uint32_t BL_PWM_GRP1_REG_LOCK;
+};
+
+struct dce_abm {
+ struct abm base;
+ const struct dce_abm_registers *regs;
+ const struct dce_abm_shift *abm_shift;
+ const struct dce_abm_mask *abm_mask;
+};
+
+struct abm *dce_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask);
+
+void dce_abm_destroy(struct abm **abm);
+
+#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
new file mode 100644
index 000000000000..0df9ecb2710c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dce_audio.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define DCE_AUD(audio)\
+ container_of(audio, struct dce_audio, base)
+
+#define CTX \
+ aud->base.ctx
+#define REG(reg)\
+ (aud->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ aud->shifts->field_name, aud->masks->field_name
+
+#define IX_REG(reg)\
+ ix ## reg
+
+#define AZ_REG_READ(reg_name) \
+ read_indirect_azalia_reg(audio, IX_REG(reg_name))
+
+#define AZ_REG_WRITE(reg_name, value) \
+ write_indirect_azalia_reg(audio, IX_REG(reg_name), value)
+
+static void write_indirect_azalia_reg(struct audio *audio,
+ uint32_t reg_index,
+ uint32_t reg_data)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+ AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
+ AZALIA_ENDPOINT_REG_DATA, reg_data);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
+ reg_index, reg_data);
+}
+
+static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ uint32_t value = 0;
+
+ /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+ AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
+ value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
+ reg_index, value);
+
+ return value;
+}
+
+static bool is_audio_format_supported(
+ const struct audio_info *audio_info,
+ enum audio_format_code audio_format_code,
+ uint32_t *format_index)
+{
+ uint32_t index;
+ uint32_t max_channe_index = 0;
+ bool found = false;
+
+ if (audio_info == NULL)
+ return found;
+
+ /* pass through whole array */
+ for (index = 0; index < audio_info->mode_count; index++) {
+ if (audio_info->modes[index].format_code == audio_format_code) {
+ if (found) {
+ /* format has multiply entries, choose one with
+ * highst number of channels */
+ if (audio_info->modes[index].channel_count >
+ audio_info->modes[max_channe_index].channel_count) {
+ max_channe_index = index;
+ }
+ } else {
+ /* format found, save it's index */
+ found = true;
+ max_channe_index = index;
+ }
+ }
+ }
+
+ /* return index */
+ if (found && format_index != NULL)
+ *format_index = max_channe_index;
+
+ return found;
+}
+
+/*For HDMI, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_hdmi(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ uint32_t samples;
+ uint32_t h_blank;
+ bool limit_freq_to_48_khz = false;
+ bool limit_freq_to_88_2_khz = false;
+ bool limit_freq_to_96_khz = false;
+ bool limit_freq_to_174_4_khz = false;
+
+ /* For two channels supported return whatever sink support,unmodified*/
+ if (channel_count > 2) {
+
+ /* Based on HDMI spec 1.3 Table 7.5 */
+ if ((crtc_info->requested_pixel_clock <= 27000) &&
+ (crtc_info->v_active <= 576) &&
+ !(crtc_info->interlaced) &&
+ !(crtc_info->pixel_repetition == 2 ||
+ crtc_info->pixel_repetition == 4)) {
+ limit_freq_to_48_khz = true;
+
+ } else if ((crtc_info->requested_pixel_clock <= 27000) &&
+ (crtc_info->v_active <= 576) &&
+ (crtc_info->interlaced) &&
+ (crtc_info->pixel_repetition == 2)) {
+ limit_freq_to_88_2_khz = true;
+
+ } else if ((crtc_info->requested_pixel_clock <= 54000) &&
+ (crtc_info->v_active <= 576) &&
+ !(crtc_info->interlaced)) {
+ limit_freq_to_174_4_khz = true;
+ }
+ }
+
+ /* Also do some calculation for the available Audio Bandwidth for the
+ * 8 ch (i.e. for the Layout 1 => ch > 2)
+ */
+ h_blank = crtc_info->h_total - crtc_info->h_active;
+
+ if (crtc_info->pixel_repetition)
+ h_blank *= crtc_info->pixel_repetition;
+
+ /*based on HDMI spec 1.3 Table 7.5 */
+ h_blank -= 58;
+ /*for Control Period */
+ h_blank -= 16;
+
+ samples = h_blank * 10;
+ /* Number of Audio Packets (multiplied by 10) per Line (for 8 ch number
+ * of Audio samples per line multiplied by 10 - Layout 1)
+ */
+ samples /= 32;
+ samples *= crtc_info->v_active;
+ /*Number of samples multiplied by 10, per second */
+ samples *= crtc_info->refresh_rate;
+ /*Number of Audio samples per second */
+ samples /= 10;
+
+ /* @todo do it after deep color is implemented
+ * 8xx - deep color bandwidth scaling
+ * Extra bandwidth is avaliable in deep color b/c link runs faster than
+ * pixel rate. This has the effect of allowing more tmds characters to
+ * be transmitted during blank
+ */
+
+ switch (crtc_info->color_depth) {
+ case COLOR_DEPTH_888:
+ samples *= 4;
+ break;
+ case COLOR_DEPTH_101010:
+ samples *= 5;
+ break;
+ case COLOR_DEPTH_121212:
+ samples *= 6;
+ break;
+ default:
+ samples *= 4;
+ break;
+ }
+
+ samples /= 4;
+
+ /*check limitation*/
+ if (samples < 88200)
+ limit_freq_to_48_khz = true;
+ else if (samples < 96000)
+ limit_freq_to_88_2_khz = true;
+ else if (samples < 176400)
+ limit_freq_to_96_khz = true;
+ else if (samples < 192000)
+ limit_freq_to_174_4_khz = true;
+
+ if (sample_rates != NULL) {
+ /* limit frequencies */
+ if (limit_freq_to_174_4_khz)
+ sample_rates->rate.RATE_192 = 0;
+
+ if (limit_freq_to_96_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ }
+ if (limit_freq_to_88_2_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ sample_rates->rate.RATE_96 = 0;
+ }
+ if (limit_freq_to_48_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ sample_rates->rate.RATE_96 = 0;
+ sample_rates->rate.RATE_88_2 = 0;
+ }
+ }
+}
+
+/*For DP SST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpsst(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ /* do nothing */
+}
+
+/*For DP MST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpmst(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ /* do nothing */
+}
+
+static void check_audio_bandwidth(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ enum signal_type signal,
+ union audio_sample_rates *sample_rates)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ check_audio_bandwidth_hdmi(
+ crtc_info, channel_count, sample_rates);
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ check_audio_bandwidth_dpsst(
+ crtc_info, channel_count, sample_rates);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ check_audio_bandwidth_dpmst(
+ crtc_info, channel_count, sample_rates);
+ break;
+ default:
+ break;
+ }
+}
+
+/* expose/not expose HBR capability to Audio driver */
+static void set_high_bit_rate_capable(
+ struct audio *audio,
+ bool capable)
+{
+ uint32_t value = 0;
+
+ /* set high bit rate audio capable*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR);
+
+ set_reg_field_value(value, capable,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
+ HBR_CAPABLE);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
+}
+
+/* set video latency in in ms/2+1 */
+static void set_video_latency(
+ struct audio *audio,
+ int latency_in_ms)
+{
+ uint32_t value = 0;
+
+ if ((latency_in_ms < 0) || (latency_in_ms > 255))
+ return;
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+ set_reg_field_value(value, latency_in_ms,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ VIDEO_LIPSYNC);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ value);
+}
+
+/* set audio latency in in ms/2+1 */
+static void set_audio_latency(
+ struct audio *audio,
+ int latency_in_ms)
+{
+ uint32_t value = 0;
+
+ if (latency_in_ms < 0)
+ latency_in_ms = 0;
+
+ if (latency_in_ms > 255)
+ latency_in_ms = 255;
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+ set_reg_field_value(value, latency_in_ms,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ AUDIO_LIPSYNC);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ value);
+}
+
+void dce_aud_az_enable(struct audio *audio)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+ uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
+ audio->inst, value);
+}
+
+void dce_aud_az_disable(struct audio *audio)
+{
+ uint32_t value;
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
+ audio->inst, value);
+}
+
+void dce_aud_az_configure(
+ struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ uint32_t speakers = audio_info->flags.info.ALLSPEAKERS;
+ uint32_t value;
+ uint32_t field = 0;
+ enum audio_format_code audio_format_code;
+ uint32_t format_index;
+ uint32_t index;
+ bool is_ac3_supported = false;
+ union audio_sample_rates sample_rate;
+ uint32_t strlen = 0;
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+ /* Speaker Allocation */
+ /*
+ uint32_t value;
+ uint32_t field = 0;*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+
+ set_reg_field_value(value,
+ speakers,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ SPEAKER_ALLOCATION);
+
+ /* LFE_PLAYBACK_LEVEL = LFEPBL
+ * LFEPBL = 0 : Unknown or refer to other information
+ * LFEPBL = 1 : 0dB playback
+ * LFEPBL = 2 : +10dB playback
+ * LFE_BL = 3 : Reserved
+ */
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ LFE_PLAYBACK_LEVEL);
+ /* todo: according to reg spec LFE_PLAYBACK_LEVEL is read only.
+ * why are we writing to it? DCE8 does not write this */
+
+
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION);
+
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION);
+
+ field = get_reg_field_value(value,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ EXTRA_CONNECTION_INFO);
+
+ field &= ~0x1;
+
+ set_reg_field_value(value,
+ field,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ EXTRA_CONNECTION_INFO);
+
+ /* set audio for output signal */
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ set_reg_field_value(value,
+ 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION);
+
+ break;
+
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ set_reg_field_value(value,
+ 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value);
+
+ /* Audio Descriptors */
+ /* pass through all formats */
+ for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT;
+ format_index++) {
+ audio_format_code =
+ (AUDIO_FORMAT_CODE_FIRST + format_index);
+
+ /* those are unsupported, skip programming */
+ if (audio_format_code == AUDIO_FORMAT_CODE_1BITAUDIO ||
+ audio_format_code == AUDIO_FORMAT_CODE_DST)
+ continue;
+
+ value = 0;
+
+ /* check if supported */
+ if (is_audio_format_supported(
+ audio_info, audio_format_code, &index)) {
+ const struct audio_mode *audio_mode =
+ &audio_info->modes[index];
+ union audio_sample_rates sample_rates =
+ audio_mode->sample_rates;
+ uint8_t byte2 = audio_mode->max_bit_rate;
+
+ /* adjust specific properties */
+ switch (audio_format_code) {
+ case AUDIO_FORMAT_CODE_LINEARPCM: {
+ check_audio_bandwidth(
+ crtc_info,
+ audio_mode->channel_count,
+ signal,
+ &sample_rates);
+
+ byte2 = audio_mode->sample_size;
+
+ set_reg_field_value(value,
+ sample_rates.all,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ SUPPORTED_FREQUENCIES_STEREO);
+ }
+ break;
+ case AUDIO_FORMAT_CODE_AC3:
+ is_ac3_supported = true;
+ break;
+ case AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS:
+ case AUDIO_FORMAT_CODE_DTS_HD:
+ case AUDIO_FORMAT_CODE_MAT_MLP:
+ case AUDIO_FORMAT_CODE_DST:
+ case AUDIO_FORMAT_CODE_WMAPRO:
+ byte2 = audio_mode->vendor_specific;
+ break;
+ default:
+ break;
+ }
+
+ /* fill audio format data */
+ set_reg_field_value(value,
+ audio_mode->channel_count - 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ MAX_CHANNELS);
+
+ set_reg_field_value(value,
+ sample_rates.all,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ SUPPORTED_FREQUENCIES);
+
+ set_reg_field_value(value,
+ byte2,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ DESCRIPTOR_BYTE_2);
+ } /* if */
+
+ AZ_REG_WRITE(
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 + format_index,
+ value);
+ } /* for */
+
+ if (is_ac3_supported)
+ /* todo: this reg global. why program global register? */
+ REG_WRITE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS,
+ 0x05);
+
+ /* check for 192khz/8-Ch support for HBR requirements */
+ sample_rate.all = 0;
+ sample_rate.rate.RATE_192 = 1;
+
+ check_audio_bandwidth(
+ crtc_info,
+ 8,
+ signal,
+ &sample_rate);
+
+ set_high_bit_rate_capable(audio, sample_rate.rate.RATE_192);
+
+ /* Audio and Video Lipsync */
+ set_video_latency(audio, audio_info->video_latency);
+ set_audio_latency(audio, audio_info->audio_latency);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->manufacture_id,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ MANUFACTURER_ID);
+
+ set_reg_field_value(value, audio_info->product_id,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ PRODUCT_ID);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ value);
+
+ value = 0;
+
+ /*get display name string length */
+ while (audio_info->display_name[strlen++] != '\0') {
+ if (strlen >=
+ MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS)
+ break;
+ }
+ set_reg_field_value(value, strlen,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ SINK_DESCRIPTION_LEN);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ value);
+
+ /*
+ *write the port ID:
+ *PORT_ID0 = display index
+ *PORT_ID1 = 16bit BDF
+ *(format MSB->LSB: 8bit Bus, 5bit Device, 3bit Function)
+ */
+
+ value = 0;
+
+ set_reg_field_value(value, audio_info->port_id[0],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2,
+ PORT_ID0);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->port_id[1],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3,
+ PORT_ID1);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, value);
+
+ /*write the 18 char monitor string */
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[0],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION0);
+
+ set_reg_field_value(value, audio_info->display_name[1],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION1);
+
+ set_reg_field_value(value, audio_info->display_name[2],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION2);
+
+ set_reg_field_value(value, audio_info->display_name[3],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION3);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[4],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION4);
+
+ set_reg_field_value(value, audio_info->display_name[5],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION5);
+
+ set_reg_field_value(value, audio_info->display_name[6],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION6);
+
+ set_reg_field_value(value, audio_info->display_name[7],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION7);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[8],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION8);
+
+ set_reg_field_value(value, audio_info->display_name[9],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION9);
+
+ set_reg_field_value(value, audio_info->display_name[10],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION10);
+
+ set_reg_field_value(value, audio_info->display_name[11],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION11);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[12],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION12);
+
+ set_reg_field_value(value, audio_info->display_name[13],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION13);
+
+ set_reg_field_value(value, audio_info->display_name[14],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION14);
+
+ set_reg_field_value(value, audio_info->display_name[15],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION15);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[16],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+ DESCRIPTION16);
+
+ set_reg_field_value(value, audio_info->display_name[17],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+ DESCRIPTION17);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
+}
+
+/*
+* todo: wall clk related functionality probably belong to clock_src.
+*/
+
+/* search pixel clock value for Azalia HDMI Audio */
+static void get_azalia_clock_info_hdmi(
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct azalia_clock_info *azalia_clock_info)
+{
+ /* audio_dto_phase= 24 * 10,000;
+ * 24MHz in [100Hz] units */
+ azalia_clock_info->audio_dto_phase =
+ 24 * 10000;
+
+ /* audio_dto_module = PCLKFrequency * 10,000;
+ * [khz] -> [100Hz] */
+ azalia_clock_info->audio_dto_module =
+ actual_pixel_clock_in_khz * 10;
+}
+
+static void get_azalia_clock_info_dp(
+ uint32_t requested_pixel_clock_in_khz,
+ const struct audio_pll_info *pll_info,
+ struct azalia_clock_info *azalia_clock_info)
+{
+ /* Reported dpDtoSourceClockInkhz value for
+ * DCE8 already adjusted for SS, do not need any
+ * adjustment here anymore
+ */
+
+ /*audio_dto_phase = 24 * 10,000;
+ * 24MHz in [100Hz] units */
+ azalia_clock_info->audio_dto_phase = 24 * 10000;
+
+ /*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
+ * [khz] ->[100Hz] */
+ azalia_clock_info->audio_dto_module =
+ pll_info->dp_dto_source_clock_in_khz * 10;
+}
+
+void dce_aud_wall_dto_setup(
+ struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ struct azalia_clock_info clock_info = { 0 };
+
+ if (dc_is_hdmi_signal(signal)) {
+ uint32_t src_sel;
+
+ /*DTO0 Programming goal:
+ -generate 24MHz, 128*Fs from 24MHz
+ -use DTO0 when an active HDMI port is connected
+ (optionally a DP is connected) */
+
+ /* calculate DTO settings */
+ get_azalia_clock_info_hdmi(
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ &clock_info);
+
+ dm_logger_write(audio->ctx->logger, LOG_HW_AUDIO,\
+ "\n%s:Input::requested_pixel_clock = %d"\
+ "calculated_pixel_clock =%d\n"\
+ "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\
+ crtc_info->requested_pixel_clock,\
+ crtc_info->calculated_pixel_clock,\
+ clock_info.audio_dto_module,\
+ clock_info.audio_dto_phase);
+
+ /* On TN/SI, Program DTO source select and DTO select before
+ programming DTO modulo and DTO phase. These bits must be
+ programmed first, otherwise there will be no HDMI audio at boot
+ up. This is a HW sequence change (different from old ASICs).
+ Caution when changing this programming sequence.
+
+ HDMI enabled, using DTO0
+ program master CRTC for DTO0 */
+ src_sel = pll_info->dto_source - DTO_SOURCE_ID0;
+ REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel,
+ DCCG_AUDIO_DTO_SEL, 0);
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO0_MODULE,
+ DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO0_PHASE,
+ DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase);
+ } else {
+ /*DTO1 Programming goal:
+ -generate 24MHz, 512*Fs, 128*Fs from 24MHz
+ -default is to used DTO1, and switch to DTO0 when an audio
+ master HDMI port is connected
+ -use as default for DP
+
+ calculate DTO settings */
+ get_azalia_clock_info_dp(
+ crtc_info->requested_pixel_clock,
+ pll_info,
+ &clock_info);
+
+ /* Program DTO select before programming DTO modulo and DTO
+ phase. default to use DTO1 */
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 1);
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 1);
+ /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1)
+ * Select 512fs for DP TODO: web register definition
+ * does not match register header file
+ * DCE11 version it's commented out while DCE8 it's set to 1
+ */
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO1_MODULE,
+ DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
+ DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
+
+ }
+}
+
+static bool dce_aud_endpoint_valid(struct audio *audio)
+{
+ uint32_t value;
+ uint32_t port_connectivity;
+
+ value = AZ_REG_READ(
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+
+ port_connectivity = get_reg_field_value(value,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
+ PORT_CONNECTIVITY);
+
+ return !(port_connectivity == 1);
+}
+
+/* initialize HW state */
+void dce_aud_hw_init(
+ struct audio *audio)
+{
+ uint32_t value;
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ /* we only need to program the following registers once, so we only do
+ it for the inst 0*/
+ if (audio->inst != 0)
+ return;
+
+ /* Suport R5 - 32khz
+ * Suport R6 - 44.1khz
+ * Suport R7 - 48khz
+ */
+ /*disable clock gating before write to endpoint register*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ REG_UPDATE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES,
+ AUDIO_RATE_CAPABILITIES, 0x70);
+
+ /*Keep alive bit to verify HW block in BU. */
+ REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
+ CLKSTOP, 1,
+ EPSS, 1);
+}
+
+static const struct audio_funcs funcs = {
+ .endpoint_valid = dce_aud_endpoint_valid,
+ .hw_init = dce_aud_hw_init,
+ .wall_dto_setup = dce_aud_wall_dto_setup,
+ .az_enable = dce_aud_az_enable,
+ .az_disable = dce_aud_az_disable,
+ .az_configure = dce_aud_az_configure,
+ .destroy = dce_aud_destroy,
+};
+
+void dce_aud_destroy(struct audio **audio)
+{
+ struct dce_audio *aud = DCE_AUD(*audio);
+
+ kfree(aud);
+ *audio = NULL;
+}
+
+struct audio *dce_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_aduio_mask *masks
+ )
+{
+ struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+
+ if (audio == NULL) {
+ ASSERT_CRITICAL(audio);
+ return NULL;
+ }
+
+ audio->base.ctx = ctx;
+ audio->base.inst = inst;
+ audio->base.funcs = &funcs;
+
+ audio->regs = reg;
+ audio->shifts = shifts;
+ audio->masks = masks;
+
+ return &audio->base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
new file mode 100644
index 000000000000..0dc5ff137c7a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_AUDIO_DCE_110_H__
+#define __DAL_AUDIO_DCE_110_H__
+
+#include "audio.h"
+
+#define AUD_COMMON_REG_LIST(id)\
+ SRI(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id),\
+ SRI(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES),\
+ SR(DCCG_AUDIO_DTO_SOURCE),\
+ SR(DCCG_AUDIO_DTO0_MODULE),\
+ SR(DCCG_AUDIO_DTO0_PHASE),\
+ SR(DCCG_AUDIO_DTO1_MODULE),\
+ SR(DCCG_AUDIO_DTO1_PHASE)
+
+
+ /* set field name */
+#define SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+
+#define AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO2_USE_512FBR_DTO, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, EPSS, mask_sh)
+
+#define AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh),\
+ SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh)
+
+
+struct dce_audio_registers {
+ uint32_t AZALIA_F0_CODEC_ENDPOINT_INDEX;
+ uint32_t AZALIA_F0_CODEC_ENDPOINT_DATA;
+
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS;
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES;
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES;
+
+ uint32_t DCCG_AUDIO_DTO_SOURCE;
+ uint32_t DCCG_AUDIO_DTO0_MODULE;
+ uint32_t DCCG_AUDIO_DTO0_PHASE;
+ uint32_t DCCG_AUDIO_DTO1_MODULE;
+ uint32_t DCCG_AUDIO_DTO1_PHASE;
+
+ uint32_t AUDIO_RATE_CAPABILITIES;
+};
+
+struct dce_audio_shift {
+ uint8_t AZALIA_ENDPOINT_REG_INDEX;
+ uint8_t AZALIA_ENDPOINT_REG_DATA;
+
+ uint8_t AUDIO_RATE_CAPABILITIES;
+ uint8_t CLKSTOP;
+ uint8_t EPSS;
+
+ uint8_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+ uint8_t DCCG_AUDIO_DTO_SEL;
+ uint8_t DCCG_AUDIO_DTO0_MODULE;
+ uint8_t DCCG_AUDIO_DTO0_PHASE;
+ uint8_t DCCG_AUDIO_DTO1_MODULE;
+ uint8_t DCCG_AUDIO_DTO1_PHASE;
+ uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
+};
+
+struct dce_aduio_mask {
+ uint32_t AZALIA_ENDPOINT_REG_INDEX;
+ uint32_t AZALIA_ENDPOINT_REG_DATA;
+
+ uint32_t AUDIO_RATE_CAPABILITIES;
+ uint32_t CLKSTOP;
+ uint32_t EPSS;
+
+ uint32_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+ uint32_t DCCG_AUDIO_DTO_SEL;
+ uint32_t DCCG_AUDIO_DTO0_MODULE;
+ uint32_t DCCG_AUDIO_DTO0_PHASE;
+ uint32_t DCCG_AUDIO_DTO1_MODULE;
+ uint32_t DCCG_AUDIO_DTO1_PHASE;
+ uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
+};
+
+struct dce_audio {
+ struct audio base;
+ const struct dce_audio_registers *regs;
+ const struct dce_audio_shift *shifts;
+ const struct dce_aduio_mask *masks;
+};
+
+struct audio *dce_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_aduio_mask *masks);
+
+void dce_aud_destroy(struct audio **audio);
+
+void dce_aud_hw_init(struct audio *audio);
+
+void dce_aud_az_enable(struct audio *audio);
+void dce_aud_az_disable(struct audio *audio);
+
+void dce_aud_az_configure(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info);
+
+void dce_aud_wall_dto_setup(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info);
+
+#endif /*__DAL_AUDIO_DCE_110_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
new file mode 100644
index 000000000000..31280d252753
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -0,0 +1,1383 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "dc_types.h"
+#include "core_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+
+#include "dce_clock_source.h"
+
+#include "reg_helper.h"
+
+#define REG(reg)\
+ (clk_src->regs->reg)
+
+#define CTX \
+ clk_src->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
+
+#define FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM 6
+#define CALC_PLL_CLK_SRC_ERR_TOLERANCE 1
+#define MAX_PLL_CALC_ERROR 0xFFFFFFFF
+
+static const struct spread_spectrum_data *get_ss_data_entry(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal,
+ uint32_t pix_clk_khz)
+{
+
+ uint32_t entrys_num;
+ uint32_t i;
+ struct spread_spectrum_data *ss_parm = NULL;
+ struct spread_spectrum_data *ret = NULL;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ ss_parm = clk_src->dvi_ss_params;
+ entrys_num = clk_src->dvi_ss_params_cnt;
+ break;
+
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ ss_parm = clk_src->hdmi_ss_params;
+ entrys_num = clk_src->hdmi_ss_params_cnt;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_VIRTUAL:
+ ss_parm = clk_src->dp_ss_params;
+ entrys_num = clk_src->dp_ss_params_cnt;
+ break;
+
+ default:
+ ss_parm = NULL;
+ entrys_num = 0;
+ break;
+ }
+
+ if (ss_parm == NULL)
+ return ret;
+
+ for (i = 0; i < entrys_num; ++i, ++ss_parm) {
+ if (ss_parm->freq_range_khz >= pix_clk_khz) {
+ ret = ss_parm;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+* Function: calculate_fb_and_fractional_fb_divider
+*
+* * DESCRIPTION: Calculates feedback and fractional feedback dividers values
+*
+*PARAMETERS:
+* targetPixelClock Desired frequency in 10 KHz
+* ref_divider Reference divider (already known)
+* postDivider Post Divider (already known)
+* feedback_divider_param Pointer where to store
+* calculated feedback divider value
+* fract_feedback_divider_param Pointer where to store
+* calculated fract feedback divider value
+*
+*RETURNS:
+* It fills the locations pointed by feedback_divider_param
+* and fract_feedback_divider_param
+* It returns - true if feedback divider not 0
+* - false should never happen)
+*/
+static bool calculate_fb_and_fractional_fb_divider(
+ struct calc_pll_clock_source *calc_pll_cs,
+ uint32_t target_pix_clk_khz,
+ uint32_t ref_divider,
+ uint32_t post_divider,
+ uint32_t *feedback_divider_param,
+ uint32_t *fract_feedback_divider_param)
+{
+ uint64_t feedback_divider;
+
+ feedback_divider =
+ (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+ feedback_divider *= 10;
+ /* additional factor, since we divide by 10 afterwards */
+ feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
+ feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz);
+
+/*Round to the number of precision
+ * The following code replace the old code (ullfeedbackDivider + 5)/10
+ * for example if the difference between the number
+ * of fractional feedback decimal point and the fractional FB Divider precision
+ * is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
+
+ feedback_divider += (uint64_t)
+ (5 * calc_pll_cs->fract_fb_divider_precision_factor);
+ feedback_divider =
+ div_u64(feedback_divider,
+ calc_pll_cs->fract_fb_divider_precision_factor * 10);
+ feedback_divider *= (uint64_t)
+ (calc_pll_cs->fract_fb_divider_precision_factor);
+
+ *feedback_divider_param =
+ div_u64_rem(
+ feedback_divider,
+ calc_pll_cs->fract_fb_divider_factor,
+ fract_feedback_divider_param);
+
+ if (*feedback_divider_param != 0)
+ return true;
+ return false;
+}
+
+/**
+*calc_fb_divider_checking_tolerance
+*
+*DESCRIPTION: Calculates Feedback and Fractional Feedback divider values
+* for passed Reference and Post divider, checking for tolerance.
+*PARAMETERS:
+* pll_settings Pointer to structure
+* ref_divider Reference divider (already known)
+* postDivider Post Divider (already known)
+* tolerance Tolerance for Calculated Pixel Clock to be within
+*
+*RETURNS:
+* It fills the PLLSettings structure with PLL Dividers values
+* if calculated values are within required tolerance
+* It returns - true if eror is within tolerance
+* - false if eror is not within tolerance
+*/
+static bool calc_fb_divider_checking_tolerance(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings,
+ uint32_t ref_divider,
+ uint32_t post_divider,
+ uint32_t tolerance)
+{
+ uint32_t feedback_divider;
+ uint32_t fract_feedback_divider;
+ uint32_t actual_calculated_clock_khz;
+ uint32_t abs_err;
+ uint64_t actual_calc_clk_khz;
+
+ calculate_fb_and_fractional_fb_divider(
+ calc_pll_cs,
+ pll_settings->adjusted_pix_clk,
+ ref_divider,
+ post_divider,
+ &feedback_divider,
+ &fract_feedback_divider);
+
+ /*Actual calculated value*/
+ actual_calc_clk_khz = (uint64_t)(feedback_divider *
+ calc_pll_cs->fract_fb_divider_factor) +
+ fract_feedback_divider;
+ actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
+ actual_calc_clk_khz =
+ div_u64(actual_calc_clk_khz,
+ ref_divider * post_divider *
+ calc_pll_cs->fract_fb_divider_factor);
+
+ actual_calculated_clock_khz = (uint32_t)(actual_calc_clk_khz);
+
+ abs_err = (actual_calculated_clock_khz >
+ pll_settings->adjusted_pix_clk)
+ ? actual_calculated_clock_khz -
+ pll_settings->adjusted_pix_clk
+ : pll_settings->adjusted_pix_clk -
+ actual_calculated_clock_khz;
+
+ if (abs_err <= tolerance) {
+ /*found good values*/
+ pll_settings->reference_freq = calc_pll_cs->ref_freq_khz;
+ pll_settings->reference_divider = ref_divider;
+ pll_settings->feedback_divider = feedback_divider;
+ pll_settings->fract_feedback_divider = fract_feedback_divider;
+ pll_settings->pix_clk_post_divider = post_divider;
+ pll_settings->calculated_pix_clk =
+ actual_calculated_clock_khz;
+ pll_settings->vco_freq =
+ actual_calculated_clock_khz * post_divider;
+ return true;
+ }
+ return false;
+}
+
+static bool calc_pll_dividers_in_range(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings,
+ uint32_t min_ref_divider,
+ uint32_t max_ref_divider,
+ uint32_t min_post_divider,
+ uint32_t max_post_divider,
+ uint32_t err_tolerance)
+{
+ uint32_t ref_divider;
+ uint32_t post_divider;
+ uint32_t tolerance;
+
+/* This is err_tolerance / 10000 = 0.0025 - acceptable error of 0.25%
+ * This is errorTolerance / 10000 = 0.0001 - acceptable error of 0.01%*/
+ tolerance = (pll_settings->adjusted_pix_clk * err_tolerance) /
+ 10000;
+ if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE)
+ tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE;
+
+ for (
+ post_divider = max_post_divider;
+ post_divider >= min_post_divider;
+ --post_divider) {
+ for (
+ ref_divider = min_ref_divider;
+ ref_divider <= max_ref_divider;
+ ++ref_divider) {
+ if (calc_fb_divider_checking_tolerance(
+ calc_pll_cs,
+ pll_settings,
+ ref_divider,
+ post_divider,
+ tolerance)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static uint32_t calculate_pixel_clock_pll_dividers(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings)
+{
+ uint32_t err_tolerance;
+ uint32_t min_post_divider;
+ uint32_t max_post_divider;
+ uint32_t min_ref_divider;
+ uint32_t max_ref_divider;
+
+ if (pll_settings->adjusted_pix_clk == 0) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Bad requested pixel clock", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+/* 1) Find Post divider ranges */
+ if (pll_settings->pix_clk_post_divider) {
+ min_post_divider = pll_settings->pix_clk_post_divider;
+ max_post_divider = pll_settings->pix_clk_post_divider;
+ } else {
+ min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider;
+ if (min_post_divider * pll_settings->adjusted_pix_clk <
+ calc_pll_cs->min_vco_khz) {
+ min_post_divider = calc_pll_cs->min_vco_khz /
+ pll_settings->adjusted_pix_clk;
+ if ((min_post_divider *
+ pll_settings->adjusted_pix_clk) <
+ calc_pll_cs->min_vco_khz)
+ min_post_divider++;
+ }
+
+ max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider;
+ if (max_post_divider * pll_settings->adjusted_pix_clk
+ > calc_pll_cs->max_vco_khz)
+ max_post_divider = calc_pll_cs->max_vco_khz /
+ pll_settings->adjusted_pix_clk;
+ }
+
+/* 2) Find Reference divider ranges
+ * When SS is enabled, or for Display Port even without SS,
+ * pll_settings->referenceDivider is not zero.
+ * So calculate PPLL FB and fractional FB divider
+ * using the passed reference divider*/
+
+ if (pll_settings->reference_divider) {
+ min_ref_divider = pll_settings->reference_divider;
+ max_ref_divider = pll_settings->reference_divider;
+ } else {
+ min_ref_divider = ((calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->max_pll_input_freq_khz)
+ > calc_pll_cs->min_pll_ref_divider)
+ ? calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->max_pll_input_freq_khz
+ : calc_pll_cs->min_pll_ref_divider;
+
+ max_ref_divider = ((calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->min_pll_input_freq_khz)
+ < calc_pll_cs->max_pll_ref_divider)
+ ? calc_pll_cs->ref_freq_khz /
+ calc_pll_cs->min_pll_input_freq_khz
+ : calc_pll_cs->max_pll_ref_divider;
+ }
+
+/* If some parameters are invalid we could have scenario when "min">"max"
+ * which produced endless loop later.
+ * We should investigate why we get the wrong parameters.
+ * But to follow the similar logic when "adjustedPixelClock" is set to be 0
+ * it is better to return here than cause system hang/watchdog timeout later.
+ * ## SVS Wed 15 Jul 2009 */
+
+ if (min_post_divider > max_post_divider) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Post divider range is invalid", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+ if (min_ref_divider > max_ref_divider) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Reference divider range is invalid", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+/* 3) Try to find PLL dividers given ranges
+ * starting with minimal error tolerance.
+ * Increase error tolerance until PLL dividers found*/
+ err_tolerance = MAX_PLL_CALC_ERROR;
+
+ while (!calc_pll_dividers_in_range(
+ calc_pll_cs,
+ pll_settings,
+ min_ref_divider,
+ max_ref_divider,
+ min_post_divider,
+ max_post_divider,
+ err_tolerance))
+ err_tolerance += (err_tolerance > 10)
+ ? (err_tolerance / 10)
+ : 1;
+
+ return err_tolerance;
+}
+
+static bool pll_adjust_pix_clk(
+ struct dce110_clk_src *clk_src,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t actual_pix_clk_khz = 0;
+ uint32_t requested_clk_khz = 0;
+ struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = {
+ 0 };
+ enum bp_result bp_result;
+ switch (pix_clk_params->signal_type) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ requested_clk_khz = pix_clk_params->requested_pix_clk;
+ if (pix_clk_params->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ requested_clk_khz = (requested_clk_khz * 5) >> 2;
+ break; /* x1.25*/
+ case COLOR_DEPTH_121212:
+ requested_clk_khz = (requested_clk_khz * 6) >> 2;
+ break; /* x1.5*/
+ case COLOR_DEPTH_161616:
+ requested_clk_khz = requested_clk_khz * 2;
+ break; /* x2.0*/
+ default:
+ break;
+ }
+ }
+ actual_pix_clk_khz = requested_clk_khz;
+ }
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ requested_clk_khz = pix_clk_params->requested_sym_clk;
+ actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+ break;
+
+ default:
+ requested_clk_khz = pix_clk_params->requested_pix_clk;
+ actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+ break;
+ }
+
+ bp_adjust_pixel_clock_params.pixel_clock = requested_clk_khz;
+ bp_adjust_pixel_clock_params.
+ encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type;
+ bp_adjust_pixel_clock_params.
+ ss_enable = pix_clk_params->flags.ENABLE_SS;
+ bp_result = clk_src->bios->funcs->adjust_pixel_clock(
+ clk_src->bios, &bp_adjust_pixel_clock_params);
+ if (bp_result == BP_RESULT_OK) {
+ pll_settings->actual_pix_clk = actual_pix_clk_khz;
+ pll_settings->adjusted_pix_clk =
+ bp_adjust_pixel_clock_params.adjusted_pixel_clock;
+ pll_settings->reference_divider =
+ bp_adjust_pixel_clock_params.reference_divider;
+ pll_settings->pix_clk_post_divider =
+ bp_adjust_pixel_clock_params.pixel_clock_post_divider;
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * Calculate PLL Dividers for given Clock Value.
+ * First will call VBIOS Adjust Exec table to check if requested Pixel clock
+ * will be Adjusted based on usage.
+ * Then it will calculate PLL Dividers for this Adjusted clock using preferred
+ * method (Maximum VCO frequency).
+ *
+ * \return
+ * Calculation error in units of 0.01%
+ */
+
+static uint32_t dce110_get_pix_clk_dividers_helper (
+ struct dce110_clk_src *clk_src,
+ struct pll_settings *pll_settings,
+ struct pixel_clk_params *pix_clk_params)
+{
+ uint32_t field = 0;
+ uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+ /* Check if reference clock is external (not pcie/xtalin)
+ * HW Dce80 spec:
+ * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
+ * 04 - HSYNCA, 05 - GENLK_CLK, 06 - PCIE_REFCLK, 07 - DVOCLK0 */
+ REG_GET(PLL_CNTL, PLL_REF_DIV_SRC, &field);
+ pll_settings->use_external_clk = (field > 1);
+
+ /* VBIOS by default enables DP SS (spread on IDCLK) for DCE 8.0 always
+ * (we do not care any more from SI for some older DP Sink which
+ * does not report SS support, no known issues) */
+ if ((pix_clk_params->flags.ENABLE_SS) ||
+ (dc_is_dp_signal(pix_clk_params->signal_type))) {
+
+ const struct spread_spectrum_data *ss_data = get_ss_data_entry(
+ clk_src,
+ pix_clk_params->signal_type,
+ pll_settings->adjusted_pix_clk);
+
+ if (NULL != ss_data)
+ pll_settings->ss_percentage = ss_data->percentage;
+ }
+
+ /* Check VBIOS AdjustPixelClock Exec table */
+ if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
+ /* Should never happen, ASSERT and fill up values to be able
+ * to continue. */
+ dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ "%s: Failed to adjust pixel clock!!", __func__);
+ pll_settings->actual_pix_clk =
+ pix_clk_params->requested_pix_clk;
+ pll_settings->adjusted_pix_clk =
+ pix_clk_params->requested_pix_clk;
+
+ if (dc_is_dp_signal(pix_clk_params->signal_type))
+ pll_settings->adjusted_pix_clk = 100000;
+ }
+
+ /* Calculate Dividers */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ /*Calculate Dividers by HDMI object, no SS case or SS case */
+ pll_calc_error =
+ calculate_pixel_clock_pll_dividers(
+ &clk_src->calc_pll_hdmi,
+ pll_settings);
+ else
+ /*Calculate Dividers by default object, no SS case or SS case */
+ pll_calc_error =
+ calculate_pixel_clock_pll_dividers(
+ &clk_src->calc_pll,
+ pll_settings);
+
+ return pll_calc_error;
+}
+
+static void dce112_get_pix_clk_dividers_helper (
+ struct dce110_clk_src *clk_src,
+ struct pll_settings *pll_settings,
+ struct pixel_clk_params *pix_clk_params)
+{
+ uint32_t actualPixelClockInKHz;
+
+ actualPixelClockInKHz = pix_clk_params->requested_pix_clk;
+ /* Calculate Dividers */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ actualPixelClockInKHz = (actualPixelClockInKHz * 5) >> 2;
+ break;
+ case COLOR_DEPTH_121212:
+ actualPixelClockInKHz = (actualPixelClockInKHz * 6) >> 2;
+ break;
+ case COLOR_DEPTH_161616:
+ actualPixelClockInKHz = actualPixelClockInKHz * 2;
+ break;
+ default:
+ break;
+ }
+ }
+ pll_settings->actual_pix_clk = actualPixelClockInKHz;
+ pll_settings->adjusted_pix_clk = actualPixelClockInKHz;
+ pll_settings->calculated_pix_clk = pix_clk_params->requested_pix_clk;
+}
+
+static uint32_t dce110_get_pix_clk_dividers(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+ uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+ if (pix_clk_params == NULL || pll_settings == NULL
+ || pix_clk_params->requested_pix_clk == 0) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ "%s: Invalid parameters!!\n", __func__);
+ return pll_calc_error;
+ }
+
+ memset(pll_settings, 0, sizeof(*pll_settings));
+
+ if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
+ cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
+ pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz;
+ pll_settings->calculated_pix_clk = clk_src->ext_clk_khz;
+ pll_settings->actual_pix_clk =
+ pix_clk_params->requested_pix_clk;
+ return 0;
+ }
+
+ switch (cs->ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ pll_calc_error =
+ dce110_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+ break;
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_12_0:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+#endif
+
+ dce112_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+ break;
+ default:
+ break;
+ }
+
+ return pll_calc_error;
+}
+
+static uint32_t dce110_get_pll_pixel_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dc *dc_core = cs->ctx->dc;
+ struct dc_state *context = dc_core->current_state;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+
+ /* This function need separate to different DCE version, before separate, just use pixel clock */
+ return pipe_ctx->stream->phy_pix_clk;
+
+}
+
+static uint32_t dce110_get_dp_pixel_rate_from_combo_phy_pll(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dc *dc_core = cs->ctx->dc;
+ struct dc_state *context = dc_core->current_state;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+
+ /* This function need separate to different DCE version, before separate, just use pixel clock */
+ return pipe_ctx->stream->phy_pix_clk;
+}
+
+static uint32_t dce110_get_d_to_pixel_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+ int dto_enabled = 0;
+ struct fixed31_32 pix_rate;
+
+ REG_GET(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, &dto_enabled);
+
+ if (dto_enabled) {
+ uint32_t phase = 0;
+ uint32_t modulo = 0;
+ REG_GET(PHASE[inst], DP_DTO0_PHASE, &phase);
+ REG_GET(MODULO[inst], DP_DTO0_MODULO, &modulo);
+
+ if (modulo == 0) {
+ return 0;
+ }
+
+ pix_rate = dal_fixed31_32_from_int(clk_src->ref_freq_khz);
+ pix_rate = dal_fixed31_32_mul_int(pix_rate, 1000);
+ pix_rate = dal_fixed31_32_mul_int(pix_rate, phase);
+ pix_rate = dal_fixed31_32_div_int(pix_rate, modulo);
+
+ return dal_fixed31_32_round(pix_rate);
+ } else {
+ return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings);
+ }
+}
+
+static uint32_t dce110_get_pix_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t pix_rate = 0;
+ switch (pix_clk_params->signal_type) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_VIRTUAL:
+ pix_rate = dce110_get_d_to_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ default:
+ pix_rate = dce110_get_pll_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+ break;
+ }
+
+ return pix_rate;
+}
+
+static bool disable_spread_spectrum(struct dce110_clk_src *clk_src)
+{
+ enum bp_result result;
+ struct bp_spread_spectrum_parameters bp_ss_params = {0};
+
+ bp_ss_params.pll_id = clk_src->base.id;
+
+ /*Call ASICControl to process ATOMBIOS Exec table*/
+ result = clk_src->bios->funcs->enable_spread_spectrum_on_ppll(
+ clk_src->bios,
+ &bp_ss_params,
+ false);
+
+ return result == BP_RESULT_OK;
+}
+
+static bool calculate_ss(
+ const struct pll_settings *pll_settings,
+ const struct spread_spectrum_data *ss_data,
+ struct delta_sigma_data *ds_data)
+{
+ struct fixed32_32 fb_div;
+ struct fixed32_32 ss_amount;
+ struct fixed32_32 ss_nslip_amount;
+ struct fixed32_32 ss_ds_frac_amount;
+ struct fixed32_32 ss_step_size;
+ struct fixed32_32 modulation_time;
+
+ if (ds_data == NULL)
+ return false;
+ if (ss_data == NULL)
+ return false;
+ if (ss_data->percentage == 0)
+ return false;
+ if (pll_settings == NULL)
+ return false;
+
+ memset(ds_data, 0, sizeof(struct delta_sigma_data));
+
+ /* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/
+ /* 6 decimal point support in fractional feedback divider */
+ fb_div = dal_fixed32_32_from_fraction(
+ pll_settings->fract_feedback_divider, 1000000);
+ fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
+
+ ds_data->ds_frac_amount = 0;
+ /*spreadSpectrumPercentage is in the unit of .01%,
+ * so have to divided by 100 * 100*/
+ ss_amount = dal_fixed32_32_mul(
+ fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
+ 100 * ss_data->percentage_divider));
+ ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
+
+ ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
+ dal_fixed32_32_from_int(ds_data->feedback_amount));
+ ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
+ ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
+
+ ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
+ dal_fixed32_32_from_int(ds_data->nfrac_amount));
+ ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
+ ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
+
+ /* compute SS_STEP_SIZE_DSFRAC */
+ modulation_time = dal_fixed32_32_from_fraction(
+ pll_settings->reference_freq * 1000,
+ pll_settings->reference_divider * ss_data->modulation_freq_hz);
+
+ if (ss_data->flags.CENTER_SPREAD)
+ modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
+ else
+ modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
+
+ ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
+ /* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
+ ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
+ ds_data->ds_frac_size = dal_fixed32_32_floor(ss_step_size);
+
+ return true;
+}
+
+static bool enable_spread_spectrum(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal, struct pll_settings *pll_settings)
+{
+ struct bp_spread_spectrum_parameters bp_params = {0};
+ struct delta_sigma_data d_s_data;
+ const struct spread_spectrum_data *ss_data = NULL;
+
+ ss_data = get_ss_data_entry(
+ clk_src,
+ signal,
+ pll_settings->calculated_pix_clk);
+
+/* Pixel clock PLL has been programmed to generate desired pixel clock,
+ * now enable SS on pixel clock */
+/* TODO is it OK to return true not doing anything ??*/
+ if (ss_data != NULL && pll_settings->ss_percentage != 0) {
+ if (calculate_ss(pll_settings, ss_data, &d_s_data)) {
+ bp_params.ds.feedback_amount =
+ d_s_data.feedback_amount;
+ bp_params.ds.nfrac_amount =
+ d_s_data.nfrac_amount;
+ bp_params.ds.ds_frac_size = d_s_data.ds_frac_size;
+ bp_params.ds_frac_amount =
+ d_s_data.ds_frac_amount;
+ bp_params.flags.DS_TYPE = 1;
+ bp_params.pll_id = clk_src->base.id;
+ bp_params.percentage = ss_data->percentage;
+ if (ss_data->flags.CENTER_SPREAD)
+ bp_params.flags.CENTER_SPREAD = 1;
+ if (ss_data->flags.EXTERNAL_SS)
+ bp_params.flags.EXTERNAL_SS = 1;
+
+ if (BP_RESULT_OK !=
+ clk_src->bios->funcs->
+ enable_spread_spectrum_on_ppll(
+ clk_src->bios,
+ &bp_params,
+ true))
+ return false;
+ } else
+ return false;
+ }
+ return true;
+}
+
+static void dce110_program_pixel_clk_resync(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal_type,
+ enum dc_color_depth colordepth)
+{
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 0);
+ /*
+ 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
+ 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+ 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
+ 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
+ */
+ if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
+ return;
+
+ switch (colordepth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 1);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 2);
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dce112_program_pixel_clk_resync(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal_type,
+ enum dc_color_depth colordepth,
+ bool enable_ycbcr420)
+{
+ uint32_t deep_color_cntl = 0;
+ uint32_t double_rate_enable = 0;
+
+ /*
+ 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
+ 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+ 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
+ 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
+ */
+ if (signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ double_rate_enable = enable_ycbcr420 ? 1 : 0;
+
+ switch (colordepth) {
+ case COLOR_DEPTH_888:
+ deep_color_cntl = 0;
+ break;
+ case COLOR_DEPTH_101010:
+ deep_color_cntl = 1;
+ break;
+ case COLOR_DEPTH_121212:
+ deep_color_cntl = 2;
+ break;
+ case COLOR_DEPTH_161616:
+ deep_color_cntl = 3;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (clk_src->cs_mask->PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE)
+ REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
+ PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl,
+ PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, double_rate_enable);
+ else
+ REG_UPDATE(PIXCLK_RESYNC_CNTL,
+ PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl);
+
+}
+
+static bool dce110_program_pix_clk(
+ struct clock_source *clock_source,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ struct bp_pixel_clock_parameters bp_pc_params = {0};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ unsigned dp_dto_ref_kHz = 600000;
+ /* DPREF clock from FPGA TODO: Does FPGA have this value? */
+ unsigned clock_kHz = pll_settings->actual_pix_clk;
+
+ /* For faster simulation, if mode pixe clock less than 290MHz,
+ * pixel clock can be hard coded to 290Mhz. For 4K mode, pixel clock
+ * is greater than 500Mhz, need real pixel clock
+ * clock_kHz = 290000;
+ */
+ /* TODO: un-hardcode when we can set display clock properly*/
+ /*clock_kHz = pix_clk_params->requested_pix_clk;*/
+ clock_kHz = 290000;
+
+ /* Set DTO values: phase = target clock, modulo = reference clock */
+ REG_WRITE(PHASE[inst], clock_kHz);
+ REG_WRITE(MODULO[inst], dp_dto_ref_kHz);
+
+ /* Enable DTO */
+ REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ return true;
+ }
+#endif
+ /* First disable SS
+ * ATOMBIOS will enable by default SS on PLL for DP,
+ * do not disable it here
+ */
+ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL &&
+ !dc_is_dp_signal(pix_clk_params->signal_type) &&
+ clock_source->ctx->dce_version <= DCE_VERSION_11_0)
+ disable_spread_spectrum(clk_src);
+
+ /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/
+ bp_pc_params.controller_id = pix_clk_params->controller_id;
+ bp_pc_params.pll_id = clock_source->id;
+ bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk;
+ bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_pc_params.signal_type = pix_clk_params->signal_type;
+
+ switch (clock_source->ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ bp_pc_params.reference_divider = pll_settings->reference_divider;
+ bp_pc_params.feedback_divider = pll_settings->feedback_divider;
+ bp_pc_params.fractional_feedback_divider =
+ pll_settings->fract_feedback_divider;
+ bp_pc_params.pixel_clock_post_divider =
+ pll_settings->pix_clk_post_divider;
+ bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+ /* Enable SS
+ * ATOMBIOS will enable by default SS for DP on PLL ( DP ID clock),
+ * based on HW display PLL team, SS control settings should be programmed
+ * during PLL Reset, but they do not have effect
+ * until SS_EN is asserted.*/
+ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL
+ && !dc_is_dp_signal(pix_clk_params->signal_type)) {
+
+ if (pix_clk_params->flags.ENABLE_SS)
+ if (!enable_spread_spectrum(clk_src,
+ pix_clk_params->signal_type,
+ pll_settings))
+ return false;
+
+ /* Resync deep color DTO */
+ dce110_program_pixel_clk_resync(clk_src,
+ pix_clk_params->signal_type,
+ pix_clk_params->color_depth);
+ }
+
+ break;
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_12_0:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+#endif
+
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+ bp_pc_params.flags.SET_XTALIN_REF_SRC =
+ !pll_settings->use_external_clk;
+ if (pix_clk_params->flags.SUPPORT_YCBCR420) {
+ bp_pc_params.flags.SUPPORT_YUV_420 = 1;
+ }
+ }
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+ /* Resync deep color DTO */
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO)
+ dce112_program_pixel_clk_resync(clk_src,
+ pix_clk_params->signal_type,
+ pix_clk_params->color_depth,
+ pix_clk_params->flags.SUPPORT_YCBCR420);
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool dce110_clock_source_power_down(
+ struct clock_source *clk_src)
+{
+ struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src);
+ enum bp_result bp_result;
+ struct bp_pixel_clock_parameters bp_pixel_clock_params = {0};
+
+ if (clk_src->dp_clk_src)
+ return true;
+
+ /* If Pixel Clock is 0 it means Power Down Pll*/
+ bp_pixel_clock_params.controller_id = CONTROLLER_ID_UNDEFINED;
+ bp_pixel_clock_params.pll_id = clk_src->id;
+ bp_pixel_clock_params.flags.FORCE_PROGRAMMING_OF_PLL = 1;
+
+ /*Call ASICControl to process ATOMBIOS Exec table*/
+ bp_result = dce110_clk_src->bios->funcs->set_pixel_clock(
+ dce110_clk_src->bios,
+ &bp_pixel_clock_params);
+
+ return bp_result == BP_RESULT_OK;
+}
+
+/*****************************************/
+/* Constructor */
+/*****************************************/
+static const struct clock_source_funcs dce110_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dce110_program_pix_clk,
+ .get_pix_clk_dividers = dce110_get_pix_clk_dividers,
+ .get_pix_rate_in_hz = dce110_get_pix_rate_in_hz
+};
+
+static void get_ss_info_from_atombios(
+ struct dce110_clk_src *clk_src,
+ enum as_signal_type as_signal,
+ struct spread_spectrum_data *spread_spectrum_data[],
+ uint32_t *ss_entries_num)
+{
+ enum bp_result bp_result = BP_RESULT_FAILURE;
+ struct spread_spectrum_info *ss_info;
+ struct spread_spectrum_data *ss_data;
+ struct spread_spectrum_info *ss_info_cur;
+ struct spread_spectrum_data *ss_data_cur;
+ uint32_t i;
+
+ if (ss_entries_num == NULL) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid entry !!!\n");
+ return;
+ }
+ if (spread_spectrum_data == NULL) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid array pointer!!!\n");
+ return;
+ }
+
+ spread_spectrum_data[0] = NULL;
+ *ss_entries_num = 0;
+
+ *ss_entries_num = clk_src->bios->funcs->get_ss_entry_number(
+ clk_src->bios,
+ as_signal);
+
+ if (*ss_entries_num == 0)
+ return;
+
+ ss_info = kzalloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num),
+ GFP_KERNEL);
+ ss_info_cur = ss_info;
+ if (ss_info == NULL)
+ return;
+
+ ss_data = kzalloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num),
+ GFP_KERNEL);
+ if (ss_data == NULL)
+ goto out_free_info;
+
+ for (i = 0, ss_info_cur = ss_info;
+ i < (*ss_entries_num);
+ ++i, ++ss_info_cur) {
+
+ bp_result = clk_src->bios->funcs->get_spread_spectrum_info(
+ clk_src->bios,
+ as_signal,
+ i,
+ ss_info_cur);
+
+ if (bp_result != BP_RESULT_OK)
+ goto out_free_data;
+ }
+
+ for (i = 0, ss_info_cur = ss_info, ss_data_cur = ss_data;
+ i < (*ss_entries_num);
+ ++i, ++ss_info_cur, ++ss_data_cur) {
+
+ if (ss_info_cur->type.STEP_AND_DELAY_INFO != false) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid ATOMBIOS SS Table!!!\n");
+ goto out_free_data;
+ }
+
+ /* for HDMI check SS percentage,
+ * if it is > 6 (0.06%), the ATOMBIOS table info is invalid*/
+ if (as_signal == AS_SIGNAL_TYPE_HDMI
+ && ss_info_cur->spread_spectrum_percentage > 6){
+ /* invalid input, do nothing */
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid SS percentage ");
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "for HDMI in ATOMBIOS info Table!!!\n");
+ continue;
+ }
+ if (ss_info_cur->spread_percentage_divider == 1000) {
+ /* Keep previous precision from ATOMBIOS for these
+ * in case new precision set by ATOMBIOS for these
+ * (otherwise all code in DCE specific classes
+ * for all previous ASICs would need
+ * to be updated for SS calculations,
+ * Audio SS compensation and DP DTO SS compensation
+ * which assumes fixed SS percentage Divider = 100)*/
+ ss_info_cur->spread_spectrum_percentage /= 10;
+ ss_info_cur->spread_percentage_divider = 100;
+ }
+
+ ss_data_cur->freq_range_khz = ss_info_cur->target_clock_range;
+ ss_data_cur->percentage =
+ ss_info_cur->spread_spectrum_percentage;
+ ss_data_cur->percentage_divider =
+ ss_info_cur->spread_percentage_divider;
+ ss_data_cur->modulation_freq_hz =
+ ss_info_cur->spread_spectrum_range;
+
+ if (ss_info_cur->type.CENTER_MODE)
+ ss_data_cur->flags.CENTER_SPREAD = 1;
+
+ if (ss_info_cur->type.EXTERNAL)
+ ss_data_cur->flags.EXTERNAL_SS = 1;
+
+ }
+
+ *spread_spectrum_data = ss_data;
+ kfree(ss_info);
+ return;
+
+out_free_data:
+ kfree(ss_data);
+ *ss_entries_num = 0;
+out_free_info:
+ kfree(ss_info);
+}
+
+static void ss_info_from_atombios_create(
+ struct dce110_clk_src *clk_src)
+{
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_DISPLAY_PORT,
+ &clk_src->dp_ss_params,
+ &clk_src->dp_ss_params_cnt);
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_HDMI,
+ &clk_src->hdmi_ss_params,
+ &clk_src->hdmi_ss_params_cnt);
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_DVI,
+ &clk_src->dvi_ss_params,
+ &clk_src->dvi_ss_params_cnt);
+}
+
+static bool calc_pll_max_vco_construct(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct calc_pll_clock_source_init_data *init_data)
+{
+ uint32_t i;
+ struct dc_firmware_info fw_info = { { 0 } };
+ if (calc_pll_cs == NULL ||
+ init_data == NULL ||
+ init_data->bp == NULL)
+ return false;
+
+ if (init_data->bp->funcs->get_firmware_info(
+ init_data->bp,
+ &fw_info) != BP_RESULT_OK)
+ return false;
+
+ calc_pll_cs->ctx = init_data->ctx;
+ calc_pll_cs->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+ calc_pll_cs->min_vco_khz =
+ fw_info.pll_info.min_output_pxl_clk_pll_frequency;
+ calc_pll_cs->max_vco_khz =
+ fw_info.pll_info.max_output_pxl_clk_pll_frequency;
+
+ if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0)
+ calc_pll_cs->max_pll_input_freq_khz =
+ init_data->max_override_input_pxl_clk_pll_freq_khz;
+ else
+ calc_pll_cs->max_pll_input_freq_khz =
+ fw_info.pll_info.max_input_pxl_clk_pll_frequency;
+
+ if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0)
+ calc_pll_cs->min_pll_input_freq_khz =
+ init_data->min_override_input_pxl_clk_pll_freq_khz;
+ else
+ calc_pll_cs->min_pll_input_freq_khz =
+ fw_info.pll_info.min_input_pxl_clk_pll_frequency;
+
+ calc_pll_cs->min_pix_clock_pll_post_divider =
+ init_data->min_pix_clk_pll_post_divider;
+ calc_pll_cs->max_pix_clock_pll_post_divider =
+ init_data->max_pix_clk_pll_post_divider;
+ calc_pll_cs->min_pll_ref_divider =
+ init_data->min_pll_ref_divider;
+ calc_pll_cs->max_pll_ref_divider =
+ init_data->max_pll_ref_divider;
+
+ if (init_data->num_fract_fb_divider_decimal_point == 0 ||
+ init_data->num_fract_fb_divider_decimal_point_precision >
+ init_data->num_fract_fb_divider_decimal_point) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "The dec point num or precision is incorrect!");
+ return false;
+ }
+ if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "Incorrect fract feedback divider precision num!");
+ return false;
+ }
+
+ calc_pll_cs->fract_fb_divider_decimal_points_num =
+ init_data->num_fract_fb_divider_decimal_point;
+ calc_pll_cs->fract_fb_divider_precision =
+ init_data->num_fract_fb_divider_decimal_point_precision;
+ calc_pll_cs->fract_fb_divider_factor = 1;
+ for (i = 0; i < calc_pll_cs->fract_fb_divider_decimal_points_num; ++i)
+ calc_pll_cs->fract_fb_divider_factor *= 10;
+
+ calc_pll_cs->fract_fb_divider_precision_factor = 1;
+ for (
+ i = 0;
+ i < (calc_pll_cs->fract_fb_divider_decimal_points_num -
+ calc_pll_cs->fract_fb_divider_precision);
+ ++i)
+ calc_pll_cs->fract_fb_divider_precision_factor *= 10;
+
+ return true;
+}
+
+bool dce110_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask)
+{
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi;
+ struct calc_pll_clock_source_init_data calc_pll_cs_init_data;
+
+ clk_src->base.ctx = ctx;
+ clk_src->bios = bios;
+ clk_src->base.id = id;
+ clk_src->base.funcs = &dce110_clk_src_funcs;
+
+ clk_src->regs = regs;
+ clk_src->cs_shift = cs_shift;
+ clk_src->cs_mask = cs_mask;
+
+ if (clk_src->bios->funcs->get_firmware_info(
+ clk_src->bios, &fw_info) != BP_RESULT_OK) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+
+ clk_src->ext_clk_khz =
+ fw_info.external_clock_source_frequency_for_dp;
+
+ switch (clk_src->base.ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+
+ /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
+ calc_pll_cs_init_data.bp = bios;
+ calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1;
+ calc_pll_cs_init_data.max_pix_clk_pll_post_divider =
+ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+ calc_pll_cs_init_data.min_pll_ref_divider = 1;
+ calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0;
+ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0;
+ /*numberOfFractFBDividerDecimalPoints*/
+ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ /*number of decimal point to round off for fractional feedback divider value*/
+ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ calc_pll_cs_init_data.ctx = ctx;
+
+ /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */
+ calc_pll_cs_init_data_hdmi.bp = bios;
+ calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1;
+ calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider =
+ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+ calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1;
+ calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500;
+ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000;
+ /*numberOfFractFBDividerDecimalPoints*/
+ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ /*number of decimal point to round off for fractional feedback divider value*/
+ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ calc_pll_cs_init_data_hdmi.ctx = ctx;
+
+ clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+
+ if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
+ return true;
+
+ /* PLL only from here on */
+ ss_info_from_atombios_create(clk_src);
+
+ if (!calc_pll_max_vco_construct(
+ &clk_src->calc_pll,
+ &calc_pll_cs_init_data)) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+
+
+ calc_pll_cs_init_data_hdmi.
+ min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2;
+ calc_pll_cs_init_data_hdmi.
+ max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz;
+
+
+ if (!calc_pll_max_vco_construct(
+ &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+
+unexpected_failure:
+ return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
new file mode 100644
index 000000000000..c45e2f76189e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -0,0 +1,145 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_CLOCK_SOURCE_DCE_H__
+#define __DC_CLOCK_SOURCE_DCE_H__
+
+#include "../inc/clock_source.h"
+
+#define TO_DCE110_CLK_SRC(clk_src)\
+ container_of(clk_src, struct dce110_clk_src, base)
+
+#define CS_COMMON_REG_LIST_DCE_100_110(id) \
+ SRI(RESYNC_CNTL, PIXCLK, id), \
+ SRI(PLL_CNTL, BPHYC_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_80(id) \
+ SRI(RESYNC_CNTL, PIXCLK, id), \
+ SRI(PLL_CNTL, DCCG_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_112(id) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, id)
+
+
+#define CS_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ CS_SF(PLL_CNTL, PLL_REF_DIV_SRC, mask_sh),\
+ CS_SF(PIXCLK1_RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, mask_sh),\
+ CS_SF(PLL_POST_DIV, PLL_POST_DIV_PIXCLK, mask_sh),\
+ CS_SF(PLL_REF_DIV, PLL_REF_DIV, mask_sh)
+
+#define CS_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+
+#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(PHASE, DP_DTO, 2),\
+ SRII(PHASE, DP_DTO, 3),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 2),\
+ SRII(MODULO, DP_DTO, 3),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0), \
+ SRII(PIXEL_RATE_CNTL, OTG, 1), \
+ SRII(PIXEL_RATE_CNTL, OTG, 2), \
+ SRII(PIXEL_RATE_CNTL, OTG, 3)
+
+#define CS_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
+ CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+ CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
+
+#endif
+
+#define CS_REG_FIELD_LIST(type) \
+ type PLL_REF_DIV_SRC; \
+ type DCCG_DEEP_COLOR_CNTL1; \
+ type PHYPLLA_DCCG_DEEP_COLOR_CNTL; \
+ type PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE; \
+ type PLL_POST_DIV_PIXCLK; \
+ type PLL_REF_DIV; \
+ type DP_DTO0_PHASE; \
+ type DP_DTO0_MODULO; \
+ type DP_DTO0_ENABLE;
+
+struct dce110_clk_src_shift {
+ CS_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce110_clk_src_mask{
+ CS_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce110_clk_src_regs {
+ uint32_t RESYNC_CNTL;
+ uint32_t PIXCLK_RESYNC_CNTL;
+ uint32_t PLL_CNTL;
+
+ /* below are for DTO.
+ * todo: should probably use different struct to not waste space
+ */
+ uint32_t PHASE[MAX_PIPES];
+ uint32_t MODULO[MAX_PIPES];
+ uint32_t PIXEL_RATE_CNTL[MAX_PIPES];
+};
+
+struct dce110_clk_src {
+ struct clock_source base;
+ const struct dce110_clk_src_regs *regs;
+ const struct dce110_clk_src_mask *cs_mask;
+ const struct dce110_clk_src_shift *cs_shift;
+ struct dc_bios *bios;
+
+ struct spread_spectrum_data *dp_ss_params;
+ uint32_t dp_ss_params_cnt;
+ struct spread_spectrum_data *hdmi_ss_params;
+ uint32_t hdmi_ss_params_cnt;
+ struct spread_spectrum_data *dvi_ss_params;
+ uint32_t dvi_ss_params_cnt;
+
+ uint32_t ext_clk_khz;
+ uint32_t ref_freq_khz;
+
+ struct calc_pll_clock_source calc_pll;
+ struct calc_pll_clock_source calc_pll_hdmi;
+};
+
+bool dce110_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
new file mode 100644
index 000000000000..9031d22285ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clocks.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "bios_parser_interface.h"
+#include "dc.h"
+#include "dce_abm.h"
+#include "dmcu.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn_calcs.h"
+#endif
+#include "core_types.h"
+
+
+#define TO_DCE_CLOCKS(clocks)\
+ container_of(clocks, struct dce_disp_clk, base)
+
+#define REG(reg) \
+ (clk_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
+
+#define CTX \
+ clk_dce->base.ctx
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+/* Starting point for each divider range.*/
+enum dce_divider_range_start {
+ DIVIDER_RANGE_01_START = 200, /* 2.00*/
+ DIVIDER_RANGE_02_START = 1600, /* 16.00*/
+ DIVIDER_RANGE_03_START = 3200, /* 32.00*/
+ DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
+};
+
+/* Ranges for divider identifiers (Divider ID or DID)
+ mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
+enum dce_divider_id_register_setting {
+ DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
+ DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
+};
+
+/* Step size between each divider within a range.
+ Incrementing the DENTIST_DISPCLK_WDIVIDER by one
+ will increment the divider by this much.*/
+enum dce_divider_range_step_size {
+ DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
+ DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
+ DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
+};
+
+static bool dce_divider_range_construct(
+ struct dce_divider_range *div_range,
+ int range_start,
+ int range_step,
+ int did_min,
+ int did_max)
+{
+ div_range->div_range_start = range_start;
+ div_range->div_range_step = range_step;
+ div_range->did_min = did_min;
+ div_range->did_max = did_max;
+
+ if (div_range->div_range_step == 0) {
+ div_range->div_range_step = 1;
+ /*div_range_step cannot be zero*/
+ BREAK_TO_DEBUGGER();
+ }
+ /* Calculate this based on the other inputs.*/
+ /* See DividerRange.h for explanation of */
+ /* the relationship between divider id (DID) and a divider.*/
+ /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
+ /* Maximum divider identified in this range =
+ * (Number of Divider IDs)*Step size between dividers
+ * + The start of this range.*/
+ div_range->div_range_end = (did_max - did_min) * range_step
+ + range_start;
+ return true;
+}
+
+static int dce_divider_range_calc_divider(
+ struct dce_divider_range *div_range,
+ int did)
+{
+ /* Is this DID within our range?*/
+ if ((did < div_range->did_min) || (did >= div_range->did_max))
+ return INVALID_DIVIDER;
+
+ return ((did - div_range->did_min) * div_range->div_range_step)
+ + div_range->div_range_start;
+
+}
+
+static int dce_divider_range_get_divider(
+ struct dce_divider_range *div_range,
+ int ranges_num,
+ int did)
+{
+ int div = INVALID_DIVIDER;
+ int i;
+
+ for (i = 0; i < ranges_num; i++) {
+ /* Calculate divider with given divider ID*/
+ div = dce_divider_range_calc_divider(&div_range[i], did);
+ /* Found a valid return divider*/
+ if (div != INVALID_DIVIDER)
+ break;
+ }
+ return div;
+}
+
+static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+ int target_div = INVALID_DIVIDER;
+
+ /* ASSERT DP Reference Clock source is from DFS*/
+ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+ ASSERT(dprefclk_src_sel == 0);
+
+ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+ * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+ /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
+ target_div = dce_divider_range_get_divider(
+ clk_dce->divider_ranges,
+ DIVIDER_RANGE_MAX,
+ dprefclk_wdivider);
+
+ if (target_div != INVALID_DIVIDER) {
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
+ * clk_dce->dentist_vco_freq_khz) / target_div;
+ }
+
+ /* SW will adjust DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC)
+ if clock is spread for all cases:
+ -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
+ calculations for DS_INCR/DS_MODULO (this is planned to be default case)
+ -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
+ calculations (not planned to be used, but average clock should still
+ be valid)
+ -if SS enabled on DP Ref clock and HW de-spreading disabled
+ (should not be case with CIK) then SW should program all rates
+ generated according to average value (case as with previous ASICs)
+ */
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+ dal_fixed32_32_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed32_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+ dal_fixed32_32_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+ dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+ }
+
+ return dp_ref_clk_khz;
+}
+
+/* TODO: This is DCN DPREFCLK: it could be program by DENTIST by VBIOS
+ * or CLK0_CLK11 by SMU. For DCE120, it is wlays 600Mhz. Will re-visit
+ * clock implementation
+ */
+static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int dp_ref_clk_khz = 600000;
+
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+ dal_fixed32_32_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed32_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+ dal_fixed32_32_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+ dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+ }
+
+ return dp_ref_clk_khz;
+}
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct display_clock *clk,
+ struct state_dependent_clocks *req_clocks)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
+
+ /* Iterate from highest supported to lowest valid state, and update
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+ for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+ if (req_clocks->display_clk_khz >
+ clk_dce->max_clks_by_state[i].display_clk_khz
+ || req_clocks->pixel_clk_khz >
+ clk_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
+ low_req_clk = i + 1;
+ if (low_req_clk > clk->max_clks_state) {
+ dm_logger_write(clk->ctx->logger, LOG_WARNING,
+ "%s: clocks unsupported", __func__);
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ }
+
+ return low_req_clk;
+}
+
+static bool dce_clock_set_min_clocks_state(
+ struct display_clock *clk,
+ enum dm_pp_clocks_state clocks_state)
+{
+ struct dm_pp_power_level_change_request level_change_req = {
+ clocks_state };
+
+ if (clocks_state > clk->max_clks_state) {
+ /*Requested state exceeds max supported state.*/
+ dm_logger_write(clk->ctx->logger, LOG_WARNING,
+ "Requested state exceeds max supported state");
+ return false;
+ } else if (clocks_state == clk->cur_min_clks_state) {
+ /*if we're trying to set the same state, we can just return
+ * since nothing needs to be done*/
+ return true;
+ }
+
+ /* get max clock state from PPLIB */
+ if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
+ clk->cur_min_clks_state = clocks_state;
+
+ return true;
+}
+
+static int dce_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_dce->dentist_vco_freq_khz / 64);
+
+ /* Prepare to program display clock*/
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+ if (clk_dce->dfs_bypass_enabled) {
+
+ /* Cache the fixed display clock*/
+ clk_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+ }
+
+ /* from power down, we need mark the clock state as ClocksStateNominal
+ * from HWReset, so when resume we will call pplib voltage regulator.*/
+ if (requested_clk_khz == 0)
+ clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ return actual_clock;
+}
+
+static int dce_psr_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dc_context *ctx = clk_dce->base.ctx;
+ struct dc *core_dc = ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clk_khz = requested_clk_khz;
+
+ actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
+
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
+ return actual_clk_khz;
+}
+
+static int dce112_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ struct dc *core_dc = clk->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program display clock*/
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_dce->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ actual_clock = dce_clk_params.target_clock_frequency;
+
+ /* from power down, we need mark the clock state as ClocksStateNominal
+ * from HWReset, so when resume we will call pplib voltage regulator.*/
+ if (requested_clk_khz == 0)
+ clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /*Program DP ref Clock*/
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ if (abm->funcs->is_dmcu_initialized(abm) && clk_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ clk_dce->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+{
+ struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+ int i;
+
+ if (bp->integrated_info)
+ info = *bp->integrated_info;
+
+ clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_dce->dentist_vco_freq_khz =
+ fw_info.smu_gpu_pll_output_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0)
+ clk_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ /*update the maximum display clock for each power state*/
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ switch (i) {
+ case 0:
+ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+ break;
+
+ case 1:
+ clk_state = DM_PP_CLOCKS_STATE_LOW;
+ break;
+
+ case 2:
+ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ break;
+
+ case 3:
+ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+ break;
+
+ default:
+ clk_state = DM_PP_CLOCKS_STATE_INVALID;
+ break;
+ }
+
+ /*Do not allow bad VBIOS/SBIOS to override with invalid values,
+ * check for > 100MHz*/
+ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+ clk_dce->max_clks_by_state[clk_state].display_clk_khz =
+ info.disp_clk_voltage[i].max_supported_clk;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+
+ clk_dce->use_max_disp_clk = debug->max_disp_clk;
+}
+
+static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+{
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+ bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+ if (ss_info_num) {
+ struct spread_spectrum_info info = { { 0 } };
+ enum bp_result result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+ /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
+ * even if SS not enabled and in that case
+ * SSInfo.spreadSpectrumPercentage !=0 would be sign
+ * that SS is enabled
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_dce->ss_on_dprefclk = true;
+ clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* TODO: Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+
+ return;
+ }
+
+ result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+ /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
+ * even if SS not enabled and in that case
+ * SSInfo.spreadSpectrumPercentage !=0 would be sign
+ * that SS is enabled
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_dce->ss_on_dprefclk = true;
+ clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+ }
+}
+
+static bool dce_apply_clock_voltage_request(
+ struct display_clock *clk,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz,
+ bool pre_mode_set,
+ bool update_dp_phyclk)
+{
+ bool send_request = false;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ clock_voltage_req.clk_type = clocks_type;
+ clock_voltage_req.clocks_in_khz = clocks_in_khz;
+
+ /* to pplib */
+ if (pre_mode_set) {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
+ clk->cur_clocks_value.dispclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.dispclk_notify_pplib_done = false;
+ /* no matter incrase or decrase clock, update current clock value */
+ clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
+ clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
+ /* no matter incrase or decrase clock, update current clock value */
+ clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
+ clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
+ /* no matter incrase or decrase clock, update current clock value */
+ clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ } else {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
+ send_request = true;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
+ send_request = true;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
+ send_request = true;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ if (send_request) {
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
+ struct dc *core_dc = clk->ctx->dc;
+ /*use dcfclk request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz =
+ dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
+ }
+#endif
+ dm_pp_apply_clock_for_voltage_request(
+ clk->ctx, &clock_voltage_req);
+ }
+ if (update_dp_phyclk && (clocks_in_khz >
+ clk->cur_clocks_value.max_dp_phyclk_in_khz))
+ clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
+
+ return true;
+}
+
+
+static const struct display_clock_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
+ .apply_clock_voltage_request = dce_apply_clock_voltage_request,
+ .set_clock = dce112_set_clock
+};
+
+static const struct display_clock_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce112_set_clock
+};
+
+static const struct display_clock_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce_psr_set_clock
+};
+
+static const struct display_clock_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce_set_clock
+};
+
+static void dce_disp_clk_construct(
+ struct dce_disp_clk *clk_dce,
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct display_clock *base = &clk_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ clk_dce->regs = regs;
+ clk_dce->clk_shift = clk_shift;
+ clk_dce->clk_mask = clk_mask;
+
+ clk_dce->dfs_bypass_disp_clk = 0;
+
+ clk_dce->dprefclk_ss_percentage = 0;
+ clk_dce->dprefclk_ss_divider = 1000;
+ clk_dce->ss_on_dprefclk = false;
+
+ base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ dce_clock_read_integrated_info(clk_dce);
+ dce_clock_read_ss_info(clk_dce);
+
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_01],
+ DIVIDER_RANGE_01_START,
+ DIVIDER_RANGE_01_STEP_SIZE,
+ DIVIDER_RANGE_01_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID);
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_02],
+ DIVIDER_RANGE_02_START,
+ DIVIDER_RANGE_02_STEP_SIZE,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID);
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_03],
+ DIVIDER_RANGE_03_START,
+ DIVIDER_RANGE_03_STEP_SIZE,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_MAX_DIVIDER_ID);
+}
+
+struct display_clock *dce_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce110_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce110_funcs;
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce112_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce112_funcs;
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, NULL, NULL, NULL);
+
+ clk_dce->base.funcs = &dce120_funcs;
+
+ /* new in dce120 */
+ if (!ctx->dc->debug.disable_pplib_clock_request &&
+ dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
+ && clk_level_info.num_levels)
+ clk_dce->max_displ_clk_in_khz =
+ clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
+ else
+ clk_dce->max_displ_clk_in_khz = 1133000;
+
+ return &clk_dce->base;
+}
+
+void dce_disp_clk_destroy(struct display_clock **disp_clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+
+ kfree(clk_dce);
+ *disp_clk = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
new file mode 100644
index 000000000000..0e717e0dc8f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_CLOCKS_H_
+#define _DCE_CLOCKS_H_
+
+#include "display_clock.h"
+
+#define CLK_COMMON_REG_LIST_DCE_BASE() \
+ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+
+#define CLK_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+
+#define CLK_REG_FIELD_LIST(type) \
+ type DPREFCLK_SRC_SEL; \
+ type DENTIST_DPREFCLK_WDIVIDER;
+
+struct dce_disp_clk_shift {
+ CLK_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_disp_clk_mask {
+ CLK_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_disp_clk_registers {
+ uint32_t DPREFCLK_CNTL;
+ uint32_t DENTIST_DISPCLK_CNTL;
+};
+
+/* Array identifiers and count for the divider ranges.*/
+enum dce_divider_range_count {
+ DIVIDER_RANGE_01 = 0,
+ DIVIDER_RANGE_02,
+ DIVIDER_RANGE_03,
+ DIVIDER_RANGE_MAX /* == 3*/
+};
+
+enum dce_divider_error_types {
+ INVALID_DID = 0,
+ INVALID_DIVIDER = 1
+};
+
+struct dce_divider_range {
+ int div_range_start;
+ /* The end of this range of dividers.*/
+ int div_range_end;
+ /* The distance between each divider in this range.*/
+ int div_range_step;
+ /* The divider id for the lowest divider.*/
+ int did_min;
+ /* The divider id for the highest divider.*/
+ int did_max;
+};
+
+struct dce_disp_clk {
+ struct display_clock base;
+ const struct dce_disp_clk_registers *regs;
+ const struct dce_disp_clk_shift *clk_shift;
+ const struct dce_disp_clk_mask *clk_mask;
+
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+ struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
+
+ bool use_max_disp_clk;
+ int dentist_vco_freq_khz;
+
+ /* Cache the status of DFS-bypass feature*/
+ bool dfs_bypass_enabled;
+ /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+ int dfs_bypass_disp_clk;
+
+ /* Flag for Enabled SS on DPREFCLK */
+ bool ss_on_dprefclk;
+ /* DPREFCLK SS percentage (if down-spread enabled) */
+ int dprefclk_ss_percentage;
+ /* DPREFCLK SS percentage Divider (100 or 1000) */
+ int dprefclk_ss_divider;
+
+ /* max disp_clk from PPLIB for max validation display clock*/
+ int max_displ_clk_in_khz;
+};
+
+
+struct display_clock *dce_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce110_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce112_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+
+void dce_disp_clk_destroy(struct display_clock **disp_clk);
+
+#endif /* _DCE_CLOCKS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
new file mode 100644
index 000000000000..fd77df573b61
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dce_dmcu.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "dc.h"
+
+#define TO_DCE_DMCU(dmcu)\
+ container_of(dmcu, struct dce_dmcu, base)
+
+#define REG(reg) \
+ (dmcu_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dmcu_dce->dmcu_shift->field_name, dmcu_dce->dmcu_mask->field_name
+
+#define CTX \
+ dmcu_dce->base.ctx
+
+/* PSR related commands */
+#define PSR_ENABLE 0x20
+#define PSR_EXIT 0x21
+#define PSR_SET 0x23
+#define PSR_SET_WAITLOOP 0x31
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
+unsigned int cached_wait_loop_number = 0;
+
+bool dce_dmcu_load_iram(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int count = 0;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_WR_ADDR_AUTO_INC, 1);
+
+ REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset);
+
+ for (count = 0; count < bytes; count++)
+ REG_WRITE(DMCU_IRAM_WR_DATA, src[count]);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_WR_ADDR_AUTO_INC, 0);
+
+ return true;
+}
+
+static void dce_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ uint32_t psrStateOffset = 0xf0;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
+
+ REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+
+ /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+ *psr_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+ /* Disable write access to IRAM after finished using IRAM
+ * in order to allow dynamic sleep state
+ */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
+}
+
+static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ unsigned int retryCount;
+ uint32_t psr_state = 0;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_Cmd */
+ if (enable)
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_ENABLE);
+ else
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_EXIT);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ if (wait == true) {
+ for (retryCount = 0; retryCount <= 100; retryCount++) {
+ dce_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(10);
+ }
+ }
+}
+
+static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ union dce_dmcu_psr_config_data_reg1 masterCmdData1;
+ union dce_dmcu_psr_config_data_reg2 masterCmdData2;
+ union dce_dmcu_psr_config_data_reg3 masterCmdData3;
+
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ /* Enable static screen interrupts for PSR supported display */
+ /* Disable the interrupt coming from other displays. */
+ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 0,
+ STATIC_SCREEN2_INT_TO_UC_EN, 0,
+ STATIC_SCREEN3_INT_TO_UC_EN, 0,
+ STATIC_SCREEN4_INT_TO_UC_EN, 0);
+
+ switch (psr_context->controllerId) {
+ /* Driver uses case 1 for unconfigured */
+ case 1:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN2_INT_TO_UC_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN3_INT_TO_UC_EN, 1);
+ break;
+ case 4:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN4_INT_TO_UC_EN, 1);
+ break;
+ case 5:
+ /* CZ/NL only has 4 CRTC!!
+ * really valid.
+ * There is no interrupt enable mask for these instances.
+ */
+ break;
+ case 6:
+ /* CZ/NL only has 4 CRTC!!
+ * These are here because they are defined in HW regspec,
+ * but not really valid. There is no interrupt enable mask
+ * for these instances.
+ */
+ break;
+ default:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ }
+
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_PSRHostConfigData */
+ masterCmdData1.u32All = 0;
+ masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
+ masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
+ masterCmdData1.bits.rfb_update_auto_en =
+ psr_context->rfb_update_auto_en;
+ masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
+ masterCmdData1.bits.dcp_sel = psr_context->controllerId;
+ masterCmdData1.bits.phy_type = psr_context->phyType;
+ masterCmdData1.bits.frame_cap_ind =
+ psr_context->psrFrameCaptureIndicationReq;
+ masterCmdData1.bits.aux_chan = psr_context->channel;
+ masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
+ masterCmdData1.u32All);
+
+ masterCmdData2.u32All = 0;
+ masterCmdData2.bits.dig_fe = psr_context->engineId;
+ masterCmdData2.bits.dig_be = psr_context->transmitterId;
+ masterCmdData2.bits.skip_wait_for_pll_lock =
+ psr_context->skipPsrWaitForPllLock;
+ masterCmdData2.bits.frame_delay = psr_context->frame_delay;
+ masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
+ masterCmdData2.bits.num_of_controllers =
+ psr_context->numberOfControllers;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2),
+ masterCmdData2.u32All);
+
+ masterCmdData3.u32All = 0;
+ masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
+ masterCmdData3.u32All);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dce_psr_wait_loop(
+ struct dmcu *dmcu,
+ unsigned int wait_loop_number)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+ if (cached_wait_loop_number == wait_loop_number)
+ return;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ masterCmdData1.u32 = 0;
+ masterCmdData1.bits.wait_loop = wait_loop_number;
+ cached_wait_loop_number = wait_loop_number;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dce_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+{
+ *psr_wait_loop_number = cached_wait_loop_number;
+ return;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int count = 0;
+
+ REG_UPDATE(DMCU_CTRL, DMCU_ENABLE, 1);
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_WR_ADDR_AUTO_INC, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset);
+
+ for (count = 0; count < bytes; count++)
+ REG_WRITE(DMCU_IRAM_WR_DATA, src[count]);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_WR_ADDR_AUTO_INC, 0);
+
+ return true;
+}
+
+static void dcn10_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ uint32_t psrStateOffset = 0xf0;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+
+ /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+ *psr_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+ /* Disable write access to IRAM after finished using IRAM
+ * in order to allow dynamic sleep state
+ */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
+}
+
+static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ unsigned int retryCount;
+ uint32_t psr_state = 0;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_Cmd */
+ if (enable)
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_ENABLE);
+ else
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_EXIT);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Below loops 1000 x 500us = 500 ms.
+ * Exit PSR may need to wait 1-2 frames to power up. Timeout after at
+ * least a few frames. Should never hit the max retry assert below.
+ */
+ if (wait == true) {
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
+ }
+
+ /* assert if max retry hit */
+ ASSERT(retryCount <= 1000);
+ }
+}
+
+static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ union dce_dmcu_psr_config_data_reg1 masterCmdData1;
+ union dce_dmcu_psr_config_data_reg2 masterCmdData2;
+ union dce_dmcu_psr_config_data_reg3 masterCmdData3;
+
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ /* Enable static screen interrupts for PSR supported display */
+ /* Disable the interrupt coming from other displays. */
+ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 0,
+ STATIC_SCREEN2_INT_TO_UC_EN, 0,
+ STATIC_SCREEN3_INT_TO_UC_EN, 0,
+ STATIC_SCREEN4_INT_TO_UC_EN, 0);
+
+ switch (psr_context->controllerId) {
+ /* Driver uses case 1 for unconfigured */
+ case 1:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN2_INT_TO_UC_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN3_INT_TO_UC_EN, 1);
+ break;
+ case 4:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN4_INT_TO_UC_EN, 1);
+ break;
+ case 5:
+ /* CZ/NL only has 4 CRTC!!
+ * really valid.
+ * There is no interrupt enable mask for these instances.
+ */
+ break;
+ case 6:
+ /* CZ/NL only has 4 CRTC!!
+ * These are here because they are defined in HW regspec,
+ * but not really valid. There is no interrupt enable mask
+ * for these instances.
+ */
+ break;
+ default:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ }
+
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_PSRHostConfigData */
+ masterCmdData1.u32All = 0;
+ masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
+ masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
+ masterCmdData1.bits.rfb_update_auto_en =
+ psr_context->rfb_update_auto_en;
+ masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
+ masterCmdData1.bits.dcp_sel = psr_context->controllerId;
+ masterCmdData1.bits.phy_type = psr_context->phyType;
+ masterCmdData1.bits.frame_cap_ind =
+ psr_context->psrFrameCaptureIndicationReq;
+ masterCmdData1.bits.aux_chan = psr_context->channel;
+ masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
+ masterCmdData1.u32All);
+
+ masterCmdData2.u32All = 0;
+ masterCmdData2.bits.dig_fe = psr_context->engineId;
+ masterCmdData2.bits.dig_be = psr_context->transmitterId;
+ masterCmdData2.bits.skip_wait_for_pll_lock =
+ psr_context->skipPsrWaitForPllLock;
+ masterCmdData2.bits.frame_delay = psr_context->frame_delay;
+ masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
+ masterCmdData2.bits.num_of_controllers =
+ psr_context->numberOfControllers;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2),
+ masterCmdData2.u32All);
+
+ masterCmdData3.u32All = 0;
+ masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
+ masterCmdData3.u32All);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dcn10_psr_wait_loop(
+ struct dmcu *dmcu,
+ unsigned int wait_loop_number)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+ if (wait_loop_number != 0) {
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ masterCmdData1.u32 = 0;
+ masterCmdData1.bits.wait_loop = wait_loop_number;
+ cached_wait_loop_number = wait_loop_number;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ }
+}
+
+static void dcn10_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+{
+ *psr_wait_loop_number = cached_wait_loop_number;
+ return;
+}
+
+#endif
+
+static const struct dmcu_funcs dce_funcs = {
+ .load_iram = dce_dmcu_load_iram,
+ .set_psr_enable = dce_dmcu_set_psr_enable,
+ .setup_psr = dce_dmcu_setup_psr,
+ .get_psr_state = dce_get_dmcu_psr_state,
+ .set_psr_wait_loop = dce_psr_wait_loop,
+ .get_psr_wait_loop = dce_get_psr_wait_loop
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+static const struct dmcu_funcs dcn10_funcs = {
+ .load_iram = dcn10_dmcu_load_iram,
+ .set_psr_enable = dcn10_dmcu_set_psr_enable,
+ .setup_psr = dcn10_dmcu_setup_psr,
+ .get_psr_state = dcn10_get_dmcu_psr_state,
+ .set_psr_wait_loop = dcn10_psr_wait_loop,
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop
+};
+#endif
+
+static void dce_dmcu_construct(
+ struct dce_dmcu *dmcu_dce,
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dmcu *base = &dmcu_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ dmcu_dce->regs = regs;
+ dmcu_dce->dmcu_shift = dmcu_shift;
+ dmcu_dce->dmcu_mask = dmcu_mask;
+}
+
+struct dmcu *dce_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dce_funcs;
+
+ return &dmcu_dce->base;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+struct dmcu *dcn10_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dcn10_funcs;
+
+ return &dmcu_dce->base;
+}
+#endif
+
+void dce_dmcu_destroy(struct dmcu **dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
+
+ kfree(dmcu_dce);
+ *dmcu = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
new file mode 100644
index 000000000000..b85f53c2f6f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_DMCU_H_
+#define _DCE_DMCU_H_
+
+#include "dmcu.h"
+
+#define DMCU_COMMON_REG_LIST_DCE_BASE() \
+ SR(DMCU_CTRL), \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_WR_CTRL), \
+ SR(DMCU_IRAM_WR_DATA), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(MASTER_COMM_DATA_REG2), \
+ SR(MASTER_COMM_DATA_REG3), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SR(SMU_INTERRUPT_CONTROL)
+
+#define DMCU_DCE110_COMMON_REG_LIST() \
+ DMCU_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define DMCU_DCN10_REG_LIST()\
+ DMCU_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DMU_MEM_PWR_CNTL)
+
+#define DMCU_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ DMCU_SF(DMCU_CTRL, \
+ DMCU_ENABLE, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_HOST_ACCESS_EN, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_WR_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(MASTER_COMM_CMD_REG, \
+ MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN1_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN2_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN3_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+
+#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
+ DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ DMCU_SF(DCI_MEM_PWR_STATUS, \
+ DMCU_IRAM_MEM_PWR_STATE, mask_sh)
+
+#define DMCU_MASK_SH_LIST_DCN10(mask_sh) \
+ DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ DMCU_SF(DMU_MEM_PWR_CNTL, \
+ DMCU_IRAM_MEM_PWR_STATE, mask_sh)
+
+#define DMCU_REG_FIELD_LIST(type) \
+ type DMCU_IRAM_MEM_PWR_STATE; \
+ type IRAM_HOST_ACCESS_EN; \
+ type IRAM_WR_ADDR_AUTO_INC; \
+ type DMCU_ENABLE; \
+ type MASTER_COMM_CMD_REG_BYTE0; \
+ type MASTER_COMM_INTERRUPT; \
+ type DPHY_RX_FAST_TRAINING_CAPABLE; \
+ type DPHY_LOAD_BS_COUNT; \
+ type STATIC_SCREEN1_INT_TO_UC_EN; \
+ type STATIC_SCREEN2_INT_TO_UC_EN; \
+ type STATIC_SCREEN3_INT_TO_UC_EN; \
+ type STATIC_SCREEN4_INT_TO_UC_EN; \
+ type DP_SEC_GSP0_LINE_NUM; \
+ type DP_SEC_GSP0_PRIORITY; \
+ type DC_SMU_INT_ENABLE
+
+struct dce_dmcu_shift {
+ DMCU_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_dmcu_mask {
+ DMCU_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_dmcu_registers {
+ uint32_t DMCU_CTRL;
+ uint32_t DMCU_RAM_ACCESS_CTRL;
+ uint32_t DCI_MEM_PWR_STATUS;
+ uint32_t DMU_MEM_PWR_CNTL;
+ uint32_t DMCU_IRAM_WR_CTRL;
+ uint32_t DMCU_IRAM_WR_DATA;
+
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t MASTER_COMM_DATA_REG2;
+ uint32_t MASTER_COMM_DATA_REG3;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t DMCU_IRAM_RD_CTRL;
+ uint32_t DMCU_IRAM_RD_DATA;
+ uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
+ uint32_t SMU_INTERRUPT_CONTROL;
+};
+
+struct dce_dmcu {
+ struct dmcu base;
+ const struct dce_dmcu_registers *regs;
+ const struct dce_dmcu_shift *dmcu_shift;
+ const struct dce_dmcu_mask *dmcu_mask;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG1 Bit position Data
+ * 7:0 hyst_frames[7:0]
+ * 14:8 hyst_lines[6:0]
+ * 15 RFB_UPDATE_AUTO_EN
+ * 18:16 phy_num[2:0]
+ * 21:19 dcp_sel[2:0]
+ * 22 phy_type
+ * 23 frame_cap_ind
+ * 26:24 aux_chan[2:0]
+ * 30:27 aux_repeat[3:0]
+ * 31:31 reserved[31:31]
+ ******************************************************************/
+union dce_dmcu_psr_config_data_reg1 {
+ struct {
+ unsigned int timehyst_frames:8; /*[7:0]*/
+ unsigned int hyst_lines:7; /*[14:8]*/
+ unsigned int rfb_update_auto_en:1; /*[15:15]*/
+ unsigned int dp_port_num:3; /*[18:16]*/
+ unsigned int dcp_sel:3; /*[21:19]*/
+ unsigned int phy_type:1; /*[22:22]*/
+ unsigned int frame_cap_ind:1; /*[23:23]*/
+ unsigned int aux_chan:3; /*[26:24]*/
+ unsigned int aux_repeat:4; /*[30:27]*/
+ unsigned int reserved:1; /*[31:31]*/
+ } bits;
+ unsigned int u32All;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG2
+ *******************************************************************/
+union dce_dmcu_psr_config_data_reg2 {
+ struct {
+ unsigned int dig_fe:3; /*[2:0]*/
+ unsigned int dig_be:3; /*[5:3]*/
+ unsigned int skip_wait_for_pll_lock:1; /*[6:6]*/
+ unsigned int reserved:9; /*[15:7]*/
+ unsigned int frame_delay:8; /*[23:16]*/
+ unsigned int smu_phy_id:4; /*[27:24]*/
+ unsigned int num_of_controllers:4; /*[31:28]*/
+ } bits;
+ unsigned int u32All;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG3
+ *******************************************************************/
+union dce_dmcu_psr_config_data_reg3 {
+ struct {
+ unsigned int psr_level:16; /*[15:0]*/
+ unsigned int link_rate:4; /*[19:16]*/
+ unsigned int reserved:12; /*[31:20]*/
+ } bits;
+ unsigned int u32All;
+};
+
+union dce_dmcu_psr_config_data_wait_loop_reg1 {
+ struct {
+ unsigned int wait_loop:16; /* [15:0] */
+ unsigned int reserved:16; /* [31:16] */
+ } bits;
+ unsigned int u32;
+};
+
+struct dmcu *dce_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
+
+struct dmcu *dcn10_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
+
+void dce_dmcu_destroy(struct dmcu **dmcu);
+
+#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
new file mode 100644
index 000000000000..d2e66b1bc0ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_hwseq.h"
+#include "reg_helper.h"
+#include "hw_sequencer.h"
+#include "core_types.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+void dce_enable_fe_clock(struct dce_hwseq *hws,
+ unsigned int fe_inst, bool enable)
+{
+ REG_UPDATE(DCFE_CLOCK_CONTROL[fe_inst],
+ DCFE_CLOCK_ENABLE, enable);
+}
+
+void dce_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ uint32_t lock_val = lock ? 1 : 0;
+ uint32_t dcp_grph, scl, blnd, update_lock_mode, val;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Not lock pipe when blank */
+ if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
+ return;
+
+ val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx],
+ BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
+ BLND_SCL_V_UPDATE_LOCK, &scl,
+ BLND_BLND_V_UPDATE_LOCK, &blnd,
+ BLND_V_UPDATE_LOCK_MODE, &update_lock_mode);
+
+ dcp_grph = lock_val;
+ scl = lock_val;
+ blnd = lock_val;
+ update_lock_mode = lock_val;
+
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
+ BLND_SCL_V_UPDATE_LOCK, scl);
+
+ if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ BLND_BLND_V_UPDATE_LOCK, blnd,
+ BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
+
+ if (hws->wa.blnd_crtc_trigger) {
+ if (!lock) {
+ uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]);
+ REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value);
+ }
+ }
+}
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+ unsigned int blnd_inst,
+ enum blnd_mode mode)
+{
+ uint32_t feedthrough = 1;
+ uint32_t blnd_mode = 0;
+ uint32_t multiplied_mode = 0;
+ uint32_t alpha_mode = 2;
+
+ switch (mode) {
+ case BLND_MODE_OTHER_PIPE:
+ feedthrough = 0;
+ blnd_mode = 1;
+ alpha_mode = 0;
+ break;
+ case BLND_MODE_BLENDING:
+ feedthrough = 0;
+ blnd_mode = 2;
+ alpha_mode = 0;
+ multiplied_mode = 1;
+ break;
+ case BLND_MODE_CURRENT_PIPE:
+ default:
+ if (REG(BLND_CONTROL[blnd_inst]) == REG(BLNDV_CONTROL) ||
+ blnd_inst == 0)
+ feedthrough = 0;
+ break;
+ }
+
+ REG_UPDATE(BLND_CONTROL[blnd_inst],
+ BLND_MODE, blnd_mode);
+
+ if (hws->masks->BLND_ALPHA_MODE != 0) {
+ REG_UPDATE_3(BLND_CONTROL[blnd_inst],
+ BLND_FEEDTHROUGH_EN, feedthrough,
+ BLND_ALPHA_MODE, alpha_mode,
+ BLND_MULTIPLIED_MODE, multiplied_mode);
+ }
+}
+
+
+static void dce_disable_sram_shut_down(struct dce_hwseq *hws)
+{
+ if (REG(DC_MEM_GLOBAL_PWR_REQ_CNTL))
+ REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL,
+ DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
+}
+
+static void dce_underlay_clock_enable(struct dce_hwseq *hws)
+{
+ /* todo: why do we need this at boot? is dce_enable_fe_clock enough? */
+ if (REG(DCFEV_CLOCK_CONTROL))
+ REG_UPDATE(DCFEV_CLOCK_CONTROL,
+ DCFEV_CLOCK_ENABLE, 1);
+}
+
+static void enable_hw_base_light_sleep(void)
+{
+ /* TODO: implement */
+}
+
+static void disable_sw_manual_control_light_sleep(void)
+{
+ /* TODO: implement */
+}
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+ bool enable)
+{
+ if (enable) {
+ enable_hw_base_light_sleep();
+ disable_sw_manual_control_light_sleep();
+ } else {
+ dce_disable_sram_shut_down(hws);
+ dce_underlay_clock_enable(hws);
+ }
+}
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+ struct clock_source *clk_src,
+ unsigned int tg_inst)
+{
+ if (clk_src->id == CLOCK_SOURCE_ID_DP_DTO || clk_src->dp_clk_src) {
+ REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+ DP_DTO0_ENABLE, 1);
+
+ } else if (clk_src->id >= CLOCK_SOURCE_COMBO_PHY_PLL0) {
+ uint32_t rate_source = clk_src->id - CLOCK_SOURCE_COMBO_PHY_PLL0;
+
+ REG_UPDATE_2(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+ PHYPLL_PIXEL_RATE_SOURCE, rate_source,
+ PIXEL_RATE_PLL_SOURCE, 0);
+
+ REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+ DP_DTO0_ENABLE, 0);
+
+ } else if (clk_src->id <= CLOCK_SOURCE_ID_PLL2) {
+ uint32_t rate_source = clk_src->id - CLOCK_SOURCE_ID_PLL0;
+
+ REG_UPDATE_2(PIXEL_RATE_CNTL[tg_inst],
+ PIXEL_RATE_SOURCE, rate_source,
+ DP_DTO0_ENABLE, 0);
+
+ if (REG(PHYPLL_PIXEL_RATE_CNTL[tg_inst]))
+ REG_UPDATE(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+ PIXEL_RATE_PLL_SOURCE, 1);
+ } else {
+ DC_ERR("Unknown clock source. clk_src id: %d, TG_inst: %d",
+ clk_src->id, tg_inst);
+ }
+}
+
+/* Only use LUT for 8 bit formats */
+bool dce_use_lut(const struct dc_plane_state *plane_state)
+{
+ switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
new file mode 100644
index 000000000000..52506155e361
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_HWSEQ_H__
+#define __DCE_HWSEQ_H__
+
+#include "hw_sequencer.h"
+
+#define BL_REG_LIST()\
+ SR(LVTMA_PWRSEQ_CNTL), \
+ SR(LVTMA_PWRSEQ_STATE)
+
+#define HWSEQ_DCEF_REG_LIST_DCE8() \
+ .DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[2] = mmCRTC2_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[3] = mmCRTC3_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[4] = mmCRTC4_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[5] = mmCRTC5_CRTC_DCFE_CLOCK_CONTROL
+
+#define HWSEQ_DCEF_REG_LIST() \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 3), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 4), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 5), \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+
+#define HWSEQ_BLND_REG_LIST() \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 0), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 1), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 3), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 4), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 5), \
+ SRII(BLND_CONTROL, BLND, 0), \
+ SRII(BLND_CONTROL, BLND, 1), \
+ SRII(BLND_CONTROL, BLND, 2), \
+ SRII(BLND_CONTROL, BLND, 3), \
+ SRII(BLND_CONTROL, BLND, 4), \
+ SRII(BLND_CONTROL, BLND, 5)
+
+#define HWSEQ_PIXEL_RATE_REG_LIST(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1), \
+ SRII(PIXEL_RATE_CNTL, blk, 2), \
+ SRII(PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_PHYPLL_REG_LIST(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_DCE11_REG_LIST_BASE() \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ SR(DCFEV_CLOCK_CONTROL), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+ SRII(CRTC_H_BLANK_START_END, CRTC, 0),\
+ SRII(CRTC_H_BLANK_START_END, CRTC, 1),\
+ SRII(BLND_V_UPDATE_LOCK, BLND, 0),\
+ SRII(BLND_V_UPDATE_LOCK, BLND, 1),\
+ SRII(BLND_CONTROL, BLND, 0),\
+ SRII(BLND_CONTROL, BLND, 1),\
+ SR(BLNDV_CONTROL),\
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
+ BL_REG_LIST()
+
+#define HWSEQ_DCE8_REG_LIST() \
+ HWSEQ_DCEF_REG_LIST_DCE8(), \
+ HWSEQ_BLND_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
+ BL_REG_LIST()
+
+#define HWSEQ_DCE10_REG_LIST() \
+ HWSEQ_DCEF_REG_LIST(), \
+ HWSEQ_BLND_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ BL_REG_LIST()
+
+#define HWSEQ_ST_REG_LIST() \
+ HWSEQ_DCE11_REG_LIST_BASE(), \
+ .DCFE_CLOCK_CONTROL[2] = mmDCFEV_CLOCK_CONTROL, \
+ .CRTC_H_BLANK_START_END[2] = mmCRTCV_H_BLANK_START_END, \
+ .BLND_V_UPDATE_LOCK[2] = mmBLNDV_V_UPDATE_LOCK, \
+ .BLND_CONTROL[2] = mmBLNDV_CONTROL
+
+#define HWSEQ_CZ_REG_LIST() \
+ HWSEQ_DCE11_REG_LIST_BASE(), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+ SRII(CRTC_H_BLANK_START_END, CRTC, 2), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+ SRII(BLND_CONTROL, BLND, 2), \
+ .DCFE_CLOCK_CONTROL[3] = mmDCFEV_CLOCK_CONTROL, \
+ .CRTC_H_BLANK_START_END[3] = mmCRTCV_H_BLANK_START_END, \
+ .BLND_V_UPDATE_LOCK[3] = mmBLNDV_V_UPDATE_LOCK, \
+ .BLND_CONTROL[3] = mmBLNDV_CONTROL
+
+#define HWSEQ_DCE120_REG_LIST() \
+ HWSEQ_DCE10_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ HWSEQ_PHYPLL_REG_LIST(CRTC), \
+ SR(DCHUB_FB_LOCATION),\
+ SR(DCHUB_AGP_BASE),\
+ SR(DCHUB_AGP_BOT),\
+ SR(DCHUB_AGP_TOP), \
+ BL_REG_LIST()
+
+#define HWSEQ_DCE112_REG_LIST() \
+ HWSEQ_DCE10_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ HWSEQ_PHYPLL_REG_LIST(CRTC), \
+ BL_REG_LIST()
+
+#define HWSEQ_DCN_REG_LIST()\
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 0), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 1), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 2), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 3), \
+ SRII(DCHUBP_CNTL, HUBP, 0), \
+ SRII(DCHUBP_CNTL, HUBP, 1), \
+ SRII(DCHUBP_CNTL, HUBP, 2), \
+ SRII(DCHUBP_CNTL, HUBP, 3), \
+ SRII(HUBP_CLK_CNTL, HUBP, 0), \
+ SRII(HUBP_CLK_CNTL, HUBP, 1), \
+ SRII(HUBP_CLK_CNTL, HUBP, 2), \
+ SRII(HUBP_CLK_CNTL, HUBP, 3), \
+ SRII(DPP_CONTROL, DPP_TOP, 0), \
+ SRII(DPP_CONTROL, DPP_TOP, 1), \
+ SRII(DPP_CONTROL, DPP_TOP, 2), \
+ SRII(DPP_CONTROL, DPP_TOP, 3), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 0), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 1), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 2), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 3), \
+ SR(REFCLK_CNTL), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
+ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
+ SR(DCHUBBUB_ARB_SAT_LEVEL),\
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_TEST_DEBUG_INDEX), \
+ SR(DCHUBBUB_TEST_DEBUG_DATA), \
+ SR(DIO_MEM_PWR_CTRL), \
+ SR(DCCG_GATE_DISABLE_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCFCLK_CNTL),\
+ SR(DCFCLK_CNTL), \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),\
+ MMHUB_SR(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),\
+ MMHUB_SR(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)
+
+#define HWSEQ_SR_WATERMARK_REG_LIST()\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
+
+#define HWSEQ_DCN1_REG_LIST()\
+ HWSEQ_DCN_REG_LIST(), \
+ HWSEQ_SR_WATERMARK_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
+ HWSEQ_PHYPLL_REG_LIST(OTG), \
+ SR(DCHUBBUB_SDPIF_FB_TOP),\
+ SR(DCHUBBUB_SDPIF_FB_BASE),\
+ SR(DCHUBBUB_SDPIF_FB_OFFSET),\
+ SR(DCHUBBUB_SDPIF_AGP_BASE),\
+ SR(DCHUBBUB_SDPIF_AGP_BOT),\
+ SR(DCHUBBUB_SDPIF_AGP_TOP),\
+ SR(DOMAIN0_PG_CONFIG), \
+ SR(DOMAIN1_PG_CONFIG), \
+ SR(DOMAIN2_PG_CONFIG), \
+ SR(DOMAIN3_PG_CONFIG), \
+ SR(DOMAIN4_PG_CONFIG), \
+ SR(DOMAIN5_PG_CONFIG), \
+ SR(DOMAIN6_PG_CONFIG), \
+ SR(DOMAIN7_PG_CONFIG), \
+ SR(DOMAIN0_PG_STATUS), \
+ SR(DOMAIN1_PG_STATUS), \
+ SR(DOMAIN2_PG_STATUS), \
+ SR(DOMAIN3_PG_STATUS), \
+ SR(DOMAIN4_PG_STATUS), \
+ SR(DOMAIN5_PG_STATUS), \
+ SR(DOMAIN6_PG_STATUS), \
+ SR(DOMAIN7_PG_STATUS), \
+ SR(D1VGA_CONTROL), \
+ SR(D2VGA_CONTROL), \
+ SR(D3VGA_CONTROL), \
+ SR(D4VGA_CONTROL), \
+ SR(DC_IP_REQUEST_CNTL), \
+ BL_REG_LIST()
+
+struct dce_hwseq_registers {
+
+ /* Backlight registers */
+ uint32_t LVTMA_PWRSEQ_CNTL;
+ uint32_t LVTMA_PWRSEQ_STATE;
+
+ uint32_t DCFE_CLOCK_CONTROL[6];
+ uint32_t DCFEV_CLOCK_CONTROL;
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
+ uint32_t BLND_V_UPDATE_LOCK[6];
+ uint32_t BLND_CONTROL[6];
+ uint32_t BLNDV_CONTROL;
+ uint32_t CRTC_H_BLANK_START_END[6];
+ uint32_t PIXEL_RATE_CNTL[6];
+ uint32_t PHYPLL_PIXEL_RATE_CNTL[6];
+ /*DCHUB*/
+ uint32_t DCHUB_FB_LOCATION;
+ uint32_t DCHUB_AGP_BASE;
+ uint32_t DCHUB_AGP_BOT;
+ uint32_t DCHUB_AGP_TOP;
+
+ uint32_t OTG_GLOBAL_SYNC_STATUS[4];
+ uint32_t DCHUBP_CNTL[4];
+ uint32_t HUBP_CLK_CNTL[4];
+ uint32_t DPP_CONTROL[4];
+ uint32_t OPP_PIPE_CONTROL[4];
+ uint32_t REFCLK_CNTL;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL;
+ uint32_t DCHUBBUB_ARB_SAT_LEVEL;
+ uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND;
+ uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL;
+ uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL;
+ uint32_t DCHUBBUB_TEST_DEBUG_INDEX;
+ uint32_t DCHUBBUB_TEST_DEBUG_DATA;
+ uint32_t DCHUBBUB_SDPIF_FB_TOP;
+ uint32_t DCHUBBUB_SDPIF_FB_BASE;
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
+ uint32_t DCHUBBUB_SDPIF_AGP_BASE;
+ uint32_t DCHUBBUB_SDPIF_AGP_BOT;
+ uint32_t DCHUBBUB_SDPIF_AGP_TOP;
+ uint32_t DC_IP_REQUEST_CNTL;
+ uint32_t DOMAIN0_PG_CONFIG;
+ uint32_t DOMAIN1_PG_CONFIG;
+ uint32_t DOMAIN2_PG_CONFIG;
+ uint32_t DOMAIN3_PG_CONFIG;
+ uint32_t DOMAIN4_PG_CONFIG;
+ uint32_t DOMAIN5_PG_CONFIG;
+ uint32_t DOMAIN6_PG_CONFIG;
+ uint32_t DOMAIN7_PG_CONFIG;
+ uint32_t DOMAIN0_PG_STATUS;
+ uint32_t DOMAIN1_PG_STATUS;
+ uint32_t DOMAIN2_PG_STATUS;
+ uint32_t DOMAIN3_PG_STATUS;
+ uint32_t DOMAIN4_PG_STATUS;
+ uint32_t DOMAIN5_PG_STATUS;
+ uint32_t DOMAIN6_PG_STATUS;
+ uint32_t DOMAIN7_PG_STATUS;
+ uint32_t DIO_MEM_PWR_CTRL;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
+ uint32_t DCCG_GATE_DISABLE_CNTL2;
+ uint32_t DCFCLK_CNTL;
+ uint32_t MICROSECOND_TIME_BASE_DIV;
+ uint32_t MILLISECOND_TIME_BASE_DIV;
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL;
+ uint32_t RBBMIF_TIMEOUT_DIS;
+ uint32_t RBBMIF_TIMEOUT_DIS_2;
+ uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DCHUBBUB_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_B_A;
+ uint32_t MPC_CRC_CTRL;
+ uint32_t MPC_CRC_RESULT_GB;
+ uint32_t MPC_CRC_RESULT_C;
+ uint32_t MPC_CRC_RESULT_AR;
+ uint32_t D1VGA_CONTROL;
+ uint32_t D2VGA_CONTROL;
+ uint32_t D3VGA_CONTROL;
+ uint32_t D4VGA_CONTROL;
+ /* MMHUB registers. read only. temporary hack */
+ uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32;
+ uint32_t VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32;
+ uint32_t VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32;
+ uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
+ uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
+ uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
+ uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+};
+ /* set field name */
+#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define HWS_SF1(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## blk_name ## field_name ## post_fix
+
+
+#define HWSEQ_DCEF_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF(blk, CLOCK_CONTROL, DCFE_CLOCK_ENABLE, mask_sh),\
+ SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
+
+#define HWSEQ_BLND_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_BLND_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_V_UPDATE_LOCK_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_FEEDTHROUGH_EN, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_ALPHA_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_MULTIPLIED_MODE, mask_sh)
+
+#define HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF1(blk, PIXEL_RATE_CNTL, PIXEL_RATE_SOURCE, mask_sh),\
+ HWS_SF(blk, PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
+
+#define HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
+ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
+
+#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
+ .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
+ HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+ SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE112_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\
+ SF(DCHUB_FB_LOCATION, FB_TOP, mask_sh),\
+ SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
+ SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
+ SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
+ SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCE12_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE0_DCFE_),\
+ HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
+ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
+ HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR, mask_sh), \
+ HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_OCCURRED, mask_sh), \
+ HWS_SF(HUBP0_, DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh), \
+ HWS_SF(HUBP0_, HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh), \
+ HWS_SF(DPP_TOP0_, DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ HWS_SF(OPP_PIPE0_, OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh),\
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+
+#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh), \
+ HWS_SF(DPP_TOP0_, DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh), \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, LOGICAL_PAGE_NUMBER_HI4, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, LOGICAL_PAGE_NUMBER_LO32, mask_sh),\
+ HWS_SF(, VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, PHYSICAL_PAGE_ADDR_HI4, mask_sh),\
+ HWS_SF(, VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, PHYSICAL_PAGE_ADDR_LO32, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, PHYSICAL_PAGE_NUMBER_MSB, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, PHYSICAL_PAGE_NUMBER_LSB, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_LOW_ADDR, LOGICAL_ADDR, mask_sh),\
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN1_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN2_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN3_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_STATUS, DOMAIN4_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_STATUS, DOMAIN5_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_REG_FIELD_LIST(type) \
+ type DCFE_CLOCK_ENABLE; \
+ type DCFEV_CLOCK_ENABLE; \
+ type DC_MEM_GLOBAL_PWR_REQ_DIS; \
+ type BLND_DCP_GRPH_V_UPDATE_LOCK; \
+ type BLND_SCL_V_UPDATE_LOCK; \
+ type BLND_DCP_GRPH_SURF_V_UPDATE_LOCK; \
+ type BLND_BLND_V_UPDATE_LOCK; \
+ type BLND_V_UPDATE_LOCK_MODE; \
+ type BLND_FEEDTHROUGH_EN; \
+ type BLND_ALPHA_MODE; \
+ type BLND_MODE; \
+ type BLND_MULTIPLIED_MODE; \
+ type DP_DTO0_ENABLE; \
+ type PIXEL_RATE_SOURCE; \
+ type PHYPLL_PIXEL_RATE_SOURCE; \
+ type PIXEL_RATE_PLL_SOURCE; \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ type PAGE_DIRECTORY_ENTRY_HI32;\
+ type PAGE_DIRECTORY_ENTRY_LO32;\
+ type LOGICAL_PAGE_NUMBER_HI4;\
+ type LOGICAL_PAGE_NUMBER_LO32;\
+ type PHYSICAL_PAGE_ADDR_HI4;\
+ type PHYSICAL_PAGE_ADDR_LO32;\
+ type PHYSICAL_PAGE_NUMBER_MSB;\
+ type PHYSICAL_PAGE_NUMBER_LSB;\
+ type LOGICAL_ADDR; \
+ type ENABLE_L1_TLB;\
+ type SYSTEM_ACCESS_MODE;\
+ type LVTMA_BLON;\
+ type LVTMA_PWRSEQ_TARGET_STATE_R;
+
+#define HWSEQ_DCN_REG_FIELD_LIST(type) \
+ type VUPDATE_NO_LOCK_EVENT_CLEAR; \
+ type VUPDATE_NO_LOCK_EVENT_OCCURRED; \
+ type HUBP_VTG_SEL; \
+ type HUBP_CLOCK_ENABLE; \
+ type DPP_CLOCK_ENABLE; \
+ type DPPCLK_RATE_CONTROL; \
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+ type SDPIF_AGP_BASE;\
+ type SDPIF_AGP_BOT;\
+ type SDPIF_AGP_TOP;\
+ type FB_TOP;\
+ type FB_BASE;\
+ type FB_OFFSET;\
+ type AGP_BASE;\
+ type AGP_BOT;\
+ type AGP_TOP;\
+ type DCHUBBUB_GLOBAL_TIMER_ENABLE; \
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_SAT_LEVEL;\
+ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
+ type OPP_PIPE_CLOCK_EN;\
+ type IP_REQUEST_EN; \
+ type DOMAIN0_POWER_FORCEON; \
+ type DOMAIN0_POWER_GATE; \
+ type DOMAIN1_POWER_FORCEON; \
+ type DOMAIN1_POWER_GATE; \
+ type DOMAIN2_POWER_FORCEON; \
+ type DOMAIN2_POWER_GATE; \
+ type DOMAIN3_POWER_FORCEON; \
+ type DOMAIN3_POWER_GATE; \
+ type DOMAIN4_POWER_FORCEON; \
+ type DOMAIN4_POWER_GATE; \
+ type DOMAIN5_POWER_FORCEON; \
+ type DOMAIN5_POWER_GATE; \
+ type DOMAIN6_POWER_FORCEON; \
+ type DOMAIN6_POWER_GATE; \
+ type DOMAIN7_POWER_FORCEON; \
+ type DOMAIN7_POWER_GATE; \
+ type DOMAIN0_PGFSM_PWR_STATUS; \
+ type DOMAIN1_PGFSM_PWR_STATUS; \
+ type DOMAIN2_PGFSM_PWR_STATUS; \
+ type DOMAIN3_PGFSM_PWR_STATUS; \
+ type DOMAIN4_PGFSM_PWR_STATUS; \
+ type DOMAIN5_PGFSM_PWR_STATUS; \
+ type DOMAIN6_PGFSM_PWR_STATUS; \
+ type DOMAIN7_PGFSM_PWR_STATUS; \
+ type DCFCLK_GATE_DIS; \
+ type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
+ type DENTIST_DPPCLK_WDIVIDER;
+
+struct dce_hwseq_shift {
+ HWSEQ_REG_FIELD_LIST(uint8_t)
+ HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_hwseq_mask {
+ HWSEQ_REG_FIELD_LIST(uint32_t)
+ HWSEQ_DCN_REG_FIELD_LIST(uint32_t)
+};
+
+
+enum blnd_mode {
+ BLND_MODE_CURRENT_PIPE = 0,/* Data from current pipe only */
+ BLND_MODE_OTHER_PIPE, /* Data from other pipe only */
+ BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
+};
+
+void dce_enable_fe_clock(struct dce_hwseq *hwss,
+ unsigned int inst, bool enable);
+
+void dce_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+ unsigned int blnd_inst, enum blnd_mode mode);
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+ bool enable);
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+ struct clock_source *clk_src,
+ unsigned int tg_inst);
+
+bool dce_use_lut(const struct dc_plane_state *plane_state);
+#endif /*__DCE_HWSEQ_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
new file mode 100644
index 000000000000..d618fdd0cc82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_ipp.h"
+#include "reg_helper.h"
+#include "dm_services.h"
+
+#define REG(reg) \
+ (ipp_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ipp_dce->ipp_shift->field_name, ipp_dce->ipp_mask->field_name
+
+#define CTX \
+ ipp_dce->base.ctx
+
+
+static void dce_ipp_cursor_set_position(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_position *position,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* lock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true);
+
+ /* Flag passed in structure differentiates cursor enable/disable. */
+ /* Update if it differs from cached state. */
+ REG_UPDATE(CUR_CONTROL, CURSOR_EN, position->enable);
+
+ REG_SET_2(CUR_POSITION, 0,
+ CURSOR_X_POSITION, position->x,
+ CURSOR_Y_POSITION, position->y);
+
+ REG_SET_2(CUR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, position->x_hotspot,
+ CURSOR_HOT_SPOT_Y, position->y_hotspot);
+
+ /* unlock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false);
+}
+
+static void dce_ipp_cursor_set_attributes(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_attributes *attributes)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+ int mode;
+
+ /* Lock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true);
+
+ /* Program cursor control */
+ switch (attributes->color_format) {
+ case CURSOR_MODE_MONO:
+ mode = 0;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ mode = 1;
+ break;
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ mode = 2;
+ break;
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ mode = 3;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* unsupported */
+ mode = 0;
+ }
+
+ REG_UPDATE_3(CUR_CONTROL,
+ CURSOR_MODE, mode,
+ CURSOR_2X_MAGNIFY, attributes->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CUR_INV_TRANS_CLAMP, attributes->attribute_flags.bits.INVERSE_TRANSPARENT_CLAMPING);
+
+ if (attributes->color_format == CURSOR_MODE_MONO) {
+ REG_SET_3(CUR_COLOR1, 0,
+ CUR_COLOR1_BLUE, 0,
+ CUR_COLOR1_GREEN, 0,
+ CUR_COLOR1_RED, 0);
+
+ REG_SET_3(CUR_COLOR2, 0,
+ CUR_COLOR2_BLUE, 0xff,
+ CUR_COLOR2_GREEN, 0xff,
+ CUR_COLOR2_RED, 0xff);
+ }
+
+ /*
+ * Program cursor size -- NOTE: HW spec specifies that HW register
+ * stores size as (height - 1, width - 1)
+ */
+ REG_SET_2(CUR_SIZE, 0,
+ CURSOR_WIDTH, attributes->width-1,
+ CURSOR_HEIGHT, attributes->height-1);
+
+ /* Program cursor surface address */
+ /* SURFACE_ADDRESS_HIGH: Higher order bits (39:32) of hardware cursor
+ * surface base address in byte. It is 4K byte aligned.
+ * The correct way to program cursor surface address is to first write
+ * to CUR_SURFACE_ADDRESS_HIGH, and then write to CUR_SURFACE_ADDRESS
+ */
+ REG_SET(CUR_SURFACE_ADDRESS_HIGH, 0,
+ CURSOR_SURFACE_ADDRESS_HIGH, attributes->address.high_part);
+
+ REG_SET(CUR_SURFACE_ADDRESS, 0,
+ CURSOR_SURFACE_ADDRESS, attributes->address.low_part);
+
+ /* Unlock Cursor registers. */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false);
+}
+
+
+static void dce_ipp_program_prescale(
+ struct input_pixel_processor *ipp,
+ struct ipp_prescale_params *params)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* set to bypass mode first before change */
+ REG_UPDATE(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_BYPASS,
+ 1);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_R, 0,
+ GRPH_PRESCALE_SCALE_R, params->scale,
+ GRPH_PRESCALE_BIAS_R, params->bias);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_G, 0,
+ GRPH_PRESCALE_SCALE_G, params->scale,
+ GRPH_PRESCALE_BIAS_G, params->bias);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_B, 0,
+ GRPH_PRESCALE_SCALE_B, params->scale,
+ GRPH_PRESCALE_BIAS_B, params->bias);
+
+ if (params->mode != IPP_PRESCALE_MODE_BYPASS) {
+ REG_UPDATE(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_BYPASS, 0);
+
+ /* If prescale is in use, then legacy lut should be bypassed */
+ REG_UPDATE(INPUT_GAMMA_CONTROL,
+ GRPH_INPUT_GAMMA_MODE, 1);
+ }
+}
+
+static void dce_ipp_program_input_lut(
+ struct input_pixel_processor *ipp,
+ const struct dc_gamma *gamma)
+{
+ int i;
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* power on LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 1);
+
+ /* enable all */
+ REG_SET(DC_LUT_WRITE_EN_MASK, 0, DC_LUT_WRITE_EN_MASK, 0x7);
+
+ /* 256 entry mode */
+ REG_UPDATE(DC_LUT_RW_MODE, DC_LUT_RW_MODE, 0);
+
+ /* LUT-256, unsigned, integer, new u0.12 format */
+ REG_SET_3(DC_LUT_CONTROL, 0,
+ DC_LUT_DATA_R_FORMAT, 3,
+ DC_LUT_DATA_G_FORMAT, 3,
+ DC_LUT_DATA_B_FORMAT, 3);
+
+ /* start from index 0 */
+ REG_SET(DC_LUT_RW_INDEX, 0,
+ DC_LUT_RW_INDEX, 0);
+
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.red[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.green[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.blue[i]));
+ }
+
+ /* power off LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 0);
+
+ /* bypass prescale, enable legacy LUT */
+ REG_UPDATE(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
+ REG_UPDATE(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
+}
+
+static void dce_ipp_set_degamma(
+ struct input_pixel_processor *ipp,
+ enum ipp_degamma_mode mode)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+ uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 1 : 0;
+
+ ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS ||
+ mode == IPP_DEGAMMA_MODE_HW_sRGB);
+
+ REG_SET_3(DEGAMMA_CONTROL, 0,
+ GRPH_DEGAMMA_MODE, degamma_type,
+ CURSOR_DEGAMMA_MODE, degamma_type,
+ CURSOR2_DEGAMMA_MODE, degamma_type);
+}
+
+static const struct ipp_funcs dce_ipp_funcs = {
+ .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes,
+ .ipp_cursor_set_position = dce_ipp_cursor_set_position,
+ .ipp_program_prescale = dce_ipp_program_prescale,
+ .ipp_program_input_lut = dce_ipp_program_input_lut,
+ .ipp_set_degamma = dce_ipp_set_degamma
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce_ipp_construct(
+ struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask)
+{
+ ipp_dce->base.ctx = ctx;
+ ipp_dce->base.inst = inst;
+ ipp_dce->base.funcs = &dce_ipp_funcs;
+
+ ipp_dce->regs = regs;
+ ipp_dce->ipp_shift = ipp_shift;
+ ipp_dce->ipp_mask = ipp_mask;
+}
+
+void dce_ipp_destroy(struct input_pixel_processor **ipp)
+{
+ kfree(TO_DCE_IPP(*ipp));
+ *ipp = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
new file mode 100644
index 000000000000..ca04e97d44c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCE_IPP_H_
+#define _DCE_IPP_H_
+
+#include "ipp.h"
+
+#define TO_DCE_IPP(ipp)\
+ container_of(ipp, struct dce_ipp, base)
+
+#define IPP_COMMON_REG_LIST_DCE_BASE(id) \
+ SRI(CUR_UPDATE, DCP, id), \
+ SRI(CUR_CONTROL, DCP, id), \
+ SRI(CUR_POSITION, DCP, id), \
+ SRI(CUR_HOT_SPOT, DCP, id), \
+ SRI(CUR_COLOR1, DCP, id), \
+ SRI(CUR_COLOR2, DCP, id), \
+ SRI(CUR_SIZE, DCP, id), \
+ SRI(CUR_SURFACE_ADDRESS_HIGH, DCP, id), \
+ SRI(CUR_SURFACE_ADDRESS, DCP, id), \
+ SRI(PRESCALE_GRPH_CONTROL, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_R, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_G, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_B, DCP, id), \
+ SRI(INPUT_GAMMA_CONTROL, DCP, id), \
+ SRI(DC_LUT_WRITE_EN_MASK, DCP, id), \
+ SRI(DC_LUT_RW_MODE, DCP, id), \
+ SRI(DC_LUT_CONTROL, DCP, id), \
+ SRI(DC_LUT_RW_INDEX, DCP, id), \
+ SRI(DC_LUT_SEQ_COLOR, DCP, id), \
+ SRI(DEGAMMA_CONTROL, DCP, id)
+
+#define IPP_DCE100_REG_LIST_DCE_BASE(id) \
+ IPP_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, CRTC, id)
+
+#define IPP_DCE110_REG_LIST_DCE_BASE(id) \
+ IPP_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, DCFE, id)
+
+#define IPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ IPP_SF(CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_EN, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \
+ IPP_SF(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \
+ IPP_SF(DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \
+ IPP_SF(DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \
+ IPP_SF(DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh)
+
+#define IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ IPP_SF(DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh)
+
+#define IPP_DCE120_MASK_SH_LIST_SOC_BASE(mask_sh) \
+ IPP_SF(DCP0_CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_EN, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \
+ IPP_SF(DCP0_CUR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(DCP0_CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(DCP0_CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(DCP0_CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \
+ IPP_SF(DCP0_CUR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(DCP0_CUR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(DCP0_CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(DCP0_CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \
+ IPP_SF(DCP0_INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \
+ IPP_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh)
+
+#define IPP_REG_FIELD_LIST(type) \
+ type CURSOR_UPDATE_LOCK; \
+ type CURSOR_EN; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CUR_INV_TRANS_CLAMP; \
+ type CUR_COLOR1_BLUE; \
+ type CUR_COLOR1_GREEN; \
+ type CUR_COLOR1_RED; \
+ type CUR_COLOR2_BLUE; \
+ type CUR_COLOR2_GREEN; \
+ type CUR_COLOR2_RED; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type GRPH_PRESCALE_BYPASS; \
+ type GRPH_PRESCALE_SCALE_R; \
+ type GRPH_PRESCALE_BIAS_R; \
+ type GRPH_PRESCALE_SCALE_G; \
+ type GRPH_PRESCALE_BIAS_G; \
+ type GRPH_PRESCALE_SCALE_B; \
+ type GRPH_PRESCALE_BIAS_B; \
+ type GRPH_INPUT_GAMMA_MODE; \
+ type DCP_LUT_MEM_PWR_DIS; \
+ type DC_LUT_WRITE_EN_MASK; \
+ type DC_LUT_RW_MODE; \
+ type DC_LUT_DATA_R_FORMAT; \
+ type DC_LUT_DATA_G_FORMAT; \
+ type DC_LUT_DATA_B_FORMAT; \
+ type DC_LUT_RW_INDEX; \
+ type DC_LUT_SEQ_COLOR; \
+ type GRPH_DEGAMMA_MODE; \
+ type CURSOR_DEGAMMA_MODE; \
+ type CURSOR2_DEGAMMA_MODE
+
+struct dce_ipp_shift {
+ IPP_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_ipp_mask {
+ IPP_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_ipp_registers {
+ uint32_t CUR_UPDATE;
+ uint32_t CUR_CONTROL;
+ uint32_t CUR_POSITION;
+ uint32_t CUR_HOT_SPOT;
+ uint32_t CUR_COLOR1;
+ uint32_t CUR_COLOR2;
+ uint32_t CUR_SIZE;
+ uint32_t CUR_SURFACE_ADDRESS_HIGH;
+ uint32_t CUR_SURFACE_ADDRESS;
+ uint32_t PRESCALE_GRPH_CONTROL;
+ uint32_t PRESCALE_VALUES_GRPH_R;
+ uint32_t PRESCALE_VALUES_GRPH_G;
+ uint32_t PRESCALE_VALUES_GRPH_B;
+ uint32_t INPUT_GAMMA_CONTROL;
+ uint32_t DCFE_MEM_PWR_CTRL;
+ uint32_t DC_LUT_WRITE_EN_MASK;
+ uint32_t DC_LUT_RW_MODE;
+ uint32_t DC_LUT_CONTROL;
+ uint32_t DC_LUT_RW_INDEX;
+ uint32_t DC_LUT_SEQ_COLOR;
+ uint32_t DEGAMMA_CONTROL;
+};
+
+struct dce_ipp {
+ struct input_pixel_processor base;
+ const struct dce_ipp_registers *regs;
+ const struct dce_ipp_shift *ipp_shift;
+ const struct dce_ipp_mask *ipp_mask;
+};
+
+void dce_ipp_construct(struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask);
+
+void dce_ipp_destroy(struct input_pixel_processor **ipp);
+
+#endif /* _DCE_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
new file mode 100644
index 000000000000..fe88852b4774
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -0,0 +1,1379 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dce_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+#ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT 0xa
+#endif
+
+#ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK 0x00000400L
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#endif
+
+#define CTX \
+ enc110->base.ctx
+
+#define REG(reg)\
+ (enc110->link_regs->reg)
+
+#define AUX_REG(reg)\
+ (enc110->aux_regs->reg)
+
+#define HPD_REG(reg)\
+ (enc110->hpd_regs->reg)
+
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+#define AUX_MAX_DEFER_WRITE_RETRY 20
+/*
+ * @brief
+ * Trigger Source Select
+ * ASIC-dependent, actual values for register programming
+ */
+#define DCE110_DIG_FE_SOURCE_SELECT_INVALID 0x0
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGA 0x1
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGB 0x2
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGC 0x4
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGD 0x08
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGE 0x10
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
+
+/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+/* For current ASICs pixel clock - 600MHz */
+#define MAX_ENCODER_CLOCK 600000
+
+enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DIG_REG(reg)\
+ (reg + enc110->offsets.dig)
+
+#define DP_REG(reg)\
+ (reg + enc110->offsets.dp)
+
+static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ dce110_link_encoder_validate_output_with_stream,
+ .hw_init = dce110_link_encoder_hw_init,
+ .setup = dce110_link_encoder_setup,
+ .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+ .enable_dp_output = dce110_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
+ .disable_output = dce110_link_encoder_disable_output,
+ .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dce110_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dce110_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dce110_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dce110_link_encoder_enable_hpd,
+ .disable_hpd = dce110_link_encoder_disable_hpd,
+ .destroy = dce110_link_encoder_destroy
+};
+
+static enum bp_result link_transmitter_control(
+ struct dce110_link_encoder *enc110,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+ struct dc_bios *bp = enc110->base.ctx->dc_bios;
+
+ result = bp->funcs->transmitter_control(bp, cntl);
+
+ return result;
+}
+
+static void enable_phy_bypass_mode(
+ struct dce110_link_encoder *enc110,
+ bool enable)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
+
+}
+
+static void disable_prbs_symbols(
+ struct dce110_link_encoder *enc110,
+ bool disable)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE_4(DP_DPHY_CNTL,
+ DPHY_ATEST_SEL_LANE0, disable,
+ DPHY_ATEST_SEL_LANE1, disable,
+ DPHY_ATEST_SEL_LANE2, disable,
+ DPHY_ATEST_SEL_LANE3, disable);
+}
+
+static void disable_prbs_mode(
+ struct dce110_link_encoder *enc110)
+{
+ REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
+}
+
+static void program_pattern_symbols(
+ struct dce110_link_encoder *enc110,
+ uint16_t pattern_symbols[8])
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_3(DP_DPHY_SYM0, 0,
+ DPHY_SYM1, pattern_symbols[0],
+ DPHY_SYM2, pattern_symbols[1],
+ DPHY_SYM3, pattern_symbols[2]);
+
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_3(DP_DPHY_SYM1, 0,
+ DPHY_SYM4, pattern_symbols[3],
+ DPHY_SYM5, pattern_symbols[4],
+ DPHY_SYM6, pattern_symbols[5]);
+
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_2(DP_DPHY_SYM2, 0,
+ DPHY_SYM7, pattern_symbols[6],
+ DPHY_SYM8, pattern_symbols[7]);
+}
+
+static void set_dp_phy_pattern_d102(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* For 10-bit PRBS or debug symbols
+ * please use the following sequence: */
+
+ /* Enable debug symbols on the lanes */
+
+ disable_prbs_symbols(enc110, true);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+
+ /* Program debug symbols to be output */
+ {
+ uint16_t pattern_symbols[8] = {
+ 0x2AA, 0x2AA, 0x2AA, 0x2AA,
+ 0x2AA, 0x2AA, 0x2AA, 0x2AA
+ };
+
+ program_pattern_symbols(enc110, pattern_symbols);
+ }
+
+ /* Enable phy bypass mode to enable the test pattern */
+
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_link_training_complete(
+ struct dce110_link_encoder *enc110,
+ bool complete)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
+
+}
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+ struct link_encoder *enc,
+ uint32_t index)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ /* Write Training Pattern */
+
+ REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
+
+ /* Set HW Register Training Complete to false */
+
+ set_link_training_complete(enc110, false);
+
+ /* Disable PHY Bypass mode to output Training Pattern */
+
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+}
+
+static void setup_panel_mode(
+ struct dce110_link_encoder *enc110,
+ enum dp_panel_mode panel_mode)
+{
+ uint32_t value;
+
+ ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+ value = REG_READ(DP_DPHY_INTERNAL_CTRL);
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ value = 0x1;
+ break;
+ case DP_PANEL_MODE_SPECIAL:
+ value = 0x11;
+ break;
+ default:
+ value = 0x0;
+ break;
+ }
+
+ REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
+}
+
+static void set_dp_phy_pattern_symbol_error(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* program correct panel mode*/
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+
+ /* A PRBS23 pattern is used for most DP electrical measurements. */
+
+ /* Enable PRBS symbols on the lanes */
+ disable_prbs_symbols(enc110, false);
+
+ /* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
+ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+ DPHY_PRBS_SEL, 1,
+ DPHY_PRBS_EN, 1);
+
+ /* Enable phy bypass mode to enable the test pattern */
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_prbs7(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* A PRBS7 pattern is used for most DP electrical measurements. */
+
+ /* Enable PRBS symbols on the lanes */
+ disable_prbs_symbols(enc110, false);
+
+ /* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
+ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+ DPHY_PRBS_SEL, 0,
+ DPHY_PRBS_EN, 1);
+
+ /* Enable phy bypass mode to enable the test pattern */
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_80bit_custom(
+ struct dce110_link_encoder *enc110,
+ const uint8_t *pattern)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Enable debug symbols on the lanes */
+
+ disable_prbs_symbols(enc110, true);
+
+ /* Enable PHY bypass mode to enable the test pattern */
+ /* TODO is it really needed ? */
+
+ enable_phy_bypass_mode(enc110, true);
+
+ /* Program 80 bit custom pattern */
+ {
+ uint16_t pattern_symbols[8];
+
+ pattern_symbols[0] =
+ ((pattern[1] & 0x03) << 8) | pattern[0];
+ pattern_symbols[1] =
+ ((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
+ pattern_symbols[2] =
+ ((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
+ pattern_symbols[3] =
+ (pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
+ pattern_symbols[4] =
+ ((pattern[6] & 0x03) << 8) | pattern[5];
+ pattern_symbols[5] =
+ ((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
+ pattern_symbols[6] =
+ ((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
+ pattern_symbols[7] =
+ (pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
+
+ program_pattern_symbols(enc110, pattern_symbols);
+ }
+
+ /* Enable phy bypass mode to enable the test pattern */
+
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_hbr2_compliance_cp2520_2(
+ struct dce110_link_encoder *enc110,
+ unsigned int cp2520_pattern)
+{
+
+ /* previously there is a register DP_HBR2_EYE_PATTERN
+ * that is enabled to get the pattern.
+ * But it does not work with the latest spec change,
+ * so we are programming the following registers manually.
+ *
+ * The following settings have been confirmed
+ * by Nick Chorney and Sandra Liu */
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Setup DIG encoder in DP SST mode */
+ enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT);
+
+ /* ensure normal panel mode. */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+
+ /* no vbid after BS (SR)
+ * DP_LINK_FRAMING_CNTL changed history Sandra Liu
+ * 11000260 / 11000104 / 110000FC */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0xFC,
+ DP_VBID_DISABLE, 1,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ /* swap every BS with SR */
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
+
+ /* select cp2520 patterns */
+ if (REG(DP_DPHY_HBR2_PATTERN_CONTROL))
+ REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL,
+ DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern);
+ else
+ /* pre-DCE11 can only generate CP2520 pattern 2 */
+ ASSERT(cp2520_pattern == 2);
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* disable video stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+}
+
+static void set_dp_phy_pattern_passthrough_mode(
+ struct dce110_link_encoder *enc110,
+ enum dp_panel_mode panel_mode)
+{
+ /* program correct panel mode */
+ setup_panel_mode(enc110, panel_mode);
+
+ /* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT
+ * in case we were doing HBR2 compliance pattern before
+ */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0x2000,
+ DP_VBID_DISABLE, 0,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+}
+
+/* return value is bit-vector */
+static uint8_t get_frontend_source(
+ enum engine_id engine)
+{
+ switch (engine) {
+ case ENGINE_ID_DIGA:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGA;
+ case ENGINE_ID_DIGB:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGB;
+ case ENGINE_ID_DIGC:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGC;
+ case ENGINE_ID_DIGD:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGD;
+ case ENGINE_ID_DIGE:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGE;
+ case ENGINE_ID_DIGF:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGF;
+ case ENGINE_ID_DIGG:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGG;
+ default:
+ ASSERT_CRITICAL(false);
+ return DCE110_DIG_FE_SOURCE_SELECT_INVALID;
+ }
+}
+
+static void configure_encoder(
+ struct dce110_link_encoder *enc110,
+ const struct dc_link_settings *link_settings)
+{
+ /* set number of lanes */
+
+ REG_SET(DP_CONFIG, 0,
+ DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
+
+ /* setup scrambler */
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
+}
+
+static void aux_initialize(
+ struct dce110_link_encoder *enc110)
+{
+ struct dc_context *ctx = enc110->base.ctx;
+ enum hpd_source_id hpd_source = enc110->base.hpd_source;
+ uint32_t addr = AUX_REG(AUX_CONTROL);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(value, hpd_source, AUX_CONTROL, AUX_HPD_SEL);
+ set_reg_field_value(value, 0, AUX_CONTROL, AUX_LS_READ_EN);
+ dm_write_reg(ctx, addr, value);
+
+ addr = AUX_REG(AUX_DPHY_RX_CONTROL0);
+ value = dm_read_reg(ctx, addr);
+
+ /* 1/4 window (the maximum allowed) */
+ set_reg_field_value(value, 1,
+ AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW);
+ dm_write_reg(ctx, addr, value);
+
+}
+
+void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+ bool exit_link_training_required)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ if (exit_link_training_required)
+ REG_UPDATE(DP_DPHY_FAST_TRAINING,
+ DPHY_RX_FAST_TRAINING_CAPABLE, 1);
+ else {
+ REG_UPDATE(DP_DPHY_FAST_TRAINING,
+ DPHY_RX_FAST_TRAINING_CAPABLE, 0);
+ /*In DCE 11, we are able to pre-program a Force SR register
+ * to be able to trigger SR symbol after 5 idle patterns
+ * transmitted. Upon PSR Exit, DMCU can trigger
+ * DPHY_LOAD_BS_COUNT_START = 1. Upon writing 1 to
+ * DPHY_LOAD_BS_COUNT_START and the internal counter
+ * reaches DPHY_LOAD_BS_COUNT, the next BS symbol will be
+ * replaced by SR symbol once.
+ */
+
+ REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
+ }
+}
+
+void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ REG_UPDATE_2(DP_SEC_CNTL1,
+ DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline,
+ DP_SEC_GSP0_PRIORITY, 1);
+}
+
+static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
+{
+ uint32_t value;
+
+ REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
+ return value;
+}
+
+static void link_encoder_disable(struct dce110_link_encoder *enc110)
+{
+ /* reset training pattern */
+ REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
+ DPHY_TRAINING_PATTERN_SEL, 0);
+
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+
+ /* reset panel mode */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+}
+
+static void hpd_initialize(
+ struct dce110_link_encoder *enc110)
+{
+ /* Associate HPD with DIG_BE */
+ enum hpd_source_id hpd_source = enc110->base.hpd_source;
+
+ REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
+}
+
+bool dce110_link_encoder_validate_dvi_output(
+ const struct dce110_link_encoder *enc110,
+ enum signal_type connector_signal,
+ enum signal_type signal,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
+
+ if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ max_pixel_clock *= 2;
+
+ /* This handles the case of HDMI downgrade to DVI we don't want to
+ * we don't want to cap the pixel clock if the DDI is not DVI.
+ */
+ if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
+ connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ max_pixel_clock = enc110->base.features.max_hdmi_pixel_clock;
+
+ /* DVI only support RGB pixel encoding */
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+ /*connect DVI via adpater's HDMI connector*/
+ if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
+ signal != SIGNAL_TYPE_HDMI_TYPE_A &&
+ crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK)
+ return false;
+ if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+ return false;
+
+ if (crtc_timing->pix_clk_khz > max_pixel_clock)
+ return false;
+
+ /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ break;
+ case COLOR_DEPTH_101010:
+ case COLOR_DEPTH_161616:
+ if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool dce110_link_encoder_validate_hdmi_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing,
+ int adjusted_pix_clk_khz)
+{
+ enum dc_color_depth max_deep_color =
+ enc110->base.features.max_hdmi_deep_color;
+
+ if (max_deep_color < crtc_timing->display_color_depth)
+ return false;
+
+ if (crtc_timing->display_color_depth < COLOR_DEPTH_888)
+ return false;
+ if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+ return false;
+
+ if ((adjusted_pix_clk_khz == 0) ||
+ (adjusted_pix_clk_khz > enc110->base.features.max_hdmi_pixel_clock))
+ return false;
+
+ /* DCE11 HW does not support 420 */
+ if (!enc110->base.features.ycbcr420_supported &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
+
+ if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
+ adjusted_pix_clk_khz >= 300000)
+ return false;
+ return true;
+}
+
+bool dce110_link_encoder_validate_dp_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* default RGB only */
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+
+ if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
+ return true;
+
+ /* for DCE 8.x or later DP Y-only feature,
+ * we need ASIC cap + FeatureSupportDPYonly, not support 666 */
+ if (crtc_timing->flags.Y_ONLY &&
+ enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
+ crtc_timing->display_color_depth != COLOR_DEPTH_666)
+ return true;
+
+ return false;
+}
+
+void dce110_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+
+ enc110->base.funcs = &dce110_lnk_enc_funcs;
+ enc110->base.ctx = init_data->ctx;
+ enc110->base.id = init_data->encoder;
+
+ enc110->base.hpd_source = init_data->hpd_source;
+ enc110->base.connector = init_data->connector;
+
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc110->base.features = *enc_features;
+
+ enc110->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether driver poll the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc110->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc110->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
+ * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
+ * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+ * Prefer DIG assignment is decided by board design.
+ * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
+ * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
+ * By this, adding DIGG should not hurt DCE 8.0.
+ * This will let DCE 8.1 share DCE 8.0 as much as possible
+ */
+
+ enc110->link_regs = link_regs;
+ enc110->aux_regs = aux_regs;
+ enc110->hpd_regs = hpd_regs;
+
+ switch (enc110->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc110->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc110->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc110->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc110->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc110->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc110->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc110->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* Override features with DCE-specific values */
+ if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
+ enc110->base.ctx->dc_bios, enc110->base.id,
+ &bp_cap_info)) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ }
+}
+
+bool dce110_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ bool is_valid;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ is_valid = dce110_link_encoder_validate_dvi_output(
+ enc110,
+ stream->sink->link->connector_signal,
+ stream->signal,
+ &stream->timing);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ is_valid = dce110_link_encoder_validate_hdmi_output(
+ enc110,
+ &stream->timing,
+ stream->phy_pix_clk);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ is_valid = dce110_link_encoder_validate_dp_output(
+ enc110, &stream->timing);
+ break;
+ case SIGNAL_TYPE_EDP:
+ is_valid =
+ (stream->timing.
+ pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ is_valid = true;
+ break;
+ default:
+ is_valid = false;
+ break;
+ }
+
+ return is_valid;
+}
+
+void dce110_link_encoder_hw_init(
+ struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ cntl.action = TRANSMITTER_CONTROL_INIT;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.connector_obj_id = enc110->base.connector;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.coherent = false;
+ cntl.hpd_sel = enc110->base.hpd_source;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (enc110->base.connector.id == CONNECTOR_ID_LVDS) {
+ cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ ASSERT(result == BP_RESULT_OK);
+
+ } else if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+ ctx->dc->hwss.edp_power_control(enc, true);
+ }
+ aux_initialize(enc110);
+
+ /* reinitialize HPD.
+ * hpd_initialize() will pass DIG_FE id to HW context.
+ * All other routine within HW context will use fe_engine_offset
+ * as DIG_FE id even caller pass DIG_FE id.
+ * So this routine must be called first. */
+ hpd_initialize(enc110);
+}
+
+void dce110_link_encoder_destroy(struct link_encoder **enc)
+{
+ kfree(TO_DCE110_LINK_ENC(*enc));
+ *enc = NULL;
+}
+
+void dce110_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ switch (signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ /* DP SST */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
+ break;
+ case SIGNAL_TYPE_LVDS:
+ /* LVDS */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ /* TMDS-DVI */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* TMDS-HDMI */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* DP MST */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ /* invalid mode ! */
+ break;
+ }
+
+}
+
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ if (hdmi) {
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.lanes_number = 4;
+ } else if (dual_link) {
+ cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ cntl.lanes_number = 8;
+ } else {
+ cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.lanes_number = 4;
+ }
+ cntl.hpd_sel = enc110->base.hpd_source;
+
+ cntl.pixel_clock = pixel_clock;
+ cntl.color_depth = color_depth;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ configure_encoder(enc110, link_settings);
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ configure_encoder(enc110, link_settings);
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+/*
+ * @brief
+ * Disable transmitter and its encoder
+ */
+void dce110_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal,
+ struct dc_link *link)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ if (!is_dig_enabled(enc110)) {
+ /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+ return;
+ }
+ if (enc110->base.connector.id == CONNECTOR_ID_EDP)
+ ctx->dc->hwss.edp_backlight_control(link, false);
+ /* Power-down RX and disable GPU PHY should be paired.
+ * Disabling PHY without powering down RX may cause
+ * symbol lock loss, on which we will get DP Sink interrupt. */
+
+ /* There is a case for the DP active dongles
+ * where we want to disable the PHY but keep RX powered,
+ * for those we need to ignore DP Sink interrupt
+ * by checking lane count that has been set
+ * on the last do_enable_output(). */
+
+ /* disable transmitter */
+ cntl.action = TRANSMITTER_CONTROL_DISABLE;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.signal = signal;
+ cntl.connector_obj_id = enc110->base.connector;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* disable encoder */
+ if (dc_is_dp_signal(signal))
+ link_encoder_disable(enc110);
+
+ if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+ /* power down eDP panel */
+ /* TODO: Power control cause regression, we should implement
+ * it properly, for now just comment it.
+ *
+ * link_encoder_edp_wait_for_hpd_ready(
+ link_enc,
+ link_enc->connector,
+ false);
+
+ * link_encoder_edp_power_control(
+ link_enc, false); */
+ }
+}
+
+void dce110_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ union dpcd_training_lane_set training_lane_set = { { 0 } };
+ int32_t lane = 0;
+ struct bp_transmitter_control cntl = { 0 };
+
+ if (!link_settings) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.connector_obj_id = enc110->base.connector;
+ cntl.lanes_number = link_settings->link_settings.lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_settings.link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+
+ for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) {
+ /* translate lane settings */
+
+ training_lane_set.bits.VOLTAGE_SWING_SET =
+ link_settings->lane_settings[lane].VOLTAGE_SWING;
+ training_lane_set.bits.PRE_EMPHASIS_SET =
+ link_settings->lane_settings[lane].PRE_EMPHASIS;
+
+ /* post cursor 2 setting only applies to HBR2 link rate */
+ if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
+ /* this is passed to VBIOS
+ * to program post cursor 2 level */
+
+ training_lane_set.bits.POST_CURSOR2_SET =
+ link_settings->lane_settings[lane].POST_CURSOR2;
+ }
+
+ cntl.lane_select = lane;
+ cntl.lane_settings = training_lane_set.raw;
+
+ /* call VBIOS table to set voltage swing and pre-emphasis */
+ link_transmitter_control(enc110, &cntl);
+ }
+}
+
+/* set DP PHY test and training patterns */
+void dce110_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ switch (param->dp_phy_pattern) {
+ case DP_TEST_PATTERN_TRAINING_PATTERN1:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN2:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN3:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3);
+ break;
+ case DP_TEST_PATTERN_D102:
+ set_dp_phy_pattern_d102(enc110);
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ set_dp_phy_pattern_symbol_error(enc110);
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ set_dp_phy_pattern_prbs7(enc110);
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ set_dp_phy_pattern_80bit_custom(
+ enc110, param->custom_pattern);
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 1);
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 2);
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 3);
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE: {
+ set_dp_phy_pattern_passthrough_mode(
+ enc110, param->dp_panel_mode);
+ break;
+ }
+
+ default:
+ /* invalid phy pattern */
+ ASSERT_CRITICAL(false);
+ break;
+ }
+}
+
+static void fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots)
+{
+ const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
+
+ if (stream_enc) {
+ *src = stream_enc->id;
+ *slots = stream_allocation->slot_count;
+ } else {
+ *src = 0;
+ *slots = 0;
+ }
+}
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ uint32_t value0 = 0;
+ uint32_t value1 = 0;
+ uint32_t value2 = 0;
+ uint32_t slots = 0;
+ uint32_t src = 0;
+ uint32_t retries = 0;
+
+ /* For CZ, there are only 3 pipes. So Virtual channel is up 3.*/
+
+ /* --- Set MSE Stream Attribute -
+ * Setup VC Payload Table on Tx Side,
+ * Issue allocation change trigger
+ * to commit payload on both tx and rx side */
+
+ /* we should clean-up table each time */
+
+ if (table->stream_count >= 1) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[0],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT0,
+ DP_MSE_SAT_SRC0, src,
+ DP_MSE_SAT_SLOT_COUNT0, slots);
+
+ if (table->stream_count >= 2) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[1],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT0,
+ DP_MSE_SAT_SRC1, src,
+ DP_MSE_SAT_SLOT_COUNT1, slots);
+
+ if (table->stream_count >= 3) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[2],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT1,
+ DP_MSE_SAT_SRC2, src,
+ DP_MSE_SAT_SLOT_COUNT2, slots);
+
+ if (table->stream_count >= 4) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[3],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT1,
+ DP_MSE_SAT_SRC3, src,
+ DP_MSE_SAT_SLOT_COUNT3, slots);
+
+ /* --- wait for transaction finish */
+
+ /* send allocation change trigger (ACT) ?
+ * this step first sends the ACT,
+ * then double buffers the SAT into the hardware
+ * making the new allocation active on the DP MST mode link */
+
+
+ /* DP_MSE_SAT_UPDATE:
+ * 0 - No Action
+ * 1 - Update SAT with trigger
+ * 2 - Update SAT without trigger */
+
+ REG_UPDATE(DP_MSE_SAT_UPDATE,
+ DP_MSE_SAT_UPDATE, 1);
+
+ /* wait for update to complete
+ * (i.e. DP_MSE_SAT_UPDATE field is reset to 0)
+ * then wait for the transmission
+ * of at least 16 MTP headers on immediate local link.
+ * i.e. DP_MSE_16_MTP_KEEPOUT field (read only) is reset to 0
+ * a value of 1 indicates that DP MST mode
+ * is in the 16 MTP keepout region after a VC has been added.
+ * MST stream bandwidth (VC rate) can be configured
+ * after this bit is cleared */
+
+ do {
+ udelay(10);
+
+ value0 = REG_READ(DP_MSE_SAT_UPDATE);
+
+ REG_GET(DP_MSE_SAT_UPDATE,
+ DP_MSE_SAT_UPDATE, &value1);
+
+ REG_GET(DP_MSE_SAT_UPDATE,
+ DP_MSE_16_MTP_KEEPOUT, &value2);
+
+ /* bit field DP_MSE_SAT_UPDATE is set to 1 already */
+ if (!value1 && !value2)
+ break;
+ ++retries;
+ } while (retries < DP_MST_UPDATE_MAX_RETRY);
+}
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ uint32_t field;
+
+ if (engine != ENGINE_ID_UNKNOWN) {
+
+ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
+
+ if (connect)
+ field |= get_frontend_source(engine);
+ else
+ field &= ~get_frontend_source(engine);
+
+ REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
+ }
+}
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+ uint32_t hpd_enable = 0;
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN);
+
+ if (hpd_enable == 0)
+ set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN);
+}
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
new file mode 100644
index 000000000000..494067dedd03
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCE110_H__
+#define __DC_LINK_ENCODER__DCE110_H__
+
+#include "link_encoder.h"
+
+#define TO_DCE110_LINK_ENC(link_encoder)\
+ container_of(link_encoder, struct dce110_link_encoder, base)
+
+/* Not found regs in dce120 spec
+ * BIOS_SCRATCH_2
+ * DP_DPHY_INTERNAL_CTRL
+ */
+
+#define AUX_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+
+#define HPD_REG_LIST(id)\
+ SRI(DC_HPD_CONTROL, HPD, id)
+
+#define LE_COMMON_REG_LIST_BASE(id) \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SRI(DIG_BE_CNTL, DIG, id), \
+ SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(DP_CONFIG, DP, id), \
+ SRI(DP_DPHY_CNTL, DP, id), \
+ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
+ SRI(DP_DPHY_SYM0, DP, id), \
+ SRI(DP_DPHY_SYM1, DP, id), \
+ SRI(DP_DPHY_SYM2, DP, id), \
+ SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI(DP_LINK_CNTL, DP, id), \
+ SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI(DP_MSE_SAT0, DP, id), \
+ SRI(DP_MSE_SAT1, DP, id), \
+ SRI(DP_MSE_SAT2, DP, id), \
+ SRI(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id)
+
+#define LE_COMMON_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE80_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ LE_COMMON_REG_LIST_BASE(id)
+
+#define LE_DCE100_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE110_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE120_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCN10_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
+struct dce110_link_enc_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL0;
+};
+
+struct dce110_link_enc_hpd_registers {
+ uint32_t DC_HPD_CONTROL;
+};
+
+struct dce110_link_enc_registers {
+ /* DMCU registers */
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t MASTER_COMM_DATA_REG2;
+ uint32_t MASTER_COMM_DATA_REG3;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t DMCU_RAM_ACCESS_CTRL;
+ uint32_t DCI_MEM_PWR_STATUS;
+ uint32_t DMU_MEM_PWR_CNTL;
+ uint32_t DMCU_IRAM_RD_CTRL;
+ uint32_t DMCU_IRAM_RD_DATA;
+ uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
+
+ /* Common DP registers */
+ uint32_t DIG_BE_CNTL;
+ uint32_t DIG_BE_EN_CNTL;
+ uint32_t DP_CONFIG;
+ uint32_t DP_DPHY_CNTL;
+ uint32_t DP_DPHY_INTERNAL_CTRL;
+ uint32_t DP_DPHY_PRBS_CNTL;
+ uint32_t DP_DPHY_SCRAM_CNTL;
+ uint32_t DP_DPHY_SYM0;
+ uint32_t DP_DPHY_SYM1;
+ uint32_t DP_DPHY_SYM2;
+ uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
+ uint32_t DP_LINK_CNTL;
+ uint32_t DP_LINK_FRAMING_CNTL;
+ uint32_t DP_MSE_SAT0;
+ uint32_t DP_MSE_SAT1;
+ uint32_t DP_MSE_SAT2;
+ uint32_t DP_MSE_SAT_UPDATE;
+ uint32_t DP_SEC_CNTL;
+ uint32_t DP_VID_STREAM_CNTL;
+ uint32_t DP_DPHY_FAST_TRAINING;
+ uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
+ uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
+ uint32_t DP_SEC_CNTL1;
+};
+
+struct dce110_link_encoder {
+ struct link_encoder base;
+ const struct dce110_link_enc_registers *link_regs;
+ const struct dce110_link_enc_aux_registers *aux_regs;
+ const struct dce110_link_enc_hpd_registers *hpd_regs;
+};
+
+
+void dce110_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs);
+
+bool dce110_link_encoder_validate_dvi_output(
+ const struct dce110_link_encoder *enc110,
+ enum signal_type connector_signal,
+ enum signal_type signal,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_dp_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_wireless_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream);
+
+/****************** HW programming ************************/
+
+/* initialize HW */ /* why do we initialze aux in here? */
+void dce110_link_encoder_hw_init(struct link_encoder *enc);
+
+void dce110_link_encoder_destroy(struct link_encoder **enc);
+
+/* program DIG_MODE in DIG_BE */
+/* TODO can this be combined with enable_output? */
+void dce110_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
+/* enables TMDS PHY output */
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock);
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/* disable PHY output */
+void dce110_link_encoder_disable_output(
+ struct link_encoder *link_enc,
+ enum signal_type signal,
+ struct dc_link *link);
+
+/* set DP lane settings */
+void dce110_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings);
+
+void dce110_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param);
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect);
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+ struct link_encoder *enc,
+ uint32_t index);
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc);
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc);
+
+void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+ bool exit_link_training_required);
+
+void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline);
+
+#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
new file mode 100644
index 000000000000..0790f25c7b3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_mem_input.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+
+#define CTX \
+ dce_mi->base.ctx
+#define REG(reg)\
+ dce_mi->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_mi->shifts->field_name, dce_mi->masks->field_name
+
+struct pte_setting {
+ unsigned int bpp;
+ unsigned int page_width;
+ unsigned int page_height;
+ unsigned char min_pte_before_flip_horiz_scan;
+ unsigned char min_pte_before_flip_vert_scan;
+ unsigned char pte_req_per_chunk;
+ unsigned char param_6;
+ unsigned char param_7;
+ unsigned char param_8;
+};
+
+enum mi_bits_per_pixel {
+ mi_bpp_8 = 0,
+ mi_bpp_16,
+ mi_bpp_32,
+ mi_bpp_64,
+ mi_bpp_count,
+};
+
+enum mi_tiling_format {
+ mi_tiling_linear = 0,
+ mi_tiling_1D,
+ mi_tiling_2D,
+ mi_tiling_count,
+};
+
+static const struct pte_setting pte_settings[mi_tiling_count][mi_bpp_count] = {
+ [mi_tiling_linear] = {
+ { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
+ { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
+ { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
+ { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
+ },
+ [mi_tiling_1D] = {
+ { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
+ { 16, 256, 8, 2, 0, 1, 0, 0, 0},
+ { 32, 128, 8, 4, 0, 1, 0, 0, 0},
+ { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
+ },
+ [mi_tiling_2D] = {
+ { 8, 64, 64, 8, 8, 1, 4, 0, 0},
+ { 16, 64, 32, 8, 16, 1, 8, 0, 0},
+ { 32, 32, 32, 16, 16, 1, 8, 0, 0},
+ { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
+ },
+};
+
+static enum mi_bits_per_pixel get_mi_bpp(
+ enum surface_pixel_format format)
+{
+ if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+ return mi_bpp_64;
+ else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888)
+ return mi_bpp_32;
+ else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB1555)
+ return mi_bpp_16;
+ else
+ return mi_bpp_8;
+}
+
+static enum mi_tiling_format get_mi_tiling(
+ union dc_tiling_info *tiling_info)
+{
+ switch (tiling_info->gfx8.array_mode) {
+ case DC_ARRAY_1D_TILED_THIN1:
+ case DC_ARRAY_1D_TILED_THICK:
+ case DC_ARRAY_PRT_TILED_THIN1:
+ return mi_tiling_1D;
+ case DC_ARRAY_2D_TILED_THIN1:
+ case DC_ARRAY_2D_TILED_THICK:
+ case DC_ARRAY_2D_TILED_X_THICK:
+ case DC_ARRAY_PRT_2D_TILED_THIN1:
+ case DC_ARRAY_PRT_2D_TILED_THICK:
+ return mi_tiling_2D;
+ case DC_ARRAY_LINEAR_GENERAL:
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ return mi_tiling_linear;
+ default:
+ return mi_tiling_2D;
+ }
+}
+
+static bool is_vert_scan(enum dc_rotation_angle rotation)
+{
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void dce_mi_program_pte_vm(
+ struct mem_input *mi,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ enum mi_bits_per_pixel mi_bpp = get_mi_bpp(format);
+ enum mi_tiling_format mi_tiling = get_mi_tiling(tiling_info);
+ const struct pte_setting *pte = &pte_settings[mi_tiling][mi_bpp];
+
+ unsigned int page_width = log_2(pte->page_width);
+ unsigned int page_height = log_2(pte->page_height);
+ unsigned int min_pte_before_flip = is_vert_scan(rotation) ?
+ pte->min_pte_before_flip_vert_scan :
+ pte->min_pte_before_flip_horiz_scan;
+
+ REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT,
+ GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff);
+
+ REG_UPDATE_3(DVMM_PTE_CONTROL,
+ DVMM_PAGE_WIDTH, page_width,
+ DVMM_PAGE_HEIGHT, page_height,
+ DVMM_MIN_PTE_BEFORE_FLIP, min_pte_before_flip);
+
+ REG_UPDATE_2(DVMM_PTE_ARB_CONTROL,
+ DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk,
+ DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff);
+}
+
+static void program_urgency_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t urgency_low_wm,
+ uint32_t urgency_high_wm)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK, wm_select);
+
+ REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
+ URGENCY_LOW_WATERMARK, urgency_low_wm,
+ URGENCY_HIGH_WATERMARK, urgency_high_wm);
+}
+
+static void program_nbp_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t nbp_wm)
+{
+ if (REG(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL)) {
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE, 1,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+ REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK, nbp_wm);
+ }
+
+ if (REG(DPG_PIPE_LOW_POWER_CONTROL)) {
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE_3(DPG_PIPE_LOW_POWER_CONTROL,
+ PSTATE_CHANGE_ENABLE, 1,
+ PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+ PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+ REG_UPDATE(DPG_PIPE_LOW_POWER_CONTROL,
+ PSTATE_CHANGE_WATERMARK, nbp_wm);
+ }
+}
+
+static void program_stutter_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t stutter_mark)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
+
+ if (REG(DPG_PIPE_STUTTER_CONTROL2))
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+ else
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+}
+
+static void dce_mi_program_display_marks(
+ struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ program_urgency_watermark(dce_mi, 2, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 1, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
+ program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
+
+ program_stutter_watermark(dce_mi, 2, stutter.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter.d_mark); /* set d */
+}
+
+static void dce120_mi_program_display_marks(struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ program_urgency_watermark(dce_mi, 0, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 1, /* set b */
+ urgent.b_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 2, /* set c */
+ urgent.c_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 3, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */
+ program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */
+ program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
+ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
+
+ program_stutter_watermark(dce_mi, 0, stutter.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter.b_mark); /* set b */
+ program_stutter_watermark(dce_mi, 2, stutter.c_mark); /* set c */
+ program_stutter_watermark(dce_mi, 3, stutter.d_mark); /* set d */
+}
+
+static void program_tiling(
+ struct dce_mem_input *dce_mi, const union dc_tiling_info *info)
+{
+ if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
+ REG_UPDATE_6(GRPH_CONTROL,
+ GRPH_SW_MODE, info->gfx9.swizzle,
+ GRPH_NUM_BANKS, log_2(info->gfx9.num_banks),
+ GRPH_NUM_SHADER_ENGINES, log_2(info->gfx9.num_shader_engines),
+ GRPH_NUM_PIPES, log_2(info->gfx9.num_pipes),
+ GRPH_COLOR_EXPANSION_MODE, 1,
+ GRPH_SE_ENABLE, info->gfx9.shaderEnable);
+ /* TODO: DCP0_GRPH_CONTROL__GRPH_SE_ENABLE where to get info
+ GRPH_SE_ENABLE, 1,
+ GRPH_Z, 0);
+ */
+ }
+
+ if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX8 */
+ REG_UPDATE_9(GRPH_CONTROL,
+ GRPH_NUM_BANKS, info->gfx8.num_banks,
+ GRPH_BANK_WIDTH, info->gfx8.bank_width,
+ GRPH_BANK_HEIGHT, info->gfx8.bank_height,
+ GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect,
+ GRPH_TILE_SPLIT, info->gfx8.tile_split,
+ GRPH_MICRO_TILE_MODE, info->gfx8.tile_mode,
+ GRPH_PIPE_CONFIG, info->gfx8.pipe_config,
+ GRPH_ARRAY_MODE, info->gfx8.array_mode,
+ GRPH_COLOR_EXPANSION_MODE, 1);
+ /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */
+ /*
+ GRPH_Z, 0);
+ */
+ }
+}
+
+
+static void program_size_and_rotation(
+ struct dce_mem_input *dce_mi,
+ enum dc_rotation_angle rotation,
+ const union plane_size *plane_size)
+{
+ const struct rect *in_rect = &plane_size->grph.surface_size;
+ struct rect hw_rect = plane_size->grph.surface_size;
+ const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = {
+ [ROTATION_ANGLE_0] = 0,
+ [ROTATION_ANGLE_90] = 1,
+ [ROTATION_ANGLE_180] = 2,
+ [ROTATION_ANGLE_270] = 3,
+ };
+
+ if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) {
+ hw_rect.x = in_rect->y;
+ hw_rect.y = in_rect->x;
+
+ hw_rect.height = in_rect->width;
+ hw_rect.width = in_rect->height;
+ }
+
+ REG_SET(GRPH_X_START, 0,
+ GRPH_X_START, hw_rect.x);
+
+ REG_SET(GRPH_Y_START, 0,
+ GRPH_Y_START, hw_rect.y);
+
+ REG_SET(GRPH_X_END, 0,
+ GRPH_X_END, hw_rect.width);
+
+ REG_SET(GRPH_Y_END, 0,
+ GRPH_Y_END, hw_rect.height);
+
+ REG_SET(GRPH_PITCH, 0,
+ GRPH_PITCH, plane_size->grph.surface_pitch);
+
+ REG_SET(HW_ROTATION, 0,
+ GRPH_ROTATION_ANGLE, rotation_angles[rotation]);
+}
+
+static void program_grph_pixel_format(
+ struct dce_mem_input *dce_mi,
+ enum surface_pixel_format format)
+{
+ uint32_t red_xbar = 0, blue_xbar = 0; /* no swap */
+ uint32_t grph_depth = 0, grph_format = 0;
+ uint32_t sign = 0, floating = 0;
+
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 ||
+ /*todo: doesn't look like we handle BGRA here,
+ * should problem swap endian*/
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ /* ABGR formats */
+ red_xbar = 2;
+ blue_xbar = 2;
+ }
+
+ REG_SET_2(GRPH_SWAP_CNTL, 0,
+ GRPH_RED_CROSSBAR, red_xbar,
+ GRPH_BLUE_CROSSBAR, blue_xbar);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ grph_depth = 0;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ grph_depth = 1;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ grph_depth = 1;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ grph_depth = 2;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ sign = 1;
+ floating = 1;
+ /* no break */
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ grph_depth = 3;
+ grph_format = 0;
+ break;
+ default:
+ DC_ERR("unsupported grph pixel format");
+ break;
+ }
+
+ REG_UPDATE_2(GRPH_CONTROL,
+ GRPH_DEPTH, grph_depth,
+ GRPH_FORMAT, grph_format);
+
+ REG_UPDATE_4(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_SELECT, floating,
+ GRPH_PRESCALE_R_SIGN, sign,
+ GRPH_PRESCALE_G_SIGN, sign,
+ GRPH_PRESCALE_B_SIGN, sign);
+}
+
+static void dce_mi_program_surface_config(
+ struct mem_input *mi,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1);
+
+ program_tiling(dce_mi, tiling_info);
+ program_size_and_rotation(dce_mi, rotation, plane_size);
+
+ if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
+ format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ program_grph_pixel_format(dce_mi, format);
+}
+
+static uint32_t get_dmif_switch_time_us(
+ uint32_t h_total,
+ uint32_t v_total,
+ uint32_t pix_clk_khz)
+{
+ uint32_t frame_time;
+ uint32_t pixels_per_second;
+ uint32_t pixels_per_frame;
+ uint32_t refresh_rate;
+ const uint32_t us_in_sec = 1000000;
+ const uint32_t min_single_frame_time_us = 30000;
+ /*return double of frame time*/
+ const uint32_t single_frame_time_multiplier = 2;
+
+ if (!h_total || v_total || !pix_clk_khz)
+ return single_frame_time_multiplier * min_single_frame_time_us;
+
+ /*TODO: should we use pixel format normalized pixel clock here?*/
+ pixels_per_second = pix_clk_khz * 1000;
+ pixels_per_frame = h_total * v_total;
+
+ if (!pixels_per_second || !pixels_per_frame) {
+ /* avoid division by zero */
+ ASSERT(pixels_per_frame);
+ ASSERT(pixels_per_second);
+ return single_frame_time_multiplier * min_single_frame_time_us;
+ }
+
+ refresh_rate = pixels_per_second / pixels_per_frame;
+
+ if (!refresh_rate) {
+ /* avoid division by zero*/
+ ASSERT(refresh_rate);
+ return single_frame_time_multiplier * min_single_frame_time_us;
+ }
+
+ frame_time = us_in_sec / refresh_rate;
+
+ if (frame_time < min_single_frame_time_us)
+ frame_time = min_single_frame_time_us;
+
+ frame_time *= single_frame_time_multiplier;
+
+ return frame_time;
+}
+
+static void dce_mi_allocate_dmif(
+ struct mem_input *mi,
+ uint32_t h_total,
+ uint32_t v_total,
+ uint32_t pix_clk_khz,
+ uint32_t total_stream_num)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ const uint32_t retry_delay = 10;
+ uint32_t retry_count = get_dmif_switch_time_us(
+ h_total,
+ v_total,
+ pix_clk_khz) / retry_delay;
+
+ uint32_t pix_dur;
+ uint32_t buffers_allocated;
+ uint32_t dmif_buffer_control;
+
+ dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+ if (buffers_allocated == 2)
+ return;
+
+ REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+ DMIF_BUFFERS_ALLOCATED, 2);
+
+ REG_WAIT(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+ retry_delay, retry_count);
+
+ if (pix_clk_khz != 0) {
+ pix_dur = 1000000000ULL / pix_clk_khz;
+
+ REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION, pix_dur);
+ }
+
+ if (dce_mi->wa.single_head_rdreq_dmif_limit) {
+ uint32_t eanble = (total_stream_num > 1) ? 0 :
+ dce_mi->wa.single_head_rdreq_dmif_limit;
+
+ REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+ ENABLE, eanble);
+ }
+}
+
+static void dce_mi_free_dmif(
+ struct mem_input *mi,
+ uint32_t total_stream_num)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t buffers_allocated;
+ uint32_t dmif_buffer_control;
+
+ dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+ if (buffers_allocated == 0)
+ return;
+
+ REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+ DMIF_BUFFERS_ALLOCATED, 0);
+
+ REG_WAIT(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+ 10, 3500);
+
+ if (dce_mi->wa.single_head_rdreq_dmif_limit) {
+ uint32_t eanble = (total_stream_num > 1) ? 0 :
+ dce_mi->wa.single_head_rdreq_dmif_limit;
+
+ REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+ ENABLE, eanble);
+ }
+}
+
+
+static void program_sec_addr(
+ struct dce_mem_input *dce_mi,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ /*high register MUST be programmed first*/
+ REG_SET(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ address.high_part);
+
+ REG_SET_2(GRPH_SECONDARY_SURFACE_ADDRESS, 0,
+ GRPH_SECONDARY_SURFACE_ADDRESS, address.low_part >> 8,
+ GRPH_SECONDARY_DFQ_ENABLE, 0);
+}
+
+static void program_pri_addr(
+ struct dce_mem_input *dce_mi,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ /*high register MUST be programmed first*/
+ REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ address.high_part);
+
+ REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS, 0,
+ GRPH_PRIMARY_SURFACE_ADDRESS,
+ address.low_part >> 8);
+}
+
+
+static bool dce_mi_is_flip_pending(struct mem_input *mem_input)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input);
+ uint32_t update_pending;
+
+ REG_GET(GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, &update_pending);
+ if (update_pending)
+ return true;
+
+ mem_input->current_address = mem_input->request_address;
+ return false;
+}
+
+static bool dce_mi_program_surface_flip_and_addr(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input);
+
+ REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+
+ REG_UPDATE(
+ GRPH_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_H_RETRACE_EN, flip_immediate ? 1 : 0);
+
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0)
+ break;
+ program_pri_addr(dce_mi, address->grph.addr);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0 ||
+ address->grph_stereo.right_addr.quad_part == 0)
+ break;
+ program_pri_addr(dce_mi, address->grph_stereo.left_addr);
+ program_sec_addr(dce_mi, address->grph_stereo.right_addr);
+ break;
+ default:
+ /* not supported */
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ mem_input->request_address = *address;
+
+ if (flip_immediate)
+ mem_input->current_address = *address;
+
+ REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+
+ return true;
+}
+
+static struct mem_input_funcs dce_mi_funcs = {
+ .mem_input_program_display_marks = dce_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+
+void dce_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mi->base.ctx = ctx;
+
+ dce_mi->base.inst = inst;
+ dce_mi->base.funcs = &dce_mi_funcs;
+
+ dce_mi->regs = regs;
+ dce_mi->shifts = mi_shift;
+ dce_mi->masks = mi_mask;
+}
+
+void dce112_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+ dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
new file mode 100644
index 000000000000..05d39c0cbe87
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_MEM_INPUT_H__
+#define __DCE_MEM_INPUT_H__
+
+#include "dc_hw_types.h"
+#include "mem_input.h"
+
+#define TO_DCE_MEM_INPUT(mem_input)\
+ container_of(mem_input, struct dce_mem_input, base)
+
+#define MI_DCE_BASE_REG_LIST(id)\
+ SRI(GRPH_ENABLE, DCP, id),\
+ SRI(GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_X_START, DCP, id),\
+ SRI(GRPH_Y_START, DCP, id),\
+ SRI(GRPH_X_END, DCP, id),\
+ SRI(GRPH_Y_END, DCP, id),\
+ SRI(GRPH_PITCH, DCP, id),\
+ SRI(HW_ROTATION, DCP, id),\
+ SRI(GRPH_SWAP_CNTL, DCP, id),\
+ SRI(PRESCALE_GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_UPDATE, DCP, id),\
+ SRI(GRPH_FLIP_CONTROL, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(DPG_PIPE_ARBITRATION_CONTROL1, DMIF_PG, id),\
+ SRI(DPG_WATERMARK_MASK_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_URGENCY_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_STUTTER_CONTROL, DMIF_PG, id),\
+ SRI(DMIF_BUFFER_CONTROL, PIPE, id)
+
+#define MI_DCE_PTE_REG_LIST(id)\
+ SRI(DVMM_PTE_CONTROL, DCP, id),\
+ SRI(DVMM_PTE_ARB_CONTROL, DCP, id)
+
+#define MI_DCE8_REG_LIST(id)\
+ MI_DCE_BASE_REG_LIST(id),\
+ SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id)
+
+#define MI_DCE11_2_REG_LIST(id)\
+ MI_DCE8_REG_LIST(id),\
+ SRI(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, DCP, id)
+
+#define MI_DCE11_REG_LIST(id)\
+ MI_DCE11_2_REG_LIST(id),\
+ MI_DCE_PTE_REG_LIST(id)
+
+#define MI_DCE12_REG_LIST(id)\
+ MI_DCE_BASE_REG_LIST(id),\
+ MI_DCE_PTE_REG_LIST(id),\
+ SRI(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, DCP, id),\
+ SRI(DPG_PIPE_STUTTER_CONTROL2, DMIF_PG, id),\
+ SRI(DPG_PIPE_LOW_POWER_CONTROL, DMIF_PG, id),\
+ SR(DCHUB_FB_LOCATION),\
+ SR(DCHUB_AGP_BASE),\
+ SR(DCHUB_AGP_BOT),\
+ SR(DCHUB_AGP_TOP)
+
+struct dce_mem_input_registers {
+ /* DCP */
+ uint32_t GRPH_ENABLE;
+ uint32_t GRPH_CONTROL;
+ uint32_t GRPH_X_START;
+ uint32_t GRPH_Y_START;
+ uint32_t GRPH_X_END;
+ uint32_t GRPH_Y_END;
+ uint32_t GRPH_PITCH;
+ uint32_t HW_ROTATION;
+ uint32_t GRPH_SWAP_CNTL;
+ uint32_t PRESCALE_GRPH_CONTROL;
+ uint32_t GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT;
+ uint32_t DVMM_PTE_CONTROL;
+ uint32_t DVMM_PTE_ARB_CONTROL;
+ uint32_t GRPH_UPDATE;
+ uint32_t GRPH_FLIP_CONTROL;
+ uint32_t GRPH_PRIMARY_SURFACE_ADDRESS;
+ uint32_t GRPH_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t GRPH_SECONDARY_SURFACE_ADDRESS;
+ uint32_t GRPH_SECONDARY_SURFACE_ADDRESS_HIGH;
+ /* DMIF_PG */
+ uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
+ uint32_t DPG_WATERMARK_MASK_CONTROL;
+ uint32_t DPG_PIPE_URGENCY_CONTROL;
+ uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
+ uint32_t DPG_PIPE_LOW_POWER_CONTROL;
+ uint32_t DPG_PIPE_STUTTER_CONTROL;
+ uint32_t DPG_PIPE_STUTTER_CONTROL2;
+ /* DCI */
+ uint32_t DMIF_BUFFER_CONTROL;
+ /* MC_HUB */
+ uint32_t MC_HUB_RDREQ_DMIF_LIMIT;
+ /*DCHUB*/
+ uint32_t DCHUB_FB_LOCATION;
+ uint32_t DCHUB_AGP_BASE;
+ uint32_t DCHUB_AGP_BOT;
+ uint32_t DCHUB_AGP_TOP;
+};
+
+/* Set_Filed_for_Block */
+#define SFB(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define MI_GFX8_TILE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_WIDTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_HEIGHT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_TILE_SPLIT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_PIPE_CONFIG, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_ARRAY_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+
+#define MI_DCP_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
+ SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
+ SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
+ SFB(blk, GRPH_Y_END, GRPH_Y_END, mask_sh),\
+ SFB(blk, GRPH_PITCH, GRPH_PITCH, mask_sh),\
+ SFB(blk, HW_ROTATION, GRPH_ROTATION_ANGLE, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_R_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_G_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_B_SIGN, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS, GRPH_PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_UPDATE_LOCK, mask_sh),\
+ SFB(blk, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, mask_sh)
+
+#define MI_DCP_DCE11_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, mask_sh)
+
+#define MI_DCP_PTE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH, mask_sh),\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT, mask_sh),\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP, mask_sh),\
+ SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, mask_sh),\
+ SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_IGNORE_FBC, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_DCE(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_DCE8_MASK_SH_LIST(mask_sh)\
+ MI_DCP_MASK_SH_LIST(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_LIST(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_DCE(mask_sh, ),\
+ MI_GFX8_TILE_MASK_SH_LIST(mask_sh, )
+
+#define MI_DCE11_2_MASK_SH_LIST(mask_sh)\
+ MI_DCE8_MASK_SH_LIST(mask_sh),\
+ MI_DCP_DCE11_MASK_SH_LIST(mask_sh, )
+
+#define MI_DCE11_MASK_SH_LIST(mask_sh)\
+ MI_DCE11_2_MASK_SH_LIST(mask_sh),\
+ MI_DCP_PTE_MASK_SH_LIST(mask_sh, )
+
+#define MI_GFX9_TILE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_CONTROL, GRPH_SW_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_SE_ENABLE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_SHADER_ENGINES, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_PIPES, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+
+#define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\
+ SF(DCHUB_FB_LOCATION, FB_TOP, mask_sh),\
+ SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
+ SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
+ SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
+ SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh)
+
+#define MI_DCE12_MASK_SH_LIST(mask_sh)\
+ MI_DCP_MASK_SH_LIST(mask_sh, DCP0_),\
+ SF(DCP0_GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_DFQ_ENABLE, mask_sh),\
+ MI_DCP_DCE11_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_DCP_PTE_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_DMIF_PG_MASK_SH_LIST(mask_sh, DMIF_PG0_),\
+ MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, DMIF_PG0_),\
+ MI_GFX9_TILE_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
+
+#define MI_REG_FIELD_LIST(type) \
+ type GRPH_ENABLE; \
+ type GRPH_X_START; \
+ type GRPH_Y_START; \
+ type GRPH_X_END; \
+ type GRPH_Y_END; \
+ type GRPH_PITCH; \
+ type GRPH_ROTATION_ANGLE; \
+ type GRPH_RED_CROSSBAR; \
+ type GRPH_BLUE_CROSSBAR; \
+ type GRPH_PRESCALE_SELECT; \
+ type GRPH_PRESCALE_R_SIGN; \
+ type GRPH_PRESCALE_G_SIGN; \
+ type GRPH_PRESCALE_B_SIGN; \
+ type GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT; \
+ type DVMM_PAGE_WIDTH; \
+ type DVMM_PAGE_HEIGHT; \
+ type DVMM_MIN_PTE_BEFORE_FLIP; \
+ type DVMM_PTE_REQ_PER_CHUNK; \
+ type DVMM_MAX_PTE_REQ_OUTSTANDING; \
+ type GRPH_DEPTH; \
+ type GRPH_FORMAT; \
+ type GRPH_NUM_BANKS; \
+ type GRPH_BANK_WIDTH;\
+ type GRPH_BANK_HEIGHT;\
+ type GRPH_MACRO_TILE_ASPECT;\
+ type GRPH_TILE_SPLIT;\
+ type GRPH_MICRO_TILE_MODE;\
+ type GRPH_PIPE_CONFIG;\
+ type GRPH_ARRAY_MODE;\
+ type GRPH_COLOR_EXPANSION_MODE;\
+ type GRPH_SW_MODE; \
+ type GRPH_SE_ENABLE; \
+ type GRPH_NUM_SHADER_ENGINES; \
+ type GRPH_NUM_PIPES; \
+ type GRPH_SECONDARY_SURFACE_ADDRESS_HIGH; \
+ type GRPH_SECONDARY_SURFACE_ADDRESS; \
+ type GRPH_SECONDARY_DFQ_ENABLE; \
+ type GRPH_PRIMARY_SURFACE_ADDRESS_HIGH; \
+ type GRPH_PRIMARY_SURFACE_ADDRESS; \
+ type GRPH_SURFACE_UPDATE_PENDING; \
+ type GRPH_SURFACE_UPDATE_H_RETRACE_EN; \
+ type GRPH_UPDATE_LOCK; \
+ type PIXEL_DURATION; \
+ type URGENCY_WATERMARK_MASK; \
+ type PSTATE_CHANGE_WATERMARK_MASK; \
+ type NB_PSTATE_CHANGE_WATERMARK_MASK; \
+ type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
+ type URGENCY_LOW_WATERMARK; \
+ type URGENCY_HIGH_WATERMARK; \
+ type NB_PSTATE_CHANGE_ENABLE; \
+ type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+ type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+ type NB_PSTATE_CHANGE_WATERMARK; \
+ type PSTATE_CHANGE_ENABLE; \
+ type PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+ type PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+ type PSTATE_CHANGE_WATERMARK; \
+ type STUTTER_ENABLE; \
+ type STUTTER_IGNORE_FBC; \
+ type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
+ type DMIF_BUFFERS_ALLOCATED; \
+ type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
+ type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\
+ type FB_BASE; \
+ type FB_TOP; \
+ type AGP_BASE; \
+ type AGP_TOP; \
+ type AGP_BOT; \
+
+struct dce_mem_input_shift {
+ MI_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_mem_input_mask {
+ MI_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_mem_input_wa {
+ uint8_t single_head_rdreq_dmif_limit;
+};
+
+struct dce_mem_input {
+ struct mem_input base;
+
+ const struct dce_mem_input_registers *regs;
+ const struct dce_mem_input_shift *shifts;
+ const struct dce_mem_input_mask *masks;
+
+ struct dce_mem_input_wa wa;
+};
+
+void dce_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+
+void dce112_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+
+#endif /*__DCE_MEM_INPUT_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
new file mode 100644
index 000000000000..3931412ab6d3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "basics/conversion.h"
+
+#include "dce_opp.h"
+
+#include "reg_helper.h"
+
+#define REG(reg)\
+ (opp110->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ opp110->opp_shift->field_name, opp110->opp_mask->field_name
+
+#define CTX \
+ opp110->base.ctx
+
+enum {
+ MAX_PWL_ENTRY = 128,
+ MAX_REGIONS_NUMBER = 16
+};
+
+enum {
+ MAX_LUT_ENTRY = 256,
+ MAX_NUMBER_OF_ENTRIES = 256
+};
+
+
+enum {
+ OUTPUT_CSC_MATRIX_SIZE = 12
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ *****************************************************************************
+ * Function: regamma_config_regions_and_segments
+ *
+ * build regamma curve by using predefined hw points
+ * uses interface parameters ,like EDID coeff.
+ *
+ * @param : parameters interface parameters
+ * @return void
+ *
+ * @note
+ *
+ * @see
+ *
+ *****************************************************************************
+ */
+
+
+
+/**
+ * set_truncation
+ * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
+ * 2) enable truncation
+ * 3) HW remove 12bit FMT support for DCE11 power saving reason.
+ */
+static void set_truncation(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable truncation*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 0,
+ FMT_TRUNCATE_DEPTH, 0,
+ FMT_TRUNCATE_MODE, 0);
+
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ /* 8bpc trunc on YCbCr422*/
+ if (params->flags.TRUNCATE_DEPTH == 1)
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 1,
+ FMT_TRUNCATE_MODE, 0);
+ else if (params->flags.TRUNCATE_DEPTH == 2)
+ /* 10bpc trunc on YCbCr422*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 2,
+ FMT_TRUNCATE_MODE, 0);
+ return;
+ }
+ /* on other format-to do */
+ if (params->flags.TRUNCATE_ENABLED == 0 ||
+ params->flags.TRUNCATE_DEPTH == 2)
+ return;
+ /*Set truncation depth and Enable truncation*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH,
+ params->flags.TRUNCATE_MODE,
+ FMT_TRUNCATE_MODE,
+ params->flags.TRUNCATE_DEPTH);
+}
+
+
+/**
+ * set_spatial_dither
+ * 1) set spatial dithering mode: pattern of seed
+ * 2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp
+ * 3) set random seed
+ * 4) set random mode
+ * lfsr is reset every frame or not reset
+ * RGB dithering method
+ * 0: RGB data are all dithered with x^28+x^3+1
+ * 1: R data is dithered with x^28+x^3+1
+ * G data is dithered with x^28+X^9+1
+ * B data is dithered with x^28+x^13+1
+ * enable high pass filter or not
+ * 5) enable spatical dithering
+ */
+static void set_spatial_dither(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable spatial (random) dithering*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_EN, 0,
+ FMT_SPATIAL_DITHER_DEPTH, 0,
+ FMT_SPATIAL_DITHER_MODE, 0);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_HIGHPASS_RANDOM_ENABLE, 0,
+ FMT_FRAME_RANDOM_ENABLE, 0,
+ FMT_RGB_RANDOM_ENABLE, 0);
+
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 0);
+
+ /* no 10bpc on DCE11*/
+ if (params->flags.SPATIAL_DITHER_ENABLED == 0 ||
+ params->flags.SPATIAL_DITHER_DEPTH == 2)
+ return;
+
+ /* only use FRAME_COUNTER_MAX if frameRandom == 1*/
+
+ if (opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX &&
+ opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP) {
+ if (params->flags.FRAME_RANDOM == 1) {
+ if (params->flags.SPATIAL_DITHER_DEPTH == 0 ||
+ params->flags.SPATIAL_DITHER_DEPTH == 1) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2);
+ } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1);
+ } else
+ return;
+ } else {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0);
+ }
+ }
+ /* Set seed for random values for
+ * spatial dithering for R,G,B channels
+ */
+ REG_UPDATE(FMT_DITHER_RAND_R_SEED,
+ FMT_RAND_R_SEED, params->r_seed_value);
+
+ REG_UPDATE(FMT_DITHER_RAND_G_SEED,
+ FMT_RAND_G_SEED, params->g_seed_value);
+
+ REG_UPDATE(FMT_DITHER_RAND_B_SEED,
+ FMT_RAND_B_SEED, params->b_seed_value);
+
+ /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero
+ * offset for the R/Cr channel, lower 4LSB
+ * is forced to zeros. Typically set to 0
+ * RGB and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero
+ * offset for the G/Y channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB
+ * and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero
+ * offset for the B/Cb channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB and
+ * 0x80000 YCbCr.
+ */
+
+ /* Disable High pass filter
+ * Reset only at startup
+ * Set RGB data dithered with x^28+x^3+1
+ */
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM,
+ FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM,
+ FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
+
+ /* Set spatial dithering bit depth
+ * Set spatial dithering mode
+ * (default is Seed patterrn AAAA...)
+ * Enable spatial dithering
+ */
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH,
+ FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE,
+ FMT_SPATIAL_DITHER_EN, 1);
+}
+
+/**
+ * SetTemporalDither (Frame Modulation)
+ * 1) set temporal dither depth
+ * 2) select pattern: from hard-coded pattern or programmable pattern
+ * 3) select optimized strips for BGR or RGB LCD sub-pixel
+ * 4) set s matrix
+ * 5) set t matrix
+ * 6) set grey level for 0.25, 0.5, 0.75
+ * 7) enable temporal dithering
+ */
+
+static void set_temporal_dither(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable temporal (frame modulation) dithering first*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 0,
+ FMT_TEMPORAL_DITHER_RESET, 0,
+ FMT_TEMPORAL_DITHER_OFFSET, 0);
+
+ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_DEPTH, 0,
+ FMT_TEMPORAL_LEVEL, 0);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_25FRC_SEL, 0,
+ FMT_50FRC_SEL, 0,
+ FMT_75FRC_SEL, 0);
+
+ /* no 10bpc dither on DCE11*/
+ if (params->flags.FRAME_MODULATION_ENABLED == 0 ||
+ params->flags.FRAME_MODULATION_DEPTH == 2)
+ return;
+
+ /* Set temporal dithering depth*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_DEPTH, params->flags.FRAME_MODULATION_DEPTH,
+ FMT_TEMPORAL_DITHER_RESET, 0,
+ FMT_TEMPORAL_DITHER_OFFSET, 0);
+
+ /*Select legacy pattern based on FRC and Temporal level*/
+ if (REG(FMT_TEMPORAL_DITHER_PATTERN_CONTROL)) {
+ REG_WRITE(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, 0);
+ /*Set s matrix*/
+ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, 0);
+ /*Set t matrix*/
+ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, 0);
+ }
+
+ /*Select patterns for 0.25, 0.5 and 0.75 grey level*/
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_LEVEL, params->flags.TEMPORAL_LEVEL);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_25FRC_SEL, params->flags.FRC25,
+ FMT_50FRC_SEL, params->flags.FRC50,
+ FMT_75FRC_SEL, params->flags.FRC75);
+
+ /*Enable bit reduction by temporal (frame modulation) dithering*/
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 1);
+}
+
+/**
+ * Set Clamping
+ * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
+ * 1 for 8 bpc
+ * 2 for 10 bpc
+ * 3 for 12 bpc
+ * 7 for programable
+ * 2) Enable clamp if Limited range requested
+ */
+void dce110_opp_set_clamping(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 0,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+
+ switch (params->clamping_level) {
+ case CLAMPING_FULL_RANGE:
+ break;
+ case CLAMPING_LIMITED_RANGE_8BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 1);
+ break;
+ case CLAMPING_LIMITED_RANGE_10BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 2);
+ break;
+ case CLAMPING_LIMITED_RANGE_12BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 3);
+ break;
+ case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
+ /*Set clamp control*/
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 7);
+
+ /*set the defaults*/
+ REG_SET_2(FMT_CLAMP_COMPONENT_R, 0,
+ FMT_CLAMP_LOWER_R, 0x10,
+ FMT_CLAMP_UPPER_R, 0xFEF);
+
+ REG_SET_2(FMT_CLAMP_COMPONENT_G, 0,
+ FMT_CLAMP_LOWER_G, 0x10,
+ FMT_CLAMP_UPPER_G, 0xFEF);
+
+ REG_SET_2(FMT_CLAMP_COMPONENT_B, 0,
+ FMT_CLAMP_LOWER_B, 0x10,
+ FMT_CLAMP_UPPER_B, 0xFEF);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * set_pixel_encoding
+ *
+ * Set Pixel Encoding
+ * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
+ * 1: YCbCr 4:2:2
+ */
+static void set_pixel_encoding(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ if (opp110->opp_mask->FMT_CBCR_BIT_REDUCTION_BYPASS)
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
+ else
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0);
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_ORDER, 0);
+ }
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 2,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
+ }
+
+}
+
+void dce110_opp_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ set_truncation(opp110, params);
+ set_spatial_dither(opp110, params);
+ set_temporal_dither(opp110, params);
+}
+
+void dce110_opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ dce110_opp_set_clamping(opp110, params);
+ set_pixel_encoding(opp110, params);
+}
+
+static void program_formatter_420_memory(struct output_pixel_processor *opp)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+ uint32_t fmt_mem_cntl_value;
+
+ /* Program source select*/
+ /* Use HW default source select for FMT_MEMORYx_CONTROL */
+ /* Use that value for FMT_SRC_SELECT as well*/
+ REG_GET(CONTROL,
+ FMT420_MEM0_SOURCE_SEL, &fmt_mem_cntl_value);
+
+ REG_UPDATE(FMT_CONTROL,
+ FMT_SRC_SELECT, fmt_mem_cntl_value);
+
+ /* Turn on the memory */
+ REG_UPDATE(CONTROL,
+ FMT420_MEM0_PWR_FORCE, 0);
+}
+
+void dce110_opp_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 0,
+ FMT_DYNAMIC_EXP_MODE, 0);
+
+ /*00 - 10-bit -> 12-bit dynamic expansion*/
+ /*01 - 8-bit -> 12-bit dynamic expansion*/
+ if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ switch (color_dpth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 1);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE_2(
+ FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void program_formatter_reset_dig_resync_fifo(struct output_pixel_processor *opp)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ /* clear previous phase lock status*/
+ REG_UPDATE(FMT_CONTROL,
+ FMT_420_PIXEL_PHASE_LOCKED_CLEAR, 1);
+
+ /* poll until FMT_420_PIXEL_PHASE_LOCKED become 1*/
+ REG_WAIT(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, 1, 10, 10);
+
+}
+
+void dce110_opp_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_420_memory(opp);
+
+ dce110_opp_program_bit_depth_reduction(
+ opp,
+ fmt_bit_depth);
+
+ dce110_opp_program_clamping_and_pixel_encoding(
+ opp,
+ clamping);
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_reset_dig_resync_fifo(opp);
+
+ return;
+}
+
+
+
+
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static const struct opp_funcs funcs = {
+ .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
+ .opp_destroy = dce110_opp_destroy,
+ .opp_program_fmt = dce110_opp_program_fmt,
+ .opp_program_bit_depth_reduction = dce110_opp_program_bit_depth_reduction
+};
+
+void dce110_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask)
+{
+ opp110->base.funcs = &funcs;
+
+ opp110->base.ctx = ctx;
+
+ opp110->base.inst = inst;
+
+ opp110->regs = regs;
+ opp110->opp_shift = opp_shift;
+ opp110->opp_mask = opp_mask;
+}
+
+void dce110_opp_destroy(struct output_pixel_processor **opp)
+{
+ if (*opp)
+ kfree(FROM_DCE11_OPP(*opp));
+ *opp = NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
new file mode 100644
index 000000000000..2ab0147cbd9d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -0,0 +1,310 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCE_H__
+#define __DC_OPP_DCE_H__
+
+#include "dc_types.h"
+#include "opp.h"
+#include "core_types.h"
+
+#define FROM_DCE11_OPP(opp)\
+ container_of(opp, struct dce110_opp, base)
+
+enum dce110_opp_reg_type {
+ DCE110_OPP_REG_DCP = 0,
+ DCE110_OPP_REG_DCFE,
+ DCE110_OPP_REG_FMT,
+
+ DCE110_OPP_REG_MAX
+};
+
+#define OPP_COMMON_REG_LIST_BASE(id) \
+ SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \
+ SRI(FMT_CONTROL, FMT, id), \
+ SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI(FMT_CLAMP_CNTL, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_R, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_G, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_B, FMT, id)
+
+#define OPP_DCE_80_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_100_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_110_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_112_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id), \
+ SRI(CONTROL, FMT_MEMORY, id)
+
+#define OPP_DCE_120_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(CONTROL, FMT_MEMORY, id)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SRC_SELECT, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_R, FMT_CLAMP_LOWER_R, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_R, FMT_CLAMP_UPPER_R, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_G, FMT_CLAMP_LOWER_G, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_G, FMT_CLAMP_UPPER_G, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_B, FMT_CLAMP_LOWER_B, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_B, FMT_CLAMP_UPPER_B, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_110(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_100(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_SOURCE_SEL, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_PWR_FORCE, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED_CLEAR, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_80(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_120(mask_sh)\
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_SOURCE_SEL, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_PWR_FORCE, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SRC_SELECT, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED_CLEAR, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_R, FMT_CLAMP_LOWER_R, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_R, FMT_CLAMP_UPPER_R, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_G, FMT_CLAMP_LOWER_G, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_G, FMT_CLAMP_UPPER_G, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_B, FMT_CLAMP_LOWER_B, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_B, FMT_CLAMP_UPPER_B, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh)
+
+#define OPP_REG_FIELD_LIST(type) \
+ type FMT_DYNAMIC_EXP_EN; \
+ type FMT_DYNAMIC_EXP_MODE; \
+ type FMT_TRUNCATE_EN; \
+ type FMT_TRUNCATE_DEPTH; \
+ type FMT_TRUNCATE_MODE; \
+ type FMT_SPATIAL_DITHER_EN; \
+ type FMT_SPATIAL_DITHER_DEPTH; \
+ type FMT_SPATIAL_DITHER_MODE; \
+ type FMT_TEMPORAL_DITHER_EN; \
+ type FMT_TEMPORAL_DITHER_RESET; \
+ type FMT_TEMPORAL_DITHER_OFFSET; \
+ type FMT_TEMPORAL_DITHER_DEPTH; \
+ type FMT_TEMPORAL_LEVEL; \
+ type FMT_25FRC_SEL; \
+ type FMT_50FRC_SEL; \
+ type FMT_75FRC_SEL; \
+ type FMT_HIGHPASS_RANDOM_ENABLE; \
+ type FMT_FRAME_RANDOM_ENABLE; \
+ type FMT_RGB_RANDOM_ENABLE; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \
+ type FMT_STEREOSYNC_OVERRIDE; \
+ type FMT_RAND_R_SEED; \
+ type FMT_RAND_G_SEED; \
+ type FMT_RAND_B_SEED; \
+ type FMT420_MEM0_SOURCE_SEL; \
+ type FMT420_MEM0_PWR_FORCE; \
+ type FMT_SRC_SELECT; \
+ type FMT_420_PIXEL_PHASE_LOCKED_CLEAR; \
+ type FMT_420_PIXEL_PHASE_LOCKED; \
+ type FMT_CLAMP_DATA_EN; \
+ type FMT_CLAMP_COLOR_FORMAT; \
+ type FMT_CLAMP_LOWER_R; \
+ type FMT_CLAMP_UPPER_R; \
+ type FMT_CLAMP_LOWER_G; \
+ type FMT_CLAMP_UPPER_G; \
+ type FMT_CLAMP_LOWER_B; \
+ type FMT_CLAMP_UPPER_B; \
+ type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_ORDER; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS;\
+
+struct dce_opp_shift {
+ OPP_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_opp_mask {
+ OPP_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_opp_registers {
+ uint32_t FMT_DYNAMIC_EXP_CNTL;
+ uint32_t FMT_BIT_DEPTH_CONTROL;
+ uint32_t FMT_CONTROL;
+ uint32_t FMT_DITHER_RAND_R_SEED;
+ uint32_t FMT_DITHER_RAND_G_SEED;
+ uint32_t FMT_DITHER_RAND_B_SEED;
+ uint32_t FMT_TEMPORAL_DITHER_PATTERN_CONTROL;
+ uint32_t FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX;
+ uint32_t FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX;
+ uint32_t CONTROL;
+ uint32_t FMT_CLAMP_CNTL;
+ uint32_t FMT_CLAMP_COMPONENT_R;
+ uint32_t FMT_CLAMP_COMPONENT_G;
+ uint32_t FMT_CLAMP_COMPONENT_B;
+};
+
+/* OPP RELATED */
+#define TO_DCE110_OPP(opp)\
+ container_of(opp, struct dce110_opp, base)
+
+struct dce110_opp {
+ struct output_pixel_processor base;
+ const struct dce_opp_registers *regs;
+ const struct dce_opp_shift *opp_shift;
+ const struct dce_opp_mask *opp_mask;
+};
+
+void dce110_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask);
+
+void dce110_opp_destroy(struct output_pixel_processor **opp);
+
+
+
+/* FORMATTER RELATED */
+void dce110_opp_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params);
+
+void dce110_opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params);
+
+void dce110_opp_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal);
+
+void dce110_opp_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void dce110_opp_set_clamping(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
new file mode 100644
index 000000000000..6243450b41b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -0,0 +1,1119 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "transform.h"
+
+static const uint16_t filter_2tap_16p[18] = {
+ 4096, 0,
+ 3840, 256,
+ 3584, 512,
+ 3328, 768,
+ 3072, 1024,
+ 2816, 1280,
+ 2560, 1536,
+ 2304, 1792,
+ 2048, 2048
+};
+
+static const uint16_t filter_3tap_16p_upscale[27] = {
+ 2048, 2048, 0,
+ 1708, 2424, 16348,
+ 1372, 2796, 16308,
+ 1056, 3148, 16272,
+ 768, 3464, 16244,
+ 512, 3728, 16236,
+ 296, 3928, 16252,
+ 124, 4052, 16296,
+ 0, 4096, 0
+};
+
+static const uint16_t filter_3tap_16p_117[27] = {
+ 2048, 2048, 0,
+ 1824, 2276, 16376,
+ 1600, 2496, 16380,
+ 1376, 2700, 16,
+ 1156, 2880, 52,
+ 948, 3032, 108,
+ 756, 3144, 192,
+ 580, 3212, 296,
+ 428, 3236, 428
+};
+
+static const uint16_t filter_3tap_16p_150[27] = {
+ 2048, 2048, 0,
+ 1872, 2184, 36,
+ 1692, 2308, 88,
+ 1516, 2420, 156,
+ 1340, 2516, 236,
+ 1168, 2592, 328,
+ 1004, 2648, 440,
+ 844, 2684, 560,
+ 696, 2696, 696
+};
+
+static const uint16_t filter_3tap_16p_183[27] = {
+ 2048, 2048, 0,
+ 1892, 2104, 92,
+ 1744, 2152, 196,
+ 1592, 2196, 300,
+ 1448, 2232, 412,
+ 1304, 2256, 528,
+ 1168, 2276, 648,
+ 1032, 2288, 772,
+ 900, 2292, 900
+};
+
+static const uint16_t filter_4tap_16p_upscale[36] = {
+ 0, 4096, 0, 0,
+ 16240, 4056, 180, 16380,
+ 16136, 3952, 404, 16364,
+ 16072, 3780, 664, 16344,
+ 16040, 3556, 952, 16312,
+ 16036, 3284, 1268, 16272,
+ 16052, 2980, 1604, 16224,
+ 16084, 2648, 1952, 16176,
+ 16128, 2304, 2304, 16128
+};
+
+static const uint16_t filter_4tap_16p_117[36] = {
+ 428, 3236, 428, 0,
+ 276, 3232, 604, 16364,
+ 148, 3184, 800, 16340,
+ 44, 3104, 1016, 16312,
+ 16344, 2984, 1244, 16284,
+ 16284, 2832, 1488, 16256,
+ 16244, 2648, 1732, 16236,
+ 16220, 2440, 1976, 16220,
+ 16212, 2216, 2216, 16212
+};
+
+static const uint16_t filter_4tap_16p_150[36] = {
+ 696, 2700, 696, 0,
+ 560, 2700, 848, 16364,
+ 436, 2676, 1008, 16348,
+ 328, 2628, 1180, 16336,
+ 232, 2556, 1356, 16328,
+ 152, 2460, 1536, 16328,
+ 84, 2344, 1716, 16332,
+ 28, 2208, 1888, 16348,
+ 16376, 2052, 2052, 16376
+};
+
+static const uint16_t filter_4tap_16p_183[36] = {
+ 940, 2208, 940, 0,
+ 832, 2200, 1052, 4,
+ 728, 2180, 1164, 16,
+ 628, 2148, 1280, 36,
+ 536, 2100, 1392, 60,
+ 448, 2044, 1504, 92,
+ 368, 1976, 1612, 132,
+ 296, 1900, 1716, 176,
+ 232, 1812, 1812, 232
+};
+
+static const uint16_t filter_2tap_64p[66] = {
+ 4096, 0,
+ 4032, 64,
+ 3968, 128,
+ 3904, 192,
+ 3840, 256,
+ 3776, 320,
+ 3712, 384,
+ 3648, 448,
+ 3584, 512,
+ 3520, 576,
+ 3456, 640,
+ 3392, 704,
+ 3328, 768,
+ 3264, 832,
+ 3200, 896,
+ 3136, 960,
+ 3072, 1024,
+ 3008, 1088,
+ 2944, 1152,
+ 2880, 1216,
+ 2816, 1280,
+ 2752, 1344,
+ 2688, 1408,
+ 2624, 1472,
+ 2560, 1536,
+ 2496, 1600,
+ 2432, 1664,
+ 2368, 1728,
+ 2304, 1792,
+ 2240, 1856,
+ 2176, 1920,
+ 2112, 1984,
+ 2048, 2048 };
+
+static const uint16_t filter_3tap_64p_upscale[99] = {
+ 2048, 2048, 0,
+ 1960, 2140, 16376,
+ 1876, 2236, 16364,
+ 1792, 2328, 16356,
+ 1708, 2424, 16348,
+ 1620, 2516, 16336,
+ 1540, 2612, 16328,
+ 1456, 2704, 16316,
+ 1372, 2796, 16308,
+ 1292, 2884, 16296,
+ 1212, 2976, 16288,
+ 1136, 3060, 16280,
+ 1056, 3148, 16272,
+ 984, 3228, 16264,
+ 908, 3312, 16256,
+ 836, 3388, 16248,
+ 768, 3464, 16244,
+ 700, 3536, 16240,
+ 636, 3604, 16236,
+ 572, 3668, 16236,
+ 512, 3728, 16236,
+ 456, 3784, 16236,
+ 400, 3836, 16240,
+ 348, 3884, 16244,
+ 296, 3928, 16252,
+ 252, 3964, 16260,
+ 204, 4000, 16268,
+ 164, 4028, 16284,
+ 124, 4052, 16296,
+ 88, 4072, 16316,
+ 56, 4084, 16336,
+ 24, 4092, 16356,
+ 0, 4096, 0
+};
+
+static const uint16_t filter_3tap_64p_117[99] = {
+ 2048, 2048, 0,
+ 1992, 2104, 16380,
+ 1936, 2160, 16380,
+ 1880, 2220, 16376,
+ 1824, 2276, 16376,
+ 1768, 2332, 16376,
+ 1712, 2388, 16376,
+ 1656, 2444, 16376,
+ 1600, 2496, 16380,
+ 1544, 2548, 0,
+ 1488, 2600, 4,
+ 1432, 2652, 8,
+ 1376, 2700, 16,
+ 1320, 2748, 20,
+ 1264, 2796, 32,
+ 1212, 2840, 40,
+ 1156, 2880, 52,
+ 1104, 2920, 64,
+ 1052, 2960, 80,
+ 1000, 2996, 92,
+ 948, 3032, 108,
+ 900, 3060, 128,
+ 852, 3092, 148,
+ 804, 3120, 168,
+ 756, 3144, 192,
+ 712, 3164, 216,
+ 668, 3184, 240,
+ 624, 3200, 268,
+ 580, 3212, 296,
+ 540, 3220, 328,
+ 500, 3228, 360,
+ 464, 3232, 392,
+ 428, 3236, 428
+};
+
+static const uint16_t filter_3tap_64p_150[99] = {
+ 2048, 2048, 0,
+ 2004, 2080, 8,
+ 1960, 2116, 16,
+ 1916, 2148, 28,
+ 1872, 2184, 36,
+ 1824, 2216, 48,
+ 1780, 2248, 60,
+ 1736, 2280, 76,
+ 1692, 2308, 88,
+ 1648, 2336, 104,
+ 1604, 2368, 120,
+ 1560, 2392, 136,
+ 1516, 2420, 156,
+ 1472, 2444, 172,
+ 1428, 2472, 192,
+ 1384, 2492, 212,
+ 1340, 2516, 236,
+ 1296, 2536, 256,
+ 1252, 2556, 280,
+ 1212, 2576, 304,
+ 1168, 2592, 328,
+ 1124, 2608, 356,
+ 1084, 2624, 384,
+ 1044, 2636, 412,
+ 1004, 2648, 440,
+ 964, 2660, 468,
+ 924, 2668, 500,
+ 884, 2676, 528,
+ 844, 2684, 560,
+ 808, 2688, 596,
+ 768, 2692, 628,
+ 732, 2696, 664,
+ 696, 2696, 696
+};
+
+static const uint16_t filter_3tap_64p_183[99] = {
+ 2048, 2048, 0,
+ 2008, 2060, 20,
+ 1968, 2076, 44,
+ 1932, 2088, 68,
+ 1892, 2104, 92,
+ 1856, 2116, 120,
+ 1816, 2128, 144,
+ 1780, 2140, 168,
+ 1744, 2152, 196,
+ 1704, 2164, 220,
+ 1668, 2176, 248,
+ 1632, 2188, 272,
+ 1592, 2196, 300,
+ 1556, 2204, 328,
+ 1520, 2216, 356,
+ 1484, 2224, 384,
+ 1448, 2232, 412,
+ 1412, 2240, 440,
+ 1376, 2244, 468,
+ 1340, 2252, 496,
+ 1304, 2256, 528,
+ 1272, 2264, 556,
+ 1236, 2268, 584,
+ 1200, 2272, 616,
+ 1168, 2276, 648,
+ 1132, 2280, 676,
+ 1100, 2284, 708,
+ 1064, 2288, 740,
+ 1032, 2288, 772,
+ 996, 2292, 800,
+ 964, 2292, 832,
+ 932, 2292, 868,
+ 900, 2292, 900
+};
+
+static const uint16_t filter_4tap_64p_upscale[132] = {
+ 0, 4096, 0, 0,
+ 16344, 4092, 40, 0,
+ 16308, 4084, 84, 16380,
+ 16272, 4072, 132, 16380,
+ 16240, 4056, 180, 16380,
+ 16212, 4036, 232, 16376,
+ 16184, 4012, 288, 16372,
+ 16160, 3984, 344, 16368,
+ 16136, 3952, 404, 16364,
+ 16116, 3916, 464, 16360,
+ 16100, 3872, 528, 16356,
+ 16084, 3828, 596, 16348,
+ 16072, 3780, 664, 16344,
+ 16060, 3728, 732, 16336,
+ 16052, 3676, 804, 16328,
+ 16044, 3616, 876, 16320,
+ 16040, 3556, 952, 16312,
+ 16036, 3492, 1028, 16300,
+ 16032, 3424, 1108, 16292,
+ 16032, 3356, 1188, 16280,
+ 16036, 3284, 1268, 16272,
+ 16036, 3212, 1352, 16260,
+ 16040, 3136, 1436, 16248,
+ 16044, 3056, 1520, 16236,
+ 16052, 2980, 1604, 16224,
+ 16060, 2896, 1688, 16212,
+ 16064, 2816, 1776, 16200,
+ 16076, 2732, 1864, 16188,
+ 16084, 2648, 1952, 16176,
+ 16092, 2564, 2040, 16164,
+ 16104, 2476, 2128, 16152,
+ 16116, 2388, 2216, 16140,
+ 16128, 2304, 2304, 16128 };
+
+static const uint16_t filter_4tap_64p_117[132] = {
+ 420, 3248, 420, 0,
+ 380, 3248, 464, 16380,
+ 344, 3248, 508, 16372,
+ 308, 3248, 552, 16368,
+ 272, 3240, 596, 16364,
+ 236, 3236, 644, 16356,
+ 204, 3224, 692, 16352,
+ 172, 3212, 744, 16344,
+ 144, 3196, 796, 16340,
+ 116, 3180, 848, 16332,
+ 88, 3160, 900, 16324,
+ 60, 3136, 956, 16320,
+ 36, 3112, 1012, 16312,
+ 16, 3084, 1068, 16304,
+ 16380, 3056, 1124, 16296,
+ 16360, 3024, 1184, 16292,
+ 16340, 2992, 1244, 16284,
+ 16324, 2956, 1304, 16276,
+ 16308, 2920, 1364, 16268,
+ 16292, 2880, 1424, 16264,
+ 16280, 2836, 1484, 16256,
+ 16268, 2792, 1548, 16252,
+ 16256, 2748, 1608, 16244,
+ 16248, 2700, 1668, 16240,
+ 16240, 2652, 1732, 16232,
+ 16232, 2604, 1792, 16228,
+ 16228, 2552, 1856, 16224,
+ 16220, 2500, 1916, 16220,
+ 16216, 2444, 1980, 16216,
+ 16216, 2388, 2040, 16216,
+ 16212, 2332, 2100, 16212,
+ 16212, 2276, 2160, 16212,
+ 16212, 2220, 2220, 16212 };
+
+static const uint16_t filter_4tap_64p_150[132] = {
+ 696, 2700, 696, 0,
+ 660, 2704, 732, 16380,
+ 628, 2704, 768, 16376,
+ 596, 2704, 804, 16372,
+ 564, 2700, 844, 16364,
+ 532, 2696, 884, 16360,
+ 500, 2692, 924, 16356,
+ 472, 2684, 964, 16352,
+ 440, 2676, 1004, 16352,
+ 412, 2668, 1044, 16348,
+ 384, 2656, 1088, 16344,
+ 360, 2644, 1128, 16340,
+ 332, 2632, 1172, 16336,
+ 308, 2616, 1216, 16336,
+ 284, 2600, 1260, 16332,
+ 260, 2580, 1304, 16332,
+ 236, 2560, 1348, 16328,
+ 216, 2540, 1392, 16328,
+ 196, 2516, 1436, 16328,
+ 176, 2492, 1480, 16324,
+ 156, 2468, 1524, 16324,
+ 136, 2440, 1568, 16328,
+ 120, 2412, 1612, 16328,
+ 104, 2384, 1656, 16328,
+ 88, 2352, 1700, 16332,
+ 72, 2324, 1744, 16332,
+ 60, 2288, 1788, 16336,
+ 48, 2256, 1828, 16340,
+ 36, 2220, 1872, 16344,
+ 24, 2184, 1912, 16352,
+ 12, 2148, 1952, 16356,
+ 4, 2112, 1996, 16364,
+ 16380, 2072, 2036, 16372 };
+
+static const uint16_t filter_4tap_64p_183[132] = {
+ 944, 2204, 944, 0,
+ 916, 2204, 972, 0,
+ 888, 2200, 996, 0,
+ 860, 2200, 1024, 4,
+ 832, 2196, 1052, 4,
+ 808, 2192, 1080, 8,
+ 780, 2188, 1108, 12,
+ 756, 2180, 1140, 12,
+ 728, 2176, 1168, 16,
+ 704, 2168, 1196, 20,
+ 680, 2160, 1224, 24,
+ 656, 2152, 1252, 28,
+ 632, 2144, 1280, 36,
+ 608, 2132, 1308, 40,
+ 584, 2120, 1336, 48,
+ 560, 2112, 1364, 52,
+ 536, 2096, 1392, 60,
+ 516, 2084, 1420, 68,
+ 492, 2072, 1448, 76,
+ 472, 2056, 1476, 84,
+ 452, 2040, 1504, 92,
+ 428, 2024, 1532, 100,
+ 408, 2008, 1560, 112,
+ 392, 1992, 1584, 120,
+ 372, 1972, 1612, 132,
+ 352, 1956, 1636, 144,
+ 336, 1936, 1664, 156,
+ 316, 1916, 1688, 168,
+ 300, 1896, 1712, 180,
+ 284, 1876, 1736, 192,
+ 268, 1852, 1760, 208,
+ 252, 1832, 1784, 220,
+ 236, 1808, 1808, 236 };
+
+static const uint16_t filter_5tap_64p_upscale[165] = {
+ 15936, 2496, 2496, 15936, 0,
+ 15948, 2404, 2580, 15924, 0,
+ 15960, 2312, 2664, 15912, 4,
+ 15976, 2220, 2748, 15904, 8,
+ 15992, 2128, 2832, 15896, 12,
+ 16004, 2036, 2912, 15888, 16,
+ 16020, 1944, 2992, 15880, 20,
+ 16036, 1852, 3068, 15876, 20,
+ 16056, 1760, 3140, 15876, 24,
+ 16072, 1668, 3216, 15872, 28,
+ 16088, 1580, 3284, 15872, 32,
+ 16104, 1492, 3352, 15876, 32,
+ 16120, 1404, 3420, 15876, 36,
+ 16140, 1316, 3480, 15884, 40,
+ 16156, 1228, 3540, 15892, 40,
+ 16172, 1144, 3600, 15900, 40,
+ 16188, 1060, 3652, 15908, 44,
+ 16204, 980, 3704, 15924, 44,
+ 16220, 900, 3756, 15936, 44,
+ 16236, 824, 3800, 15956, 44,
+ 16248, 744, 3844, 15972, 44,
+ 16264, 672, 3884, 15996, 44,
+ 16276, 600, 3920, 16020, 44,
+ 16292, 528, 3952, 16044, 40,
+ 16304, 460, 3980, 16072, 40,
+ 16316, 396, 4008, 16104, 36,
+ 16328, 332, 4032, 16136, 32,
+ 16336, 272, 4048, 16172, 28,
+ 16348, 212, 4064, 16208, 24,
+ 16356, 156, 4080, 16248, 16,
+ 16368, 100, 4088, 16292, 12,
+ 16376, 48, 4092, 16336, 4,
+ 0, 0, 4096, 0, 0 };
+
+static const uint16_t filter_5tap_64p_117[165] = {
+ 16056, 2372, 2372, 16056, 0,
+ 16052, 2312, 2432, 16060, 0,
+ 16052, 2252, 2488, 16064, 0,
+ 16052, 2188, 2548, 16072, 0,
+ 16052, 2124, 2600, 16076, 0,
+ 16052, 2064, 2656, 16088, 0,
+ 16052, 2000, 2708, 16096, 0,
+ 16056, 1932, 2760, 16108, 0,
+ 16060, 1868, 2808, 16120, 0,
+ 16064, 1804, 2856, 16132, 0,
+ 16068, 1740, 2904, 16148, 16380,
+ 16076, 1676, 2948, 16164, 16380,
+ 16080, 1612, 2992, 16180, 16376,
+ 16088, 1544, 3032, 16200, 16372,
+ 16096, 1480, 3072, 16220, 16372,
+ 16104, 1420, 3108, 16244, 16368,
+ 16112, 1356, 3144, 16268, 16364,
+ 16120, 1292, 3180, 16292, 16360,
+ 16128, 1232, 3212, 16320, 16356,
+ 16136, 1168, 3240, 16344, 16352,
+ 16144, 1108, 3268, 16376, 16344,
+ 16156, 1048, 3292, 20, 16340,
+ 16164, 988, 3316, 52, 16332,
+ 16172, 932, 3336, 88, 16328,
+ 16184, 872, 3356, 124, 16320,
+ 16192, 816, 3372, 160, 16316,
+ 16204, 760, 3388, 196, 16308,
+ 16212, 708, 3400, 236, 16300,
+ 16220, 656, 3412, 276, 16292,
+ 16232, 604, 3420, 320, 16284,
+ 16240, 552, 3424, 364, 16276,
+ 16248, 504, 3428, 408, 16268,
+ 16256, 456, 3428, 456, 16256 };
+
+static const uint16_t filter_5tap_64p_150[165] = {
+ 16368, 2064, 2064, 16368, 0,
+ 16352, 2028, 2100, 16380, 16380,
+ 16340, 1996, 2132, 12, 16376,
+ 16328, 1960, 2168, 24, 16376,
+ 16316, 1924, 2204, 44, 16372,
+ 16308, 1888, 2236, 60, 16368,
+ 16296, 1848, 2268, 76, 16364,
+ 16288, 1812, 2300, 96, 16360,
+ 16280, 1772, 2328, 116, 16356,
+ 16272, 1736, 2360, 136, 16352,
+ 16268, 1696, 2388, 160, 16348,
+ 16260, 1656, 2416, 180, 16344,
+ 16256, 1616, 2440, 204, 16340,
+ 16248, 1576, 2464, 228, 16336,
+ 16244, 1536, 2492, 252, 16332,
+ 16240, 1496, 2512, 276, 16324,
+ 16240, 1456, 2536, 304, 16320,
+ 16236, 1416, 2556, 332, 16316,
+ 16232, 1376, 2576, 360, 16312,
+ 16232, 1336, 2592, 388, 16308,
+ 16232, 1296, 2612, 416, 16300,
+ 16232, 1256, 2628, 448, 16296,
+ 16232, 1216, 2640, 480, 16292,
+ 16232, 1172, 2652, 512, 16288,
+ 16232, 1132, 2664, 544, 16284,
+ 16232, 1092, 2676, 576, 16280,
+ 16236, 1056, 2684, 608, 16272,
+ 16236, 1016, 2692, 644, 16268,
+ 16240, 976, 2700, 680, 16264,
+ 16240, 936, 2704, 712, 16260,
+ 16244, 900, 2708, 748, 16256,
+ 16248, 860, 2708, 788, 16252,
+ 16248, 824, 2708, 824, 16248 };
+
+static const uint16_t filter_5tap_64p_183[165] = {
+ 228, 1816, 1816, 228, 0,
+ 216, 1792, 1836, 248, 16380,
+ 200, 1772, 1860, 264, 16376,
+ 184, 1748, 1884, 280, 16376,
+ 168, 1728, 1904, 300, 16372,
+ 156, 1704, 1928, 316, 16368,
+ 144, 1680, 1948, 336, 16364,
+ 128, 1656, 1968, 356, 16364,
+ 116, 1632, 1988, 376, 16360,
+ 104, 1604, 2008, 396, 16356,
+ 96, 1580, 2024, 416, 16356,
+ 84, 1556, 2044, 440, 16352,
+ 72, 1528, 2060, 460, 16348,
+ 64, 1504, 2076, 484, 16348,
+ 52, 1476, 2092, 504, 16344,
+ 44, 1448, 2104, 528, 16344,
+ 36, 1424, 2120, 552, 16340,
+ 28, 1396, 2132, 576, 16340,
+ 20, 1368, 2144, 600, 16340,
+ 12, 1340, 2156, 624, 16336,
+ 4, 1312, 2168, 652, 16336,
+ 0, 1284, 2180, 676, 16336,
+ 16376, 1256, 2188, 700, 16332,
+ 16372, 1228, 2196, 728, 16332,
+ 16368, 1200, 2204, 752, 16332,
+ 16364, 1172, 2212, 780, 16332,
+ 16356, 1144, 2216, 808, 16332,
+ 16352, 1116, 2220, 836, 16332,
+ 16352, 1084, 2224, 860, 16332,
+ 16348, 1056, 2228, 888, 16336,
+ 16344, 1028, 2232, 916, 16336,
+ 16340, 1000, 2232, 944, 16336,
+ 16340, 972, 2232, 972, 16340 };
+
+static const uint16_t filter_6tap_64p_upscale[198] = {
+ 0, 0, 4092, 0, 0, 0,
+ 12, 16332, 4092, 52, 16368, 0,
+ 24, 16280, 4088, 108, 16356, 0,
+ 36, 16236, 4080, 168, 16340, 0,
+ 44, 16188, 4064, 228, 16324, 0,
+ 56, 16148, 4052, 292, 16308, 0,
+ 64, 16108, 4032, 356, 16292, 4,
+ 72, 16072, 4008, 424, 16276, 4,
+ 80, 16036, 3980, 492, 16256, 4,
+ 88, 16004, 3952, 564, 16240, 8,
+ 96, 15972, 3920, 636, 16220, 8,
+ 100, 15944, 3884, 712, 16204, 12,
+ 108, 15916, 3844, 788, 16184, 16,
+ 112, 15896, 3800, 864, 16164, 20,
+ 116, 15872, 3756, 944, 16144, 20,
+ 120, 15852, 3708, 1024, 16124, 24,
+ 120, 15836, 3656, 1108, 16104, 28,
+ 124, 15824, 3600, 1192, 16084, 32,
+ 124, 15808, 3544, 1276, 16064, 36,
+ 124, 15800, 3484, 1360, 16044, 40,
+ 128, 15792, 3420, 1448, 16024, 44,
+ 128, 15784, 3352, 1536, 16004, 48,
+ 124, 15780, 3288, 1624, 15988, 52,
+ 124, 15776, 3216, 1712, 15968, 56,
+ 124, 15776, 3144, 1800, 15948, 64,
+ 120, 15776, 3068, 1888, 15932, 68,
+ 120, 15780, 2992, 1976, 15912, 72,
+ 116, 15784, 2916, 2064, 15896, 76,
+ 112, 15792, 2836, 2152, 15880, 80,
+ 108, 15796, 2752, 2244, 15868, 84,
+ 104, 15804, 2672, 2328, 15852, 88,
+ 104, 15816, 2588, 2416, 15840, 92,
+ 100, 15828, 2504, 2504, 15828, 100 };
+
+static const uint16_t filter_6tap_64p_117[198] = {
+ 16168, 476, 3568, 476, 16168, 0,
+ 16180, 428, 3564, 528, 16156, 0,
+ 16192, 376, 3556, 584, 16144, 4,
+ 16204, 328, 3548, 636, 16128, 4,
+ 16216, 280, 3540, 692, 16116, 8,
+ 16228, 232, 3524, 748, 16104, 12,
+ 16240, 188, 3512, 808, 16092, 12,
+ 16252, 148, 3492, 864, 16080, 16,
+ 16264, 104, 3472, 924, 16068, 16,
+ 16276, 64, 3452, 984, 16056, 20,
+ 16284, 28, 3428, 1044, 16048, 24,
+ 16296, 16376, 3400, 1108, 16036, 24,
+ 16304, 16340, 3372, 1168, 16024, 28,
+ 16316, 16304, 3340, 1232, 16016, 32,
+ 16324, 16272, 3308, 1296, 16004, 32,
+ 16332, 16244, 3272, 1360, 15996, 36,
+ 16344, 16212, 3236, 1424, 15988, 36,
+ 16352, 16188, 3200, 1488, 15980, 40,
+ 16360, 16160, 3160, 1552, 15972, 40,
+ 16368, 16136, 3116, 1616, 15964, 40,
+ 16372, 16112, 3072, 1680, 15956, 44,
+ 16380, 16092, 3028, 1744, 15952, 44,
+ 0, 16072, 2980, 1808, 15948, 44,
+ 8, 16052, 2932, 1872, 15944, 48,
+ 12, 16036, 2880, 1936, 15940, 48,
+ 16, 16020, 2828, 2000, 15936, 48,
+ 20, 16008, 2776, 2064, 15936, 48,
+ 24, 15996, 2724, 2128, 15936, 48,
+ 28, 15984, 2668, 2192, 15936, 48,
+ 32, 15972, 2612, 2252, 15940, 44,
+ 36, 15964, 2552, 2316, 15940, 44,
+ 40, 15956, 2496, 2376, 15944, 44,
+ 40, 15952, 2436, 2436, 15952, 40 };
+
+static const uint16_t filter_6tap_64p_150[198] = {
+ 16148, 920, 2724, 920, 16148, 0,
+ 16152, 880, 2724, 956, 16148, 0,
+ 16152, 844, 2720, 996, 16144, 0,
+ 16156, 804, 2716, 1032, 16144, 0,
+ 16156, 768, 2712, 1072, 16144, 0,
+ 16160, 732, 2708, 1112, 16144, 16380,
+ 16164, 696, 2700, 1152, 16144, 16380,
+ 16168, 660, 2692, 1192, 16148, 16380,
+ 16172, 628, 2684, 1232, 16148, 16380,
+ 16176, 592, 2672, 1272, 16152, 16376,
+ 16180, 560, 2660, 1312, 16152, 16376,
+ 16184, 524, 2648, 1348, 16156, 16376,
+ 16192, 492, 2632, 1388, 16160, 16372,
+ 16196, 460, 2616, 1428, 16164, 16372,
+ 16200, 432, 2600, 1468, 16168, 16368,
+ 16204, 400, 2584, 1508, 16176, 16364,
+ 16212, 368, 2564, 1548, 16180, 16364,
+ 16216, 340, 2544, 1588, 16188, 16360,
+ 16220, 312, 2524, 1628, 16196, 16356,
+ 16228, 284, 2504, 1668, 16204, 16356,
+ 16232, 256, 2480, 1704, 16212, 16352,
+ 16240, 232, 2456, 1744, 16224, 16348,
+ 16244, 204, 2432, 1780, 16232, 16344,
+ 16248, 180, 2408, 1820, 16244, 16340,
+ 16256, 156, 2380, 1856, 16256, 16336,
+ 16260, 132, 2352, 1896, 16268, 16332,
+ 16268, 108, 2324, 1932, 16280, 16328,
+ 16272, 88, 2296, 1968, 16292, 16324,
+ 16276, 64, 2268, 2004, 16308, 16320,
+ 16284, 44, 2236, 2036, 16324, 16312,
+ 16288, 24, 2204, 2072, 16340, 16308,
+ 16292, 8, 2172, 2108, 16356, 16304,
+ 16300, 16372, 2140, 2140, 16372, 16300 };
+
+static const uint16_t filter_6tap_64p_183[198] = {
+ 16296, 1032, 2196, 1032, 16296, 0,
+ 16292, 1004, 2200, 1060, 16304, 16380,
+ 16288, 976, 2200, 1088, 16308, 16380,
+ 16284, 952, 2196, 1116, 16312, 16376,
+ 16284, 924, 2196, 1144, 16320, 16376,
+ 16280, 900, 2192, 1172, 16324, 16372,
+ 16276, 872, 2192, 1200, 16332, 16368,
+ 16276, 848, 2188, 1228, 16340, 16368,
+ 16272, 820, 2180, 1256, 16348, 16364,
+ 16272, 796, 2176, 1280, 16356, 16360,
+ 16268, 768, 2168, 1308, 16364, 16360,
+ 16268, 744, 2164, 1336, 16372, 16356,
+ 16268, 716, 2156, 1364, 16380, 16352,
+ 16264, 692, 2148, 1392, 4, 16352,
+ 16264, 668, 2136, 1420, 16, 16348,
+ 16264, 644, 2128, 1448, 28, 16344,
+ 16264, 620, 2116, 1472, 36, 16340,
+ 16264, 596, 2108, 1500, 48, 16340,
+ 16268, 572, 2096, 1524, 60, 16336,
+ 16268, 548, 2080, 1552, 72, 16332,
+ 16268, 524, 2068, 1576, 88, 16328,
+ 16268, 504, 2056, 1604, 100, 16324,
+ 16272, 480, 2040, 1628, 112, 16324,
+ 16272, 456, 2024, 1652, 128, 16320,
+ 16272, 436, 2008, 1680, 144, 16316,
+ 16276, 416, 1992, 1704, 156, 16312,
+ 16276, 392, 1976, 1724, 172, 16308,
+ 16280, 372, 1956, 1748, 188, 16308,
+ 16280, 352, 1940, 1772, 204, 16304,
+ 16284, 332, 1920, 1796, 224, 16300,
+ 16288, 312, 1900, 1816, 240, 16296,
+ 16288, 296, 1880, 1840, 256, 16296,
+ 16292, 276, 1860, 1860, 276, 16292 };
+
+static const uint16_t filter_7tap_64p_upscale[231] = {
+ 176, 15760, 2488, 2488, 15760, 176, 0,
+ 172, 15772, 2404, 2572, 15752, 180, 16380,
+ 168, 15784, 2324, 2656, 15740, 184, 16380,
+ 164, 15800, 2240, 2736, 15732, 188, 16376,
+ 160, 15812, 2152, 2816, 15728, 192, 16376,
+ 152, 15828, 2068, 2896, 15724, 192, 16376,
+ 148, 15848, 1984, 2972, 15720, 196, 16372,
+ 140, 15864, 1896, 3048, 15720, 196, 16372,
+ 136, 15884, 1812, 3124, 15720, 196, 16368,
+ 128, 15900, 1724, 3196, 15720, 196, 16368,
+ 120, 15920, 1640, 3268, 15724, 196, 16368,
+ 116, 15940, 1552, 3336, 15732, 196, 16364,
+ 108, 15964, 1468, 3400, 15740, 196, 16364,
+ 104, 15984, 1384, 3464, 15748, 192, 16364,
+ 96, 16004, 1300, 3524, 15760, 188, 16364,
+ 88, 16028, 1216, 3584, 15776, 184, 16364,
+ 84, 16048, 1132, 3640, 15792, 180, 16360,
+ 76, 16072, 1048, 3692, 15812, 176, 16360,
+ 68, 16092, 968, 3744, 15832, 168, 16360,
+ 64, 16116, 888, 3788, 15856, 160, 16360,
+ 56, 16140, 812, 3832, 15884, 152, 16360,
+ 52, 16160, 732, 3876, 15912, 144, 16360,
+ 44, 16184, 656, 3912, 15944, 136, 16364,
+ 40, 16204, 584, 3944, 15976, 124, 16364,
+ 32, 16228, 512, 3976, 16012, 116, 16364,
+ 28, 16248, 440, 4004, 16048, 104, 16364,
+ 24, 16268, 372, 4028, 16092, 88, 16368,
+ 20, 16288, 304, 4048, 16132, 76, 16368,
+ 12, 16308, 240, 4064, 16180, 60, 16372,
+ 8, 16328, 176, 4076, 16228, 48, 16372,
+ 4, 16348, 112, 4088, 16276, 32, 16376,
+ 0, 16364, 56, 4092, 16328, 16, 16380,
+ 0, 0, 0, 4096, 0, 0, 0 };
+
+static const uint16_t filter_7tap_64p_117[231] = {
+ 92, 15868, 2464, 2464, 15868, 92, 0,
+ 96, 15864, 2404, 2528, 15876, 88, 0,
+ 100, 15860, 2344, 2584, 15884, 84, 0,
+ 104, 15856, 2280, 2644, 15892, 76, 0,
+ 108, 15852, 2216, 2700, 15904, 72, 0,
+ 108, 15852, 2152, 2756, 15916, 64, 0,
+ 112, 15852, 2088, 2812, 15932, 60, 0,
+ 112, 15852, 2024, 2864, 15948, 52, 0,
+ 112, 15856, 1960, 2916, 15964, 44, 0,
+ 116, 15860, 1892, 2964, 15984, 36, 0,
+ 116, 15864, 1828, 3016, 16004, 24, 4,
+ 116, 15868, 1760, 3060, 16024, 16, 4,
+ 116, 15876, 1696, 3108, 16048, 8, 8,
+ 116, 15884, 1628, 3152, 16072, 16380, 8,
+ 112, 15892, 1564, 3192, 16100, 16372, 8,
+ 112, 15900, 1496, 3232, 16124, 16360, 12,
+ 112, 15908, 1428, 3268, 16156, 16348, 12,
+ 108, 15920, 1364, 3304, 16188, 16336, 16,
+ 108, 15928, 1300, 3340, 16220, 16324, 20,
+ 104, 15940, 1232, 3372, 16252, 16312, 20,
+ 104, 15952, 1168, 3400, 16288, 16300, 24,
+ 100, 15964, 1104, 3428, 16328, 16284, 28,
+ 96, 15980, 1040, 3452, 16364, 16272, 28,
+ 96, 15992, 976, 3476, 20, 16256, 32,
+ 92, 16004, 916, 3496, 64, 16244, 36,
+ 88, 16020, 856, 3516, 108, 16228, 40,
+ 84, 16032, 792, 3532, 152, 16216, 44,
+ 80, 16048, 732, 3544, 200, 16200, 48,
+ 80, 16064, 676, 3556, 248, 16184, 48,
+ 76, 16080, 616, 3564, 296, 16168, 52,
+ 72, 16092, 560, 3568, 344, 16156, 56,
+ 68, 16108, 504, 3572, 396, 16140, 60,
+ 64, 16124, 452, 3576, 452, 16124, 64 };
+
+static const uint16_t filter_7tap_64p_150[231] = {
+ 16224, 16380, 2208, 2208, 16380, 16224, 0,
+ 16232, 16360, 2172, 2236, 16, 16216, 0,
+ 16236, 16340, 2140, 2268, 40, 16212, 0,
+ 16244, 16324, 2104, 2296, 60, 16204, 4,
+ 16252, 16304, 2072, 2324, 84, 16196, 4,
+ 16256, 16288, 2036, 2352, 108, 16192, 4,
+ 16264, 16268, 2000, 2380, 132, 16184, 8,
+ 16272, 16252, 1960, 2408, 160, 16176, 8,
+ 16276, 16240, 1924, 2432, 184, 16172, 8,
+ 16284, 16224, 1888, 2456, 212, 16164, 8,
+ 16288, 16212, 1848, 2480, 240, 16160, 12,
+ 16296, 16196, 1812, 2500, 268, 16152, 12,
+ 16300, 16184, 1772, 2524, 296, 16144, 12,
+ 16308, 16172, 1736, 2544, 324, 16140, 12,
+ 16312, 16164, 1696, 2564, 356, 16136, 12,
+ 16320, 16152, 1656, 2584, 388, 16128, 12,
+ 16324, 16144, 1616, 2600, 416, 16124, 12,
+ 16328, 16136, 1576, 2616, 448, 16116, 12,
+ 16332, 16128, 1536, 2632, 480, 16112, 12,
+ 16340, 16120, 1496, 2648, 516, 16108, 12,
+ 16344, 16112, 1456, 2660, 548, 16104, 12,
+ 16348, 16104, 1416, 2672, 580, 16100, 12,
+ 16352, 16100, 1376, 2684, 616, 16096, 12,
+ 16356, 16096, 1336, 2696, 652, 16092, 12,
+ 16360, 16092, 1296, 2704, 688, 16088, 12,
+ 16364, 16088, 1256, 2712, 720, 16084, 12,
+ 16368, 16084, 1220, 2720, 760, 16084, 8,
+ 16368, 16080, 1180, 2724, 796, 16080, 8,
+ 16372, 16080, 1140, 2732, 832, 16080, 8,
+ 16376, 16076, 1100, 2732, 868, 16076, 4,
+ 16380, 16076, 1060, 2736, 908, 16076, 4,
+ 16380, 16076, 1020, 2740, 944, 16076, 0,
+ 0, 16076, 984, 2740, 984, 16076, 0 };
+
+static const uint16_t filter_7tap_64p_183[231] = {
+ 16216, 324, 1884, 1884, 324, 16216, 0,
+ 16220, 304, 1864, 1904, 344, 16216, 0,
+ 16224, 284, 1844, 1924, 364, 16216, 0,
+ 16224, 264, 1824, 1944, 384, 16212, 16380,
+ 16228, 248, 1804, 1960, 408, 16212, 16380,
+ 16228, 228, 1784, 1976, 428, 16208, 16380,
+ 16232, 212, 1760, 1996, 452, 16208, 16380,
+ 16236, 192, 1740, 2012, 472, 16208, 16376,
+ 16240, 176, 1716, 2028, 496, 16208, 16376,
+ 16240, 160, 1696, 2040, 516, 16208, 16376,
+ 16244, 144, 1672, 2056, 540, 16208, 16376,
+ 16248, 128, 1648, 2068, 564, 16208, 16372,
+ 16252, 112, 1624, 2084, 588, 16208, 16372,
+ 16256, 96, 1600, 2096, 612, 16208, 16368,
+ 16256, 84, 1576, 2108, 636, 16208, 16368,
+ 16260, 68, 1552, 2120, 660, 16208, 16368,
+ 16264, 56, 1524, 2132, 684, 16212, 16364,
+ 16268, 40, 1500, 2140, 712, 16212, 16364,
+ 16272, 28, 1476, 2152, 736, 16216, 16360,
+ 16276, 16, 1448, 2160, 760, 16216, 16356,
+ 16280, 4, 1424, 2168, 788, 16220, 16356,
+ 16284, 16376, 1396, 2176, 812, 16224, 16352,
+ 16288, 16368, 1372, 2184, 840, 16224, 16352,
+ 16292, 16356, 1344, 2188, 864, 16228, 16348,
+ 16292, 16344, 1320, 2196, 892, 16232, 16344,
+ 16296, 16336, 1292, 2200, 916, 16236, 16344,
+ 16300, 16324, 1264, 2204, 944, 16240, 16340,
+ 16304, 16316, 1240, 2208, 972, 16248, 16336,
+ 16308, 16308, 1212, 2212, 996, 16252, 16332,
+ 16312, 16300, 1184, 2216, 1024, 16256, 16332,
+ 16316, 16292, 1160, 2216, 1052, 16264, 16328,
+ 16316, 16284, 1132, 2216, 1076, 16268, 16324,
+ 16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+
+static const uint16_t filter_8tap_64p_upscale[264] = {
+ 0, 0, 0, 4096, 0, 0, 0, 0,
+ 16376, 20, 16328, 4092, 56, 16364, 4, 0,
+ 16372, 36, 16272, 4088, 116, 16340, 12, 0,
+ 16364, 56, 16220, 4080, 180, 16320, 20, 0,
+ 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
+ 16356, 92, 16124, 4048, 312, 16276, 32, 16380,
+ 16352, 108, 16080, 4032, 380, 16252, 40, 16380,
+ 16344, 124, 16036, 4008, 452, 16228, 48, 16380,
+ 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
+ 16340, 152, 15956, 3952, 600, 16180, 64, 16376,
+ 16336, 164, 15920, 3920, 672, 16156, 76, 16376,
+ 16332, 176, 15888, 3884, 752, 16132, 84, 16376,
+ 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
+ 16328, 200, 15828, 3800, 908, 16080, 100, 16372,
+ 16324, 208, 15804, 3756, 992, 16056, 108, 16372,
+ 16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
+ 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
+ 16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
+ 16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
+ 16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
+ 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
+ 16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
+ 16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
+ 16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
+ 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
+ 16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
+ 16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
+ 16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
+ 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
+ 16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
+ 16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
+ 16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
+ 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+
+static const uint16_t filter_8tap_64p_117[264] = {
+ 116, 16100, 428, 3564, 428, 16100, 116, 0,
+ 112, 16116, 376, 3564, 484, 16084, 120, 16380,
+ 104, 16136, 324, 3560, 540, 16064, 124, 16380,
+ 100, 16152, 272, 3556, 600, 16048, 128, 16380,
+ 96, 16168, 220, 3548, 656, 16032, 136, 16376,
+ 88, 16188, 172, 3540, 716, 16016, 140, 16376,
+ 84, 16204, 124, 3528, 780, 16000, 144, 16376,
+ 80, 16220, 76, 3512, 840, 15984, 148, 16372,
+ 76, 16236, 32, 3496, 904, 15968, 152, 16372,
+ 68, 16252, 16376, 3480, 968, 15952, 156, 16372,
+ 64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
+ 60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
+ 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
+ 48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
+ 44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
+ 40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
+ 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
+ 32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
+ 28, 4, 16052, 3208, 1564, 15836, 176, 16364,
+ 24, 16, 16028, 3168, 1632, 15828, 176, 16364,
+ 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
+ 16, 40, 15976, 3080, 1768, 15812, 176, 16364,
+ 12, 52, 15952, 3036, 1836, 15808, 176, 16364,
+ 8, 64, 15932, 2988, 1904, 15800, 176, 16364,
+ 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
+ 4, 84, 15892, 2888, 2040, 15796, 172, 16364,
+ 0, 96, 15876, 2836, 2104, 15792, 168, 16364,
+ 16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
+ 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
+ 16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
+ 16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
+ 16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
+ 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+
+static const uint16_t filter_8tap_64p_150[264] = {
+ 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
+ 0, 16020, 992, 2756, 1068, 16024, 16376, 0,
+ 4, 16020, 952, 2752, 1108, 16024, 16372, 0,
+ 8, 16020, 916, 2748, 1148, 16028, 16368, 0,
+ 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
+ 16, 16020, 840, 2740, 1224, 16036, 16356, 4,
+ 20, 16024, 800, 2732, 1264, 16040, 16352, 4,
+ 20, 16024, 764, 2724, 1304, 16044, 16348, 8,
+ 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
+ 28, 16028, 692, 2704, 1380, 16056, 16336, 12,
+ 28, 16032, 656, 2696, 1420, 16064, 16328, 12,
+ 32, 16036, 620, 2684, 1460, 16072, 16324, 12,
+ 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
+ 36, 16044, 548, 2656, 1536, 16088, 16308, 16,
+ 36, 16048, 516, 2640, 1576, 16096, 16304, 20,
+ 40, 16052, 480, 2624, 1612, 16108, 16296, 20,
+ 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
+ 44, 16064, 416, 2588, 1692, 16132, 16280, 24,
+ 44, 16068, 384, 2568, 1728, 16144, 16276, 24,
+ 44, 16076, 352, 2548, 1764, 16156, 16268, 28,
+ 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
+ 44, 16088, 292, 2508, 1840, 16184, 16252, 28,
+ 44, 16096, 264, 2484, 1876, 16200, 16244, 32,
+ 48, 16100, 232, 2460, 1912, 16216, 16236, 32,
+ 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
+ 48, 16116, 176, 2412, 1980, 16248, 16220, 36,
+ 48, 16124, 152, 2384, 2016, 16264, 16216, 36,
+ 44, 16128, 124, 2356, 2052, 16284, 16208, 36,
+ 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
+ 44, 16144, 72, 2300, 2116, 16324, 16192, 40,
+ 44, 16152, 48, 2272, 2148, 16344, 16184, 40,
+ 44, 16160, 24, 2244, 2180, 16364, 16176, 40,
+ 44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+
+static const uint16_t filter_8tap_64p_183[264] = {
+ 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
+ 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
+ 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
+ 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
+ 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
+ 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
+ 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
+ 16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
+ 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
+ 16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
+ 16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
+ 16308, 16188, 868, 2200, 1448, 0, 16220, 4,
+ 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
+ 16316, 16180, 816, 2184, 1500, 28, 16212, 4,
+ 16320, 16176, 792, 2172, 1524, 40, 16208, 4,
+ 16324, 16172, 764, 2164, 1548, 56, 16204, 0,
+ 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
+ 16328, 16168, 712, 2144, 1596, 88, 16196, 0,
+ 16332, 16164, 688, 2132, 1620, 100, 16192, 0,
+ 16336, 16164, 664, 2120, 1644, 120, 16192, 0,
+ 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
+ 16344, 16160, 616, 2096, 1688, 152, 16184, 0,
+ 16344, 16160, 592, 2080, 1712, 168, 16180, 0,
+ 16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
+ 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
+ 16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
+ 16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
+ 16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
+ 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
+ 16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
+ 16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
+ 16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
+ 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+
+const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_3tap_16p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_3tap_16p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_3tap_16p_150;
+ else
+ return filter_3tap_16p_183;
+}
+
+const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_3tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_3tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_3tap_64p_150;
+ else
+ return filter_3tap_64p_183;
+}
+
+const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_4tap_16p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_4tap_16p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_4tap_16p_150;
+ else
+ return filter_4tap_16p_183;
+}
+
+const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_4tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_4tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_4tap_64p_150;
+ else
+ return filter_4tap_64p_183;
+}
+
+const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_5tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_5tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_5tap_64p_150;
+ else
+ return filter_5tap_64p_183;
+}
+
+const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_6tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_6tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_6tap_64p_150;
+ else
+ return filter_6tap_64p_183;
+}
+
+const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_7tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_7tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_7tap_64p_150;
+ else
+ return filter_7tap_64p_183;
+}
+
+const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_8tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_8tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_8tap_64p_150;
+ else
+ return filter_8tap_64p_183;
+}
+
+const uint16_t *get_filter_2tap_16p(void)
+{
+ return filter_2tap_16p;
+}
+
+const uint16_t *get_filter_2tap_64p(void)
+{
+ return filter_2tap_64p;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
new file mode 100644
index 000000000000..e42b6eb1c1f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -0,0 +1,1620 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dce_stream_encoder.h"
+#include "reg_helper.h"
+
+enum DP_PIXEL_ENCODING {
+DP_PIXEL_ENCODING_RGB444 = 0x00000000,
+DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
+DP_PIXEL_ENCODING_YCBCR444 = 0x00000002,
+DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003,
+DP_PIXEL_ENCODING_Y_ONLY = 0x00000004,
+DP_PIXEL_ENCODING_YCBCR420 = 0x00000005,
+DP_PIXEL_ENCODING_RESERVED = 0x00000006,
+};
+
+
+enum DP_COMPONENT_DEPTH {
+DP_COMPONENT_DEPTH_6BPC = 0x00000000,
+DP_COMPONENT_DEPTH_8BPC = 0x00000001,
+DP_COMPONENT_DEPTH_10BPC = 0x00000002,
+DP_COMPONENT_DEPTH_12BPC = 0x00000003,
+DP_COMPONENT_DEPTH_16BPC = 0x00000004,
+DP_COMPONENT_DEPTH_RESERVED = 0x00000005,
+};
+
+
+#define REG(reg)\
+ (enc110->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc110->se_shift->field_name, enc110->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define DP_BLANK_MAX_RETRY 20
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#endif
+
+enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DCE110_SE(audio)\
+ container_of(audio, struct dce110_stream_encoder, base)
+
+#define CTX \
+ enc110->base.ctx
+
+static void dce110_update_generic_info_packet(
+ struct dce110_stream_encoder *enc110,
+ uint32_t packet_index,
+ const struct encoder_info_packet *info_packet)
+{
+ uint32_t regval;
+ /* TODOFPGA Figure out a proper number for max_retries polling for lock
+ * use 50 for now.
+ */
+ uint32_t max_retries = 50;
+
+ /*we need turn on clock before programming AFMT block*/
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ if (packet_index >= 8)
+ ASSERT(0);
+
+ /* poll dig_update_lock is not locked -> asic internal signal
+ * assume otg master lock will unlock it
+ */
+/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
+ 0, 10, max_retries);*/
+
+ /* check if HW reading GSP memory */
+ REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+ 0, 10, max_retries);
+
+ /* HW does is not reading GSP memory not reading too long ->
+ * something wrong. clear GPS memory access and notify?
+ * hw SW is writing to GSP memory
+ */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+ }
+ /* choose which generic packet to use */
+ {
+ regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC_INDEX, packet_index);
+ }
+
+ /* write generic packet header
+ * (4th byte is for GENERIC0 only) */
+ {
+ REG_SET_4(AFMT_GENERIC_HDR, 0,
+ AFMT_GENERIC_HB0, info_packet->hb0,
+ AFMT_GENERIC_HB1, info_packet->hb1,
+ AFMT_GENERIC_HB2, info_packet->hb2,
+ AFMT_GENERIC_HB3, info_packet->hb3);
+ }
+
+ /* write generic packet contents
+ * (we never use last 4 bytes)
+ * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers */
+ {
+ const uint32_t *content =
+ (const uint32_t *) &info_packet->sb[0];
+
+ REG_WRITE(AFMT_GENERIC_0, *content++);
+ REG_WRITE(AFMT_GENERIC_1, *content++);
+ REG_WRITE(AFMT_GENERIC_2, *content++);
+ REG_WRITE(AFMT_GENERIC_3, *content++);
+ REG_WRITE(AFMT_GENERIC_4, *content++);
+ REG_WRITE(AFMT_GENERIC_5, *content++);
+ REG_WRITE(AFMT_GENERIC_6, *content++);
+ REG_WRITE(AFMT_GENERIC_7, *content);
+ }
+
+ if (!REG(AFMT_VBI_PACKET_CONTROL1)) {
+ /* force double-buffered packet update */
+ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC0_UPDATE, (packet_index == 0),
+ AFMT_GENERIC2_UPDATE, (packet_index == 2));
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC0_FRAME_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC1_FRAME_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC2_FRAME_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC3_FRAME_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC4_FRAME_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC5_FRAME_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC6_FRAME_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
+ }
+#endif
+}
+
+static void dce110_update_hdmi_info_packet(
+ struct dce110_stream_encoder *enc110,
+ uint32_t packet_index,
+ const struct encoder_info_packet *info_packet)
+{
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t cont, send, line;
+
+ if (info_packet->valid) {
+ dce110_update_generic_info_packet(
+ enc110,
+ packet_index,
+ info_packet);
+
+ /* enable transmission of packet(s) -
+ * packet transmission begins on the next frame */
+ cont = 1;
+ /* send packet(s) every frame */
+ send = 1;
+ /* select line number to send packets on */
+ line = 2;
+ } else {
+ cont = 0;
+ send = 0;
+ line = 0;
+ }
+
+ /* choose which generic packet control to use */
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 1:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 2:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 3:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case 4:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 5:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 6:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 7:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+#endif
+ default:
+ /* invalid HW packet index */
+ dm_logger_write(
+ ctx->logger, LOG_WARNING,
+ "Invalid HW packet index: %s()\n",
+ __func__);
+ return;
+ }
+}
+
+/* setup stream encoder in dp mode */
+static void dce110_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t misc0 = 0;
+ uint32_t misc1 = 0;
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint8_t synchronous_clock = 0; /* asynchronous mode */
+ uint8_t colorimetry_bpc;
+#endif
+
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_DB_CNTL))
+ REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
+#endif
+
+ /* set pixel encoding */
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR422);
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR444);
+
+ if (crtc_timing->flags.Y_ONLY)
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+ /* HW testing only, no use case yet.
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits */
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_Y_ONLY);
+ /* Note: DP_MSA_MISC1 bit 7 is the indicator
+ * of Y-only mode.
+ * This bit is set in HW if register
+ * DP_PIXEL_ENCODING is programmed to 0x4 */
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR420);
+ if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->DP_VID_N_MUL)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
+#endif
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_RGB444);
+ break;
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_MSA_MISC))
+ misc1 = REG_READ(DP_MSA_MISC);
+#endif
+
+ /* set color depth */
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ 0);
+ break;
+ case COLOR_DEPTH_888:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_8BPC);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_10BPC);
+
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_12BPC);
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_6BPC);
+ break;
+ }
+
+ /* set dynamic range and YCbCr range */
+ if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE)
+ REG_UPDATE_2(
+ DP_PIXEL_FORMAT,
+ DP_DYN_RANGE, 0,
+ DP_YCBCR_RANGE, 0);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ colorimetry_bpc = 0;
+ break;
+ case COLOR_DEPTH_888:
+ colorimetry_bpc = 1;
+ break;
+ case COLOR_DEPTH_101010:
+ colorimetry_bpc = 2;
+ break;
+ case COLOR_DEPTH_121212:
+ colorimetry_bpc = 3;
+ break;
+ default:
+ colorimetry_bpc = 0;
+ break;
+ }
+
+ misc0 = misc0 | synchronous_clock;
+ misc0 = colorimetry_bpc << 5;
+
+ if (REG(DP_MSA_TIMING_PARAM1)) {
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ misc0 = misc0 | 0x0;
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_YCBCR601:
+ misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_YCBCR709:
+ misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* do nothing */
+ break;
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_MSA_COLORIMETRY))
+ REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
+
+ if (REG(DP_MSA_MISC))
+ REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */
+
+ /* dcn new register
+ * dc_crtc_timing is vesa dmt struct. data from edid
+ */
+ if (REG(DP_MSA_TIMING_PARAM1))
+ REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
+ DP_MSA_HTOTAL, crtc_timing->h_total,
+ DP_MSA_VTOTAL, crtc_timing->v_total);
+#endif
+
+ /* calcuate from vesa timing parameters
+ * h_active_start related to leading edge of sync
+ */
+
+ h_blank = crtc_timing->h_total - crtc_timing->h_border_left -
+ crtc_timing->h_addressable - crtc_timing->h_border_right;
+
+ h_back_porch = h_blank - crtc_timing->h_front_porch -
+ crtc_timing->h_sync_width;
+
+ /* start at begining of left border */
+ h_active_start = crtc_timing->h_sync_width + h_back_porch;
+
+
+ v_active_start = crtc_timing->v_total - crtc_timing->v_border_top -
+ crtc_timing->v_addressable - crtc_timing->v_border_bottom -
+ crtc_timing->v_front_porch;
+
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* start at begining of left border */
+ if (REG(DP_MSA_TIMING_PARAM2))
+ REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
+ DP_MSA_HSTART, h_active_start,
+ DP_MSA_VSTART, v_active_start);
+
+ if (REG(DP_MSA_TIMING_PARAM3))
+ REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
+ DP_MSA_HSYNCWIDTH,
+ crtc_timing->h_sync_width,
+ DP_MSA_HSYNCPOLARITY,
+ !crtc_timing->flags.HSYNC_POSITIVE_POLARITY,
+ DP_MSA_VSYNCWIDTH,
+ crtc_timing->v_sync_width,
+ DP_MSA_VSYNCPOLARITY,
+ !crtc_timing->flags.VSYNC_POSITIVE_POLARITY);
+
+ /* HWDITH include border or overscan */
+ if (REG(DP_MSA_TIMING_PARAM4))
+ REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
+ DP_MSA_HWIDTH, crtc_timing->h_border_left +
+ crtc_timing->h_addressable + crtc_timing->h_border_right,
+ DP_MSA_VHEIGHT, crtc_timing->v_border_top +
+ crtc_timing->v_addressable + crtc_timing->v_border_bottom);
+#endif
+ }
+#endif
+}
+
+static void dce110_stream_encoder_set_stream_attribute_helper(
+ struct dce110_stream_encoder *enc110,
+ struct dc_crtc_timing *crtc_timing)
+{
+ if (enc110->regs->TMDS_CNTL) {
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 1);
+ break;
+ default:
+ REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 0);
+ break;
+ }
+ REG_UPDATE(TMDS_CNTL, TMDS_COLOR_FORMAT, 0);
+ } else if (enc110->regs->DIG_FE_CNTL) {
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
+ break;
+ default:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
+ break;
+ }
+ REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
+ }
+
+}
+
+/* setup stream encoder in hdmi mode */
+static void dce110_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc110->base.id;
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+
+ /* setup HDMI engine */
+ if (!enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+ REG_UPDATE_3(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else if (enc110->regs->DIG_FE_CNTL) {
+ REG_UPDATE_5(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_121212:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 3,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+ if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+ * Clock channel frequency is 1/4 of character rate.
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 1);
+ } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+ /* TODO: New feature for DCE11, still need to implement */
+
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+ * Clock channel frequency is the same
+ * as character rate
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+ }
+
+ REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+ HDMI_GC_CONT, 1,
+ HDMI_GC_SEND, 1,
+ HDMI_NULL_SEND, 1);
+
+ /* following belongs to audio */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+
+}
+
+/* setup stream encoder in dvi mode */
+static void dce110_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc110->base.id;
+ cntl.signal = is_dual_link ?
+ SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.enable_dp_audio = false;
+ cntl.pixel_clock = crtc_timing->pix_clk_khz;
+ cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+ ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+ dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+}
+
+static void dce110_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t x = dal_fixed31_32_floor(
+ avg_time_slots_per_mtp);
+ uint32_t y = dal_fixed31_32_ceil(
+ dal_fixed31_32_shl(
+ dal_fixed31_32_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 26));
+
+ {
+ REG_SET_2(DP_MSE_RATE_CNTL, 0,
+ DP_MSE_RATE_X, x,
+ DP_MSE_RATE_Y, y);
+ }
+
+ /* wait for update to be completed on the link */
+ /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
+ /* is reset to 0 (not pending) */
+ REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
+ 0,
+ 10, DP_MST_UPDATE_MAX_RETRY);
+}
+
+static void dce110_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+ enc110->se_mask->HDMI_AVI_INFO_SEND) {
+
+ if (info_frame->avi.valid) {
+ const uint32_t *content =
+ (const uint32_t *) &info_frame->avi.sb[0];
+
+ REG_WRITE(AFMT_AVI_INFO0, content[0]);
+
+ REG_WRITE(AFMT_AVI_INFO1, content[1]);
+
+ REG_WRITE(AFMT_AVI_INFO2, content[2]);
+
+ REG_WRITE(AFMT_AVI_INFO3, content[3]);
+
+ REG_UPDATE(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION,
+ info_frame->avi.hb1);
+
+ REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+ HDMI_AVI_INFO_SEND, 1,
+ HDMI_AVI_INFO_CONT, 1);
+
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ } else {
+ REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+ HDMI_AVI_INFO_SEND, 0,
+ HDMI_AVI_INFO_CONT, 0);
+ }
+ }
+
+ if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+ enc110->se_mask->HDMI_AVI_INFO_SEND) {
+ dce110_update_hdmi_info_packet(enc110, 0, &info_frame->vendor);
+ dce110_update_hdmi_info_packet(enc110, 1, &info_frame->gamut);
+ dce110_update_hdmi_info_packet(enc110, 2, &info_frame->spd);
+ dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->HDMI_DB_DISABLE) {
+ /* for bring up, disable dp double TODO */
+ if (REG(HDMI_DB_CONTROL))
+ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+
+ dce110_update_hdmi_info_packet(enc110, 0, &info_frame->avi);
+ dce110_update_hdmi_info_packet(enc110, 1, &info_frame->vendor);
+ dce110_update_hdmi_info_packet(enc110, 2, &info_frame->gamut);
+ dce110_update_hdmi_info_packet(enc110, 3, &info_frame->spd);
+ dce110_update_hdmi_info_packet(enc110, 4, &info_frame->hdrsmd);
+ }
+#endif
+}
+
+static void dce110_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* stop generic packets 0 & 1 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0);
+
+ /* stop generic packets 2 & 3 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* stop generic packets 2 & 3 on HDMI */
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+#endif
+}
+
+static void dce110_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ if (info_frame->vsc.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 0, /* packetIndex */
+ &info_frame->vsc);
+
+ if (info_frame->spd.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 2, /* packetIndex */
+ &info_frame->spd);
+
+ if (info_frame->hdrsmd.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd);
+
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
+ /* This bit is the master enable bit.
+ * When enabling secondary stream engine,
+ * this master bit must also be set.
+ * This register shared with audio info frame.
+ * Therefore we need to enable master bit
+ * if at least on of the fields is not 0
+ */
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc)
+{
+ /* stop generic packets on DP */
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
+ REG_SET_7(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+ DP_SEC_GSP1_ENABLE, 0,
+ DP_SEC_GSP2_ENABLE, 0,
+ DP_SEC_GSP3_ENABLE, 0,
+ DP_SEC_AVI_ENABLE, 0,
+ DP_SEC_MPG_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->DP_SEC_GSP7_ENABLE) {
+ REG_SET_10(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+ DP_SEC_GSP1_ENABLE, 0,
+ DP_SEC_GSP2_ENABLE, 0,
+ DP_SEC_GSP3_ENABLE, 0,
+ DP_SEC_GSP4_ENABLE, 0,
+ DP_SEC_GSP5_ENABLE, 0,
+ DP_SEC_GSP6_ENABLE, 0,
+ DP_SEC_GSP7_ENABLE, 0,
+ DP_SEC_MPG_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+ }
+#endif
+ /* this register shared with audio info frame.
+ * therefore we need to keep master enabled
+ * if at least one of the fields is not 0 */
+
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+static void dce110_stream_encoder_dp_blank(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t retries = 0;
+ uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
+
+ /* Note: For CZ, we are changing driver default to disable
+ * stream deferred to next VBLANK. If results are positive, we
+ * will make the same change to all DCE versions. There are a
+ * handful of panels that cannot handle disable stream at
+ * HBLANK and will result in a white line flash across the
+ * screen on stream disable. */
+
+ /* Specify the video stream disable point
+ * (2 = start of the next vertical blank) */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
+ /* Larger delay to wait until VBLANK - use max retry of
+ * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
+ max_retries = DP_BLANK_MAX_RETRY * 150;
+
+ /* disable DP stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* the encoder stops sending the video stream
+ * at the start of the vertical blanking.
+ * Poll for DP_VID_STREAM_STATUS == 0
+ */
+
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
+ 0,
+ 10, max_retries);
+
+ ASSERT(retries <= max_retries);
+
+ /* Tell the DP encoder to ignore timing from CRTC, must be done after
+ * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+ * complete, stream status will be stuck in video stream enabled state,
+ * i.e. DP_VID_STREAM_STATUS stuck at 1.
+ */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+}
+
+/* output video stream to link encoder */
+static void dce110_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+ uint32_t n_vid = 0x8000;
+ uint32_t m_vid;
+
+ /* M / N = Fstream / Flink
+ * m_vid / n_vid = pixel rate / link rate
+ */
+
+ uint64_t m_vid_l = n_vid;
+
+ m_vid_l *= param->pixel_clk_khz;
+ m_vid_l = div_u64(m_vid_l,
+ param->link_settings.link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ);
+
+ m_vid = (uint32_t) m_vid_l;
+
+ /* enable auto measurement */
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
+
+ /* auto measurement need 1 full 0x8000 symbol cycle to kick in,
+ * therefore program initial value for Mvid and Nvid
+ */
+
+ REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
+
+ REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ }
+
+ /* set DIG_START to 0x1 to resync FIFO */
+
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+
+ /* switch DP encoder to CRTC data */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
+
+ /* wait 100us for DIG/DP logic to prime
+ * (i.e. a few video lines)
+ */
+ udelay(100);
+
+ /* the hardware would start sending video at the start of the next DP
+ * frame (i.e. rising edge of the vblank).
+ * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
+ * register has no effect on enable transition! HW always guarantees
+ * VID_STREAM enable at start of next frame, and this is not
+ * programmable
+ */
+
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+}
+
+static void dce110_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ unsigned int value = enable ? 1 : 0;
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
+}
+
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
+
+#include "include/audio_types.h"
+
+/**
+* speakersToChannels
+*
+* @brief
+* translate speakers to channels
+*
+* FL - Front Left
+* FR - Front Right
+* RL - Rear Left
+* RR - Rear Right
+* RC - Rear Center
+* FC - Front Center
+* FLC - Front Left Center
+* FRC - Front Right Center
+* RLC - Rear Left Center
+* RRC - Rear Right Center
+* LFE - Low Freq Effect
+*
+* FC
+* FLC FRC
+* FL FR
+*
+* LFE
+* ()
+*
+*
+* RL RR
+* RLC RRC
+* RC
+*
+* ch 8 7 6 5 4 3 2 1
+* 0b00000011 - - - - - - FR FL
+* 0b00000111 - - - - - LFE FR FL
+* 0b00001011 - - - - FC - FR FL
+* 0b00001111 - - - - FC LFE FR FL
+* 0b00010011 - - - RC - - FR FL
+* 0b00010111 - - - RC - LFE FR FL
+* 0b00011011 - - - RC FC - FR FL
+* 0b00011111 - - - RC FC LFE FR FL
+* 0b00110011 - - RR RL - - FR FL
+* 0b00110111 - - RR RL - LFE FR FL
+* 0b00111011 - - RR RL FC - FR FL
+* 0b00111111 - - RR RL FC LFE FR FL
+* 0b01110011 - RC RR RL - - FR FL
+* 0b01110111 - RC RR RL - LFE FR FL
+* 0b01111011 - RC RR RL FC - FR FL
+* 0b01111111 - RC RR RL FC LFE FR FL
+* 0b11110011 RRC RLC RR RL - - FR FL
+* 0b11110111 RRC RLC RR RL - LFE FR FL
+* 0b11111011 RRC RLC RR RL FC - FR FL
+* 0b11111111 RRC RLC RR RL FC LFE FR FL
+* 0b11000011 FRC FLC - - - - FR FL
+* 0b11000111 FRC FLC - - - LFE FR FL
+* 0b11001011 FRC FLC - - FC - FR FL
+* 0b11001111 FRC FLC - - FC LFE FR FL
+* 0b11010011 FRC FLC - RC - - FR FL
+* 0b11010111 FRC FLC - RC - LFE FR FL
+* 0b11011011 FRC FLC - RC FC - FR FL
+* 0b11011111 FRC FLC - RC FC LFE FR FL
+* 0b11110011 FRC FLC RR RL - - FR FL
+* 0b11110111 FRC FLC RR RL - LFE FR FL
+* 0b11111011 FRC FLC RR RL FC - FR FL
+* 0b11111111 FRC FLC RR RL FC LFE FR FL
+*
+* @param
+* speakers - speaker information as it comes from CEA audio block
+*/
+/* translate speakers to channels */
+
+union audio_cea_channels {
+ uint8_t all;
+ struct audio_cea_channels_bits {
+ uint32_t FL:1;
+ uint32_t FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RC:1;
+ uint32_t RR:1;
+ uint32_t RC_RLC_FLC:1;
+ uint32_t RRC_FRC:1;
+ } channels;
+};
+
+struct audio_clock_info {
+ /* pixel clock frequency*/
+ uint32_t pixel_clock_in_10khz;
+ /* N - 32KHz audio */
+ uint32_t n_32khz;
+ /* CTS - 32KHz audio*/
+ uint32_t cts_32khz;
+ uint32_t n_44khz;
+ uint32_t cts_44khz;
+ uint32_t n_48khz;
+ uint32_t cts_48khz;
+};
+
+/* 25.2MHz/1.001*/
+/* 25.2MHz/1.001*/
+/* 25.2MHz*/
+/* 27MHz */
+/* 27MHz*1.001*/
+/* 27MHz*1.001*/
+/* 54MHz*/
+/* 54MHz*1.001*/
+/* 74.25MHz/1.001*/
+/* 74.25MHz*/
+/* 148.5MHz/1.001*/
+/* 148.5MHz*/
+
+static const struct audio_clock_info audio_clock_info_table[16] = {
+ {2517, 4576, 28125, 7007, 31250, 6864, 28125},
+ {2518, 4576, 28125, 7007, 31250, 6864, 28125},
+ {2520, 4096, 25200, 6272, 28000, 6144, 25200},
+ {2700, 4096, 27000, 6272, 30000, 6144, 27000},
+ {2702, 4096, 27027, 6272, 30030, 6144, 27027},
+ {2703, 4096, 27027, 6272, 30030, 6144, 27027},
+ {5400, 4096, 54000, 6272, 60000, 6144, 54000},
+ {5405, 4096, 54054, 6272, 60060, 6144, 54054},
+ {7417, 11648, 210937, 17836, 234375, 11648, 140625},
+ {7425, 4096, 74250, 6272, 82500, 6144, 74250},
+ {14835, 11648, 421875, 8918, 234375, 5824, 140625},
+ {14850, 4096, 148500, 6272, 165000, 6144, 148500},
+ {29670, 5824, 421875, 4459, 234375, 5824, 281250},
+ {29700, 3072, 222750, 4704, 247500, 5120, 247500},
+ {59340, 5824, 843750, 8918, 937500, 5824, 562500},
+ {59400, 3072, 445500, 9408, 990000, 6144, 594000}
+};
+
+static const struct audio_clock_info audio_clock_info_table_36bpc[14] = {
+ {2517, 9152, 84375, 7007, 48875, 9152, 56250},
+ {2518, 9152, 84375, 7007, 48875, 9152, 56250},
+ {2520, 4096, 37800, 6272, 42000, 6144, 37800},
+ {2700, 4096, 40500, 6272, 45000, 6144, 40500},
+ {2702, 8192, 81081, 6272, 45045, 8192, 54054},
+ {2703, 8192, 81081, 6272, 45045, 8192, 54054},
+ {5400, 4096, 81000, 6272, 90000, 6144, 81000},
+ {5405, 4096, 81081, 6272, 90090, 6144, 81081},
+ {7417, 11648, 316406, 17836, 351562, 11648, 210937},
+ {7425, 4096, 111375, 6272, 123750, 6144, 111375},
+ {14835, 11648, 632812, 17836, 703125, 11648, 421875},
+ {14850, 4096, 222750, 6272, 247500, 6144, 222750},
+ {29670, 5824, 632812, 8918, 703125, 5824, 421875},
+ {29700, 4096, 445500, 4704, 371250, 5120, 371250}
+};
+
+static const struct audio_clock_info audio_clock_info_table_48bpc[14] = {
+ {2517, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2518, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2520, 4096, 50400, 6272, 56000, 6144, 50400},
+ {2700, 4096, 54000, 6272, 60000, 6144, 54000},
+ {2702, 4096, 54054, 6267, 60060, 8192, 54054},
+ {2703, 4096, 54054, 6272, 60060, 8192, 54054},
+ {5400, 4096, 108000, 6272, 120000, 6144, 108000},
+ {5405, 4096, 108108, 6272, 120120, 6144, 108108},
+ {7417, 11648, 421875, 17836, 468750, 11648, 281250},
+ {7425, 4096, 148500, 6272, 165000, 6144, 148500},
+ {14835, 11648, 843750, 8918, 468750, 11648, 281250},
+ {14850, 4096, 297000, 6272, 330000, 6144, 297000},
+ {29670, 5824, 843750, 4459, 468750, 5824, 562500},
+ {29700, 3072, 445500, 4704, 495000, 5120, 495000}
+
+
+};
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+ /* if Rear Left and Right exist move RC speaker to channel 7
+ * otherwise to channel 5
+ */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+ /* FRONT Left Right Center and REAR Left Right Center are exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
+
+ return cea_channels;
+}
+
+static uint32_t calc_max_audio_packets_per_line(
+ const struct audio_crtc_info *crtc_info)
+{
+ uint32_t max_packets_per_line;
+
+ max_packets_per_line =
+ crtc_info->h_total - crtc_info->h_active;
+
+ if (crtc_info->pixel_repetition)
+ max_packets_per_line *= crtc_info->pixel_repetition;
+
+ /* for other hdmi features */
+ max_packets_per_line -= 58;
+ /* for Control Period */
+ max_packets_per_line -= 16;
+ /* Number of Audio Packets per Line */
+ max_packets_per_line /= 32;
+
+ return max_packets_per_line;
+}
+
+static void get_audio_clock_info(
+ enum dc_color_depth color_depth,
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct audio_clock_info *audio_clock_info)
+{
+ const struct audio_clock_info *clock_info;
+ uint32_t index;
+ uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+ uint32_t audio_array_size;
+
+ switch (color_depth) {
+ case COLOR_DEPTH_161616:
+ clock_info = audio_clock_info_table_48bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_48bpc);
+ break;
+ case COLOR_DEPTH_121212:
+ clock_info = audio_clock_info_table_36bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_36bpc);
+ break;
+ default:
+ clock_info = audio_clock_info_table;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table);
+ break;
+ }
+
+ if (clock_info != NULL) {
+ /* search for exact pixel clock in table */
+ for (index = 0; index < audio_array_size; index++) {
+ if (clock_info[index].pixel_clock_in_10khz >
+ crtc_pixel_clock_in_10khz)
+ break; /* not match */
+ else if (clock_info[index].pixel_clock_in_10khz ==
+ crtc_pixel_clock_in_10khz) {
+ /* match found */
+ *audio_clock_info = clock_info[index];
+ return;
+ }
+ }
+ }
+
+ /* not found */
+ if (actual_pixel_clock_in_khz == 0)
+ actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+
+ /* See HDMI spec the table entry under
+ * pixel clock of "Other". */
+ audio_clock_info->pixel_clock_in_10khz =
+ actual_pixel_clock_in_khz / 10;
+ audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+
+ audio_clock_info->n_32khz = 4096;
+ audio_clock_info->n_44khz = 6272;
+ audio_clock_info->n_48khz = 6144;
+}
+
+static void dce110_se_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+ if (audio_info == NULL)
+ /* This should not happen.it does so we don't get BSOD*/
+ return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* setup the audio stream source select (audio -> dig mapping) */
+ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+ /* Channel allocation */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+}
+
+static void dce110_se_setup_hdmi_audio(
+ struct stream_encoder *enc,
+ const struct audio_crtc_info *crtc_info)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ struct audio_clock_info audio_clock_info = {0};
+ uint32_t max_packets_per_line;
+
+ /* For now still do calculation, although this field is ignored when
+ above HDMI_PACKET_GEN_VERSION set to 1 */
+ max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
+
+ /* HDMI_AUDIO_PACKET_CONTROL */
+ REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
+ HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+ HDMI_AUDIO_DELAY_EN, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* HDMI_ACR_PACKET_CONTROL */
+ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+ HDMI_ACR_AUTO_SEND, 1,
+ HDMI_ACR_SOURCE, 0,
+ HDMI_ACR_AUDIO_PRIORITY, 0);
+
+ /* Program audio clock sample/regeneration parameters */
+ get_audio_clock_info(crtc_info->color_depth,
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ &audio_clock_info);
+ dm_logger_write(enc->ctx->logger, LOG_HW_AUDIO,
+ "\n%s:Input::requested_pixel_clock = %d" \
+ "calculated_pixel_clock = %d \n", __func__, \
+ crtc_info->requested_pixel_clock, \
+ crtc_info->calculated_pixel_clock);
+
+ /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+ /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+ /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+ /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+ /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+ /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
+
+ /* Video driver cannot know in advance which sample rate will
+ be used by HD Audio driver
+ HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+ programmed below in interruppt callback */
+
+ /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+ AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE_2(AFMT_60958_0,
+ AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+ AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+ /* AFMT_60958_1 AFMT_60958_CS_CHALNNEL_NUMBER_R */
+ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+ /*AFMT_60958_2 now keep this settings until
+ * Programming guide comes out*/
+ REG_UPDATE_6(AFMT_60958_2,
+ AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+ AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+ AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+ AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+ AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+ AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static void dce110_se_setup_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* --- DP Audio packet configurations --- */
+
+ /* ATP Configuration */
+ REG_SET(DP_SEC_AUD_N, 0,
+ DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+ /* Async/auto-calc timestamp mode */
+ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+ DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+ /* --- The following are the registers
+ * copied from the SetupHDMI --- */
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_INFOFRAME_CONTROL0 */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static void dce110_se_enable_audio_clock(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (REG(AFMT_CNTL) == 0)
+ return; /* DCE8/10 does not have this register */
+
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
+
+ /* wait for AFMT clock to turn on,
+ * expectation: this should complete in 1-2 reads
+ *
+ * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
+ *
+ * TODO: wait for clock_on does not work well. May need HW
+ * program sequence. But audio seems work normally even without wait
+ * for clock_on status change
+ */
+}
+
+static void dce110_se_enable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* Enable Audio packets */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(DP_SEC_CNTL,
+ DP_SEC_ATP_ENABLE, 1,
+ DP_SEC_AIP_ENABLE, 1);
+
+ /* Program STREAM_ENABLE after all the other enables. */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_se_disable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ /* Disable Audio packets */
+ REG_UPDATE_5(DP_SEC_CNTL,
+ DP_SEC_ASP_ENABLE, 0,
+ DP_SEC_ATP_ENABLE, 0,
+ DP_SEC_AIP_ENABLE, 0,
+ DP_SEC_ACM_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+
+ /* This register shared with encoder info frame. Therefore we need to
+ keep master enabled if at least on of the fields is not 0 */
+ if (value != 0)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+void dce110_se_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+void dce110_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_dp_audio_enable(
+ struct stream_encoder *enc)
+{
+ dce110_se_enable_audio_clock(enc, true);
+ dce110_se_setup_dp_audio(enc);
+ dce110_se_enable_dp_audio(enc);
+}
+
+void dce110_se_dp_audio_disable(
+ struct stream_encoder *enc)
+{
+ dce110_se_disable_dp_audio(enc);
+ dce110_se_enable_audio_clock(enc, false);
+}
+
+void dce110_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info)
+{
+ dce110_se_enable_audio_clock(enc, true);
+ dce110_se_setup_hdmi_audio(enc, audio_crtc_info);
+ dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_hdmi_audio_disable(
+ struct stream_encoder *enc)
+{
+ dce110_se_enable_audio_clock(enc, false);
+}
+
+
+static void setup_stereo_sync(
+ struct stream_encoder *enc,
+ int tg_inst, bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst);
+ REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
+}
+
+
+static const struct stream_encoder_funcs dce110_str_enc_funcs = {
+ .dp_set_stream_attribute =
+ dce110_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ dce110_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ dce110_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ dce110_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ dce110_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ dce110_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ dce110_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ dce110_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ dce110_stream_encoder_dp_blank,
+ .dp_unblank =
+ dce110_stream_encoder_dp_unblank,
+ .audio_mute_control = dce110_se_audio_mute_control,
+
+ .dp_audio_setup = dce110_se_dp_audio_setup,
+ .dp_audio_enable = dce110_se_dp_audio_enable,
+ .dp_audio_disable = dce110_se_dp_audio_disable,
+
+ .hdmi_audio_setup = dce110_se_hdmi_audio_setup,
+ .hdmi_audio_disable = dce110_se_hdmi_audio_disable,
+ .setup_stereo_sync = setup_stereo_sync,
+ .set_avmute = dce110_stream_encoder_set_avmute,
+
+};
+
+void dce110_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dce110_stream_enc_registers *regs,
+ const struct dce_stream_encoder_shift *se_shift,
+ const struct dce_stream_encoder_mask *se_mask)
+{
+ enc110->base.funcs = &dce110_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+ enc110->regs = regs;
+ enc110->se_shift = se_shift;
+ enc110->se_mask = se_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
new file mode 100644
index 000000000000..6c28229c76eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -0,0 +1,733 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_STREAM_ENCODER_DCE110_H__
+#define __DC_STREAM_ENCODER_DCE110_H__
+
+#include "stream_encoder.h"
+
+#define DCE110STRENC_FROM_STRENC(stream_encoder)\
+ container_of(stream_encoder, struct dce110_stream_encoder, base)
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#endif
+
+
+#define SE_COMMON_REG_LIST_DCE_BASE(id) \
+ SE_COMMON_REG_LIST_BASE(id),\
+ SRI(AFMT_AVI_INFO0, DIG, id), \
+ SRI(AFMT_AVI_INFO1, DIG, id), \
+ SRI(AFMT_AVI_INFO2, DIG, id), \
+ SRI(AFMT_AVI_INFO3, DIG, id)
+
+#define SE_COMMON_REG_LIST_BASE(id) \
+ SRI(AFMT_GENERIC_0, DIG, id), \
+ SRI(AFMT_GENERIC_1, DIG, id), \
+ SRI(AFMT_GENERIC_2, DIG, id), \
+ SRI(AFMT_GENERIC_3, DIG, id), \
+ SRI(AFMT_GENERIC_4, DIG, id), \
+ SRI(AFMT_GENERIC_5, DIG, id), \
+ SRI(AFMT_GENERIC_6, DIG, id), \
+ SRI(AFMT_GENERIC_7, DIG, id), \
+ SRI(AFMT_GENERIC_HDR, DIG, id), \
+ SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
+ SRI(AFMT_60958_0, DIG, id), \
+ SRI(AFMT_60958_1, DIG, id), \
+ SRI(AFMT_60958_2, DIG, id), \
+ SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(HDMI_CONTROL, DIG, id), \
+ SRI(HDMI_GC, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_32_0, DIG, id),\
+ SRI(HDMI_ACR_32_1, DIG, id),\
+ SRI(HDMI_ACR_44_0, DIG, id),\
+ SRI(HDMI_ACR_44_1, DIG, id),\
+ SRI(HDMI_ACR_48_0, DIG, id),\
+ SRI(HDMI_ACR_48_1, DIG, id),\
+ SRI(TMDS_CNTL, DIG, id), \
+ SRI(DP_MSE_RATE_CNTL, DP, id), \
+ SRI(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI(DP_PIXEL_FORMAT, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_STEER_FIFO, DP, id), \
+ SRI(DP_VID_M, DP, id), \
+ SRI(DP_VID_N, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_VID_TIMING, DP, id), \
+ SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_TIMESTAMP, DP, id)
+
+#define SE_COMMON_REG_LIST(id)\
+ SE_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(AFMT_CNTL, DIG, id)
+
+#define SE_DCN_REG_LIST(id)\
+ SE_COMMON_REG_LIST_BASE(id),\
+ SRI(AFMT_CNTL, DIG, id),\
+ SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id),\
+ SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI(DP_DB_CNTL, DP, id), \
+ SRI(DP_MSA_MISC, DP, id), \
+ SRI(DP_MSA_COLORIMETRY, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI(HDMI_DB_CONTROL, DIG, id)
+
+#define SE_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+ SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE112(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE120(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCN10(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
+ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh)
+
+struct dce_stream_encoder_shift {
+ uint8_t AFMT_GENERIC_INDEX;
+ uint8_t AFMT_GENERIC0_UPDATE;
+ uint8_t AFMT_GENERIC2_UPDATE;
+ uint8_t AFMT_GENERIC_HB0;
+ uint8_t AFMT_GENERIC_HB1;
+ uint8_t AFMT_GENERIC_HB2;
+ uint8_t AFMT_GENERIC_HB3;
+ uint8_t AFMT_GENERIC_LOCK_STATUS;
+ uint8_t AFMT_GENERIC_CONFLICT;
+ uint8_t AFMT_GENERIC_CONFLICT_CLR;
+ uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC0_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC1_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC2_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC3_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC4_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC5_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC6_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC7_FRAME_UPDATE;
+ uint8_t HDMI_GENERIC0_CONT;
+ uint8_t HDMI_GENERIC0_SEND;
+ uint8_t HDMI_GENERIC0_LINE;
+ uint8_t HDMI_GENERIC1_CONT;
+ uint8_t HDMI_GENERIC1_SEND;
+ uint8_t HDMI_GENERIC1_LINE;
+ uint8_t DP_PIXEL_ENCODING;
+ uint8_t DP_COMPONENT_DEPTH;
+ uint8_t DP_DYN_RANGE;
+ uint8_t DP_YCBCR_RANGE;
+ uint8_t HDMI_PACKET_GEN_VERSION;
+ uint8_t HDMI_KEEPOUT_MODE;
+ uint8_t HDMI_DEEP_COLOR_ENABLE;
+ uint8_t HDMI_CLOCK_CHANNEL_RATE;
+ uint8_t HDMI_DEEP_COLOR_DEPTH;
+ uint8_t HDMI_GC_CONT;
+ uint8_t HDMI_GC_SEND;
+ uint8_t HDMI_NULL_SEND;
+ uint8_t HDMI_DATA_SCRAMBLE_EN;
+ uint8_t HDMI_AUDIO_INFO_SEND;
+ uint8_t AFMT_AUDIO_INFO_UPDATE;
+ uint8_t HDMI_AUDIO_INFO_LINE;
+ uint8_t HDMI_GC_AVMUTE;
+ uint8_t DP_MSE_RATE_X;
+ uint8_t DP_MSE_RATE_Y;
+ uint8_t DP_MSE_RATE_UPDATE_PENDING;
+ uint8_t AFMT_AVI_INFO_VERSION;
+ uint8_t HDMI_AVI_INFO_SEND;
+ uint8_t HDMI_AVI_INFO_CONT;
+ uint8_t HDMI_AVI_INFO_LINE;
+ uint8_t DP_SEC_GSP0_ENABLE;
+ uint8_t DP_SEC_STREAM_ENABLE;
+ uint8_t DP_SEC_GSP1_ENABLE;
+ uint8_t DP_SEC_GSP2_ENABLE;
+ uint8_t DP_SEC_GSP3_ENABLE;
+ uint8_t DP_SEC_GSP4_ENABLE;
+ uint8_t DP_SEC_GSP5_ENABLE;
+ uint8_t DP_SEC_GSP6_ENABLE;
+ uint8_t DP_SEC_GSP7_ENABLE;
+ uint8_t DP_SEC_AVI_ENABLE;
+ uint8_t DP_SEC_MPG_ENABLE;
+ uint8_t DP_VID_STREAM_DIS_DEFER;
+ uint8_t DP_VID_STREAM_ENABLE;
+ uint8_t DP_VID_STREAM_STATUS;
+ uint8_t DP_STEER_FIFO_RESET;
+ uint8_t DP_VID_M_N_GEN_EN;
+ uint8_t DP_VID_N;
+ uint8_t DP_VID_M;
+ uint8_t DIG_START;
+ uint8_t AFMT_AUDIO_SRC_SELECT;
+ uint8_t AFMT_AUDIO_CHANNEL_ENABLE;
+ uint8_t HDMI_AUDIO_PACKETS_PER_LINE;
+ uint8_t HDMI_AUDIO_DELAY_EN;
+ uint8_t AFMT_60958_CS_UPDATE;
+ uint8_t AFMT_AUDIO_LAYOUT_OVRD;
+ uint8_t AFMT_60958_OSF_OVRD;
+ uint8_t HDMI_ACR_AUTO_SEND;
+ uint8_t HDMI_ACR_SOURCE;
+ uint8_t HDMI_ACR_AUDIO_PRIORITY;
+ uint8_t HDMI_ACR_CTS_32;
+ uint8_t HDMI_ACR_N_32;
+ uint8_t HDMI_ACR_CTS_44;
+ uint8_t HDMI_ACR_N_44;
+ uint8_t HDMI_ACR_CTS_48;
+ uint8_t HDMI_ACR_N_48;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+ uint8_t AFMT_60958_CS_CLOCK_ACCURACY;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+ uint8_t DP_SEC_AUD_N;
+ uint8_t DP_SEC_TIMESTAMP_MODE;
+ uint8_t DP_SEC_ASP_ENABLE;
+ uint8_t DP_SEC_ATP_ENABLE;
+ uint8_t DP_SEC_AIP_ENABLE;
+ uint8_t DP_SEC_ACM_ENABLE;
+ uint8_t AFMT_AUDIO_SAMPLE_SEND;
+ uint8_t AFMT_AUDIO_CLOCK_EN;
+ uint8_t TMDS_PIXEL_ENCODING;
+ uint8_t TMDS_COLOR_FORMAT;
+ uint8_t DIG_STEREOSYNC_SELECT;
+ uint8_t DIG_STEREOSYNC_GATE_EN;
+ uint8_t DP_DB_DISABLE;
+ uint8_t DP_MSA_MISC0;
+ uint8_t DP_MSA_HTOTAL;
+ uint8_t DP_MSA_VTOTAL;
+ uint8_t DP_MSA_HSTART;
+ uint8_t DP_MSA_VSTART;
+ uint8_t DP_MSA_HSYNCWIDTH;
+ uint8_t DP_MSA_HSYNCPOLARITY;
+ uint8_t DP_MSA_VSYNCWIDTH;
+ uint8_t DP_MSA_VSYNCPOLARITY;
+ uint8_t DP_MSA_HWIDTH;
+ uint8_t DP_MSA_VHEIGHT;
+ uint8_t HDMI_DB_DISABLE;
+ uint8_t DP_VID_N_MUL;
+ uint8_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce_stream_encoder_mask {
+ uint32_t AFMT_GENERIC_INDEX;
+ uint32_t AFMT_GENERIC0_UPDATE;
+ uint32_t AFMT_GENERIC2_UPDATE;
+ uint32_t AFMT_GENERIC_HB0;
+ uint32_t AFMT_GENERIC_HB1;
+ uint32_t AFMT_GENERIC_HB2;
+ uint32_t AFMT_GENERIC_HB3;
+ uint32_t AFMT_GENERIC_LOCK_STATUS;
+ uint32_t AFMT_GENERIC_CONFLICT;
+ uint32_t AFMT_GENERIC_CONFLICT_CLR;
+ uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC0_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC1_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC2_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC3_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC4_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC5_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC6_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC7_FRAME_UPDATE;
+ uint32_t HDMI_GENERIC0_CONT;
+ uint32_t HDMI_GENERIC0_SEND;
+ uint32_t HDMI_GENERIC0_LINE;
+ uint32_t HDMI_GENERIC1_CONT;
+ uint32_t HDMI_GENERIC1_SEND;
+ uint32_t HDMI_GENERIC1_LINE;
+ uint32_t DP_PIXEL_ENCODING;
+ uint32_t DP_COMPONENT_DEPTH;
+ uint32_t DP_DYN_RANGE;
+ uint32_t DP_YCBCR_RANGE;
+ uint32_t HDMI_PACKET_GEN_VERSION;
+ uint32_t HDMI_KEEPOUT_MODE;
+ uint32_t HDMI_DEEP_COLOR_ENABLE;
+ uint32_t HDMI_CLOCK_CHANNEL_RATE;
+ uint32_t HDMI_DEEP_COLOR_DEPTH;
+ uint32_t HDMI_GC_CONT;
+ uint32_t HDMI_GC_SEND;
+ uint32_t HDMI_NULL_SEND;
+ uint32_t HDMI_DATA_SCRAMBLE_EN;
+ uint32_t HDMI_AUDIO_INFO_SEND;
+ uint32_t AFMT_AUDIO_INFO_UPDATE;
+ uint32_t HDMI_AUDIO_INFO_LINE;
+ uint32_t HDMI_GC_AVMUTE;
+ uint32_t DP_MSE_RATE_X;
+ uint32_t DP_MSE_RATE_Y;
+ uint32_t DP_MSE_RATE_UPDATE_PENDING;
+ uint32_t AFMT_AVI_INFO_VERSION;
+ uint32_t HDMI_AVI_INFO_SEND;
+ uint32_t HDMI_AVI_INFO_CONT;
+ uint32_t HDMI_AVI_INFO_LINE;
+ uint32_t DP_SEC_GSP0_ENABLE;
+ uint32_t DP_SEC_STREAM_ENABLE;
+ uint32_t DP_SEC_GSP1_ENABLE;
+ uint32_t DP_SEC_GSP2_ENABLE;
+ uint32_t DP_SEC_GSP3_ENABLE;
+ uint32_t DP_SEC_GSP4_ENABLE;
+ uint32_t DP_SEC_GSP5_ENABLE;
+ uint32_t DP_SEC_GSP6_ENABLE;
+ uint32_t DP_SEC_GSP7_ENABLE;
+ uint32_t DP_SEC_AVI_ENABLE;
+ uint32_t DP_SEC_MPG_ENABLE;
+ uint32_t DP_VID_STREAM_DIS_DEFER;
+ uint32_t DP_VID_STREAM_ENABLE;
+ uint32_t DP_VID_STREAM_STATUS;
+ uint32_t DP_STEER_FIFO_RESET;
+ uint32_t DP_VID_M_N_GEN_EN;
+ uint32_t DP_VID_N;
+ uint32_t DP_VID_M;
+ uint32_t DIG_START;
+ uint32_t AFMT_AUDIO_SRC_SELECT;
+ uint32_t AFMT_AUDIO_CHANNEL_ENABLE;
+ uint32_t HDMI_AUDIO_PACKETS_PER_LINE;
+ uint32_t HDMI_AUDIO_DELAY_EN;
+ uint32_t AFMT_60958_CS_UPDATE;
+ uint32_t AFMT_AUDIO_LAYOUT_OVRD;
+ uint32_t AFMT_60958_OSF_OVRD;
+ uint32_t HDMI_ACR_AUTO_SEND;
+ uint32_t HDMI_ACR_SOURCE;
+ uint32_t HDMI_ACR_AUDIO_PRIORITY;
+ uint32_t HDMI_ACR_CTS_32;
+ uint32_t HDMI_ACR_N_32;
+ uint32_t HDMI_ACR_CTS_44;
+ uint32_t HDMI_ACR_N_44;
+ uint32_t HDMI_ACR_CTS_48;
+ uint32_t HDMI_ACR_N_48;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+ uint32_t AFMT_60958_CS_CLOCK_ACCURACY;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+ uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_TIMESTAMP_MODE;
+ uint32_t DP_SEC_ASP_ENABLE;
+ uint32_t DP_SEC_ATP_ENABLE;
+ uint32_t DP_SEC_AIP_ENABLE;
+ uint32_t DP_SEC_ACM_ENABLE;
+ uint32_t AFMT_AUDIO_SAMPLE_SEND;
+ uint32_t AFMT_AUDIO_CLOCK_EN;
+ uint32_t TMDS_PIXEL_ENCODING;
+ uint32_t DIG_STEREOSYNC_SELECT;
+ uint32_t DIG_STEREOSYNC_GATE_EN;
+ uint32_t TMDS_COLOR_FORMAT;
+ uint32_t DP_DB_DISABLE;
+ uint32_t DP_MSA_MISC0;
+ uint32_t DP_MSA_HTOTAL;
+ uint32_t DP_MSA_VTOTAL;
+ uint32_t DP_MSA_HSTART;
+ uint32_t DP_MSA_VSTART;
+ uint32_t DP_MSA_HSYNCWIDTH;
+ uint32_t DP_MSA_HSYNCPOLARITY;
+ uint32_t DP_MSA_VSYNCWIDTH;
+ uint32_t DP_MSA_VSYNCPOLARITY;
+ uint32_t DP_MSA_HWIDTH;
+ uint32_t DP_MSA_VHEIGHT;
+ uint32_t HDMI_DB_DISABLE;
+ uint32_t DP_VID_N_MUL;
+ uint32_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce110_stream_enc_registers {
+ uint32_t AFMT_CNTL;
+ uint32_t AFMT_AVI_INFO0;
+ uint32_t AFMT_AVI_INFO1;
+ uint32_t AFMT_AVI_INFO2;
+ uint32_t AFMT_AVI_INFO3;
+ uint32_t AFMT_GENERIC_0;
+ uint32_t AFMT_GENERIC_1;
+ uint32_t AFMT_GENERIC_2;
+ uint32_t AFMT_GENERIC_3;
+ uint32_t AFMT_GENERIC_4;
+ uint32_t AFMT_GENERIC_5;
+ uint32_t AFMT_GENERIC_6;
+ uint32_t AFMT_GENERIC_7;
+ uint32_t AFMT_GENERIC_HDR;
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_VBI_PACKET_CONTROL1;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t DIG_FE_CNTL;
+ uint32_t DP_MSE_RATE_CNTL;
+ uint32_t DP_MSE_RATE_UPDATE;
+ uint32_t DP_PIXEL_FORMAT;
+ uint32_t DP_SEC_CNTL;
+ uint32_t DP_STEER_FIFO;
+ uint32_t DP_VID_M;
+ uint32_t DP_VID_N;
+ uint32_t DP_VID_STREAM_CNTL;
+ uint32_t DP_VID_TIMING;
+ uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_TIMESTAMP;
+ uint32_t HDMI_CONTROL;
+ uint32_t HDMI_GC;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL0;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL1;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL2;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL3;
+ uint32_t HDMI_INFOFRAME_CONTROL0;
+ uint32_t HDMI_INFOFRAME_CONTROL1;
+ uint32_t HDMI_VBI_PACKET_CONTROL;
+ uint32_t HDMI_AUDIO_PACKET_CONTROL;
+ uint32_t HDMI_ACR_PACKET_CONTROL;
+ uint32_t HDMI_ACR_32_0;
+ uint32_t HDMI_ACR_32_1;
+ uint32_t HDMI_ACR_44_0;
+ uint32_t HDMI_ACR_44_1;
+ uint32_t HDMI_ACR_48_0;
+ uint32_t HDMI_ACR_48_1;
+ uint32_t TMDS_CNTL;
+ uint32_t DP_DB_CNTL;
+ uint32_t DP_MSA_MISC;
+ uint32_t DP_MSA_COLORIMETRY;
+ uint32_t DP_MSA_TIMING_PARAM1;
+ uint32_t DP_MSA_TIMING_PARAM2;
+ uint32_t DP_MSA_TIMING_PARAM3;
+ uint32_t DP_MSA_TIMING_PARAM4;
+ uint32_t HDMI_DB_CONTROL;
+};
+
+struct dce110_stream_encoder {
+ struct stream_encoder base;
+ const struct dce110_stream_enc_registers *regs;
+ const struct dce_stream_encoder_shift *se_shift;
+ const struct dce_stream_encoder_mask *se_mask;
+};
+
+void dce110_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dce110_stream_enc_registers *regs,
+ const struct dce_stream_encoder_shift *se_shift,
+ const struct dce_stream_encoder_mask *se_mask);
+
+
+void dce110_se_audio_mute_control(
+ struct stream_encoder *enc, bool mute);
+
+void dce110_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+void dce110_se_dp_audio_enable(
+ struct stream_encoder *enc);
+
+void dce110_se_dp_audio_disable(
+ struct stream_encoder *enc);
+
+void dce110_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info);
+
+void dce110_se_hdmi_audio_disable(
+ struct stream_encoder *enc);
+
+#endif /* __DC_STREAM_ENCODER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
new file mode 100644
index 000000000000..ae32af31eff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_transform.h"
+#include "reg_helper.h"
+#include "opp.h"
+#include "basics/conversion.h"
+#include "dc.h"
+
+#define REG(reg) \
+ (xfm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ xfm_dce->xfm_shift->field_name, xfm_dce->xfm_mask->field_name
+
+#define CTX \
+ xfm_dce->base.ctx
+
+#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+#define GAMUT_MATRIX_SIZE 12
+#define SCL_PHASES 16
+
+enum dcp_out_trunc_round_mode {
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND
+};
+
+enum dcp_out_trunc_round_depth {
+ DCP_OUT_TRUNC_ROUND_DEPTH_14BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_13BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_11BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_9BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_8BIT
+};
+
+/* defines the various methods of bit reduction available for use */
+enum dcp_bit_depth_reduction_mode {
+ DCP_BIT_DEPTH_REDUCTION_MODE_DITHER,
+ DCP_BIT_DEPTH_REDUCTION_MODE_ROUND,
+ DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE,
+ DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED,
+ DCP_BIT_DEPTH_REDUCTION_MODE_INVALID
+};
+
+enum dcp_spatial_dither_mode {
+ DCP_SPATIAL_DITHER_MODE_AAAA,
+ DCP_SPATIAL_DITHER_MODE_A_AA_A,
+ DCP_SPATIAL_DITHER_MODE_AABBAABB,
+ DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC,
+ DCP_SPATIAL_DITHER_MODE_INVALID
+};
+
+enum dcp_spatial_dither_depth {
+ DCP_SPATIAL_DITHER_DEPTH_30BPP,
+ DCP_SPATIAL_DITHER_DEPTH_24BPP
+};
+
+enum csc_color_mode {
+ /* 00 - BITS2:0 Bypass */
+ CSC_COLOR_MODE_GRAPHICS_BYPASS,
+ /* 01 - hard coded coefficient TV RGB */
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
+ /* 04 - programmable OUTPUT CSC coefficient */
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
+};
+
+enum grph_color_adjust_option {
+ GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
+ GRPH_COLOR_MATRIX_SW
+};
+
+static const struct out_csc_color_matrix global_color_matrix[] = {
+{ COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+{ COLOR_SPACE_SRGB_LIMITED,
+ { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
+{ COLOR_SPACE_YCBCR601,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
+ 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
+ 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+/* TODO: correct values below */
+{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
+};
+
+static bool setup_scaling_configuration(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
+
+ if (data->taps.h_taps + data->taps.v_taps <= 2) {
+ /* Set bypass */
+ if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0)
+ REG_UPDATE_2(SCL_MODE, SCL_MODE, 0, SCL_PSCL_EN, 0);
+ else
+ REG_UPDATE(SCL_MODE, SCL_MODE, 0);
+ return false;
+ }
+
+ REG_SET_2(SCL_TAP_CONTROL, 0,
+ SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
+ SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
+
+ if (data->format <= PIXEL_FORMAT_GRPH_END)
+ REG_UPDATE(SCL_MODE, SCL_MODE, 1);
+ else
+ REG_UPDATE(SCL_MODE, SCL_MODE, 2);
+
+ if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0)
+ REG_UPDATE(SCL_MODE, SCL_PSCL_EN, 1);
+
+ /* 1 - Replace out of bound pixels with edge */
+ REG_SET(SCL_CONTROL, 0, SCL_BOUNDARY_MODE, 1);
+
+ return true;
+}
+
+static void program_overscan(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ int overscan_right = data->h_active
+ - data->recout.x - data->recout.width;
+ int overscan_bottom = data->v_active
+ - data->recout.y - data->recout.height;
+
+ if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+
+ if (overscan_right < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_right = 0;
+ }
+ if (overscan_bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_bottom = 0;
+ }
+
+ REG_SET_2(EXT_OVERSCAN_LEFT_RIGHT, 0,
+ EXT_OVERSCAN_LEFT, data->recout.x,
+ EXT_OVERSCAN_RIGHT, overscan_right);
+ REG_SET_2(EXT_OVERSCAN_TOP_BOTTOM, 0,
+ EXT_OVERSCAN_TOP, data->recout.y,
+ EXT_OVERSCAN_BOTTOM, overscan_bottom);
+}
+
+static void program_multi_taps_filter(
+ struct dce_transform *xfm_dce,
+ int taps,
+ const uint16_t *coeffs,
+ enum ram_filter_type filter_type)
+{
+ int phase, pair;
+ int array_idx = 0;
+ int taps_pairs = (taps + 1) / 2;
+ int phases_to_program = SCL_PHASES / 2 + 1;
+
+ uint32_t power_ctl = 0;
+
+ if (!coeffs)
+ return;
+
+ /*We need to disable power gating on coeff memory to do programming*/
+ if (REG(DCFE_MEM_PWR_CTRL)) {
+ power_ctl = REG_READ(DCFE_MEM_PWR_CTRL);
+ REG_SET(DCFE_MEM_PWR_CTRL, power_ctl, SCL_COEFF_MEM_PWR_DIS, 1);
+
+ REG_WAIT(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, 0, 1, 10);
+ }
+ for (phase = 0; phase < phases_to_program; phase++) {
+ /*we always program N/2 + 1 phases, total phases N, but N/2-1 are just mirror
+ phase 0 is unique and phase N/2 is unique if N is even*/
+ for (pair = 0; pair < taps_pairs; pair++) {
+ uint16_t odd_coeff = 0;
+ uint16_t even_coeff = coeffs[array_idx];
+
+ REG_SET_3(SCL_COEF_RAM_SELECT, 0,
+ SCL_C_RAM_FILTER_TYPE, filter_type,
+ SCL_C_RAM_PHASE, phase,
+ SCL_C_RAM_TAP_PAIR_IDX, pair);
+
+ if (taps % 2 && pair == taps_pairs - 1)
+ array_idx++;
+ else {
+ odd_coeff = coeffs[array_idx + 1];
+ array_idx += 2;
+ }
+
+ REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
+ SCL_C_RAM_EVEN_TAP_COEF_EN, 1,
+ SCL_C_RAM_EVEN_TAP_COEF, even_coeff,
+ SCL_C_RAM_ODD_TAP_COEF_EN, 1,
+ SCL_C_RAM_ODD_TAP_COEF, odd_coeff);
+ }
+ }
+
+ /*We need to restore power gating on coeff memory to initial state*/
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_WRITE(DCFE_MEM_PWR_CTRL, power_ctl);
+}
+
+static void program_viewport(
+ struct dce_transform *xfm_dce,
+ const struct rect *view_port)
+{
+ REG_SET_2(VIEWPORT_START, 0,
+ VIEWPORT_X_START, view_port->x,
+ VIEWPORT_Y_START, view_port->y);
+
+ REG_SET_2(VIEWPORT_SIZE, 0,
+ VIEWPORT_HEIGHT, view_port->height,
+ VIEWPORT_WIDTH, view_port->width);
+
+ /* TODO: add stereo support */
+}
+
+static void calculate_inits(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data,
+ struct scl_ratios_inits *inits)
+{
+ struct fixed31_32 h_init;
+ struct fixed31_32 v_init;
+
+ inits->h_int_scale_ratio =
+ dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio =
+ dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+
+ h_init =
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_add(
+ data->ratios.horz,
+ dal_fixed31_32_from_int(data->taps.h_taps + 1)),
+ 2);
+ inits->h_init.integer = dal_fixed31_32_floor(h_init);
+ inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
+
+ v_init =
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_add(
+ data->ratios.vert,
+ dal_fixed31_32_from_int(data->taps.v_taps + 1)),
+ 2);
+ inits->v_init.integer = dal_fixed31_32_floor(v_init);
+ inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
+}
+
+static void program_scl_ratios_inits(
+ struct dce_transform *xfm_dce,
+ struct scl_ratios_inits *inits)
+{
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+ SCL_H_SCALE_RATIO, inits->h_int_scale_ratio);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+ SCL_V_SCALE_RATIO, inits->v_int_scale_ratio);
+
+ REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+ SCL_H_INIT_INT, inits->h_init.integer,
+ SCL_H_INIT_FRAC, inits->h_init.fraction);
+
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_INT, inits->v_init.integer,
+ SCL_V_INIT_FRAC, inits->v_init.fraction);
+
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+}
+
+static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 4)
+ return get_filter_4tap_16p(ratio);
+ else if (taps == 3)
+ return get_filter_3tap_16p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_16p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+static void dce_transform_set_scaler(
+ struct transform *xfm,
+ const struct scaler_data *data)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ bool is_scaling_required;
+ bool filter_updated = false;
+ const uint16_t *coeffs_v, *coeffs_h;
+
+ /*Use all three pieces of memory always*/
+ REG_SET_2(LB_MEMORY_CTRL, 0,
+ LB_MEMORY_CONFIG, 0,
+ LB_MEMORY_SIZE, xfm_dce->lb_memory_size);
+
+ /* Clear SCL_F_SHARP_CONTROL value to 0 */
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+
+ /* 1. Program overscan */
+ program_overscan(xfm_dce, data);
+
+ /* 2. Program taps and configuration */
+ is_scaling_required = setup_scaling_configuration(xfm_dce, data);
+
+ if (is_scaling_required) {
+ /* 3. Calculate and program ratio, filter initialization */
+ struct scl_ratios_inits inits = { 0 };
+
+ calculate_inits(xfm_dce, data, &inits);
+
+ program_scl_ratios_inits(xfm_dce, &inits);
+
+ coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert);
+ coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz);
+
+ if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
+ /* 4. Program vertical filters */
+ if (xfm_dce->filter_v == NULL)
+ REG_SET(SCL_VERT_FILTER_CONTROL, 0,
+ SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_RGB_Y_VERTICAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_ALPHA_VERTICAL);
+
+ /* 5. Program horizontal filters */
+ if (xfm_dce->filter_h == NULL)
+ REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
+ SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_RGB_Y_HORIZONTAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_ALPHA_HORIZONTAL);
+
+ xfm_dce->filter_v = coeffs_v;
+ xfm_dce->filter_h = coeffs_h;
+ filter_updated = true;
+ }
+ }
+
+ /* 6. Program the viewport */
+ program_viewport(xfm_dce, &data->viewport);
+
+ /* 7. Set bit to flip to new coefficient memory */
+ if (filter_updated)
+ REG_UPDATE(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, 1);
+
+ REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en);
+}
+
+/*****************************************************************************
+ * set_clamp
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ * @brief
+ * Programs clamp according to panel bit depth.
+ *
+ *******************************************************************************/
+static void set_clamp(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth)
+{
+ int clamp_max = 0;
+
+ /* At the clamp block the data will be MSB aligned, so we set the max
+ * clamp accordingly.
+ * For example, the max value for 6 bits MSB aligned (14 bit bus) would
+ * be "11 1111 0000 0000" in binary, so 0x3F00.
+ */
+ switch (depth) {
+ case COLOR_DEPTH_666:
+ /* 6bit MSB aligned on 14 bit bus '11 1111 0000 0000' */
+ clamp_max = 0x3F00;
+ break;
+ case COLOR_DEPTH_888:
+ /* 8bit MSB aligned on 14 bit bus '11 1111 1100 0000' */
+ clamp_max = 0x3FC0;
+ break;
+ case COLOR_DEPTH_101010:
+ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+ clamp_max = 0x3FFC;
+ break;
+ case COLOR_DEPTH_121212:
+ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
+ clamp_max = 0x3FFF;
+ break;
+ default:
+ clamp_max = 0x3FC0;
+ BREAK_TO_DEBUGGER(); /* Invalid clamp bit depth */
+ }
+ REG_SET_2(OUT_CLAMP_CONTROL_B_CB, 0,
+ OUT_CLAMP_MIN_B_CB, 0,
+ OUT_CLAMP_MAX_B_CB, clamp_max);
+
+ REG_SET_2(OUT_CLAMP_CONTROL_G_Y, 0,
+ OUT_CLAMP_MIN_G_Y, 0,
+ OUT_CLAMP_MAX_G_Y, clamp_max);
+
+ REG_SET_2(OUT_CLAMP_CONTROL_R_CR, 0,
+ OUT_CLAMP_MIN_R_CR, 0,
+ OUT_CLAMP_MAX_R_CR, clamp_max);
+}
+
+/*******************************************************************************
+ * set_round
+ *
+ * @brief
+ * Programs Round/Truncate
+ *
+ * @param [in] mode :round or truncate
+ * @param [in] depth :bit depth to round/truncate to
+ OUT_ROUND_TRUNC_MODE 3:0 0xA Output data round or truncate mode
+ POSSIBLE VALUES:
+ 00 - truncate to u0.12
+ 01 - truncate to u0.11
+ 02 - truncate to u0.10
+ 03 - truncate to u0.9
+ 04 - truncate to u0.8
+ 05 - reserved
+ 06 - truncate to u0.14
+ 07 - truncate to u0.13 set_reg_field_value(
+ value,
+ clamp_max,
+ OUT_CLAMP_CONTROL_R_CR,
+ OUT_CLAMP_MAX_R_CR);
+ 08 - round to u0.12
+ 09 - round to u0.11
+ 10 - round to u0.10
+ 11 - round to u0.9
+ 12 - round to u0.8
+ 13 - reserved
+ 14 - round to u0.14
+ 15 - round to u0.13
+
+ ******************************************************************************/
+static void set_round(
+ struct dce_transform *xfm_dce,
+ enum dcp_out_trunc_round_mode mode,
+ enum dcp_out_trunc_round_depth depth)
+{
+ int depth_bits = 0;
+ int mode_bit = 0;
+
+ /* set up bit depth */
+ switch (depth) {
+ case DCP_OUT_TRUNC_ROUND_DEPTH_14BIT:
+ depth_bits = 6;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_13BIT:
+ depth_bits = 7;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_12BIT:
+ depth_bits = 0;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_11BIT:
+ depth_bits = 1;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_10BIT:
+ depth_bits = 2;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_9BIT:
+ depth_bits = 3;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_8BIT:
+ depth_bits = 4;
+ break;
+ default:
+ depth_bits = 4;
+ BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_depth */
+ }
+
+ /* set up round or truncate */
+ switch (mode) {
+ case DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE:
+ mode_bit = 0;
+ break;
+ case DCP_OUT_TRUNC_ROUND_MODE_ROUND:
+ mode_bit = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_mode */
+ }
+
+ depth_bits |= mode_bit << 3;
+
+ REG_SET(OUT_ROUND_CONTROL, 0, OUT_ROUND_TRUNC_MODE, depth_bits);
+}
+
+/*****************************************************************************
+ * set_dither
+ *
+ * @brief
+ * Programs Dither
+ *
+ * @param [in] dither_enable : enable dither
+ * @param [in] dither_mode : dither mode to set
+ * @param [in] dither_depth : bit depth to dither to
+ * @param [in] frame_random_enable : enable frame random
+ * @param [in] rgb_random_enable : enable rgb random
+ * @param [in] highpass_random_enable : enable highpass random
+ *
+ ******************************************************************************/
+
+static void set_dither(
+ struct dce_transform *xfm_dce,
+ bool dither_enable,
+ enum dcp_spatial_dither_mode dither_mode,
+ enum dcp_spatial_dither_depth dither_depth,
+ bool frame_random_enable,
+ bool rgb_random_enable,
+ bool highpass_random_enable)
+{
+ int dither_depth_bits = 0;
+ int dither_mode_bits = 0;
+
+ switch (dither_mode) {
+ case DCP_SPATIAL_DITHER_MODE_AAAA:
+ dither_mode_bits = 0;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_A_AA_A:
+ dither_mode_bits = 1;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_AABBAABB:
+ dither_mode_bits = 2;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC:
+ dither_mode_bits = 3;
+ break;
+ default:
+ /* Invalid dcp_spatial_dither_mode */
+ BREAK_TO_DEBUGGER();
+ }
+
+ switch (dither_depth) {
+ case DCP_SPATIAL_DITHER_DEPTH_30BPP:
+ dither_depth_bits = 0;
+ break;
+ case DCP_SPATIAL_DITHER_DEPTH_24BPP:
+ dither_depth_bits = 1;
+ break;
+ default:
+ /* Invalid dcp_spatial_dither_depth */
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* write the register */
+ REG_SET_6(DCP_SPATIAL_DITHER_CNTL, 0,
+ DCP_SPATIAL_DITHER_EN, dither_enable,
+ DCP_SPATIAL_DITHER_MODE, dither_mode_bits,
+ DCP_SPATIAL_DITHER_DEPTH, dither_depth_bits,
+ DCP_FRAME_RANDOM_ENABLE, frame_random_enable,
+ DCP_RGB_RANDOM_ENABLE, rgb_random_enable,
+ DCP_HIGHPASS_RANDOM_ENABLE, highpass_random_enable);
+}
+
+/*****************************************************************************
+ * dce_transform_bit_depth_reduction_program
+ *
+ * @brief
+ * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate,
+ * Dither) for dce
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ ******************************************************************************/
+static void program_bit_depth_reduction(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ enum dcp_bit_depth_reduction_mode depth_reduction_mode;
+ enum dcp_spatial_dither_mode spatial_dither_mode;
+ bool frame_random_enable;
+ bool rgb_random_enable;
+ bool highpass_random_enable;
+
+ ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+
+ if (bit_depth_params->flags.SPATIAL_DITHER_ENABLED) {
+ depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DITHER;
+ frame_random_enable = true;
+ rgb_random_enable = true;
+ highpass_random_enable = true;
+
+ } else {
+ depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED;
+ frame_random_enable = false;
+ rgb_random_enable = false;
+ highpass_random_enable = false;
+ }
+
+ spatial_dither_mode = DCP_SPATIAL_DITHER_MODE_A_AA_A;
+
+ set_clamp(xfm_dce, depth);
+
+ switch (depth_reduction_mode) {
+ case DCP_BIT_DEPTH_REDUCTION_MODE_DITHER:
+ /* Spatial Dither: Set round/truncate to bypass (12bit),
+ * enable Dither (30bpp) */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+ set_dither(xfm_dce, true, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ case DCP_BIT_DEPTH_REDUCTION_MODE_ROUND:
+ /* Round: Enable round (10bit), disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ case DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE: /* Truncate */
+ /* Truncate: Enable truncate (10bit), disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+
+ case DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED: /* Disabled */
+ /* Truncate: Set round/truncate to bypass (12bit),
+ * disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ default:
+ /* Invalid DCP Depth reduction mode */
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static int dce_transform_get_max_num_of_supported_lines(
+ struct dce_transform *xfm_dce,
+ enum lb_pixel_depth depth,
+ int pixel_width)
+{
+ int pixels_per_entries = 0;
+ int max_pixels_supports = 0;
+
+ ASSERT(pixel_width);
+
+ /* Find number of pixels that can fit into a single LB entry and
+ * take floor of the value since we cannot store a single pixel
+ * across multiple entries. */
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 18;
+ break;
+
+ case LB_PIXEL_DEPTH_24BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 24;
+ break;
+
+ case LB_PIXEL_DEPTH_30BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 30;
+ break;
+
+ case LB_PIXEL_DEPTH_36BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 36;
+ break;
+
+ default:
+ dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LB pixel depth",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ ASSERT(pixels_per_entries);
+
+ max_pixels_supports =
+ pixels_per_entries *
+ xfm_dce->lb_memory_size;
+
+ return (max_pixels_supports / pixel_width);
+}
+
+static void set_denormalization(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth)
+{
+ int denorm_mode = 0;
+
+ switch (depth) {
+ case COLOR_DEPTH_666:
+ /* 63/64 for 6 bit output color depth */
+ denorm_mode = 1;
+ break;
+ case COLOR_DEPTH_888:
+ /* Unity for 8 bit output color depth
+ * because prescale is disabled by default */
+ denorm_mode = 0;
+ break;
+ case COLOR_DEPTH_101010:
+ /* 1023/1024 for 10 bit output color depth */
+ denorm_mode = 3;
+ break;
+ case COLOR_DEPTH_121212:
+ /* 4095/4096 for 12 bit output color depth */
+ denorm_mode = 5;
+ break;
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* not valid used case! */
+ break;
+ }
+
+ REG_SET(DENORM_CONTROL, 0, DENORM_MODE, denorm_mode);
+}
+
+static void dce_transform_set_pixel_storage_depth(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_depth, expan_mode;
+ enum dc_color_depth color_depth;
+
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ color_depth = COLOR_DEPTH_666;
+ pixel_depth = 2;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ color_depth = COLOR_DEPTH_888;
+ pixel_depth = 1;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ color_depth = COLOR_DEPTH_121212;
+ pixel_depth = 3;
+ expan_mode = 0;
+ break;
+ default:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ set_denormalization(xfm_dce, color_depth);
+ program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params);
+
+ REG_UPDATE_2(LB_DATA_FORMAT,
+ PIXEL_DEPTH, pixel_depth,
+ PIXEL_EXPAN_MODE, expan_mode);
+
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /*we should use unsupported capabilities
+ * unless it is required by w/a*/
+ dm_logger_write(xfm->ctx->logger, LOG_WARNING,
+ "%s: Capability not supported",
+ __func__);
+ }
+}
+
+static void program_gamut_remap(
+ struct dce_transform *xfm_dce,
+ const uint16_t *reg_val)
+{
+ if (reg_val) {
+ REG_SET_2(GAMUT_REMAP_C11_C12, 0,
+ GAMUT_REMAP_C11, reg_val[0],
+ GAMUT_REMAP_C12, reg_val[1]);
+ REG_SET_2(GAMUT_REMAP_C13_C14, 0,
+ GAMUT_REMAP_C13, reg_val[2],
+ GAMUT_REMAP_C14, reg_val[3]);
+ REG_SET_2(GAMUT_REMAP_C21_C22, 0,
+ GAMUT_REMAP_C21, reg_val[4],
+ GAMUT_REMAP_C22, reg_val[5]);
+ REG_SET_2(GAMUT_REMAP_C23_C24, 0,
+ GAMUT_REMAP_C23, reg_val[6],
+ GAMUT_REMAP_C24, reg_val[7]);
+ REG_SET_2(GAMUT_REMAP_C31_C32, 0,
+ GAMUT_REMAP_C31, reg_val[8],
+ GAMUT_REMAP_C32, reg_val[9]);
+ REG_SET_2(GAMUT_REMAP_C33_C34, 0,
+ GAMUT_REMAP_C33, reg_val[10],
+ GAMUT_REMAP_C34, reg_val[11]);
+
+ REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 1);
+ } else
+ REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 0);
+
+}
+
+/**
+ *****************************************************************************
+ * Function: dal_transform_wide_gamut_set_gamut_remap
+ *
+ * @param [in] const struct xfm_grph_csc_adjustment *adjust
+ *
+ * @return
+ * void
+ *
+ * @note calculate and apply color temperature adjustment to in Rgb color space
+ *
+ * @see
+ *
+ *****************************************************************************
+ */
+static void dce_transform_set_gamut_remap(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(xfm_dce, NULL);
+ else {
+ struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE];
+ uint16_t arr_reg_val[GAMUT_MATRIX_SIZE];
+
+ arr_matrix[0] = adjust->temperature_matrix[0];
+ arr_matrix[1] = adjust->temperature_matrix[1];
+ arr_matrix[2] = adjust->temperature_matrix[2];
+ arr_matrix[3] = dal_fixed31_32_zero;
+
+ arr_matrix[4] = adjust->temperature_matrix[3];
+ arr_matrix[5] = adjust->temperature_matrix[4];
+ arr_matrix[6] = adjust->temperature_matrix[5];
+ arr_matrix[7] = dal_fixed31_32_zero;
+
+ arr_matrix[8] = adjust->temperature_matrix[6];
+ arr_matrix[9] = adjust->temperature_matrix[7];
+ arr_matrix[10] = adjust->temperature_matrix[8];
+ arr_matrix[11] = dal_fixed31_32_zero;
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE);
+
+ program_gamut_remap(xfm_dce, arr_reg_val);
+ }
+}
+
+static uint32_t decide_taps(struct fixed31_32 ratio, uint32_t in_taps, bool chroma)
+{
+ uint32_t taps;
+
+ if (IDENTITY_RATIO(ratio)) {
+ return 1;
+ } else if (in_taps != 0) {
+ taps = in_taps;
+ } else {
+ taps = 4;
+ }
+
+ if (chroma) {
+ taps /= 2;
+ if (taps < 2)
+ taps = 2;
+ }
+
+ return taps;
+}
+
+
+bool dce_transform_get_optimal_number_of_taps(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_width = scl_data->viewport.width;
+ int max_num_of_lines;
+
+ if (xfm_dce->prescaler_on &&
+ (scl_data->viewport.width > scl_data->recout.width))
+ pixel_width = scl_data->recout.width;
+
+ max_num_of_lines = dce_transform_get_max_num_of_supported_lines(
+ xfm_dce,
+ scl_data->lb_params.depth,
+ pixel_width);
+
+ /* Fail if in_taps are impossible */
+ if (in_taps->v_taps >= max_num_of_lines)
+ return false;
+
+ /*
+ * Set taps according to this policy (in this order)
+ * - Use 1 for no scaling
+ * - Use input taps
+ * - Use 4 and reduce as required by line buffer size
+ * - Decide chroma taps if chroma is scaled
+ *
+ * Ignore input chroma taps. Decide based on non-chroma
+ */
+ scl_data->taps.h_taps = decide_taps(scl_data->ratios.horz, in_taps->h_taps, false);
+ scl_data->taps.v_taps = decide_taps(scl_data->ratios.vert, in_taps->v_taps, false);
+ scl_data->taps.h_taps_c = decide_taps(scl_data->ratios.horz_c, in_taps->h_taps, true);
+ scl_data->taps.v_taps_c = decide_taps(scl_data->ratios.vert_c, in_taps->v_taps, true);
+
+ if (!IDENTITY_RATIO(scl_data->ratios.vert)) {
+ /* reduce v_taps if needed but ensure we have at least two */
+ if (in_taps->v_taps == 0
+ && max_num_of_lines <= scl_data->taps.v_taps
+ && scl_data->taps.v_taps > 1) {
+ scl_data->taps.v_taps = max_num_of_lines - 1;
+ }
+
+ if (scl_data->taps.v_taps <= 1)
+ return false;
+ }
+
+ if (!IDENTITY_RATIO(scl_data->ratios.vert_c)) {
+ /* reduce chroma v_taps if needed but ensure we have at least two */
+ if (max_num_of_lines <= scl_data->taps.v_taps_c && scl_data->taps.v_taps_c > 1) {
+ scl_data->taps.v_taps_c = max_num_of_lines - 1;
+ }
+
+ if (scl_data->taps.v_taps_c <= 1)
+ return false;
+ }
+
+ /* we've got valid taps */
+ return true;
+}
+
+static void dce_transform_reset(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ xfm_dce->filter_h = NULL;
+ xfm_dce->filter_v = NULL;
+}
+
+static void program_color_matrix(
+ struct dce_transform *xfm_dce,
+ const struct out_csc_color_matrix *tbl_entry,
+ enum grph_color_adjust_option options)
+{
+ {
+ REG_SET_2(OUTPUT_CSC_C11_C12, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[0],
+ OUTPUT_CSC_C12, tbl_entry->regval[1]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C13_C14, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[2],
+ OUTPUT_CSC_C12, tbl_entry->regval[3]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C21_C22, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[4],
+ OUTPUT_CSC_C12, tbl_entry->regval[5]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C23_C24, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[6],
+ OUTPUT_CSC_C12, tbl_entry->regval[7]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C31_C32, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[8],
+ OUTPUT_CSC_C12, tbl_entry->regval[9]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C33_C34, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[10],
+ OUTPUT_CSC_C12, tbl_entry->regval[11]);
+ }
+}
+
+static bool configure_graphics_mode(
+ struct dce_transform *xfm_dce,
+ enum csc_color_mode config,
+ enum graphics_csc_adjust_type csc_adjust_type,
+ enum dc_color_space color_space)
+{
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+
+ if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
+ if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC) {
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 4);
+ } else {
+
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* by pass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* TV RGB */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 1);
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 2);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 3);
+ break;
+ default:
+ return false;
+ }
+ }
+ } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* by pass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+ break;
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* TV RGB */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 1);
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 2);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 3);
+ break;
+ default:
+ return false;
+ }
+
+ } else
+ /* by pass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+
+ return true;
+}
+
+void dce110_opp_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+
+ program_color_matrix(
+ xfm_dce, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW);
+
+ /* We did everything ,now program DxOUTPUT_CSC_CONTROL */
+ configure_graphics_mode(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
+ tbl_entry->color_space);
+}
+
+void dce110_opp_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
+
+ if (default_adjust->force_hw_default == false) {
+ const struct out_csc_color_matrix *elm;
+ /* currently parameter not in use */
+ enum grph_color_adjust_option option =
+ GRPH_COLOR_MATRIX_HW_DEFAULT;
+ uint32_t i;
+ /*
+ * HW default false we program locally defined matrix
+ * HW default true we use predefined hw matrix and we
+ * do not need to program matrix
+ * OEM wants the HW default via runtime parameter.
+ */
+ option = GRPH_COLOR_MATRIX_SW;
+
+ for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
+ elm = &global_color_matrix[i];
+ if (elm->color_space != default_adjust->out_color_space)
+ continue;
+ /* program the matrix with default values from this
+ * file */
+ program_color_matrix(xfm_dce, elm, option);
+ config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ break;
+ }
+ }
+
+ /* configure the what we programmed :
+ * 1. Default values from this file
+ * 2. Use hardware default from ROM_A and we do not need to program
+ * matrix */
+
+ configure_graphics_mode(xfm_dce, config,
+ default_adjust->csc_adjust_type,
+ default_adjust->out_color_space);
+}
+
+static void program_pwl(
+ struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ uint32_t value;
+ int retval;
+
+ {
+ uint8_t max_tries = 10;
+ uint8_t counter = 0;
+
+ /* Power on LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, 1);
+ else
+ REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 1);
+
+ while (counter < max_tries) {
+ if (REG(DCFE_MEM_PWR_STATUS)) {
+ value = REG_READ(DCFE_MEM_PWR_STATUS);
+ REG_GET(DCFE_MEM_PWR_STATUS,
+ DCP_REGAMMA_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
+ } else {
+ value = REG_READ(DCFE_MEM_LIGHT_SLEEP_CNTL);
+ REG_GET(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
+ }
+ }
+
+ if (counter == max_tries) {
+ dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
+ "%s: regamma lut was not powered on "
+ "in a timely manner,"
+ " programming still proceeds\n",
+ __func__);
+ }
+ }
+
+ REG_UPDATE(REGAMMA_LUT_WRITE_EN_MASK,
+ REGAMMA_LUT_WRITE_EN_MASK, 7);
+
+ REG_WRITE(REGAMMA_LUT_INDEX, 0);
+
+ /* Program REGAMMA_LUT_DATA */
+ {
+ uint32_t i = 0;
+ const struct pwl_result_data *rgb = params->rgb_resulted;
+
+ while (i != params->hw_points_num) {
+
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->blue_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_blue_reg);
+
+ ++rgb;
+ ++i;
+ }
+ }
+
+ /* we are done with DCP LUT memory; re-enable low power mode */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, 0);
+ else
+ REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 0);
+}
+
+static void regamma_config_regions_and_segments(
+ struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+
+ {
+ REG_SET_2(REGAMMA_CNTLA_START_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_START, params->arr_points[0].custom_float_x,
+ REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, 0);
+ }
+ {
+ REG_SET(REGAMMA_CNTLA_SLOPE_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, params->arr_points[0].custom_float_slope);
+
+ }
+ {
+ REG_SET(REGAMMA_CNTLA_END_CNTL1, 0,
+ REGAMMA_CNTLA_EXP_REGION_END, params->arr_points[1].custom_float_x);
+ }
+ {
+ REG_SET_2(REGAMMA_CNTLA_END_CNTL2, 0,
+ REGAMMA_CNTLA_EXP_REGION_END_BASE, params->arr_points[1].custom_float_y,
+ REGAMMA_CNTLA_EXP_REGION_END_SLOPE, params->arr_points[2].custom_float_slope);
+ }
+
+ curve = params->arr_curve_points;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_0_1, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_2_3, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_4_5, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_6_7, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_8_9, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_10_11, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_12_13, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_14_15, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+
+void dce110_opp_program_regamma_pwl(
+ struct transform *xfm,
+ const struct pwl_params *params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ /* Setup regions */
+ regamma_config_regions_and_segments(xfm_dce, params);
+
+ /* Program PWL */
+ program_pwl(xfm_dce, params);
+}
+
+void dce110_opp_power_on_regamma_lut(
+ struct transform *xfm,
+ bool power_on)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE_2(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, power_on,
+ DCP_LUT_MEM_PWR_DIS, power_on);
+ else
+ REG_UPDATE_2(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, power_on,
+ DCP_LUT_LIGHT_SLEEP_DIS, power_on);
+
+}
+
+void dce110_opp_set_regamma_mode(struct transform *xfm,
+ enum opp_regamma mode)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ REG_SET(REGAMMA_CONTROL, 0,
+ GRPH_REGAMMA_MODE, mode);
+}
+
+static const struct transform_funcs dce_transform_funcs = {
+ .transform_reset = dce_transform_reset,
+ .transform_set_scaler =
+ dce_transform_set_scaler,
+ .transform_set_gamut_remap =
+ dce_transform_set_gamut_remap,
+ .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment,
+ .opp_set_csc_default = dce110_opp_set_csc_default,
+ .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut,
+ .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl,
+ .opp_set_regamma_mode = dce110_opp_set_regamma_mode,
+ .transform_set_pixel_storage_depth =
+ dce_transform_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps =
+ dce_transform_get_optimal_number_of_taps
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce_transform_construct(
+ struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask)
+{
+ xfm_dce->base.ctx = ctx;
+
+ xfm_dce->base.inst = inst;
+ xfm_dce->base.funcs = &dce_transform_funcs;
+
+ xfm_dce->regs = regs;
+ xfm_dce->xfm_shift = xfm_shift;
+ xfm_dce->xfm_mask = xfm_mask;
+
+ xfm_dce->prescaler_on = true;
+ xfm_dce->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
new file mode 100644
index 000000000000..bfc94b4927b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCE_DCE_TRANSFORM_H_
+#define _DCE_DCE_TRANSFORM_H_
+
+
+#include "transform.h"
+
+#define TO_DCE_TRANSFORM(transform)\
+ container_of(transform, struct dce_transform, base)
+
+#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
+#define LB_BITS_PER_ENTRY 144
+
+#define XFM_COMMON_REG_LIST_DCE_BASE(id) \
+ SRI(LB_DATA_FORMAT, LB, id), \
+ SRI(GAMUT_REMAP_CONTROL, DCP, id), \
+ SRI(GAMUT_REMAP_C11_C12, DCP, id), \
+ SRI(GAMUT_REMAP_C13_C14, DCP, id), \
+ SRI(GAMUT_REMAP_C21_C22, DCP, id), \
+ SRI(GAMUT_REMAP_C23_C24, DCP, id), \
+ SRI(GAMUT_REMAP_C31_C32, DCP, id), \
+ SRI(GAMUT_REMAP_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_C11_C12, DCP, id), \
+ SRI(OUTPUT_CSC_C13_C14, DCP, id), \
+ SRI(OUTPUT_CSC_C21_C22, DCP, id), \
+ SRI(OUTPUT_CSC_C23_C24, DCP, id), \
+ SRI(OUTPUT_CSC_C31_C32, DCP, id), \
+ SRI(OUTPUT_CSC_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_CONTROL, DCP, id), \
+ SRI(REGAMMA_CNTLA_START_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_SLOPE_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL1, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL2, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_0_1, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_2_3, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_4_5, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_6_7, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_8_9, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_10_11, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_12_13, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_14_15, DCP, id), \
+ SRI(REGAMMA_LUT_WRITE_EN_MASK, DCP, id), \
+ SRI(REGAMMA_LUT_INDEX, DCP, id), \
+ SRI(REGAMMA_LUT_DATA, DCP, id), \
+ SRI(REGAMMA_CONTROL, DCP, id), \
+ SRI(DENORM_CONTROL, DCP, id), \
+ SRI(DCP_SPATIAL_DITHER_CNTL, DCP, id), \
+ SRI(OUT_ROUND_CONTROL, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_R_CR, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_G_Y, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_B_CB, DCP, id), \
+ SRI(SCL_MODE, SCL, id), \
+ SRI(SCL_TAP_CONTROL, SCL, id), \
+ SRI(SCL_CONTROL, SCL, id), \
+ SRI(SCL_BYPASS_CONTROL, SCL, id), \
+ SRI(EXT_OVERSCAN_LEFT_RIGHT, SCL, id), \
+ SRI(EXT_OVERSCAN_TOP_BOTTOM, SCL, id), \
+ SRI(SCL_VERT_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_HORZ_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_COEF_RAM_SELECT, SCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
+ SRI(VIEWPORT_START, SCL, id), \
+ SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, SCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, SCL, id), \
+ SRI(SCL_AUTOMATIC_MODE_CONTROL, SCL, id), \
+ SRI(LB_MEMORY_CTRL, LB, id), \
+ SRI(SCL_UPDATE, SCL, id), \
+ SRI(SCL_F_SHARP_CONTROL, SCL, id)
+
+#define XFM_COMMON_REG_LIST_DCE80(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_LIGHT_SLEEP_CNTL, CRTC, id)
+
+#define XFM_COMMON_REG_LIST_DCE100(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, CRTC, id), \
+ SRI(DCFE_MEM_PWR_STATUS, CRTC, id)
+
+#define XFM_COMMON_REG_LIST_DCE110(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, DCFE, id), \
+ SRI(DCFE_MEM_PWR_STATUS, DCFE, id)
+
+#define XFM_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
+ XFM_SF(OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DENORM_CONTROL, DENORM_MODE, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+ XFM_SF(GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\
+ XFM_SF(OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\
+ XFM_SF(REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(SCL_MODE, SCL_MODE, mask_sh), \
+ XFM_SF(SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
+ XFM_SF(SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+ XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
+ XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, ALPHA_EN, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE80(mask_sh) \
+ XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, DCP_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_MEM_PWR_STATE, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE110(mask_sh) \
+ XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE_MEM_PWR_STATUS, DCP_REGAMMA_MEM_PWR_STATE, mask_sh),\
+ XFM_SF(SCL_MODE, SCL_PSCL_EN, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
+ XFM_SF(DCP0_OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DENORM_CONTROL, DENORM_MODE, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+ XFM_SF(DCP0_OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\
+ XFM_SF(DCP0_OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\
+ XFM_SF(DCP0_OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(SCL0_SCL_MODE, SCL_MODE, mask_sh), \
+ XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL0_SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
+ XFM_SF(SCL0_SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+ XFM_SF(LB0_LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
+ XFM_SF(LB0_LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, ALPHA_EN, mask_sh), \
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE0_DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
+ XFM_SF(SCL0_SCL_MODE, SCL_PSCL_EN, mask_sh)
+
+#define XFM_REG_FIELD_LIST(type) \
+ type OUT_CLAMP_MIN_B_CB; \
+ type OUT_CLAMP_MAX_B_CB; \
+ type OUT_CLAMP_MIN_G_Y; \
+ type OUT_CLAMP_MAX_G_Y; \
+ type OUT_CLAMP_MIN_R_CR; \
+ type OUT_CLAMP_MAX_R_CR; \
+ type OUT_ROUND_TRUNC_MODE; \
+ type DCP_SPATIAL_DITHER_EN; \
+ type DCP_SPATIAL_DITHER_MODE; \
+ type DCP_SPATIAL_DITHER_DEPTH; \
+ type DCP_FRAME_RANDOM_ENABLE; \
+ type DCP_RGB_RANDOM_ENABLE; \
+ type DCP_HIGHPASS_RANDOM_ENABLE; \
+ type DENORM_MODE; \
+ type PIXEL_DEPTH; \
+ type PIXEL_EXPAN_MODE; \
+ type GAMUT_REMAP_C11; \
+ type GAMUT_REMAP_C12; \
+ type GAMUT_REMAP_C13; \
+ type GAMUT_REMAP_C14; \
+ type GAMUT_REMAP_C21; \
+ type GAMUT_REMAP_C22; \
+ type GAMUT_REMAP_C23; \
+ type GAMUT_REMAP_C24; \
+ type GAMUT_REMAP_C31; \
+ type GAMUT_REMAP_C32; \
+ type GAMUT_REMAP_C33; \
+ type GAMUT_REMAP_C34; \
+ type GRPH_GAMUT_REMAP_MODE; \
+ type OUTPUT_CSC_C11; \
+ type OUTPUT_CSC_C12; \
+ type OUTPUT_CSC_GRPH_MODE; \
+ type DCP_REGAMMA_MEM_PWR_DIS; \
+ type DCP_LUT_MEM_PWR_DIS; \
+ type REGAMMA_LUT_LIGHT_SLEEP_DIS; \
+ type DCP_LUT_LIGHT_SLEEP_DIS; \
+ type REGAMMA_CNTLA_EXP_REGION_START; \
+ type REGAMMA_CNTLA_EXP_REGION_START_SEGMENT; \
+ type REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE; \
+ type REGAMMA_CNTLA_EXP_REGION_END; \
+ type REGAMMA_CNTLA_EXP_REGION_END_BASE; \
+ type REGAMMA_CNTLA_EXP_REGION_END_SLOPE; \
+ type REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET; \
+ type REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS; \
+ type REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET; \
+ type REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS; \
+ type DCP_REGAMMA_MEM_PWR_STATE; \
+ type REGAMMA_LUT_MEM_PWR_STATE; \
+ type REGAMMA_LUT_WRITE_EN_MASK; \
+ type GRPH_REGAMMA_MODE; \
+ type SCL_MODE; \
+ type SCL_BYPASS_MODE; \
+ type SCL_PSCL_EN; \
+ type SCL_H_NUM_OF_TAPS; \
+ type SCL_V_NUM_OF_TAPS; \
+ type SCL_BOUNDARY_MODE; \
+ type EXT_OVERSCAN_LEFT; \
+ type EXT_OVERSCAN_RIGHT; \
+ type EXT_OVERSCAN_TOP; \
+ type EXT_OVERSCAN_BOTTOM; \
+ type SCL_COEFF_MEM_PWR_DIS; \
+ type SCL_COEFF_MEM_PWR_STATE; \
+ type SCL_C_RAM_FILTER_TYPE; \
+ type SCL_C_RAM_PHASE; \
+ type SCL_C_RAM_TAP_PAIR_IDX; \
+ type SCL_C_RAM_EVEN_TAP_COEF_EN; \
+ type SCL_C_RAM_EVEN_TAP_COEF; \
+ type SCL_C_RAM_ODD_TAP_COEF_EN; \
+ type SCL_C_RAM_ODD_TAP_COEF; \
+ type VIEWPORT_X_START; \
+ type VIEWPORT_Y_START; \
+ type VIEWPORT_HEIGHT; \
+ type VIEWPORT_WIDTH; \
+ type SCL_H_SCALE_RATIO; \
+ type SCL_V_SCALE_RATIO; \
+ type SCL_H_INIT_INT; \
+ type SCL_H_INIT_FRAC; \
+ type SCL_V_INIT_INT; \
+ type SCL_V_INIT_FRAC; \
+ type LB_MEMORY_CONFIG; \
+ type LB_MEMORY_SIZE; \
+ type SCL_V_2TAP_HARDCODE_COEF_EN; \
+ type SCL_H_2TAP_HARDCODE_COEF_EN; \
+ type SCL_COEF_UPDATE_COMPLETE; \
+ type ALPHA_EN
+
+struct dce_transform_shift {
+ XFM_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_transform_mask {
+ XFM_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_transform_registers {
+ uint32_t LB_DATA_FORMAT;
+ uint32_t GAMUT_REMAP_CONTROL;
+ uint32_t GAMUT_REMAP_C11_C12;
+ uint32_t GAMUT_REMAP_C13_C14;
+ uint32_t GAMUT_REMAP_C21_C22;
+ uint32_t GAMUT_REMAP_C23_C24;
+ uint32_t GAMUT_REMAP_C31_C32;
+ uint32_t GAMUT_REMAP_C33_C34;
+ uint32_t OUTPUT_CSC_C11_C12;
+ uint32_t OUTPUT_CSC_C13_C14;
+ uint32_t OUTPUT_CSC_C21_C22;
+ uint32_t OUTPUT_CSC_C23_C24;
+ uint32_t OUTPUT_CSC_C31_C32;
+ uint32_t OUTPUT_CSC_C33_C34;
+ uint32_t OUTPUT_CSC_CONTROL;
+ uint32_t DCFE_MEM_LIGHT_SLEEP_CNTL;
+ uint32_t REGAMMA_CNTLA_START_CNTL;
+ uint32_t REGAMMA_CNTLA_SLOPE_CNTL;
+ uint32_t REGAMMA_CNTLA_END_CNTL1;
+ uint32_t REGAMMA_CNTLA_END_CNTL2;
+ uint32_t REGAMMA_CNTLA_REGION_0_1;
+ uint32_t REGAMMA_CNTLA_REGION_2_3;
+ uint32_t REGAMMA_CNTLA_REGION_4_5;
+ uint32_t REGAMMA_CNTLA_REGION_6_7;
+ uint32_t REGAMMA_CNTLA_REGION_8_9;
+ uint32_t REGAMMA_CNTLA_REGION_10_11;
+ uint32_t REGAMMA_CNTLA_REGION_12_13;
+ uint32_t REGAMMA_CNTLA_REGION_14_15;
+ uint32_t REGAMMA_LUT_WRITE_EN_MASK;
+ uint32_t REGAMMA_LUT_INDEX;
+ uint32_t REGAMMA_LUT_DATA;
+ uint32_t REGAMMA_CONTROL;
+ uint32_t DENORM_CONTROL;
+ uint32_t DCP_SPATIAL_DITHER_CNTL;
+ uint32_t OUT_ROUND_CONTROL;
+ uint32_t OUT_CLAMP_CONTROL_R_CR;
+ uint32_t OUT_CLAMP_CONTROL_G_Y;
+ uint32_t OUT_CLAMP_CONTROL_B_CB;
+ uint32_t SCL_MODE;
+ uint32_t SCL_TAP_CONTROL;
+ uint32_t SCL_CONTROL;
+ uint32_t SCL_BYPASS_CONTROL;
+ uint32_t EXT_OVERSCAN_LEFT_RIGHT;
+ uint32_t EXT_OVERSCAN_TOP_BOTTOM;
+ uint32_t SCL_VERT_FILTER_CONTROL;
+ uint32_t SCL_HORZ_FILTER_CONTROL;
+ uint32_t DCFE_MEM_PWR_CTRL;
+ uint32_t DCFE_MEM_PWR_STATUS;
+ uint32_t SCL_COEF_RAM_SELECT;
+ uint32_t SCL_COEF_RAM_TAP_DATA;
+ uint32_t VIEWPORT_START;
+ uint32_t VIEWPORT_SIZE;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t SCL_HORZ_FILTER_INIT;
+ uint32_t SCL_VERT_FILTER_INIT;
+ uint32_t SCL_AUTOMATIC_MODE_CONTROL;
+ uint32_t LB_MEMORY_CTRL;
+ uint32_t SCL_UPDATE;
+ uint32_t SCL_F_SHARP_CONTROL;
+};
+
+struct init_int_and_frac {
+ uint32_t integer;
+ uint32_t fraction;
+};
+
+struct scl_ratios_inits {
+ uint32_t h_int_scale_ratio;
+ uint32_t v_int_scale_ratio;
+ struct init_int_and_frac h_init;
+ struct init_int_and_frac v_init;
+};
+
+enum ram_filter_type {
+ FILTER_TYPE_RGB_Y_VERTICAL = 0, /* 0 - RGB/Y Vertical filter */
+ FILTER_TYPE_CBCR_VERTICAL = 1, /* 1 - CbCr Vertical filter */
+ FILTER_TYPE_RGB_Y_HORIZONTAL = 2, /* 1 - RGB/Y Horizontal filter */
+ FILTER_TYPE_CBCR_HORIZONTAL = 3, /* 3 - CbCr Horizontal filter */
+ FILTER_TYPE_ALPHA_VERTICAL = 4, /* 4 - Alpha Vertical filter. */
+ FILTER_TYPE_ALPHA_HORIZONTAL = 5, /* 5 - Alpha Horizontal filter. */
+};
+
+struct dce_transform {
+ struct transform base;
+ const struct dce_transform_registers *regs;
+ const struct dce_transform_shift *xfm_shift;
+ const struct dce_transform_mask *xfm_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool prescaler_on;
+};
+
+void dce_transform_construct(struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask);
+
+bool dce_transform_get_optimal_number_of_taps(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+void dce110_opp_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dce110_opp_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+/* REGAMMA RELATED */
+void dce110_opp_power_on_regamma_lut(
+ struct transform *xfm,
+ bool power_on);
+
+void dce110_opp_program_regamma_pwl(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+void dce110_opp_set_regamma_mode(struct transform *xfm,
+ enum opp_regamma mode);
+
+#endif /* _DCE_DCE_TRANSFORM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
new file mode 100644
index 000000000000..a822d4e2a169
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -0,0 +1,44 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+DCE100 = dce100_resource.o dce100_hw_sequencer.o
+
+AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)
+
+
+###############################################################################
+# DCE 10x
+###############################################################################
+ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0
+TG_DCE100 = dce100_resource.o
+
+AMD_DAL_TG_DCE100 = $(addprefix \
+ $(AMDDALPATH)/dc/dce100/,$(TG_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
+endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
new file mode 100644
index 000000000000..e7a694835e3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "dce100_hw_sequencer.h"
+#include "resource.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+/* include DCE10 register header files */
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+struct dce100_hw_seq_reg_offsets {
+ uint32_t blnd;
+ uint32_t crtc;
+};
+
+static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+/***************************PIPE_CONTROL***********************************/
+
+static bool dce100_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)){
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+ * by default when command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+static void dce100_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER;*/
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
+void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce100_pplib_apply_display_requirements(dc, context);
+}
+
+
+/**************************************************************************/
+
+void dce100_hw_sequencer_construct(struct dc *dc)
+{
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwss.set_bandwidth = dce100_set_bandwidth;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
new file mode 100644
index 000000000000..cb5384ef46c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -0,0 +1,42 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE100_H__
+#define __DC_HWSS_DCE100_H__
+
+#include "core_types.h"
+
+struct dc;
+struct dc_state;
+
+void dce100_hw_sequencer_construct(struct dc *dc);
+
+void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
+#endif /* __DC_HWSS_DCE100_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
new file mode 100644
index 000000000000..3ea43e2a9450
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -0,0 +1,933 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce110/dce110_timing_generator.h"
+#include "irq/dce110/irq_service_dce110.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+
+#include "dce/dce_mem_input.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE100_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE100(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE100_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST_DCE_BASE(id),\
+ .AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5),
+ stream_enc_regs(6)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_100_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
+};
+
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_100_110(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+
+
+#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce100_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce100_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id], &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE10_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE10_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE10_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce100_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce100_stream_encoder_create,
+ .create_hwseq = dce100_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE8_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE8_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE8_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce100_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 2;
+ return &dce_mi->base;
+}
+
+static void dce100_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce100_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ return &transform->base;
+}
+
+static struct input_pixel_processor *dce100_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 300000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce100_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+struct output_pixel_processor *dce100_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce100_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce100_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce100_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL)
+ dce100_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce100_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL)
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL)
+ dal_irq_service_destroy(&pool->base.irqs);
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+
+ return true;
+}
+
+static bool dce100_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce100_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce100_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+enum dc_status dce100_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dce100_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce100_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static void dce100_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
+{
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return DC_OK;
+
+ return DC_FAIL_SURFACE_VALIDATE;
+}
+
+static const struct resource_funcs dce100_res_pool_funcs = {
+ .destroy = dce100_destroy_resource_pool,
+ .link_enc_create = dce100_link_encoder_create,
+ .validate_guaranteed = dce100_validate_guaranteed,
+ .validate_bandwidth = dce100_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce100_add_stream_to_ctx,
+ .validate_global = dce100_validate_global
+};
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce100_res_pool_funcs;
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ /*************************************************
+ * Resource + asic cap harcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce100_timing_generator_create(
+ ctx,
+ i,
+ &dce100_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce100_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce100_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce100_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce100_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce100_hw_sequencer_construct(dc);
+ return true;
+
+res_create_fail:
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dce100_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
new file mode 100644
index 000000000000..2f366d66635d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+/*
+ * dce100_resource.h
+ *
+ * Created on: 2016-01-20
+ * Author: qyang
+ */
+
+#ifndef DCE100_RESOURCE_H_
+#define DCE100_RESOURCE_H_
+
+struct dc;
+struct resource_pool;
+struct dc_validation_set;
+
+struct resource_pool *dce100_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+
+enum dc_status dce100_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+
+#endif /* DCE100_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
new file mode 100644
index 000000000000..d564c0eb8b04
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -0,0 +1,33 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+DCE110 = dce110_timing_generator.o \
+dce110_compressor.o dce110_hw_sequencer.o dce110_resource.o \
+dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
+dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o
+
+AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE110)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
new file mode 100644
index 000000000000..6923662413cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#include "gmc/gmc_8_2_d.h"
+
+#include "include/logger_interface.h"
+
+#include "dce110_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp110->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp110->offsets.dmif_offset)
+
+static const struct dce110_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG statics screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+
+static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
+{
+ return 256 * ((pixels + 255) / 256);
+}
+
+static void wait_for_fbc_state_changed(
+ struct dce110_compressor *cp110,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+ while (counter < 10) {
+ value = dm_read_reg(cp110->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ msleep(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ } else {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_SYNC,
+ "FBC status changed to %d", enabled);
+ }
+
+
+}
+
+void dce110_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
+ /* HW needs to do power measurement comparison. */
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CNTL,
+ FBC_COMP_CLK_GATE_EN);
+ }
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
+ /* 1 ==> 4:1 */
+ /* 2 ==> 8:1 */
+ /* 0xF ==> 1:1 */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce110_compressor_enable_fbc(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (!dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL))) {
+
+ uint32_t addr;
+ uint32_t value, misc_value;
+
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp110->offsets = reg_offsets[params->inst];
+
+ /* Toggle it as there is bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* FBC usage with scatter & gather for dce110 */
+ misc_value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+
+ set_reg_field_value(misc_value, 1,
+ FBC_MISC, FBC_INVALIDATE_ON_ERROR);
+ set_reg_field_value(misc_value, 1,
+ FBC_MISC, FBC_DECOMPRESS_ERROR_CLEAR);
+ set_reg_field_value(misc_value, 0x14,
+ FBC_MISC, FBC_SLOW_REQ_INTERVAL);
+
+ dm_write_reg(compressor->ctx, mmFBC_MISC, misc_value);
+
+ /* Enable FBC */
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp110, true);
+ }
+}
+
+void dce110_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+}
+
+bool dce110_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+ if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst =
+ compressor->attached_inst;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void dce110_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE11 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce110_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+ /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
+ * for DCE 11 regions cannot be used - does not work with S/G
+ */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Setup events when to clear all CSM entries (effectively marking
+ * current compressed data invalid)
+ * For DCE 11 CSM metadata 11111 means - "Not Compressed"
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE11:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+ * In addition, DCE11.1 also needs to set new DCE11.1 specific events
+ * that are used to trigger invalidation on certain register changes,
+ * for example enabling of Alpha Compression may trigger invalidation of
+ * FBC once bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+struct compressor *dce110_compressor_create(struct dc_context *ctx)
+{
+ struct dce110_compressor *cp110 =
+ kzalloc(sizeof(struct dce110_compressor), GFP_KERNEL);
+
+ if (!cp110)
+ return NULL;
+
+ dce110_compressor_construct(cp110, ctx);
+ return &cp110->base;
+}
+
+void dce110_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE110_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
+
+bool dce110_get_required_compressed_surfacesize(struct fbc_input_info fbc_input_info,
+ struct fbc_requested_compressed_size size)
+{
+ bool result = false;
+
+ unsigned int max_x = FBC_MAX_X, max_y = FBC_MAX_Y;
+
+ get_max_support_fbc_buffersize(&max_x, &max_y);
+
+ if (fbc_input_info.dynamic_fbc_buffer_alloc == 0) {
+ /*
+ * For DCE11 here use Max HW supported size: HW Support up to 3840x2400 resolution
+ * or 18000 chunks.
+ */
+ size.preferred_size = size.min_size = align_to_chunks_number_per_line(max_x) * max_y * 4; /* (For FBC when LPT not supported). */
+ size.preferred_size_alignment = size.min_size_alignment = 0x100; /* For FBC when LPT not supported */
+ size.bits.preferred_must_be_framebuffer_pool = 1;
+ size.bits.min_must_be_framebuffer_pool = 1;
+
+ result = true;
+ }
+ /*
+ * Maybe to add registry key support with optional size here to override above
+ * for debugging purposes
+ */
+
+ return result;
+}
+
+
+void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
+{
+ *max_x = FBC_MAX_X;
+ *max_y = FBC_MAX_Y;
+
+ /* if (m_smallLocalFrameBufferMemory == 1)
+ * {
+ * *max_x = FBC_MAX_X_SG;
+ * *max_y = FBC_MAX_Y_SG;
+ * }
+ */
+}
+
+
+unsigned int controller_id_to_index(enum controller_id controller_id)
+{
+ unsigned int index = 0;
+
+ switch (controller_id) {
+ case CONTROLLER_ID_D0:
+ index = 0;
+ break;
+ case CONTROLLER_ID_D1:
+ index = 1;
+ break;
+ case CONTROLLER_ID_D2:
+ index = 2;
+ break;
+ case CONTROLLER_ID_D3:
+ index = 3;
+ break;
+ default:
+ break;
+ }
+ return index;
+}
+
+
+static const struct compressor_funcs dce110_compressor_funcs = {
+ .power_up_fbc = dce110_compressor_power_up_fbc,
+ .enable_fbc = dce110_compressor_enable_fbc,
+ .disable_fbc = dce110_compressor_disable_fbc,
+ .set_fbc_invalidation_triggers = dce110_compressor_set_fbc_invalidation_triggers,
+ .surface_address_and_pitch = dce110_compressor_program_compressed_surface_address_and_pitch,
+ .is_fbc_enabled_in_hw = dce110_compressor_is_fbc_enabled_in_hw
+};
+
+
+void dce110_compressor_construct(struct dce110_compressor *compressor,
+ struct dc_context *ctx)
+{
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+
+ /* for dce 11 always use one dram channel for lpt */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ /*
+ * check if this system has more than 1 dram channel; if only 1 then lpt
+ * should not be supported
+ */
+
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.lpt_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ compressor->base.funcs = &dce110_compressor_funcs;
+
+#endif
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
new file mode 100644
index 000000000000..26c7335a1cbf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
@@ -0,0 +1,81 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE110_H__
+#define __DC_COMPRESSOR_DCE110_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE110_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce110_compressor, base)
+
+struct dce110_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce110_compressor {
+ struct compressor base;
+ struct dce110_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce110_compressor_create(struct dc_context *ctx);
+
+void dce110_compressor_construct(struct dce110_compressor *cp110,
+ struct dc_context *ctx);
+
+void dce110_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce110_compressor_power_up_fbc(struct compressor *cp);
+
+void dce110_compressor_enable_fbc(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+void dce110_compressor_disable_fbc(struct compressor *cp);
+
+void dce110_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce110_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce110_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce110_compressor_enable_lpt(struct compressor *cp);
+
+void dce110_compressor_disable_lpt(struct compressor *cp);
+
+void dce110_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce110_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
new file mode 100644
index 000000000000..07ff8d2faf3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -0,0 +1,2991 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_bios_types.h"
+#include "core_types.h"
+#include "core_status.h"
+#include "resource.h"
+#include "dm_helpers.h"
+#include "dce110_hw_sequencer.h"
+#include "dce110_timing_generator.h"
+#include "dce/dce_hwseq.h"
+#include "gpio_service_interface.h"
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dce110_compressor.h"
+#endif
+
+#include "bios/bios_parser_helper.h"
+#include "timing_generator.h"
+#include "mem_input.h"
+#include "opp.h"
+#include "ipp.h"
+#include "transform.h"
+#include "stream_encoder.h"
+#include "link_encoder.h"
+#include "link_hwss.h"
+#include "clock_source.h"
+#include "abm.h"
+#include "audio.h"
+#include "reg_helper.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "custom_float.h"
+
+/*
+ * All values are in milliseconds;
+ * For eDP, after power-up/power/down,
+ * 300/500 msec max. delay from LCDVCC to black video generation
+ */
+#define PANEL_POWER_UP_TIMEOUT 300
+#define PANEL_POWER_DOWN_TIMEOUT 500
+#define HPD_CHECK_INTERVAL 10
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+struct dce110_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce110_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTCV_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_BLND(reg, id)\
+ (reg + reg_offsets[id].blnd)
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+#define MAX_WATERMARK 0xFFFF
+#define SAFE_NBP_MARK 0x7FFF
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+/***************************PIPE_CONTROL***********************************/
+static void dce110_init_pte(struct dc_context *ctx)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+
+ addr = mmUNP_DVMM_PTE_CONTROL;
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DVMM_PTE_CONTROL,
+ DVMM_USE_SINGLE_PTE);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE0);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE1);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+/**************************************************************************/
+
+static void enable_display_pipe_clock_gating(
+ struct dc_context *ctx,
+ bool clock_gating)
+{
+ /*TODO*/
+}
+
+static bool dce110_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (controller_id == underlay_idx)
+ controller_id = CONTROLLER_ID_UNDERLAY0 - 1;
+
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+ * by default when command table is called
+ *
+ * Bios parser accepts controller_id = 6 as indicative of
+ * underlay pipe in dce110. But we do not support more
+ * than 3.
+ */
+ if (controller_id < CONTROLLER_ID_MAX - 1)
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce110_init_pte(ctx);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+static void build_prescale_params(struct ipp_prescale_params *prescale_params,
+ const struct dc_plane_state *plane_state)
+{
+ prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
+
+ switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ prescale_params->scale = 0x2020;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ prescale_params->scale = 0x2008;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ prescale_params->scale = 0x2000;
+ break;
+ default:
+ ASSERT(false);
+ break;
+ }
+}
+
+static bool dce110_set_input_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
+{
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ const struct dc_transfer_func *tf = NULL;
+ struct ipp_prescale_params prescale_params = { 0 };
+ bool result = true;
+
+ if (ipp == NULL)
+ return false;
+
+ if (plane_state->in_transfer_func)
+ tf = plane_state->in_transfer_func;
+
+ build_prescale_params(&prescale_params, plane_state);
+ ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
+
+ if (plane_state->gamma_correction && dce_use_lut(plane_state))
+ ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
+
+ if (tf == NULL) {
+ /* Default case if no input transfer function specified */
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ } else if (tf->type == TF_TYPE_PREDEFINED) {
+ switch (tf->tf) {
+ case TRANSFER_FUNCTION_SRGB:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_xvYCC);
+ break;
+ case TRANSFER_FUNCTION_LINEAR:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_BYPASS);
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ } else if (tf->type == TF_TYPE_BYPASS) {
+ ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_BYPASS);
+ } else {
+ /*TF_TYPE_DISTRIBUTED_POINTS - Not supported in DCE 11*/
+ result = false;
+ }
+
+ return result;
+}
+
+static bool convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].x,
+ &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].offset,
+ &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].slope,
+ &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].x,
+ &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].y,
+ &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[2].slope,
+ &fmt,
+ &arr_points[2].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(
+ rgb->red,
+ &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->green,
+ &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->blue,
+ &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_red,
+ &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_green,
+ &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_blue,
+ &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
+static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
+ *output_tf, struct pwl_params *regamma_params)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ uint32_t i, j, k, seg_distr[16], increment, start_index, hw_points;
+
+ if (output_tf == NULL || regamma_params == NULL ||
+ output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ arr_points = regamma_params->arr_points;
+ rgb_resulted = regamma_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(regamma_params, 0, sizeof(struct pwl_params));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 16 segments
+ * segments are from 2^-11 to 2^5
+ */
+ segment_start = -11;
+ segment_end = 5;
+
+ seg_distr[0] = 2;
+ seg_distr[1] = 2;
+ seg_distr[2] = 2;
+ seg_distr[3] = 2;
+ seg_distr[4] = 2;
+ seg_distr[5] = 2;
+ seg_distr[6] = 3;
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
+ seg_distr[10] = 4;
+ seg_distr[11] = 5;
+ seg_distr[12] = 5;
+ seg_distr[13] = 5;
+ seg_distr[14] = 5;
+ seg_distr[15] = 5;
+
+ } else {
+ /* 10 segments
+ * segment is from 2^-10 to 2^0
+ */
+ segment_start = -10;
+ segment_end = 0;
+
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+ seg_distr[10] = -1;
+ seg_distr[11] = -1;
+ seg_distr[12] = -1;
+ seg_distr[13] = -1;
+ seg_distr[14] = -1;
+ seg_distr[15] = -1;
+ }
+
+ for (k = 0; k < 16; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = 32 / (1 << seg_distr[k]);
+ start_index = (segment_start + k + 25) * 32;
+ for (i = start_index; i < start_index + 32; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + 25) * 32;
+ rgb_resulted[hw_points - 1].red =
+ output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green =
+ output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue =
+ output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+ arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(
+ arr_points[0].y,
+ arr_points[0].x);
+
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+ arr_points[2].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+ arr_points[2].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ arr_points[2].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ regamma_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < 16 && i < 16; k++) {
+ if (seg_distr[k] != -1) {
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ regamma_params->arr_curve_points[i].offset =
+ regamma_params->arr_curve_points[k].
+ offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(
+ rgb_plus_1->red,
+ rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(
+ rgb_plus_1->green,
+ rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(
+ rgb_plus_1->blue,
+ rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+
+ convert_to_custom_float(rgb_resulted, arr_points, hw_points);
+
+ return true;
+}
+
+static bool dce110_set_output_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ struct transform *xfm = pipe_ctx->plane_res.xfm;
+
+ xfm->funcs->opp_power_on_regamma_lut(xfm, true);
+ xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
+
+ if (stream->out_transfer_func &&
+ stream->out_transfer_func->type ==
+ TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf ==
+ TRANSFER_FUNCTION_SRGB) {
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
+ } else if (dce110_translate_regamma_to_hw_format(
+ stream->out_transfer_func, &xfm->regamma_params)) {
+ xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
+ } else {
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_BYPASS);
+ }
+
+ xfm->funcs->opp_power_on_regamma_lut(xfm, false);
+
+ return true;
+}
+
+static enum dc_status bios_parser_crtc_source_select(
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_bios *dcb;
+ /* call VBIOS table to set CRTC source for the HW
+ * encoder block
+ * note: video bios clears all FMT setting here. */
+ struct bp_crtc_source_select crtc_source_select = {0};
+ const struct dc_sink *sink = pipe_ctx->stream->sink;
+
+ crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id;
+ crtc_source_select.controller_id = pipe_ctx->pipe_idx + 1;
+ /*TODO: Need to un-hardcode color depth, dp_audio and account for
+ * the case where signal and sink signal is different (translator
+ * encoder)*/
+ crtc_source_select.signal = pipe_ctx->stream->signal;
+ crtc_source_select.enable_dp_audio = false;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ crtc_source_select.display_output_bit_depth = PANEL_6BIT_COLOR;
+ break;
+ case COLOR_DEPTH_888:
+ crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ crtc_source_select.display_output_bit_depth = PANEL_10BIT_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ crtc_source_select.display_output_bit_depth = PANEL_12BIT_COLOR;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
+ break;
+ }
+
+ dcb = sink->ctx->dc_bios;
+
+ if (BP_RESULT_OK != dcb->funcs->crtc_source_select(
+ dcb,
+ &crtc_source_select)) {
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ return DC_OK;
+}
+
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ ASSERT(pipe_ctx->stream);
+
+ if (pipe_ctx->stream_res.stream_enc == NULL)
+ return; /* this is not root pipe */
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+}
+
+void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
+{
+ enum dc_lane_count lane_count =
+ pipe_ctx->stream->sink->link->cur_link_settings.lane_count;
+
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* 1. update AVI info frame (HDMI, DP)
+ * we always need to update info frame
+ */
+ uint32_t active_total_with_borders;
+ uint32_t early_control = 0;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ /* TODOFPGA may change to hwss.update_info_frame */
+ dce110_update_info_frame(pipe_ctx);
+ /* enable early control to avoid corruption on DP monitor*/
+ active_total_with_borders =
+ timing->h_addressable
+ + timing->h_border_left
+ + timing->h_border_right;
+
+ if (lane_count != 0)
+ early_control = active_total_with_borders % lane_count;
+
+ if (early_control == 0)
+ early_control = lane_count;
+
+ tg->funcs->set_early_control(tg, early_control);
+
+ /* enable audio only within mode set */
+ if (pipe_ctx->stream_res.audio != NULL) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
+ }
+
+ /* For MST, there are multiply stream go to only one link.
+ * connect DIG back_end to front_end while enable_stream and
+ * disconnect them during disable_stream
+ * BY this, it is logic clean to separate stream and link */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
+}
+
+/*todo: cloned in stream enc, fix*/
+static bool is_panel_backlight_on(struct dce_hwseq *hws)
+{
+ uint32_t value;
+
+ REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+ return value;
+}
+
+static bool is_panel_powered_on(struct dce_hwseq *hws)
+{
+ uint32_t value;
+
+ REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
+ return value == 1;
+}
+
+static enum bp_result link_transmitter_control(
+ struct dc_bios *bios,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+
+ result = bios->funcs->transmitter_control(bios, cntl);
+
+ return result;
+}
+
+/*
+ * @brief
+ * eDP only.
+ */
+void hwss_edp_wait_for_hpd_ready(
+ struct link_encoder *enc,
+ bool power_up)
+{
+ struct dc_context *ctx = enc->ctx;
+ struct graphics_object_id connector = enc->connector;
+ struct gpio *hpd;
+ bool edp_hpd_high = false;
+ uint32_t time_elapsed = 0;
+ uint32_t timeout = power_up ?
+ PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;
+
+ if (dal_graphics_object_id_get_connector_id(connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (!power_up)
+ /*
+ * From KV, we will not HPD low after turning off VCC -
+ * instead, we will check the SW timer in power_up().
+ */
+ return;
+
+ /*
+ * When we power on/off the eDP panel,
+ * we need to wait until SENSE bit is high/low.
+ */
+
+ /* obtain HPD */
+ /* TODO what to do with this? */
+ hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
+
+ if (!hpd) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);
+
+ /* wait until timeout or panel detected */
+
+ do {
+ uint32_t detected = 0;
+
+ dal_gpio_get_value(hpd, &detected);
+
+ if (!(detected ^ power_up)) {
+ edp_hpd_high = true;
+ break;
+ }
+
+ msleep(HPD_CHECK_INTERVAL);
+
+ time_elapsed += HPD_CHECK_INTERVAL;
+ } while (time_elapsed < timeout);
+
+ dal_gpio_close(hpd);
+
+ dal_gpio_destroy_irq(&hpd);
+
+ if (false == edp_hpd_high) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: wait timed out!\n", __func__);
+ }
+}
+
+void hwss_edp_power_control(
+ struct link_encoder *enc,
+ bool power_up)
+{
+ struct dc_context *ctx = enc->ctx;
+ struct dce_hwseq *hwseq = ctx->dc->hwseq;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result bp_result;
+
+
+ if (dal_graphics_object_id_get_connector_id(enc->connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (power_up != is_panel_powered_on(hwseq)) {
+ /* Send VBIOS command to prompt eDP panel power */
+
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: Panel Power action: %s\n",
+ __func__, (power_up ? "On":"Off"));
+
+ cntl.action = power_up ?
+ TRANSMITTER_CONTROL_POWER_ON :
+ TRANSMITTER_CONTROL_POWER_OFF;
+ cntl.transmitter = enc->transmitter;
+ cntl.connector_obj_id = enc->connector;
+ cntl.coherent = false;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.hpd_sel = enc->hpd_source;
+
+ bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
+
+ if (bp_result != BP_RESULT_OK)
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Panel Power bp_result: %d\n",
+ __func__, bp_result);
+ } else {
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: Skipping Panel Power action: %s\n",
+ __func__, (power_up ? "On":"Off"));
+ }
+
+ hwss_edp_wait_for_hpd_ready(enc, true);
+}
+
+/*todo: cloned in stream enc, fix*/
+/*
+ * @brief
+ * eDP only. Control the backlight of the eDP panel
+ */
+void hwss_edp_backlight_control(
+ struct dc_link *link,
+ bool enable)
+{
+ struct dce_hwseq *hws = link->dc->hwseq;
+ struct dc_context *ctx = link->dc->ctx;
+ struct bp_transmitter_control cntl = { 0 };
+
+ if (dal_graphics_object_id_get_connector_id(link->link_id)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (enable && is_panel_backlight_on(hws)) {
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: panel already powered up. Do nothing.\n",
+ __func__);
+ return;
+ }
+
+ /* Send VBIOS command to control eDP panel backlight */
+
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: backlight action: %s\n",
+ __func__, (enable ? "On":"Off"));
+
+ cntl.action = enable ?
+ TRANSMITTER_CONTROL_BACKLIGHT_ON :
+ TRANSMITTER_CONTROL_BACKLIGHT_OFF;
+
+ /*cntl.engine_id = ctx->engine;*/
+ cntl.transmitter = link->link_enc->transmitter;
+ cntl.connector_obj_id = link->link_enc->connector;
+ /*todo: unhardcode*/
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.hpd_sel = link->link_enc->hpd_source;
+
+ /* For eDP, the following delays might need to be considered
+ * after link training completed:
+ * idle period - min. accounts for required BS-Idle pattern,
+ * max. allows for source frame synchronization);
+ * 50 msec max. delay from valid video data from source
+ * to video on dislpay or backlight enable.
+ *
+ * Disable the delay for now.
+ * Enable it in the future if necessary.
+ */
+ /* dc_service_sleep_in_milliseconds(50); */
+ link_transmitter_control(link->dc->ctx->dc_bios, &cntl);
+}
+
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
+ if (pipe_ctx->stream_res.audio) {
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+ /*don't free audio if it is from retrain or internal disable stream*/
+ if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ pipe_ctx->stream_res.audio = NULL;
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
+ }
+
+ /* TODO: notify audio driver for if audio modes list changed
+ * add audio mode list change flag */
+ /* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
+ * stream->stream_engine_id);
+ */
+ }
+
+ /* blank at encoder level */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
+ hwss_edp_backlight_control(link, false);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ }
+ link->link_enc->funcs->connect_dig_be_to_fe(
+ link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
+}
+
+void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* only 3 items below are used by unblank */
+ params.pixel_clk_khz =
+ pipe_ctx->stream->timing.pix_clk_khz;
+ params.link_settings.link_rate = link_settings->link_rate;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ if (link->connector_signal == SIGNAL_TYPE_EDP)
+ hwss_edp_backlight_control(link, true);
+}
+
+
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ if (pipe_ctx != NULL && pipe_ctx->stream_res.stream_enc != NULL)
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
+}
+
+static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+{
+ switch (crtc_id) {
+ case CONTROLLER_ID_D0:
+ return DTO_SOURCE_ID0;
+ case CONTROLLER_ID_D1:
+ return DTO_SOURCE_ID1;
+ case CONTROLLER_ID_D2:
+ return DTO_SOURCE_ID2;
+ case CONTROLLER_ID_D3:
+ return DTO_SOURCE_ID3;
+ case CONTROLLER_ID_D4:
+ return DTO_SOURCE_ID4;
+ case CONTROLLER_ID_D5:
+ return DTO_SOURCE_ID5;
+ default:
+ return DTO_SOURCE_UNKNOWN;
+ }
+}
+
+static void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ audio_output->engine_id = pipe_ctx->stream_res.stream_enc->id;
+
+ audio_output->signal = pipe_ctx->stream->signal;
+
+ /* audio_crtc_info */
+
+ audio_output->crtc_info.h_total =
+ stream->timing.h_total;
+
+ /*
+ * Audio packets are sent during actual CRTC blank physical signal, we
+ * need to specify actual active signal portion
+ */
+ audio_output->crtc_info.h_active =
+ stream->timing.h_addressable
+ + stream->timing.h_border_left
+ + stream->timing.h_border_right;
+
+ audio_output->crtc_info.v_active =
+ stream->timing.v_addressable
+ + stream->timing.v_border_top
+ + stream->timing.v_border_bottom;
+
+ audio_output->crtc_info.pixel_repetition = 1;
+
+ audio_output->crtc_info.interlaced =
+ stream->timing.flags.INTERLACE;
+
+ audio_output->crtc_info.refresh_rate =
+ (stream->timing.pix_clk_khz*1000)/
+ (stream->timing.h_total*stream->timing.v_total);
+
+ audio_output->crtc_info.color_depth =
+ stream->timing.display_color_depth;
+
+ audio_output->crtc_info.requested_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+ audio_output->crtc_info.calculated_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+/*for HDMI, audio ACR is with deep color ratio factor*/
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) &&
+ audio_output->crtc_info.requested_pixel_clock ==
+ stream->timing.pix_clk_khz) {
+ if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ audio_output->crtc_info.requested_pixel_clock =
+ audio_output->crtc_info.requested_pixel_clock/2;
+ audio_output->crtc_info.calculated_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk/2;
+
+ }
+ }
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ audio_output->pll_info.dp_dto_source_clock_in_khz =
+ state->dis_clk->funcs->get_dp_ref_clk_frequency(
+ state->dis_clk);
+ }
+
+ audio_output->pll_info.feed_back_divider =
+ pipe_ctx->pll_settings.feedback_divider;
+
+ audio_output->pll_info.dto_source =
+ translate_to_dto_source(
+ pipe_ctx->pipe_idx + 1);
+
+ /* TODO hard code to enable for now. Need get from stream */
+ audio_output->pll_info.ss_enabled = true;
+
+ audio_output->pll_info.ss_percentage =
+ pipe_ctx->pll_settings.ss_percentage;
+}
+
+static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4;
+
+ switch (pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB8888:
+ /* set boarder color to red */
+ color->color_r_cr = color_value;
+ break;
+
+ case PIXEL_FORMAT_ARGB2101010:
+ /* set boarder color to blue */
+ color->color_b_cb = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP8:
+ /* set boarder color to green */
+ color->color_g_y = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP10:
+ /* set boarder color to yellow */
+ color->color_g_y = color_value;
+ color->color_r_cr = color_value;
+ break;
+ case PIXEL_FORMAT_FP16:
+ /* set boarder color to white */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
+ color->color_g_y = color_value;
+ break;
+ default:
+ break;
+ }
+}
+
+static void program_scaler(const struct dc *dc,
+ const struct pipe_ctx *pipe_ctx)
+{
+ struct tg_color color = {0};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* TOFPGA */
+ if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
+ return;
+#endif
+
+ if (dc->debug.surface_visual_confirm)
+ get_surface_visual_confirm_color(pipe_ctx, &color);
+ else
+ color_space_to_black_color(dc,
+ pipe_ctx->stream->output_color_space,
+ &color);
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+ pipe_ctx->plane_res.xfm,
+ pipe_ctx->plane_res.scl_data.lb_params.depth,
+ &pipe_ctx->stream->bit_depth_params);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+ pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
+ pipe_ctx->stream_res.tg,
+ &color);
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data);
+}
+
+static enum dc_status dce110_prog_pixclk_crtc_otg(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+ pipe_ctx[pipe_ctx->pipe_idx];
+ struct tg_color black_color = {0};
+
+ if (!pipe_ctx_old->stream) {
+
+ /* program blank color */
+ color_space_to_black_color(dc,
+ stream->output_color_space, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ /*
+ * Must blank CRTC after disabling power gating and before any
+ * programming, otherwise CRTC will be hung in bad state
+ */
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+
+ if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ true);
+
+ pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx->stream_res.tg,
+ 0x182);
+ }
+
+ if (!pipe_ctx_old->stream) {
+ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(
+ pipe_ctx->stream_res.tg)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ }
+
+
+
+ return DC_OK;
+}
+
+static enum dc_status apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+ pipe_ctx[pipe_ctx->pipe_idx];
+
+ /* */
+ dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc);
+
+ /* FPGA does not program backend */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ return DC_OK;
+ }
+ /* TODO: move to stream encoder */
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ stream->sink->link->link_enc->funcs->setup(
+ stream->sink->link->link_enc,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.tg->inst,
+ stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
+
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->output_color_space);
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->phy_pix_clk,
+ pipe_ctx->stream_res.audio != NULL);
+
+ if (dc_is_dvi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
+ true : false);
+
+ resource_build_info_frame(pipe_ctx);
+ dce110_update_info_frame(pipe_ctx);
+ if (!pipe_ctx_old->stream) {
+ if (!pipe_ctx->stream->dpms_off)
+ core_link_enable_stream(context, pipe_ctx);
+ }
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+
+ pipe_ctx->stream->sink->link->psr_enabled = false;
+
+ return DC_OK;
+}
+
+/******************************************************************************/
+
+static void power_down_encoders(struct dc *dc)
+{
+ int i;
+ enum connector_id connector_id;
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ /* do not know BIOS back-front mapping, simply blank all. It will not
+ * hurt for non-DP
+ */
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ dc->res_pool->stream_enc[i]->funcs->dp_blank(
+ dc->res_pool->stream_enc[i]);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
+ if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP)) {
+
+ if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ if (connector_id == CONNECTOR_ID_EDP)
+ signal = SIGNAL_TYPE_EDP;
+ }
+
+ dc->links[i]->link_enc->funcs->disable_output(
+ dc->links[i]->link_enc, signal, dc->links[i]);
+ }
+}
+
+static void power_down_controllers(struct dc *dc)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ dc->res_pool->timing_generators[i]->funcs->disable_crtc(
+ dc->res_pool->timing_generators[i]);
+ }
+}
+
+static void power_down_clock_sources(struct dc *dc)
+{
+ int i;
+
+ if (dc->res_pool->dp_clock_source->funcs->cs_power_down(
+ dc->res_pool->dp_clock_source) == false)
+ dm_error("Failed to power down pll! (dp clk src)\n");
+
+ for (i = 0; i < dc->res_pool->clk_src_count; i++) {
+ if (dc->res_pool->clock_sources[i]->funcs->cs_power_down(
+ dc->res_pool->clock_sources[i]) == false)
+ dm_error("Failed to power down pll! (clk src index=%d)\n", i);
+ }
+}
+
+static void power_down_all_hw_blocks(struct dc *dc)
+{
+ power_down_encoders(dc);
+
+ power_down_controllers(dc);
+
+ power_down_clock_sources(dc);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+#endif
+}
+
+static void disable_vga_and_power_gate_all_controllers(
+ struct dc *dc)
+{
+ int i;
+ struct timing_generator *tg;
+ struct dc_context *ctx = dc->ctx;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->disable_vga)
+ tg->funcs->disable_vga(tg);
+
+ /* Enable CLOCK gating for each pipe BEFORE controller
+ * powergating. */
+ enable_display_pipe_clock_gating(ctx,
+ true);
+
+ dc->hwss.power_down_front_end(dc, i);
+ }
+}
+
+/**
+ * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
+ * 1. Power down all DC HW blocks
+ * 2. Disable VGA engine on all controllers
+ * 3. Enable power gating for controller
+ * 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS)
+ */
+void dce110_enable_accelerated_mode(struct dc *dc)
+{
+ power_down_all_hw_blocks(dc);
+
+ disable_vga_and_power_gate_all_controllers(dc);
+ bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
+}
+
+static uint32_t compute_pstate_blackout_duration(
+ struct bw_fixed blackout_duration,
+ const struct dc_stream_state *stream)
+{
+ uint32_t total_dest_line_time_ns;
+ uint32_t pstate_blackout_duration_ns;
+
+ pstate_blackout_duration_ns = 1000 * blackout_duration.value >> 24;
+
+ total_dest_line_time_ns = 1000000UL *
+ stream->timing.h_total /
+ stream->timing.pix_clk_khz +
+ pstate_blackout_duration_ns;
+
+ return total_dest_line_time_ns;
+}
+
+void dce110_set_displaymarks(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ uint8_t i, num_pipes;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+
+ for (i = 0, num_pipes = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ uint32_t total_dest_line_time_ns;
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ total_dest_line_time_ns = compute_pstate_blackout_duration(
+ dc->bw_vbios->blackout_duration, pipe_ctx->stream);
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
+ pipe_ctx->plane_res.mi,
+ context->bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw.dce.urgent_wm_ns[num_pipes],
+ total_dest_line_time_ns);
+ if (i == underlay_idx) {
+ num_pipes++;
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
+ pipe_ctx->plane_res.mi,
+ context->bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw.dce.urgent_wm_ns[num_pipes],
+ total_dest_line_time_ns);
+ }
+ num_pipes++;
+ }
+}
+
+static void set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+ int underlay_idx = pool->underlay_pipe_index;
+ struct dce_watermarks max_marks = {
+ MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
+ struct dce_watermarks nbp_marks = {
+ SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK };
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL)
+ continue;
+
+ res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_display_marks(
+ res_ctx->pipe_ctx[i].plane_res.mi,
+ nbp_marks,
+ max_marks,
+ max_marks,
+ MAX_WATERMARK);
+
+ if (i == underlay_idx)
+ res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_chroma_display_marks(
+ res_ctx->pipe_ctx[i].plane_res.mi,
+ nbp_marks,
+ max_marks,
+ max_marks,
+ MAX_WATERMARK);
+
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+static void set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, int vmin, int vmax)
+{
+ int i = 0;
+ struct drr_params params = {0};
+
+ params.vertical_total_max = vmax;
+ params.vertical_total_min = vmin;
+
+ /* TODO: If multiple pipes are to be supported, you need
+ * some GSL stuff
+ */
+
+ for (i = 0; i < num_pipes; i++) {
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, &params);
+ }
+}
+
+static void get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position)
+{
+ int i = 0;
+
+ /* TODO: handle pipes > 1
+ */
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
+}
+
+static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events)
+{
+ unsigned int i;
+ unsigned int value = 0;
+
+ if (events->overlay_update)
+ value |= 0x100;
+ if (events->surface_update)
+ value |= 0x80;
+ if (events->cursor_update)
+ value |= 0x2;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ value |= 0x84;
+#endif
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+}
+
+/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+ * may not be programmed yet.
+ * TODO: after mode set, pre_mode_set = false,
+ * may read PLL register to get pixel clock
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+ struct dc_state *context,
+ bool pre_mode_set)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ if (!pre_mode_set) {
+ /* TODO: read ASIC register to get pixel clock */
+ ASSERT(0);
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check under lay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+
+ if (max_pix_clk == 0)
+ ASSERT(0);
+
+ return max_pix_clk;
+}
+
+/*
+ * Find clock state based on clock requested. if clock value is 0, simply
+ * set clock state as requested without finding clock state by clock value
+ */
+
+static void apply_min_clocks(
+ struct dc *dc,
+ struct dc_state *context,
+ enum dm_pp_clocks_state *clocks_state,
+ bool pre_mode_set)
+{
+ struct state_dependent_clocks req_clocks = {0};
+
+ if (!pre_mode_set) {
+ /* set clock_state without verification */
+ if (context->dis_clk->funcs->set_min_clocks_state) {
+ context->dis_clk->funcs->set_min_clocks_state(
+ context->dis_clk, *clocks_state);
+ return;
+ }
+
+ /* TODO: This is incorrect. Figure out how to fix. */
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ context->dis_clk->cur_clocks_value.dispclk_in_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
+ pre_mode_set,
+ false);
+ return;
+ }
+
+ /* get the required state based on state dependent clocks:
+ * display clock and pixel clock
+ */
+ req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
+
+ req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
+ dc, context, true);
+
+ if (context->dis_clk->funcs->get_required_clocks_state) {
+ *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
+ context->dis_clk, &req_clocks);
+ context->dis_clk->funcs->set_min_clocks_state(
+ context->dis_clk, *clocks_state);
+ } else {
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ req_clocks.display_clk_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ req_clocks.pixel_clk_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ req_clocks.pixel_clk_khz,
+ pre_mode_set,
+ false);
+ }
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+
+/*
+ * Check if FBC can be enabled
+ */
+static enum dc_status validate_fbc(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[0];
+
+ ASSERT(dc->fbc_compressor);
+
+ /* FBC memory should be allocated */
+ if (!dc->ctx->fbc_gpu_addr)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only supports single display */
+ if (context->stream_count != 1)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only supports eDP */
+ if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
+ return DC_ERROR_UNEXPECTED;
+
+ /* PSR should not be enabled */
+ if (pipe_ctx->stream->sink->link->psr_enabled)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Nothing to compress */
+ if (!pipe_ctx->plane_state)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only for non-linear tiling */
+ if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+/*
+ * Enable FBC
+ */
+static enum dc_status enable_fbc(struct dc *dc,
+ struct dc_state *context)
+{
+ enum dc_status status = validate_fbc(dc, context);
+
+ if (status == DC_OK) {
+ /* Program GRPH COMPRESSED ADDRESS and PITCH */
+ struct compr_addr_and_pitch_params params = {0, 0, 0};
+ struct compressor *compr = dc->fbc_compressor;
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[0];
+
+ params.source_view_width =
+ pipe_ctx->stream->timing.h_addressable;
+ params.source_view_height =
+ pipe_ctx->stream->timing.v_addressable;
+
+ compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
+
+ compr->funcs->surface_address_and_pitch(compr, &params);
+ compr->funcs->set_fbc_invalidation_triggers(compr, 1);
+
+ compr->funcs->enable_fbc(compr, &params);
+ }
+ return status;
+}
+#endif
+
+static enum dc_status apply_ctx_to_hw_fpga(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream)
+ continue;
+
+ status = apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ if (status != DC_OK)
+ return status;
+ }
+
+ return DC_OK;
+}
+
+static void dce110_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /* Note: We need to disable output if clock sources change,
+ * since bios does optimization and doesn't apply if changing
+ * PHY when not already disabled.
+ */
+
+ /* Skip underlay pipe since it will be handled in commit surface*/
+ if (!pipe_ctx_old->stream || pipe_ctx_old->top_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ /* Disable if new stream is null. O/w, if stream is
+ * disabled already, no need to disable again.
+ */
+ if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
+ core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
+
+ pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
+ if (!hwss_wait_for_blank_complete(pipe_ctx_old->stream_res.tg)) {
+ dm_error("DC: failed to blank crtc!\n");
+ BREAK_TO_DEBUGGER();
+ }
+ pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+ pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
+ pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
+
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+
+ dc->hwss.power_down_front_end(dc, pipe_ctx_old->pipe_idx);
+
+ pipe_ctx_old->stream = NULL;
+ }
+ }
+}
+
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+ enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ dc->hwss.reset_hw_ctx_wrap(dc, context);
+
+ /* Skip applying if no targets */
+ if (context->stream_count <= 0)
+ return DC_OK;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ apply_ctx_to_hw_fpga(dc, context);
+ return DC_OK;
+ }
+
+ /* Apply new context */
+ dcb->funcs->set_scratch_critical_state(dcb, true);
+
+ /* below is for real asic only */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream) {
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
+ dce_crtc_switch_to_clk_src(dc->hwseq,
+ pipe_ctx->clock_source, i);
+ continue;
+ }
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, dc->ctx->dc_bios,
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+ set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+#endif
+ /*TODO: when pplib works*/
+ apply_min_clocks(dc, context, &clocks_state, true);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+ if (context->bw.dcn.calc_clk.fclk_khz
+ > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+ struct dm_pp_clock_for_voltage_req clock;
+
+ clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
+ dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+ dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+ context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+ }
+ if (context->bw.dcn.calc_clk.dcfclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ struct dm_pp_clock_for_voltage_req clock;
+
+ clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
+ dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+ dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+ context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+ }
+ if (context->bw.dcn.calc_clk.dispclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ context->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ }
+ } else
+#endif
+ if (context->bw.dce.dispclk_khz
+ > dc->current_state->bw.dce.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ }
+ /* program audio wall clock. use HDMI as clock source if HDMI
+ * audio active. Otherwise, use DP as clock source
+ * first, loop to find any HDMI audio, if not, loop find DP audio
+ */
+ /* Setup audio rate clock source */
+ /* Issue:
+ * Audio lag happened on DP monitor when unplug a HDMI monitor
+ *
+ * Cause:
+ * In case of DP and HDMI connected or HDMI only, DCCG_AUDIO_DTO_SEL
+ * is set to either dto0 or dto1, audio should work fine.
+ * In case of DP connected only, DCCG_AUDIO_DTO_SEL should be dto1,
+ * set to dto0 will cause audio lag.
+ *
+ * Solution:
+ * Not optimized audio wall dto setup. When mode set, iterate pipe_ctx,
+ * find first available pipe with audio, setup audio wall DTO per topology
+ * instead of per pipe.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+ continue;
+
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &audio_output.pll_info);
+ break;
+ }
+ }
+
+ /* no HDMI audio is found, try DP audio */
+ if (i == dc->res_pool->pipe_count) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (!dc_is_dp_signal(pipe_ctx->stream->signal))
+ continue;
+
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &audio_output.pll_info);
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx->stream && pipe_ctx_old->stream
+ && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
+
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info,
+ &audio_output.crtc_info);
+
+ pipe_ctx->stream_res.audio->funcs->az_configure(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &pipe_ctx->stream->audio_info);
+ }
+
+ status = apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ if (dc->hwss.power_on_front_end)
+ dc->hwss.power_on_front_end(dc, pipe_ctx, context);
+
+ if (DC_OK != status)
+ return status;
+ }
+
+ /* pplib is notified if disp_num changed */
+ dc->hwss.set_bandwidth(dc, context, true);
+
+ /* to save power */
+ apply_min_clocks(dc, context, &clocks_state, false);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ enable_fbc(dc, context);
+
+#endif
+
+ return DC_OK;
+}
+
+/*******************************************************************************
+ * Front End programming
+ ******************************************************************************/
+static void set_default_colors(struct pipe_ctx *pipe_ctx)
+{
+ struct default_adjustment default_adjust = { 0 };
+
+ default_adjust.force_hw_default = false;
+ if (pipe_ctx->plane_state == NULL)
+ default_adjust.in_color_space = COLOR_SPACE_SRGB;
+ else
+ default_adjust.in_color_space =
+ pipe_ctx->plane_state->color_space;
+ if (pipe_ctx->stream == NULL)
+ default_adjust.out_color_space = COLOR_SPACE_SRGB;
+ else
+ default_adjust.out_color_space =
+ pipe_ctx->stream->output_color_space;
+ default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
+ default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;
+
+ /* display color depth */
+ default_adjust.color_depth =
+ pipe_ctx->stream->timing.display_color_depth;
+
+ /* Lb color depth */
+ default_adjust.lb_color_depth = pipe_ctx->plane_res.scl_data.lb_params.depth;
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_default(
+ pipe_ctx->plane_res.xfm, &default_adjust);
+}
+
+
+/*******************************************************************************
+ * In order to turn on/off specific surface we will program
+ * Blender + CRTC
+ *
+ * In case that we have two surfaces and they have a different visibility
+ * we can't turn off the CRTC since it will turn off the entire display
+ *
+ * |----------------------------------------------- |
+ * |bottom pipe|curr pipe | | |
+ * |Surface |Surface | Blender | CRCT |
+ * |visibility |visibility | Configuration| |
+ * |------------------------------------------------|
+ * | off | off | CURRENT_PIPE | blank |
+ * | off | on | CURRENT_PIPE | unblank |
+ * | on | off | OTHER_PIPE | unblank |
+ * | on | on | BLENDING | unblank |
+ * -------------------------------------------------|
+ *
+ ******************************************************************************/
+static void program_surface_visibility(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum blnd_mode blender_mode = BLND_MODE_CURRENT_PIPE;
+ bool blank_target = false;
+
+ if (pipe_ctx->bottom_pipe) {
+
+ /* For now we are supporting only two pipes */
+ ASSERT(pipe_ctx->bottom_pipe->bottom_pipe == NULL);
+
+ if (pipe_ctx->bottom_pipe->plane_state->visible) {
+ if (pipe_ctx->plane_state->visible)
+ blender_mode = BLND_MODE_BLENDING;
+ else
+ blender_mode = BLND_MODE_OTHER_PIPE;
+
+ } else if (!pipe_ctx->plane_state->visible)
+ blank_target = true;
+
+ } else if (!pipe_ctx->plane_state->visible)
+ blank_target = true;
+
+ dce_set_blender_mode(dc->hwseq, pipe_ctx->pipe_idx, blender_mode);
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
+
+}
+
+static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+{
+ struct xfm_grph_csc_adjustment adjust;
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+}
+
+/**
+ * TODO REMOVE, USE UPDATE INSTEAD
+ */
+static void set_plane_config(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+ unsigned int i;
+
+ memset(&adjust, 0, sizeof(adjust));
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+ dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+
+ set_default_colors(pipe_ctx);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ tbl_entry.color_space =
+ pipe_ctx->stream->output_color_space;
+
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] =
+ pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+ (pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+ program_scaler(dc, pipe_ctx);
+
+ program_surface_visibility(dc, pipe_ctx);
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &plane_state->plane_size,
+ plane_state->rotation,
+ NULL,
+ false);
+ if (mi->funcs->set_blank)
+ mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+
+ if (dc->config.gpu_vm_support)
+ mi->funcs->mem_input_program_pte_vm(
+ pipe_ctx->plane_res.mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation);
+}
+
+static void update_plane_addr(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.mi,
+ &plane_state->address,
+ plane_state->flip_immediate);
+
+ plane_state->status.requested_address = plane_state->address;
+}
+
+void dce110_update_pending_status(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+
+ plane_state->status.is_flip_pending =
+ pipe_ctx->plane_res.mi->funcs->mem_input_is_flip_pending(
+ pipe_ctx->plane_res.mi);
+
+ if (plane_state->status.is_flip_pending && !plane_state->visible)
+ pipe_ctx->plane_res.mi->current_address = pipe_ctx->plane_res.mi->request_address;
+
+ plane_state->status.current_address = pipe_ctx->plane_res.mi->current_address;
+ if (pipe_ctx->plane_res.mi->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye) {
+ plane_state->status.is_right_eye =\
+ !pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
+ }
+}
+
+void dce110_power_down(struct dc *dc)
+{
+ power_down_all_hw_blocks(dc);
+ disable_vga_and_power_gate_all_controllers(dc);
+}
+
+static bool wait_for_reset_trigger_to_occur(
+ struct dc_context *dc_ctx,
+ struct timing_generator *tg)
+{
+ bool rc = false;
+
+ /* To avoid endless loop we wait at most
+ * frames_to_wait_on_triggered_reset frames for the reset to occur. */
+ const uint32_t frames_to_wait_on_triggered_reset = 10;
+ uint32_t i;
+
+ for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
+
+ if (!tg->funcs->is_counter_moving(tg)) {
+ DC_ERROR("TG counter is not moving!\n");
+ break;
+ }
+
+ if (tg->funcs->did_triggered_reset_occur(tg)) {
+ rc = true;
+ /* usually occurs at i=1 */
+ DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
+ i);
+ break;
+ }
+
+ /* Wait for one frame. */
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ }
+
+ if (false == rc)
+ DC_ERROR("GSL: Timeout on reset trigger!\n");
+
+ return rc;
+}
+
+/* Enable timing synchronization for a group of Timing Generators. */
+static void dce110_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcp_gsl_params gsl_params = { 0 };
+ int i;
+
+ DC_SYNC_INFO("GSL: Setting-up...\n");
+
+ /* Designate a single TG in the group as a master.
+ * Since HW doesn't care which one, we always assign
+ * the 1st one in the group. */
+ gsl_params.gsl_group = 0;
+ gsl_params.gsl_master = grouped_pipes[0]->stream_res.tg->inst;
+
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
+ grouped_pipes[i]->stream_res.tg, &gsl_params);
+
+ /* Reset slave controllers on master VSync */
+ DC_SYNC_INFO("GSL: enabling trigger-reset\n");
+
+ for (i = 1 /* skip the master */; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg, gsl_params.gsl_group);
+
+
+
+ for (i = 1 /* skip the master */; i < group_size; i++) {
+ DC_SYNC_INFO("GSL: waiting for reset to occur.\n");
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
+ /* Regardless of success of the wait above, remove the reset or
+ * the driver will start timing out on Display requests. */
+ DC_SYNC_INFO("GSL: disabling trigger-reset.\n");
+ grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(grouped_pipes[i]->stream_res.tg);
+ }
+
+
+ /* GSL Vblank synchronization is a one time sync mechanism, assumption
+ * is that the sync'ed displays will not drift out of sync over time*/
+ DC_SYNC_INFO("GSL: Restoring register states.\n");
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->tear_down_global_swap_lock(grouped_pipes[i]->stream_res.tg);
+
+ DC_SYNC_INFO("GSL: Set-up complete.\n");
+}
+
+static void init_hw(struct dc *dc)
+{
+ int i;
+ struct dc_bios *bp;
+ struct transform *xfm;
+ struct abm *abm;
+
+ bp = dc->ctx->dc_bios;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ xfm = dc->res_pool->transforms[i];
+ xfm->funcs->transform_reset(xfm);
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, bp,
+ PIPE_GATING_CONTROL_INIT);
+ dc->hwss.enable_display_power_gating(
+ dc, i, bp,
+ PIPE_GATING_CONTROL_DISABLE);
+ dc->hwss.enable_display_pipe_clock_gating(
+ dc->ctx,
+ true);
+ }
+
+ dce_clock_gating_power_up(dc->hwseq, false);
+ /***************************************/
+
+ for (i = 0; i < dc->link_count; i++) {
+ /****************************************/
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector). */
+ struct dc_link *link = dc->links[i];
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ tg->funcs->disable_vga(tg);
+
+ /* Blank controller using driver code instead of
+ * command table. */
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+ struct audio *audio = dc->res_pool->audios[i];
+ audio->funcs->hw_init(audio);
+ }
+
+ abm = dc->res_pool->abm;
+ if (abm != NULL) {
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
+#endif
+
+}
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg)
+{
+ int j;
+ int num_cfgs = 0;
+
+ for (j = 0; j < context->stream_count; j++) {
+ int k;
+
+ const struct dc_stream_state *stream = context->streams[j];
+ struct dm_pp_single_disp_config *cfg =
+ &pp_display_cfg->disp_configs[num_cfgs];
+ const struct pipe_ctx *pipe_ctx = NULL;
+
+ for (k = 0; k < MAX_PIPES; k++)
+ if (stream == context->res_ctx.pipe_ctx[k].stream) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+
+ ASSERT(pipe_ctx != NULL);
+
+ num_cfgs++;
+ cfg->signal = pipe_ctx->stream->signal;
+ cfg->pipe_idx = pipe_ctx->pipe_idx;
+ cfg->src_height = stream->src.height;
+ cfg->src_width = stream->src.width;
+ cfg->ddi_channel_mapping =
+ stream->sink->link->ddi_channel_mapping.raw;
+ cfg->transmitter =
+ stream->sink->link->link_enc->transmitter;
+ cfg->link_settings.lane_count =
+ stream->sink->link->cur_link_settings.lane_count;
+ cfg->link_settings.link_rate =
+ stream->sink->link->cur_link_settings.link_rate;
+ cfg->link_settings.link_spread =
+ stream->sink->link->cur_link_settings.link_spread;
+ cfg->sym_clock = stream->phy_pix_clk;
+ /* Round v_refresh*/
+ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+ cfg->v_refresh /= stream->timing.h_total;
+ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+ / stream->timing.v_total;
+ }
+
+ pp_display_cfg->display_count = num_cfgs;
+}
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+ uint8_t j;
+ uint32_t min_vertical_blank_time = -1;
+
+ for (j = 0; j < context->stream_count; j++) {
+ struct dc_stream_state *stream = context->streams[j];
+ uint32_t vertical_blank_in_pixels = 0;
+ uint32_t vertical_blank_time = 0;
+
+ vertical_blank_in_pixels = stream->timing.h_total *
+ (stream->timing.v_total
+ - stream->timing.v_addressable);
+
+ vertical_blank_time = vertical_blank_in_pixels
+ * 1000 / stream->timing.pix_clk_khz;
+
+ if (min_vertical_blank_time > vertical_blank_time)
+ min_vertical_blank_time = vertical_blank_time;
+ }
+
+ return min_vertical_blank_time;
+}
+
+static int determine_sclk_from_bounding_box(
+ const struct dc *dc,
+ int required_sclk)
+{
+ int i;
+
+ /*
+ * Some asics do not give us sclk levels, so we just report the actual
+ * required sclk
+ */
+ if (dc->sclk_lvls.num_levels == 0)
+ return required_sclk;
+
+ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+ return dc->sclk_lvls.clocks_in_khz[i];
+ }
+ /*
+ * even maximum level could not satisfy requirement, this
+ * is unexpected at this stage, should have been caught at
+ * validation time
+ */
+ ASSERT(0);
+ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
+
+static void pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync =
+ context->bw.dce.all_displays_in_sync;
+ pp_display_cfg->nb_pstate_switch_disable =
+ context->bw.dce.nbp_state_change_enable == false;
+ pp_display_cfg->cpu_cc6_disable =
+ context->bw.dce.cpuc_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_disable =
+ context->bw.dce.cpup_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_separation_time =
+ context->bw.dce.blackout_recovery_time_us;
+
+ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER;
+
+ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+ dc,
+ context->bw.dce.sclk_khz);
+
+ pp_display_cfg->min_engine_clock_deep_sleep_khz
+ = context->bw.dce.sclk_deep_sleep_khz;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /* TODO: dce11.2*/
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+ pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ /* TODO: is this still applicable?*/
+ if (pp_display_cfg->display_count == 1) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index =
+ pp_display_cfg->disp_configs[0].pipe_idx;
+ pp_display_cfg->line_time_in_us = timing->h_total * 1000
+ / timing->pix_clk_khz;
+ }
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
+static void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ dce110_set_displaymarks(dc, context);
+
+ if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+
+ pplib_apply_display_requirements(dc, context);
+}
+
+static void dce110_program_front_end_for_pipe(
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct pipe_ctx *old_pipe = NULL;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+ struct pipe_ctx *cur_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+ unsigned int i;
+
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+
+ if (dc->current_state)
+ old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+ dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+
+ set_default_colors(pipe_ctx);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ tbl_entry.color_space =
+ pipe_ctx->stream->output_color_space;
+
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] =
+ pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+ (pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+
+ program_scaler(dc, pipe_ctx);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor && old_pipe->stream) {
+ if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+ else
+ enable_fbc(dc, dc->current_state);
+ }
+#endif
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &plane_state->plane_size,
+ plane_state->rotation,
+ NULL,
+ false);
+ if (mi->funcs->set_blank)
+ mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+
+ if (dc->config.gpu_vm_support)
+ mi->funcs->mem_input_program_pte_vm(
+ pipe_ctx->plane_res.mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation);
+
+ /* Moved programming gamma from dc to hwss */
+ if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+
+ dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;"
+ "clip: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->plane_state,
+ pipe_ctx->plane_state->address.grph.addr.high_part,
+ pipe_ctx->plane_state->address.grph.addr.low_part,
+ pipe_ctx->plane_state->src_rect.x,
+ pipe_ctx->plane_state->src_rect.y,
+ pipe_ctx->plane_state->src_rect.width,
+ pipe_ctx->plane_state->src_rect.height,
+ pipe_ctx->plane_state->dst_rect.x,
+ pipe_ctx->plane_state->dst_rect.y,
+ pipe_ctx->plane_state->dst_rect.width,
+ pipe_ctx->plane_state->dst_rect.height,
+ pipe_ctx->plane_state->clip_rect.x,
+ pipe_ctx->plane_state->clip_rect.y,
+ pipe_ctx->plane_state->clip_rect.width,
+ pipe_ctx->plane_state->clip_rect.height);
+
+ dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ "Pipe %d: width, height, x, y\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+}
+
+static void dce110_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context)
+{
+ int i, be_idx;
+
+ if (num_planes == 0)
+ return;
+
+ be_idx = -1;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (stream == context->res_ctx.pipe_ctx[i].stream) {
+ be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
+ break;
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* Need to allocate mem before program front end for Fiji */
+ if (pipe_ctx->plane_res.mi != NULL)
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
+ pipe_ctx->plane_res.mi,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ pipe_ctx->stream->timing.pix_clk_khz,
+ context->stream_count);
+
+ dce110_program_front_end_for_pipe(dc, pipe_ctx);
+ program_surface_visibility(dc, pipe_ctx);
+
+ }
+}
+
+static void dce110_power_down_fe(struct dc *dc, int fe_idx)
+{
+ /* Do not power down fe when stream is active on dce*/
+ if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
+ return;
+
+ dc->hwss.enable_display_power_gating(
+ dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);
+
+ dc->res_pool->transforms[fe_idx]->funcs->transform_reset(
+ dc->res_pool->transforms[fe_idx]);
+}
+
+static void dce110_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx)
+{
+ /* do nothing*/
+}
+
+static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix)
+{
+ int i;
+ struct out_csc_color_matrix tbl_entry;
+
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ enum dc_color_space color_space =
+ pipe_ctx->stream->output_color_space;
+
+ //uint16_t matrix[12];
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = color_space;
+ //tbl_entry.regval = matrix;
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+}
+
+static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
+
+static void optimize_shared_resources(struct dc *dc) {}
+
+static const struct hw_sequencer_funcs dce110_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+ .init_hw = init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+ .set_plane_config = set_plane_config,
+ .update_plane_addr = update_plane_addr,
+ .update_pending_status = dce110_update_pending_status,
+ .set_input_transfer_func = dce110_set_input_transfer_func,
+ .set_output_transfer_func = dce110_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dce110_enable_timing_synchronization,
+ .update_info_frame = dce110_update_info_frame,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
+ .enable_display_power_gating = dce110_enable_display_power_gating,
+ .power_down_front_end = dce110_power_down_fe,
+ .pipe_control_lock = dce_pipe_control_lock,
+ .set_bandwidth = dce110_set_bandwidth,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+ .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
+ .prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg,
+ .setup_stereo = NULL,
+ .set_avmute = dce110_set_avmute,
+ .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+ .ready_shared_resources = ready_shared_resources,
+ .optimize_shared_resources = optimize_shared_resources,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control,
+};
+
+void dce110_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dce110_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
new file mode 100644
index 000000000000..4d72bb99be93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -0,0 +1,81 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE110_H__
+#define __DC_HWSS_DCE110_H__
+
+#include "core_types.h"
+
+#define GAMMA_HW_POINTS_NUM 256
+struct dc;
+struct dc_state;
+struct dm_pp_display_configuration;
+
+void dce110_hw_sequencer_construct(struct dc *dc);
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context);
+
+void dce110_set_display_clock(struct dc_state *context);
+
+void dce110_set_displaymarks(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
+
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dce110_enable_accelerated_mode(struct dc *dc);
+
+void dce110_power_down(struct dc *dc);
+
+void dce110_update_pending_status(struct pipe_ctx *pipe_ctx);
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+void hwss_edp_power_control(
+ struct link_encoder *enc,
+ bool power_up);
+
+void hwss_edp_backlight_control(
+ struct dc_link *link,
+ bool enable);
+
+#endif /* __DC_HWSS_DCE110_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
new file mode 100644
index 000000000000..a06c6024deb4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+/* TODO: this needs to be looked at, used by Stella's workaround*/
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+
+#include "include/logger_interface.h"
+#include "inc/dce_calcs.h"
+
+#include "dce/dce_mem_input.h"
+
+static void set_flip_control(
+ struct dce_mem_input *mem_input110,
+ bool immediate)
+{
+ uint32_t value = 0;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_FLIP_CONTROL);
+
+ set_reg_field_value(value, 1,
+ UNP_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_PENDING_MODE);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_FLIP_CONTROL,
+ value);
+}
+
+/* chroma part */
+static void program_pri_addr_c(
+ struct dce_mem_input *mem_input110,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ uint32_t value = 0;
+ uint32_t temp = 0;
+ /*high register MUST be programmed first*/
+ temp = address.high_part &
+UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ value);
+
+ temp = 0;
+ value = 0;
+ temp = address.low_part >>
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
+ GRPH_PRIMARY_SURFACE_ADDRESS_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
+ value);
+}
+
+/* luma part */
+static void program_pri_addr_l(
+ struct dce_mem_input *mem_input110,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ uint32_t value = 0;
+ uint32_t temp = 0;
+
+ /*high register MUST be programmed first*/
+ temp = address.high_part &
+UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MASK;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
+ value);
+
+ temp = 0;
+ value = 0;
+ temp = address.low_part >>
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L,
+ GRPH_PRIMARY_SURFACE_ADDRESS_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_L,
+ value);
+}
+
+static void program_addr(
+ struct dce_mem_input *mem_input110,
+ const struct dc_plane_address *addr)
+{
+ switch (addr->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ program_pri_addr_l(
+ mem_input110,
+ addr->grph.addr);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ program_pri_addr_c(
+ mem_input110,
+ addr->video_progressive.chroma_addr);
+ program_pri_addr_l(
+ mem_input110,
+ addr->video_progressive.luma_addr);
+ break;
+ default:
+ /* not supported */
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+static void enable(struct dce_mem_input *mem_input110)
+{
+ uint32_t value = 0;
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_ENABLE);
+ set_reg_field_value(value, 1, UNP_GRPH_ENABLE, GRPH_ENABLE);
+ dm_write_reg(mem_input110->base.ctx,
+ mmUNP_GRPH_ENABLE,
+ value);
+}
+
+static void program_tiling(
+ struct dce_mem_input *mem_input110,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(value, info->gfx8.num_banks,
+ UNP_GRPH_CONTROL, GRPH_NUM_BANKS);
+
+ set_reg_field_value(value, info->gfx8.bank_width,
+ UNP_GRPH_CONTROL, GRPH_BANK_WIDTH_L);
+
+ set_reg_field_value(value, info->gfx8.bank_height,
+ UNP_GRPH_CONTROL, GRPH_BANK_HEIGHT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_aspect,
+ UNP_GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_split,
+ UNP_GRPH_CONTROL, GRPH_TILE_SPLIT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_mode,
+ UNP_GRPH_CONTROL, GRPH_MICRO_TILE_MODE_L);
+
+ set_reg_field_value(value, info->gfx8.pipe_config,
+ UNP_GRPH_CONTROL, GRPH_PIPE_CONFIG);
+
+ set_reg_field_value(value, info->gfx8.array_mode,
+ UNP_GRPH_CONTROL, GRPH_ARRAY_MODE);
+
+ set_reg_field_value(value, 1,
+ UNP_GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE);
+
+ set_reg_field_value(value, 0,
+ UNP_GRPH_CONTROL, GRPH_Z);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL,
+ value);
+
+ value = 0;
+
+ set_reg_field_value(value, info->gfx8.bank_width_c,
+ UNP_GRPH_CONTROL_C, GRPH_BANK_WIDTH_C);
+
+ set_reg_field_value(value, info->gfx8.bank_height_c,
+ UNP_GRPH_CONTROL_C, GRPH_BANK_HEIGHT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_aspect_c,
+ UNP_GRPH_CONTROL_C, GRPH_MACRO_TILE_ASPECT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_split_c,
+ UNP_GRPH_CONTROL_C, GRPH_TILE_SPLIT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_mode_c,
+ UNP_GRPH_CONTROL_C, GRPH_MICRO_TILE_MODE_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_C,
+ value);
+}
+
+static void program_size_and_rotation(
+ struct dce_mem_input *mem_input110,
+ enum dc_rotation_angle rotation,
+ const union plane_size *plane_size)
+{
+ uint32_t value = 0;
+ union plane_size local_size = *plane_size;
+
+ if (rotation == ROTATION_ANGLE_90 ||
+ rotation == ROTATION_ANGLE_270) {
+
+ uint32_t swap;
+ swap = local_size.video.luma_size.x;
+ local_size.video.luma_size.x =
+ local_size.video.luma_size.y;
+ local_size.video.luma_size.y = swap;
+
+ swap = local_size.video.luma_size.width;
+ local_size.video.luma_size.width =
+ local_size.video.luma_size.height;
+ local_size.video.luma_size.height = swap;
+
+ swap = local_size.video.chroma_size.x;
+ local_size.video.chroma_size.x =
+ local_size.video.chroma_size.y;
+ local_size.video.chroma_size.y = swap;
+
+ swap = local_size.video.chroma_size.width;
+ local_size.video.chroma_size.width =
+ local_size.video.chroma_size.height;
+ local_size.video.chroma_size.height = swap;
+ }
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_pitch,
+ UNP_GRPH_PITCH_L, GRPH_PITCH_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PITCH_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_pitch,
+ UNP_GRPH_PITCH_C, GRPH_PITCH_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PITCH_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_X_START_L, GRPH_X_START_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_START_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_X_START_C, GRPH_X_START_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_START_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_Y_START_L, GRPH_Y_START_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_START_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_Y_START_C, GRPH_Y_START_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_START_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_size.x +
+ local_size.video.luma_size.width,
+ UNP_GRPH_X_END_L, GRPH_X_END_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_END_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_size.x +
+ local_size.video.chroma_size.width,
+ UNP_GRPH_X_END_C, GRPH_X_END_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_END_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_size.y +
+ local_size.video.luma_size.height,
+ UNP_GRPH_Y_END_L, GRPH_Y_END_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_END_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_size.y +
+ local_size.video.chroma_size.height,
+ UNP_GRPH_Y_END_C, GRPH_Y_END_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_END_C,
+ value);
+
+ value = 0;
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ set_reg_field_value(value, 3,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ case ROTATION_ANGLE_180:
+ set_reg_field_value(value, 2,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ case ROTATION_ANGLE_270:
+ set_reg_field_value(value, 1,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ default:
+ set_reg_field_value(value, 0,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ }
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_HW_ROTATION,
+ value);
+}
+
+static void program_pixel_format(
+ struct dce_mem_input *mem_input110,
+ enum surface_pixel_format format)
+{
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ uint32_t value;
+ uint8_t grph_depth;
+ uint8_t grph_format;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ grph_depth = 0;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ grph_depth = 1;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ grph_depth = 2;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ grph_depth = 3;
+ grph_format = 0;
+ break;
+ default:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ grph_depth,
+ UNP_GRPH_CONTROL,
+ GRPH_DEPTH);
+ set_reg_field_value(
+ value,
+ grph_format,
+ UNP_GRPH_CONTROL,
+ GRPH_FORMAT);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL,
+ value);
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP);
+
+ /* VIDEO FORMAT 0 */
+ set_reg_field_value(
+ value,
+ 0,
+ UNP_GRPH_CONTROL_EXP,
+ VIDEO_FORMAT);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP,
+ value);
+
+ } else {
+ /* Video 422 and 420 needs UNP_GRPH_CONTROL_EXP programmed */
+ uint32_t value;
+ uint8_t video_format;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ video_format = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ video_format = 3;
+ break;
+ default:
+ video_format = 0;
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ video_format,
+ UNP_GRPH_CONTROL_EXP,
+ VIDEO_FORMAT);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP,
+ value);
+ }
+}
+
+bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+ uint32_t value;
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_UPDATE);
+
+ if (get_reg_field_value(value, UNP_GRPH_UPDATE,
+ GRPH_SURFACE_UPDATE_PENDING))
+ return true;
+
+ mem_input->current_address = mem_input->request_address;
+ return false;
+}
+
+bool dce_mem_input_v_program_surface_flip_and_addr(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+
+ set_flip_control(mem_input110, flip_immediate);
+ program_addr(mem_input110,
+ address);
+
+ mem_input->request_address = *address;
+
+ return true;
+}
+
+/* Scatter Gather param tables */
+static const unsigned int dvmm_Hw_Setting_2DTiling[4][9] = {
+ { 8, 64, 64, 8, 8, 1, 4, 0, 0},
+ { 16, 64, 32, 8, 16, 1, 8, 0, 0},
+ { 32, 32, 32, 16, 16, 1, 8, 0, 0},
+ { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
+};
+
+static const unsigned int dvmm_Hw_Setting_1DTiling[4][9] = {
+ { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
+ { 16, 256, 8, 2, 0, 1, 0, 0, 0},
+ { 32, 128, 8, 4, 0, 1, 0, 0, 0},
+ { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
+};
+
+static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
+ { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
+ { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
+ { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
+ { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
+};
+
+/* Helper to get table entry from surface info */
+static const unsigned int *get_dvmm_hw_setting(
+ union dc_tiling_info *tiling_info,
+ enum surface_pixel_format format,
+ bool chroma)
+{
+ enum bits_per_pixel {
+ bpp_8 = 0,
+ bpp_16,
+ bpp_32,
+ bpp_64
+ } bpp;
+
+ if (format >= SURFACE_PIXEL_FORMAT_INVALID)
+ bpp = bpp_32;
+ else if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ bpp = chroma ? bpp_16 : bpp_8;
+ else
+ bpp = bpp_8;
+
+ switch (tiling_info->gfx8.array_mode) {
+ case DC_ARRAY_1D_TILED_THIN1:
+ case DC_ARRAY_1D_TILED_THICK:
+ case DC_ARRAY_PRT_TILED_THIN1:
+ return dvmm_Hw_Setting_1DTiling[bpp];
+ case DC_ARRAY_2D_TILED_THIN1:
+ case DC_ARRAY_2D_TILED_THICK:
+ case DC_ARRAY_2D_TILED_X_THICK:
+ case DC_ARRAY_PRT_2D_TILED_THIN1:
+ case DC_ARRAY_PRT_2D_TILED_THICK:
+ return dvmm_Hw_Setting_2DTiling[bpp];
+ case DC_ARRAY_LINEAR_GENERAL:
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ return dvmm_Hw_Setting_Linear[bpp];
+ default:
+ return dvmm_Hw_Setting_2DTiling[bpp];
+ }
+}
+
+void dce_mem_input_v_program_pte_vm(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+ const unsigned int *pte = get_dvmm_hw_setting(tiling_info, format, false);
+ const unsigned int *pte_chroma = get_dvmm_hw_setting(tiling_info, format, true);
+
+ unsigned int page_width = 0;
+ unsigned int page_height = 0;
+ unsigned int page_width_chroma = 0;
+ unsigned int page_height_chroma = 0;
+ unsigned int temp_page_width = pte[1];
+ unsigned int temp_page_height = pte[2];
+ unsigned int min_pte_before_flip = 0;
+ unsigned int min_pte_before_flip_chroma = 0;
+ uint32_t value = 0;
+
+ while ((temp_page_width >>= 1) != 0)
+ page_width++;
+ while ((temp_page_height >>= 1) != 0)
+ page_height++;
+
+ temp_page_width = pte_chroma[1];
+ temp_page_height = pte_chroma[2];
+ while ((temp_page_width >>= 1) != 0)
+ page_width_chroma++;
+ while ((temp_page_height >>= 1) != 0)
+ page_height_chroma++;
+
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ min_pte_before_flip = pte[4];
+ min_pte_before_flip_chroma = pte_chroma[4];
+ break;
+ default:
+ min_pte_before_flip = pte[3];
+ min_pte_before_flip_chroma = pte_chroma[3];
+ break;
+ }
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT);
+ /* TODO: un-hardcode requestlimit */
+ set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L);
+ set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL);
+ set_reg_field_value(value, page_width, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH);
+ set_reg_field_value(value, page_height, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT);
+ set_reg_field_value(value, min_pte_before_flip, UNP_DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL);
+ set_reg_field_value(value, pte[5], UNP_DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK);
+ set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C);
+ set_reg_field_value(value, page_width_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_WIDTH_C);
+ set_reg_field_value(value, page_height_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_HEIGHT_C);
+ set_reg_field_value(value, min_pte_before_flip_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_MIN_PTE_BEFORE_FLIP_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C);
+ set_reg_field_value(value, pte_chroma[5], UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_PTE_REQ_PER_CHUNK_C);
+ set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_MAX_PTE_REQ_OUTSTANDING_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
+}
+
+void dce_mem_input_v_program_surface_config(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizotal_mirror)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+
+ enable(mem_input110);
+ program_tiling(mem_input110, tiling_info, format);
+ program_size_and_rotation(mem_input110, rotation, plane_size);
+ program_pixel_format(mem_input110, format);
+}
+
+static void program_urgency_watermark(
+ const struct dc_context *ctx,
+ const uint32_t urgency_addr,
+ const uint32_t wm_addr,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ /* register value */
+ uint32_t urgency_cntl = 0;
+ uint32_t wm_mask_cntl = 0;
+
+ /*Write mask to enable reading/writing of watermark set A*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ urgency_cntl = dm_read_reg(ctx, urgency_addr);
+
+ set_reg_field_value(
+ urgency_cntl,
+ marks_low.a_mark,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_LOW_WATERMARK);
+
+ set_reg_field_value(
+ urgency_cntl,
+ total_dest_line_time_ns,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_HIGH_WATERMARK);
+ dm_write_reg(ctx, urgency_addr, urgency_cntl);
+
+ /*Write mask to enable reading/writing of watermark set B*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ urgency_cntl = dm_read_reg(ctx, urgency_addr);
+
+ set_reg_field_value(urgency_cntl,
+ marks_low.b_mark,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_LOW_WATERMARK);
+
+ set_reg_field_value(urgency_cntl,
+ total_dest_line_time_ns,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_HIGH_WATERMARK);
+
+ dm_write_reg(ctx, urgency_addr, urgency_cntl);
+}
+
+static void program_urgency_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark(
+ ctx,
+ mmDPGV0_PIPE_URGENCY_CONTROL,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ marks_low,
+ total_dest_line_time_ns);
+}
+
+static void program_urgency_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark(
+ ctx,
+ mmDPGV1_PIPE_URGENCY_CONTROL,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ marks_low,
+ total_dest_line_time_ns);
+}
+
+static void program_stutter_watermark(
+ const struct dc_context *ctx,
+ const uint32_t stutter_addr,
+ const uint32_t wm_addr,
+ struct dce_watermarks marks)
+{
+ /* register value */
+ uint32_t stutter_cntl = 0;
+ uint32_t wm_mask_cntl = 0;
+
+ /*Write mask to enable reading/writing of watermark set A*/
+
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ stutter_cntl = dm_read_reg(ctx, stutter_addr);
+
+ if (ctx->dc->debug.disable_stutter) {
+ set_reg_field_value(stutter_cntl,
+ 0,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE);
+ } else {
+ set_reg_field_value(stutter_cntl,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE);
+ }
+
+ set_reg_field_value(stutter_cntl,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_IGNORE_FBC);
+
+ /*Write watermark set A*/
+ set_reg_field_value(stutter_cntl,
+ marks.a_mark,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK);
+ dm_write_reg(ctx, stutter_addr, stutter_cntl);
+
+ /*Write mask to enable reading/writing of watermark set B*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ stutter_cntl = dm_read_reg(ctx, stutter_addr);
+ /*Write watermark set B*/
+ set_reg_field_value(stutter_cntl,
+ marks.b_mark,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK);
+ dm_write_reg(ctx, stutter_addr, stutter_cntl);
+}
+
+static void program_stutter_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_stutter_watermark(ctx,
+ mmDPGV0_PIPE_STUTTER_CONTROL,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ marks);
+}
+
+static void program_stutter_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_stutter_watermark(ctx,
+ mmDPGV1_PIPE_STUTTER_CONTROL,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ marks);
+}
+
+static void program_nbp_watermark(
+ const struct dc_context *ctx,
+ const uint32_t wm_mask_ctrl_addr,
+ const uint32_t nbp_pstate_ctrl_addr,
+ struct dce_watermarks marks)
+{
+ uint32_t value;
+
+ /* Write mask to enable reading/writing of watermark set A */
+
+ value = dm_read_reg(ctx, wm_mask_ctrl_addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_mask_ctrl_addr, value);
+
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write watermark set A */
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ marks.a_mark,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write mask to enable reading/writing of watermark set B */
+ value = dm_read_reg(ctx, wm_mask_ctrl_addr);
+ set_reg_field_value(
+ value,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_mask_ctrl_addr, value);
+
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write watermark set B */
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ marks.b_mark,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+}
+
+static void program_nbp_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_nbp_watermark(ctx,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ mmDPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ marks);
+}
+
+static void program_nbp_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_nbp_watermark(ctx,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ mmDPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ marks);
+}
+
+void dce_mem_input_v_program_display_marks(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark_l(
+ mem_input->ctx,
+ urgent,
+ total_dest_line_time_ns);
+
+ program_nbp_watermark_l(
+ mem_input->ctx,
+ nbp);
+
+ program_stutter_watermark_l(
+ mem_input->ctx,
+ stutter);
+
+}
+
+void dce_mem_input_program_chroma_display_marks(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark_c(
+ mem_input->ctx,
+ urgent,
+ total_dest_line_time_ns);
+
+ program_nbp_watermark_c(
+ mem_input->ctx,
+ nbp);
+
+ program_stutter_watermark_c(
+ mem_input->ctx,
+ stutter);
+}
+
+void dce110_allocate_mem_input_v(
+ struct mem_input *mi,
+ uint32_t h_total,/* for current stream */
+ uint32_t v_total,/* for current stream */
+ uint32_t pix_clk_khz,/* for current stream */
+ uint32_t total_stream_num)
+{
+ uint32_t addr;
+ uint32_t value;
+ uint32_t pix_dur;
+ if (pix_clk_khz != 0) {
+ addr = mmDPGV0_PIPE_ARBITRATION_CONTROL1;
+ value = dm_read_reg(mi->ctx, addr);
+ pix_dur = 1000000000ULL / pix_clk_khz;
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPGV0_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV1_PIPE_ARBITRATION_CONTROL1;
+ value = dm_read_reg(mi->ctx, addr);
+ pix_dur = 1000000000ULL / pix_clk_khz;
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPGV1_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV0_PIPE_ARBITRATION_CONTROL2;
+ value = 0x4000800;
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV1_PIPE_ARBITRATION_CONTROL2;
+ value = 0x4000800;
+ dm_write_reg(mi->ctx, addr, value);
+ }
+
+}
+
+void dce110_free_mem_input_v(
+ struct mem_input *mi,
+ uint32_t total_stream_num)
+{
+}
+
+static struct mem_input_funcs dce110_mem_input_v_funcs = {
+ .mem_input_program_display_marks =
+ dce_mem_input_v_program_display_marks,
+ .mem_input_program_chroma_display_marks =
+ dce_mem_input_program_chroma_display_marks,
+ .allocate_mem_input = dce110_allocate_mem_input_v,
+ .free_mem_input = dce110_free_mem_input_v,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mem_input_v_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm =
+ dce_mem_input_v_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mem_input_v_program_surface_config,
+ .mem_input_is_flip_pending =
+ dce_mem_input_v_is_surface_pending
+};
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce110_mem_input_v_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx)
+{
+ dce_mi->base.funcs = &dce110_mem_input_v_funcs;
+ dce_mi->base.ctx = ctx;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
new file mode 100644
index 000000000000..f01d4a607fea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
@@ -0,0 +1,35 @@
+/* Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_V_DCE110_H__
+#define __DC_MEM_INPUT_V_DCE110_H__
+
+#include "mem_input.h"
+#include "dce/dce_mem_input.h"
+
+void dce110_mem_input_v_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
new file mode 100644
index 000000000000..feb397b5c1a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce110_transform_v.h"
+#include "basics/conversion.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+enum {
+ OUTPUT_CSC_MATRIX_SIZE = 12
+};
+
+/* constrast:0 - 2.0, default 1.0 */
+#define UNDERLAY_CONTRAST_DEFAULT 100
+#define UNDERLAY_CONTRAST_MAX 200
+#define UNDERLAY_CONTRAST_MIN 0
+#define UNDERLAY_CONTRAST_STEP 1
+#define UNDERLAY_CONTRAST_DIVIDER 100
+
+/* Saturation: 0 - 2.0; default 1.0 */
+#define UNDERLAY_SATURATION_DEFAULT 100 /*1.00*/
+#define UNDERLAY_SATURATION_MIN 0
+#define UNDERLAY_SATURATION_MAX 200 /* 2.00 */
+#define UNDERLAY_SATURATION_STEP 1 /* 0.01 */
+/*actual max overlay saturation
+ * value = UNDERLAY_SATURATION_MAX /UNDERLAY_SATURATION_DIVIDER
+ */
+
+/* Hue */
+#define UNDERLAY_HUE_DEFAULT 0
+#define UNDERLAY_HUE_MIN -300
+#define UNDERLAY_HUE_MAX 300
+#define UNDERLAY_HUE_STEP 5
+#define UNDERLAY_HUE_DIVIDER 10 /* HW range: -30 ~ +30 */
+#define UNDERLAY_SATURATION_DIVIDER 100
+
+/* Brightness: in DAL usually -.25 ~ .25.
+ * In MMD is -100 to +100 in 16-235 range; which when scaled to full range is
+ * ~-116 to +116. When normalized this is about 0.4566.
+ * With 100 divider this becomes 46, but we may use another for better precision
+ * The ideal one is 100/219 ((100/255)*(255/219)),
+ * i.e. min/max = +-100, divider = 219
+ * default 0.0
+ */
+#define UNDERLAY_BRIGHTNESS_DEFAULT 0
+#define UNDERLAY_BRIGHTNESS_MIN -46 /* ~116/255 */
+#define UNDERLAY_BRIGHTNESS_MAX 46
+#define UNDERLAY_BRIGHTNESS_STEP 1 /* .01 */
+#define UNDERLAY_BRIGHTNESS_DIVIDER 100
+
+static const struct out_csc_color_matrix global_color_matrix[] = {
+{ COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+{ COLOR_SPACE_SRGB_LIMITED,
+ { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
+{ COLOR_SPACE_YCBCR601,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
+ 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
+ 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+/* TODO: correct values below */
+{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
+};
+
+enum csc_color_mode {
+ /* 00 - BITS2:0 Bypass */
+ CSC_COLOR_MODE_GRAPHICS_BYPASS,
+ /* 01 - hard coded coefficient TV RGB */
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
+ /* 04 - programmable OUTPUT CSC coefficient */
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
+};
+
+enum grph_color_adjust_option {
+ GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
+ GRPH_COLOR_MATRIX_SW
+};
+
+static void program_color_matrix_v(
+ struct dce_transform *xfm_dce,
+ const struct out_csc_color_matrix *tbl_entry,
+ enum grph_color_adjust_option options)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t cntl_value = dm_read_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL);
+ bool use_set_a = (get_reg_field_value(cntl_value,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE) != 4);
+
+ set_reg_field_value(
+ cntl_value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ if (use_set_a) {
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C11_C12_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[0],
+ OUTPUT_CSC_C11_C12_A,
+ OUTPUT_CSC_C11_A);
+
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[1],
+ OUTPUT_CSC_C11_C12_A,
+ OUTPUT_CSC_C12_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C13_C14_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[2],
+ OUTPUT_CSC_C13_C14_A,
+ OUTPUT_CSC_C13_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[3],
+ OUTPUT_CSC_C13_C14_A,
+ OUTPUT_CSC_C14_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C21_C22_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[4],
+ OUTPUT_CSC_C21_C22_A,
+ OUTPUT_CSC_C21_A);
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[5],
+ OUTPUT_CSC_C21_C22_A,
+ OUTPUT_CSC_C22_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C23_C24_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[6],
+ OUTPUT_CSC_C23_C24_A,
+ OUTPUT_CSC_C23_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[7],
+ OUTPUT_CSC_C23_C24_A,
+ OUTPUT_CSC_C24_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C31_C32_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[8],
+ OUTPUT_CSC_C31_C32_A,
+ OUTPUT_CSC_C31_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[9],
+ OUTPUT_CSC_C31_C32_A,
+ OUTPUT_CSC_C32_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C33_C34_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[10],
+ OUTPUT_CSC_C33_C34_A,
+ OUTPUT_CSC_C33_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[11],
+ OUTPUT_CSC_C33_C34_A,
+ OUTPUT_CSC_C34_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ set_reg_field_value(
+ cntl_value,
+ 4,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ } else {
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C11_C12_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[0],
+ OUTPUT_CSC_C11_C12_B,
+ OUTPUT_CSC_C11_B);
+
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[1],
+ OUTPUT_CSC_C11_C12_B,
+ OUTPUT_CSC_C12_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C13_C14_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[2],
+ OUTPUT_CSC_C13_C14_B,
+ OUTPUT_CSC_C13_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[3],
+ OUTPUT_CSC_C13_C14_B,
+ OUTPUT_CSC_C14_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C21_C22_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[4],
+ OUTPUT_CSC_C21_C22_B,
+ OUTPUT_CSC_C21_B);
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[5],
+ OUTPUT_CSC_C21_C22_B,
+ OUTPUT_CSC_C22_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C23_C24_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[6],
+ OUTPUT_CSC_C23_C24_B,
+ OUTPUT_CSC_C23_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[7],
+ OUTPUT_CSC_C23_C24_B,
+ OUTPUT_CSC_C24_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C31_C32_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[8],
+ OUTPUT_CSC_C31_C32_B,
+ OUTPUT_CSC_C31_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[9],
+ OUTPUT_CSC_C31_C32_B,
+ OUTPUT_CSC_C32_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C33_C34_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[10],
+ OUTPUT_CSC_C33_C34_B,
+ OUTPUT_CSC_C33_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[11],
+ OUTPUT_CSC_C33_C34_B,
+ OUTPUT_CSC_C34_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ set_reg_field_value(
+ cntl_value,
+ 5,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ }
+
+ dm_write_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL, cntl_value);
+}
+
+static bool configure_graphics_mode_v(
+ struct dce_transform *xfm_dce,
+ enum csc_color_mode config,
+ enum graphics_csc_adjust_type csc_adjust_type,
+ enum dc_color_space color_space)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
+ if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC)
+ return true;
+
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* by pass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* not supported for underlay on CZ */
+ return false;
+
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ set_reg_field_value(
+ value,
+ 2,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ set_reg_field_value(
+ value,
+ 3,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ default:
+ return false;
+ }
+
+ } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* by pass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* not supported for underlay on CZ */
+ return false;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ set_reg_field_value(
+ value,
+ 2,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ set_reg_field_value(
+ value,
+ 3,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ default:
+ return false;
+ }
+
+ } else
+ /* by pass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
+ dm_write_reg(ctx, addr, value);
+
+ return true;
+}
+
+/*TODO: color depth is not correct when this is called*/
+static void set_Denormalization(struct transform *xfm,
+ enum dc_color_depth color_depth)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL);
+
+ switch (color_depth) {
+ case COLOR_DEPTH_888:
+ /* 255/256 for 8 bit output color depth */
+ set_reg_field_value(
+ value,
+ 1,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ case COLOR_DEPTH_101010:
+ /* 1023/1024 for 10 bit output color depth */
+ set_reg_field_value(
+ value,
+ 2,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ case COLOR_DEPTH_121212:
+ /* 4095/4096 for 12 bit output color depth */
+ set_reg_field_value(
+ value,
+ 3,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ default:
+ /* not valid case */
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ DENORM_CLAMP_CONTROL,
+ DENORM_10BIT_OUT);
+
+ dm_write_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL, value);
+}
+
+struct input_csc_matrix {
+ enum dc_color_space color_space;
+ uint32_t regval[12];
+};
+
+static const struct input_csc_matrix input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+/*1_1 1_2 1_3 1_4 2_1 2_2 2_3 2_4 3_1 3_2 3_3 3_4 */
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0x0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0x0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0x0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0x0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
+static void program_input_csc(
+ struct transform *xfm, enum dc_color_space color_space)
+{
+ int arr_size = sizeof(input_csc_matrix)/sizeof(struct input_csc_matrix);
+ struct dc_context *ctx = xfm->ctx;
+ const uint32_t *regval = NULL;
+ bool use_set_a;
+ uint32_t value;
+ int i;
+
+ for (i = 0; i < arr_size; i++)
+ if (input_csc_matrix[i].color_space == color_space) {
+ regval = input_csc_matrix[i].regval;
+ break;
+ }
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /*
+ * 1 == set A, the logic is 'if currently we're not using set A,
+ * then use set A, otherwise use set B'
+ */
+ value = dm_read_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL);
+ use_set_a = get_reg_field_value(
+ value, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_MODE) != 1;
+
+ if (use_set_a) {
+ /* fixed S2.13 format */
+ value = 0;
+ set_reg_field_value(
+ value, regval[0], INPUT_CSC_C11_C12_A, INPUT_CSC_C11_A);
+ set_reg_field_value(
+ value, regval[1], INPUT_CSC_C11_C12_A, INPUT_CSC_C12_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C11_C12_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[2], INPUT_CSC_C13_C14_A, INPUT_CSC_C13_A);
+ set_reg_field_value(
+ value, regval[3], INPUT_CSC_C13_C14_A, INPUT_CSC_C14_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C13_C14_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[4], INPUT_CSC_C21_C22_A, INPUT_CSC_C21_A);
+ set_reg_field_value(
+ value, regval[5], INPUT_CSC_C21_C22_A, INPUT_CSC_C22_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C21_C22_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[6], INPUT_CSC_C23_C24_A, INPUT_CSC_C23_A);
+ set_reg_field_value(
+ value, regval[7], INPUT_CSC_C23_C24_A, INPUT_CSC_C24_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C23_C24_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[8], INPUT_CSC_C31_C32_A, INPUT_CSC_C31_A);
+ set_reg_field_value(
+ value, regval[9], INPUT_CSC_C31_C32_A, INPUT_CSC_C32_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C31_C32_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[10], INPUT_CSC_C33_C34_A, INPUT_CSC_C33_A);
+ set_reg_field_value(
+ value, regval[11], INPUT_CSC_C33_C34_A, INPUT_CSC_C34_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C33_C34_A, value);
+ } else {
+ /* fixed S2.13 format */
+ value = 0;
+ set_reg_field_value(
+ value, regval[0], INPUT_CSC_C11_C12_B, INPUT_CSC_C11_B);
+ set_reg_field_value(
+ value, regval[1], INPUT_CSC_C11_C12_B, INPUT_CSC_C12_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C11_C12_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[2], INPUT_CSC_C13_C14_B, INPUT_CSC_C13_B);
+ set_reg_field_value(
+ value, regval[3], INPUT_CSC_C13_C14_B, INPUT_CSC_C14_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C13_C14_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[4], INPUT_CSC_C21_C22_B, INPUT_CSC_C21_B);
+ set_reg_field_value(
+ value, regval[5], INPUT_CSC_C21_C22_B, INPUT_CSC_C22_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C21_C22_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[6], INPUT_CSC_C23_C24_B, INPUT_CSC_C23_B);
+ set_reg_field_value(
+ value, regval[7], INPUT_CSC_C23_C24_B, INPUT_CSC_C24_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C23_C24_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[8], INPUT_CSC_C31_C32_B, INPUT_CSC_C31_B);
+ set_reg_field_value(
+ value, regval[9], INPUT_CSC_C31_C32_B, INPUT_CSC_C32_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C31_C32_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[10], INPUT_CSC_C33_C34_B, INPUT_CSC_C33_B);
+ set_reg_field_value(
+ value, regval[11], INPUT_CSC_C33_C34_B, INPUT_CSC_C34_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C33_C34_B, value);
+ }
+
+ /* KK: leave INPUT_CSC_CONVERSION_MODE at default */
+ value = 0;
+ /*
+ * select 8.4 input type instead of default 12.0. From the discussion
+ * with HW team, this format depends on the UNP surface format, so for
+ * 8-bit we should select 8.4 (4 bits truncated). For 10 it should be
+ * 10.2. For Carrizo we only support 8-bit surfaces on underlay pipe
+ * so we can always keep this at 8.4 (input_type=2). If the later asics
+ * start supporting 10+ bits, we will have a problem: surface
+ * programming including UNP_GRPH* is being done in DalISR after this,
+ * so either we pass surface format to here, or move this logic to ISR
+ */
+
+ set_reg_field_value(
+ value, 2, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_INPUT_TYPE);
+ set_reg_field_value(
+ value,
+ use_set_a ? 1 : 2,
+ COL_MAN_INPUT_CSC_CONTROL,
+ INPUT_CSC_MODE);
+
+ dm_write_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL, value);
+}
+
+void dce110_opp_v_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
+
+ if (default_adjust->force_hw_default == false) {
+ const struct out_csc_color_matrix *elm;
+ /* currently parameter not in use */
+ enum grph_color_adjust_option option =
+ GRPH_COLOR_MATRIX_HW_DEFAULT;
+ uint32_t i;
+ /*
+ * HW default false we program locally defined matrix
+ * HW default true we use predefined hw matrix and we
+ * do not need to program matrix
+ * OEM wants the HW default via runtime parameter.
+ */
+ option = GRPH_COLOR_MATRIX_SW;
+
+ for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
+ elm = &global_color_matrix[i];
+ if (elm->color_space != default_adjust->out_color_space)
+ continue;
+ /* program the matrix with default values from this
+ * file
+ */
+ program_color_matrix_v(xfm_dce, elm, option);
+ config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ break;
+ }
+ }
+
+ program_input_csc(xfm, default_adjust->in_color_space);
+
+ /* configure the what we programmed :
+ * 1. Default values from this file
+ * 2. Use hardware default from ROM_A and we do not need to program
+ * matrix
+ */
+
+ configure_graphics_mode_v(xfm_dce, config,
+ default_adjust->csc_adjust_type,
+ default_adjust->out_color_space);
+
+ set_Denormalization(xfm, default_adjust->color_depth);
+}
+
+void dce110_opp_v_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+
+ program_color_matrix_v(
+ xfm_dce, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW);
+
+ /* We did everything ,now program DxOUTPUT_CSC_CONTROL */
+ configure_graphics_mode_v(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
+ tbl_entry->color_space);
+
+ /*TODO: Check if denormalization is needed*/
+ /*set_Denormalization(opp, adjust->color_depth);*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
new file mode 100644
index 000000000000..e98ed3058ea2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dce110_transform_v.h"
+
+static void power_on_lut(struct transform *xfm,
+ bool power_on, bool inputgamma, bool regamma)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+ int i;
+
+ if (power_on) {
+ if (inputgamma)
+ set_reg_field_value(
+ value,
+ 1,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+ if (regamma)
+ set_reg_field_value(
+ value,
+ 1,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+ } else {
+ if (inputgamma)
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+ if (regamma)
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+ }
+
+ dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);
+
+ for (i = 0; i < 3; i++) {
+ value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+ if (get_reg_field_value(value,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS) &&
+ get_reg_field_value(value,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS))
+ break;
+
+ udelay(2);
+ }
+}
+
+static void set_bypass_input_gamma(struct dce_transform *xfm_dce)
+{
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx,
+ mmCOL_MAN_INPUT_GAMMA_CONTROL1);
+
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_INPUT_GAMMA_CONTROL1,
+ INPUT_GAMMA_MODE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmCOL_MAN_INPUT_GAMMA_CONTROL1, value);
+}
+
+static void configure_regamma_mode(struct dce_transform *xfm_dce, uint32_t mode)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ mode,
+ GAMMA_CORR_CONTROL,
+ GAMMA_CORR_MODE);
+
+ dm_write_reg(xfm_dce->base.ctx, mmGAMMA_CORR_CONTROL, 0);
+}
+
+/*
+ *****************************************************************************
+ * Function: regamma_config_regions_and_segments
+ *
+ * build regamma curve by using predefined hw points
+ * uses interface parameters ,like EDID coeff.
+ *
+ * @param : parameters interface parameters
+ * @return void
+ *
+ * @note
+ *
+ * @see
+ *
+ *****************************************************************************
+ */
+static void regamma_config_regions_and_segments(
+ struct dce_transform *xfm_dce, const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ uint32_t value = 0;
+
+ {
+ set_reg_field_value(
+ value,
+ params->arr_points[0].custom_float_x,
+ GAMMA_CORR_CNTLA_START_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_START);
+
+ set_reg_field_value(
+ value,
+ 0,
+ GAMMA_CORR_CNTLA_START_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT);
+
+ dm_write_reg(xfm_dce->base.ctx, mmGAMMA_CORR_CNTLA_START_CNTL,
+ value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[0].custom_float_slope,
+ GAMMA_CORR_CNTLA_SLOPE_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_SLOPE_CNTL, value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[1].custom_float_x,
+ GAMMA_CORR_CNTLA_END_CNTL1,
+ GAMMA_CORR_CNTLA_EXP_REGION_END);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_END_CNTL1, value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[2].custom_float_slope,
+ GAMMA_CORR_CNTLA_END_CNTL2,
+ GAMMA_CORR_CNTLA_EXP_REGION_END_BASE);
+
+ set_reg_field_value(
+ value,
+ params->arr_points[1].custom_float_y,
+ GAMMA_CORR_CNTLA_END_CNTL2,
+ GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_END_CNTL2, value);
+ }
+
+ curve = params->arr_curve_points;
+
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS);
+
+ dm_write_reg(
+ xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_0_1,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_2_3,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_4_5,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_6_7,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_8_9,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_10_11,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_12_13,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_14_15,
+ value);
+ }
+}
+
+static void program_pwl(struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ 7,
+ GAMMA_CORR_LUT_WRITE_EN_MASK,
+ GAMMA_CORR_LUT_WRITE_EN_MASK);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_LUT_WRITE_EN_MASK, value);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_LUT_INDEX, 0);
+
+ /* Program REGAMMA_LUT_DATA */
+ {
+ const uint32_t addr = mmGAMMA_CORR_LUT_DATA;
+ uint32_t i = 0;
+ const struct pwl_result_data *rgb =
+ params->rgb_resulted;
+
+ while (i != params->hw_points_num) {
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->red_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->green_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->blue_reg);
+
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_red_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_green_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_blue_reg);
+
+ ++rgb;
+ ++i;
+ }
+ }
+}
+
+void dce110_opp_program_regamma_pwl_v(
+ struct transform *xfm,
+ const struct pwl_params *params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ /* Setup regions */
+ regamma_config_regions_and_segments(xfm_dce, params);
+
+ set_bypass_input_gamma(xfm_dce);
+
+ /* Power on gamma LUT memory */
+ power_on_lut(xfm, true, false, true);
+
+ /* Program PWL */
+ program_pwl(xfm_dce, params);
+
+ /* program regamma config */
+ configure_regamma_mode(xfm_dce, 1);
+
+ /* Power return to auto back */
+ power_on_lut(xfm, false, false, true);
+}
+
+void dce110_opp_power_on_regamma_lut_v(
+ struct transform *xfm,
+ bool power_on)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_FORCE);
+
+ set_reg_field_value(
+ value,
+ power_on,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE);
+
+ set_reg_field_value(
+ value,
+ power_on,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+
+ dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);
+}
+
+void dce110_opp_set_regamma_mode_v(
+ struct transform *xfm,
+ enum opp_regamma mode)
+{
+ // TODO: need to implement the function
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
new file mode 100644
index 000000000000..3545e43a4b77
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dce/dce_opp.h"
+#include "dce110_opp_v.h"
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static const struct opp_funcs funcs = {
+ .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
+ .opp_destroy = dce110_opp_destroy,
+ .opp_program_fmt = dce110_opp_program_fmt,
+ .opp_program_bit_depth_reduction =
+ dce110_opp_program_bit_depth_reduction
+};
+
+void dce110_opp_v_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx)
+{
+ opp110->base.funcs = &funcs;
+
+ opp110->base.ctx = ctx;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
new file mode 100644
index 000000000000..152af4c418cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
@@ -0,0 +1,39 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCE110_V_H__
+#define __DC_OPP_DCE110_V_H__
+
+#include "dc_types.h"
+#include "opp.h"
+#include "core_types.h"
+
+void dce110_opp_v_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx);
+
+/* underlay callbacks */
+
+
+
+#endif /* __DC_OPP_DCE110_V_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
new file mode 100644
index 000000000000..42df17f9aa8d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -0,0 +1,1329 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "dce110/dce110_resource.h"
+
+#include "include/irq_service_interface.h"
+#include "dce/dce_audio.h"
+#include "dce110/dce110_timing_generator.h"
+#include "irq/dce110/irq_service_dce110.h"
+#include "dce110/dce110_timing_generator_v.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_mem_input.h"
+#include "dce110/dce110_mem_input_v.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce110/dce110_transform_v.h"
+#include "dce/dce_opp.h"
+#include "dce110/dce110_opp_v.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dce110/dce110_compressor.h"
+#endif
+
+#include "reg_helper.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+#ifndef DPHY_RX_FAST_TRAINING_CAPABLE
+ #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1
+#endif
+
+static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE110_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_110_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+/* AG TBD Needs to be reduced back to 3 pipes once dce10 hw sequencer implemented. */
+
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_100_110(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps carrizo_resource_cap = {
+ .num_timing_generator = 3,
+ .num_video_plane = 1,
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
+};
+
+static const struct resource_caps stoney_resource_cap = {
+ .num_timing_generator = 2,
+ .num_video_plane = 1,
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x4819
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce110_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce110_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_stoney_reg = {
+ HWSEQ_ST_REG_LIST()
+};
+
+static const struct dce_hwseq_registers hwseq_cz_reg = {
+ HWSEQ_CZ_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE11_MASK_SH_LIST(__SHIFT),
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE11_MASK_SH_LIST(_MASK),
+};
+
+static struct dce_hwseq *dce110_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ?
+ &hwseq_stoney_reg : &hwseq_cz_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ hws->wa.blnd_crtc_trigger = true;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce110_stream_encoder_create,
+ .create_hwseq = dce110_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE11_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE11_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE11_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+
+static struct mem_input *dce110_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 3;
+ return &dce_mi->base;
+}
+
+static void dce110_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce110_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ return &transform->base;
+}
+
+static struct input_pixel_processor *dce110_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 594000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+static struct link_encoder *dce110_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+static struct output_pixel_processor *dce110_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce110_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce110_clock_source_destroy(struct clock_source **clk_src)
+{
+ struct dce110_clk_src *dce110_clk_src;
+
+ if (!clk_src)
+ return;
+
+ dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src);
+
+ kfree(dce110_clk_src->dp_ss_params);
+ kfree(dce110_clk_src->hdmi_ss_params);
+ kfree(dce110_clk_src->dvi_ss_params);
+
+ kfree(dce110_clk_src);
+ *clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce110_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce110_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce110_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+
+ /*TODO: is this halved for YCbCr 420? in that case we might want to move
+ * the pixel clock normalization for hdmi up to here instead of doing it
+ * in pll_adjust_pix_clk
+ */
+ pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
+ pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+ /* TODO: un-hardcode*/
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420);
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+ }
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ pixel_clk_params->requested_pix_clk = pixel_clk_params->requested_pix_clk / 2;
+ }
+}
+
+void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+}
+
+static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx)
+{
+ if (pipe_ctx->pipe_idx != underlay_idx)
+ return true;
+ if (!pipe_ctx->plane_state)
+ return false;
+ if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ return true;
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (!is_surface_pixel_format_supported(pipe_ctx,
+ dc->res_pool->underlay_pipe_index))
+ return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ /* TODO: validate audio ASIC caps, encoder */
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+static bool dce110_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ bool result = false;
+
+ dm_logger_write(
+ dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "%s: start",
+ __func__);
+
+ if (bw_calcs(
+ dc->ctx,
+ dc->bw_dceip,
+ dc->bw_vbios,
+ context->res_ctx.pipe_ctx,
+ dc->res_pool->pipe_count,
+ &context->bw.dce))
+ result = true;
+
+ if (!result)
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
+ "%s: %dx%d@%d Bandwidth validation failed!\n",
+ __func__,
+ context->streams[0]->timing.h_addressable,
+ context->streams[0]->timing.v_addressable,
+ context->streams[0]->timing.pix_clk_khz);
+
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+ struct log_entry log_entry;
+ dm_logger_open(
+ dc->ctx->logger,
+ &log_entry,
+ LOG_BANDWIDTH_CALCS);
+ dm_logger_append(&log_entry, "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw.dce.stutter_mode_enable);
+ dm_logger_append(&log_entry,
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+ context->bw.dce.all_displays_in_sync,
+ context->bw.dce.dispclk_khz,
+ context->bw.dce.sclk_khz,
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+ dm_logger_close(&log_entry);
+ }
+ return result;
+}
+
+static bool dce110_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 2)
+ return false;
+
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ struct dc_plane_state *plane =
+ context->stream_status[i].plane_states[j];
+
+ /* underlay validation */
+ if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+
+ if ((plane->src_rect.width > 1920 ||
+ plane->src_rect.height > 1080))
+ return false;
+
+ /* irrespective of plane format,
+ * stream should be RGB encoded
+ */
+ if (context->streams[i]->timing.pixel_encoding
+ != PIXEL_ENCODING_RGB)
+ return false;
+
+ }
+
+ }
+ }
+
+ return true;
+}
+
+enum dc_status dce110_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce110_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static enum dc_status dce110_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+static enum dc_status dce110_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce110_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static struct pipe_ctx *dce110_acquire_underlay(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+ struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = pool->underlay_pipe_index;
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
+
+ if (res_ctx->pipe_ctx[underlay_idx].stream)
+ return NULL;
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx];
+ pipe_ctx->plane_res.mi = pool->mis[underlay_idx];
+ /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/
+ pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx];
+ pipe_ctx->stream_res.opp = pool->opps[underlay_idx];
+ pipe_ctx->pipe_idx = underlay_idx;
+
+ pipe_ctx->stream = stream;
+
+ if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) {
+ struct tg_color black_color = {0};
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+
+ dc->hwss.enable_display_power_gating(
+ dc,
+ pipe_ctx->pipe_idx,
+ dcb, PIPE_GATING_CONTROL_DISABLE);
+
+ /*
+ * This is for powering on underlay, so crtc does not
+ * need to be enabled
+ */
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg,
+ &stream->timing,
+ false);
+
+ pipe_ctx->stream_res.tg->funcs->enable_advanced_request(
+ pipe_ctx->stream_res.tg,
+ true,
+ &stream->timing);
+
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi,
+ stream->timing.h_total,
+ stream->timing.v_total,
+ stream->timing.pix_clk_khz,
+ context->stream_count);
+
+ color_space_to_black_color(dc,
+ COLOR_SPACE_YCBCR601, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+ }
+
+ return pipe_ctx;
+}
+
+static void dce110_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+
+static const struct resource_funcs dce110_res_pool_funcs = {
+ .destroy = dce110_destroy_resource_pool,
+ .link_enc_create = dce110_link_encoder_create,
+ .validate_guaranteed = dce110_validate_guaranteed,
+ .validate_bandwidth = dce110_validate_bandwidth,
+ .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
+ .add_stream_to_ctx = dce110_add_stream_to_ctx,
+ .validate_global = dce110_validate_global
+};
+
+static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv),
+ GFP_KERNEL);
+ struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv),
+ GFP_KERNEL);
+ struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv),
+ GFP_KERNEL);
+ struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
+ GFP_KERNEL);
+
+ if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+ kfree(dce110_tgv);
+ kfree(dce110_xfmv);
+ kfree(dce110_miv);
+ kfree(dce110_oppv);
+ return false;
+ }
+
+ dce110_opp_v_construct(dce110_oppv, ctx);
+
+ dce110_timing_generator_v_construct(dce110_tgv, ctx);
+ dce110_mem_input_v_construct(dce110_miv, ctx);
+ dce110_transform_v_construct(dce110_xfmv, ctx);
+
+ pool->opps[pool->pipe_count] = &dce110_oppv->base;
+ pool->timing_generators[pool->pipe_count] = &dce110_tgv->base;
+ pool->mis[pool->pipe_count] = &dce110_miv->base;
+ pool->transforms[pool->pipe_count] = &dce110_xfmv->base;
+ pool->pipe_count++;
+
+ /* update the public caps to indicate an underlay is available */
+ ctx->dc->caps.max_slave_planes = 1;
+ ctx->dc->caps.max_slave_planes = 1;
+
+ return true;
+}
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels clks = {0};
+
+ /*do system clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &clks);
+ /* convert all the clock fro kHz to fix point mHz */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels/8], 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*2/8], 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*3/8], 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*4/8], 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*5/8], 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*6/8], 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+ dc->sclk_lvls = clks;
+
+ /*do display clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ &clks);
+ dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1], 1000);
+ dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &clks);
+
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+}
+
+const struct resource_caps *dce110_resource_cap(
+ struct hw_asic_id *asic_id)
+{
+ if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
+ return &stoney_resource_cap;
+ else
+ return &carrizo_resource_cap;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool,
+ struct hw_asic_id asic_id)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = dce110_resource_cap(&ctx->asic_id);
+ pool->base.funcs = &dce110_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap harcoding *
+ *************************************************/
+
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.underlay_pipe_index = pool->base.pipe_count;
+
+ dc->caps.max_downscale_ratio = 150;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1,
+ &clk_src_regs[1], false);
+
+ pool->base.clk_src_count = 2;
+
+ /* TODO: find out if CZ support 3 PLLs */
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce110_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce110_timing_generator_create(
+ ctx, i, &dce110_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce110_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce110_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce110_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce110_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
+
+
+
+#endif
+ if (!underlay_create(ctx, &pool->base))
+ goto res_create_fail;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce110_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce110_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct hw_asic_id asic_id)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool, asic_id))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
new file mode 100644
index 000000000000..e5f168c1f8c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
@@ -0,0 +1,49 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE110_H__
+#define __DC_RESOURCE_DCE110_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+#define TO_DCE110_RES_POOL(pool)\
+ container_of(pool, struct dce110_resource_pool, base)
+
+struct dce110_resource_pool {
+ struct resource_pool base;
+};
+
+void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx);
+
+struct resource_pool *dce110_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct hw_asic_id asic_id);
+
+#endif /* __DC_RESOURCE_DCE110_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
new file mode 100644
index 000000000000..4befce6cd87a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -0,0 +1,1966 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+#include "dc.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce110_timing_generator.h"
+
+#include "timing_generator.h"
+
+
+#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
+
+#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
+#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASKhw + 1)
+
+#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
+#define DCP_REG(reg) (reg + tg110->offsets.dcp)
+
+/* Flowing register offsets are same in files of
+ * dce/dce_11_0_d.h
+ * dce/vi_polaris10_p/vi_polaris10_d.h
+ *
+ * So we can create dce110 timing generator to use it.
+ */
+
+
+/*
+* apply_front_porch_workaround
+*
+* This is a workaround for a bug that has existed since R5xx and has not been
+* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
+*/
+static void dce110_timing_generator_apply_front_porch_workaround(
+ struct timing_generator *tg,
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: is_in_vertical_blank
+ *
+ * @brief
+ * check the current status of CRTC to check if we are in Vertical Blank
+ * regioneased" state
+ *
+ * @return
+ * true if currently in blank region, false otherwise
+ *
+ *****************************************************************************
+ */
+static bool dce110_timing_generator_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ addr = CRTC_REG(mmCRTC_STATUS);
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTC_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+void dce110_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ uint32_t regval;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = CRTC_REG(mmCRTC_CONTROL);
+
+ regval = dm_read_reg(tg->ctx, address);
+ set_reg_field_value(regval, early_cntl,
+ CRTC_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
+ dm_write_reg(tg->ctx, address, regval);
+}
+
+/**
+ * Enable CRTC
+ * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ */
+bool dce110_timing_generator_enable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
+ /*
+ * 3 is used to make sure V_UPDATE occurs at the beginning of the first
+ * line of vertical front porch
+ */
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_MASTER_UPDATE_MODE,
+ MASTER_UPDATE_MODE);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE), value);
+
+ /* TODO: may want this on to catch underflow */
+ value = 0;
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK), value);
+
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true);
+
+ return result == BP_RESULT_OK;
+}
+
+void dce110_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/**
+ *****************************************************************************
+ * Function: disable_stereo
+ *
+ * @brief
+ * Disables active stereo on controller
+ * Frame Packing need to be disabled in vBlank or when CRTC not running
+ *****************************************************************************
+ */
+#if 0
+@TODOSTEREO
+static void disable_stereo(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_3D_STRUCTURE_CONTROL);
+ uint32_t value = 0;
+ uint32_t test = 0;
+ uint32_t field = 0;
+ uint32_t struc_en = 0;
+ uint32_t struc_stereo_sel_ovr = 0;
+
+ value = dm_read_reg(tg->ctx, addr);
+ struc_en = get_reg_field_value(
+ value,
+ CRTC_3D_STRUCTURE_CONTROL,
+ CRTC_3D_STRUCTURE_EN);
+
+ struc_stereo_sel_ovr = get_reg_field_value(
+ value,
+ CRTC_3D_STRUCTURE_CONTROL,
+ CRTC_3D_STRUCTURE_STEREO_SEL_OVR);
+
+ /*
+ * When disabling Frame Packing in 2 step mode, we need to program both
+ * registers at the same frame
+ * Programming it in the beginning of VActive makes sure we are ok
+ */
+
+ if (struc_en != 0 && struc_stereo_sel_ovr == 0) {
+ tg->funcs->wait_for_vblank(tg);
+ tg->funcs->wait_for_vactive(tg);
+ }
+
+ value = 0;
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = tg->regs[IDX_CRTC_STEREO_CONTROL];
+ dm_write_reg(tg->ctx, addr, value);
+}
+#endif
+
+/**
+ * disable_crtc - call ASIC Control Object to disable Timing generator.
+ */
+bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, false);
+
+ /* Need to make sure stereo is disabled according to the DCE5.0 spec */
+
+ /*
+ * @TODOSTEREO call this when adding stereo support
+ * tg->funcs->disable_stereo(tg);
+ */
+
+ return result == BP_RESULT_OK;
+}
+
+/**
+* program_horz_count_by_2
+* Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
+*
+*/
+static void program_horz_count_by_2(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t regval;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ regval = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_COUNT_CONTROL));
+
+ set_reg_field_value(regval, 0, CRTC_COUNT_CONTROL,
+ CRTC_HORZ_COUNT_BY2_EN);
+
+ if (timing->flags.HORZ_COUNT_BY_TWO)
+ set_reg_field_value(regval, 1, CRTC_COUNT_CONTROL,
+ CRTC_HORZ_COUNT_BY2_EN);
+
+ dm_write_reg(tg->ctx,
+ CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
+}
+
+/**
+ * program_timing_generator
+ * Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
+ * Call ASIC Control Object to program Timings.
+ */
+bool dce110_timing_generator_program_timing_generator(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing)
+{
+ enum bp_result result;
+ struct bp_hw_crtc_timing_parameters bp_params;
+ struct dc_crtc_timing patched_crtc_timing;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ uint32_t vsync_offset = dc_crtc_timing->v_border_bottom +
+ dc_crtc_timing->v_front_porch;
+ uint32_t v_sync_start =dc_crtc_timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = dc_crtc_timing->h_border_right +
+ dc_crtc_timing->h_front_porch;
+ uint32_t h_sync_start = dc_crtc_timing->h_addressable + hsync_offset;
+
+ memset(&bp_params, 0, sizeof(struct bp_hw_crtc_timing_parameters));
+
+ /* Due to an asic bug we need to apply the Front Porch workaround prior
+ * to programming the timing.
+ */
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ dce110_timing_generator_apply_front_porch_workaround(tg, &patched_crtc_timing);
+
+ bp_params.controller_id = tg110->controller_id;
+
+ bp_params.h_total = patched_crtc_timing.h_total;
+ bp_params.h_addressable =
+ patched_crtc_timing.h_addressable;
+ bp_params.v_total = patched_crtc_timing.v_total;
+ bp_params.v_addressable = patched_crtc_timing.v_addressable;
+
+ bp_params.h_sync_start = h_sync_start;
+ bp_params.h_sync_width = patched_crtc_timing.h_sync_width;
+ bp_params.v_sync_start = v_sync_start;
+ bp_params.v_sync_width = patched_crtc_timing.v_sync_width;
+
+ /* Set overscan */
+ bp_params.h_overscan_left =
+ patched_crtc_timing.h_border_left;
+ bp_params.h_overscan_right =
+ patched_crtc_timing.h_border_right;
+ bp_params.v_overscan_top = patched_crtc_timing.v_border_top;
+ bp_params.v_overscan_bottom =
+ patched_crtc_timing.v_border_bottom;
+
+ /* Set flags */
+ if (patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY == 1)
+ bp_params.flags.HSYNC_POSITIVE_POLARITY = 1;
+
+ if (patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY == 1)
+ bp_params.flags.VSYNC_POSITIVE_POLARITY = 1;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1)
+ bp_params.flags.INTERLACE = 1;
+
+ if (patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1)
+ bp_params.flags.HORZ_COUNT_BY_TWO = 1;
+
+ result = tg->bp->funcs->program_crtc_timing(tg->bp, &bp_params);
+
+ program_horz_count_by_2(tg, &patched_crtc_timing);
+
+ tg110->base.funcs->enable_advanced_request(tg, true, &patched_crtc_timing);
+
+ /* Enable stereo - only when we need to pack 3D frame. Other types
+ * of stereo handled in explicit call */
+
+ return result == BP_RESULT_OK;
+}
+
+/**
+ *****************************************************************************
+ * Function: set_drr
+ *
+ * @brief
+ * Program dynamic refresh rate registers m_DxCRTC_V_TOTAL_*.
+ *
+ * @param [in] pHwCrtcTiming: point to H
+ * wCrtcTiming struct
+ *****************************************************************************
+ */
+void dce110_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+ /* register values */
+ uint32_t v_total_min = 0;
+ uint32_t v_total_max = 0;
+ uint32_t v_total_cntl = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ uint32_t addr = 0;
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ v_total_min = dm_read_reg(tg->ctx, addr);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ v_total_max = dm_read_reg(tg->ctx, addr);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
+ v_total_cntl = dm_read_reg(tg->ctx, addr);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ set_reg_field_value(v_total_max,
+ params->vertical_total_max - 1,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+
+ set_reg_field_value(v_total_min,
+ params->vertical_total_min - 1,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+
+ set_reg_field_value(v_total_cntl,
+ 1,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MIN_SEL);
+
+ set_reg_field_value(v_total_cntl,
+ 1,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MAX_SEL);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_ON_EVENT);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK_EN);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK);
+ } else {
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK);
+ set_reg_field_value(v_total_min,
+ 0,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+ set_reg_field_value(v_total_max,
+ 0,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MIN_SEL);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MAX_SEL);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_ON_EVENT);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
+ }
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ dm_write_reg(tg->ctx, addr, v_total_min);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ dm_write_reg(tg->ctx, addr, v_total_max);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
+ dm_write_reg(tg->ctx, addr, v_total_cntl);
+}
+
+void dce110_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t static_screen_cntl = 0;
+ uint32_t addr = 0;
+
+ addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
+ static_screen_cntl = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(static_screen_cntl,
+ value,
+ CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK);
+
+ set_reg_field_value(static_screen_cntl,
+ 2,
+ CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_FRAME_COUNT);
+
+ dm_write_reg(tg->ctx, addr, static_screen_cntl);
+}
+
+/*
+ * get_vblank_counter
+ *
+ * @brief
+ * Get counter for vertical blanks. use register CRTC_STATUS_FRAME_COUNT which
+ * holds the counter of frames.
+ *
+ * @param
+ * struct timing_generator *tg - [in] timing generator which controls the
+ * desired CRTC
+ *
+ * @return
+ * Counter of frames, which should equal to number of vblanks.
+ */
+uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_STATUS_FRAME_COUNT);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+ uint32_t field = get_reg_field_value(
+ value, CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+/**
+ *****************************************************************************
+ * Function: dce110_timing_generator_get_position
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] position
+ *****************************************************************************
+ */
+void dce110_timing_generator_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_STATUS_POSITION));
+
+ position->horizontal_count = get_reg_field_value(
+ value,
+ CRTC_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(
+ value,
+ CRTC_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_NOM_VERT_POSITION));
+
+ position->nominal_vcount = get_reg_field_value(
+ value,
+ CRTC_NOM_VERT_POSITION,
+ CRTC_VERT_COUNT_NOM);
+}
+
+/**
+ *****************************************************************************
+ * Function: get_crtc_scanoutpos
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] vpos, hpos
+ *****************************************************************************
+ */
+void dce110_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ uint32_t value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_BLANK_START_END));
+
+ *v_blank_start = get_reg_field_value(value,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+ *v_blank_end = get_reg_field_value(value,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ dce110_timing_generator_get_position(
+ tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+/* TODO: is it safe to assume that mask/shift of Primary and Underlay
+ * are the same?
+ * For example: today CRTC_H_TOTAL == CRTCV_H_TOTAL but is it always
+ * guaranteed? */
+void dce110_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start =timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+ uint32_t tmp = 0;
+
+ addr = CRTC_REG(mmCRTC_H_TOTAL);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->h_total - 1,
+ CRTC_H_TOTAL,
+ CRTC_H_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ /* In case of V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and
+ * V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_H_BLANK_START_END);
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_END);
+
+ tmp = tmp + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_BLANK_START_END);
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->v_total - (v_sync_start + timing->v_border_top);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ tmp = tmp + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value;
+ uint32_t addr;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ /* color ramp generator mixes 16-bits color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ dyn_range,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO
+ * color bits should be left aligned aligned to MSB
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+ dm_write_reg(ctx, addr, value);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * we will make a loop of 6 in which we prepare the mask,
+ * then write, then prepare the color for next write.
+ * first iteration will write mask only,
+ * but each next iteration color prepared in
+ * previous iteration will be written within new mask,
+ * the last component will written separately,
+ * mask is not changing between 6th and 7th write
+ * and color will be prepared by last iteration
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ set_reg_field_value(
+ value,
+ (1 << index),
+ CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_MASK);
+ /* write color component */
+ dm_write_reg(ctx, addr, value);
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ set_reg_field_value(
+ value,
+ dst_color[index],
+ CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_DATA);
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ dm_write_reg(ctx, addr, value);
+
+ /* enable test pattern */
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment for the first ramp for one color gradation
+ * 1 gradation for 6-bit color is 2^10
+ * gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 8,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ inc_base + 2,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 8,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 5,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 384 << 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ default:
+ break;
+ }
+ dm_write_reg(ctx, addr, value);
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
+ dm_write_reg(ctx, addr, value);
+
+ /* enable test pattern */
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+ /* add color depth translation here */
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ value = 0;
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL), value);
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_COLOR), value);
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS),
+ value);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+* dce110_timing_generator_validate_timing
+* The timing generators support a maximum display size of is 8192 x 8192 pixels,
+* including both active display and blanking periods. Check H Total and V Total.
+*/
+bool dce110_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal)
+{
+ uint32_t h_blank;
+ uint32_t h_back_porch, hsync_offset, h_sync_start;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ ASSERT(timing != NULL);
+
+ if (!timing)
+ return false;
+
+ hsync_offset = timing->h_border_right + timing->h_front_porch;
+ h_sync_start = timing->h_addressable + hsync_offset;
+
+ /* Currently we don't support 3D, so block all 3D timings */
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
+ return false;
+
+ /* Temporarily blocking interlacing mode until it's supported */
+ if (timing->flags.INTERLACE == 1)
+ return false;
+
+ /* Check maximum number of pixels supported by Timing Generator
+ * (Currently will never fail, in order to fail needs display which
+ * needs more than 8192 horizontal and
+ * more than 8192 vertical total pixels)
+ */
+ if (timing->h_total > tg110->max_h_total ||
+ timing->v_total > tg110->max_v_total)
+ return false;
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
+
+ if (h_blank < tg110->min_h_blank)
+ return false;
+
+ if (timing->h_front_porch < tg110->min_h_front_porch)
+ return false;
+
+ h_back_porch = h_blank - (h_sync_start -
+ timing->h_addressable -
+ timing->h_border_right -
+ timing->h_sync_width);
+
+ if (h_back_porch < tg110->min_h_back_porch)
+ return false;
+
+ return true;
+}
+
+/**
+* Wait till we are at the beginning of VBlank.
+*/
+void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
+{
+ /* We want to catch beginning of VBlank here, so if the first try are
+ * in VBlank, we might be very close to Active, in this case wait for
+ * another frame
+ */
+ while (dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+* Wait till we are in VActive (anywhere in VActive)
+*/
+void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: dce110_timing_generator_setup_global_swap_lock
+ *
+ * @brief
+ * Setups Global Swap Lock group for current pipe
+ * Pipe can join or leave GSL group, become a TimingServer or TimingClient
+ *
+ * @param [in] gsl_params: setup data
+ *****************************************************************************
+ */
+
+void dce110_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
+ uint32_t check_point = FLIP_READY_BACK_LOOKUP;
+
+ value = dm_read_reg(tg->ctx, address);
+
+ /* This pipe will belong to GSL Group zero. */
+ set_reg_field_value(value,
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL0_EN);
+
+ set_reg_field_value(value,
+ gsl_params->gsl_master == tg->inst,
+ DCP_GSL_CONTROL,
+ DCP_GSL_MASTER_EN);
+
+ set_reg_field_value(value,
+ HFLIP_READY_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
+
+ /* Keep signal low (pending high) during 6 lines.
+ * Also defines minimum interval before re-checking signal. */
+ set_reg_field_value(value,
+ HFLIP_CHECK_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+
+
+ {
+ uint32_t value_crtc_vtotal;
+
+ value_crtc_vtotal = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_TOTAL));
+
+ set_reg_field_value(value,
+ 0,/* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ DCP_GSL_CONTROL,
+ DCP_GSL_SYNC_SOURCE);
+
+ /* Checkpoint relative to end of frame */
+ check_point = get_reg_field_value(value_crtc_vtotal,
+ CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_GSL_WINDOW), 0);
+ }
+
+ set_reg_field_value(value,
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
+
+ dm_write_reg(tg->ctx, address, value);
+
+ /********************************************************************/
+ address = CRTC_REG(mmCRTC_GSL_CONTROL);
+
+ value = 0;
+ set_reg_field_value(value,
+ check_point - FLIP_READY_BACK_LOOKUP,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM);
+
+ set_reg_field_value(value,
+ VFLIP_READY_DELAY,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_FORCE_DELAY);
+
+ dm_write_reg(tg->ctx, address, value);
+}
+
+void dce110_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ /* Clear all the register writes done by
+ * dce110_timing_generator_setup_global_swap_lock
+ */
+
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
+
+ value = 0;
+
+ /* This pipe will belong to GSL Group zero. */
+ /* Settig HW default values from reg specs */
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL0_EN);
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_MASTER_EN);
+
+ set_reg_field_value(value,
+ 0x2,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
+
+ set_reg_field_value(value,
+ 0x6,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+
+ /* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
+ {
+ uint32_t value_crtc_vtotal;
+
+ value_crtc_vtotal = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_TOTAL));
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_SYNC_SOURCE);
+ }
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
+
+ dm_write_reg(tg->ctx, address, value);
+
+ /********************************************************************/
+ address = CRTC_REG(mmCRTC_GSL_CONTROL);
+
+ value = 0;
+ set_reg_field_value(value,
+ 0,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM);
+
+ set_reg_field_value(value,
+ 0x2,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_FORCE_DELAY);
+
+ dm_write_reg(tg->ctx, address, value);
+}
+/**
+ *****************************************************************************
+ * Function: is_counter_moving
+ *
+ * @brief
+ * check if the timing generator is currently going
+ *
+ * @return
+ * true if currently going, false if currently paused or stopped.
+ *
+ *****************************************************************************
+ */
+bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg)
+{
+ struct crtc_position position1, position2;
+
+ tg->funcs->get_position(tg, &position1);
+ tg->funcs->get_position(tg, &position2);
+
+ if (position1.horizontal_count == position2.horizontal_count &&
+ position1.vertical_count == position2.vertical_count)
+ return false;
+ else
+ return true;
+}
+
+void dce110_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/*TODO: Figure out if we need this function. */
+void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
+ bool lock)
+{
+ struct dc_context *ctx = tg->ctx;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ lock ? 1 : 0,
+ CRTC_MASTER_UPDATE_LOCK,
+ MASTER_UPDATE_LOCK);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source_tg_inst)
+{
+ uint32_t value;
+ uint32_t rising_edge = 0;
+ uint32_t falling_edge = 0;
+ enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Setup trigger edge */
+ {
+ uint32_t pol_value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_SYNC_A_CNTL));
+
+ /* Register spec has reversed definition:
+ * 0 for positive, 1 for negative */
+ if (get_reg_field_value(pol_value,
+ CRTC_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL) == 0) {
+ rising_edge = 1;
+ } else {
+ falling_edge = 1;
+ }
+ }
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
+
+ trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;
+
+ set_reg_field_value(value,
+ trig_src_select,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
+
+ set_reg_field_value(value,
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
+
+ set_reg_field_value(value,
+ rising_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_RISING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ falling_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ 0, /* send every signal */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_FREQUENCY_SELECT);
+
+ set_reg_field_value(value,
+ 0, /* no delay */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_DELAY);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
+
+ /**************************************************************/
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 2, /* force H count to H_TOTAL and V count to V_TOTAL */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 1, /* TriggerB - we never use TriggerA */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+}
+
+void dce110_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 0, /* force counter now mode is disabled */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+
+ /********************************************************************/
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
+
+ set_reg_field_value(value,
+ TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
+
+ set_reg_field_value(value,
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
+}
+
+/**
+ *****************************************************************************
+ * @brief
+ * Checks whether CRTC triggered reset occurred
+ *
+ * @return
+ * true if triggered reset occurred, false otherwise
+ *****************************************************************************
+ */
+bool dce110_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ return get_reg_field_value(value,
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+}
+
+/**
+ * dce110_timing_generator_disable_vga
+ * Turn OFF VGA Mode and Timing - DxVGA_CONTROL
+ * VGA Mode and VGA Timing is used by VBIOS on CRT Monitors;
+ */
+void dce110_timing_generator_disable_vga(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ switch (tg110->controller_id) {
+ case CONTROLLER_ID_D0:
+ addr = mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D1:
+ addr = mmD2VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D2:
+ addr = mmD3VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D3:
+ addr = mmD4VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D4:
+ addr = mmD5VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D5:
+ addr = mmD6VGA_CONTROL;
+ break;
+ default:
+ break;
+ }
+ value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
+ set_reg_field_value(
+ value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/**
+* set_overscan_color_black
+*
+* @param :black_color is one of the color space
+* :this routine will set overscan black color according to the color space.
+* @return none
+*/
+
+void dce110_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t addr;
+ uint32_t value = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ set_reg_field_value(
+ value,
+ color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ set_reg_field_value(
+ value,
+ color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
+ dm_write_reg(ctx, addr, value);
+ addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ dm_write_reg(ctx, addr, value);
+ /* This is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
+ dm_write_reg(ctx, addr, value);
+
+ /* TO DO we have to program EXT registers and we need to know LB DATA
+ * format because it is used when more 10 , i.e. 12 bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+
+}
+
+void dce110_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+void dce110_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce110_timing_generator_program_blanking(tg, timing);
+}
+
+bool dce110_tg_is_blanked(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL));
+
+ if (get_reg_field_value(
+ value,
+ CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN) == 1 &&
+ get_reg_field_value(
+ value,
+ CRTC_BLANK_CONTROL,
+ CRTC_CURRENT_BLANK_STATE) == 1)
+ return true;
+ return false;
+}
+
+void dce110_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_DOUBLE_BUFFER_CONTROL,
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_DOUBLE_BUFFER_CONTROL), value);
+ value = 0;
+
+ if (enable_blanking) {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), value);
+
+ } else
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), 0);
+}
+
+bool dce110_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ return dce110_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
+}
+
+void dce110_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce110_timing_generator_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce110_timing_generator_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dce110_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce110_tg_program_blank_color(tg, blank_color);
+ if (overscan_color != NULL)
+ dce110_tg_set_overscan_color(tg, overscan_color);
+}
+
+/* Gets first line of blank region of the display timing for CRTC
+ * and programms is as a trigger to fire vertical interrupt
+ */
+bool dce110_arm_vert_intr(struct timing_generator *tg, uint8_t width)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_blank_start = 0;
+ uint32_t v_blank_end = 0;
+ uint32_t val = 0;
+ uint32_t h_position, v_position;
+
+ tg->funcs->get_scanoutpos(
+ tg,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ if (v_blank_start == 0 || v_blank_end == 0)
+ return false;
+
+ set_reg_field_value(
+ val,
+ v_blank_start,
+ CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_START);
+
+ /* Set interval width for interrupt to fire to 1 scanline */
+ set_reg_field_value(
+ val,
+ v_blank_start + width,
+ CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_END);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERTICAL_INTERRUPT0_POSITION), val);
+
+ return true;
+}
+
+static const struct timing_generator_funcs dce110_tg_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = dce110_tg_program_timing,
+ .enable_crtc = dce110_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ .get_position = dce110_timing_generator_get_position,
+ .get_frame_count = dce110_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce110_timing_generator_set_early_control,
+ .wait_for_state = dce110_tg_wait_for_state,
+ .set_blank = dce110_tg_set_blank,
+ .is_blanked = dce110_tg_is_blanked,
+ .set_colors = dce110_tg_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_program_blank_color,
+ .disable_vga = dce110_timing_generator_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_tear_down_global_swap_lock,
+ .enable_advanced_request =
+ dce110_timing_generator_enable_advanced_request,
+ .set_drr =
+ dce110_timing_generator_set_drr,
+ .set_static_screen_control =
+ dce110_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce110_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce110_arm_vert_intr,
+};
+
+void dce110_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+
+ tg110->offsets = *offsets;
+
+ tg110->base.funcs = &dce110_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
new file mode 100644
index 000000000000..82737dea6984
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE110_H__
+#define __DC_TIMING_GENERATOR_DCE110_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+
+/* GSL Sync related values */
+
+/* In VSync mode, after 4 units of time, master pipe will generate
+ * flip_ready signal */
+#define VFLIP_READY_DELAY 4
+/* In HSync mode, after 2 units of time, master pipe will generate
+ * flip_ready signal */
+#define HFLIP_READY_DELAY 2
+/* 6 lines delay between forcing flip and checking all pipes ready */
+#define HFLIP_CHECK_DELAY 6
+/* 3 lines before end of frame */
+#define FLIP_READY_BACK_LOOKUP 3
+
+/* Trigger Source Select - ASIC-defendant, actual values for the
+ * register programming */
+enum trigger_source_select {
+ TRIGGER_SOURCE_SELECT_LOGIC_ZERO = 0,
+ TRIGGER_SOURCE_SELECT_CRTC_VSYNCA = 1,
+ TRIGGER_SOURCE_SELECT_CRTC_HSYNCA = 2,
+ TRIGGER_SOURCE_SELECT_CRTC_VSYNCB = 3,
+ TRIGGER_SOURCE_SELECT_CRTC_HSYNCB = 4,
+ TRIGGER_SOURCE_SELECT_GENERICF = 5,
+ TRIGGER_SOURCE_SELECT_GENERICE = 6,
+ TRIGGER_SOURCE_SELECT_VSYNCA = 7,
+ TRIGGER_SOURCE_SELECT_HSYNCA = 8,
+ TRIGGER_SOURCE_SELECT_VSYNCB = 9,
+ TRIGGER_SOURCE_SELECT_HSYNCB = 10,
+ TRIGGER_SOURCE_SELECT_HPD1 = 11,
+ TRIGGER_SOURCE_SELECT_HPD2 = 12,
+ TRIGGER_SOURCE_SELECT_GENERICD = 13,
+ TRIGGER_SOURCE_SELECT_GENERICC = 14,
+ TRIGGER_SOURCE_SELECT_VIDEO_CAPTURE = 15,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP0 = 16,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP1 = 17,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP2 = 18,
+ TRIGGER_SOURCE_SELECT_BLONY = 19,
+ TRIGGER_SOURCE_SELECT_GENERICA = 20,
+ TRIGGER_SOURCE_SELECT_GENERICB = 21,
+ TRIGGER_SOURCE_SELECT_GSL_ALLOW_FLIP = 22,
+ TRIGGER_SOURCE_SELECT_MANUAL_TRIGGER = 23
+};
+
+/* Trigger Source Select - ASIC-dependant, actual values for the
+ * register programming */
+enum trigger_polarity_select {
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO = 0,
+ TRIGGER_POLARITY_SELECT_CRTC = 1,
+ TRIGGER_POLARITY_SELECT_GENERICA = 2,
+ TRIGGER_POLARITY_SELECT_GENERICB = 3,
+ TRIGGER_POLARITY_SELECT_HSYNCA = 4,
+ TRIGGER_POLARITY_SELECT_HSYNCB = 5,
+ TRIGGER_POLARITY_SELECT_VIDEO_CAPTURE = 6,
+ TRIGGER_POLARITY_SELECT_GENERICC = 7
+};
+
+
+struct dce110_timing_generator_offsets {
+ int32_t crtc;
+ int32_t dcp;
+
+ /* DCE80 use only */
+ int32_t dmif;
+};
+
+struct dce110_timing_generator {
+ struct timing_generator base;
+ struct dce110_timing_generator_offsets offsets;
+ struct dce110_timing_generator_offsets derived_offsets;
+
+ enum controller_id controller_id;
+
+ uint32_t max_h_total;
+ uint32_t max_v_total;
+
+ uint32_t min_h_blank;
+ uint32_t min_h_front_porch;
+ uint32_t min_h_back_porch;
+
+ /* DCE 12 */
+ uint32_t min_h_sync_width;
+ uint32_t min_v_sync_width;
+ uint32_t min_v_blank;
+
+};
+
+#define DCE110TG_FROM_TG(tg)\
+ container_of(tg, struct dce110_timing_generator, base)
+
+void dce110_timing_generator_construct(
+ struct dce110_timing_generator *tg,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+/* determine if given timing can be supported by TG */
+bool dce110_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal);
+
+/******** HW programming ************/
+
+/* Program timing generator with given timing */
+bool dce110_timing_generator_program_timing_generator(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing);
+
+/* Disable/Enable Timing Generator */
+bool dce110_timing_generator_enable_crtc(struct timing_generator *tg);
+bool dce110_timing_generator_disable_crtc(struct timing_generator *tg);
+
+void dce110_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl);
+
+/**************** TG current status ******************/
+
+/* return the current frame counter. Used by Linux kernel DRM */
+uint32_t dce110_timing_generator_get_vblank_counter(
+ struct timing_generator *tg);
+
+void dce110_timing_generator_get_position(
+ struct timing_generator *tg,
+ struct crtc_position *position);
+
+/* return true if TG counter is moving. false if TG is stopped */
+bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg);
+
+/* wait until TG is in beginning of vertical blank region */
+void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg);
+
+/* wait until TG is in beginning of active region */
+void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg);
+
+/*********** Timing Generator Synchronization routines ****/
+
+/* Setups Global Swap Lock group, TimingServer or TimingClient*/
+void dce110_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params);
+
+/* Clear all the register writes done by setup_global_swap_lock */
+void dce110_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg);
+
+/* Reset slave controllers on master VSync */
+void dce110_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source);
+
+/* disabling trigger-reset */
+void dce110_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg);
+
+/* Checks whether CRTC triggered reset occurred */
+bool dce110_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg);
+
+/******** Stuff to move to other virtual HW objects *****************/
+/* Move to enable accelerated mode */
+void dce110_timing_generator_disable_vga(struct timing_generator *tg);
+/* TODO: Should we move it to transform */
+/* Fully program CRTC timing in timing generator */
+void dce110_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+/* TODO: Should we move it to opp? */
+/* Combine with below and move YUV/RGB color conversion to SW layer */
+void dce110_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color);
+/* Combine with above and move YUV/RGB color conversion to SW layer */
+void dce110_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color);
+void dce110_timing_generator_color_space_to_black_color(
+ enum dc_color_space colorspace,
+ struct tg_color *black_color);
+/*************** End-of-move ********************/
+
+/* Not called yet */
+void dce110_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth);
+
+void dce110_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params);
+
+void dce110_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value);
+
+void dce110_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+void dce110_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing);
+
+void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
+ bool lock);
+
+void dce110_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color);
+
+void dce110_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color);
+
+void dce110_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios);
+
+bool dce110_tg_is_blanked(struct timing_generator *tg);
+
+void dce110_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking);
+
+bool dce110_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+void dce110_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state);
+
+void dce110_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color);
+
+bool dce110_arm_vert_intr(
+ struct timing_generator *tg, uint8_t width);
+
+#endif /* __DC_TIMING_GENERATOR_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
new file mode 100644
index 000000000000..59b4cd329715
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -0,0 +1,711 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+#include "dc.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce110_timing_generator.h"
+#include "dce110_timing_generator_v.h"
+
+#include "timing_generator.h"
+
+/** ********************************************************************************
+ *
+ * DCE11 Timing Generator Implementation
+ *
+ **********************************************************************************/
+
+/**
+* Enable CRTCV
+*/
+
+static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
+{
+/*
+* Set MASTER_UPDATE_MODE to 0
+* This is needed for DRR, and also suggested to be default value by Syed.
+*/
+
+ uint32_t value;
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ CRTCV_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_MASTER_UPDATE_MODE, value);
+
+ /* TODO: may want this on for looking for underflow */
+ value = 0;
+ dm_write_reg(tg->ctx, mmCRTCV_MASTER_UPDATE_MODE, value);
+
+ value = 0;
+ set_reg_field_value(value, 1,
+ CRTCV_MASTER_EN, CRTC_MASTER_EN);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_MASTER_EN, value);
+
+ return true;
+}
+
+static bool dce110_timing_generator_v_disable_crtc(struct timing_generator *tg)
+{
+ uint32_t value;
+
+ value = dm_read_reg(tg->ctx,
+ mmCRTCV_CONTROL);
+ set_reg_field_value(value, 0,
+ CRTCV_CONTROL, CRTC_DISABLE_POINT_CNTL);
+ set_reg_field_value(value, 0,
+ CRTCV_CONTROL, CRTC_MASTER_EN);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_CONTROL, value);
+ /*
+ * TODO: call this when adding stereo support
+ * tg->funcs->disable_stereo(tg);
+ */
+ return true;
+}
+
+static void dce110_timing_generator_v_blank_crtc(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_BLANK_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DE_MODE);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_unblank_crtc(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_BLANK_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DE_MODE);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static bool dce110_timing_generator_v_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+
+ addr = mmCRTCV_STATUS;
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTCV_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+static bool dce110_timing_generator_v_is_counter_moving(struct timing_generator *tg)
+{
+ uint32_t value;
+ uint32_t h1 = 0;
+ uint32_t h2 = 0;
+ uint32_t v1 = 0;
+ uint32_t v2 = 0;
+
+ value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
+
+ h1 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ v1 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
+
+ h2 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ v2 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ if (h1 == h2 && v1 == v2)
+ return false;
+ else
+ return true;
+}
+
+static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *tg)
+{
+ /* We want to catch beginning of VBlank here, so if the first try are
+ * in VBlank, we might be very close to Active, in this case wait for
+ * another frame
+ */
+ while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+* Wait till we are in VActive (anywhere in VActive)
+*/
+static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+static void dce110_timing_generator_v_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce110_timing_generator_v_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce110_timing_generator_v_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void dce110_timing_generator_v_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+ uint32_t tmp = 0;
+
+ addr = mmCRTCV_H_TOTAL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->h_total - 1,
+ CRTCV_H_TOTAL,
+ CRTC_H_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_TOTAL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTCV_V_TOTAL,
+ CRTC_V_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_BLANK_START_END;
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_H_BLANK_START_END,
+ CRTC_H_BLANK_END);
+
+ tmp = tmp + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_H_BLANK_START_END,
+ CRTC_H_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_BLANK_START_END;
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->v_total - (v_sync_start + timing->v_border_top);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ tmp = tmp + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_SYNC_A;
+ value = 0;
+ set_reg_field_value(
+ value,
+ timing->h_sync_width,
+ CRTCV_H_SYNC_A,
+ CRTC_H_SYNC_A_END);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_SYNC_A_CNTL;
+ value = dm_read_reg(ctx, addr);
+ if (timing->flags.HSYNC_POSITIVE_POLARITY) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_H_SYNC_A_CNTL,
+ CRTC_H_SYNC_A_POL);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_H_SYNC_A_CNTL,
+ CRTC_H_SYNC_A_POL);
+ }
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_SYNC_A;
+ value = 0;
+ set_reg_field_value(
+ value,
+ timing->v_sync_width,
+ CRTCV_V_SYNC_A,
+ CRTC_V_SYNC_A_END);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_SYNC_A_CNTL;
+ value = dm_read_reg(ctx, addr);
+ if (timing->flags.VSYNC_POSITIVE_POLARITY) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL);
+ }
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_INTERLACE_CONTROL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->flags.INTERLACE,
+ CRTCV_INTERLACE_CONTROL,
+ CRTC_INTERLACE_ENABLE);
+ dm_write_reg(ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t addr = mmCRTCV_START_LINE_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ }
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 2,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ if (enable_blanking)
+ dce110_timing_generator_v_blank_crtc(tg);
+ else
+ dce110_timing_generator_v_unblank_crtc(tg);
+}
+
+static void dce110_timing_generator_v_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce110_timing_generator_v_program_blanking(tg, timing);
+}
+
+static void dce110_timing_generator_v_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ uint32_t addr = mmCRTCV_BLACK_COLOR;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t addr;
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ set_reg_field_value(
+ value,
+ color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ addr = mmCRTCV_OVERSCAN_COLOR;
+ dm_write_reg(ctx, addr, value);
+ addr = mmCRTCV_BLACK_COLOR;
+ dm_write_reg(ctx, addr, value);
+ /* This is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ addr = mmCRTCV_BLANK_DATA_COLOR;
+ dm_write_reg(ctx, addr, value);
+
+ /* TO DO we have to program EXT registers and we need to know LB DATA
+ * format because it is used when more 10 , i.e. 12 bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+}
+
+static void dce110_tg_v_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ uint32_t addr = mmCRTCV_BLACK_COLOR;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = mmCRTCV_BLANK_DATA_COLOR;
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr;
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_b_cb,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_g_y,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_r_cr,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ addr = mmCRTCV_OVERSCAN_COLOR;
+ dm_write_reg(ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce110_tg_v_program_blank_color(tg, blank_color);
+ if (overscan_color != NULL)
+ dce110_timing_generator_v_set_overscan_color(tg, overscan_color);
+}
+
+static void dce110_timing_generator_v_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ uint32_t regval;
+ uint32_t address = mmCRTC_CONTROL;
+
+ regval = dm_read_reg(tg->ctx, address);
+ set_reg_field_value(regval, early_cntl,
+ CRTCV_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
+ dm_write_reg(tg->ctx, address, regval);
+}
+
+static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_STATUS_FRAME_COUNT;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+ uint32_t field = get_reg_field_value(
+ value, CRTCV_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+static bool dce110_timing_generator_v_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return false;
+}
+
+static void dce110_timing_generator_v_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source_tg_inst)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_disable_vga(
+ struct timing_generator *tg)
+{
+ return;
+}
+
+static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
+{
+ /* Signal comes from the primary pipe, underlay is never blanked. */
+ return false;
+}
+
+/** ********************************************************************************************
+ *
+ * DCE11 Timing Generator Constructor / Destructor
+ *
+ *********************************************************************************************/
+static const struct timing_generator_funcs dce110_tg_v_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = dce110_timing_generator_v_program_timing,
+ .enable_crtc = dce110_timing_generator_v_enable_crtc,
+ .disable_crtc = dce110_timing_generator_v_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_v_is_counter_moving,
+ .get_position = NULL, /* Not to be implemented for underlay*/
+ .get_frame_count = dce110_timing_generator_v_get_vblank_counter,
+ .set_early_control = dce110_timing_generator_v_set_early_control,
+ .wait_for_state = dce110_timing_generator_v_wait_for_state,
+ .set_blank = dce110_timing_generator_v_set_blank,
+ .is_blanked = dce110_tg_v_is_blanked,
+ .set_colors = dce110_timing_generator_v_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_v_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_v_program_blank_color,
+ .disable_vga = dce110_timing_generator_v_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_v_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_v_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_v_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_v_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_v_tear_down_global_swap_lock,
+ .enable_advanced_request =
+ dce110_timing_generator_v_enable_advanced_request
+};
+
+void dce110_timing_generator_v_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx)
+{
+ tg110->controller_id = CONTROLLER_ID_UNDERLAY0;
+
+ tg110->base.funcs = &dce110_tg_v_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
new file mode 100644
index 000000000000..d2623a5994e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_V_DCE110_H__
+#define __DC_TIMING_GENERATOR_V_DCE110_H__
+
+void dce110_timing_generator_v_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx);
+
+#endif /* __DC_TIMING_GENERATOR_V_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
new file mode 100644
index 000000000000..47390dc58306
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110_transform_v.h"
+#include "dm_services.h"
+#include "dc.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define SCLV_PHASES 64
+
+struct sclv_ratios_inits {
+ uint32_t h_int_scale_ratio_luma;
+ uint32_t h_int_scale_ratio_chroma;
+ uint32_t v_int_scale_ratio_luma;
+ uint32_t v_int_scale_ratio_chroma;
+ struct init_int_and_frac h_init_luma;
+ struct init_int_and_frac h_init_chroma;
+ struct init_int_and_frac v_init_luma;
+ struct init_int_and_frac v_init_chroma;
+};
+
+static void calculate_viewport(
+ const struct scaler_data *scl_data,
+ struct rect *luma_viewport,
+ struct rect *chroma_viewport)
+{
+ /*Do not set chroma vp for rgb444 pixel format*/
+ luma_viewport->x = scl_data->viewport.x - scl_data->viewport.x % 2;
+ luma_viewport->y = scl_data->viewport.y - scl_data->viewport.y % 2;
+ luma_viewport->width =
+ scl_data->viewport.width - scl_data->viewport.width % 2;
+ luma_viewport->height =
+ scl_data->viewport.height - scl_data->viewport.height % 2;
+ chroma_viewport->x = luma_viewport->x;
+ chroma_viewport->y = luma_viewport->y;
+ chroma_viewport->height = luma_viewport->height;
+ chroma_viewport->width = luma_viewport->width;
+
+ if (scl_data->format == PIXEL_FORMAT_420BPP8) {
+ luma_viewport->height += luma_viewport->height % 2;
+ luma_viewport->width += luma_viewport->width % 2;
+ /*for 420 video chroma is 1/4 the area of luma, scaled
+ *vertically and horizontally
+ */
+ chroma_viewport->x = luma_viewport->x / 2;
+ chroma_viewport->y = luma_viewport->y / 2;
+ chroma_viewport->height = luma_viewport->height / 2;
+ chroma_viewport->width = luma_viewport->width / 2;
+ }
+}
+
+static void program_viewport(
+ struct dce_transform *xfm_dce,
+ struct rect *luma_view_port,
+ struct rect *chroma_view_port)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+
+ if (luma_view_port->width != 0 && luma_view_port->height != 0) {
+ addr = mmSCLV_VIEWPORT_START;
+ value = 0;
+ set_reg_field_value(
+ value,
+ luma_view_port->x,
+ SCLV_VIEWPORT_START,
+ VIEWPORT_X_START);
+ set_reg_field_value(
+ value,
+ luma_view_port->y,
+ SCLV_VIEWPORT_START,
+ VIEWPORT_Y_START);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VIEWPORT_SIZE;
+ value = 0;
+ set_reg_field_value(
+ value,
+ luma_view_port->height,
+ SCLV_VIEWPORT_SIZE,
+ VIEWPORT_HEIGHT);
+ set_reg_field_value(
+ value,
+ luma_view_port->width,
+ SCLV_VIEWPORT_SIZE,
+ VIEWPORT_WIDTH);
+ dm_write_reg(ctx, addr, value);
+ }
+
+ if (chroma_view_port->width != 0 && chroma_view_port->height != 0) {
+ addr = mmSCLV_VIEWPORT_START_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ chroma_view_port->x,
+ SCLV_VIEWPORT_START_C,
+ VIEWPORT_X_START_C);
+ set_reg_field_value(
+ value,
+ chroma_view_port->y,
+ SCLV_VIEWPORT_START_C,
+ VIEWPORT_Y_START_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VIEWPORT_SIZE_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ chroma_view_port->height,
+ SCLV_VIEWPORT_SIZE_C,
+ VIEWPORT_HEIGHT_C);
+ set_reg_field_value(
+ value,
+ chroma_view_port->width,
+ SCLV_VIEWPORT_SIZE_C,
+ VIEWPORT_WIDTH_C);
+ dm_write_reg(ctx, addr, value);
+ }
+}
+
+/*
+ * Function:
+ * void setup_scaling_configuration
+ *
+ * Purpose: setup scaling mode : bypass, RGb, YCbCr and nummber of taps
+ * Input: data
+ *
+ * Output:
+ * void
+ */
+static bool setup_scaling_configuration(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ bool is_scaling_needed = false;
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t value = 0;
+
+ set_reg_field_value(value, data->taps.h_taps - 1,
+ SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS);
+ set_reg_field_value(value, data->taps.v_taps - 1,
+ SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS);
+ set_reg_field_value(value, data->taps.h_taps_c - 1,
+ SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS_C);
+ set_reg_field_value(value, data->taps.v_taps_c - 1,
+ SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS_C);
+ dm_write_reg(ctx, mmSCLV_TAP_CONTROL, value);
+
+ value = 0;
+ if (data->taps.h_taps + data->taps.v_taps > 2) {
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE);
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN);
+ is_scaling_needed = true;
+ } else {
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE);
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN);
+ }
+
+ if (data->taps.h_taps_c + data->taps.v_taps_c > 2) {
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE_C);
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN_C);
+ is_scaling_needed = true;
+ } else if (data->format != PIXEL_FORMAT_420BPP8) {
+ set_reg_field_value(
+ value,
+ get_reg_field_value(value, SCLV_MODE, SCL_MODE),
+ SCLV_MODE,
+ SCL_MODE_C);
+ set_reg_field_value(
+ value,
+ get_reg_field_value(value, SCLV_MODE, SCL_PSCL_EN),
+ SCLV_MODE,
+ SCL_PSCL_EN_C);
+ } else {
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE_C);
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN_C);
+ }
+ dm_write_reg(ctx, mmSCLV_MODE, value);
+
+ value = 0;
+ /*
+ * 0 - Replaced out of bound pixels with black pixel
+ * (or any other required color)
+ * 1 - Replaced out of bound pixels with the edge pixel
+ */
+ set_reg_field_value(value, 1, SCLV_CONTROL, SCL_BOUNDARY_MODE);
+ dm_write_reg(ctx, mmSCLV_CONTROL, value);
+
+ return is_scaling_needed;
+}
+
+/**
+* Function:
+* void program_overscan
+*
+* Purpose: Programs overscan border
+* Input: overscan
+*
+* Output:
+ void
+*/
+static void program_overscan(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ uint32_t overscan_left_right = 0;
+ uint32_t overscan_top_bottom = 0;
+
+ int overscan_right = data->h_active - data->recout.x - data->recout.width;
+ int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
+
+ if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+
+ if (overscan_right < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_right = 0;
+ }
+ if (overscan_bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_bottom = 0;
+ }
+
+ set_reg_field_value(overscan_left_right, data->recout.x,
+ EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT);
+
+ set_reg_field_value(overscan_left_right, overscan_right,
+ EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT);
+
+ set_reg_field_value(overscan_top_bottom, data->recout.y,
+ EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP);
+
+ set_reg_field_value(overscan_top_bottom, overscan_bottom,
+ EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmSCLV_EXT_OVERSCAN_LEFT_RIGHT,
+ overscan_left_right);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmSCLV_EXT_OVERSCAN_TOP_BOTTOM,
+ overscan_top_bottom);
+}
+
+static void set_coeff_update_complete(
+ struct dce_transform *xfm_dce)
+{
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx, mmSCLV_UPDATE);
+ set_reg_field_value(value, 1, SCLV_UPDATE, SCL_COEF_UPDATE_COMPLETE);
+ dm_write_reg(xfm_dce->base.ctx, mmSCLV_UPDATE, value);
+}
+
+static void program_multi_taps_filter(
+ struct dce_transform *xfm_dce,
+ int taps,
+ const uint16_t *coeffs,
+ enum ram_filter_type filter_type)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ int i, phase, pair;
+ int array_idx = 0;
+ int taps_pairs = (taps + 1) / 2;
+ int phases_to_program = SCLV_PHASES / 2 + 1;
+
+ uint32_t select = 0;
+ uint32_t power_ctl, power_ctl_off;
+
+ if (!coeffs)
+ return;
+
+ /*We need to disable power gating on coeff memory to do programming*/
+ power_ctl = dm_read_reg(ctx, mmDCFEV_MEM_PWR_CTRL);
+ power_ctl_off = power_ctl;
+ set_reg_field_value(power_ctl_off, 1, DCFEV_MEM_PWR_CTRL, SCLV_COEFF_MEM_PWR_DIS);
+ dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl_off);
+
+ /*Wait to disable gating:*/
+ for (i = 0; i < 10; i++) {
+ if (get_reg_field_value(
+ dm_read_reg(ctx, mmDCFEV_MEM_PWR_STATUS),
+ DCFEV_MEM_PWR_STATUS,
+ SCLV_COEFF_MEM_PWR_STATE) == 0)
+ break;
+
+ udelay(1);
+ }
+
+ set_reg_field_value(select, filter_type, SCLV_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE);
+
+ for (phase = 0; phase < phases_to_program; phase++) {
+ /*we always program N/2 + 1 phases, total phases N, but N/2-1 are just mirror
+ phase 0 is unique and phase N/2 is unique if N is even*/
+ set_reg_field_value(select, phase, SCLV_COEF_RAM_SELECT, SCL_C_RAM_PHASE);
+ for (pair = 0; pair < taps_pairs; pair++) {
+ uint32_t data = 0;
+
+ set_reg_field_value(select, pair,
+ SCLV_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX);
+
+ dm_write_reg(ctx, mmSCLV_COEF_RAM_SELECT, select);
+
+ set_reg_field_value(
+ data, 1,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_EVEN_TAP_COEF_EN);
+ set_reg_field_value(
+ data, coeffs[array_idx],
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_EVEN_TAP_COEF);
+
+ if (taps % 2 && pair == taps_pairs - 1) {
+ set_reg_field_value(
+ data, 0,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF_EN);
+ array_idx++;
+ } else {
+ set_reg_field_value(
+ data, 1,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF_EN);
+ set_reg_field_value(
+ data, coeffs[array_idx + 1],
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF);
+
+ array_idx += 2;
+ }
+
+ dm_write_reg(ctx, mmSCLV_COEF_RAM_TAP_DATA, data);
+ }
+ }
+
+ /*We need to restore power gating on coeff memory to initial state*/
+ dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl);
+}
+
+static void calculate_inits(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data,
+ struct sclv_ratios_inits *inits,
+ struct rect *luma_viewport,
+ struct rect *chroma_viewport)
+{
+ inits->h_int_scale_ratio_luma =
+ dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio_luma =
+ dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+ inits->h_int_scale_ratio_chroma =
+ dal_fixed31_32_u2d19(data->ratios.horz_c) << 5;
+ inits->v_int_scale_ratio_chroma =
+ dal_fixed31_32_u2d19(data->ratios.vert_c) << 5;
+
+ inits->h_init_luma.integer = 1;
+ inits->v_init_luma.integer = 1;
+ inits->h_init_chroma.integer = 1;
+ inits->v_init_chroma.integer = 1;
+}
+
+static void program_scl_ratios_inits(
+ struct dce_transform *xfm_dce,
+ struct sclv_ratios_inits *inits)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t addr = mmSCLV_HORZ_FILTER_SCALE_RATIO;
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ inits->h_int_scale_ratio_luma,
+ SCLV_HORZ_FILTER_SCALE_RATIO,
+ SCL_H_SCALE_RATIO);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_SCALE_RATIO;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_int_scale_ratio_luma,
+ SCLV_VERT_FILTER_SCALE_RATIO,
+ SCL_V_SCALE_RATIO);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_SCALE_RATIO_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_int_scale_ratio_chroma,
+ SCLV_HORZ_FILTER_SCALE_RATIO_C,
+ SCL_H_SCALE_RATIO_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_SCALE_RATIO_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_int_scale_ratio_chroma,
+ SCLV_VERT_FILTER_SCALE_RATIO_C,
+ SCL_V_SCALE_RATIO_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_INIT;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_init_luma.fraction,
+ SCLV_HORZ_FILTER_INIT,
+ SCL_H_INIT_FRAC);
+ set_reg_field_value(
+ value,
+ inits->h_init_luma.integer,
+ SCLV_HORZ_FILTER_INIT,
+ SCL_H_INIT_INT);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_INIT;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_init_luma.fraction,
+ SCLV_VERT_FILTER_INIT,
+ SCL_V_INIT_FRAC);
+ set_reg_field_value(
+ value,
+ inits->v_init_luma.integer,
+ SCLV_VERT_FILTER_INIT,
+ SCL_V_INIT_INT);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_INIT_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_init_chroma.fraction,
+ SCLV_HORZ_FILTER_INIT_C,
+ SCL_H_INIT_FRAC_C);
+ set_reg_field_value(
+ value,
+ inits->h_init_chroma.integer,
+ SCLV_HORZ_FILTER_INIT_C,
+ SCL_H_INIT_INT_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_INIT_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_init_chroma.fraction,
+ SCLV_VERT_FILTER_INIT_C,
+ SCL_V_INIT_FRAC_C);
+ set_reg_field_value(
+ value,
+ inits->v_init_chroma.integer,
+ SCLV_VERT_FILTER_INIT_C,
+ SCL_V_INIT_INT_C);
+ dm_write_reg(ctx, addr, value);
+}
+
+static const uint16_t *get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 4)
+ return get_filter_4tap_64p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+static bool dce110_xfmv_power_up_line_buffer(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL);
+
+ /*Use all three pieces of memory always*/
+ set_reg_field_value(value, 0, LBV_MEMORY_CTRL, LB_MEMORY_CONFIG);
+ /*hard coded number DCE11 1712(0x6B0) Partitions: 720/960/1712*/
+ set_reg_field_value(value, xfm_dce->lb_memory_size, LBV_MEMORY_CTRL,
+ LB_MEMORY_SIZE);
+
+ dm_write_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL, value);
+
+ return true;
+}
+
+static void dce110_xfmv_set_scaler(
+ struct transform *xfm,
+ const struct scaler_data *data)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ bool is_scaling_required = false;
+ bool filter_updated = false;
+ const uint16_t *coeffs_v, *coeffs_h, *coeffs_h_c, *coeffs_v_c;
+ struct rect luma_viewport = {0};
+ struct rect chroma_viewport = {0};
+
+ dce110_xfmv_power_up_line_buffer(xfm);
+ /* 1. Calculate viewport, viewport programming should happen after init
+ * calculations as they may require an adjustment in the viewport.
+ */
+
+ calculate_viewport(data, &luma_viewport, &chroma_viewport);
+
+ /* 2. Program overscan */
+ program_overscan(xfm_dce, data);
+
+ /* 3. Program taps and configuration */
+ is_scaling_required = setup_scaling_configuration(xfm_dce, data);
+
+ if (is_scaling_required) {
+ /* 4. Calculate and program ratio, filter initialization */
+
+ struct sclv_ratios_inits inits = { 0 };
+
+ calculate_inits(
+ xfm_dce,
+ data,
+ &inits,
+ &luma_viewport,
+ &chroma_viewport);
+
+ program_scl_ratios_inits(xfm_dce, &inits);
+
+ coeffs_v = get_filter_coeffs_64p(data->taps.v_taps, data->ratios.vert);
+ coeffs_h = get_filter_coeffs_64p(data->taps.h_taps, data->ratios.horz);
+ coeffs_v_c = get_filter_coeffs_64p(data->taps.v_taps_c, data->ratios.vert_c);
+ coeffs_h_c = get_filter_coeffs_64p(data->taps.h_taps_c, data->ratios.horz_c);
+
+ if (coeffs_v != xfm_dce->filter_v
+ || coeffs_v_c != xfm_dce->filter_v_c
+ || coeffs_h != xfm_dce->filter_h
+ || coeffs_h_c != xfm_dce->filter_h_c) {
+ /* 5. Program vertical filters */
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_RGB_Y_VERTICAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps_c,
+ coeffs_v_c,
+ FILTER_TYPE_CBCR_VERTICAL);
+
+ /* 6. Program horizontal filters */
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_RGB_Y_HORIZONTAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps_c,
+ coeffs_h_c,
+ FILTER_TYPE_CBCR_HORIZONTAL);
+
+ xfm_dce->filter_v = coeffs_v;
+ xfm_dce->filter_v_c = coeffs_v_c;
+ xfm_dce->filter_h = coeffs_h;
+ xfm_dce->filter_h_c = coeffs_h_c;
+ filter_updated = true;
+ }
+ }
+
+ /* 7. Program the viewport */
+ program_viewport(xfm_dce, &luma_viewport, &chroma_viewport);
+
+ /* 8. Set bit to flip to new coefficient memory */
+ if (filter_updated)
+ set_coeff_update_complete(xfm_dce);
+}
+
+static void dce110_xfmv_reset(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ xfm_dce->filter_h = NULL;
+ xfm_dce->filter_v = NULL;
+ xfm_dce->filter_h_c = NULL;
+ xfm_dce->filter_v_c = NULL;
+}
+
+static void dce110_xfmv_set_gamut_remap(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust)
+{
+ /* DO NOTHING*/
+}
+
+static void dce110_xfmv_set_pixel_storage_depth(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_depth = 0;
+ int expan_mode = 0;
+ uint32_t reg_data = 0;
+
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ pixel_depth = 2;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ pixel_depth = 1;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ pixel_depth = 0;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ pixel_depth = 3;
+ expan_mode = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ set_reg_field_value(
+ reg_data,
+ expan_mode,
+ LBV_DATA_FORMAT,
+ PIXEL_EXPAN_MODE);
+
+ set_reg_field_value(
+ reg_data,
+ pixel_depth,
+ LBV_DATA_FORMAT,
+ PIXEL_DEPTH);
+
+ dm_write_reg(xfm->ctx, mmLBV_DATA_FORMAT, reg_data);
+
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /*we should use unsupported capabilities
+ * unless it is required by w/a*/
+ dm_logger_write(xfm->ctx->logger, LOG_WARNING,
+ "%s: Capability not supported",
+ __func__);
+ }
+}
+
+static const struct transform_funcs dce110_xfmv_funcs = {
+ .transform_reset = dce110_xfmv_reset,
+ .transform_set_scaler = dce110_xfmv_set_scaler,
+ .transform_set_gamut_remap =
+ dce110_xfmv_set_gamut_remap,
+ .opp_set_csc_default = dce110_opp_v_set_csc_default,
+ .opp_set_csc_adjustment = dce110_opp_v_set_csc_adjustment,
+ .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut_v,
+ .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl_v,
+ .opp_set_regamma_mode = dce110_opp_set_regamma_mode_v,
+ .transform_set_pixel_storage_depth =
+ dce110_xfmv_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps =
+ dce_transform_get_optimal_number_of_taps
+};
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+bool dce110_transform_v_construct(
+ struct dce_transform *xfm_dce,
+ struct dc_context *ctx)
+{
+ xfm_dce->base.ctx = ctx;
+
+ xfm_dce->base.funcs = &dce110_xfmv_funcs;
+
+ xfm_dce->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ xfm_dce->prescaler_on = true;
+ xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
new file mode 100644
index 000000000000..b70780210aad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
@@ -0,0 +1,58 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TRANSFORM_V_DCE110_H__
+#define __DAL_TRANSFORM_V_DCE110_H__
+
+#include "../dce/dce_transform.h"
+
+#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
+#define LB_BITS_PER_ENTRY 144
+
+bool dce110_transform_v_construct(
+ struct dce_transform *xfm110,
+ struct dc_context *ctx);
+
+void dce110_opp_v_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+void dce110_opp_v_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+
+void dce110_opp_program_regamma_pwl_v(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+void dce110_opp_power_on_regamma_lut_v(
+ struct transform *xfm,
+ bool power_on);
+
+void dce110_opp_set_regamma_mode_v(
+ struct transform *xfm,
+ enum opp_regamma mode);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
new file mode 100644
index 000000000000..8e090446d511
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
+dce112_resource.o
+
+AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE112)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
new file mode 100644
index 000000000000..69649928768c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -0,0 +1,854 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+
+#include "include/logger_interface.h"
+
+#include "dce112_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp110->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp110->offsets.dmif_offset)
+
+static const struct dce112_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG statics screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+static uint32_t lpt_size_alignment(struct dce112_compressor *cp110)
+{
+ /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
+ return cp110->base.raw_size * cp110->base.banks_num *
+ cp110->base.dram_channels_num;
+}
+
+static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
+ uint32_t lpt_control)
+{
+ /*LPT MC Config */
+ if (cp110->base.options.bits.LPT_MC_CONFIG == 1) {
+ /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
+ * 00 - 1 CHANNEL
+ * 01 - 2 CHANNELS
+ * 02 - 4 OR 6 CHANNELS
+ * (Only for discrete GPU, N/A for CZ)
+ * 03 - 8 OR 12 CHANNELS
+ * (Only for discrete GPU, N/A for CZ) */
+ switch (cp110->base.dram_channels_num) {
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ case 1:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_PIPES!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LPT NUM_BANKS is in
+ * GRPH_CONTROL.GRPH_NUM_BANKS register field
+ * Specifies the number of memory banks for tiling
+ * purposes. Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES:
+ * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
+ * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
+ * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
+ * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
+ switch (cp110->base.banks_num) {
+ case 16:
+ set_reg_field_value(
+ lpt_control,
+ 3,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 8:
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 4:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_BANKS!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping is in DMIF_ADDR_CALC.
+ * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
+ * Carrizo specifies the memory interleave per pipe.
+ * It effectively specifies the location of pipe bits in
+ * the memory address.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
+ * interleave
+ * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
+ * interleave
+ */
+ switch (cp110->base.channel_interleave_size) {
+ case 256: /*256B */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ case 512: /*512B */
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT INTERLEAVE_SIZE!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
+ * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
+ * for Carrizo. Specifies the size of dram row in bytes.
+ * This should match up with NOOFCOLS field in
+ * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
+ * This register DMIF_ADDR_CALC is not used by the
+ * hardware as it is only used for addrlib assertions.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
+ * boundary
+ * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
+ * boundary
+ * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
+ * boundary */
+ switch (cp110->base.raw_size) {
+ case 4096: /*4 KB */
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 2048:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 1024:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT ROW_SIZE!!!",
+ __func__);
+ break;
+ }
+ } else {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: LPT MC Configuration is not provided",
+ __func__);
+ }
+
+ return lpt_control;
+}
+
+static bool is_source_bigger_than_epanel_size(
+ struct dce112_compressor *cp110,
+ uint32_t source_view_width,
+ uint32_t source_view_height)
+{
+ if (cp110->base.embedded_panel_h_size != 0 &&
+ cp110->base.embedded_panel_v_size != 0 &&
+ ((source_view_width * source_view_height) >
+ (cp110->base.embedded_panel_h_size *
+ cp110->base.embedded_panel_v_size)))
+ return true;
+
+ return false;
+}
+
+static uint32_t align_to_chunks_number_per_line(
+ struct dce112_compressor *cp110,
+ uint32_t pixels)
+{
+ return 256 * ((pixels + 255) / 256);
+}
+
+static void wait_for_fbc_state_changed(
+ struct dce112_compressor *cp110,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+ while (counter < 10) {
+ value = dm_read_reg(cp110->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ udelay(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ }
+}
+
+void dce112_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
+ /* HW needs to do power measurement comparison. */
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CNTL,
+ FBC_COMP_CLK_GATE_EN);
+ }
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
+ /* 1 ==> 4:1 */
+ /* 2 ==> 8:1 */
+ /* 0xF ==> 1:1 */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce112_compressor_enable_fbc(
+ struct compressor *compressor,
+ uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (compressor->options.bits.DUMMY_BACKEND == 0) &&
+ (!dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
+ (!is_source_bigger_than_epanel_size(
+ cp110,
+ params->source_view_width,
+ params->source_view_height))) {
+
+ uint32_t addr;
+ uint32_t value;
+
+ /* Before enabling FBC first need to enable LPT if applicable
+ * LPT state should always be changed (enable/disable) while FBC
+ * is disabled */
+ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
+ (params->source_view_width *
+ params->source_view_height <=
+ dce11_one_lpt_channel_max_resolution)) {
+ dce112_compressor_enable_lpt(compressor);
+ }
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp110->offsets = reg_offsets[params->inst];
+
+ /*Toggle it as there is bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp110, true);
+ }
+}
+
+void dce112_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ /* Whenever disabling FBC make sure LPT is disabled if LPT
+ * supported */
+ if (compressor->options.bits.LPT_SUPPORT)
+ dce112_compressor_disable_lpt(compressor);
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+}
+
+bool dce112_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+ if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst =
+ compressor->attached_inst;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
+{
+ /* Check the hardware register */
+ uint32_t value = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ return get_reg_field_value(
+ value,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+}
+
+void dce112_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ if (compressor->options.bits.LPT_SUPPORT) {
+ uint32_t lpt_alignment = lpt_size_alignment(cp110);
+
+ if (lpt_alignment != 0) {
+ compressed_surf_address_low_part =
+ ((compressed_surf_address_low_part
+ + (lpt_alignment - 1)) / lpt_alignment)
+ * lpt_alignment;
+ }
+ }
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(
+ cp110,
+ params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE11 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce112_compressor_disable_lpt(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t inx;
+
+ /* Disable all pipes LPT Stutter */
+ for (inx = 0; inx < 3; inx++) {
+ value =
+ dm_read_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 0,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
+ value);
+ }
+ /* Disable Underlay pipe LPT Stutter */
+ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Disable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Clear selection of Channel(s) containing Compressed Surface */
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0xFFFFFFFF,
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
+}
+
+void dce112_compressor_enable_lpt(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t value_control;
+ uint32_t channels;
+
+ /* Enable LPT Stutter from Display pipe */
+ value = dm_read_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 1,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
+
+ /* Enable Underlay pipe LPT Stutter */
+ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Selection of Channel(s) containing Compressed Surface: 0xfffffff
+ * will disable LPT.
+ * STCTRL_LPT_TARGETn corresponds to channel n. */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value_control = dm_read_reg(compressor->ctx, addr);
+ channels = get_reg_field_value(value_control,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ channels + 1, /* not mentioned in programming guide,
+ but follow DCE8.1 */
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Enable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce112_compressor_program_lpt_control(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t rows_per_channel;
+ uint32_t lpt_alignment;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+ uint32_t lpt_control = 0;
+
+ if (!compressor->options.bits.LPT_SUPPORT)
+ return;
+
+ lpt_control = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ /* POSSIBLE VALUES for Low Power Tiling Mode:
+ * 00 - Use channel 0
+ * 01 - Use Channel 0 and 1
+ * 02 - Use Channel 0,1,2,3
+ * 03 - reserved */
+ switch (compressor->lpt_channels_num) {
+ /* case 2:
+ * Use Channel 0 & 1 / Not used for DCE 11 */
+ case 1:
+ /*Use Channel 0 for LPT for DCE 11 */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+ break;
+ default:
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Invalid selected DRAM channels for LPT!!!",
+ __func__);
+ break;
+ }
+
+ lpt_control = lpt_memory_control_config(cp110, lpt_control);
+
+ /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
+ * FBC compressed surface pitch.
+ * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
+ * Surface Pitch) / (Row Size * Number of Channels *
+ * Number of Banks)). */
+ rows_per_channel = 0;
+ lpt_alignment = lpt_size_alignment(cp110);
+ source_view_width =
+ align_to_chunks_number_per_line(
+ cp110,
+ params->source_view_width);
+ source_view_height = (params->source_view_height + 1) & (~0x1);
+
+ if (lpt_alignment != 0) {
+ rows_per_channel = source_view_width * source_view_height * 4;
+ rows_per_channel =
+ (rows_per_channel % lpt_alignment) ?
+ (rows_per_channel / lpt_alignment + 1) :
+ rows_per_channel / lpt_alignment;
+ }
+
+ set_reg_field_value(
+ lpt_control,
+ rows_per_channel,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROWS_PER_CHAN);
+
+ dm_write_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL, lpt_control);
+}
+
+/*
+ * DCE 11 Frame Buffer Compression Implementation
+ */
+
+void dce112_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+ /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
+ * for DCE 11 regions cannot be used - does not work with S/G
+ */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Setup events when to clear all CSM entries (effectively marking
+ * current compressed data invalid)
+ * For DCE 11 CSM metadata 11111 means - "Not Compressed"
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE11:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+ * In addition, DCE11.1 also needs to set new DCE11.1 specific events
+ * that are used to trigger invalidation on certain register changes,
+ * for example enabling of Alpha Compression may trigger invalidation of
+ * FBC once bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce112_compressor_construct(struct dce112_compressor *compressor,
+ struct dc_context *ctx)
+{
+ struct dc_bios *bp = ctx->dc_bios;
+ struct embedded_panel_info panel_info;
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+ compressor->base.options.bits.LPT_SUPPORT = true;
+ /* For DCE 11 always use one DRAM channel for LPT */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
+ * should not be supported */
+ if (compressor->base.memory_bus_width == 64)
+ compressor->base.options.bits.LPT_SUPPORT = false;
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.lpt_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+
+ if (BP_RESULT_OK ==
+ bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
+ compressor->base.embedded_panel_h_size =
+ panel_info.lcd_timing.horizontal_addressable;
+ compressor->base.embedded_panel_v_size =
+ panel_info.lcd_timing.vertical_addressable;
+ }
+}
+
+struct compressor *dce112_compressor_create(struct dc_context *ctx)
+{
+ struct dce112_compressor *cp110 =
+ kzalloc(sizeof(struct dce112_compressor), GFP_KERNEL);
+
+ if (!cp110)
+ return NULL;
+
+ dce112_compressor_construct(cp110, ctx);
+ return &cp110->base;
+}
+
+void dce112_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE112_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
new file mode 100644
index 000000000000..f1227133f6df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
@@ -0,0 +1,78 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE112_H__
+#define __DC_COMPRESSOR_DCE112_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE112_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce112_compressor, base)
+
+struct dce112_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce112_compressor {
+ struct compressor base;
+ struct dce112_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce112_compressor_create(struct dc_context *ctx);
+
+void dce112_compressor_construct(struct dce112_compressor *cp110,
+ struct dc_context *ctx);
+
+void dce112_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce112_compressor_power_up_fbc(struct compressor *cp);
+
+void dce112_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params);
+
+void dce112_compressor_disable_fbc(struct compressor *cp);
+
+void dce112_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce112_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce112_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce112_compressor_enable_lpt(struct compressor *cp);
+
+void dce112_compressor_disable_lpt(struct compressor *cp);
+
+void dce112_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
new file mode 100644
index 000000000000..1e4a7c13f0ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce112_hw_sequencer.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+/* include DCE11.2 register header files */
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+struct dce112_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+
+static const struct dce112_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+static void dce112_init_pte(struct dc_context *ctx)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+
+static bool dce112_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+ * by default when command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce112_init_pte(ctx);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+void dce112_hw_sequencer_construct(struct dc *dc)
+{
+ /* All registers used by dce11.2 match those in dce11 in offset and
+ * structure
+ */
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
new file mode 100644
index 000000000000..e646f4a37fa2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE112_H__
+#define __DC_HWSS_DCE112_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce112_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
new file mode 100644
index 000000000000..663e0a047a4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -0,0 +1,1283 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dce110/dce110_resource.h"
+#include "dce110/dce110_timing_generator.h"
+
+#include "irq/dce110/irq_service_dce110.h"
+
+#include "dce/dce_mem_input.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+
+#include "dce/dce_hwseq.h"
+#include "dce112/dce112_hw_sequencer.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+#include "dce100/dce100_resource.h"
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+enum dce112_clk_src_array_id {
+ DCE112_CLK_SRC_PLL0,
+ DCE112_CLK_SRC_PLL1,
+ DCE112_CLK_SRC_PLL2,
+ DCE112_CLK_SRC_PLL3,
+ DCE112_CLK_SRC_PLL4,
+ DCE112_CLK_SRC_PLL5,
+
+ DCE112_CLK_SRC_TOTAL
+};
+
+static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE110_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE112(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE112(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_112_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5)
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(index, id)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCE_112(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps polaris_10_resource_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
+};
+
+static const struct resource_caps polaris_11_resource_cap = {
+ .num_timing_generator = 5,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x4819
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+
+static struct timing_generator *dce112_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce112_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE112_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE112_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE112_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce112_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce112_stream_encoder_create,
+ .create_hwseq = dce112_hwseq_create,
+};
+
+#define mi_inst_regs(id) { MI_DCE11_2_REG_LIST(id) }
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE11_2_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE11_2_MASK_SH_LIST(_MASK)
+};
+
+static struct mem_input *dce112_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ return &dce_mi->base;
+}
+
+static void dce112_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce112_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->lb_memory_size = 0x1404; /*5124*/
+ return &transform->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce112_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+static struct input_pixel_processor *dce112_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+struct output_pixel_processor *dce112_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce112_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce112_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce112_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce112_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce112_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+static struct clock_source *find_matching_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct dc_stream_state *const stream)
+{
+ switch (stream->sink->link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL0];
+ case TRANSMITTER_UNIPHY_B:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL1];
+ case TRANSMITTER_UNIPHY_C:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL2];
+ case TRANSMITTER_UNIPHY_D:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL3];
+ case TRANSMITTER_UNIPHY_E:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL4];
+ case TRANSMITTER_UNIPHY_F:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL5];
+ default:
+ return NULL;
+ };
+
+ return 0;
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce112_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ bool result = false;
+
+ dm_logger_write(
+ dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "%s: start",
+ __func__);
+
+ if (bw_calcs(
+ dc->ctx,
+ dc->bw_dceip,
+ dc->bw_vbios,
+ context->res_ctx.pipe_ctx,
+ dc->res_pool->pipe_count,
+ &context->bw.dce))
+ result = true;
+
+ if (!result)
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
+ "%s: Bandwidth validation failed!",
+ __func__);
+
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+ struct log_entry log_entry;
+ dm_logger_open(
+ dc->ctx->logger,
+ &log_entry,
+ LOG_BANDWIDTH_CALCS);
+ dm_logger_append(&log_entry, "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw.dce.stutter_mode_enable);
+ dm_logger_append(&log_entry,
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+ context->bw.dce.all_displays_in_sync,
+ context->bw.dce.dispclk_khz,
+ context->bw.dce.sclk_khz,
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+ dm_logger_close(&log_entry);
+ }
+ return result;
+}
+
+enum dc_status resource_map_phy_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+
+ /* acquire new resources */
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ &context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)
+ || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->clock_source =
+ dc->res_pool->dp_clock_source;
+ else
+ pipe_ctx->clock_source = find_matching_pll(
+ &context->res_ctx, dc->res_pool,
+ stream);
+
+ if (pipe_ctx->clock_source == NULL)
+ return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+ resource_reference_clock_source(
+ &context->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+
+ return DC_OK;
+}
+
+static bool dce112_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce112_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dce112_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, context, stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce112_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+enum dc_status dce112_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce112_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static void dce112_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce112_res_pool_funcs = {
+ .destroy = dce112_destroy_resource_pool,
+ .link_enc_create = dce112_link_encoder_create,
+ .validate_guaranteed = dce112_validate_guaranteed,
+ .validate_bandwidth = dce112_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce112_add_stream_to_ctx,
+ .validate_global = dce112_validate_global
+};
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels_with_latency eng_clks = {0};
+ struct dm_pp_clock_levels_with_latency mem_clks = {0};
+ struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
+ struct dm_pp_clock_levels clks = {0};
+
+ /*do system clock TODO PPLIB: after PPLIB implement,
+ * then remove old way
+ */
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &eng_clks)) {
+
+ /* This is only for temporary */
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &clks);
+ /* convert all the clock fro kHz to fix point mHz */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels/8], 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*2/8], 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*3/8], 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*4/8], 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*5/8], 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*6/8], 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &clks);
+
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+ return;
+ }
+
+ /* convert all the clock fro kHz to fix point mHz TODO: wloop data */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ eng_clks.data[0].clocks_in_khz, 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &mem_clks);
+
+ /* we don't need to call PPLIB for validation clock since they
+ * also give us the highest sclk and highest mclk (UMA clock).
+ * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula):
+ * YCLK = UMACLK*m_memoryTypeMultiplier
+ */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+ * depending on DPM state they are in. And update BW MGR GFX Engine and
+ * Memory clock member variables for Watermarks calculations for each
+ * Watermark Set
+ */
+ clk_ranges.num_wm_sets = 4;
+ clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
+ clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
+ clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C;
+ clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+
+ clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D;
+ clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+}
+
+const struct resource_caps *dce112_resource_cap(
+ struct hw_asic_id *asic_id)
+{
+ if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id->hw_internal_rev))
+ return &polaris_11_resource_cap;
+ else
+ return &polaris_10_resource_cap;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = dce112_resource_cap(&ctx->asic_id);
+ pool->base.funcs = &dce112_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap harcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL0] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL1] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL2] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL3] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL4] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL5] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+ pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source = dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true);
+
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce112_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce112_timing_generator_create(
+ ctx,
+ i,
+ &dce112_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce112_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce112_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce112_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce112_opp_create(
+ ctx,
+ i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ /* Create hardware sequencer */
+ dce112_hw_sequencer_construct(dc);
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce112_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
new file mode 100644
index 000000000000..d5c19d34eb0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -0,0 +1,61 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE112_H__
+#define __DC_RESOURCE_DCE112_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce112_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+enum dc_status dce112_validate_with_context(
+ struct dc *dc,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context,
+ struct dc_state *old_context);
+
+enum dc_status dce112_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context);
+
+bool dce112_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
+enum dc_status dce112_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+
+
+#endif /* __DC_RESOURCE_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
new file mode 100644
index 000000000000..37db1f8d45ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+
+DCE120 = dce120_resource.o dce120_timing_generator.o \
+dce120_hw_sequencer.o
+
+AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
new file mode 100644
index 000000000000..1a0b54d6034e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce120_hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+#include "reg_helper.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+struct dce120_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+#define CNTL_ID(controller_id)\
+ controller_id
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+#if 0
+static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+/*
+ addr = mmDCP0_DVMM_PTE_CONTROL + controller_id *
+ (mmDCP1_DVMM_PTE_CONTROL- mmDCP0_DVMM_PTE_CONTROL);
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value, 0, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_USE_SINGLE_PTE);
+
+ set_reg_field_value_soc15(
+ value, 1, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE0);
+
+ set_reg_field_value_soc15(
+ value, 1, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE1);
+
+ dm_write_reg(ctx, addr, value);*/
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+#endif
+
+static bool dce120_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ /* disable for bringup */
+#if 0
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+ * by default when command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC0_CRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce120_init_pte(ctx, controller_id);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+#endif
+ return false;
+}
+
+static void dce120_update_dchub(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data)
+{
+ /* TODO: port code from dal2 */
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+ /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
+ REG_UPDATE_2(DCHUB_FB_LOCATION,
+ FB_TOP, 0,
+ FB_BASE, 0x0FFFF);
+
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, 0);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, 0x03FFFF);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
+
+
+
+void dce120_hw_sequencer_construct(struct dc *dc)
+{
+ /* All registers used by dce11.2 match those in dce11 in offset and
+ * structure
+ */
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+ dc->hwss.update_dchub = dce120_update_dchub;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
new file mode 100644
index 000000000000..77a6b86d7606
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE120_H__
+#define __DC_HWSS_DCE120_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce120_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
new file mode 100644
index 000000000000..5c48c22d9d98
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -0,0 +1,1004 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.cls
+*
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "stream_encoder.h"
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dce120_resource.h"
+#include "dce112/dce112_resource.h"
+
+#include "dce110/dce110_resource.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce120_timing_generator.h"
+#include "irq/dce120/irq_service_dce120.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_mem_input.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce120/dce120_hw_sequencer.h"
+#include "dce/dce_transform.h"
+
+#include "dce/dce_audio.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_hwseq.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+#include "vega10/NBIO/nbio_6_1_offset.h"
+#include "reg_helper.h"
+
+#include "dce100/dce100_resource.h"
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#endif
+
+enum dce120_clk_src_array_id {
+ DCE120_CLK_SRC_PLL0,
+ DCE120_CLK_SRC_PLL1,
+ DCE120_CLK_SRC_PLL2,
+ DCE120_CLK_SRC_PLL3,
+ DCE120_CLK_SRC_PLL4,
+ DCE120_CLK_SRC_PLL5,
+
+ DCE120_CLK_SRC_TOTAL
+};
+
+static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ }
+};
+
+/* begin *********************
+ * macros to expend register list macro defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE_INNER(seg) \
+ NBIF_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expend register list macro defined in HW object header file
+ * end *********************/
+
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE120_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_120_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5)
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(index, id)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCE_112(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+struct output_pixel_processor *dce120_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 7,
+ .num_stream_encoder = 6,
+ .num_pll = 6,
+};
+
+static const struct dc_debug debug_defaults = {
+ .disable_clock_gate = true,
+};
+
+struct clock_source *dce120_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(*clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce120_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+
+bool dce120_hw_sequencer_create(struct dc *dc)
+{
+ /* All registers used by dce11.2 match those in dce11 in offset and
+ * structure
+ */
+ dce120_hw_sequencer_construct(dc);
+
+ /*TODO Move to separate file and Override what is needed */
+
+ return true;
+}
+
+static struct timing_generator *dce120_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce120_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static void dce120_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce120_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL)
+ dce120_clock_source_destroy(
+ &pool->base.clock_sources[i]);
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce120_clock_source_destroy(&pool->base.dp_clock_source);
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0);
+
+ straps->audio_stream_number = get_reg_field_value(reg_val,
+ CC_DC_MISC_STRAPS,
+ AUDIO_STREAM_NUMBER);
+ straps->hdmi_disable = get_reg_field_value(reg_val,
+ CC_DC_MISC_STRAPS,
+ HDMI_DISABLE);
+
+ reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0);
+ straps->dc_pinstraps_audio = get_reg_field_value(reg_val,
+ DC_PINSTRAPS,
+ DC_PINSTRAPS_AUDIO);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+static struct link_encoder *dce120_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+
+ return &enc110->base;
+}
+
+static struct input_pixel_processor *dce120_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static struct stream_encoder *dce120_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE120_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE12_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE12_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce120_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce120_stream_encoder_create,
+ .create_hwseq = dce120_hwseq_create,
+};
+
+#define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) }
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE12_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE12_MASK_SH_LIST(_MASK)
+};
+
+static struct mem_input *dce120_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ return &dce_mi->base;
+}
+
+static struct transform *dce120_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->lb_memory_size = 0x1404; /*5124*/
+ return &transform->base;
+}
+
+static void dce120_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce120_res_pool_funcs = {
+ .destroy = dce120_destroy_resource_pool,
+ .link_enc_create = dce120_link_encoder_create,
+ .validate_guaranteed = dce112_validate_guaranteed,
+ .validate_bandwidth = dce112_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce112_add_stream_to_ctx
+};
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels_with_latency eng_clks = {0};
+ struct dm_pp_clock_levels_with_latency mem_clks = {0};
+ struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
+ int i;
+ unsigned int clk;
+ unsigned int latency;
+
+ /*do system clock*/
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &eng_clks) || eng_clks.num_levels == 0) {
+
+ eng_clks.num_levels = 8;
+ clk = 300000;
+
+ for (i = 0; i < eng_clks.num_levels; i++) {
+ eng_clks.data[i].clocks_in_khz = clk;
+ clk += 100000;
+ }
+ }
+
+ /* convert all the clock fro kHz to fix point mHz TODO: wloop data */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ eng_clks.data[0].clocks_in_khz, 1000);
+
+ /*do memory clock*/
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &mem_clks) || mem_clks.num_levels == 0) {
+
+ mem_clks.num_levels = 3;
+ clk = 250000;
+ latency = 45;
+
+ for (i = 0; i < eng_clks.num_levels; i++) {
+ mem_clks.data[i].clocks_in_khz = clk;
+ mem_clks.data[i].latency_in_us = latency;
+ clk += 500000;
+ latency -= 5;
+ }
+
+ }
+
+ /* we don't need to call PPLIB for validation clock since they
+ * also give us the highest sclk and highest mclk (UMA clock).
+ * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula):
+ * YCLK = UMACLK*m_memoryTypeMultiplier
+ */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+ * depending on DPM state they are in. And update BW MGR GFX Engine and
+ * Memory clock member variables for Watermarks calculations for each
+ * Watermark Set
+ */
+ clk_ranges.num_wm_sets = 4;
+ clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
+ clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
+ clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C;
+ clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+
+ clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D;
+ clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data irq_init_data;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce120_res_pool_funcs;
+
+ /* TODO: Fill more data from GreenlandAsicCapability.cpp */
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+ dc->debug = debug_defaults;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+ pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto clk_src_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce120_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto disp_clk_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ irq_init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
+ if (!pool->base.irqs)
+ goto irqs_create_fail;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce120_timing_generator_create(
+ ctx,
+ i,
+ &dce120_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.mis[i] = dce120_mem_input_create(ctx, i);
+
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.ipps[i] = dce120_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.transforms[i] = dce120_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce120_opp_create(
+ ctx,
+ i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ }
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ if (!dce120_hw_sequencer_create(dc))
+ goto controller_create_fail;
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+irqs_create_fail:
+controller_create_fail:
+disp_clk_create_fail:
+clk_src_create_fail:
+res_create_fail:
+
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
new file mode 100644
index 000000000000..3d1f3cf012f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
@@ -0,0 +1,39 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE120_H__
+#define __DC_RESOURCE_DCE120_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCE120_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
new file mode 100644
index 000000000000..2502182d5e82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -0,0 +1,1174 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce120_timing_generator.h"
+
+#include "timing_generator.h"
+
+#define CRTC_REG_UPDATE_N(reg_name, n, ...) \
+ generic_reg_update_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__)
+
+#define CRTC_REG_SET_N(reg_name, n, ...) \
+ generic_reg_set_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__)
+
+#define CRTC_REG_UPDATE(reg, field, val) \
+ CRTC_REG_UPDATE_N(reg, 1, FD(reg##__##field), val)
+
+#define CRTC_REG_UPDATE_2(reg, field1, val1, field2, val2) \
+ CRTC_REG_UPDATE_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)
+
+#define CRTC_REG_UPDATE_3(reg, field1, val1, field2, val2, field3, val3) \
+ CRTC_REG_UPDATE_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
+
+#define CRTC_REG_UPDATE_4(reg, field1, val1, field2, val2, field3, val3, field4, val4) \
+ CRTC_REG_UPDATE_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4)
+
+#define CRTC_REG_UPDATE_5(reg, field1, val1, field2, val2, field3, val3, field4, val4, field5, val5) \
+ CRTC_REG_UPDATE_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4, FD(reg##__##field5), val5)
+
+#define CRTC_REG_SET(reg, field, val) \
+ CRTC_REG_SET_N(reg, 1, FD(reg##__##field), val)
+
+#define CRTC_REG_SET_2(reg, field1, val1, field2, val2) \
+ CRTC_REG_SET_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)
+
+#define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3) \
+ CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
+
+/**
+ *****************************************************************************
+ * Function: is_in_vertical_blank
+ *
+ * @brief
+ * check the current status of CRTC to check if we are in Vertical Blank
+ * regioneased" state
+ *
+ * @return
+ * true if currently in blank region, false otherwise
+ *
+ *****************************************************************************
+ */
+static bool dce120_timing_generator_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS,
+ tg110->offsets.crtc);
+
+ field = get_reg_field_value(value, CRTC0_CRTC_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+
+/* determine if given timing can be supported by TG */
+bool dce120_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal)
+{
+ uint32_t interlace_factor = timing->flags.INTERLACE ? 2 : 1;
+ uint32_t v_blank =
+ (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom) *
+ interlace_factor;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ if (!dce110_timing_generator_validate_timing(
+ tg,
+ timing,
+ signal))
+ return false;
+
+
+ if (v_blank < tg110->min_v_blank ||
+ timing->h_sync_width < tg110->min_h_sync_width ||
+ timing->v_sync_width < tg110->min_v_sync_width)
+ return false;
+
+ return true;
+}
+
+bool dce120_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
+}
+
+/******** HW programming ************/
+/* Disable/Enable Timing Generator */
+bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Set MASTER_UPDATE_MODE to 0
+ * This is needed for DRR, and also suggested to be default value by Syed.*/
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_MODE,
+ MASTER_UPDATE_MODE, 0);
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_LOCK,
+ UNDERFLOW_UPDATE_LOCK, 0);
+
+ /* TODO API for AtomFirmware didn't change*/
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true);
+
+ return result == BP_RESULT_OK;
+}
+
+void dce120_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_CONTROL,
+ CRTC_HBLANK_EARLY_CONTROL, early_cntl);
+}
+
+/**************** TG current status ******************/
+
+/* return the current frame counter. Used by Linux kernel DRM */
+uint32_t dce120_timing_generator_get_vblank_counter(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_FRAME_COUNT,
+ tg110->offsets.crtc);
+ uint32_t field = get_reg_field_value(
+ value, CRTC0_CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+/* Get current H and V position */
+void dce120_timing_generator_get_crtc_position(
+ struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_POSITION,
+ tg110->offsets.crtc);
+
+ position->horizontal_count = get_reg_field_value(value,
+ CRTC0_CRTC_STATUS_POSITION, CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(value,
+ CRTC0_CRTC_STATUS_POSITION, CRTC_VERT_COUNT);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_NOM_VERT_POSITION,
+ tg110->offsets.crtc);
+
+ position->nominal_vcount = get_reg_field_value(value,
+ CRTC0_CRTC_NOM_VERT_POSITION, CRTC_VERT_COUNT_NOM);
+}
+
+/* wait until TG is in beginning of vertical blank region */
+void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
+{
+ /* We want to catch beginning of VBlank here, so if the first try are
+ * in VBlank, we might be very close to Active, in this case wait for
+ * another frame
+ */
+ while (dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/* wait until TG is in beginning of active region */
+void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/*********** Timing Generator Synchronization routines ****/
+
+/* Setups Global Swap Lock group, TimingServer or TimingClient*/
+void dce120_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value_crtc_vtotal =
+ dm_read_reg_soc15(tg->ctx,
+ mmCRTC0_CRTC_V_TOTAL,
+ tg110->offsets.crtc);
+ /* Checkpoint relative to end of frame */
+ uint32_t check_point =
+ get_reg_field_value(value_crtc_vtotal,
+ CRTC0_CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+
+
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_GSL_WINDOW, tg110->offsets.crtc, 0);
+
+ CRTC_REG_UPDATE_N(DCP0_DCP_GSL_CONTROL, 6,
+ /* This pipe will belong to GSL Group zero. */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 1,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), gsl_params->gsl_master == tg->inst,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY,
+ /* Keep signal low (pending high) during 6 lines.
+ * Also defines minimum interval before re-checking signal. */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY,
+ /* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 1);
+
+ CRTC_REG_SET_2(
+ CRTC0_CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM, check_point - FLIP_READY_BACK_LOOKUP,
+ CRTC_GSL_FORCE_DELAY, VFLIP_READY_DELAY);
+}
+
+/* Clear all the register writes done by setup_global_swap_lock */
+void dce120_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Settig HW default values from reg specs */
+ CRTC_REG_SET_N(DCP0_DCP_GSL_CONTROL, 6,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY,
+ /* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 0);
+
+ CRTC_REG_SET_2(CRTC0_CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM, 0,
+ CRTC_GSL_FORCE_DELAY, 0x2); /*TODO Why this value here ?*/
+}
+
+/* Reset slave controllers on master VSync */
+void dce120_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source)
+{
+ enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t rising_edge = 0;
+ uint32_t falling_edge = 0;
+ /* Setup trigger edge */
+ uint32_t pol_value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_V_SYNC_A_CNTL,
+ tg110->offsets.crtc);
+
+ /* Register spec has reversed definition:
+ * 0 for positive, 1 for negative */
+ if (get_reg_field_value(pol_value,
+ CRTC0_CRTC_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL) == 0) {
+ rising_edge = 1;
+ } else {
+ falling_edge = 1;
+ }
+
+ /* TODO What about other sources ?*/
+ trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;
+
+ CRTC_REG_UPDATE_N(CRTC0_CRTC_TRIGB_CNTL, 7,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT), trig_src_select,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT), TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL), rising_edge,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL), falling_edge,
+ /* send every signal */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT), 0,
+ /* no delay */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY), 0,
+ /* clear trigger status */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR), 1);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE, 2,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL, 1,
+ CRTC_FORCE_COUNT_NOW_CLEAR, 1);
+}
+
+/* disabling trigger-reset */
+void dce120_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE, 0,
+ CRTC_FORCE_COUNT_NOW_CLEAR, 1);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT, TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_POLARITY_SELECT, TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ /* clear trigger status */
+ CRTC_TRIGB_CLEAR, 1);
+
+}
+
+/* Checks whether CRTC triggered reset occurred */
+bool dce120_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ tg110->offsets.crtc);
+
+ return get_reg_field_value(value,
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+}
+
+
+/******** Stuff to move to other virtual HW objects *****************/
+/* Move to enable accelerated mode */
+void dce120_timing_generator_disable_vga(struct timing_generator *tg)
+{
+ uint32_t offset = 0;
+ uint32_t value = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ switch (tg110->controller_id) {
+ case CONTROLLER_ID_D0:
+ offset = 0;
+ break;
+ case CONTROLLER_ID_D1:
+ offset = mmD2VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D2:
+ offset = mmD3VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D3:
+ offset = mmD4VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D4:
+ offset = mmD5VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D5:
+ offset = mmD6VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ default:
+ break;
+ }
+
+ value = dm_read_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset);
+
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
+ set_reg_field_value(
+ value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);
+
+ dm_write_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset, value);
+}
+/* TODO: Should we move it to transform */
+/* Fully program CRTC timing in timing generator */
+void dce120_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t tmp1 = 0;
+ uint32_t tmp2 = 0;
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_H_TOTAL,
+ CRTC_H_TOTAL,
+ timing->h_total - 1);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL,
+ CRTC_V_TOTAL,
+ timing->v_total - 1);
+
+ /* In case of V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and
+ * V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX,
+ timing->v_total - 1);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN,
+ timing->v_total - 1);
+
+ tmp1 = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+ tmp2 = tmp1 + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_END, tmp1,
+ CRTC_H_BLANK_START, tmp2);
+
+ tmp1 = timing->v_total - (v_sync_start + timing->v_border_top);
+ tmp2 = tmp1 + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END, tmp1,
+ CRTC_V_BLANK_START, tmp2);
+}
+
+/* TODO: Should we move it to opp? */
+/* Combine with below and move YUV/RGB color conversion to SW layer */
+void dce120_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ CRTC_BLACK_COLOR_G_Y, black_color->color_g_y,
+ CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
+}
+/* Combine with above and move YUV/RGB color conversion to SW layer */
+void dce120_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+ CRTC_REG_SET_3(
+ CRTC0_CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE, color->color_b_cb,
+ CRTC_OVERSCAN_COLOR_GREEN, color->color_g_y,
+ CRTC_OVERSCAN_COLOR_RED, color->color_r_cr);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_OVERSCAN_COLOR,
+ tg110->offsets.crtc);
+
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLACK_COLOR,
+ tg110->offsets.crtc,
+ value);
+
+ /* This is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_DATA_COLOR,
+ tg110->offsets.crtc,
+ value);
+
+ /* TO DO we have to program EXT registers and we need to know LB DATA
+ * format because it is used when more 10 , i.e. 12 bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+}
+
+void dce120_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN, params->vertical_total_min - 1);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX, params->vertical_total_max - 1);
+ CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 6,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 1,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 1,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK,
+ 0x180);
+
+ } else {
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN, 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX, 0);
+ CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK,
+ 0);
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: dce120_timing_generator_get_position
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] position
+ *****************************************************************************
+ */
+void dce120_timing_generator_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_POSITION,
+ tg110->offsets.crtc);
+
+ position->horizontal_count = get_reg_field_value(
+ value,
+ CRTC0_CRTC_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(
+ value,
+ CRTC0_CRTC_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_NOM_VERT_POSITION,
+ tg110->offsets.crtc);
+
+ position->nominal_vcount = get_reg_field_value(
+ value,
+ CRTC0_CRTC_NOM_VERT_POSITION,
+ CRTC_VERT_COUNT_NOM);
+}
+
+
+void dce120_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ uint32_t v_blank_start_end = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_V_BLANK_START_END,
+ tg110->offsets.crtc);
+
+ *v_blank_start = get_reg_field_value(v_blank_start_end,
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+ *v_blank_end = get_reg_field_value(v_blank_start_end,
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ dce120_timing_generator_get_crtc_position(
+ tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+void dce120_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_sync_width_and_b_porch =
+ timing->v_total - timing->v_addressable -
+ timing->v_border_bottom - timing->v_front_porch;
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_START_LINE_CONTROL,
+ tg110->offsets.crtc);
+
+ set_reg_field_value(
+ value,
+ enable ? 0 : 1,
+ CRTC0_CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+
+ /* Program advanced line position acc.to the best case from fetching data perspective to hide MC latency
+ * and prefilling Line Buffer in V Blank (to 10 lines as LB can store max 10 lines)
+ */
+ if (v_sync_width_and_b_porch > 10)
+ v_sync_width_and_b_porch = 10;
+
+ set_reg_field_value(
+ value,
+ v_sync_width_and_b_porch,
+ CRTC0_CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+
+ dm_write_reg_soc15(tg->ctx,
+ mmCRTC0_CRTC_START_LINE_CONTROL,
+ tg110->offsets.crtc,
+ value);
+}
+
+void dce120_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ CRTC_BLACK_COLOR_G_Y, black_color->color_g_y,
+ CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLACK_COLOR,
+ tg110->offsets.crtc);
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_DATA_COLOR,
+ tg110->offsets.crtc,
+ value);
+}
+
+void dce120_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_SET_3(
+ CRTC0_CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE, overscan_color->color_b_cb,
+ CRTC_OVERSCAN_COLOR_GREEN, overscan_color->color_g_y,
+ CRTC_OVERSCAN_COLOR_RED, overscan_color->color_r_cr);
+}
+
+void dce120_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce120_timing_generator_program_blanking(tg, timing);
+}
+
+bool dce120_tg_is_blanked(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_CONTROL,
+ tg110->offsets.crtc);
+
+ if (get_reg_field_value(
+ value,
+ CRTC0_CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN) == 1 &&
+ get_reg_field_value(
+ value,
+ CRTC0_CRTC_BLANK_CONTROL,
+ CRTC_CURRENT_BLANK_STATE) == 1)
+ return true;
+
+ return false;
+}
+
+void dce120_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_SET(
+ CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+
+ if (enable_blanking)
+ CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
+ else
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_BLANK_CONTROL,
+ tg110->offsets.crtc, 0);
+}
+
+bool dce120_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+void dce120_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce120_timing_generator_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce120_timing_generator_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dce120_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce120_tg_program_blank_color(tg, blank_color);
+
+ if (overscan_color != NULL)
+ dce120_tg_set_overscan_color(tg, overscan_color);
+}
+
+static void dce120_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK, value,
+ CRTC_STATIC_SCREEN_FRAME_COUNT, 2);
+}
+
+void dce120_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ /* color ramp generator mixes 16-bits color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_HRES, 6);
+
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, dyn_range,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO
+ * color bits should be left aligned aligned to MSB
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, 0);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * we will make a loop of 6 in which we prepare the mask,
+ * then write, then prepare the color for next write.
+ * first iteration will write mask only,
+ * but each next iteration color prepared in
+ * previous iteration will be written within new mask,
+ * the last component will written separately,
+ * mask is not changing between 6th and 7th write
+ * and color will be prepared by last iteration
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ value = 0;
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ set_reg_field_value(
+ value,
+ (1 << index),
+ CRTC0_CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_MASK);
+ /* write color component */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ set_reg_field_value(
+ value,
+ dst_color[index],
+ CRTC0_CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_DATA);
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+
+ /* enable test pattern */
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment for the first ramp for one color gradation
+ * 1 gradation for 6-bit color is 2^10
+ * gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, 0,
+ CRTC_TEST_PATTERN_HRES, 6,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, 0,
+ CRTC_TEST_PATTERN_HRES, 8,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, inc_base + 2,
+ CRTC_TEST_PATTERN_HRES, 8,
+ CRTC_TEST_PATTERN_VRES, 5,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 384 << 6);
+ }
+ break;
+ default:
+ break;
+ }
+
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, 0);
+
+ /* enable test pattern */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, 0);
+
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ value = 0;
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, value);
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, value);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static bool dce120_arm_vert_intr(
+ struct timing_generator *tg,
+ uint8_t width)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+ tg->funcs->get_scanoutpos(
+ tg,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ if (v_blank_start == 0 || v_blank_end == 0)
+ return false;
+
+ CRTC_REG_SET_2(
+ CRTC0_CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_START, v_blank_start,
+ CRTC_VERTICAL_INTERRUPT0_LINE_END, v_blank_start + width);
+
+ return true;
+}
+
+static const struct timing_generator_funcs dce120_tg_funcs = {
+ .validate_timing = dce120_tg_validate_timing,
+ .program_timing = dce120_tg_program_timing,
+ .enable_crtc = dce120_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ /* used by enable_timing_synchronization. Not need for FPGA */
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ /* never be called */
+ .get_position = dce120_timing_generator_get_crtc_position,
+ .get_frame_count = dce120_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce120_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce120_timing_generator_set_early_control,
+ /* used by enable_timing_synchronization. Not need for FPGA */
+ .wait_for_state = dce120_tg_wait_for_state,
+ .set_blank = dce120_tg_set_blank,
+ .is_blanked = dce120_tg_is_blanked,
+ /* never be called */
+ .set_colors = dce120_tg_set_colors,
+ .set_overscan_blank_color = dce120_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce120_timing_generator_program_blank_color,
+ .disable_vga = dce120_timing_generator_disable_vga,
+ .did_triggered_reset_occur = dce120_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock = dce120_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce120_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce120_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock = dce120_timing_generator_tear_down_global_swap_lock,
+ .enable_advanced_request = dce120_timing_generator_enable_advanced_request,
+ .set_drr = dce120_timing_generator_set_drr,
+ .set_static_screen_control = dce120_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce120_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce120_arm_vert_intr,
+};
+
+
+void dce120_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+
+ tg110->offsets = *offsets;
+
+ tg110->base.funcs = &dce120_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC0_CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC0_CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ /*//CRTC requires a minimum HBLANK = 32 pixels and o
+ * Minimum HSYNC = 8 pixels*/
+ tg110->min_h_blank = 32;
+ /*DCE12_CRTC_Block_ARch.doc*/
+ tg110->min_h_front_porch = 0;
+ tg110->min_h_back_porch = 0;
+
+ tg110->min_h_sync_width = 8;
+ tg110->min_v_sync_width = 1;
+ tg110->min_v_blank = 3;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h
new file mode 100644
index 000000000000..549d70b23e82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE120_H__
+#define __DC_TIMING_GENERATOR_DCE120_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+#include "dce110/dce110_timing_generator.h"
+
+
+void dce120_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+#endif /* __DC_TIMING_GENERATOR_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
new file mode 100644
index 000000000000..bc388aa4b2f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -0,0 +1,34 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+DCE80 = dce80_timing_generator.o dce80_compressor.o dce80_hw_sequencer.o \
+ dce80_resource.o
+
+AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE80)
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
new file mode 100644
index 000000000000..951f2caba9b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+
+#include "include/logger_interface.h"
+#include "dce80_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp80->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp80->offsets.dmif_offset)
+
+static const struct dce80_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG3_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG4_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG5_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce8_one_lpt_channel_max_resolution = 2048 * 1200;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG statics screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+static uint32_t lpt_size_alignment(struct dce80_compressor *cp80)
+{
+ /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
+ return cp80->base.raw_size * cp80->base.banks_num *
+ cp80->base.dram_channels_num;
+}
+
+static uint32_t lpt_memory_control_config(struct dce80_compressor *cp80,
+ uint32_t lpt_control)
+{
+ /*LPT MC Config */
+ if (cp80->base.options.bits.LPT_MC_CONFIG == 1) {
+ /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
+ * 00 - 1 CHANNEL
+ * 01 - 2 CHANNELS
+ * 02 - 4 OR 6 CHANNELS
+ * (Only for discrete GPU, N/A for CZ)
+ * 03 - 8 OR 12 CHANNELS
+ * (Only for discrete GPU, N/A for CZ) */
+ switch (cp80->base.dram_channels_num) {
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ case 1:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_PIPES!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LPT NUM_BANKS is in
+ * GRPH_CONTROL.GRPH_NUM_BANKS register field
+ * Specifies the number of memory banks for tiling
+ * purposes. Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES:
+ * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
+ * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
+ * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
+ * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
+ switch (cp80->base.banks_num) {
+ case 16:
+ set_reg_field_value(
+ lpt_control,
+ 3,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 8:
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 4:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_BANKS!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping is in DMIF_ADDR_CALC.
+ * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
+ * Carrizo specifies the memory interleave per pipe.
+ * It effectively specifies the location of pipe bits in
+ * the memory address.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
+ * interleave
+ * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
+ * interleave
+ */
+ switch (cp80->base.channel_interleave_size) {
+ case 256: /*256B */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ case 512: /*512B */
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT INTERLEAVE_SIZE!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
+ * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
+ * for Carrizo. Specifies the size of dram row in bytes.
+ * This should match up with NOOFCOLS field in
+ * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
+ * This register DMIF_ADDR_CALC is not used by the
+ * hardware as it is only used for addrlib assertions.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
+ * boundary
+ * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
+ * boundary
+ * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
+ * boundary */
+ switch (cp80->base.raw_size) {
+ case 4096: /*4 KB */
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 2048:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 1024:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT ROW_SIZE!!!",
+ __func__);
+ break;
+ }
+ } else {
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: LPT MC Configuration is not provided",
+ __func__);
+ }
+
+ return lpt_control;
+}
+
+static bool is_source_bigger_than_epanel_size(
+ struct dce80_compressor *cp80,
+ uint32_t source_view_width,
+ uint32_t source_view_height)
+{
+ if (cp80->base.embedded_panel_h_size != 0 &&
+ cp80->base.embedded_panel_v_size != 0 &&
+ ((source_view_width * source_view_height) >
+ (cp80->base.embedded_panel_h_size *
+ cp80->base.embedded_panel_v_size)))
+ return true;
+
+ return false;
+}
+
+static uint32_t align_to_chunks_number_per_line(
+ struct dce80_compressor *cp80,
+ uint32_t pixels)
+{
+ return 256 * ((pixels + 255) / 256);
+}
+
+static void wait_for_fbc_state_changed(
+ struct dce80_compressor *cp80,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+ while (counter < 10) {
+ value = dm_read_reg(cp80->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ udelay(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ }
+}
+
+void dce80_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
+ /* 1 ==> 4:1 */
+ /* 2 ==> 8:1 */
+ /* 0xF ==> 1:1 */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce80_compressor_enable_fbc(
+ struct compressor *compressor,
+ uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (compressor->options.bits.DUMMY_BACKEND == 0) &&
+ (!dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
+ (!is_source_bigger_than_epanel_size(
+ cp80,
+ params->source_view_width,
+ params->source_view_height))) {
+
+ uint32_t addr;
+ uint32_t value;
+
+ /* Before enabling FBC first need to enable LPT if applicable
+ * LPT state should always be changed (enable/disable) while FBC
+ * is disabled */
+ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
+ (params->source_view_width *
+ params->source_view_height <=
+ dce8_one_lpt_channel_max_resolution)) {
+ dce80_compressor_enable_lpt(compressor);
+ }
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp80->offsets = reg_offsets[params->inst];
+
+ /*Toggle it as there is bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp80, true);
+ }
+}
+
+void dce80_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ /* Whenever disabling FBC make sure LPT is disabled if LPT
+ * supported */
+ if (compressor->options.bits.LPT_SUPPORT)
+ dce80_compressor_disable_lpt(compressor);
+
+ wait_for_fbc_state_changed(cp80, false);
+ }
+}
+
+bool dce80_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ return false;
+}
+
+bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
+{
+ /* Check the hardware register */
+ uint32_t value = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ return get_reg_field_value(
+ value,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+}
+
+void dce80_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ if (compressor->options.bits.LPT_SUPPORT) {
+ uint32_t lpt_alignment = lpt_size_alignment(cp80);
+
+ if (lpt_alignment != 0) {
+ compressed_surf_address_low_part =
+ ((compressed_surf_address_low_part
+ + (lpt_alignment - 1)) / lpt_alignment)
+ * lpt_alignment;
+ }
+ }
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(
+ cp80,
+ params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE8 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce80_compressor_disable_lpt(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t inx;
+
+ /* Disable all pipes LPT Stutter */
+ for (inx = 0; inx < 3; inx++) {
+ value =
+ dm_read_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 0,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
+ value);
+ }
+
+ /* Disable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Clear selection of Channel(s) containing Compressed Surface */
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0xFFFFFFFF,
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
+}
+
+void dce80_compressor_enable_lpt(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t value_control;
+ uint32_t channels;
+
+ /* Enable LPT Stutter from Display pipe */
+ value = dm_read_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 1,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
+
+ /* Selection of Channel(s) containing Compressed Surface: 0xfffffff
+ * will disable LPT.
+ * STCTRL_LPT_TARGETn corresponds to channel n. */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value_control = dm_read_reg(compressor->ctx, addr);
+ channels = get_reg_field_value(value_control,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ channels + 1, /* not mentioned in programming guide,
+ but follow DCE8.1 */
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Enable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce80_compressor_program_lpt_control(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t rows_per_channel;
+ uint32_t lpt_alignment;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+ uint32_t lpt_control = 0;
+
+ if (!compressor->options.bits.LPT_SUPPORT)
+ return;
+
+ lpt_control = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ /* POSSIBLE VALUES for Low Power Tiling Mode:
+ * 00 - Use channel 0
+ * 01 - Use Channel 0 and 1
+ * 02 - Use Channel 0,1,2,3
+ * 03 - reserved */
+ switch (compressor->lpt_channels_num) {
+ /* case 2:
+ * Use Channel 0 & 1 / Not used for DCE 11 */
+ case 1:
+ /*Use Channel 0 for LPT for DCE 11 */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+ break;
+ default:
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Invalid selected DRAM channels for LPT!!!",
+ __func__);
+ break;
+ }
+
+ lpt_control = lpt_memory_control_config(cp80, lpt_control);
+
+ /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
+ * FBC compressed surface pitch.
+ * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
+ * Surface Pitch) / (Row Size * Number of Channels *
+ * Number of Banks)). */
+ rows_per_channel = 0;
+ lpt_alignment = lpt_size_alignment(cp80);
+ source_view_width =
+ align_to_chunks_number_per_line(
+ cp80,
+ params->source_view_width);
+ source_view_height = (params->source_view_height + 1) & (~0x1);
+
+ if (lpt_alignment != 0) {
+ rows_per_channel = source_view_width * source_view_height * 4;
+ rows_per_channel =
+ (rows_per_channel % lpt_alignment) ?
+ (rows_per_channel / lpt_alignment + 1) :
+ rows_per_channel / lpt_alignment;
+ }
+
+ set_reg_field_value(
+ lpt_control,
+ rows_per_channel,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROWS_PER_CHAN);
+
+ dm_write_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL, lpt_control);
+}
+
+/*
+ * DCE 11 Frame Buffer Compression Implementation
+ */
+
+void dce80_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+ /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
+ * for DCE 11 regions cannot be used - does not work with S/G
+ */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Setup events when to clear all CSM entries (effectively marking
+ * current compressed data invalid)
+ * For DCE 11 CSM metadata 11111 means - "Not Compressed"
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE8:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+ * In addition, DCE8.1 also needs to set new DCE8.1 specific events
+ * that are used to trigger invalidation on certain register changes,
+ * for example enabling of Alpha Compression may trigger invalidation of
+ * FBC once bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce80_compressor_construct(struct dce80_compressor *compressor,
+ struct dc_context *ctx)
+{
+ struct dc_bios *bp = ctx->dc_bios;
+ struct embedded_panel_info panel_info;
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+ compressor->base.options.bits.LPT_SUPPORT = true;
+ /* For DCE 11 always use one DRAM channel for LPT */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
+ * should not be supported */
+ if (compressor->base.memory_bus_width == 64)
+ compressor->base.options.bits.LPT_SUPPORT = false;
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.lpt_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+
+ if (BP_RESULT_OK ==
+ bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
+ compressor->base.embedded_panel_h_size =
+ panel_info.lcd_timing.horizontal_addressable;
+ compressor->base.embedded_panel_v_size =
+ panel_info.lcd_timing.vertical_addressable;
+ }
+}
+
+struct compressor *dce80_compressor_create(struct dc_context *ctx)
+{
+ struct dce80_compressor *cp80 =
+ kzalloc(sizeof(struct dce80_compressor), GFP_KERNEL);
+
+ if (!cp80)
+ return NULL;
+
+ dce80_compressor_construct(cp80, ctx);
+ return &cp80->base;
+}
+
+void dce80_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE80_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
new file mode 100644
index 000000000000..cca58b044402
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
@@ -0,0 +1,78 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE80_H__
+#define __DC_COMPRESSOR_DCE80_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE80_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce80_compressor, base)
+
+struct dce80_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce80_compressor {
+ struct compressor base;
+ struct dce80_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce80_compressor_create(struct dc_context *ctx);
+
+void dce80_compressor_construct(struct dce80_compressor *cp80,
+ struct dc_context *ctx);
+
+void dce80_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce80_compressor_power_up_fbc(struct compressor *cp);
+
+void dce80_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params);
+
+void dce80_compressor_disable_fbc(struct compressor *cp);
+
+void dce80_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce80_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce80_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce80_compressor_enable_lpt(struct compressor *cp);
+
+void dce80_compressor_disable_lpt(struct compressor *cp);
+
+void dce80_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
new file mode 100644
index 000000000000..ccfcf1c0eeb3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce80_hw_sequencer.h"
+
+#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+/* include DCE8 register header files */
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+struct dce80_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+/***************************PIPE_CONTROL***********************************/
+
+static bool dce80_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)){
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+ * by default when command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+void dce80_hw_sequencer_construct(struct dc *dc)
+{
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwss.enable_display_power_gating = dce80_enable_display_power_gating;
+ dc->hwss.pipe_control_lock = dce_pipe_control_lock;
+ dc->hwss.set_bandwidth = dce100_set_bandwidth;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
new file mode 100644
index 000000000000..7a1b31def66f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE80_H__
+#define __DC_HWSS_DCE80_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce80_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE80_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
new file mode 100644
index 000000000000..9c18efd3446f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -0,0 +1,1257 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "irq/dce80/irq_service_dce80.h"
+#include "dce110/dce110_timing_generator.h"
+#include "dce110/dce110_resource.h"
+#include "dce80/dce80_timing_generator.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce80/dce80_hw_sequencer.h"
+#include "dce100/dce100_resource.h"
+
+#include "reg_helper.h"
+
+/* TODO remove this include */
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE
+#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE
+#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE
+#endif
+
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE
+#endif
+
+
+#ifndef mmHPD_DC_HPD_CONTROL
+ #define mmHPD_DC_HPD_CONTROL 0x189A
+ #define mmHPD0_DC_HPD_CONTROL 0x189A
+ #define mmHPD1_DC_HPD_CONTROL 0x18A2
+ #define mmHPD2_DC_HPD_CONTROL 0x18AA
+ #define mmHPD3_DC_HPD_CONTROL 0x18B2
+ #define mmHPD4_DC_HPD_CONTROL 0x18BA
+ #define mmHPD5_DC_HPD_CONTROL 0x18C2
+#endif
+
+#define DCE11_DIG_FE_CNTL 0x4a00
+#define DCE11_DIG_BE_CNTL 0x4a47
+#define DCE11_DP_SEC 0x4ac3
+
+static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_COMMON_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE80(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE80(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE80(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE80_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST_DCE_BASE(id),\
+ .AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5),
+ stream_enc_regs(6)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_80_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_80(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_80(id),\
+}
+
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3,
+};
+
+static const struct resource_caps res_cap_81 = {
+ .num_timing_generator = 4,
+ .num_audio = 7,
+ .num_stream_encoder = 7,
+ .num_pll = 3,
+};
+
+static const struct resource_caps res_cap_83 = {
+ .num_timing_generator = 2,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 2,
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce80_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce80_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct output_pixel_processor *dce80_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static struct stream_encoder *dce80_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE8_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE8_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE8_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce80_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce80_stream_encoder_create,
+ .create_hwseq = dce80_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE8_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE8_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE8_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce80_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 2;
+ return &dce_mi->base;
+}
+
+static void dce80_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce80_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->prescaler_on = false;
+ return &transform->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 297000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce80_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+struct clock_source *dce80_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce80_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static struct input_pixel_processor *dce80_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce80_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce80_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce80_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce80_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+
+ return true;
+}
+
+static bool dce80_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce80_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce80_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+enum dc_status dce80_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ result = dce80_validate_bandwidth(dc, context);
+
+ return result;
+}
+
+static void dce80_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce80_res_pool_funcs = {
+ .destroy = dce80_destroy_resource_pool,
+ .link_enc_create = dce80_link_encoder_create,
+ .validate_guaranteed = dce80_validate_guaranteed,
+ .validate_bandwidth = dce80_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce100_add_stream_to_ctx,
+ .validate_global = dce80_validate_global
+};
+
+static bool dce80_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+ * Resource + asic cap harcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce80_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce80_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce81_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_81;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+ * Resource + asic cap harcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_81.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce81_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce81_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce83_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_83;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+ * Resource + asic cap harcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_83.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 2;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 1;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce83_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce83_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
new file mode 100644
index 000000000000..eff31ab83a39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
@@ -0,0 +1,47 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE80_H__
+#define __DC_RESOURCE_DCE80_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce80_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce81_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce83_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCE80_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
new file mode 100644
index 000000000000..265894851493
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE8 register header files */
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "dc_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "../dce110/dce110_timing_generator.h"
+#include "dce80_timing_generator.h"
+
+#include "timing_generator.h"
+
+enum black_color_format {
+ BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */
+ BLACK_COLOR_FORMAT_RGB_LIMITED,
+ BLACK_COLOR_FORMAT_YUV_TV,
+ BLACK_COLOR_FORMAT_YUV_CV,
+ BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+
+ BLACK_COLOR_FORMAT_COUNT
+};
+
+static const struct dce110_timing_generator_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+}
+};
+
+#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
+
+#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
+#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASKhw + 1)
+
+#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
+#define DCP_REG(reg) (reg + tg110->offsets.dcp)
+#define DMIF_REG(reg) (reg + tg110->offsets.dmif)
+
+void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
+{
+ uint64_t pix_dur;
+ uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
+ + DCE110TG_FROM_TG(tg)->offsets.dmif;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (pix_clk_khz == 0)
+ return;
+
+ pix_dur = 1000000000 / pix_clk_khz;
+
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPG_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (!use_vbios)
+ program_pix_dur(tg, timing->pix_clk_khz);
+
+ dce110_tg_program_timing(tg, timing, use_vbios);
+}
+
+static const struct timing_generator_funcs dce80_tg_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = program_timing,
+ .enable_crtc = dce110_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ .get_position = dce110_timing_generator_get_position,
+ .get_frame_count = dce110_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce110_timing_generator_set_early_control,
+ .wait_for_state = dce110_tg_wait_for_state,
+ .set_blank = dce110_tg_set_blank,
+ .is_blanked = dce110_tg_is_blanked,
+ .set_colors = dce110_tg_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_program_blank_color,
+ .disable_vga = dce110_timing_generator_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_tear_down_global_swap_lock,
+ .set_drr = dce110_timing_generator_set_drr,
+ .set_static_screen_control =
+ dce110_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce110_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce110_arm_vert_intr,
+
+ /* DCE8.0 overrides */
+ .enable_advanced_request =
+ dce80_timing_generator_enable_advanced_request,
+};
+
+void dce80_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+ tg110->offsets = *offsets;
+ tg110->derived_offsets = reg_offsets[instance];
+
+ tg110->base.funcs = &dce80_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
+
+void dce80_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
new file mode 100644
index 000000000000..9cebb24c94c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE80_H__
+#define __DC_TIMING_GENERATOR_DCE80_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+
+/* DCE8.0 implementation inherits from DCE11.0 */
+void dce80_timing_generator_construct(
+ struct dce110_timing_generator *tg,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+/******** HW programming ************/
+void dce80_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing);
+
+#endif /* __DC_TIMING_GENERATOR_DCE80_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
new file mode 100644
index 000000000000..f565a6042970
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for DCN.
+
+DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_timing_generator.o \
+ dcn10_hubp.o dcn10_mpc.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o
+
+AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
new file mode 100644
index 000000000000..7f579cb19f4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+
+#include "dcn10_cm_common.h"
+
+#define REG(reg) reg
+
+#define CTX \
+ ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ reg->shifts.field_name, reg->masks.field_name
+
+void cm_helper_program_color_matrices(
+ struct dc_context *ctx,
+ const uint16_t *regval,
+ const struct color_matrices_reg *reg)
+{
+ uint32_t cur_csc_reg;
+ unsigned int i = 0;
+
+ for (cur_csc_reg = reg->csc_c11_c12;
+ cur_csc_reg <= reg->csc_c33_c34;
+ cur_csc_reg++) {
+
+ const uint16_t *regval0 = &(regval[2 * i]);
+ const uint16_t *regval1 = &(regval[(2 * i) + 1]);
+
+ REG_SET_2(cur_csc_reg, 0,
+ csc_c11, *regval0,
+ csc_c12, *regval1);
+
+ i++;
+ }
+
+}
+
+void cm_helper_program_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct xfer_func_reg *reg)
+{
+ uint32_t reg_region_cur;
+ unsigned int i = 0;
+
+ REG_SET_2(reg->start_cntl_b, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_g, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_r, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+
+ REG_SET(reg->start_slope_cntl_b, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ REG_SET(reg->start_slope_cntl_g, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ REG_SET(reg->start_slope_cntl_r, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+
+ REG_SET(reg->start_end_cntl1_b, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_b, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ REG_SET(reg->start_end_cntl1_g, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_g, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ REG_SET(reg->start_end_cntl1_r, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_r, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ for (reg_region_cur = reg->region_start;
+ reg_region_cur <= reg->region_end;
+ reg_region_cur++) {
+
+ const struct gamma_curve *curve0 = &(params->arr_curve_points[2 * i]);
+ const struct gamma_curve *curve1 = &(params->arr_curve_points[(2 * i) + 1]);
+
+ REG_SET_4(reg_region_cur, 0,
+ exp_region0_lut_offset, curve0->offset,
+ exp_region0_num_segments, curve0->segments_num,
+ exp_region1_lut_offset, curve1->offset,
+ exp_region1_num_segments, curve1->segments_num);
+
+ i++;
+ }
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
new file mode 100644
index 000000000000..64836dcf21f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN10_CM_COMMON_H__
+#define __DAL_DCN10_CM_COMMON_H__
+
+#define TF_HELPER_REG_FIELD_LIST(type) \
+ type exp_region0_lut_offset; \
+ type exp_region0_num_segments; \
+ type exp_region1_lut_offset; \
+ type exp_region1_num_segments;\
+ type field_region_end;\
+ type field_region_end_slope;\
+ type field_region_end_base;\
+ type exp_region_start;\
+ type exp_resion_start_segment;\
+ type field_region_linear_slope
+
+#define TF_CM_REG_FIELD_LIST(type) \
+ type csc_c11; \
+ type csc_c12
+
+struct xfer_func_shift {
+ TF_HELPER_REG_FIELD_LIST(uint8_t);
+};
+
+struct xfer_func_mask {
+ TF_HELPER_REG_FIELD_LIST(uint32_t);
+};
+
+struct xfer_func_reg {
+ struct xfer_func_shift shifts;
+ struct xfer_func_mask masks;
+
+ uint32_t start_cntl_b;
+ uint32_t start_cntl_g;
+ uint32_t start_cntl_r;
+ uint32_t start_slope_cntl_b;
+ uint32_t start_slope_cntl_g;
+ uint32_t start_slope_cntl_r;
+ uint32_t start_end_cntl1_b;
+ uint32_t start_end_cntl2_b;
+ uint32_t start_end_cntl1_g;
+ uint32_t start_end_cntl2_g;
+ uint32_t start_end_cntl1_r;
+ uint32_t start_end_cntl2_r;
+ uint32_t region_start;
+ uint32_t region_end;
+};
+
+struct cm_color_matrix_shift {
+ TF_CM_REG_FIELD_LIST(uint8_t);
+};
+
+struct cm_color_matrix_mask {
+ TF_CM_REG_FIELD_LIST(uint32_t);
+};
+
+struct color_matrices_reg{
+ struct cm_color_matrix_shift shifts;
+ struct cm_color_matrix_mask masks;
+
+ uint32_t csc_c11_c12;
+ uint32_t csc_c33_c34;
+};
+
+void cm_helper_program_color_matrices(
+ struct dc_context *ctx,
+ const uint16_t *regval,
+ const struct color_matrices_reg *reg);
+
+void cm_helper_program_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct xfer_func_reg *reg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
new file mode 100644
index 000000000000..74e7c82bdc76
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+enum pixel_format_description {
+ PIXEL_FORMAT_FIXED = 0,
+ PIXEL_FORMAT_FIXED16,
+ PIXEL_FORMAT_FLOAT
+
+};
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+ /* Autocal calculate the scaling ratio and initial phase and the
+ * DSCL_MODE_SEL must be set to 1
+ */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+ /* Autocal perform auto centering without replication and the
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+ /* Autocal perform auto centering and auto replication and the
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
+
+/* Program gamut remap in bypass mode */
+void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
+{
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ /* Gamut remap in bypass */
+}
+
+#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+
+
+bool dpp_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ uint32_t pixel_width;
+
+ if (scl_data->viewport.width > scl_data->recout.width)
+ pixel_width = scl_data->recout.width;
+ else
+ pixel_width = scl_data->viewport.width;
+
+ /* TODO: add lb check */
+
+ /* No support for programming ratio of 4, drop to 3.99999.. */
+ if (scl_data->ratios.horz.value == (4ll << 32))
+ scl_data->ratios.horz.value--;
+ if (scl_data->ratios.vert.value == (4ll << 32))
+ scl_data->ratios.vert.value--;
+ if (scl_data->ratios.horz_c.value == (4ll << 32))
+ scl_data->ratios.horz_c.value--;
+ if (scl_data->ratios.vert_c.value == (4ll << 32))
+ scl_data->ratios.vert_c.value--;
+
+ /* Set default taps if none are provided */
+ if (in_taps->h_taps == 0)
+ scl_data->taps.h_taps = 4;
+ else
+ scl_data->taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0)
+ scl_data->taps.v_taps = 4;
+ else
+ scl_data->taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0)
+ scl_data->taps.v_taps_c = 2;
+ else
+ scl_data->taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0)
+ scl_data->taps.h_taps_c = 2;
+ /* Only 1 and even h_taps_c are supported by hw */
+ else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
+ if (!dpp->ctx->dc->debug.always_scale) {
+ if (IDENTITY_RATIO(scl_data->ratios.horz))
+ scl_data->taps.h_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.v_taps = 1;
+ /*
+ * Spreadsheet doesn't handle taps_c is one properly,
+ * need to force Chroma to always be scaled to pass
+ * bandwidth validation.
+ */
+ }
+
+ return true;
+}
+
+void dpp_reset(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ dpp->filter_h_c = NULL;
+ dpp->filter_v_c = NULL;
+ dpp->filter_h = NULL;
+ dpp->filter_v = NULL;
+
+ /* set boundary mode to 0 */
+ REG_SET(DSCL_CONTROL, 0, SCL_BOUNDARY_MODE, 0);
+}
+
+
+
+static void dpp1_cm_set_regamma_pwl(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ dpp1_cm_power_on_regamma_lut(dpp_base, true);
+ dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe);
+
+ if (dpp->is_write_to_ram_a_safe)
+ dpp1_cm_program_regamma_luta_settings(dpp_base, params);
+ else
+ dpp1_cm_program_regamma_lutb_settings(dpp_base, params);
+
+ dpp1_cm_program_regamma_lut(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+}
+
+static void dpp1_cm_set_regamma_mode(
+ struct dpp *dpp_base,
+ enum opp_regamma mode)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint32_t re_mode = 0;
+ uint32_t obuf_bypass = 0; /* need for pipe split */
+ uint32_t obuf_hupscale = 0;
+
+ switch (mode) {
+ case OPP_REGAMMA_BYPASS:
+ re_mode = 0;
+ break;
+ case OPP_REGAMMA_SRGB:
+ re_mode = 1;
+ break;
+ case OPP_REGAMMA_3_6:
+ re_mode = 2;
+ break;
+ case OPP_REGAMMA_USER:
+ re_mode = dpp->is_write_to_ram_a_safe ? 3 : 4;
+ dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe;
+ break;
+ default:
+ break;
+ }
+
+ REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode);
+ REG_UPDATE_2(OBUF_CONTROL,
+ OBUF_BYPASS, obuf_bypass,
+ OBUF_H_2X_UPSCALE_EN, obuf_hupscale);
+}
+
+static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\
+ enum pixel_format_description *fmt)
+{
+
+ if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F ||
+ input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F)
+ *fmt = PIXEL_FORMAT_FLOAT;
+ else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+ *fmt = PIXEL_FORMAT_FIXED16;
+ else
+ *fmt = PIXEL_FORMAT_FIXED;
+}
+
+static void dpp1_set_degamma_format_float(
+ struct dpp *dpp_base,
+ bool is_float)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (is_float) {
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3);
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1);
+ } else {
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2);
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0);
+ }
+}
+
+void dpp1_cnv_setup (
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode)
+{
+ uint32_t pixel_format;
+ uint32_t alpha_en;
+ enum pixel_format_description fmt ;
+ enum dc_color_space color_space;
+ enum dcn10_input_csc_select select;
+ bool is_float;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ bool force_disable_cursor = false;
+
+ dpp1_setup_format_flags(input_format, &fmt);
+ alpha_en = 1;
+ pixel_format = 0;
+ color_space = COLOR_SPACE_SRGB;
+ select = INPUT_CSC_SELECT_BYPASS;
+ is_float = false;
+
+ switch (fmt) {
+ case PIXEL_FORMAT_FIXED:
+ case PIXEL_FORMAT_FIXED16:
+ /*when output is float then FORMAT_CONTROL__OUTPUT_FP=1*/
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode,
+ OUTPUT_FP, 0);
+ break;
+ case PIXEL_FORMAT_FLOAT:
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode,
+ OUTPUT_FP, 1);
+ is_float = true;
+ break;
+ default:
+
+ break;
+ }
+
+ dpp1_set_degamma_format_float(dpp_base, is_float);
+
+ switch (input_format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ pixel_format = 22;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ default:
+ break;
+ }
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ dpp1_program_input_csc(dpp_base, color_space, select);
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+ }
+}
+
+void dpp1_set_cursor_attributes(
+ struct dpp *dpp_base,
+ const struct dc_cursor_attributes *attr)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dc_cursor_color_format color_format = attr->color_format;
+
+ REG_UPDATE_2(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0);
+
+ if (color_format == CURSOR_MODE_MONO) {
+ /* todo: clarify what to program these to */
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
+
+ /* TODO: Fixed vs float */
+
+ REG_UPDATE_3(FORMAT_CONTROL,
+ CNVC_BYPASS, 0,
+ FORMAT_CONTROL__ALPHA_EN, 1,
+ FORMAT_EXPANSION_MODE, 0);
+}
+
+
+void dpp1_set_cursor_position(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+ if (src_x_offset >= (int)param->viewport_width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + (int)width < 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, cur_en);
+
+}
+
+static const struct dpp_funcs dcn10_dpp_funcs = {
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .opp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
+ .opp_set_csc_default = dpp1_cm_set_output_csc_default,
+ .opp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut,
+ .opp_program_regamma_lut = dpp1_cm_program_regamma_lut,
+ .opp_configure_regamma_lut = dpp1_cm_configure_regamma_lut,
+ .opp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings,
+ .opp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings,
+ .opp_program_regamma_pwl = dpp1_cm_set_regamma_pwl,
+ .opp_set_regamma_mode = dpp1_cm_set_regamma_mode,
+ .ipp_set_degamma = dpp1_set_degamma,
+ .ipp_program_input_lut = dpp1_program_input_lut,
+ .ipp_program_degamma_pwl = dpp1_set_degamma_pwl,
+ .ipp_setup = dpp1_cnv_setup,
+ .ipp_full_bypass = dpp1_full_bypass,
+ .set_cursor_attributes = dpp1_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+};
+
+static struct dpp_caps dcn10_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT,
+ .dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions,
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dpp1_construct(
+ struct dcn10_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_dpp_registers *tf_regs,
+ const struct dcn_dpp_shift *tf_shift,
+ const struct dcn_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn10_dpp_funcs;
+ dpp->base.caps = &dcn10_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
new file mode 100644
index 000000000000..a9782b1aba47
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -0,0 +1,1386 @@
+/* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DPP_DCN10_H__
+#define __DAL_DPP_DCN10_H__
+
+#include "dpp.h"
+
+#define TO_DCN10_DPP(dpp)\
+ container_of(dpp, struct dcn10_dpp, base)
+
+/* TODO: Use correct number of taps. Using polaris values for now */
+#define LB_TOTAL_NUMBER_OF_ENTRIES 5124
+#define LB_BITS_PER_ENTRY 144
+
+#define TF_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+//Used to resolve corner case
+#define TF2_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## _ ## field_name ## post_fix
+
+#define TF_REG_LIST_DCN(id) \
+ SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
+ SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
+ SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI(OTG_H_BLANK, DSCL, id), \
+ SRI(OTG_V_BLANK, DSCL, id), \
+ SRI(SCL_MODE, DSCL, id), \
+ SRI(LB_DATA_FORMAT, DSCL, id), \
+ SRI(LB_MEMORY_CTRL, DSCL, id), \
+ SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(SCL_BLACK_OFFSET, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
+ SRI(SCL_TAP_CONTROL, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI(DSCL_2TAP_CONTROL, DSCL, id), \
+ SRI(MPC_SIZE, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_BOT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \
+ SRI(RECOUT_START, DSCL, id), \
+ SRI(RECOUT_SIZE, DSCL, id), \
+ SRI(OBUF_CONTROL, DSCL, id), \
+ SRI(CM_ICSC_CONTROL, CM, id), \
+ SRI(CM_ICSC_C11_C12, CM, id), \
+ SRI(CM_ICSC_C33_C34, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_B, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_B, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_G, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_G, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_R, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_R, CM, id), \
+ SRI(CM_DGAM_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_DGAM_RAMB_REGION_14_15, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_B, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_B, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_G, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_G, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_R, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_R, CM, id), \
+ SRI(CM_DGAM_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_DGAM_RAMA_REGION_14_15, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_DGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_DGAM_LUT_INDEX, CM, id), \
+ SRI(CM_DGAM_LUT_DATA, CM, id), \
+ SRI(CM_CONTROL, CM, id), \
+ SRI(CM_DGAM_CONTROL, CM, id), \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+
+
+
+#define TF_REG_LIST_DCN10(id) \
+ TF_REG_LIST_DCN(id), \
+ SRI(CM_COMA_C11_C12, CM, id),\
+ SRI(CM_COMA_C33_C34, CM, id),\
+ SRI(CM_COMB_C11_C12, CM, id),\
+ SRI(CM_COMB_C33_C34, CM, id),\
+ SRI(CM_OCSC_CONTROL, CM, id), \
+ SRI(CM_OCSC_C11_C12, CM, id), \
+ SRI(CM_OCSC_C33_C34, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_RGAM_LUT_DATA, CM, id), \
+ SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\
+ SRI(CM_RGAM_LUT_INDEX, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_B, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_B, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_G, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_G, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_R, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_R, CM, id), \
+ SRI(CM_RGAM_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_RGAM_RAMB_REGION_32_33, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_B, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_B, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_G, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_G, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_R, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_R, CM, id), \
+ SRI(CM_RGAM_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_RGAM_RAMA_REGION_32_33, CM, id), \
+ SRI(CM_RGAM_CONTROL, CM, id), \
+ SRI(CM_IGAM_CONTROL, CM, id), \
+ SRI(CM_IGAM_LUT_RW_CONTROL, CM, id), \
+ SRI(CM_IGAM_LUT_RW_INDEX, CM, id), \
+ SRI(CM_IGAM_LUT_SEQ_COLOR, CM, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CM_CMOUT_CONTROL, CM, id)
+
+
+#define TF_REG_LIST_SH_MASK_DCN(mask_sh)\
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\
+ TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\
+ TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_FRAC_BOT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_INT_BOT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_FRAC_BOT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \
+ TF_SF(DSCL0_OBUF_CONTROL, OBUF_BYPASS, mask_sh), \
+ TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C33, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C34, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_B, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_G, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_R, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_B, CM_DGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_G, CM_DGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_R, CM_DGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_B, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_G, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_R, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_B, CM_DGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_G, CM_DGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_R, CM_DGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh)
+
+#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_REDUCE_MODE, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, DYNAMIC_PIXEL_DEPTH, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, DITHER_EN, mask_sh),\
+ TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C11, mask_sh),\
+ TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C12, mask_sh),\
+ TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C33, mask_sh),\
+ TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C34, mask_sh),\
+ TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C11, mask_sh),\
+ TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C12, mask_sh),\
+ TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C33, mask_sh),\
+ TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C34, mask_sh),\
+ TF_SF(CM0_CM_OCSC_CONTROL, CM_OCSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C11, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \
+ TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_INDEX, CM_RGAM_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_B, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_G, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_R, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_B, CM_RGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_G, CM_RGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_R, CM_RGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_B, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_G, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_R, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_B, CM_RGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_G, CM_RGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_R, CM_RGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \
+ TF_SF(DSCL0_OBUF_CONTROL, OBUF_H_2X_UPSCALE_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_B, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, mask_sh), \
+ TF_SF(CM0_CM_CONTROL, CM_BYPASS_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_SEQ_COLOR, CM_IGAM_LUT_SEQ_COLOR, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh), \
+ TF_SF(CM0_CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh)
+
+#define TF_REG_FIELD_LIST(type) \
+ type EXT_OVERSCAN_LEFT; \
+ type EXT_OVERSCAN_RIGHT; \
+ type EXT_OVERSCAN_BOTTOM; \
+ type EXT_OVERSCAN_TOP; \
+ type OTG_H_BLANK_START; \
+ type OTG_H_BLANK_END; \
+ type OTG_V_BLANK_START; \
+ type OTG_V_BLANK_END; \
+ type PIXEL_DEPTH; \
+ type PIXEL_EXPAN_MODE; \
+ type PIXEL_REDUCE_MODE; \
+ type DYNAMIC_PIXEL_DEPTH; \
+ type DITHER_EN; \
+ type INTERLEAVE_EN; \
+ type LB_DATA_FORMAT__ALPHA_EN; \
+ type MEMORY_CONFIG; \
+ type LB_MAX_PARTITIONS; \
+ type AUTOCAL_MODE; \
+ type AUTOCAL_NUM_PIPE; \
+ type AUTOCAL_PIPE_ID; \
+ type SCL_BLACK_OFFSET_RGB_Y; \
+ type SCL_BLACK_OFFSET_CBCR; \
+ type SCL_BOUNDARY_MODE; \
+ type SCL_V_NUM_TAPS; \
+ type SCL_H_NUM_TAPS; \
+ type SCL_V_NUM_TAPS_C; \
+ type SCL_H_NUM_TAPS_C; \
+ type SCL_COEF_RAM_TAP_PAIR_IDX; \
+ type SCL_COEF_RAM_PHASE; \
+ type SCL_COEF_RAM_FILTER_TYPE; \
+ type SCL_COEF_RAM_EVEN_TAP_COEF; \
+ type SCL_COEF_RAM_EVEN_TAP_COEF_EN; \
+ type SCL_COEF_RAM_ODD_TAP_COEF; \
+ type SCL_COEF_RAM_ODD_TAP_COEF_EN; \
+ type SCL_H_2TAP_HARDCODE_COEF_EN; \
+ type SCL_H_2TAP_SHARP_EN; \
+ type SCL_H_2TAP_SHARP_FACTOR; \
+ type SCL_V_2TAP_HARDCODE_COEF_EN; \
+ type SCL_V_2TAP_SHARP_EN; \
+ type SCL_V_2TAP_SHARP_FACTOR; \
+ type SCL_COEF_RAM_SELECT; \
+ type DSCL_MODE; \
+ type RECOUT_START_X; \
+ type RECOUT_START_Y; \
+ type RECOUT_WIDTH; \
+ type RECOUT_HEIGHT; \
+ type MPC_WIDTH; \
+ type MPC_HEIGHT; \
+ type SCL_H_SCALE_RATIO; \
+ type SCL_V_SCALE_RATIO; \
+ type SCL_H_SCALE_RATIO_C; \
+ type SCL_V_SCALE_RATIO_C; \
+ type SCL_H_INIT_FRAC; \
+ type SCL_H_INIT_INT; \
+ type SCL_H_INIT_FRAC_C; \
+ type SCL_H_INIT_INT_C; \
+ type SCL_V_INIT_FRAC; \
+ type SCL_V_INIT_INT; \
+ type SCL_V_INIT_FRAC_BOT; \
+ type SCL_V_INIT_INT_BOT; \
+ type SCL_V_INIT_FRAC_C; \
+ type SCL_V_INIT_INT_C; \
+ type SCL_V_INIT_FRAC_BOT_C; \
+ type SCL_V_INIT_INT_BOT_C; \
+ type SCL_CHROMA_COEF_MODE; \
+ type SCL_COEF_RAM_SELECT_CURRENT; \
+ type CM_GAMUT_REMAP_MODE; \
+ type CM_GAMUT_REMAP_C11; \
+ type CM_GAMUT_REMAP_C12; \
+ type CM_GAMUT_REMAP_C33; \
+ type CM_GAMUT_REMAP_C34; \
+ type CM_COMA_C11; \
+ type CM_COMA_C12; \
+ type CM_COMA_C33; \
+ type CM_COMA_C34; \
+ type CM_COMB_C11; \
+ type CM_COMB_C12; \
+ type CM_COMB_C33; \
+ type CM_COMB_C34; \
+ type CM_OCSC_MODE; \
+ type CM_OCSC_C11; \
+ type CM_OCSC_C12; \
+ type CM_OCSC_C33; \
+ type CM_OCSC_C34; \
+ type RGAM_MEM_PWR_FORCE; \
+ type CM_RGAM_LUT_DATA; \
+ type CM_RGAM_LUT_WRITE_EN_MASK; \
+ type CM_RGAM_LUT_WRITE_SEL; \
+ type CM_RGAM_LUT_INDEX; \
+ type CM_RGAM_RAMB_EXP_REGION_START_B; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_RGAM_RAMB_EXP_REGION_START_G; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_RGAM_RAMB_EXP_REGION_START_R; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION_START_B; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_RGAM_RAMA_EXP_REGION_START_G; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_RGAM_RAMA_EXP_REGION_START_R; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_RGAM_LUT_MODE; \
+ type CM_CMOUT_ROUND_TRUNC_MODE; \
+ type OBUF_BYPASS; \
+ type OBUF_H_2X_UPSCALE_EN; \
+ type CM_BLNDGAM_LUT_MODE; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_BLNDGAM_LUT_WRITE_EN_MASK; \
+ type CM_BLNDGAM_LUT_WRITE_SEL; \
+ type CM_BLNDGAM_LUT_INDEX; \
+ type CM_BLNDGAM_LUT_DATA; \
+ type CM_3DLUT_MODE; \
+ type CM_3DLUT_SIZE; \
+ type CM_3DLUT_INDEX; \
+ type CM_3DLUT_DATA0; \
+ type CM_3DLUT_DATA1; \
+ type CM_3DLUT_DATA_30BIT; \
+ type CM_3DLUT_WRITE_EN_MASK; \
+ type CM_3DLUT_RAM_SEL; \
+ type CM_3DLUT_30BIT_EN; \
+ type CM_3DLUT_CONFIG_STATUS; \
+ type CM_3DLUT_READ_SEL; \
+ type CM_SHAPER_LUT_MODE; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_SHAPER_LUT_WRITE_EN_MASK; \
+ type CM_SHAPER_LUT_WRITE_SEL; \
+ type CM_SHAPER_LUT_INDEX; \
+ type CM_SHAPER_LUT_DATA; \
+ type CM_DGAM_CONFIG_STATUS; \
+ type CM_ICSC_MODE; \
+ type CM_ICSC_C11; \
+ type CM_ICSC_C12; \
+ type CM_ICSC_C33; \
+ type CM_ICSC_C34; \
+ type CM_DGAM_RAMB_EXP_REGION_START_B; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_DGAM_RAMB_EXP_REGION_START_G; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_DGAM_RAMB_EXP_REGION_START_R; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION_START_B; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_DGAM_RAMA_EXP_REGION_START_G; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_DGAM_RAMA_EXP_REGION_START_R; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type SHARED_MEM_PWR_DIS; \
+ type CM_IGAM_LUT_FORMAT_R; \
+ type CM_IGAM_LUT_FORMAT_G; \
+ type CM_IGAM_LUT_FORMAT_B; \
+ type CM_IGAM_LUT_HOST_EN; \
+ type CM_IGAM_LUT_RW_MODE; \
+ type CM_IGAM_LUT_WRITE_EN_MASK; \
+ type CM_IGAM_LUT_SEL; \
+ type CM_IGAM_LUT_SEQ_COLOR; \
+ type CM_IGAM_DGAM_CONFIG_STATUS; \
+ type CM_DGAM_LUT_WRITE_EN_MASK; \
+ type CM_DGAM_LUT_WRITE_SEL; \
+ type CM_DGAM_LUT_INDEX; \
+ type CM_DGAM_LUT_DATA; \
+ type CM_DGAM_LUT_MODE; \
+ type CM_IGAM_LUT_MODE; \
+ type CM_IGAM_INPUT_FORMAT; \
+ type CM_IGAM_LUT_RW_INDEX; \
+ type CM_BYPASS_EN; \
+ type FORMAT_EXPANSION_MODE; \
+ type CNVC_BYPASS; \
+ type OUTPUT_FP; \
+ type CNVC_SURFACE_PIXEL_FORMAT; \
+ type CURSOR_MODE; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CUR0_MODE; \
+ type CUR0_EXPANSION_MODE; \
+ type CUR0_ENABLE; \
+ type CM_BYPASS; \
+ type FORMAT_CONTROL__ALPHA_EN; \
+ type CUR0_COLOR0; \
+ type CUR0_COLOR1
+
+
+
+struct dcn_dpp_shift {
+ TF_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn_dpp_mask {
+ TF_REG_FIELD_LIST(uint32_t);
+};
+
+
+
+
+struct dcn_dpp_registers {
+ uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT;
+ uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM;
+ uint32_t OTG_H_BLANK;
+ uint32_t OTG_V_BLANK;
+ uint32_t SCL_MODE;
+ uint32_t LB_DATA_FORMAT;
+ uint32_t LB_MEMORY_CTRL;
+ uint32_t DSCL_AUTOCAL;
+ uint32_t SCL_BLACK_OFFSET;
+ uint32_t DSCL_CONTROL;
+ uint32_t SCL_TAP_CONTROL;
+ uint32_t SCL_COEF_RAM_TAP_SELECT;
+ uint32_t SCL_COEF_RAM_TAP_DATA;
+ uint32_t DSCL_2TAP_CONTROL;
+ uint32_t MPC_SIZE;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO_C;
+ uint32_t SCL_HORZ_FILTER_INIT;
+ uint32_t SCL_HORZ_FILTER_INIT_C;
+ uint32_t SCL_VERT_FILTER_INIT;
+ uint32_t SCL_VERT_FILTER_INIT_BOT;
+ uint32_t SCL_VERT_FILTER_INIT_C;
+ uint32_t SCL_VERT_FILTER_INIT_BOT_C;
+ uint32_t RECOUT_START;
+ uint32_t RECOUT_SIZE;
+ uint32_t CM_GAMUT_REMAP_CONTROL;
+ uint32_t CM_GAMUT_REMAP_C11_C12;
+ uint32_t CM_GAMUT_REMAP_C33_C34;
+ uint32_t CM_COMA_C11_C12;
+ uint32_t CM_COMA_C33_C34;
+ uint32_t CM_COMB_C11_C12;
+ uint32_t CM_COMB_C33_C34;
+ uint32_t CM_OCSC_CONTROL;
+ uint32_t CM_OCSC_C11_C12;
+ uint32_t CM_OCSC_C33_C34;
+ uint32_t CM_MEM_PWR_CTRL;
+ uint32_t CM_RGAM_LUT_DATA;
+ uint32_t CM_RGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_RGAM_LUT_INDEX;
+ uint32_t CM_RGAM_RAMB_START_CNTL_B;
+ uint32_t CM_RGAM_RAMB_START_CNTL_G;
+ uint32_t CM_RGAM_RAMB_START_CNTL_R;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_RGAM_RAMB_REGION_0_1;
+ uint32_t CM_RGAM_RAMB_REGION_32_33;
+ uint32_t CM_RGAM_RAMA_START_CNTL_B;
+ uint32_t CM_RGAM_RAMA_START_CNTL_G;
+ uint32_t CM_RGAM_RAMA_START_CNTL_R;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_RGAM_RAMA_REGION_0_1;
+ uint32_t CM_RGAM_RAMA_REGION_32_33;
+ uint32_t CM_RGAM_CONTROL;
+ uint32_t CM_CMOUT_CONTROL;
+ uint32_t OBUF_CONTROL;
+ uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_BLNDGAM_CONTROL;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_BLNDGAM_RAMB_REGION_0_1;
+ uint32_t CM_BLNDGAM_RAMB_REGION_2_3;
+ uint32_t CM_BLNDGAM_RAMB_REGION_4_5;
+ uint32_t CM_BLNDGAM_RAMB_REGION_6_7;
+ uint32_t CM_BLNDGAM_RAMB_REGION_8_9;
+ uint32_t CM_BLNDGAM_RAMB_REGION_10_11;
+ uint32_t CM_BLNDGAM_RAMB_REGION_12_13;
+ uint32_t CM_BLNDGAM_RAMB_REGION_14_15;
+ uint32_t CM_BLNDGAM_RAMB_REGION_16_17;
+ uint32_t CM_BLNDGAM_RAMB_REGION_18_19;
+ uint32_t CM_BLNDGAM_RAMB_REGION_20_21;
+ uint32_t CM_BLNDGAM_RAMB_REGION_22_23;
+ uint32_t CM_BLNDGAM_RAMB_REGION_24_25;
+ uint32_t CM_BLNDGAM_RAMB_REGION_26_27;
+ uint32_t CM_BLNDGAM_RAMB_REGION_28_29;
+ uint32_t CM_BLNDGAM_RAMB_REGION_30_31;
+ uint32_t CM_BLNDGAM_RAMB_REGION_32_33;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_BLNDGAM_RAMA_REGION_0_1;
+ uint32_t CM_BLNDGAM_RAMA_REGION_2_3;
+ uint32_t CM_BLNDGAM_RAMA_REGION_4_5;
+ uint32_t CM_BLNDGAM_RAMA_REGION_6_7;
+ uint32_t CM_BLNDGAM_RAMA_REGION_8_9;
+ uint32_t CM_BLNDGAM_RAMA_REGION_10_11;
+ uint32_t CM_BLNDGAM_RAMA_REGION_12_13;
+ uint32_t CM_BLNDGAM_RAMA_REGION_14_15;
+ uint32_t CM_BLNDGAM_RAMA_REGION_16_17;
+ uint32_t CM_BLNDGAM_RAMA_REGION_18_19;
+ uint32_t CM_BLNDGAM_RAMA_REGION_20_21;
+ uint32_t CM_BLNDGAM_RAMA_REGION_22_23;
+ uint32_t CM_BLNDGAM_RAMA_REGION_24_25;
+ uint32_t CM_BLNDGAM_RAMA_REGION_26_27;
+ uint32_t CM_BLNDGAM_RAMA_REGION_28_29;
+ uint32_t CM_BLNDGAM_RAMA_REGION_30_31;
+ uint32_t CM_BLNDGAM_RAMA_REGION_32_33;
+ uint32_t CM_BLNDGAM_LUT_INDEX;
+ uint32_t CM_BLNDGAM_LUT_DATA;
+ uint32_t CM_3DLUT_MODE;
+ uint32_t CM_3DLUT_INDEX;
+ uint32_t CM_3DLUT_DATA;
+ uint32_t CM_3DLUT_DATA_30BIT;
+ uint32_t CM_3DLUT_READ_WRITE_CONTROL;
+ uint32_t CM_SHAPER_LUT_WRITE_EN_MASK;
+ uint32_t CM_SHAPER_CONTROL;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_B;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_G;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_R;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_B;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_G;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_R;
+ uint32_t CM_SHAPER_RAMB_REGION_0_1;
+ uint32_t CM_SHAPER_RAMB_REGION_2_3;
+ uint32_t CM_SHAPER_RAMB_REGION_4_5;
+ uint32_t CM_SHAPER_RAMB_REGION_6_7;
+ uint32_t CM_SHAPER_RAMB_REGION_8_9;
+ uint32_t CM_SHAPER_RAMB_REGION_10_11;
+ uint32_t CM_SHAPER_RAMB_REGION_12_13;
+ uint32_t CM_SHAPER_RAMB_REGION_14_15;
+ uint32_t CM_SHAPER_RAMB_REGION_16_17;
+ uint32_t CM_SHAPER_RAMB_REGION_18_19;
+ uint32_t CM_SHAPER_RAMB_REGION_20_21;
+ uint32_t CM_SHAPER_RAMB_REGION_22_23;
+ uint32_t CM_SHAPER_RAMB_REGION_24_25;
+ uint32_t CM_SHAPER_RAMB_REGION_26_27;
+ uint32_t CM_SHAPER_RAMB_REGION_28_29;
+ uint32_t CM_SHAPER_RAMB_REGION_30_31;
+ uint32_t CM_SHAPER_RAMB_REGION_32_33;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_B;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_G;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_R;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_B;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_G;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_R;
+ uint32_t CM_SHAPER_RAMA_REGION_0_1;
+ uint32_t CM_SHAPER_RAMA_REGION_2_3;
+ uint32_t CM_SHAPER_RAMA_REGION_4_5;
+ uint32_t CM_SHAPER_RAMA_REGION_6_7;
+ uint32_t CM_SHAPER_RAMA_REGION_8_9;
+ uint32_t CM_SHAPER_RAMA_REGION_10_11;
+ uint32_t CM_SHAPER_RAMA_REGION_12_13;
+ uint32_t CM_SHAPER_RAMA_REGION_14_15;
+ uint32_t CM_SHAPER_RAMA_REGION_16_17;
+ uint32_t CM_SHAPER_RAMA_REGION_18_19;
+ uint32_t CM_SHAPER_RAMA_REGION_20_21;
+ uint32_t CM_SHAPER_RAMA_REGION_22_23;
+ uint32_t CM_SHAPER_RAMA_REGION_24_25;
+ uint32_t CM_SHAPER_RAMA_REGION_26_27;
+ uint32_t CM_SHAPER_RAMA_REGION_28_29;
+ uint32_t CM_SHAPER_RAMA_REGION_30_31;
+ uint32_t CM_SHAPER_RAMA_REGION_32_33;
+ uint32_t CM_SHAPER_LUT_INDEX;
+ uint32_t CM_SHAPER_LUT_DATA;
+ uint32_t CM_ICSC_CONTROL;
+ uint32_t CM_ICSC_C11_C12;
+ uint32_t CM_ICSC_C33_C34;
+ uint32_t CM_DGAM_RAMB_START_CNTL_B;
+ uint32_t CM_DGAM_RAMB_START_CNTL_G;
+ uint32_t CM_DGAM_RAMB_START_CNTL_R;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_DGAM_RAMB_REGION_0_1;
+ uint32_t CM_DGAM_RAMB_REGION_14_15;
+ uint32_t CM_DGAM_RAMA_START_CNTL_B;
+ uint32_t CM_DGAM_RAMA_START_CNTL_G;
+ uint32_t CM_DGAM_RAMA_START_CNTL_R;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_DGAM_RAMA_REGION_0_1;
+ uint32_t CM_DGAM_RAMA_REGION_14_15;
+ uint32_t CM_DGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_DGAM_LUT_INDEX;
+ uint32_t CM_DGAM_LUT_DATA;
+ uint32_t CM_CONTROL;
+ uint32_t CM_DGAM_CONTROL;
+ uint32_t CM_IGAM_CONTROL;
+ uint32_t CM_IGAM_LUT_RW_CONTROL;
+ uint32_t CM_IGAM_LUT_RW_INDEX;
+ uint32_t CM_IGAM_LUT_SEQ_COLOR;
+ uint32_t FORMAT_CONTROL;
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR0_CONTROL;
+ uint32_t CURSOR0_COLOR0;
+ uint32_t CURSOR0_COLOR1;
+};
+
+struct dcn10_dpp {
+ struct dpp base;
+
+ const struct dcn_dpp_registers *tf_regs;
+ const struct dcn_dpp_shift *tf_shift;
+ const struct dcn_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+};
+
+enum dcn10_input_csc_select {
+ INPUT_CSC_SELECT_BYPASS = 0,
+ INPUT_CSC_SELECT_ICSC,
+ INPUT_CSC_SELECT_COMA
+};
+
+bool dpp1_dscl_is_lb_conf_valid(
+ int ceil_vratio,
+ int num_partitions,
+ int vtaps);
+
+void dpp1_dscl_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+
+void dpp1_degamma_ram_select(
+ struct dpp *dpp_base,
+ bool use_ram_a);
+
+void dpp1_program_degamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+void dpp1_program_degamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+void dpp1_program_degamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a);
+
+void dpp1_power_on_degamma_lut(
+ struct dpp *dpp_base,
+ bool power_on);
+
+void dpp1_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select select);
+
+void dpp1_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma);
+
+void dpp1_full_bypass(struct dpp *dpp_base);
+
+void dpp1_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode);
+
+void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+bool dpp_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+void dpp_reset(struct dpp *dpp_base);
+
+void dpp1_cm_program_regamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+void dpp1_cm_power_on_regamma_lut(
+ struct dpp *dpp_base,
+ bool power_on);
+
+void dpp1_cm_configure_regamma_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a);
+
+/*program re gamma RAM A*/
+void dpp1_cm_program_regamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+/*program re gamma RAM B*/
+void dpp1_cm_program_regamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+void dpp1_cm_set_output_csc_adjustment(
+ struct dpp *dpp_base,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dpp1_cm_set_output_csc_default(
+ struct dpp *dpp_base,
+ const struct default_adjustment *default_adjust);
+
+void dpp1_cm_set_gamut_remap(
+ struct dpp *dpp,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp1_dscl_set_scaler_manual_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data);
+
+void dpp1_cnv_setup (
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+void dpp1_full_bypass(struct dpp *dpp_base);
+
+void dpp1_construct(struct dcn10_dpp *dpp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_dpp_registers *tf_regs,
+ const struct dcn_dpp_shift *tf_shift,
+ const struct dcn_dpp_mask *tf_mask);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
new file mode 100644
index 000000000000..40627c244bf5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+#include "dcn10_cm_common.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+struct dcn10_input_csc_matrix {
+ enum dc_color_space color_space;
+ uint16_t regval[12];
+};
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+ /* Autocal calculate the scaling ratio and initial phase and the
+ * DSCL_MODE_SEL must be set to 1
+ */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+ /* Autocal perform auto centering without replication and the
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+ /* Autocal perform auto centering and auto replication and the
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
+
+static const struct dcn10_input_csc_matrix dcn10_input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
+
+
+static void program_gamut_remap(
+ struct dcn10_dpp *dpp,
+ const uint16_t *regval,
+ enum gamut_remap_select select)
+{
+ uint16_t selection = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+ switch (select) {
+ case GAMUT_REMAP_COEFF:
+ selection = 1;
+ break;
+ case GAMUT_REMAP_COMA_COEFF:
+ selection = 2;
+ break;
+ case GAMUT_REMAP_COMB_COEFF:
+ selection = 3;
+ break;
+ default:
+ break;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+
+ if (select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, selection);
+
+}
+
+void dpp1_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ arr_matrix[0] = adjust->temperature_matrix[0];
+ arr_matrix[1] = adjust->temperature_matrix[1];
+ arr_matrix[2] = adjust->temperature_matrix[2];
+ arr_matrix[3] = dal_fixed31_32_zero;
+
+ arr_matrix[4] = adjust->temperature_matrix[3];
+ arr_matrix[5] = adjust->temperature_matrix[4];
+ arr_matrix[6] = adjust->temperature_matrix[5];
+ arr_matrix[7] = dal_fixed31_32_zero;
+
+ arr_matrix[8] = adjust->temperature_matrix[6];
+ arr_matrix[9] = adjust->temperature_matrix[7];
+ arr_matrix[10] = adjust->temperature_matrix[8];
+ arr_matrix[11] = dal_fixed31_32_zero;
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
+ }
+}
+
+void dpp1_cm_set_output_csc_default(
+ struct dpp *dpp_base,
+ const struct default_adjustment *default_adjust)
+{
+
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint32_t ocsc_mode = 0;
+
+ if (default_adjust != NULL) {
+ switch (default_adjust->out_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ ocsc_mode = 0;
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ ocsc_mode = 1;
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ ocsc_mode = 2;
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ ocsc_mode = 3;
+ break;
+ case COLOR_SPACE_UNKNOWN:
+ default:
+ break;
+ }
+ }
+
+ REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+
+}
+
+static void dpp1_cm_get_reg_field(
+ struct dcn10_dpp *dpp,
+ struct xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
+}
+
+static void dpp1_cm_program_color_matrix(
+ struct dcn10_dpp *dpp,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ uint32_t mode;
+ struct color_matrices_reg gam_regs;
+
+ REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
+
+ if (tbl_entry == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
+
+ if (mode == 4) {
+
+ gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ tbl_entry->regval,
+ &gam_regs);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ tbl_entry->regval,
+ &gam_regs);
+ }
+}
+
+void dpp1_cm_set_output_csc_adjustment(
+ struct dpp *dpp_base,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ //enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ uint32_t ocsc_mode = 4;
+
+ /**
+ *if (tbl_entry != NULL) {
+ * switch (tbl_entry->color_space) {
+ * case COLOR_SPACE_SRGB:
+ * case COLOR_SPACE_2020_RGB_FULLRANGE:
+ * ocsc_mode = 0;
+ * break;
+ * case COLOR_SPACE_SRGB_LIMITED:
+ * case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ * ocsc_mode = 1;
+ * break;
+ * case COLOR_SPACE_YCBCR601:
+ * case COLOR_SPACE_YCBCR601_LIMITED:
+ * ocsc_mode = 2;
+ * break;
+ * case COLOR_SPACE_YCBCR709:
+ * case COLOR_SPACE_YCBCR709_LIMITED:
+ * case COLOR_SPACE_2020_YCBCR:
+ * ocsc_mode = 3;
+ * break;
+ * case COLOR_SPACE_UNKNOWN:
+ * default:
+ * break;
+ * }
+ *}
+ */
+
+ REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+ dpp1_cm_program_color_matrix(dpp, tbl_entry);
+}
+
+void dpp1_cm_power_on_regamma_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+ RGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
+
+}
+
+void dpp1_cm_program_regamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ for (i = 0 ; i < num; i++) {
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
+
+ }
+
+}
+
+void dpp1_cm_configure_regamma_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
+ CM_RGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
+ CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
+ REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
+}
+
+/*program re gamma RAM A*/
+void dpp1_cm_program_regamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+
+}
+
+/*program re gamma RAM B*/
+void dpp1_cm_program_regamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+void dpp1_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select select)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t selection = 1;
+ struct color_matrices_reg gam_regs;
+
+ if (select == INPUT_CSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ for (i = 0; i < arr_size; i++)
+ if (dcn10_input_csc_matrix[i].color_space == color_space) {
+ regval = dcn10_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (select == INPUT_CSC_SELECT_COMA)
+ selection = 2;
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, selection);
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+
+ if (select == INPUT_CSC_SELECT_ICSC) {
+
+ gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+/*program de gamma RAM B*/
+void dpp1_program_degamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);
+
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+/*program de gamma RAM A*/
+void dpp1_program_degamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+void dpp1_power_on_degamma_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+ SHARED_MEM_PWR_DIS, power_on == true ? 0:1);
+
+}
+
+static void dpp1_enable_cm_block(
+ struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
+ REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
+}
+
+void dpp1_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ dpp1_enable_cm_block(dpp_base);
+
+ switch (mode) {
+ case IPP_DEGAMMA_MODE_BYPASS:
+ /* Setting de gamma bypass for now */
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
+ break;
+ case IPP_DEGAMMA_MODE_HW_sRGB:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
+ break;
+ case IPP_DEGAMMA_MODE_HW_xvYCC:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+void dpp1_degamma_ram_select(
+ struct dpp *dpp_base,
+ bool use_ram_a)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (use_ram_a)
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ else
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);
+
+}
+
+static bool dpp1_degamma_ram_inuse(
+ struct dpp *dpp_base,
+ bool *ram_a_inuse)
+{
+ bool ret = false;
+ uint32_t status_reg = 0;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
+ &status_reg);
+
+ if (status_reg == 9) {
+ *ram_a_inuse = true;
+ ret = true;
+ } else if (status_reg == 10) {
+ *ram_a_inuse = false;
+ ret = true;
+ }
+ return ret;
+}
+
+void dpp1_program_degamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a)
+{
+ uint32_t i;
+
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
+ CM_DGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
+ is_ram_a == true ? 0:1);
+
+ REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
+ for (i = 0 ; i < num; i++) {
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
+ }
+}
+
+void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ bool is_ram_a = true;
+
+ dpp1_power_on_degamma_lut(dpp_base, true);
+ dpp1_enable_cm_block(dpp_base);
+ dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
+ if (is_ram_a == true)
+ dpp1_program_degamma_lutb_settings(dpp_base, params);
+ else
+ dpp1_program_degamma_luta_settings(dpp_base, params);
+
+ dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
+ params->hw_points_num, !is_ram_a);
+ dpp1_degamma_ram_select(dpp_base, !is_ram_a);
+}
+
+void dpp1_full_bypass(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Input pixel format: ARGB8888 */
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, 0x8);
+
+ /* Zero expansion */
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_CONTROL__ALPHA_EN, 0,
+ FORMAT_EXPANSION_MODE, 0);
+
+ /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
+ if (dpp->tf_mask->CM_BYPASS_EN)
+ REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
+
+ /* Setting degamma bypass for now */
+ REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
+}
+
+static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
+ bool *ram_a_inuse)
+{
+ bool in_use = false;
+ uint32_t status_reg = 0;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
+ &status_reg);
+
+ // 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
+ if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
+ *ram_a_inuse = true;
+ in_use = true;
+ // 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
+ } else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
+ *ram_a_inuse = false;
+ in_use = true;
+ }
+ return in_use;
+}
+
+/*
+ * Input gamma LUT currently supports 256 values only. This means input color
+ * can have a maximum of 8 bits per channel (= 256 possible values) in order to
+ * have a one-to-one mapping with the LUT. Truncation will occur with color
+ * values greater than 8 bits.
+ *
+ * In the future, this function should support additional input gamma methods,
+ * such as piecewise linear mapping, and input gamma bypass.
+ */
+void dpp1_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma)
+{
+ int i;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ bool rama_occupied = false;
+ uint32_t ram_num;
+ // Power on LUT memory.
+ REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
+ dpp1_enable_cm_block(dpp_base);
+ // Determine whether to use RAM A or RAM B
+ dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
+ if (!rama_occupied)
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
+ else
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
+ // RW mode is 256-entry LUT
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
+ // IGAM Input format should be 8 bits per channel.
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
+ // Do not mask any R,G,B values
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
+ // LUT-256, unsigned, integer, new u0.12 format
+ REG_UPDATE_3(
+ CM_IGAM_CONTROL,
+ CM_IGAM_LUT_FORMAT_R, 3,
+ CM_IGAM_LUT_FORMAT_G, 3,
+ CM_IGAM_LUT_FORMAT_B, 3);
+ // Start at index 0 of IGAM LUT
+ REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.red[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.green[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.blue[i]));
+ }
+ // Power off LUT memory
+ REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
+ // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
+ REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
new file mode 100644
index 000000000000..cbad36410b32
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+ /* Autocal calculate the scaling ratio and initial phase and the
+ * DSCL_MODE_SEL must be set to 1
+ */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+ /* Autocal perform auto centering without replication and the
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+ /* Autocal perform auto centering and auto replication and the
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+static void dpp1_dscl_set_overscan(
+ struct dcn10_dpp *dpp,
+ const struct scaler_data *data)
+{
+ uint32_t left = data->recout.x;
+ uint32_t top = data->recout.y;
+
+ int right = data->h_active - data->recout.x - data->recout.width;
+ int bottom = data->v_active - data->recout.y - data->recout.height;
+
+ if (right < 0) {
+ BREAK_TO_DEBUGGER();
+ right = 0;
+ }
+ if (bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ bottom = 0;
+ }
+
+ REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0,
+ EXT_OVERSCAN_LEFT, left,
+ EXT_OVERSCAN_RIGHT, right);
+
+ REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0,
+ EXT_OVERSCAN_BOTTOM, bottom,
+ EXT_OVERSCAN_TOP, top);
+}
+
+static void dpp1_dscl_set_otg_blank(
+ struct dcn10_dpp *dpp, const struct scaler_data *data)
+{
+ uint32_t h_blank_start = data->h_active;
+ uint32_t h_blank_end = 0;
+ uint32_t v_blank_start = data->v_active;
+ uint32_t v_blank_end = 0;
+
+ REG_SET_2(OTG_H_BLANK, 0,
+ OTG_H_BLANK_START, h_blank_start,
+ OTG_H_BLANK_END, h_blank_end);
+
+ REG_SET_2(OTG_V_BLANK, 0,
+ OTG_V_BLANK_START, v_blank_start,
+ OTG_V_BLANK_END, v_blank_end);
+}
+
+static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
+{
+ if (depth == LB_PIXEL_DEPTH_30BPP)
+ return 0; /* 10 bpc */
+ else if (depth == LB_PIXEL_DEPTH_24BPP)
+ return 1; /* 8 bpc */
+ else if (depth == LB_PIXEL_DEPTH_18BPP)
+ return 2; /* 6 bpc */
+ else if (depth == LB_PIXEL_DEPTH_36BPP)
+ return 3; /* 12 bpc */
+ else {
+ ASSERT(0);
+ return -1; /* Unsupported */
+ }
+}
+
+static bool dpp1_dscl_is_video_format(enum pixel_format format)
+{
+ if (format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && format <= PIXEL_FORMAT_VIDEO_END)
+ return true;
+ else
+ return false;
+}
+
+static bool dpp1_dscl_is_420_format(enum pixel_format format)
+{
+ if (format == PIXEL_FORMAT_420BPP8 ||
+ format == PIXEL_FORMAT_420BPP10)
+ return true;
+ else
+ return false;
+}
+
+static enum dscl_mode_sel dpp1_dscl_get_dscl_mode(
+ struct dpp *dpp_base,
+ const struct scaler_data *data,
+ bool dbg_always_scale)
+{
+ const long long one = dal_fixed31_32_one.value;
+
+ if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
+ /* DSCL is processing data in fixed format */
+ if (data->format == PIXEL_FORMAT_FP16)
+ return DSCL_MODE_DSCL_BYPASS;
+ }
+
+ if (data->ratios.horz.value == one
+ && data->ratios.vert.value == one
+ && data->ratios.horz_c.value == one
+ && data->ratios.vert_c.value == one
+ && !dbg_always_scale)
+ return DSCL_MODE_SCALING_444_BYPASS;
+
+ if (!dpp1_dscl_is_420_format(data->format)) {
+ if (dpp1_dscl_is_video_format(data->format))
+ return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ else
+ return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ }
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
+ return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+
+ return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+}
+
+static void dpp1_dscl_set_lb(
+ struct dcn10_dpp *dpp,
+ const struct line_buffer_params *lb_params,
+ enum lb_memory_config mem_size_config)
+{
+ /* LB */
+ if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
+ /* DSCL caps: pixel data processed in fixed format */
+ uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth);
+ uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth;
+
+ REG_SET_7(LB_DATA_FORMAT, 0,
+ PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */
+ PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */
+ PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */
+ DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */
+ DITHER_EN, 0, /* Dithering enable: Disabled */
+ INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
+ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
+ }
+
+ REG_SET_2(LB_MEMORY_CTRL, 0,
+ MEMORY_CONFIG, mem_size_config,
+ LB_MAX_PARTITIONS, 63);
+}
+
+static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 8)
+ return get_filter_8tap_64p(ratio);
+ else if (taps == 7)
+ return get_filter_7tap_64p(ratio);
+ else if (taps == 6)
+ return get_filter_6tap_64p(ratio);
+ else if (taps == 5)
+ return get_filter_5tap_64p(ratio);
+ else if (taps == 4)
+ return get_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return get_filter_3tap_64p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+static void dpp1_dscl_set_scaler_filter(
+ struct dcn10_dpp *dpp,
+ uint32_t taps,
+ enum dcn10_coef_filter_type_sel filter_type,
+ const uint16_t *filter)
+{
+ const int tap_pairs = (taps + 1) / 2;
+ int phase;
+ int pair;
+ uint16_t odd_coef, even_coef;
+
+ REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0,
+ SCL_COEF_RAM_TAP_PAIR_IDX, 0,
+ SCL_COEF_RAM_PHASE, 0,
+ SCL_COEF_RAM_FILTER_TYPE, filter_type);
+
+ for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ for (pair = 0; pair < tap_pairs; pair++) {
+ even_coef = filter[phase * taps + 2 * pair];
+ if ((pair * 2 + 1) < taps)
+ odd_coef = filter[phase * taps + 2 * pair + 1];
+ else
+ odd_coef = 0;
+
+ REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
+ /* Even tap coefficient (bits 1:0 fixed to 0) */
+ SCL_COEF_RAM_EVEN_TAP_COEF, even_coef,
+ /* Write/read control for even coefficient */
+ SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1,
+ /* Odd tap coefficient (bits 1:0 fixed to 0) */
+ SCL_COEF_RAM_ODD_TAP_COEF, odd_coef,
+ /* Write/read control for odd coefficient */
+ SCL_COEF_RAM_ODD_TAP_COEF_EN, 1);
+ }
+ }
+
+}
+
+static void dpp1_dscl_set_scl_filter(
+ struct dcn10_dpp *dpp,
+ const struct scaler_data *scl_data,
+ bool chroma_coef_mode)
+{
+ bool h_2tap_hardcode_coef_en = false;
+ bool v_2tap_hardcode_coef_en = false;
+ bool h_2tap_sharp_en = false;
+ bool v_2tap_sharp_en = false;
+ uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz;
+ uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert;
+ bool coef_ram_current;
+ const uint16_t *filter_h = NULL;
+ const uint16_t *filter_v = NULL;
+ const uint16_t *filter_h_c = NULL;
+ const uint16_t *filter_v_c = NULL;
+
+ h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3
+ && scl_data->taps.h_taps_c < 3
+ && (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1);
+ v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3
+ && scl_data->taps.v_taps_c < 3
+ && (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1);
+
+ h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0;
+ v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0;
+
+ REG_UPDATE_6(DSCL_2TAP_CONTROL,
+ SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en,
+ SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en,
+ SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor,
+ SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en,
+ SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en,
+ SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor);
+
+ if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) {
+ bool filter_updated = false;
+
+ filter_h = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.h_taps, scl_data->ratios.horz);
+ filter_v = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.v_taps, scl_data->ratios.vert);
+
+ filter_updated = (filter_h && (filter_h != dpp->filter_h))
+ || (filter_v && (filter_v != dpp->filter_v));
+
+ if (chroma_coef_mode) {
+ filter_h_c = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.h_taps_c, scl_data->ratios.horz_c);
+ filter_v_c = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.v_taps_c, scl_data->ratios.vert_c);
+ filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c))
+ || (filter_v_c && (filter_v_c != dpp->filter_v_c));
+ }
+
+ if (filter_updated) {
+ uint32_t scl_mode = REG_READ(SCL_MODE);
+
+ if (!h_2tap_hardcode_coef_en && filter_h) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.h_taps,
+ SCL_COEF_LUMA_HORZ_FILTER, filter_h);
+ }
+ dpp->filter_h = filter_h;
+ if (!v_2tap_hardcode_coef_en && filter_v) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.v_taps,
+ SCL_COEF_LUMA_VERT_FILTER, filter_v);
+ }
+ dpp->filter_v = filter_v;
+ if (chroma_coef_mode) {
+ if (!h_2tap_hardcode_coef_en && filter_h_c) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.h_taps_c,
+ SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c);
+ }
+ if (!v_2tap_hardcode_coef_en && filter_v_c) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.v_taps_c,
+ SCL_COEF_CHROMA_VERT_FILTER, filter_v_c);
+ }
+ }
+ dpp->filter_h_c = filter_h_c;
+ dpp->filter_v_c = filter_v_c;
+
+ coef_ram_current = get_reg_field_value_ex(
+ scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT,
+ dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT);
+
+ /* Swap coefficient RAM and set chroma coefficient mode */
+ REG_SET_2(SCL_MODE, scl_mode,
+ SCL_COEF_RAM_SELECT, !coef_ram_current,
+ SCL_CHROMA_COEF_MODE, chroma_coef_mode);
+ }
+ }
+}
+
+static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth)
+{
+ if (depth == LB_PIXEL_DEPTH_30BPP)
+ return 10;
+ else if (depth == LB_PIXEL_DEPTH_24BPP)
+ return 8;
+ else if (depth == LB_PIXEL_DEPTH_18BPP)
+ return 6;
+ else if (depth == LB_PIXEL_DEPTH_36BPP)
+ return 12;
+ else {
+ BREAK_TO_DEBUGGER();
+ return -1; /* Unsupported */
+ }
+}
+
+void dpp1_dscl_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c)
+{
+ int line_size = scl_data->viewport.width < scl_data->recout.width ?
+ scl_data->viewport.width : scl_data->recout.width;
+ int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
+ scl_data->viewport_c.width : scl_data->recout.width;
+ int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+ int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+ int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+ int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
+ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ if (lb_config == LB_MEMORY_CONFIG_1) {
+ lb_memory_size = 816;
+ lb_memory_size_c = 816;
+ lb_memory_size_a = 984;
+ } else if (lb_config == LB_MEMORY_CONFIG_2) {
+ lb_memory_size = 1088;
+ lb_memory_size_c = 1088;
+ lb_memory_size_a = 1312;
+ } else if (lb_config == LB_MEMORY_CONFIG_3) {
+ /* 420 mode: using 3rd mem from Y, Cr and Cb */
+ lb_memory_size = 816 + 1088 + 848 + 848 + 848;
+ lb_memory_size_c = 816 + 1088;
+ lb_memory_size_a = 984 + 1312 + 456;
+ } else {
+ lb_memory_size = 816 + 1088 + 848;
+ lb_memory_size_c = 816 + 1088 + 848;
+ lb_memory_size_a = 984 + 1312 + 456;
+ }
+ *num_part_y = lb_memory_size / memory_line_size_y;
+ *num_part_c = lb_memory_size_c / memory_line_size_c;
+ num_partitions_a = lb_memory_size_a / memory_line_size_a;
+
+ if (scl_data->lb_params.alpha_en
+ && (num_partitions_a < *num_part_y))
+ *num_part_y = num_partitions_a;
+
+ if (*num_part_y > 64)
+ *num_part_y = 64;
+ if (*num_part_c > 64)
+ *num_part_c = 64;
+
+}
+
+bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps)
+{
+ if (ceil_vratio > 2)
+ return vtaps <= (num_partitions - ceil_vratio + 2);
+ else
+ return vtaps <= num_partitions;
+}
+
+/*find first match configuration which meets the min required lb size*/
+static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ int num_part_y, num_part_c;
+ int vtaps = scl_data->taps.v_taps;
+ int vtaps_c = scl_data->taps.v_taps_c;
+ int ceil_vratio = dal_fixed31_32_ceil(scl_data->ratios.vert);
+ int ceil_vratio_c = dal_fixed31_32_ceil(scl_data->ratios.vert_c);
+ enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
+
+ if (dpp->base.ctx->dc->debug.use_max_lb)
+ return mem_cfg;
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_1;
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_2;
+
+ if (scl_data->format == PIXEL_FORMAT_420BPP8
+ || scl_data->format == PIXEL_FORMAT_420BPP10) {
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_3;
+ }
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c);
+
+ /*Ensure we can support the requested number of vtaps*/
+ ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c));
+
+ return LB_MEMORY_CONFIG_0;
+}
+
+void dpp1_dscl_set_scaler_auto_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data)
+{
+ enum lb_memory_config lb_config;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
+ dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
+ bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+
+ dpp1_dscl_set_overscan(dpp, scl_data);
+
+ dpp1_dscl_set_otg_blank(dpp, scl_data);
+
+ REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
+
+ if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
+ return;
+
+ lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
+ dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
+
+ if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
+ return;
+
+ /* TODO: v_min */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
+ /* Black offsets */
+ if (ycbcr)
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
+ else
+
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+
+ REG_SET_4(SCL_TAP_CONTROL, 0,
+ SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
+ SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
+ SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
+ SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
+
+ dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+}
+
+
+static void dpp1_dscl_set_manual_ratio_init(
+ struct dcn10_dpp *dpp, const struct scaler_data *data)
+{
+ uint32_t init_frac = 0;
+ uint32_t init_int = 0;
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+ SCL_H_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.horz) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+ SCL_V_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.vert) << 5);
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
+ SCL_H_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.horz_c) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
+ SCL_V_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.vert_c) << 5);
+
+ /*
+ * 0.24 format for fraction, first five bits zeroed
+ */
+ init_frac = dal_fixed31_32_u0d19(data->inits.h) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.h);
+ REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+ SCL_H_INIT_FRAC, init_frac,
+ SCL_H_INIT_INT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.h_c) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.h_c);
+ REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
+ SCL_H_INIT_FRAC_C, init_frac,
+ SCL_H_INIT_INT_C, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v);
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_FRAC, init_frac,
+ SCL_V_INIT_INT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_bot) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
+ SCL_V_INIT_FRAC_BOT, init_frac,
+ SCL_V_INIT_INT_BOT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_c) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_c);
+ REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
+ SCL_V_INIT_FRAC_C, init_frac,
+ SCL_V_INIT_INT_C, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_c_bot) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_c_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
+ SCL_V_INIT_FRAC_BOT_C, init_frac,
+ SCL_V_INIT_INT_BOT_C, init_int);
+}
+
+
+
+static void dpp1_dscl_set_recout(
+ struct dcn10_dpp *dpp, const struct rect *recout)
+{
+ REG_SET_2(RECOUT_START, 0,
+ /* First pixel of RECOUT */
+ RECOUT_START_X, recout->x,
+ /* First line of RECOUT */
+ RECOUT_START_Y, recout->y);
+
+ REG_SET_2(RECOUT_SIZE, 0,
+ /* Number of RECOUT horizontal pixels */
+ RECOUT_WIDTH, recout->width,
+ /* Number of RECOUT vertical lines */
+ RECOUT_HEIGHT, recout->height
+ - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
+ (dpp->base.inst + 1));
+}
+
+/* Main function to program scaler and line buffer in manual scaling mode */
+void dpp1_dscl_set_scaler_manual_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data)
+{
+ enum lb_memory_config lb_config;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
+ dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
+ bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+
+ /* Recout */
+ dpp1_dscl_set_recout(dpp, &scl_data->recout);
+
+ /* MPC Size */
+ REG_SET_2(MPC_SIZE, 0,
+ /* Number of horizontal pixels of MPC */
+ MPC_WIDTH, scl_data->h_active,
+ /* Number of vertical lines of MPC */
+ MPC_HEIGHT, scl_data->v_active);
+
+ /* SCL mode */
+ REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
+
+ if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
+ return;
+
+ /* LB */
+ lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
+ dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
+
+ if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
+ return;
+
+ /* Autocal off */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
+ /* Black offsets */
+ if (ycbcr)
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
+ else
+
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+
+ /* Manually calculate scale ratio and init values */
+ dpp1_dscl_set_manual_ratio_init(dpp, scl_data);
+
+ /* HTaps/VTaps */
+ REG_SET_4(SCL_TAP_CONTROL, 0,
+ SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
+ SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
+ SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
+ SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
+
+ dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
new file mode 100644
index 000000000000..b13dee64e0ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -0,0 +1,960 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+#include "dcn10_hubp.h"
+
+#define REG(reg)\
+ hubp1->mi_regs->reg
+
+#define CTX \
+ hubp1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp1->mi_shift->field_name, hubp1->mi_mask->field_name
+
+void hubp1_set_blank(struct hubp *hubp, bool blank)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ REG_UPDATE_2(DCHUBP_CNTL,
+ HUBP_BLANK_EN, blank_en,
+ HUBP_TTU_DISABLE, blank_en);
+
+ if (blank) {
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 200);
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = 0xf;
+ }
+}
+
+static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, blank_en);
+}
+
+static void hubp1_vready_workaround(struct hubp *hubp,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ uint32_t value = 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* set HBUBREQ_DEBUG_DB[12] = 1 */
+ value = REG_READ(HUBPREQ_DEBUG_DB);
+
+ /* hack mode disable */
+ value |= 0x100;
+ value &= ~0x1000;
+
+ if ((pipe_dest->vstartup_start - 2*(pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ /* if (eco_fix_needed(otg_global_sync_timing)
+ * set HBUBREQ_DEBUG_DB[12] = 1 */
+ value |= 0x1000;
+ }
+
+ REG_WRITE(HUBPREQ_DEBUG_DB, value);
+}
+
+void hubp1_program_tiling(
+ struct dcn10_hubp *hubp1,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ REG_UPDATE_6(DCSURF_ADDR_CONFIG,
+ NUM_PIPES, log_2(info->gfx9.num_pipes),
+ NUM_BANKS, log_2(info->gfx9.num_banks),
+ PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+ NUM_SE, log_2(info->gfx9.num_shader_engines),
+ NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se),
+ MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
+
+ REG_UPDATE_4(DCSURF_TILING_CONFIG,
+ SW_MODE, info->gfx9.swizzle,
+ META_LINEAR, info->gfx9.meta_linear,
+ RB_ALIGNED, info->gfx9.rb_aligned,
+ PIPE_ALIGNED, info->gfx9.pipe_aligned);
+}
+
+void hubp1_program_size_and_rotation(
+ struct dcn10_hubp *hubp1,
+ enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
+
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+ */
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ pitch = plane_size->video.luma_pitch - 1;
+ meta_pitch = dcc->video.meta_pitch_l - 1;
+ pitch_c = plane_size->video.chroma_pitch - 1;
+ meta_pitch_c = dcc->video.meta_pitch_c - 1;
+ } else {
+ pitch = plane_size->grph.surface_pitch - 1;
+ meta_pitch = dcc->grph.meta_pitch - 1;
+ pitch_c = 0;
+ meta_pitch_c = 0;
+ }
+
+ if (!dcc->enable) {
+ meta_pitch = 0;
+ meta_pitch_c = 0;
+ }
+
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH,
+ PITCH, pitch, META_PITCH, meta_pitch);
+
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+
+ if (horizontal_mirror)
+ mirror = 1;
+ else
+ mirror = 0;
+
+
+ /* Program rotation angle and horz mirror - no mirror */
+ if (rotation == ROTATION_ANGLE_0)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 0,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_90)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 1,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_180)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 2,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_270)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 3,
+ H_MIRROR_EN, mirror);
+}
+
+void hubp1_program_pixel_format(
+ struct dcn10_hubp *hubp1,
+ enum surface_pixel_format format)
+{
+ uint32_t red_bar = 3;
+ uint32_t blue_bar = 2;
+
+ /* swap for ABGR format */
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ red_bar = 2;
+ blue_bar = 3;
+ }
+
+ REG_UPDATE_2(HUBPRET_CONTROL,
+ CROSSBAR_SRC_CB_B, blue_bar,
+ CROSSBAR_SRC_CR_R, red_bar);
+
+ /* Mapping is same as ipp programming (cnvc) */
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 1);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 3);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 8);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 10);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 22);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 24);
+ break;
+
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 65);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 64);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 67);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* don't see the need of program the xbar in DCN 1.0 */
+}
+
+bool hubp1_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* program flip type */
+ REG_SET(DCSURF_FLIP_CONTROL, 0,
+ SURFACE_FLIP_TYPE, flip_immediate);
+
+ /* HW automatically latch rest of address register on write to
+ * DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used
+ *
+ * program high first and then the low addr, order matters!
+ */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ /* DCN1.0 does not support const color
+ * TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
+ * base on address->grph.dcc_const_color
+ * x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
+ * x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
+ */
+
+ if (address->grph.addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph.addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->video_progressive.luma_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->video_progressive.luma_addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.right_meta_addr.low_part);
+ }
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.left_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ address->grph_stereo.right_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph_stereo.left_addr.low_part);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ hubp->request_address = *address;
+
+ if (flip_immediate)
+ hubp->current_address = *address;
+
+ return true;
+}
+
+void hubp1_dcc_control(struct hubp *hubp, bool enable,
+ bool independent_64b_blks)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+}
+
+void hubp1_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp1_program_tiling(hubp1, tiling_info, format);
+ hubp1_program_size_and_rotation(
+ hubp1, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_pixel_format(hubp1, format);
+}
+
+void hubp1_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
+
+void hubp1_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* DLG - Per hubp */
+ REG_SET_2(BLANK_OFFSET_0, 0,
+ REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
+
+ REG_SET(BLANK_OFFSET_1, 0,
+ MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
+
+ REG_SET(DST_DIMENSIONS, 0,
+ REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
+
+ REG_SET_2(DST_AFTER_SCALER, 0,
+ REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
+
+ if (REG(PREFETCH_SETTINS))
+ REG_SET_2(PREFETCH_SETTINS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+ else
+ REG_SET_2(PREFETCH_SETTINGS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+ REG_SET_2(VBLANK_PARAMETERS_0, 0,
+ DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+ REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
+ REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_SET(VBLANK_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ REG_SET(VBLANK_PARAMETERS_3, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ REG_SET(NOM_PARAMETERS_0, 0,
+ DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_SET(NOM_PARAMETERS_4, 0,
+ DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_5, 0,
+ REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_SET_2(PER_LINE_DELIVERY, 0,
+ REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
+
+ if (REG(PREFETCH_SETTINS_C))
+ REG_SET(PREFETCH_SETTINS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+ else
+ REG_SET(PREFETCH_SETTINGS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+ REG_SET(VBLANK_PARAMETERS_2, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ REG_SET(VBLANK_PARAMETERS_4, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ REG_SET(NOM_PARAMETERS_2, 0,
+ DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_3, 0,
+ REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_SET(NOM_PARAMETERS_6, 0,
+ DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_7, 0,
+ REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_SET_2(DCN_TTU_QOS_WM, 0,
+ QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
+
+ REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+ MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+
+ REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
+
+ REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_l);
+
+ REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
+
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
+}
+
+static void hubp1_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ /* otg is locked when this func is called. Register are double buffered.
+ * disable the requestors is not needed
+ */
+ hubp1_program_requestor(hubp, rq_regs);
+ hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
+ hubp1_vready_workaround(hubp, pipe_dest);
+}
+
+bool hubp1_is_flip_pending(struct hubp *hubp)
+{
+ uint32_t flip_pending = 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct dc_plane_address earliest_inuse_address;
+
+ REG_GET(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_PENDING, &flip_pending);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
+
+ if (flip_pending)
+ return true;
+
+ if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ return true;
+
+ hubp->current_address = hubp->request_address;
+ return false;
+}
+
+uint32_t aperture_default_system = 1;
+uint32_t context0_default_system; /* = 0;*/
+
+static void hubp1_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
+
+ mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
+ mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 12;
+ mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 12;
+
+ REG_SET_2(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, aperture_default_system, /* 1 = system physical memory */
+ MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mc_vm_apt_default.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mc_vm_apt_default.low_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mc_vm_apt_low.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mc_vm_apt_low.low_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mc_vm_apt_high.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mc_vm_apt_high.low_part);
+}
+
+static void hubp1_set_vm_context0_settings(struct hubp *hubp,
+ const struct vm_context0_param *vm0)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ /* pte base */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, vm0->pte_base.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, vm0->pte_base.low_part);
+
+ /* pte start */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, vm0->pte_start.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, vm0->pte_start.low_part);
+
+ /* pte end */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, vm0->pte_end.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, vm0->pte_end.low_part);
+
+ /* fault handling */
+ REG_SET_2(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, vm0->fault_default.high_part,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, context0_default_system);
+ REG_SET(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, vm0->fault_default.low_part);
+
+ /* control: enable VM PTE*/
+ REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
+ ENABLE_L1_TLB, 1,
+ SYSTEM_ACCESS_MODE, 3);
+}
+
+void min_set_viewport(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
+ PRI_VIEWPORT_WIDTH, viewport->width,
+ PRI_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
+ PRI_VIEWPORT_X_START, viewport->x,
+ PRI_VIEWPORT_Y_START, viewport->y);
+
+ /*for stereo*/
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
+ SEC_VIEWPORT_WIDTH, viewport->width,
+ SEC_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
+ SEC_VIEWPORT_X_START, viewport->x,
+ SEC_VIEWPORT_Y_START, viewport->y);
+
+ /* DC supports NV12 only at the moment */
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
+ PRI_VIEWPORT_WIDTH_C, viewport_c->width,
+ PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
+ PRI_VIEWPORT_X_START_C, viewport_c->x,
+ PRI_VIEWPORT_Y_START_C, viewport_c->y);
+}
+
+void hubp1_read_state(struct dcn10_hubp *hubp1,
+ struct dcn_hubp_state *s)
+{
+ REG_GET(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &s->pixel_format);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
+ PRI_VIEWPORT_WIDTH, &s->viewport_width,
+ PRI_VIEWPORT_HEIGHT, &s->viewport_height);
+
+ REG_GET_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, &s->rotation_angle,
+ H_MIRROR_EN, &s->h_mirror_en);
+
+ REG_GET(DCSURF_TILING_CONFIG,
+ SW_MODE, &s->sw_mode);
+
+ REG_GET(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
+
+ REG_GET_3(DCHUBP_CNTL,
+ HUBP_BLANK_EN, &s->blank_en,
+ HUBP_TTU_DISABLE, &s->ttu_disable,
+ HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+
+ REG_GET(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &s->min_ttu_vblank);
+
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
+}
+
+enum cursor_pitch {
+ CURSOR_PITCH_64_PIXELS = 0,
+ CURSOR_PITCH_128_PIXELS,
+ CURSOR_PITCH_256_PIXELS
+};
+
+enum cursor_lines_per_chunk {
+ CURSOR_LINE_PER_CHUNK_2 = 1,
+ CURSOR_LINE_PER_CHUNK_4,
+ CURSOR_LINE_PER_CHUNK_8,
+ CURSOR_LINE_PER_CHUNK_16
+};
+
+static bool ippn10_cursor_program_control(
+ struct dcn10_hubp *hubp1,
+ bool pixel_data_invert,
+ enum dc_cursor_color_format color_format)
+{
+ if (REG(CURSOR_SETTINS))
+ REG_SET_2(CURSOR_SETTINS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ else
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ return true;
+}
+
+static enum cursor_pitch ippn10_get_cursor_pitch(
+ unsigned int pitch)
+{
+ enum cursor_pitch hw_pitch;
+
+ switch (pitch) {
+ case 64:
+ hw_pitch = CURSOR_PITCH_64_PIXELS;
+ break;
+ case 128:
+ hw_pitch = CURSOR_PITCH_128_PIXELS;
+ break;
+ case 256:
+ hw_pitch = CURSOR_PITCH_256_PIXELS;
+ break;
+ default:
+ DC_ERR("Invalid cursor pitch of %d. "
+ "Only 64/128/256 is supported on DCN.\n", pitch);
+ hw_pitch = CURSOR_PITCH_64_PIXELS;
+ break;
+ }
+ return hw_pitch;
+}
+
+static enum cursor_lines_per_chunk ippn10_get_lines_per_chunk(
+ unsigned int cur_width,
+ enum dc_cursor_color_format format)
+{
+ enum cursor_lines_per_chunk line_per_chunk;
+
+ if (format == CURSOR_MODE_MONO)
+ /* impl B. expansion in CUR Buffer reader */
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cur_width <= 32)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cur_width <= 64)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
+ else if (cur_width <= 128)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
+ else
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
+
+ return line_per_chunk;
+}
+
+void hubp1_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ enum cursor_pitch hw_pitch = ippn10_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = ippn10_get_lines_per_chunk(
+ attr->width, attr->color_format);
+
+ hubp->curs_attr = *attr;
+
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+ REG_UPDATE_3(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+ ippn10_cursor_program_control(hubp1,
+ attr->attribute_flags.bits.INVERT_PIXEL_DATA,
+ attr->color_format);
+}
+
+void hubp1_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+ uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+
+ /*
+ * Guard aganst cursor_set_position() from being called with invalid
+ * attributes
+ *
+ * TODO: Look at combining cursor_set_position() and
+ * cursor_set_attributes() into cursor_update()
+ */
+ if (hubp->curs_attr.address.quad_part == 0)
+ return;
+
+ dst_x_offset *= param->ref_clk_khz;
+ dst_x_offset /= param->pixel_clk_khz;
+
+ ASSERT(param->h_scale_ratio.value);
+
+ if (param->h_scale_ratio.value)
+ dst_x_offset = dal_fixed31_32_floor(dal_fixed31_32_div(
+ dal_fixed31_32_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+ if (src_x_offset >= (int)param->viewport_width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + (int)hubp->curs_attr.width < 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp1_cursor_set_attributes(hubp, &hubp->curs_attr);
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
+
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ /* TODO Handle surface pixel formats other than 4:4:4 */
+}
+
+static struct hubp_funcs dcn10_hubp_funcs = {
+ .hubp_program_surface_flip_and_addr =
+ hubp1_program_surface_flip_and_addr,
+ .hubp_program_surface_config =
+ hubp1_program_surface_config,
+ .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_setup = hubp1_setup,
+ .hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
+ .hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
+ .set_blank = hubp1_set_blank,
+ .dcc_control = hubp1_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .set_hubp_blank_en = hubp1_set_hubp_blank_en,
+ .set_cursor_attributes = hubp1_cursor_set_attributes,
+ .set_cursor_position = hubp1_cursor_set_position,
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dcn10_hubp_construct(
+ struct dcn10_hubp *hubp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_mi_registers *mi_regs,
+ const struct dcn_mi_shift *mi_shift,
+ const struct dcn_mi_mask *mi_mask)
+{
+ hubp1->base.funcs = &dcn10_hubp_funcs;
+ hubp1->base.ctx = ctx;
+ hubp1->mi_regs = mi_regs;
+ hubp1->mi_shift = mi_shift;
+ hubp1->mi_mask = mi_mask;
+ hubp1->base.inst = inst;
+ hubp1->base.opp_id = 0xf;
+ hubp1->base.mpcc_id = 0xf;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
new file mode 100644
index 000000000000..66db453c801b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -0,0 +1,683 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_DCN10_H__
+#define __DC_MEM_INPUT_DCN10_H__
+
+#include "hubp.h"
+
+#define TO_DCN10_HUBP(hubp)\
+ container_of(hubp, struct dcn10_hubp, base)
+
+#define MI_REG_LIST_DCN(id)\
+ SRI(DCHUBP_CNTL, HUBP, id),\
+ SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
+ SRI(DCSURF_ADDR_CONFIG, HUBP, id),\
+ SRI(DCSURF_TILING_CONFIG, HUBP, id),\
+ SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_PITCH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_CONFIG, HUBP, id),\
+ SRI(DCSURF_FLIP_CONTROL, HUBPREQ, id),\
+ SRI(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_START, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
+ SRI(HUBPRET_CONTROL, HUBPRET, id),\
+ SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
+ SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
+ SRI(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id),\
+ SRI(BLANK_OFFSET_0, HUBPREQ, id),\
+ SRI(BLANK_OFFSET_1, HUBPREQ, id),\
+ SRI(DST_DIMENSIONS, HUBPREQ, id),\
+ SRI(DST_AFTER_SCALER, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_0, HUBPREQ, id),\
+ SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_4, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_5, HUBPREQ, id),\
+ SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\
+ SRI(PER_LINE_DELIVERY, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_6, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_7, HUBPREQ, id),\
+ SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\
+ SRI(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id),\
+ SRI(DCN_SURF0_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
+
+#define MI_REG_LIST_DCN10(id)\
+ MI_REG_LIST_DCN(id),\
+ SRI(PREFETCH_SETTINS, HUBPREQ, id),\
+ SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, HUBPREQ, id),\
+ SR(DCHUBBUB_SDPIF_FB_BASE),\
+ SR(DCHUBBUB_SDPIF_FB_OFFSET),\
+ SRI(CURSOR_SETTINS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR, id), \
+ SRI(CURSOR_SIZE, CURSOR, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR, id)
+
+
+
+struct dcn_mi_registers {
+ uint32_t DCHUBP_CNTL;
+ uint32_t HUBPREQ_DEBUG_DB;
+ uint32_t DCSURF_ADDR_CONFIG;
+ uint32_t DCSURF_TILING_CONFIG;
+ uint32_t DCSURF_SURFACE_PITCH;
+ uint32_t DCSURF_SURFACE_PITCH_C;
+ uint32_t DCSURF_SURFACE_CONFIG;
+ uint32_t DCSURF_FLIP_CONTROL;
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION;
+ uint32_t DCSURF_PRI_VIEWPORT_START;
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION;
+ uint32_t DCSURF_SEC_VIEWPORT_START;
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C;
+ uint32_t DCSURF_PRI_VIEWPORT_START_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_SURFACE_INUSE;
+ uint32_t DCSURF_SURFACE_INUSE_HIGH;
+ uint32_t DCSURF_SURFACE_INUSE_C;
+ uint32_t DCSURF_SURFACE_INUSE_HIGH_C;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C;
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t HUBPRET_CONTROL;
+ uint32_t DCN_EXPANSION_MODE;
+ uint32_t DCHUBP_REQ_SIZE_CONFIG;
+ uint32_t DCHUBP_REQ_SIZE_CONFIG_C;
+ uint32_t BLANK_OFFSET_0;
+ uint32_t BLANK_OFFSET_1;
+ uint32_t DST_DIMENSIONS;
+ uint32_t DST_AFTER_SCALER;
+ uint32_t PREFETCH_SETTINS;
+ uint32_t PREFETCH_SETTINGS;
+ uint32_t VBLANK_PARAMETERS_0;
+ uint32_t REF_FREQ_TO_PIX_FREQ;
+ uint32_t VBLANK_PARAMETERS_1;
+ uint32_t VBLANK_PARAMETERS_3;
+ uint32_t NOM_PARAMETERS_0;
+ uint32_t NOM_PARAMETERS_1;
+ uint32_t NOM_PARAMETERS_4;
+ uint32_t NOM_PARAMETERS_5;
+ uint32_t PER_LINE_DELIVERY_PRE;
+ uint32_t PER_LINE_DELIVERY;
+ uint32_t PREFETCH_SETTINS_C;
+ uint32_t PREFETCH_SETTINGS_C;
+ uint32_t VBLANK_PARAMETERS_2;
+ uint32_t VBLANK_PARAMETERS_4;
+ uint32_t NOM_PARAMETERS_2;
+ uint32_t NOM_PARAMETERS_3;
+ uint32_t NOM_PARAMETERS_6;
+ uint32_t NOM_PARAMETERS_7;
+ uint32_t DCN_TTU_QOS_WM;
+ uint32_t DCN_GLOBAL_TTU_CNTL;
+ uint32_t DCN_SURF0_TTU_CNTL0;
+ uint32_t DCN_SURF0_TTU_CNTL1;
+ uint32_t DCN_SURF1_TTU_CNTL0;
+ uint32_t DCN_SURF1_TTU_CNTL1;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
+ uint32_t DCN_VM_MX_L1_TLB_CNTL;
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR;
+ uint32_t DCHUBBUB_SDPIF_FB_BASE;
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
+ uint32_t DCN_VM_FB_LOCATION_TOP;
+ uint32_t DCN_VM_FB_LOCATION_BASE;
+ uint32_t DCN_VM_FB_OFFSET;
+ uint32_t DCN_VM_AGP_BASE;
+ uint32_t DCN_VM_AGP_BOT;
+ uint32_t DCN_VM_AGP_TOP;
+ uint32_t CURSOR_SETTINS;
+ uint32_t CURSOR_SETTINGS;
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR_SIZE;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR_POSITION;
+ uint32_t CURSOR_HOT_SPOT;
+ uint32_t CURSOR_DST_OFFSET;
+};
+
+#define MI_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define MI_MASK_SH_LIST_DCN(mask_sh)\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_SE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_RB_PER_SE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
+ MI_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
+ MI_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
+ MI_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh)
+
+#define MI_MASK_SH_LIST_DCN10(mask_sh)\
+ MI_MASK_SH_LIST_DCN(mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mask_sh),\
+ MI_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh),\
+ MI_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+
+#define DCN_MI_REG_FIELD_LIST(type) \
+ type HUBP_BLANK_EN;\
+ type HUBP_TTU_DISABLE;\
+ type HUBP_NO_OUTSTANDING_REQ;\
+ type HUBP_UNDERFLOW_STATUS;\
+ type NUM_PIPES;\
+ type NUM_BANKS;\
+ type PIPE_INTERLEAVE;\
+ type NUM_SE;\
+ type NUM_RB_PER_SE;\
+ type MAX_COMPRESSED_FRAGS;\
+ type SW_MODE;\
+ type META_LINEAR;\
+ type RB_ALIGNED;\
+ type PIPE_ALIGNED;\
+ type PITCH;\
+ type META_PITCH;\
+ type PITCH_C;\
+ type META_PITCH_C;\
+ type ROTATION_ANGLE;\
+ type H_MIRROR_EN;\
+ type SURFACE_PIXEL_FORMAT;\
+ type SURFACE_FLIP_TYPE;\
+ type SURFACE_UPDATE_LOCK;\
+ type SURFACE_FLIP_PENDING;\
+ type PRI_VIEWPORT_WIDTH; \
+ type PRI_VIEWPORT_HEIGHT; \
+ type PRI_VIEWPORT_X_START; \
+ type PRI_VIEWPORT_Y_START; \
+ type SEC_VIEWPORT_WIDTH; \
+ type SEC_VIEWPORT_HEIGHT; \
+ type SEC_VIEWPORT_X_START; \
+ type SEC_VIEWPORT_Y_START; \
+ type PRI_VIEWPORT_WIDTH_C; \
+ type PRI_VIEWPORT_HEIGHT_C; \
+ type PRI_VIEWPORT_X_START_C; \
+ type PRI_VIEWPORT_Y_START_C; \
+ type PRIMARY_SURFACE_ADDRESS_HIGH;\
+ type PRIMARY_SURFACE_ADDRESS;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH;\
+ type SECONDARY_SURFACE_ADDRESS;\
+ type PRIMARY_META_SURFACE_ADDRESS_HIGH;\
+ type PRIMARY_META_SURFACE_ADDRESS;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH;\
+ type SECONDARY_META_SURFACE_ADDRESS;\
+ type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
+ type PRIMARY_SURFACE_ADDRESS_C;\
+ type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SURFACE_INUSE_ADDRESS;\
+ type SURFACE_INUSE_ADDRESS_HIGH;\
+ type SURFACE_INUSE_ADDRESS_C;\
+ type SURFACE_INUSE_ADDRESS_HIGH_C;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_C;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C;\
+ type PRIMARY_SURFACE_TMZ;\
+ type PRIMARY_SURFACE_DCC_EN;\
+ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
+ type DET_BUF_PLANE1_BASE_ADDRESS;\
+ type CROSSBAR_SRC_CB_B;\
+ type CROSSBAR_SRC_CR_R;\
+ type DRQ_EXPANSION_MODE;\
+ type PRQ_EXPANSION_MODE;\
+ type MRQ_EXPANSION_MODE;\
+ type CRQ_EXPANSION_MODE;\
+ type CHUNK_SIZE;\
+ type MIN_CHUNK_SIZE;\
+ type META_CHUNK_SIZE;\
+ type MIN_META_CHUNK_SIZE;\
+ type DPTE_GROUP_SIZE;\
+ type MPTE_GROUP_SIZE;\
+ type SWATH_HEIGHT;\
+ type PTE_ROW_HEIGHT_LINEAR;\
+ type CHUNK_SIZE_C;\
+ type MIN_CHUNK_SIZE_C;\
+ type META_CHUNK_SIZE_C;\
+ type MIN_META_CHUNK_SIZE_C;\
+ type DPTE_GROUP_SIZE_C;\
+ type MPTE_GROUP_SIZE_C;\
+ type SWATH_HEIGHT_C;\
+ type PTE_ROW_HEIGHT_LINEAR_C;\
+ type REFCYC_H_BLANK_END;\
+ type DLG_V_BLANK_END;\
+ type MIN_DST_Y_NEXT_START;\
+ type REFCYC_PER_HTOTAL;\
+ type REFCYC_X_AFTER_SCALER;\
+ type DST_Y_AFTER_SCALER;\
+ type DST_Y_PREFETCH;\
+ type VRATIO_PREFETCH;\
+ type DST_Y_PER_VM_VBLANK;\
+ type DST_Y_PER_ROW_VBLANK;\
+ type REF_FREQ_TO_PIX_FREQ;\
+ type REFCYC_PER_PTE_GROUP_VBLANK_L;\
+ type REFCYC_PER_META_CHUNK_VBLANK_L;\
+ type DST_Y_PER_PTE_ROW_NOM_L;\
+ type REFCYC_PER_PTE_GROUP_NOM_L;\
+ type DST_Y_PER_META_ROW_NOM_L;\
+ type REFCYC_PER_META_CHUNK_NOM_L;\
+ type REFCYC_PER_LINE_DELIVERY_PRE_L;\
+ type REFCYC_PER_LINE_DELIVERY_PRE_C;\
+ type REFCYC_PER_LINE_DELIVERY_L;\
+ type REFCYC_PER_LINE_DELIVERY_C;\
+ type VRATIO_PREFETCH_C;\
+ type REFCYC_PER_PTE_GROUP_VBLANK_C;\
+ type REFCYC_PER_META_CHUNK_VBLANK_C;\
+ type DST_Y_PER_PTE_ROW_NOM_C;\
+ type REFCYC_PER_PTE_GROUP_NOM_C;\
+ type DST_Y_PER_META_ROW_NOM_C;\
+ type REFCYC_PER_META_CHUNK_NOM_C;\
+ type QoS_LEVEL_LOW_WM;\
+ type QoS_LEVEL_HIGH_WM;\
+ type MIN_TTU_VBLANK;\
+ type QoS_LEVEL_FLIP;\
+ type REFCYC_PER_REQ_DELIVERY;\
+ type QoS_LEVEL_FIXED;\
+ type QoS_RAMP_DISABLE;\
+ type REFCYC_PER_REQ_DELIVERY_PRE;\
+ type VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;\
+ type VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;\
+ type VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\
+ type ENABLE_L1_TLB;\
+ type SYSTEM_ACCESS_MODE;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR;\
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+ type SDPIF_AGP_BASE;\
+ type SDPIF_AGP_BOT;\
+ type SDPIF_AGP_TOP;\
+ type FB_TOP;\
+ type FB_BASE;\
+ type FB_OFFSET;\
+ type AGP_BASE;\
+ type AGP_BOT;\
+ type AGP_TOP;\
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ type PAGE_DIRECTORY_ENTRY_HI32;\
+ type PAGE_DIRECTORY_ENTRY_LO32;\
+ type LOGICAL_PAGE_NUMBER_HI4;\
+ type LOGICAL_PAGE_NUMBER_LO32;\
+ type PHYSICAL_PAGE_ADDR_HI4;\
+ type PHYSICAL_PAGE_ADDR_LO32;\
+ type PHYSICAL_PAGE_NUMBER_MSB;\
+ type PHYSICAL_PAGE_NUMBER_LSB;\
+ type LOGICAL_ADDR;\
+ type CURSOR0_DST_Y_OFFSET; \
+ type CURSOR0_CHUNK_HDL_ADJUST; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+ type OUTPUT_FP
+
+struct dcn_mi_shift {
+ DCN_MI_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn_mi_mask {
+ DCN_MI_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_hubp {
+ struct hubp base;
+ const struct dcn_mi_registers *mi_regs;
+ const struct dcn_mi_shift *mi_shift;
+ const struct dcn_mi_mask *mi_mask;
+};
+
+void hubp1_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+void hubp1_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+
+void hubp1_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs);
+
+void hubp1_program_pixel_format(
+ struct dcn10_hubp *hubp,
+ enum surface_pixel_format format);
+
+void hubp1_program_size_and_rotation(
+ struct dcn10_hubp *hubp,
+ enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+void hubp1_program_tiling(
+ struct dcn10_hubp *hubp,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format);
+
+void hubp1_dcc_control(struct hubp *hubp,
+ bool enable,
+ bool independent_64b_blks);
+
+bool hubp1_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+bool hubp1_is_flip_pending(struct hubp *hubp);
+
+void hubp1_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr);
+
+void hubp1_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+void hubp1_set_blank(struct hubp *hubp, bool blank);
+
+void min_set_viewport(struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void dcn10_hubp_construct(
+ struct dcn10_hubp *hubp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_mi_registers *mi_regs,
+ const struct dcn_mi_shift *mi_shift,
+ const struct dcn_mi_mask *mi_mask);
+
+
+struct dcn_hubp_state {
+ uint32_t pixel_format;
+ uint32_t inuse_addr_hi;
+ uint32_t viewport_width;
+ uint32_t viewport_height;
+ uint32_t rotation_angle;
+ uint32_t h_mirror_en;
+ uint32_t sw_mode;
+ uint32_t dcc_en;
+ uint32_t blank_en;
+ uint32_t underflow_status;
+ uint32_t ttu_disable;
+ uint32_t min_ttu_vblank;
+ uint32_t qos_level_low_wm;
+ uint32_t qos_level_high_wm;
+};
+void hubp1_read_state(struct dcn10_hubp *hubp1,
+ struct dcn_hubp_state *s);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
new file mode 100644
index 000000000000..961ad5c3b454
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -0,0 +1,2958 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "resource.h"
+#include "custom_float.h"
+#include "dcn10_hw_sequencer.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+#include "abm.h"
+#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_mpc.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "reg_helper.h"
+#include "custom_float.h"
+#include "dcn10_hubp.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+static void log_mpc_crc(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(MPC_CRC_RESULT_GB))
+ DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
+ REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
+ if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
+ DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
+ REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
+}
+
+void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
+{
+ static const uint32_t ref_clk_mhz = 48;
+ static const unsigned int frac = 10;
+ uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
+
+ DTN_INFO("%d.%d \t ",
+ us_x10 / frac,
+ us_x10 % frac);
+}
+
+#define DTN_INFO_MICRO_SEC(ref_cycle) \
+ print_microsec(dc_ctx, ref_cycle)
+
+struct dcn_hubbub_wm_set {
+ uint32_t wm_set;
+ uint32_t data_urgent;
+ uint32_t pte_meta_urgent;
+ uint32_t sr_enter;
+ uint32_t sr_exit;
+ uint32_t dram_clk_chanage;
+};
+
+struct dcn_hubbub_wm {
+ struct dcn_hubbub_wm_set sets[4];
+};
+
+static void dcn10_hubbub_wm_read_state(struct dce_hwseq *hws,
+ struct dcn_hubbub_wm *wm)
+{
+ struct dcn_hubbub_wm_set *s;
+
+ s = &wm->sets[0];
+ s->wm_set = 0;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+
+ s = &wm->sets[1];
+ s->wm_set = 1;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+
+ s = &wm->sets[2];
+ s->wm_set = 2;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+
+ s = &wm->sets[3];
+ s->wm_set = 3;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+}
+
+static void dcn10_log_hubbub_state(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcn_hubbub_wm wm;
+ int i;
+
+ dcn10_hubbub_wm_read_state(dc->hwseq, &wm);
+
+ DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t "
+ "sr_enter \t sr_exit \t dram_clk_change \n");
+
+ for (i = 0; i < 4; i++) {
+ struct dcn_hubbub_wm_set *s;
+
+ s = &wm.sets[i];
+ DTN_INFO("WM_Set[%d]:\t ", s->wm_set);
+ DTN_INFO_MICRO_SEC(s->data_urgent);
+ DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
+ DTN_INFO_MICRO_SEC(s->sr_enter);
+ DTN_INFO_MICRO_SEC(s->sr_exit);
+ DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
+ DTN_INFO("\n");
+ }
+
+ DTN_INFO("\n");
+}
+
+static void dcn10_log_hw_state(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO_BEGIN();
+
+ dcn10_log_hubbub_state(dc);
+
+ DTN_INFO("HUBP:\t format \t addr_hi \t width \t height \t "
+ "rotation \t mirror \t sw_mode \t "
+ "dcc_en \t blank_en \t ttu_dis \t underflow \t "
+ "min_ttu_vblank \t qos_low_wm \t qos_high_wm \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state s;
+
+ hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
+
+ DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t "
+ "%xh \t %xh \t %xh \t "
+ "%d \t %d \t %d \t %xh \t",
+ i,
+ s.pixel_format,
+ s.inuse_addr_hi,
+ s.viewport_width,
+ s.viewport_height,
+ s.rotation_angle,
+ s.h_mirror_en,
+ s.sw_mode,
+ s.dcc_en,
+ s.blank_en,
+ s.ttu_disable,
+ s.underflow_status);
+ DTN_INFO_MICRO_SEC(s.min_ttu_vblank);
+ DTN_INFO_MICRO_SEC(s.qos_level_low_wm);
+ DTN_INFO_MICRO_SEC(s.qos_level_high_wm);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+
+ DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
+ "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
+
+ for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
+ struct timing_generator *tg = pool->timing_generators[i];
+ struct dcn_otg_state s = {0};
+
+ tgn10_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+ //only print if OTG master is enabled
+ if ((s.otg_enabled & 1) == 0)
+ continue;
+
+ DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t "
+ "%d \t %d \t %d \t %d \t %d \t %d \t "
+ "%d \t %d \t %d \t %d \t %d \t ",
+ i,
+ s.v_blank_start,
+ s.v_blank_end,
+ s.v_sync_a_start,
+ s.v_sync_a_end,
+ s.v_sync_a_pol,
+ s.v_total_max,
+ s.v_total_min,
+ s.h_blank_start,
+ s.h_blank_end,
+ s.h_sync_a_start,
+ s.h_sync_a_end,
+ s.h_sync_a_pol,
+ s.h_total,
+ s.v_total,
+ s.underflow_occurred_status);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+
+ log_mpc_crc(dc);
+
+ DTN_INFO_END();
+}
+
+static void verify_allow_pstate_change_high(
+ struct dce_hwseq *hws)
+{
+ /* pstate latency is ~20us so if we wait over 40us and pstate allow
+ * still not asserted, we are probably stuck and going to hang
+ *
+ * TODO: Figure out why it takes ~100us on linux
+ * pstate takes around ~100us on linux. Unknown currently as to
+ * why it takes that long on linux
+ */
+ static unsigned int pstate_wait_timeout_us = 200;
+ static unsigned int pstate_wait_expected_timeout_us = 40;
+ static unsigned int max_sampled_pstate_wait_us; /* data collection */
+ static bool forced_pstate_allow; /* help with revert wa */
+ static bool should_log_hw_state; /* prevent hw state log by default */
+
+ unsigned int debug_index = 0x7;
+ unsigned int debug_data;
+ unsigned int i;
+
+ if (forced_pstate_allow) {
+ /* we hacked to force pstate allow to prevent hang last time
+ * we verify_allow_pstate_change_high. so disable force
+ * here so we can check status
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
+ forced_pstate_allow = false;
+ }
+
+ /* description "3-0: Pipe0 cursor0 QOS
+ * 7-4: Pipe1 cursor0 QOS
+ * 11-8: Pipe2 cursor0 QOS
+ * 15-12: Pipe3 cursor0 QOS
+ * 16: Pipe0 Plane0 Allow Pstate Change
+ * 17: Pipe1 Plane0 Allow Pstate Change
+ * 18: Pipe2 Plane0 Allow Pstate Change
+ * 19: Pipe3 Plane0 Allow Pstate Change
+ * 20: Pipe0 Plane1 Allow Pstate Change
+ * 21: Pipe1 Plane1 Allow Pstate Change
+ * 22: Pipe2 Plane1 Allow Pstate Change
+ * 23: Pipe3 Plane1 Allow Pstate Change
+ * 24: Pipe0 cursor0 Allow Pstate Change
+ * 25: Pipe1 cursor0 Allow Pstate Change
+ * 26: Pipe2 cursor0 Allow Pstate Change
+ * 27: Pipe3 cursor0 Allow Pstate Change
+ * 28: WB0 Allow Pstate Change
+ * 29: WB1 Allow Pstate Change
+ * 30: Arbiter's allow_pstate_change
+ * 31: SOC pstate change request
+ */
+
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, debug_index);
+
+ for (i = 0; i < pstate_wait_timeout_us; i++) {
+ debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+
+ if (debug_data & (1 << 30)) {
+
+ if (i > pstate_wait_expected_timeout_us)
+ dm_logger_write(hws->ctx->logger, LOG_WARNING,
+ "pstate took longer than expected ~%dus\n",
+ i);
+
+ return;
+ }
+ if (max_sampled_pstate_wait_us < i)
+ max_sampled_pstate_wait_us = i;
+
+ udelay(1);
+ }
+
+ /* force pstate allow to prevent system hang
+ * and break to debugger to investigate
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
+ forced_pstate_allow = true;
+
+ if (should_log_hw_state) {
+ dcn10_log_hw_state(hws->ctx->dc);
+ }
+
+ dm_logger_write(hws->ctx->logger, LOG_WARNING,
+ "pstate TEST_DEBUG_DATA: 0x%X\n",
+ debug_data);
+ BREAK_TO_DEBUGGER();
+}
+
+static void enable_dppclk(
+ struct dce_hwseq *hws,
+ uint8_t plane_id,
+ uint32_t requested_pix_clk,
+ bool dppclk_div)
+{
+ dm_logger_write(hws->ctx->logger, LOG_SURFACE,
+ "dppclk_rate_control for pipe %d programed to %d\n",
+ plane_id,
+ dppclk_div);
+
+ if (hws->shifts->DPPCLK_RATE_CONTROL)
+ REG_UPDATE_2(DPP_CONTROL[plane_id],
+ DPPCLK_RATE_CONTROL, dppclk_div,
+ DPP_CLOCK_ENABLE, 1);
+ else
+ REG_UPDATE(DPP_CONTROL[plane_id],
+ DPP_CLOCK_ENABLE, 1);
+}
+
+static void enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable)
+{
+ bool force_on = 1; /* disable power gating */
+
+ if (enable)
+ force_on = 0;
+
+ /* DCHUBP0/1/2/3 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
+
+ /* DPP0/1/2/3 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
+}
+
+static void disable_vga(
+ struct dce_hwseq *hws)
+{
+ REG_WRITE(D1VGA_CONTROL, 0);
+ REG_WRITE(D2VGA_CONTROL, 0);
+ REG_WRITE(D3VGA_CONTROL, 0);
+ REG_WRITE(D4VGA_CONTROL, 0);
+}
+
+static void dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
+ uint32_t ret_val = 0;
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value)
+ ret_val = clamp_value;
+
+ return ret_val;
+}
+
+static void program_watermarks(
+ struct dce_hwseq *hws,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz)
+{
+ uint32_t force_en = hws->ctx->dc->debug.disable_stutter ? 1 : 0;
+ /*
+ * Need to clamp to max of the register values (i.e. no wrap)
+ * for dcn1, all wm registers are 21-bit wide
+ */
+ uint32_t prog_wm_value;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
+
+ /* Repeat for water mark set A, B, C and D. */
+ /* clock state A */
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+
+ /* clock state B */
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state C */
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state D */
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+
+ REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
+
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en);
+
+#if 0
+ REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+#endif
+}
+
+
+static void dcn10_update_dchub(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data)
+{
+ /* TODO: port code from dal2 */
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+ /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
+ SDPIF_FB_TOP, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
+ SDPIF_FB_BASE, 0x0FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, 0X03FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
+
+static void hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
+
+ switch (hubp_inst) {
+ case 0: /* DCHUBP0 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG,
+ DOMAIN0_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN0_PG_STATUS,
+ DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DCHUBP1 */
+ REG_UPDATE(DOMAIN2_PG_CONFIG,
+ DOMAIN2_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN2_PG_STATUS,
+ DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DCHUBP2 */
+ REG_UPDATE(DOMAIN4_PG_CONFIG,
+ DOMAIN4_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN4_PG_STATUS,
+ DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DCHUBP3 */
+ REG_UPDATE(DOMAIN6_PG_CONFIG,
+ DOMAIN6_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN6_PG_STATUS,
+ DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void power_on_plane(
+ struct dce_hwseq *hws,
+ int plane_id)
+{
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, plane_id, true);
+ hubp_pg_control(hws, plane_id, true);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(hws->ctx->logger, LOG_DEBUG,
+ "Un-gated front end for pipe %d\n", plane_id);
+ }
+}
+
+static void undo_DEGVIDCN10_253_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[0];
+ int pwr_status = 0;
+
+ REG_GET(DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, &pwr_status);
+ /* Don't need to blank if hubp is power gated*/
+ if (pwr_status == 2)
+ return;
+
+ hubp->funcs->set_blank(hubp, true);
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ hubp_pg_control(hws, 0, false);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+}
+
+static void apply_DEGVIDCN10_253_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[0];
+
+ if (dc->debug.disable_stutter)
+ return;
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ hubp_pg_control(hws, 0, true);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
+ hubp->funcs->set_hubp_blank_en(hubp, false);
+}
+
+static void bios_golden_init(struct dc *dc)
+{
+ struct dc_bios *bp = dc->ctx->dc_bios;
+ int i;
+
+ /* initialize dcn global */
+ bp->funcs->enable_disp_power_gating(bp,
+ CONTROLLER_ID_D0, ASIC_PIPE_INIT);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ /* initialize dcn per pipe */
+ bp->funcs->enable_disp_power_gating(bp,
+ CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
+ }
+}
+
+static void dcn10_init_hw(struct dc *dc)
+{
+ int i;
+ struct abm *abm = dc->res_pool->abm;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ REG_WRITE(REFCLK_CNTL, 0);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
+ return;
+ }
+ /* end of FPGA. Below if real ASIC */
+
+ bios_golden_init(dc);
+
+ disable_vga(dc->hwseq);
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ dc->res_pool->mpc->funcs->remove(
+ dc->res_pool->mpc, &(dc->res_pool->opps[i]->mpc_tree),
+ dc->res_pool->opps[i]->inst, i);
+
+ /* Blank controller using driver code instead of
+ * command table.
+ */
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+ struct audio *audio = dc->res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ if (abm != NULL) {
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+
+ /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
+}
+
+static enum dc_status dcn10_prog_pixclk_crtc_otg(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ bool enableStereo = stream->timing.timing_3d_format == TIMING_3D_FORMAT_NONE ?
+ false:true;
+ bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
+
+
+ /* by upper caller loop, pipe0 is parent pipe and be called first.
+ * back end is set up by for pipe0. Other children pipe share back end
+ * with pipe 0. No program is needed.
+ */
+ if (pipe_ctx->top_pipe != NULL)
+ return DC_OK;
+
+ /* TODO check if timing_changed, disable stream if timing changed */
+
+ /* HW program guide assume display already disable
+ * by unplug sequence. OTG assume stop.
+ */
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
+
+ if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
+
+ pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ true);
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp,
+ enableStereo,
+ rightEyePolarity);
+
+#if 0 /* move to after enable_crtc */
+ /* TODO: OPP FMT, ABM. etc. should be done here. */
+ /* or FPGA now. instance 0 only. TODO: move to opp.c */
+
+ inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+#endif
+ /* program otg blank color */
+ color_space = stream->output_color_space;
+ color_space_to_black_color(dc, color_space, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+ hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+
+ /* VTG is within DCHUB command block. DCFCLK is always on */
+ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ /* TODO program crtc source select for non-virtual signal*/
+ /* TODO program FMT */
+ /* TODO setup link_enc */
+ /* TODO set stream attributes */
+ /* TODO program audio */
+ /* TODO enable stream if timing changed */
+ /* TODO unblank stream if DP */
+
+ return DC_OK;
+}
+
+static void reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ int i;
+
+ if (pipe_ctx->stream_res.stream_enc == NULL) {
+ pipe_ctx->stream = NULL;
+ return;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ /* DPMS may already disable */
+ if (!pipe_ctx->stream->dpms_off)
+ core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ }
+
+ /* by upper caller loop, parent pipe: pipe0, will be reset last.
+ * back end share by all pipes and will be disable only when disable
+ * parent pipe.
+ */
+ if (pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
+ break;
+
+ if (i == dc->res_pool->pipe_count)
+ return;
+
+ pipe_ctx->stream = NULL;
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Reset back end for pipe %d, tg:%d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+}
+
+/* trigger HW to start disconnect plane from stream on the next vsync */
+static void plane_atomic_disconnect(struct dc *dc,
+ int fe_idx)
+{
+ struct hubp *hubp = dc->res_pool->hubps[fe_idx];
+ struct mpc *mpc = dc->res_pool->mpc;
+ int opp_id, z_idx;
+ int mpcc_id = -1;
+
+ /* look at tree rather than mi here to know if we already reset */
+ for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) {
+ struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
+
+ for (z_idx = 0; z_idx < opp->mpc_tree.num_pipes; z_idx++) {
+ if (opp->mpc_tree.dpp[z_idx] == fe_idx) {
+ mpcc_id = opp->mpc_tree.mpcc[z_idx];
+ break;
+ }
+ }
+ if (mpcc_id != -1)
+ break;
+ }
+ /*Already reset*/
+ if (opp_id == dc->res_pool->pipe_count)
+ return;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+ hubp->funcs->dcc_control(hubp, false, false);
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ mpc->funcs->remove(mpc, &(dc->res_pool->opps[opp_id]->mpc_tree),
+ dc->res_pool->opps[opp_id]->inst, fe_idx);
+}
+
+/* disable HW used by plane.
+ * note: cannot disable until disconnect is complete */
+static void plane_atomic_disable(struct dc *dc,
+ int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[fe_idx];
+ struct mpc *mpc = dc->res_pool->mpc;
+ int opp_id = hubp->opp_id;
+
+ if (opp_id == 0xf)
+ return;
+
+ mpc->funcs->wait_for_idle(mpc, hubp->mpcc_id);
+ dc->res_pool->opps[hubp->opp_id]->mpcc_disconnect_pending[hubp->mpcc_id] = false;
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: atomic disable finished on mpcc %d]\n",
+ fe_idx);*/
+
+ hubp->funcs->set_blank(hubp, true);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ REG_UPDATE(HUBP_CLK_CNTL[fe_idx],
+ HUBP_CLOCK_ENABLE, 0);
+ REG_UPDATE(DPP_CONTROL[fe_idx],
+ DPP_CLOCK_ENABLE, 0);
+
+ if (dc->res_pool->opps[opp_id]->mpc_tree.num_pipes == 0)
+ REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
+ OPP_PIPE_CLOCK_EN, 0);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+/*
+ * kill power to plane hw
+ * note: cannot power down until plane is disable
+ */
+static void plane_atomic_power_down(struct dc *dc, int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, fe_idx, false);
+ hubp_pg_control(hws, fe_idx, false);
+ dpp->funcs->dpp_reset(dpp);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Power gated front end %d\n", fe_idx);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+}
+
+
+static void reset_front_end(
+ struct dc *dc,
+ int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct timing_generator *tg;
+ int opp_id = dc->res_pool->hubps[fe_idx]->opp_id;
+
+ /*Already reset*/
+ if (opp_id == 0xf)
+ return;
+
+ tg = dc->res_pool->timing_generators[opp_id];
+ tg->funcs->lock(tg);
+
+ plane_atomic_disconnect(dc, fe_idx);
+
+ REG_UPDATE(OTG_GLOBAL_SYNC_STATUS[tg->inst], VUPDATE_NO_LOCK_EVENT_CLEAR, 1);
+ tg->funcs->unlock(tg);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(hws);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_GLOBAL_SYNC_STATUS[tg->inst],
+ VUPDATE_NO_LOCK_EVENT_OCCURRED, 1,
+ 1, 100000);
+
+ plane_atomic_disable(dc, fe_idx);
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Reset front end %d\n",
+ fe_idx);
+}
+
+static void dcn10_power_down_fe(struct dc *dc, int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+
+ reset_front_end(dc, fe_idx);
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, fe_idx, false);
+ hubp_pg_control(hws, fe_idx, false);
+ dpp->funcs->dpp_reset(dpp);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Power gated front end %d\n", fe_idx);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+static void reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* Reset Front End*/
+ /* Lock*/
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
+
+ if (cur_pipe_ctx->stream)
+ tg->funcs->lock(tg);
+ }
+ /* Disconnect*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+
+ plane_atomic_disconnect(dc, i);
+ }
+ }
+ /* Unlock*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
+
+ if (cur_pipe_ctx->stream)
+ tg->funcs->unlock(tg);
+ }
+
+ /* Disable and Powerdown*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /*if (!pipe_ctx_old->stream)
+ continue;*/
+
+ if (pipe_ctx->stream && pipe_ctx->plane_state
+ && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ plane_atomic_disable(dc, i);
+
+ if (!pipe_ctx->stream || !pipe_ctx->plane_state)
+ plane_atomic_power_down(dc, i);
+ }
+
+ /* Reset Back End*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+
+}
+
+static bool patch_address_for_sbs_tb_stereo(
+ struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ (pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
+ *addr = plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.left_addr =
+ plane_state->address.grph_stereo.right_addr;
+ return true;
+ } else {
+ if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
+ plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
+ plane_state->address.grph_stereo.right_addr =
+ plane_state->address.grph_stereo.left_addr;
+ }
+ }
+ return false;
+}
+
+static void toggle_watermark_change_req(struct dce_hwseq *hws)
+{
+ uint32_t watermark_change_req;
+
+ REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
+
+ if (watermark_change_req)
+ watermark_change_req = 0;
+ else
+ watermark_change_req = 1;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
+}
+
+static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ bool addr_patched = false;
+ PHYSICAL_ADDRESS_LOC addr;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+ addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+ pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.hubp,
+ &plane_state->address,
+ plane_state->flip_immediate);
+ plane_state->status.requested_address = plane_state->address;
+ if (addr_patched)
+ pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
+}
+
+static bool dcn10_set_input_transfer_func(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ const struct dc_transfer_func *tf = NULL;
+ bool result = true;
+
+ if (dpp_base == NULL)
+ return false;
+
+ if (plane_state->in_transfer_func)
+ tf = plane_state->in_transfer_func;
+
+ if (plane_state->gamma_correction && dce_use_lut(plane_state))
+ dpp_base->funcs->ipp_program_input_lut(dpp_base,
+ plane_state->gamma_correction);
+
+ if (tf == NULL)
+ dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ else if (tf->type == TF_TYPE_PREDEFINED) {
+ switch (tf->tf) {
+ case TRANSFER_FUNCTION_SRGB:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_xvYCC);
+ break;
+ case TRANSFER_FUNCTION_LINEAR:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_BYPASS);
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ } else if (tf->type == TF_TYPE_BYPASS) {
+ dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ } else {
+ /*TF_TYPE_DISTRIBUTED_POINTS*/
+ result = false;
+ }
+
+ return result;
+}
+/*modify the method to handle rgb for arr_points*/
+static bool convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].x,
+ &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].offset,
+ &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].slope,
+ &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].x,
+ &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].y,
+ &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].slope,
+ &fmt,
+ &arr_points[1].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(
+ rgb->red,
+ &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->green,
+ &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->blue,
+ &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_red,
+ &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_green,
+ &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_blue,
+ &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_SEGMENTS 32
+
+static bool dcn10_translate_regamma_to_hw_format(const struct dc_transfer_func
+ *output_tf, struct pwl_params *regamma_params)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || regamma_params == NULL ||
+ output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ arr_points = regamma_params->arr_points;
+ rgb_resulted = regamma_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(regamma_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < 32 ; i++)
+ seg_distr[i] = 3;
+
+ segment_start = -25;
+ segment_end = 7;
+ } else {
+ /* 10 segments
+ * segment is from 2^-10 to 2^0
+ * There are less than 256 points, for optimization
+ */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+
+ segment_start = -10;
+ segment_end = 0;
+ }
+
+ for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ rgb_resulted[hw_points - 1].red =
+ output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green =
+ output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue =
+ output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+ arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(
+ arr_points[0].y,
+ arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+ arr_points[2].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+ arr_points[2].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ arr_points[2].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ regamma_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1) {
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ regamma_params->arr_curve_points[i].offset =
+ regamma_params->arr_curve_points[k].
+ offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(
+ rgb_plus_1->red,
+ rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(
+ rgb_plus_1->green,
+ rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(
+ rgb_plus_1->blue,
+ rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+
+ convert_to_custom_float(rgb_resulted, arr_points, hw_points);
+
+ return true;
+}
+
+static bool dcn10_set_output_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
+ if (dpp == NULL)
+ return false;
+
+ dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
+
+ if (stream->out_transfer_func &&
+ stream->out_transfer_func->type ==
+ TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf ==
+ TRANSFER_FUNCTION_SRGB) {
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_SRGB);
+ } else if (dcn10_translate_regamma_to_hw_format(
+ stream->out_transfer_func, &dpp->regamma_params)) {
+ dpp->funcs->opp_program_regamma_pwl(dpp, &dpp->regamma_params);
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_USER);
+ } else {
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_BYPASS);
+ }
+
+ return true;
+}
+
+static void dcn10_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ struct hubp *hubp = NULL;
+ hubp = dc->res_pool->hubps[pipe->pipe_idx];
+ /* use TG master update lock to lock everything on the TG
+ * therefore only top pipe need to lock
+ */
+ if (pipe->top_pipe)
+ return;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ if (lock)
+ pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+static bool wait_for_reset_trigger_to_occur(
+ struct dc_context *dc_ctx,
+ struct timing_generator *tg)
+{
+ bool rc = false;
+
+ /* To avoid endless loop we wait at most
+ * frames_to_wait_on_triggered_reset frames for the reset to occur. */
+ const uint32_t frames_to_wait_on_triggered_reset = 10;
+ int i;
+
+ for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
+
+ if (!tg->funcs->is_counter_moving(tg)) {
+ DC_ERROR("TG counter is not moving!\n");
+ break;
+ }
+
+ if (tg->funcs->did_triggered_reset_occur(tg)) {
+ rc = true;
+ /* usually occurs at i=1 */
+ DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
+ i);
+ break;
+ }
+
+ /* Wait for one frame. */
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ }
+
+ if (false == rc)
+ DC_ERROR("GSL: Timeout on reset trigger!\n");
+
+ return rc;
+}
+
+static void dcn10_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ int i;
+
+ DC_SYNC_INFO("Setting up OTG reset trigger\n");
+
+ for (i = 1; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg, grouped_pipes[0]->stream_res.tg->inst);
+
+
+ DC_SYNC_INFO("Waiting for trigger\n");
+
+ /* Need to get only check 1 pipe for having reset as all the others are
+ * synchronized. Look at last pipe programmed to reset.
+ */
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+ for (i = 1; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg);
+
+ DC_SYNC_INFO("Sync complete\n");
+}
+
+static void print_rq_dlg_ttu(
+ struct dc *core_dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML TTU Output parameters [%d] ==============\n"
+ "qos_level_low_wm: %d, \n"
+ "qos_level_high_wm: %d, \n"
+ "min_ttu_vblank: %d, \n"
+ "qos_level_flip: %d, \n"
+ "refcyc_per_req_delivery_l: %d, \n"
+ "qos_level_fixed_l: %d, \n"
+ "qos_ramp_disable_l: %d, \n"
+ "refcyc_per_req_delivery_pre_l: %d, \n"
+ "refcyc_per_req_delivery_c: %d, \n"
+ "qos_level_fixed_c: %d, \n"
+ "qos_ramp_disable_c: %d, \n"
+ "refcyc_per_req_delivery_pre_c: %d\n"
+ "=============================================================\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->ttu_regs.qos_level_low_wm,
+ pipe_ctx->ttu_regs.qos_level_high_wm,
+ pipe_ctx->ttu_regs.min_ttu_vblank,
+ pipe_ctx->ttu_regs.qos_level_flip,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
+ pipe_ctx->ttu_regs.qos_level_fixed_l,
+ pipe_ctx->ttu_regs.qos_ramp_disable_l,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
+ pipe_ctx->ttu_regs.qos_level_fixed_c,
+ pipe_ctx->ttu_regs.qos_ramp_disable_c,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML DLG Output parameters [%d] ==============\n"
+ "refcyc_h_blank_end: %d, \n"
+ "dlg_vblank_end: %d, \n"
+ "min_dst_y_next_start: %d, \n"
+ "refcyc_per_htotal: %d, \n"
+ "refcyc_x_after_scaler: %d, \n"
+ "dst_y_after_scaler: %d, \n"
+ "dst_y_prefetch: %d, \n"
+ "dst_y_per_vm_vblank: %d, \n"
+ "dst_y_per_row_vblank: %d, \n"
+ "ref_freq_to_pix_freq: %d, \n"
+ "vratio_prefetch: %d, \n"
+ "refcyc_per_pte_group_vblank_l: %d, \n"
+ "refcyc_per_meta_chunk_vblank_l: %d, \n"
+ "dst_y_per_pte_row_nom_l: %d, \n"
+ "refcyc_per_pte_group_nom_l: %d, \n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->dlg_regs.refcyc_h_blank_end,
+ pipe_ctx->dlg_regs.dlg_vblank_end,
+ pipe_ctx->dlg_regs.min_dst_y_next_start,
+ pipe_ctx->dlg_regs.refcyc_per_htotal,
+ pipe_ctx->dlg_regs.refcyc_x_after_scaler,
+ pipe_ctx->dlg_regs.dst_y_after_scaler,
+ pipe_ctx->dlg_regs.dst_y_prefetch,
+ pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
+ pipe_ctx->dlg_regs.dst_y_per_row_vblank,
+ pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
+ pipe_ctx->dlg_regs.vratio_prefetch,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
+ pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\ndst_y_per_meta_row_nom_l: %d, \n"
+ "refcyc_per_meta_chunk_nom_l: %d, \n"
+ "refcyc_per_line_delivery_pre_l: %d, \n"
+ "refcyc_per_line_delivery_l: %d, \n"
+ "vratio_prefetch_c: %d, \n"
+ "refcyc_per_pte_group_vblank_c: %d, \n"
+ "refcyc_per_meta_chunk_vblank_c: %d, \n"
+ "dst_y_per_pte_row_nom_c: %d, \n"
+ "refcyc_per_pte_group_nom_c: %d, \n"
+ "dst_y_per_meta_row_nom_c: %d, \n"
+ "refcyc_per_meta_chunk_nom_c: %d, \n"
+ "refcyc_per_line_delivery_pre_c: %d, \n"
+ "refcyc_per_line_delivery_c: %d \n"
+ "========================================================\n",
+ pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
+ pipe_ctx->dlg_regs.vratio_prefetch_c,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
+ pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
+ pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML RQ Output parameters [%d] ==============\n"
+ "chunk_size: %d \n"
+ "min_chunk_size: %d \n"
+ "meta_chunk_size: %d \n"
+ "min_meta_chunk_size: %d \n"
+ "dpte_group_size: %d \n"
+ "mpte_group_size: %d \n"
+ "swath_height: %d \n"
+ "pte_row_height_linear: %d \n"
+ "========================================================\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->rq_regs.rq_regs_l.chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
+ pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
+ pipe_ctx->rq_regs.rq_regs_l.swath_height,
+ pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
+ );
+}
+
+static void dcn10_power_on_fe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ power_on_plane(dc->hwseq,
+ pipe_ctx->pipe_idx);
+
+ /* enable DCFCLK current DCHUB */
+ REG_UPDATE(HUBP_CLK_CNTL[pipe_ctx->pipe_idx],
+ HUBP_CLOCK_ENABLE, 1);
+
+ /* make sure OPP_PIPE_CLOCK_EN = 1 */
+ REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst],
+ OPP_PIPE_CLOCK_EN, 1);
+ /*TODO: REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, 0x1f);*/
+
+ if (plane_state) {
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;\n",
+ pipe_ctx->pipe_idx,
+ plane_state,
+ plane_state->address.grph.addr.high_part,
+ plane_state->address.grph.addr.low_part,
+ plane_state->src_rect.x,
+ plane_state->src_rect.y,
+ plane_state->src_rect.width,
+ plane_state->src_rect.height,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.height);
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Pipe %d: width, height, x, y format:%d\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ plane_state->format,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+ print_rq_dlg_ttu(dc, pipe_ctx);
+ }
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+}
+
+static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+{
+ struct dpp_grph_csc_adjustment adjust;
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
+}
+
+
+static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix)
+{
+ int i;
+ struct out_csc_color_matrix tbl_entry;
+
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ enum dc_color_space color_space =
+ pipe_ctx->stream->output_color_space;
+
+ //uint16_t matrix[12];
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = color_space;
+ //tbl_entry.regval = matrix;
+ pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ }
+}
+static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ return false;
+}
+
+static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+static bool is_rgb_cspace(enum dc_color_space output_color_space)
+{
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_ADOBERGB:
+ return true;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ return false;
+ default:
+ /* Add a case to switch */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+static void dcn10_get_surface_visual_confirm_color(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ switch (pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB8888:
+ /* set boarder color to red */
+ color->color_r_cr = color_value;
+ break;
+
+ case PIXEL_FORMAT_ARGB2101010:
+ /* set boarder color to blue */
+ color->color_b_cb = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP8:
+ /* set boarder color to green */
+ color->color_g_y = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP10:
+ /* set boarder color to yellow */
+ color->color_g_y = color_value;
+ color->color_r_cr = color_value;
+ break;
+ case PIXEL_FORMAT_FP16:
+ /* set boarder color to white */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
+ color->color_g_y = color_value;
+ break;
+ default:
+ break;
+ }
+}
+
+static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
+ struct vm_system_aperture_param *apt,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC physical_page_number;
+ uint32_t logical_addr_low;
+ uint32_t logical_addr_high;
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ LOGICAL_ADDR, &logical_addr_low);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ LOGICAL_ADDR, &logical_addr_high);
+
+ apt->sys_default.quad_part = physical_page_number.quad_part << 12;
+ apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
+ apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
+}
+
+/* Temporary read settings, future will get values from kmd directly */
+static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
+ struct vm_context0_param *vm0,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC fb_base;
+ PHYSICAL_ADDRESS_LOC fb_offset;
+ uint32_t fb_base_value;
+ uint32_t fb_offset_value;
+
+ REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
+ REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
+
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
+
+ /*
+ * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
+ * Therefore we need to do
+ * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
+ * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
+ */
+ fb_base.quad_part = (uint64_t)fb_base_value << 24;
+ fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
+ vm0->pte_base.quad_part += fb_base.quad_part;
+ vm0->pte_base.quad_part -= fb_offset.quad_part;
+}
+
+static void dcn10_program_pte_vm(struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation,
+ struct dce_hwseq *hws)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct vm_system_aperture_param apt = { {{ 0 } } };
+ struct vm_context0_param vm0 = { { { 0 } } };
+
+
+ mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
+ mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
+
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+ hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
+}
+
+static void update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ union plane_size size = plane_state->plane_size;
+ struct mpcc_cfg mpcc_cfg = {0};
+ struct pipe_ctx *top_pipe;
+ bool per_pixel_alpha = plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+
+ /* TODO: proper fix once fpga works */
+ /* depends on DML calculation, DPP clock value may change dynamically */
+ enable_dppclk(
+ dc->hwseq,
+ pipe_ctx->pipe_idx,
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk,
+ context->bw.dcn.calc_clk.dppclk_div);
+ dc->current_state->bw.dcn.cur_clk.dppclk_div =
+ context->bw.dcn.calc_clk.dppclk_div;
+ context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div;
+
+ /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
+ * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
+ * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
+ */
+ REG_UPDATE(DCHUBP_CNTL[pipe_ctx->pipe_idx], HUBP_VTG_SEL, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+
+ size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+
+ if (dc->config.gpu_vm_support)
+ dcn10_program_pte_vm(
+ pipe_ctx->plane_res.hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation,
+ hws
+ );
+
+ dpp->funcs->ipp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO);
+
+ mpcc_cfg.dpp_id = hubp->inst;
+ mpcc_cfg.opp_id = pipe_ctx->stream_res.opp->inst;
+ mpcc_cfg.tree_cfg = &(pipe_ctx->stream_res.opp->mpc_tree);
+ for (top_pipe = pipe_ctx->top_pipe; top_pipe; top_pipe = top_pipe->top_pipe)
+ mpcc_cfg.z_index++;
+ if (dc->debug.surface_visual_confirm)
+ dcn10_get_surface_visual_confirm_color(
+ pipe_ctx, &mpcc_cfg.black_color);
+ else
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space,
+ &mpcc_cfg.black_color);
+ mpcc_cfg.per_pixel_alpha = per_pixel_alpha;
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ mpcc_cfg.pre_multiplied_alpha = is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && per_pixel_alpha;
+ hubp->mpcc_id = dc->res_pool->mpc->funcs->add(dc->res_pool->mpc, &mpcc_cfg);
+ hubp->opp_id = mpcc_cfg.opp_id;
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+
+ hubp->funcs->mem_program_viewport(hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+
+ /*gamut remap*/
+ program_gamut_remap(pipe_ctx);
+
+ program_csc_matrix(pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix);
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror);
+
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ if (is_pipe_tree_visible(pipe_ctx))
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
+static void program_all_pipe_in_tree(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+
+ if (pipe_ctx->top_pipe == NULL) {
+
+ /* lock otg_master_update to process all pipes associated with
+ * this OTG. this is done only one time.
+ */
+ /* watermark is for all pipes */
+ program_watermarks(dc->hwseq, &context->bw.dcn.watermarks, ref_clk_mhz);
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after watermark update */
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
+ pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, !is_pipe_tree_visible(pipe_ctx));
+ }
+
+ if (pipe_ctx->plane_state != NULL) {
+ struct dc_cursor_position position = { 0 };
+ struct pipe_ctx *cur_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+
+ dcn10_power_on_fe(dc, pipe_ctx, context);
+
+ /* temporary dcn1 wa:
+ * watermark update requires toggle after a/b/c/d sets are programmed
+ * if hubp is pg then wm value doesn't get properaged to hubp
+ * need to toggle after ungate to ensure wm gets to hubp.
+ *
+ * final solution: we need to get SMU to do the toggle as
+ * DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST is owned by SMU we should have
+ * both driver and fw accessing same register
+ */
+ toggle_watermark_change_req(dc->hwseq);
+
+ update_dchubp_dpp(dc, pipe_ctx, context);
+
+ /* TODO: this is a hack w/a for switching from mpo to pipe split */
+ dc_stream_set_cursor_position(pipe_ctx->stream, &position);
+
+ dc_stream_set_cursor_attributes(pipe_ctx->stream,
+ &pipe_ctx->stream->cursor_attributes);
+
+ if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+ }
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after each pipe is programmed */
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
+ program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
+}
+
+static void dcn10_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync = false;/*todo*/
+ pp_display_cfg->nb_pstate_switch_disable = false;
+ pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+ pp_display_cfg->avail_mclk_switch_time_us =
+ context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
+ context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
+ pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
+static void optimize_shared_resources(struct dc *dc)
+{
+ if (dc->current_state->stream_count == 0) {
+ apply_DEGVIDCN10_253_wa(dc);
+ /* S0i2 message */
+ dcn10_pplib_apply_display_requirements(dc, dc->current_state);
+ }
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+}
+
+static void ready_shared_resources(struct dc *dc, struct dc_state *context)
+{
+ if (dc->current_state->stream_count == 0 &&
+ !dc->debug.disable_stutter)
+ undo_DEGVIDCN10_253_wa(dc);
+
+ /* S0i2 message */
+ if (dc->current_state->stream_count == 0 &&
+ context->stream_count != 0)
+ dcn10_pplib_apply_display_requirements(dc, context);
+}
+
+static void dcn10_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context)
+{
+ int i, be_idx;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ be_idx = -1;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (stream == context->res_ctx.pipe_ctx[i].stream) {
+ be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
+ break;
+ }
+ }
+
+ ASSERT(be_idx != -1);
+
+ if (num_planes == 0) {
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (old_pipe_ctx->stream_res.tg && old_pipe_ctx->stream_res.tg->inst == be_idx) {
+ old_pipe_ctx->stream_res.tg->funcs->set_blank(old_pipe_ctx->stream_res.tg, true);
+ dcn10_power_down_fe(dc, old_pipe_ctx->pipe_idx);
+ }
+ }
+ return;
+ }
+
+ /* reset unused mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
+ continue;
+
+ /*
+ * Powergate reused pipes that are not powergated
+ * fairly hacky right now, using opp_id as indicator
+ */
+
+ if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
+ if (pipe_ctx->plane_res.hubp->opp_id != 0xf && pipe_ctx->stream_res.tg->inst == be_idx) {
+ dcn10_power_down_fe(dc, pipe_ctx->pipe_idx);
+ /*
+ * power down fe will unlock when calling reset, need
+ * to lock it back here. Messy, need rework.
+ */
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ }
+ }
+
+
+ if ((!pipe_ctx->plane_state && old_pipe_ctx->plane_state)
+ || (!pipe_ctx->stream && old_pipe_ctx->stream)) {
+ if (old_pipe_ctx->stream_res.tg->inst != be_idx)
+ continue;
+
+ if (!old_pipe_ctx->top_pipe) {
+ ASSERT(0);
+ continue;
+ }
+
+ /* reset mpc */
+ dc->res_pool->mpc->funcs->remove(
+ dc->res_pool->mpc,
+ &(old_pipe_ctx->stream_res.opp->mpc_tree),
+ old_pipe_ctx->stream_res.opp->inst,
+ old_pipe_ctx->pipe_idx);
+ old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[old_pipe_ctx->plane_res.hubp->mpcc_id] = true;
+
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: apply_ctx disconnect pending on mpcc %d]\n",
+ old_pipe_ctx->mpcc->inst);*/
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ old_pipe_ctx->top_pipe = NULL;
+ old_pipe_ctx->bottom_pipe = NULL;
+ old_pipe_ctx->plane_state = NULL;
+ old_pipe_ctx->stream = NULL;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Reset mpcc for pipe %d\n",
+ old_pipe_ctx->pipe_idx);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* looking for top pipe to program */
+ if (!pipe_ctx->top_pipe)
+ program_all_pipe_in_tree(dc, pipe_ctx, context);
+ }
+
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== Watermark parameters ==============\n"
+ "a.urgent_ns: %d \n"
+ "a.cstate_enter_plus_exit: %d \n"
+ "a.cstate_exit: %d \n"
+ "a.pstate_change: %d \n"
+ "a.pte_meta_urgent: %d \n"
+ "b.urgent_ns: %d \n"
+ "b.cstate_enter_plus_exit: %d \n"
+ "b.cstate_exit: %d \n"
+ "b.pstate_change: %d \n"
+ "b.pte_meta_urgent: %d \n",
+ context->bw.dcn.watermarks.a.urgent_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
+ context->bw.dcn.watermarks.b.urgent_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.b.pte_meta_urgent_ns
+ );
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\nc.urgent_ns: %d \n"
+ "c.cstate_enter_plus_exit: %d \n"
+ "c.cstate_exit: %d \n"
+ "c.pstate_change: %d \n"
+ "c.pte_meta_urgent: %d \n"
+ "d.urgent_ns: %d \n"
+ "d.cstate_enter_plus_exit: %d \n"
+ "d.cstate_exit: %d \n"
+ "d.pstate_change: %d \n"
+ "d.pte_meta_urgent: %d \n"
+ "========================================================\n",
+ context->bw.dcn.watermarks.c.urgent_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
+ context->bw.dcn.watermarks.d.urgent_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.d.pte_meta_urgent_ns
+ );
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+static void dcn10_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
+ if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ smu_req.hard_min_dcefclk_khz =
+ context->bw.dcn.calc_clk.dcfclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz
+ > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+ smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
+ dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz =
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+ context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+ }
+
+ smu_req.display_count = context->stream_count;
+
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ *smu_req_cur = smu_req;
+
+ /* Decrease in freq is increase in period so opposite comparison for dram_ccm */
+ if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us
+ < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) {
+ dc->current_state->bw.dcn.calc_clk.dram_ccm_us =
+ context->bw.dcn.calc_clk.dram_ccm_us;
+ context->bw.dcn.cur_clk.dram_ccm_us =
+ context->bw.dcn.calc_clk.dram_ccm_us;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us
+ < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
+ dc->current_state->bw.dcn.calc_clk.min_active_dram_ccm_us =
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ context->bw.dcn.cur_clk.min_active_dram_ccm_us =
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ }
+ dcn10_pplib_apply_display_requirements(dc, context);
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ /* need to fix this function. not doing the right thing here */
+}
+
+static void set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, int vmin, int vmax)
+{
+ int i = 0;
+ struct drr_params params = {0};
+
+ params.vertical_total_max = vmax;
+ params.vertical_total_min = vmin;
+
+ /* TODO: If multiple pipes are to be supported, you need
+ * some GSL stuff
+ */
+ for (i = 0; i < num_pipes; i++) {
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, &params);
+ }
+}
+
+static void get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position)
+{
+ int i = 0;
+
+ /* TODO: handle pipes > 1
+ */
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
+}
+
+static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events)
+{
+ unsigned int i;
+ unsigned int value = 0;
+
+ if (events->surface_update)
+ value |= 0x80;
+ if (events->cursor_update)
+ value |= 0x2;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+}
+
+static void set_plane_config(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx)
+{
+ /* TODO */
+ program_gamut_remap(pipe_ctx);
+}
+
+static void dcn10_config_stereo_parameters(
+ struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
+{
+ enum view_3d_format view_format = stream->view_format;
+ enum dc_timing_3d_format timing_3d_format =\
+ stream->timing.timing_3d_format;
+ bool non_stereo_timing = false;
+
+ if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
+ timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
+ non_stereo_timing = true;
+
+ if (non_stereo_timing == false &&
+ view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
+
+ flags->PROGRAM_STEREO = 1;
+ flags->PROGRAM_POLARITY = 1;
+ if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
+ timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
+ timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
+ enum display_dongle_type dongle = \
+ stream->sink->link->ddc->dongle_type;
+ if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ flags->DISABLE_STEREO_DP_SYNC = 1;
+ }
+ flags->RIGHT_EYE_POLARITY =\
+ stream->timing.flags.RIGHT_EYE_3D_POLARITY;
+ if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ flags->FRAME_PACKED = 1;
+ }
+
+ return;
+}
+
+static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
+{
+ struct crtc_stereo_flags flags = { 0 };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ dcn10_config_stereo_parameters(stream, &flags);
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp,
+ flags.PROGRAM_STEREO == 1 ? true:false,
+ stream->timing.flags.RIGHT_EYE_3D_POLARITY == 1 ? true:false);
+
+ pipe_ctx->stream_res.tg->funcs->program_stereo(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ &flags);
+
+ return;
+}
+
+static void dcn10_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx)
+{
+ int i;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i]) {
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, i);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i] = false;
+ res_pool->hubps[i]->funcs->set_blank(res_pool->hubps[i], true);
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: wait_for_mpcc finished waiting on mpcc %d]\n",
+ i);*/
+ }
+ }
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+}
+
+static bool dcn10_dummy_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ return true;
+}
+
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (plane_state == NULL)
+ return;
+
+ plane_state->status.is_flip_pending =
+ pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+ pipe_ctx->plane_res.hubp);
+
+ plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
+ if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ tg->funcs->is_stereo_left_eye) {
+ plane_state->status.is_right_eye =
+ !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
+ }
+}
+
+
+
+static const struct hw_sequencer_funcs dcn10_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .set_plane_config = set_plane_config,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .set_input_transfer_func = dcn10_set_input_transfer_func,
+ .set_output_transfer_func = dcn10_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .update_info_frame = dce110_update_info_frame,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .power_down_front_end = dcn10_power_down_fe,
+ .power_on_front_end = dcn10_power_on_fe,
+ .pipe_control_lock = dcn10_pipe_control_lock,
+ .set_bandwidth = dcn10_set_bandwidth,
+ .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .ready_shared_resources = ready_shared_resources,
+ .optimize_shared_resources = optimize_shared_resources,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control
+};
+
+
+void dcn10_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn10_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
new file mode 100644
index 000000000000..ca53dc1cc19b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -0,0 +1,38 @@
+/*
+* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN10_H__
+#define __DC_HWSS_DCN10_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dcn10_hw_sequencer_construct(struct dc *dc);
+extern void fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
new file mode 100644
index 000000000000..08db1e6b5166
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn10_ipp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (ippn10->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ippn10->ipp_shift->field_name, ippn10->ipp_mask->field_name
+
+#define CTX \
+ ippn10->base.ctx
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static void dcn10_ipp_destroy(struct input_pixel_processor **ipp)
+{
+ kfree(TO_DCN10_IPP(*ipp));
+ *ipp = NULL;
+}
+
+static const struct ipp_funcs dcn10_ipp_funcs = {
+ .ipp_destroy = dcn10_ipp_destroy
+};
+
+void dcn10_ipp_construct(
+ struct dcn10_ipp *ippn10,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn10_ipp_registers *regs,
+ const struct dcn10_ipp_shift *ipp_shift,
+ const struct dcn10_ipp_mask *ipp_mask)
+{
+ ippn10->base.ctx = ctx;
+ ippn10->base.inst = inst;
+ ippn10->base.funcs = &dcn10_ipp_funcs;
+
+ ippn10->regs = regs;
+ ippn10->ipp_shift = ipp_shift;
+ ippn10->ipp_mask = ipp_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
new file mode 100644
index 000000000000..d7b5bd20352a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN10_IPP_H_
+#define _DCN10_IPP_H_
+
+#include "ipp.h"
+
+#define TO_DCN10_IPP(ipp)\
+ container_of(ipp, struct dcn10_ipp, base)
+
+#define IPP_REG_LIST_DCN(id) \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+
+#define IPP_REG_LIST_DCN10(id) \
+ IPP_REG_LIST_DCN(id), \
+ SRI(CURSOR_SETTINS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR, id), \
+ SRI(CURSOR_SIZE, CURSOR, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR, id)
+
+#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+
+#define IPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define IPP_MASK_SH_LIST_DCN(mask_sh) \
+ IPP_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, ALPHA_EN, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh)
+
+#define IPP_MASK_SH_LIST_DCN10(mask_sh) \
+ IPP_MASK_SH_LIST_DCN(mask_sh),\
+ IPP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ IPP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh)
+
+#define IPP_DCN10_REG_FIELD_LIST(type) \
+ type CNVC_SURFACE_PIXEL_FORMAT; \
+ type CNVC_BYPASS; \
+ type ALPHA_EN; \
+ type FORMAT_EXPANSION_MODE; \
+ type CURSOR0_DST_Y_OFFSET; \
+ type CURSOR0_CHUNK_HDL_ADJUST; \
+ type CUR0_MODE; \
+ type CUR0_COLOR0; \
+ type CUR0_COLOR1; \
+ type CUR0_EXPANSION_MODE; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CUR0_ENABLE; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+ type OUTPUT_FP
+
+struct dcn10_ipp_shift {
+ IPP_DCN10_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn10_ipp_mask {
+ IPP_DCN10_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_ipp_registers {
+ uint32_t DPP_CONTROL;
+ uint32_t CURSOR_SETTINS;
+ uint32_t CURSOR_SETTINGS;
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT;
+ uint32_t CURSOR0_CONTROL;
+ uint32_t CURSOR0_COLOR0;
+ uint32_t CURSOR0_COLOR1;
+ uint32_t FORMAT_CONTROL;
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR_SIZE;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR_POSITION;
+ uint32_t CURSOR_HOT_SPOT;
+ uint32_t CURSOR_DST_OFFSET;
+};
+
+struct dcn10_ipp {
+ struct input_pixel_processor base;
+
+ const struct dcn10_ipp_registers *regs;
+ const struct dcn10_ipp_shift *ipp_shift;
+ const struct dcn10_ipp_mask *ipp_mask;
+
+ struct dc_cursor_attributes curs_attr;
+};
+
+void dcn10_ipp_construct(struct dcn10_ipp *ippn10,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn10_ipp_registers *regs,
+ const struct dcn10_ipp_shift *ipp_shift,
+ const struct dcn10_ipp_mask *ipp_mask);
+
+#endif /* _DCN10_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
new file mode 100644
index 000000000000..76573e1f5b01
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_mpc.h"
+#include "dc.h"
+#include "mem_input.h"
+
+#define REG(reg)\
+ mpc10->mpc_regs->reg
+
+#define CTX \
+ mpc10->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name
+
+#define MODE_TOP_ONLY 1
+#define MODE_BLEND 3
+#define BLND_PP_ALPHA 0
+#define BLND_GLOBAL_ALPHA 2
+
+
+static void mpc10_set_bg_color(
+ struct dcn10_mpc *mpc10,
+ struct tg_color *bg_color,
+ int id)
+{
+ /* mpc color is 12 bit. tg_color is 10 bit */
+ /* todo: might want to use 16 bit to represent color and have each
+ * hw block translate to correct color depth.
+ */
+ uint32_t bg_r_cr = bg_color->color_r_cr << 2;
+ uint32_t bg_g_y = bg_color->color_g_y << 2;
+ uint32_t bg_b_cb = bg_color->color_b_cb << 2;
+
+ REG_SET(MPCC_BG_R_CR[id], 0,
+ MPCC_BG_R_CR, bg_r_cr);
+ REG_SET(MPCC_BG_G_Y[id], 0,
+ MPCC_BG_G_Y, bg_g_y);
+ REG_SET(MPCC_BG_B_CB[id], 0,
+ MPCC_BG_B_CB, bg_b_cb);
+}
+
+void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << id));
+ REG_WAIT(MPCC_STATUS[id],
+ MPCC_IDLE, 1,
+ 1, 100000);
+}
+
+static int mpc10_get_idle_mpcc_id(struct dcn10_mpc *mpc10)
+{
+ int i;
+ int last_free_mpcc_id = -1;
+
+ for (i = 0; i < mpc10->num_mpcc; i++) {
+ uint32_t is_idle = 0;
+
+ if (mpc10->mpcc_in_use_mask & 1 << i)
+ continue;
+
+ last_free_mpcc_id = i;
+ REG_GET(MPCC_STATUS[i], MPCC_IDLE, &is_idle);
+ if (is_idle)
+ return i;
+ }
+
+ /* This assert should never trigger, we have mpcc leak if it does */
+ ASSERT(last_free_mpcc_id != -1);
+
+ mpc10_assert_idle_mpcc(&mpc10->base, last_free_mpcc_id);
+ return last_free_mpcc_id;
+}
+
+static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int id)
+{
+ unsigned int top_sel, mpc_busy, mpc_idle;
+
+ REG_GET(MPCC_TOP_SEL[id],
+ MPCC_TOP_SEL, &top_sel);
+
+ if (top_sel == 0xf) {
+ REG_GET_2(MPCC_STATUS[id],
+ MPCC_BUSY, &mpc_busy,
+ MPCC_IDLE, &mpc_idle);
+
+ ASSERT(mpc_busy == 0);
+ ASSERT(mpc_idle == 1);
+ }
+}
+
+void mpc10_mpcc_remove(
+ struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int dpp_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+
+ /* find z_idx for the dpp to be removed */
+ for (z_idx = 0; z_idx < tree_cfg->num_pipes; z_idx++)
+ if (tree_cfg->dpp[z_idx] == dpp_id)
+ break;
+
+ if (z_idx == tree_cfg->num_pipes) {
+ /* In case of resume from S3/S4, remove mpcc from bios left over */
+ REG_SET(MPCC_OPP_ID[dpp_id], 0,
+ MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_TOP_SEL[dpp_id], 0,
+ MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[dpp_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ return;
+ }
+
+ mpcc_id = tree_cfg->mpcc[z_idx];
+
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0,
+ MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
+ MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+
+ if (z_idx > 0) {
+ int top_mpcc_id = tree_cfg->mpcc[z_idx - 1];
+
+ if (z_idx + 1 < tree_cfg->num_pipes)
+ /* mpcc to be removed is in the middle of the tree */
+ REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
+ MPCC_BOT_SEL, tree_cfg->mpcc[z_idx + 1]);
+ else {
+ /* mpcc to be removed is at the bottom of the tree */
+ REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[top_mpcc_id],
+ MPCC_MODE, MODE_TOP_ONLY);
+ }
+ } else if (tree_cfg->num_pipes > 1)
+ /* mpcc to be removed is at the top of the tree */
+ REG_SET(MUX[opp_id], 0,
+ MPC_OUT_MUX, tree_cfg->mpcc[z_idx + 1]);
+ else
+ /* mpcc to be removed is the only one in the tree */
+ REG_SET(MUX[opp_id], 0, MPC_OUT_MUX, 0xf);
+
+ /* mark this mpcc as not in use */
+ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
+ tree_cfg->num_pipes--;
+ for (; z_idx < tree_cfg->num_pipes; z_idx++) {
+ tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx + 1];
+ tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx + 1];
+ }
+ tree_cfg->dpp[tree_cfg->num_pipes] = 0xdeadbeef;
+ tree_cfg->mpcc[tree_cfg->num_pipes] = 0xdeadbeef;
+}
+
+static void mpc10_add_to_tree_cfg(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_mode = MODE_TOP_ONLY;
+ int position = cfg->z_index;
+ struct mpc_tree_cfg *tree_cfg = cfg->tree_cfg;
+ int alpha_blnd_mode = cfg->per_pixel_alpha ?
+ BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+ int z_idx;
+
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0,
+ MPCC_OPP_ID, cfg->opp_id);
+
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
+ MPCC_TOP_SEL, cfg->dpp_id);
+
+ if (position == 0) {
+ /* idle dpp/mpcc is added to the top layer of tree */
+
+ if (tree_cfg->num_pipes > 0) {
+ /* get instance of previous top mpcc */
+ int prev_top_mpcc_id = tree_cfg->mpcc[0];
+
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, prev_top_mpcc_id);
+ mpcc_mode = MODE_BLEND;
+ }
+
+ /* opp will get new output. from new added mpcc */
+ REG_SET(MUX[cfg->opp_id], 0, MPC_OUT_MUX, mpcc_id);
+
+ } else if (position == tree_cfg->num_pipes) {
+ /* idle dpp/mpcc is added to the bottom layer of tree */
+
+ /* get instance of previous bottom mpcc, set to middle layer */
+ int prev_bot_mpcc_id = tree_cfg->mpcc[tree_cfg->num_pipes - 1];
+
+ REG_SET(MPCC_BOT_SEL[prev_bot_mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[prev_bot_mpcc_id],
+ MPCC_MODE, MODE_BLEND);
+
+ /* mpcc_id become new bottom mpcc*/
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+
+ } else {
+ /* idle dpp/mpcc is added to middle of tree */
+ int above_mpcc_id = tree_cfg->mpcc[position - 1];
+ int below_mpcc_id = tree_cfg->mpcc[position];
+
+ /* mpcc above new mpcc_id has new bottom mux*/
+ REG_SET(MPCC_BOT_SEL[above_mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[above_mpcc_id],
+ MPCC_MODE, MODE_BLEND);
+
+ /* mpcc_id bottom mux is from below mpcc*/
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, below_mpcc_id);
+ mpcc_mode = MODE_BLEND;
+ }
+
+ REG_SET_4(MPCC_CONTROL[mpcc_id], 0xffffffff,
+ MPCC_MODE, mpcc_mode,
+ MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, false);
+
+ /* update mpc_tree_cfg with new mpcc */
+ for (z_idx = tree_cfg->num_pipes; z_idx > position; z_idx--) {
+ tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx - 1];
+ tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx - 1];
+ }
+ tree_cfg->dpp[position] = cfg->dpp_id;
+ tree_cfg->mpcc[position] = mpcc_id;
+ tree_cfg->num_pipes++;
+}
+
+int mpc10_mpcc_add(struct mpc *mpc, struct mpcc_cfg *cfg)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+
+ ASSERT(cfg->z_index < mpc10->num_mpcc);
+
+ /* check in dpp already exists in mpc tree */
+ for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
+ if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
+ break;
+ if (z_idx == cfg->tree_cfg->num_pipes) {
+ ASSERT(cfg->z_index <= cfg->tree_cfg->num_pipes);
+ mpcc_id = mpc10_get_idle_mpcc_id(mpc10);
+
+ /*
+ * TODO: remove hack
+ * Note: currently there is a bug in init_hw such that
+ * on resume from hibernate, BIOS sets up MPCC0, and
+ * we do mpcc_remove but the mpcc cannot go to idle
+ * after remove. This cause us to pick mpcc1 here,
+ * which causes a pstate hang for yet unknown reason.
+ */
+ mpcc_id = cfg->dpp_id;
+ /* end hack*/
+
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
+
+ if (mpc->ctx->dc->debug.sanity_checks)
+ mpc10_assert_mpcc_idle_before_connect(mpc10, mpcc_id);
+ } else {
+ ASSERT(cfg->z_index < cfg->tree_cfg->num_pipes);
+ mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+ mpc10_mpcc_remove(mpc, cfg->tree_cfg, cfg->opp_id, cfg->dpp_id);
+ }
+
+ /* add dpp/mpcc pair to mpc_tree_cfg and update mpcc registers */
+ mpc10_add_to_tree_cfg(mpc, cfg, mpcc_id);
+
+ /* set background color */
+ mpc10_set_bg_color(mpc10, &cfg->black_color, mpcc_id);
+
+ /* mark this mpcc as in use */
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+
+ return mpcc_id;
+}
+
+void mpc10_update_blend_mode(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+ int alpha_blnd_mode = cfg->per_pixel_alpha ?
+ BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+
+ /* find z_idx for the dpp that requires blending mode update*/
+ for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
+ if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
+ break;
+
+ ASSERT(z_idx < cfg->tree_cfg->num_pipes);
+ mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+
+ REG_UPDATE_2(MPCC_CONTROL[mpcc_id],
+ MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha);
+}
+
+const struct mpc_funcs dcn10_mpc_funcs = {
+ .add = mpc10_mpcc_add,
+ .remove = mpc10_mpcc_remove,
+ .wait_for_idle = mpc10_assert_idle_mpcc,
+ .update_blend_mode = mpc10_update_blend_mode,
+};
+
+void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
+ struct dc_context *ctx,
+ const struct dcn_mpc_registers *mpc_regs,
+ const struct dcn_mpc_shift *mpc_shift,
+ const struct dcn_mpc_mask *mpc_mask,
+ int num_mpcc)
+{
+ mpc10->base.ctx = ctx;
+
+ mpc10->base.funcs = &dcn10_mpc_funcs;
+
+ mpc10->mpc_regs = mpc_regs;
+ mpc10->mpc_shift = mpc_shift;
+ mpc10->mpc_mask = mpc_mask;
+
+ mpc10->mpcc_in_use_mask = 0;
+ mpc10->num_mpcc = num_mpcc;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
new file mode 100644
index 000000000000..683ce4aaa76e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -0,0 +1,138 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_DCN10_H__
+#define __DC_MPCC_DCN10_H__
+
+#include "mpc.h"
+
+#define TO_DCN10_MPC(mpc_base) \
+ container_of(mpc_base, struct dcn10_mpc, base)
+
+#define MAX_MPCC 6
+#define MAX_OPP 6
+
+#define MPC_COMMON_REG_LIST_DCN1_0(inst) \
+ SRII(MPCC_TOP_SEL, MPCC, inst),\
+ SRII(MPCC_BOT_SEL, MPCC, inst),\
+ SRII(MPCC_CONTROL, MPCC, inst),\
+ SRII(MPCC_STATUS, MPCC, inst),\
+ SRII(MPCC_OPP_ID, MPCC, inst),\
+ SRII(MPCC_BG_G_Y, MPCC, inst),\
+ SRII(MPCC_BG_R_CR, MPCC, inst),\
+ SRII(MPCC_BG_B_CB, MPCC, inst),\
+ SRII(MPCC_BG_B_CB, MPCC, inst)
+
+#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
+ SRII(MUX, MPC_OUT, inst)
+
+#define MPC_COMMON_REG_VARIABLE_LIST \
+ uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
+ uint32_t MPCC_BOT_SEL[MAX_MPCC]; \
+ uint32_t MPCC_CONTROL[MAX_MPCC]; \
+ uint32_t MPCC_STATUS[MAX_MPCC]; \
+ uint32_t MPCC_OPP_ID[MAX_MPCC]; \
+ uint32_t MPCC_BG_G_Y[MAX_MPCC]; \
+ uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
+ uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
+ uint32_t MUX[MAX_OPP];
+
+#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
+ SF(MPCC0_MPCC_BOT_SEL, MPCC_BOT_SEL, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\
+ SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\
+ SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\
+ SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\
+ SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+
+#define MPC_REG_FIELD_LIST(type) \
+ type MPCC_TOP_SEL;\
+ type MPCC_BOT_SEL;\
+ type MPCC_MODE;\
+ type MPCC_ALPHA_BLND_MODE;\
+ type MPCC_ALPHA_MULTIPLIED_MODE;\
+ type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\
+ type MPCC_IDLE;\
+ type MPCC_BUSY;\
+ type MPCC_OPP_ID;\
+ type MPCC_BG_G_Y;\
+ type MPCC_BG_R_CR;\
+ type MPCC_BG_B_CB;\
+ type MPC_OUT_MUX;
+
+struct dcn_mpc_registers {
+ MPC_COMMON_REG_VARIABLE_LIST
+};
+
+struct dcn_mpc_shift {
+ MPC_REG_FIELD_LIST(uint8_t)
+};
+
+struct dcn_mpc_mask {
+ MPC_REG_FIELD_LIST(uint32_t)
+};
+
+struct dcn10_mpc {
+ struct mpc base;
+
+ int mpcc_in_use_mask;
+ int num_mpcc;
+ const struct dcn_mpc_registers *mpc_regs;
+ const struct dcn_mpc_shift *mpc_shift;
+ const struct dcn_mpc_mask *mpc_mask;
+};
+
+void dcn10_mpc_construct(struct dcn10_mpc *mpcc10,
+ struct dc_context *ctx,
+ const struct dcn_mpc_registers *mpc_regs,
+ const struct dcn_mpc_shift *mpc_shift,
+ const struct dcn_mpc_mask *mpc_mask,
+ int num_mpcc);
+
+int mpc10_mpcc_add(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg);
+
+void mpc10_mpcc_remove(
+ struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int dpp_id);
+
+void mpc10_assert_idle_mpcc(
+ struct mpc *mpc,
+ int id);
+
+void mpc10_update_blend_mode(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
new file mode 100644
index 000000000000..a136f70b7a3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn10_opp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (oppn10->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ oppn10->opp_shift->field_name, oppn10->opp_mask->field_name
+
+#define CTX \
+ oppn10->base.ctx
+
+
+
+/************* FORMATTER ************/
+
+/**
+ * set_truncation
+ * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
+ * 2) enable truncation
+ * 3) HW remove 12bit FMT support for DCE11 power saving reason.
+ */
+static void set_truncation(
+ struct dcn10_opp *oppn10,
+ const struct bit_depth_reduction_params *params)
+{
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, params->flags.TRUNCATE_ENABLED,
+ FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH,
+ FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE);
+}
+
+static void set_spatial_dither(
+ struct dcn10_opp *oppn10,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable spatial (random) dithering*/
+ REG_UPDATE_7(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_EN, 0,
+ FMT_SPATIAL_DITHER_MODE, 0,
+ FMT_SPATIAL_DITHER_DEPTH, 0,
+ FMT_TEMPORAL_DITHER_EN, 0,
+ FMT_HIGHPASS_RANDOM_ENABLE, 0,
+ FMT_FRAME_RANDOM_ENABLE, 0,
+ FMT_RGB_RANDOM_ENABLE, 0);
+
+
+ /* only use FRAME_COUNTER_MAX if frameRandom == 1*/
+ if (params->flags.FRAME_RANDOM == 1) {
+ if (params->flags.SPATIAL_DITHER_DEPTH == 0 || params->flags.SPATIAL_DITHER_DEPTH == 1) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2);
+ } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1);
+ } else {
+ return;
+ }
+ } else {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0);
+ }
+
+ /*Set seed for random values for
+ * spatial dithering for R,G,B channels*/
+
+ REG_SET(FMT_DITHER_RAND_R_SEED, 0,
+ FMT_RAND_R_SEED, params->r_seed_value);
+
+ REG_SET(FMT_DITHER_RAND_G_SEED, 0,
+ FMT_RAND_G_SEED, params->g_seed_value);
+
+ REG_SET(FMT_DITHER_RAND_B_SEED, 0,
+ FMT_RAND_B_SEED, params->b_seed_value);
+
+ /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero
+ * offset for the R/Cr channel, lower 4LSB
+ * is forced to zeros. Typically set to 0
+ * RGB and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero
+ * offset for the G/Y channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB
+ * and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero
+ * offset for the B/Cb channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB and
+ * 0x80000 YCbCr.
+ */
+
+ REG_UPDATE_6(FMT_BIT_DEPTH_CONTROL,
+ /*Enable spatial dithering*/
+ FMT_SPATIAL_DITHER_EN, params->flags.SPATIAL_DITHER_ENABLED,
+ /* Set spatial dithering mode
+ * (default is Seed patterrn AAAA...)
+ */
+ FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE,
+ /*Set spatial dithering bit depth*/
+ FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH,
+ /*Disable High pass filter*/
+ FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM,
+ /*Reset only at startup*/
+ FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM,
+ /*Set RGB data dithered with x^28+x^3+1*/
+ FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
+}
+
+static void oppn10_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ set_truncation(oppn10, params);
+ set_spatial_dither(oppn10, params);
+ /* TODO
+ * set_temporal_dither(oppn10, params);
+ */
+}
+
+/**
+ * set_pixel_encoding
+ *
+ * Set Pixel Encoding
+ * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
+ * 1: YCbCr 4:2:2
+ */
+static void set_pixel_encoding(
+ struct dcn10_opp *oppn10,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ switch (params->pixel_encoding) {
+
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * Set Clamping
+ * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
+ * 1 for 8 bpc
+ * 2 for 10 bpc
+ * 3 for 12 bpc
+ * 7 for programable
+ * 2) Enable clamp if Limited range requested
+ */
+static void opp_set_clamping(
+ struct dcn10_opp *oppn10,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 0,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+
+ switch (params->clamping_level) {
+ case CLAMPING_FULL_RANGE:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+ break;
+ case CLAMPING_LIMITED_RANGE_8BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 1);
+ break;
+ case CLAMPING_LIMITED_RANGE_10BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 2);
+
+ break;
+ case CLAMPING_LIMITED_RANGE_12BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 3);
+ break;
+ case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
+ /* TODO */
+ default:
+ break;
+ }
+
+}
+
+static void oppn10_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 0,
+ FMT_DYNAMIC_EXP_MODE, 0);
+
+ /*00 - 10-bit -> 12-bit dynamic expansion*/
+ /*01 - 8-bit -> 12-bit dynamic expansion*/
+ if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ signal == SIGNAL_TYPE_VIRTUAL) {
+ switch (color_dpth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 1);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ opp_set_clamping(oppn10, params);
+ set_pixel_encoding(oppn10, params);
+}
+
+static void oppn10_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ REG_UPDATE(FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, 0);
+
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+ oppn10_program_bit_depth_reduction(
+ opp,
+ fmt_bit_depth);
+
+ opp_program_clamping_and_pixel_encoding(
+ opp,
+ clamping);
+
+ return;
+}
+
+
+
+static void oppn10_set_stereo_polarity(
+ struct output_pixel_processor *opp,
+ bool enable, bool rightEyePolarity)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, enable);
+}
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static void dcn10_opp_destroy(struct output_pixel_processor **opp)
+{
+ kfree(TO_DCN10_OPP(*opp));
+ *opp = NULL;
+}
+
+static struct opp_funcs dcn10_opp_funcs = {
+ .opp_set_dyn_expansion = oppn10_set_dyn_expansion,
+ .opp_program_fmt = oppn10_program_fmt,
+ .opp_program_bit_depth_reduction = oppn10_program_bit_depth_reduction,
+ .opp_set_stereo_polarity = oppn10_set_stereo_polarity,
+ .opp_destroy = dcn10_opp_destroy
+};
+
+void dcn10_opp_construct(struct dcn10_opp *oppn10,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn10_opp_registers *regs,
+ const struct dcn10_opp_shift *opp_shift,
+ const struct dcn10_opp_mask *opp_mask)
+{
+ int i;
+ oppn10->base.ctx = ctx;
+ oppn10->base.inst = inst;
+ oppn10->base.funcs = &dcn10_opp_funcs;
+
+ oppn10->base.mpc_tree.dpp[0] = inst;
+ oppn10->base.mpc_tree.mpcc[0] = inst;
+ oppn10->base.mpc_tree.num_pipes = 1;
+ for (i = 0; i < MAX_PIPES; i++)
+ oppn10->base.mpcc_disconnect_pending[i] = false;
+
+ oppn10->regs = regs;
+ oppn10->opp_shift = opp_shift;
+ oppn10->opp_mask = opp_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
new file mode 100644
index 000000000000..790ce6014832
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -0,0 +1,186 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCN10_H__
+#define __DC_OPP_DCN10_H__
+
+#include "opp.h"
+
+#define TO_DCN10_OPP(opp)\
+ container_of(opp, struct dcn10_opp, base)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define OPP_REG_LIST_DCN(id) \
+ SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \
+ SRI(FMT_CONTROL, FMT, id), \
+ SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI(FMT_CLAMP_CNTL, FMT, id), \
+ SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id)
+
+#define OPP_REG_LIST_DCN10(id) \
+ OPP_REG_LIST_DCN(id)
+
+#define OPP_MASK_SH_LIST_DCN(mask_sh) \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh)
+
+#define OPP_MASK_SH_LIST_DCN10(mask_sh) \
+ OPP_MASK_SH_LIST_DCN(mask_sh)
+
+#define OPP_DCN10_REG_FIELD_LIST(type) \
+ type DPG_EN; \
+ type DPG_MODE; \
+ type DPG_VRES; \
+ type DPG_HRES; \
+ type DPG_COLOUR0_R_CR; \
+ type DPG_COLOUR1_R_CR; \
+ type DPG_COLOUR0_B_CB; \
+ type DPG_COLOUR1_B_CB; \
+ type DPG_COLOUR0_G_Y; \
+ type DPG_COLOUR1_G_Y; \
+ type CM_OCSC_C11; \
+ type CM_OCSC_C12; \
+ type CM_OCSC_C13; \
+ type CM_OCSC_C14; \
+ type CM_OCSC_C21; \
+ type CM_OCSC_C22; \
+ type CM_OCSC_C23; \
+ type CM_OCSC_C24; \
+ type CM_OCSC_C31; \
+ type CM_OCSC_C32; \
+ type CM_OCSC_C33; \
+ type CM_OCSC_C34; \
+ type CM_COMB_C11; \
+ type CM_COMB_C12; \
+ type CM_COMB_C13; \
+ type CM_COMB_C14; \
+ type CM_COMB_C21; \
+ type CM_COMB_C22; \
+ type CM_COMB_C23; \
+ type CM_COMB_C24; \
+ type CM_COMB_C31; \
+ type CM_COMB_C32; \
+ type CM_COMB_C33; \
+ type CM_COMB_C34; \
+ type FMT_TRUNCATE_EN; \
+ type FMT_TRUNCATE_DEPTH; \
+ type FMT_TRUNCATE_MODE; \
+ type FMT_SPATIAL_DITHER_EN; \
+ type FMT_SPATIAL_DITHER_MODE; \
+ type FMT_SPATIAL_DITHER_DEPTH; \
+ type FMT_TEMPORAL_DITHER_EN; \
+ type FMT_HIGHPASS_RANDOM_ENABLE; \
+ type FMT_FRAME_RANDOM_ENABLE; \
+ type FMT_RGB_RANDOM_ENABLE; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \
+ type FMT_RAND_R_SEED; \
+ type FMT_RAND_G_SEED; \
+ type FMT_RAND_B_SEED; \
+ type FMT_PIXEL_ENCODING; \
+ type FMT_CLAMP_DATA_EN; \
+ type FMT_CLAMP_COLOR_FORMAT; \
+ type FMT_DYNAMIC_EXP_EN; \
+ type FMT_DYNAMIC_EXP_MODE; \
+ type FMT_MAP420MEM_PWR_FORCE; \
+ type FMT_STEREOSYNC_OVERRIDE
+
+struct dcn10_opp_shift {
+ OPP_DCN10_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn10_opp_mask {
+ OPP_DCN10_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_opp_registers {
+ uint32_t DPG_CONTROL;
+ uint32_t DPG_COLOUR_B_CB;
+ uint32_t DPG_COLOUR_G_Y;
+ uint32_t DPG_COLOUR_R_CR;
+ uint32_t CM_OCSC_C11_C12;
+ uint32_t CM_OCSC_C13_C14;
+ uint32_t CM_OCSC_C21_C22;
+ uint32_t CM_OCSC_C23_C24;
+ uint32_t CM_OCSC_C31_C32;
+ uint32_t CM_OCSC_C33_C34;
+ uint32_t CM_COMB_C11_C12;
+ uint32_t CM_COMB_C13_C14;
+ uint32_t CM_COMB_C21_C22;
+ uint32_t CM_COMB_C23_C24;
+ uint32_t CM_COMB_C31_C32;
+ uint32_t CM_COMB_C33_C34;
+ uint32_t FMT_BIT_DEPTH_CONTROL;
+ uint32_t FMT_CONTROL;
+ uint32_t FMT_DITHER_RAND_R_SEED;
+ uint32_t FMT_DITHER_RAND_G_SEED;
+ uint32_t FMT_DITHER_RAND_B_SEED;
+ uint32_t FMT_CLAMP_CNTL;
+ uint32_t FMT_DYNAMIC_EXP_CNTL;
+ uint32_t FMT_MAP420_MEMORY_CONTROL;
+};
+
+struct dcn10_opp {
+ struct output_pixel_processor base;
+
+ const struct dcn10_opp_registers *regs;
+ const struct dcn10_opp_shift *opp_shift;
+ const struct dcn10_opp_mask *opp_mask;
+
+ bool is_write_to_ram_a_safe;
+};
+
+void dcn10_opp_construct(struct dcn10_opp *oppn10,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn10_opp_registers *regs,
+ const struct dcn10_opp_shift *opp_shift,
+ const struct dcn10_opp_mask *opp_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
new file mode 100644
index 000000000000..9fc8f827f2a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -0,0 +1,1468 @@
+/*
+* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn10/dcn10_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn10/dcn10_mpc.h"
+#include "irq/dcn10/irq_service_dcn10.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_opp.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+#include "dcn10_hubp.h"
+
+#include "vega10/soc15ip.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+
+#include "raven1/NBIO/nbio_7_0_offset.h"
+
+#include "raven1/MMHUB/mmhub_9_1_offset.h"
+#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
+
+#include "reg_helper.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#endif
+
+
+enum dcn10_clk_src_array_id {
+ DCN10_CLK_SRC_PLL0,
+ DCN10_CLK_SRC_PLL1,
+ DCN10_CLK_SRC_PLL2,
+ DCN10_CLK_SRC_PLL3,
+ DCN10_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expend register list macro defined in HW object header file */
+
+/* DCN */
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIF_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* macros to expend register list macro defined in HW object header file
+ * end *********************/
+
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN10_REG_LIST(0)
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN10(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+ .AFMT_AVI_INFO0 = 0,\
+ .AFMT_AVI_INFO1 = 0,\
+ .AFMT_AVI_INFO2 = 0,\
+ .AFMT_AVI_INFO3 = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
+ .AFMT_GENERIC0_UPDATE = 0,
+ .AFMT_GENERIC2_UPDATE = 0,
+ .DP_DYN_RANGE = 0,
+ .DP_YCBCR_RANGE = 0,
+ .HDMI_AVI_INFO_SEND = 0,
+ .HDMI_AVI_INFO_CONT = 0,
+ .HDMI_AVI_INFO_LINE = 0,
+ .DP_SEC_AVI_ENABLE = 0,
+ .AFMT_AVI_INFO_VERSION = 0
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn10_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+};
+
+static const struct dcn10_ipp_shift ipp_shift = {
+ IPP_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn10_ipp_mask ipp_mask = {
+ IPP_MASK_SH_LIST_DCN10(_MASK),
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn10_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+};
+
+static const struct dcn10_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn10_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN10(_MASK),
+};
+
+#define tf_regs(id)\
+[id] = {\
+ TF_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn_dpp_registers tf_regs[] = {
+ tf_regs(0),
+ tf_regs(1),
+ tf_regs(2),
+ tf_regs(3),
+};
+
+static const struct dcn_dpp_shift tf_shift = {
+ TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
+};
+
+static const struct dcn_dpp_mask tf_mask = {
+ TF_REG_LIST_SH_MASK_DCN10(_MASK),
+};
+
+static const struct dcn_mpc_registers mpc_regs = {
+ MPC_COMMON_REG_LIST_DCN1_0(0),
+ MPC_COMMON_REG_LIST_DCN1_0(1),
+ MPC_COMMON_REG_LIST_DCN1_0(2),
+ MPC_COMMON_REG_LIST_DCN1_0(3),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
+};
+
+static const struct dcn_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dcn_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+};
+
+#define tg_regs(id)\
+[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
+
+static const struct dcn_tg_registers tg_regs[] = {
+ tg_regs(0),
+ tg_regs(1),
+ tg_regs(2),
+ tg_regs(3),
+};
+
+static const struct dcn_tg_shift tg_shift = {
+ TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dcn_tg_mask tg_mask = {
+ TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
+};
+
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define mi_regs(id)\
+[id] = {\
+ MI_REG_LIST_DCN10(id)\
+}
+
+
+static const struct dcn_mi_registers mi_regs[] = {
+ mi_regs(0),
+ mi_regs(1),
+ mi_regs(2),
+ mi_regs(3),
+};
+
+static const struct dcn_mi_shift mi_shift = {
+ MI_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn_mi_mask mi_mask = {
+ MI_MASK_SH_LIST_DCN10(_MASK)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
+};
+
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 4,
+ .num_video_plane = 4,
+ .num_audio = 4,
+ .num_stream_encoder = 4,
+ .num_pll = 4,
+};
+
+static const struct dc_debug debug_defaults_drv = {
+ .sanity_checks = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+
+ .min_disp_clk_khz = 300000,
+
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = false,
+ .pplib_wm_report_mode = WM_REPORT_DEFAULT,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .force_single_disp_pipe_split = true,
+ .disable_dcc = DCC_ENABLE,
+ .voltage_align_fclk = true,
+ .disable_stereo_support = true,
+ .vsr_support = true,
+ .performance_trace = false,
+};
+
+static const struct dc_debug debug_defaults_diags = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = true,
+ .clock_trace = true,
+ .disable_stutter = true,
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = true
+};
+
+static void dcn10_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN10_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn10_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn10_dpp *dpp =
+ kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);
+
+ if (!dpp)
+ return NULL;
+
+ dpp1_construct(dpp, ctx, inst,
+ &tf_regs[inst], &tf_shift, &tf_mask);
+ return &dpp->base;
+}
+
+static struct input_pixel_processor *dcn10_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_ipp *ipp =
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn10_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+
+static struct output_pixel_processor *dcn10_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_opp *opp =
+ kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn10_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
+{
+ struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
+ GFP_KERNEL);
+
+ if (!mpc10)
+ return NULL;
+
+ dcn10_mpc_construct(mpc10, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ 4);
+
+ return &mpc10->base;
+}
+
+static struct timing_generator *dcn10_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct dcn10_timing_generator *tgn10 =
+ kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &tg_regs[instance];
+ tgn10->tg_shift = &tg_shift;
+ tgn10->tg_mask = &tg_mask;
+
+ dcn10_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dcn10_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+
+ return &enc110->base;
+}
+
+struct clock_source *dcn10_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct stream_encoder *dcn10_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN1_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN1_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dcn10_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dcn10_stream_encoder_create,
+ .create_hwseq = dcn10_hwseq_create,
+};
+
+static const struct resource_create_funcs res_create_maximus_funcs = {
+ .read_dce_straps = NULL,
+ .create_audio = NULL,
+ .create_stream_encoder = NULL,
+ .create_hwseq = dcn10_hwseq_create,
+};
+
+void dcn10_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
+{
+ struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+
+ if (!pp_smu)
+ return pp_smu;
+
+ dm_pp_get_funcs_rv(ctx, pp_smu);
+ return pp_smu;
+}
+
+static void destruct(struct dcn10_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ /* TODO: free dcn version of stream encoder once implemented
+ * rather than using virtual stream encoder
+ */
+ kfree(pool->base.stream_enc[i]);
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN10_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.dpps[i] != NULL)
+ dcn10_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++)
+ kfree(pool->base.stream_enc[i]);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn10_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ kfree(pool->base.pp_smu);
+}
+
+static struct hubp *dcn10_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn10_hubp *hubp1 =
+ kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);
+
+ if (!hubp1)
+ return NULL;
+
+ dcn10_hubp_construct(hubp1, ctx, inst,
+ &mi_regs[inst], &mi_shift, &mi_mask);
+ return &hubp1->base;
+}
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
+ pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+ /* TODO: un-hardcode*/
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pixel_clk_params->requested_pix_clk /= 2;
+
+}
+
+static void build_clamping_params(struct dc_stream_state *stream)
+{
+ stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
+ stream->clamping.c_depth = stream->timing.display_color_depth;
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+}
+
+static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ build_clamping_params(pipe_ctx->stream);
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ /*TODO Seems unneeded anymore */
+ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
+ if (stream != NULL && old_context->streams[i] != NULL) {
+ todo: shouldn't have to copy missing parameter here
+ resource_build_bit_depth_reduction_params(stream,
+ &stream->bit_depth_params);
+ stream->clamping.pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(stream,
+ &stream->bit_depth_params);
+ build_clamping_params(stream);
+
+ continue;
+ }
+ }
+ */
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ build_pipe_hw_param(pipe_ctx);
+ return DC_OK;
+}
+
+enum dc_status dcn10_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dcn10_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+ if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
+
+ if (!head_pipe) {
+ ASSERT(0);
+ return NULL;
+ }
+
+ if (!idle_pipe)
+ return NULL;
+
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+
+ return idle_pipe;
+}
+
+enum dcc_control {
+ dcc_control__256_256_xxx,
+ dcc_control__128_128_xxx,
+ dcc_control__256_64_64,
+};
+
+enum segment_order {
+ segment_order__na,
+ segment_order__contiguous,
+ segment_order__non_contiguous,
+};
+
+static bool dcc_support_pixel_format(
+ enum surface_pixel_format format,
+ unsigned int *bytes_per_element)
+{
+ /* DML: get_bytes_per_element */
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ *bytes_per_element = 2;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ *bytes_per_element = 4;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ *bytes_per_element = 8;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert)
+{
+ bool standard_swizzle = false;
+ bool display_swizzle = false;
+
+ switch (swizzle) {
+ case DC_SW_4KB_S:
+ case DC_SW_64KB_S:
+ case DC_SW_VAR_S:
+ case DC_SW_4KB_S_X:
+ case DC_SW_64KB_S_X:
+ case DC_SW_VAR_S_X:
+ standard_swizzle = true;
+ break;
+ case DC_SW_4KB_D:
+ case DC_SW_64KB_D:
+ case DC_SW_VAR_D:
+ case DC_SW_4KB_D_X:
+ case DC_SW_64KB_D_X:
+ case DC_SW_VAR_D_X:
+ display_swizzle = true;
+ break;
+ default:
+ break;
+ }
+
+ if (bytes_per_element == 1 && standard_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && standard_swizzle) {
+ *segment_order_horz = segment_order__na;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && display_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+
+ return false;
+}
+
+static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ /* copied from DML. might want to refactor DML to leverage from DML */
+ /* DML : get_blk256_size */
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
+static void det_request_size(
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
+
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = height * blk256_height * bpe;
+ swath_bytes_vert_wc = width * blk256_width * bpe;
+
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+}
+
+static bool get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!dcc_support_pixel_format(input->format,
+ &bpe))
+ return false;
+
+ if (!dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ det_request_size(input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+ /* reg128 is true for either horz and vert
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ break;
+ }
+
+ output->capable = true;
+ output->const_color_support = false;
+
+ return true;
+}
+
+
+static void dcn10_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
+
+ destruct(dcn10_pool);
+ kfree(dcn10_pool);
+ *pool = NULL;
+}
+
+static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
+{
+ if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && caps->max_video_width != 0
+ && plane_state->src_rect.width > caps->max_video_width)
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = get_dcc_compression_cap
+};
+
+static struct resource_funcs dcn10_res_pool_funcs = {
+ .destroy = dcn10_destroy_resource_pool,
+ .link_enc_create = dcn10_link_encoder_create,
+ .validate_guaranteed = dcn10_validate_guaranteed,
+ .validate_bandwidth = dcn_validate_bandwidth,
+ .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
+ .validate_plane = dcn10_validate_plane,
+ .add_stream_to_ctx = dcn10_add_stream_to_ctx
+};
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
+ /* RV1 support max 4 pipes */
+ value = value & 0xf;
+ return value;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn10_resource_pool *pool)
+{
+ int i;
+ int j;
+ struct dc_context *ctx = dc->ctx;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dcn10_res_pool_funcs;
+
+ /*
+ * TODO fill in from actual raven resource when we create
+ * more than virtual encoder
+ */
+
+ /*************************************************
+ * Resource + asic cap harcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ /* max pipe num for ASIC before check pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
+ dc->caps.max_video_width = 3840;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 256;
+
+ dc->caps.max_slave_planes = 1;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+ else
+ dc->debug = debug_defaults_diags;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+
+ pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ /* todo: not reuse phy_pll registers */
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto clock_source_create_fail;
+ }
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pool->base.display_clock = dce120_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto disp_clk_create_fail;
+ }
+ }
+
+ pool->base.dmcu = dcn10_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
+ memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
+ memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
+
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->urgent_latency = 3;
+ dc->debug.disable_dmcu = true;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
+ }
+
+
+ dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
+ ASSERT(dc->dcn_soc->number_of_channels < 3);
+ if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
+ dc->dcn_soc->number_of_channels = 2;
+
+ if (dc->dcn_soc->number_of_channels == 1) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
+ }
+ }
+
+ pool->base.pp_smu = dcn10_pp_smu_create(ctx);
+
+ if (!dc->debug.disable_pplib_clock_request)
+ dcn_bw_update_from_pplib(dc);
+ dcn_bw_sync_calcs_and_dml(dc);
+ if (!dc->debug.disable_pplib_wm_range) {
+ dc->res_pool = &pool->base;
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+ }
+
+ {
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
+ if (!pool->base.irqs)
+ goto irqs_create_fail;
+ #endif
+ }
+
+ /* index to valid pipe resource */
+ j = 0;
+ /* mem input -> ipp -> dpp -> opp -> TG */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ /* if pipe is disabled, skip instance of HW pipe,
+ * i.e, skip ASIC register instance
+ */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
+ pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
+ if (pool->base.hubps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto mi_create_fail;
+ }
+
+ pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
+ if (pool->base.ipps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto ipp_create_fail;
+ }
+
+ pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
+ if (pool->base.dpps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpp!\n");
+ goto dpp_create_fail;
+ }
+
+ pool->base.opps[j] = dcn10_opp_create(ctx, i);
+ if (pool->base.opps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto opp_create_fail;
+ }
+
+ pool->base.timing_generators[j] = dcn10_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto otg_create_fail;
+ }
+ /* check next valid pipe */
+ j++;
+ }
+
+ /* valid pipe num */
+ pool->base.pipe_count = j;
+
+ /* within dml lib, it is hard code to 4. If ASIC pipe is fused,
+ * the value may be changed
+ */
+ dc->dml.ip.max_num_dpp = pool->base.pipe_count;
+ dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
+
+ pool->base.mpc = dcn10_mpc_create(ctx);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto mpc_create_fail;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
+ &res_create_funcs : &res_create_maximus_funcs)))
+ goto res_create_fail;
+
+ dcn10_hw_sequencer_construct(dc);
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ dc->cap_funcs = cap_funcs;
+
+ return true;
+
+disp_clk_create_fail:
+mpc_create_fail:
+otg_create_fail:
+opp_create_fail:
+dpp_create_fail:
+ipp_create_fail:
+mi_create_fail:
+irqs_create_fail:
+res_create_fail:
+clock_source_create_fail:
+
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn10_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dcn10_resource_pool *pool =
+ kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
new file mode 100644
index 000000000000..8f71225bc61b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -0,0 +1,47 @@
+/*
+* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCN10_H__
+#define __DC_RESOURCE_DCN10_H__
+
+#include "core_types.h"
+
+#define TO_DCN10_RES_POOL(pool)\
+ container_of(pool, struct dcn10_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+struct dcn10_resource_pool {
+ struct resource_pool base;
+};
+struct resource_pool *dcn10_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+
+#endif /* __DC_RESOURCE_DCN10_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
new file mode 100644
index 000000000000..fced178c8c79
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
@@ -0,0 +1,1200 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_timing_generator.h"
+#include "dc.h"
+
+#define REG(reg)\
+ tgn10->tg_regs->reg
+
+#define CTX \
+ tgn10->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ tgn10->tg_shift->field_name, tgn10->tg_mask->field_name
+
+#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
+
+/**
+* apply_front_porch_workaround TODO FPGA still need?
+*
+* This is a workaround for a bug that has existed since R5xx and has not been
+* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
+*/
+static void tgn10_apply_front_porch_workaround(
+ struct timing_generator *tg,
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+static void tgn10_program_global_sync(
+ struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (tg->dlg_otg_param.vstartup_start == 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ REG_SET(OTG_VSTARTUP_PARAM, 0,
+ VSTARTUP_START, tg->dlg_otg_param.vstartup_start);
+
+ REG_SET_2(OTG_VUPDATE_PARAM, 0,
+ VUPDATE_OFFSET, tg->dlg_otg_param.vupdate_offset,
+ VUPDATE_WIDTH, tg->dlg_otg_param.vupdate_width);
+
+ REG_SET(OTG_VREADY_PARAM, 0,
+ VREADY_OFFSET, tg->dlg_otg_param.vready_offset);
+}
+
+static void tgn10_disable_stereo(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_STEREO_CONTROL, 0,
+ OTG_STEREO_EN, 0);
+
+ REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+ OTG_3D_STRUCTURE_EN, 0,
+ OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
+ OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
+
+ REG_UPDATE(OPPBUF_CONTROL,
+ OPPBUF_ACTIVE_WIDTH, 0);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_3D_VACT_SPACE1_SIZE, 0);
+}
+
+/**
+ * program_timing_generator used by mode timing set
+ * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
+ * Including SYNC. Call BIOS command table to program Timings.
+ */
+static void tgn10_program_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ bool use_vbios)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t vesa_sync_start;
+ uint32_t asic_blank_end;
+ uint32_t asic_blank_start;
+ uint32_t v_total;
+ uint32_t v_sync_end;
+ uint32_t v_init, v_fp2;
+ uint32_t h_sync_polarity, v_sync_polarity;
+ uint32_t interlace_factor;
+ uint32_t start_point = 0;
+ uint32_t field_num = 0;
+ uint32_t h_div_2;
+ int32_t vertical_line_start;
+
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ patched_crtc_timing = *dc_crtc_timing;
+ tgn10_apply_front_porch_workaround(tg, &patched_crtc_timing);
+
+ /* Load horizontal timing */
+
+ /* CRTC_H_TOTAL = vesa.h_total - 1 */
+ REG_SET(OTG_H_TOTAL, 0,
+ OTG_H_TOTAL, patched_crtc_timing.h_total - 1);
+
+ /* h_sync_start = 0, h_sync_end = vesa.h_sync_width */
+ REG_UPDATE_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, 0,
+ OTG_H_SYNC_A_END, patched_crtc_timing.h_sync_width);
+
+ /* asic_h_blank_end = HsyncWidth + HbackPorch =
+ * vesa. usHorizontalTotal - vesa. usHorizontalSyncStart -
+ * vesa.h_left_border
+ */
+ vesa_sync_start = patched_crtc_timing.h_addressable +
+ patched_crtc_timing.h_border_right +
+ patched_crtc_timing.h_front_porch;
+
+ asic_blank_end = patched_crtc_timing.h_total -
+ vesa_sync_start -
+ patched_crtc_timing.h_border_left;
+
+ /* h_blank_start = v_blank_end + v_active */
+ asic_blank_start = asic_blank_end +
+ patched_crtc_timing.h_border_left +
+ patched_crtc_timing.h_addressable +
+ patched_crtc_timing.h_border_right;
+
+ REG_UPDATE_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, asic_blank_start,
+ OTG_H_BLANK_END, asic_blank_end);
+
+ /* h_sync polarity */
+ h_sync_polarity = patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ?
+ 0 : 1;
+
+ REG_UPDATE(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, h_sync_polarity);
+
+ /* Load vertical timing */
+
+ /* CRTC_V_TOTAL = v_total - 1 */
+ if (patched_crtc_timing.flags.INTERLACE) {
+ interlace_factor = 2;
+ v_total = 2 * patched_crtc_timing.v_total;
+ } else {
+ interlace_factor = 1;
+ v_total = patched_crtc_timing.v_total - 1;
+ }
+ REG_SET(OTG_V_TOTAL, 0,
+ OTG_V_TOTAL, v_total);
+
+ /* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
+ * OTG_V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, v_total);
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, v_total);
+
+ /* v_sync_start = 0, v_sync_end = v_sync_width */
+ v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor;
+
+ REG_UPDATE_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, 0,
+ OTG_V_SYNC_A_END, v_sync_end);
+
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+ patched_crtc_timing.v_border_top)
+ * interlace_factor;
+
+ /* v_blank_start = v_blank_end + v_active */
+ asic_blank_start = asic_blank_end +
+ (patched_crtc_timing.v_border_top +
+ patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom)
+ * interlace_factor;
+
+ REG_UPDATE_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, asic_blank_start,
+ OTG_V_BLANK_END, asic_blank_end);
+
+ /* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt,
+ * program the reg for interrupt postition.
+ */
+ vertical_line_start = asic_blank_end - tg->dlg_otg_param.vstartup_start + 1;
+ if (vertical_line_start < 0) {
+ ASSERT(0);
+ vertical_line_start = 0;
+ }
+ REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
+
+ /* v_sync polarity */
+ v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ?
+ 0 : 1;
+
+ REG_UPDATE(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, v_sync_polarity);
+
+ v_init = asic_blank_start;
+ if (tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ tg->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
+ start_point = 1;
+ if (patched_crtc_timing.flags.INTERLACE == 1)
+ field_num = 1;
+ }
+ v_fp2 = 0;
+ if (tg->dlg_otg_param.vstartup_start > asic_blank_end)
+ v_fp2 = tg->dlg_otg_param.vstartup_start > asic_blank_end;
+
+ /* Interlace */
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ REG_UPDATE(OTG_INTERLACE_CONTROL,
+ OTG_INTERLACE_ENABLE, 1);
+ v_init = v_init / 2;
+ if ((tg->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
+ v_fp2 = v_fp2 / 2;
+ }
+ else
+ REG_UPDATE(OTG_INTERLACE_CONTROL,
+ OTG_INTERLACE_ENABLE, 0);
+
+
+ /* VTG enable set to 0 first VInit */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ REG_UPDATE_2(CONTROL,
+ VTG0_FP2, v_fp2,
+ VTG0_VCOUNT_INIT, v_init);
+
+ /* original code is using VTG offset to address OTG reg, seems wrong */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_START_POINT_CNTL, start_point,
+ OTG_FIELD_NUMBER_CNTL, field_num);
+
+ tgn10_program_global_sync(tg);
+
+ /* TODO
+ * patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
+ * program_horz_count_by_2
+ * for DVI 30bpp mode, 0 otherwise
+ * program_horz_count_by_2(tg, &patched_crtc_timing);
+ */
+
+ /* Enable stereo - only when we need to pack 3D frame. Other types
+ * of stereo handled in explicit call
+ */
+ h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
+ 1 : 0;
+
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_BY2, h_div_2);
+
+}
+
+/**
+ * unblank_crtc
+ * Call ASIC Control Object to UnBlank CRTC.
+ */
+static void tgn10_unblank_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t vertical_interrupt_enable = 0;
+
+ REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
+ OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
+
+ /* temporary work around for vertical interrupt, once vertical interrupt enabled,
+ * this check will be removed.
+ */
+ if (vertical_interrupt_enable)
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 0,
+ OTG_BLANK_DE_MODE, 0);
+}
+
+/**
+ * blank_crtc
+ * Call ASIC Control Object to Blank CRTC.
+ */
+
+static void tgn10_blank_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 1,
+ OTG_BLANK_DE_MODE, 0);
+
+ /* todo: why are we waiting for BLANK_DATA_EN? shouldn't we be waiting
+ * for status?
+ */
+ REG_WAIT(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 1,
+ 1, 100000);
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+}
+
+static void tgn10_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ if (enable_blanking)
+ tgn10_blank_crtc(tg);
+ else
+ tgn10_unblank_crtc(tg);
+}
+
+static bool tgn10_is_blanked(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t blank_en;
+ uint32_t blank_state;
+
+ REG_GET_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, &blank_en,
+ OTG_CURRENT_BLANK_STATE, &blank_state);
+
+ return blank_en && blank_state;
+}
+
+static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (enable) {
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_EN, 1,
+ OPTC_INPUT_CLK_GATE_DIS, 1);
+
+ REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_ON, 1,
+ 1, 1000);
+
+ /* Enable clock */
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_EN, 1,
+ OTG_CLOCK_GATE_DIS, 1);
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_ON, 1,
+ 1, 1000);
+ } else {
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_GATE_DIS, 0,
+ OTG_CLOCK_EN, 0);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_ON, 0,
+ 1, 1000);
+
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_GATE_DIS, 0,
+ OPTC_INPUT_CLK_EN, 0);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_ON, 0,
+ 1, 1000);
+ }
+}
+
+/**
+ * Enable CRTC
+ * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ */
+static bool tgn10_enable_crtc(struct timing_generator *tg)
+{
+ /* TODO FPGA wait for answer
+ * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
+ * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
+ */
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* opp instance for OTG. For DCN1.0, ODM is remoed.
+ * OPP and OPTC should 1:1 mapping
+ */
+ REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SRC_SEL, tg->inst);
+
+ /* VTG enable first is for HW workaround */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 1);
+
+ /* Enable CRTC */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 1);
+
+ return true;
+}
+
+/* disable_crtc - call ASIC Control Object to disable Timing generator. */
+static bool tgn10_disable_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 0);
+
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ /* CRTC disabled, so disable clock. */
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_BUSY, 0,
+ 1, 100000);
+
+ return true;
+}
+
+
+static void tgn10_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET_3(OTG_BLACK_COLOR, 0,
+ OTG_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ OTG_BLACK_COLOR_G_Y, black_color->color_g_y,
+ OTG_BLACK_COLOR_R_CR, black_color->color_r_cr);
+}
+
+static bool tgn10_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t interlace_factor;
+ uint32_t v_blank;
+ uint32_t h_blank;
+ uint32_t min_v_blank;
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(timing != NULL);
+
+ interlace_factor = timing->flags.INTERLACE ? 2 : 1;
+ v_blank = (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom) *
+ interlace_factor;
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
+
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
+ return false;
+
+ /* Temporarily blocking interlacing mode until it's supported */
+ if (timing->flags.INTERLACE == 1)
+ return false;
+
+ /* Check maximum number of pixels supported by Timing Generator
+ * (Currently will never fail, in order to fail needs display which
+ * needs more than 8192 horizontal and
+ * more than 8192 vertical total pixels)
+ */
+ if (timing->h_total > tgn10->max_h_total ||
+ timing->v_total > tgn10->max_v_total)
+ return false;
+
+
+ if (h_blank < tgn10->min_h_blank)
+ return false;
+
+ if (timing->h_sync_width < tgn10->min_h_sync_width ||
+ timing->v_sync_width < tgn10->min_v_sync_width)
+ return false;
+
+ min_v_blank = timing->flags.INTERLACE?tgn10->min_v_blank_interlace:tgn10->min_v_blank;
+
+ if (v_blank < min_v_blank)
+ return false;
+
+ return true;
+
+}
+
+/*
+ * get_vblank_counter
+ *
+ * @brief
+ * Get counter for vertical blanks. use register CRTC_STATUS_FRAME_COUNT which
+ * holds the counter of frames.
+ *
+ * @param
+ * struct timing_generator *tg - [in] timing generator which controls the
+ * desired CRTC
+ *
+ * @return
+ * Counter of frames, which should equal to number of vblanks.
+ */
+static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t frame_count;
+
+ REG_GET(OTG_STATUS_FRAME_COUNT,
+ OTG_FRAME_COUNT, &frame_count);
+
+ return frame_count;
+}
+
+static void tgn10_lock(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, tg->inst);
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 100);
+}
+
+static void tgn10_unlock(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ /* why are we waiting here? */
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_UPDATE_PENDING, 0,
+ 1, 100000);
+}
+
+static void tgn10_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_GET_2(OTG_STATUS_POSITION,
+ OTG_HORZ_COUNT, &position->horizontal_count,
+ OTG_VERT_COUNT, &position->vertical_count);
+
+ REG_GET(OTG_NOM_VERT_POSITION,
+ OTG_VERT_COUNT_NOM, &position->nominal_vcount);
+}
+
+static bool tgn10_is_counter_moving(struct timing_generator *tg)
+{
+ struct crtc_position position1, position2;
+
+ tg->funcs->get_position(tg, &position1);
+ tg->funcs->get_position(tg, &position2);
+
+ if (position1.horizontal_count == position2.horizontal_count &&
+ position1.vertical_count == position2.vertical_count)
+ return false;
+ else
+ return true;
+}
+
+static bool tgn10_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t occurred;
+
+ REG_GET(OTG_FORCE_COUNT_NOW_CNTL,
+ OTG_FORCE_COUNT_NOW_OCCURRED, &occurred);
+
+ return occurred != 0;
+}
+
+static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_tg_inst)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t falling_edge;
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &falling_edge);
+
+ if (falling_edge)
+ REG_SET_3(OTG_TRIGA_CNTL, 0,
+ /* vsync signal from selected OTG pipe based
+ * on OTG_TRIG_SOURCE_PIPE_SELECT setting
+ */
+ OTG_TRIGA_SOURCE_SELECT, 20,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
+ /* always detect falling edge */
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 1);
+ else
+ REG_SET_3(OTG_TRIGA_CNTL, 0,
+ /* vsync signal from selected OTG pipe based
+ * on OTG_TRIG_SOURCE_PIPE_SELECT setting
+ */
+ OTG_TRIGA_SOURCE_SELECT, 20,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
+ /* always detect rising edge */
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1);
+
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ /* force H count to H_TOTAL and V count to V_TOTAL in
+ * progressive mode and V_TOTAL-1 in interlaced mode
+ */
+ OTG_FORCE_COUNT_NOW_MODE, 2);
+}
+
+static void tgn10_disable_reset_trigger(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_WRITE(OTG_TRIGA_CNTL, 0);
+
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ OTG_FORCE_COUNT_NOW_CLEAR, 1);
+}
+
+static void tgn10_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ REG_WAIT(OTG_STATUS,
+ OTG_V_BLANK, 1,
+ 1, 100000); /* 1 vupdate at 10hz */
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ REG_WAIT(OTG_STATUS,
+ OTG_V_ACTIVE_DISP, 1,
+ 1, 100000); /* 1 vupdate at 10hz */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void tgn10_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ /* asic design change, do not need this control
+ * empty for share caller logic
+ */
+}
+
+
+static void tgn10_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* Bit 8 is no longer applicable in RV for PSR case,
+ * set bit 8 to 0 if given
+ */
+ if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
+ != 0)
+ value = value &
+ ~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
+
+ REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
+ OTG_STATIC_SCREEN_EVENT_MASK, value,
+ OTG_STATIC_SCREEN_FRAME_COUNT, 2);
+}
+
+
+/**
+ *****************************************************************************
+ * Function: set_drr
+ *
+ * @brief
+ * Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
+ *
+ *****************************************************************************
+ */
+static void tgn10_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
+
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+
+ REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+ } else {
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, 0);
+
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, 0);
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+ }
+}
+
+static void tgn10_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ uint32_t pattern_mask;
+ uint32_t pattern_data;
+ /* color ramp generator mixes 16-bits color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+
+ REG_UPDATE_2(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_HRES, 6);
+
+ REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, dyn_range,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO
+ * color bits should be left aligned aligned to MSB
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
+ REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * we will make a loop of 6 in which we prepare the mask,
+ * then write, then prepare the color for next write.
+ * first iteration will write mask only,
+ * but each next iteration color prepared in
+ * previous iteration will be written within new mask,
+ * the last component will written separately,
+ * mask is not changing between 6th and 7th write
+ * and color will be prepared by last iteration
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ pattern_data = 0;
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ pattern_mask = (1 << index);
+
+ /* write color component */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ pattern_data = dst_color[index];
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* enable test pattern */
+ REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment for the first ramp for one color gradation
+ * 1 gradation for 6-bit color is 2^10
+ * gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, 0,
+ OTG_TEST_PATTERN_HRES, 6,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, 0,
+ OTG_TEST_PATTERN_HRES, 8,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, inc_base + 2,
+ OTG_TEST_PATTERN_HRES, 8,
+ OTG_TEST_PATTERN_VRES, 5,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 384 << 6);
+ }
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
+
+ /* enable test pattern */
+ REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
+
+ REG_SET_4(OTG_TEST_PATTERN_CONTROL, 0,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
+ REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
+ REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
+ }
+ break;
+ default:
+ break;
+
+ }
+}
+
+static void tgn10_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, v_blank_start,
+ OTG_V_BLANK_END, v_blank_end);
+
+ tgn10_get_position(tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+
+
+static void tgn10_enable_stereo(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ uint32_t active_width = timing->h_addressable;
+ uint32_t space1_size = timing->v_total - timing->v_addressable;
+
+ if (flags) {
+ uint32_t stereo_en;
+ stereo_en = flags->FRAME_PACKED == 0 ? 1 : 0;
+
+ if (flags->PROGRAM_STEREO)
+ REG_UPDATE_3(OTG_STEREO_CONTROL,
+ OTG_STEREO_EN, stereo_en,
+ OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
+ OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+
+ if (flags->PROGRAM_POLARITY)
+ REG_UPDATE(OTG_STEREO_CONTROL,
+ OTG_STEREO_EYE_FLAG_POLARITY,
+ flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
+
+ if (flags->DISABLE_STEREO_DP_SYNC)
+ REG_UPDATE(OTG_STEREO_CONTROL,
+ OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
+
+ if (flags->PROGRAM_STEREO)
+ REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+ OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
+ OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
+ OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
+
+ }
+
+ REG_UPDATE(OPPBUF_CONTROL,
+ OPPBUF_ACTIVE_WIDTH, active_width);
+
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
+}
+
+static void tgn10_program_stereo(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
+{
+ if (flags->PROGRAM_STEREO)
+ tgn10_enable_stereo(tg, timing, flags);
+ else
+ tgn10_disable_stereo(tg);
+}
+
+
+static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
+{
+ bool ret = false;
+ uint32_t left_eye = 0;
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_GET(OTG_STEREO_STATUS,
+ OTG_STEREO_CURRENT_EYE, &left_eye);
+ if (left_eye == 1)
+ ret = true;
+ else
+ ret = false;
+
+ return ret;
+}
+
+void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+ struct dcn_otg_state *s)
+{
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &s->otg_enabled);
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &s->v_blank_start,
+ OTG_V_BLANK_END, &s->v_blank_end);
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &s->v_sync_a_pol);
+
+ REG_GET(OTG_V_TOTAL,
+ OTG_V_TOTAL, &s->v_total);
+
+ REG_GET(OTG_V_TOTAL_MAX,
+ OTG_V_TOTAL_MAX, &s->v_total_max);
+
+ REG_GET(OTG_V_TOTAL_MIN,
+ OTG_V_TOTAL_MIN, &s->v_total_min);
+
+ REG_GET_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, &s->v_sync_a_start,
+ OTG_V_SYNC_A_END, &s->v_sync_a_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &s->h_blank_start,
+ OTG_H_BLANK_END, &s->h_blank_end);
+
+ REG_GET_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, &s->h_sync_a_start,
+ OTG_H_SYNC_A_END, &s->h_sync_a_end);
+
+ REG_GET(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, &s->h_sync_a_pol);
+
+ REG_GET(OTG_H_TOTAL,
+ OTG_H_TOTAL, &s->h_total);
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+}
+
+
+static const struct timing_generator_funcs dcn10_tg_funcs = {
+ .validate_timing = tgn10_validate_timing,
+ .program_timing = tgn10_program_timing,
+ .program_global_sync = tgn10_program_global_sync,
+ .enable_crtc = tgn10_enable_crtc,
+ .disable_crtc = tgn10_disable_crtc,
+ /* used by enable_timing_synchronization. Not need for FPGA */
+ .is_counter_moving = tgn10_is_counter_moving,
+ .get_position = tgn10_get_position,
+ .get_frame_count = tgn10_get_vblank_counter,
+ .get_scanoutpos = tgn10_get_crtc_scanoutpos,
+ .set_early_control = tgn10_set_early_control,
+ /* used by enable_timing_synchronization. Not need for FPGA */
+ .wait_for_state = tgn10_wait_for_state,
+ .set_blank = tgn10_set_blank,
+ .is_blanked = tgn10_is_blanked,
+ .set_blank_color = tgn10_program_blank_color,
+ .did_triggered_reset_occur = tgn10_did_triggered_reset_occur,
+ .enable_reset_trigger = tgn10_enable_reset_trigger,
+ .disable_reset_trigger = tgn10_disable_reset_trigger,
+ .lock = tgn10_lock,
+ .unlock = tgn10_unlock,
+ .enable_optc_clock = tgn10_enable_optc_clock,
+ .set_drr = tgn10_set_drr,
+ .set_static_screen_control = tgn10_set_static_screen_control,
+ .set_test_pattern = tgn10_set_test_pattern,
+ .program_stereo = tgn10_program_stereo,
+ .is_stereo_left_eye = tgn10_is_stereo_left_eye
+};
+
+void dcn10_timing_generator_init(struct dcn10_timing_generator *tgn10)
+{
+ tgn10->base.funcs = &dcn10_tg_funcs;
+
+ tgn10->max_h_total = tgn10->tg_mask->OTG_H_TOTAL + 1;
+ tgn10->max_v_total = tgn10->tg_mask->OTG_V_TOTAL + 1;
+
+ tgn10->min_h_blank = 32;
+ tgn10->min_v_blank = 3;
+ tgn10->min_v_blank_interlace = 5;
+ tgn10->min_h_sync_width = 8;
+ tgn10->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
new file mode 100644
index 000000000000..7d4818d7aa31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCN10_H__
+#define __DC_TIMING_GENERATOR_DCN10_H__
+
+#include "timing_generator.h"
+
+#define DCN10TG_FROM_TG(tg)\
+ container_of(tg, struct dcn10_timing_generator, base)
+
+#define TG_COMMON_REG_LIST_DCN(inst) \
+ SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
+ SRI(OTG_VUPDATE_PARAM, OTG, inst),\
+ SRI(OTG_VREADY_PARAM, OTG, inst),\
+ SRI(OTG_BLANK_CONTROL, OTG, inst),\
+ SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
+ SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
+ SRI(OTG_H_TOTAL, OTG, inst),\
+ SRI(OTG_H_BLANK_START_END, OTG, inst),\
+ SRI(OTG_H_SYNC_A, OTG, inst),\
+ SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_H_TIMING_CNTL, OTG, inst),\
+ SRI(OTG_V_TOTAL, OTG, inst),\
+ SRI(OTG_V_BLANK_START_END, OTG, inst),\
+ SRI(OTG_V_SYNC_A, OTG, inst),\
+ SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_INTERLACE_CONTROL, OTG, inst),\
+ SRI(OTG_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_CONTROL, OTG, inst),\
+ SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_STATUS, OTG, inst),\
+ SRI(OTG_V_TOTAL_MAX, OTG, inst),\
+ SRI(OTG_V_TOTAL_MIN, OTG, inst),\
+ SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI(OTG_TRIGA_CNTL, OTG, inst),\
+ SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
+ SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
+ SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
+ SRI(OTG_STATUS, OTG, inst),\
+ SRI(OTG_STATUS_POSITION, OTG, inst),\
+ SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
+ SRI(OTG_BLACK_COLOR, OTG, inst),\
+ SRI(OTG_CLOCK_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
+ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
+ SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
+ SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
+ SRI(OPPBUF_CONTROL, OPPBUF, inst),\
+ SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, inst),\
+ SRI(CONTROL, VTG, inst)
+
+#define TG_COMMON_REG_LIST_DCN1_0(inst) \
+ TG_COMMON_REG_LIST_DCN(inst),\
+ SRI(OTG_TEST_PATTERN_PARAMETERS, OTG, inst),\
+ SRI(OTG_TEST_PATTERN_CONTROL, OTG, inst),\
+ SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
+
+
+struct dcn_tg_registers {
+ uint32_t OTG_VSTARTUP_PARAM;
+ uint32_t OTG_VUPDATE_PARAM;
+ uint32_t OTG_VREADY_PARAM;
+ uint32_t OTG_BLANK_CONTROL;
+ uint32_t OTG_MASTER_UPDATE_LOCK;
+ uint32_t OTG_GLOBAL_CONTROL0;
+ uint32_t OTG_DOUBLE_BUFFER_CONTROL;
+ uint32_t OTG_H_TOTAL;
+ uint32_t OTG_H_BLANK_START_END;
+ uint32_t OTG_H_SYNC_A;
+ uint32_t OTG_H_SYNC_A_CNTL;
+ uint32_t OTG_H_TIMING_CNTL;
+ uint32_t OTG_V_TOTAL;
+ uint32_t OTG_V_BLANK_START_END;
+ uint32_t OTG_V_SYNC_A;
+ uint32_t OTG_V_SYNC_A_CNTL;
+ uint32_t OTG_INTERLACE_CONTROL;
+ uint32_t OTG_CONTROL;
+ uint32_t OTG_STEREO_CONTROL;
+ uint32_t OTG_3D_STRUCTURE_CONTROL;
+ uint32_t OTG_STEREO_STATUS;
+ uint32_t OTG_V_TOTAL_MAX;
+ uint32_t OTG_V_TOTAL_MIN;
+ uint32_t OTG_V_TOTAL_CONTROL;
+ uint32_t OTG_TRIGA_CNTL;
+ uint32_t OTG_FORCE_COUNT_NOW_CNTL;
+ uint32_t OTG_STATIC_SCREEN_CONTROL;
+ uint32_t OTG_STATUS_FRAME_COUNT;
+ uint32_t OTG_STATUS;
+ uint32_t OTG_STATUS_POSITION;
+ uint32_t OTG_NOM_VERT_POSITION;
+ uint32_t OTG_BLACK_COLOR;
+ uint32_t OTG_TEST_PATTERN_PARAMETERS;
+ uint32_t OTG_TEST_PATTERN_CONTROL;
+ uint32_t OTG_TEST_PATTERN_COLOR;
+ uint32_t OTG_CLOCK_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
+ uint32_t OPTC_INPUT_CLOCK_CONTROL;
+ uint32_t OPTC_DATA_SOURCE_SELECT;
+ uint32_t OPTC_INPUT_GLOBAL_CONTROL;
+ uint32_t OPPBUF_CONTROL;
+ uint32_t OPPBUF_3D_PARAMETERS_0;
+ uint32_t CONTROL;
+};
+
+#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
+ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\
+ SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DATA_EN, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DE_MODE, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_CURRENT_BLANK_STATE, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
+ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_INTERLACE_CONTROL, OTG_INTERLACE_ENABLE, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\
+ SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\
+ SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_B_CB, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_G_Y, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_R_CR, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
+ SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh)
+
+#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC1, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_VRES, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_HRES, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_RAMP0_OFFSET, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_EN, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_MODE, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_DYNAMIC_RANGE, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_COLOR_FORMAT, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_MASK, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh)
+
+#define TG_REG_FIELD_LIST(type) \
+ type VSTARTUP_START;\
+ type VUPDATE_OFFSET;\
+ type VUPDATE_WIDTH;\
+ type VREADY_OFFSET;\
+ type OTG_BLANK_DATA_EN;\
+ type OTG_BLANK_DE_MODE;\
+ type OTG_CURRENT_BLANK_STATE;\
+ type OTG_MASTER_UPDATE_LOCK;\
+ type UPDATE_LOCK_STATUS;\
+ type OTG_UPDATE_PENDING;\
+ type OTG_MASTER_UPDATE_LOCK_SEL;\
+ type OTG_BLANK_DATA_DOUBLE_BUFFER_EN;\
+ type OTG_H_TOTAL;\
+ type OTG_H_BLANK_START;\
+ type OTG_H_BLANK_END;\
+ type OTG_H_SYNC_A_START;\
+ type OTG_H_SYNC_A_END;\
+ type OTG_H_SYNC_A_POL;\
+ type OTG_H_TIMING_DIV_BY2;\
+ type OTG_V_TOTAL;\
+ type OTG_V_BLANK_START;\
+ type OTG_V_BLANK_END;\
+ type OTG_V_SYNC_A_START;\
+ type OTG_V_SYNC_A_END;\
+ type OTG_V_SYNC_A_POL;\
+ type OTG_INTERLACE_ENABLE;\
+ type OTG_MASTER_EN;\
+ type OTG_START_POINT_CNTL;\
+ type OTG_DISABLE_POINT_CNTL;\
+ type OTG_FIELD_NUMBER_CNTL;\
+ type OTG_STEREO_EN;\
+ type OTG_STEREO_SYNC_OUTPUT_LINE_NUM;\
+ type OTG_STEREO_SYNC_OUTPUT_POLARITY;\
+ type OTG_STEREO_EYE_FLAG_POLARITY;\
+ type OTG_STEREO_CURRENT_EYE;\
+ type OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP;\
+ type OTG_3D_STRUCTURE_EN;\
+ type OTG_3D_STRUCTURE_V_UPDATE_MODE;\
+ type OTG_3D_STRUCTURE_STEREO_SEL_OVR;\
+ type OTG_V_TOTAL_MAX;\
+ type OTG_V_TOTAL_MIN;\
+ type OTG_V_TOTAL_MIN_SEL;\
+ type OTG_V_TOTAL_MAX_SEL;\
+ type OTG_FORCE_LOCK_ON_EVENT;\
+ type OTG_SET_V_TOTAL_MIN_MASK_EN;\
+ type OTG_SET_V_TOTAL_MIN_MASK;\
+ type OTG_FORCE_COUNT_NOW_CLEAR;\
+ type OTG_FORCE_COUNT_NOW_MODE;\
+ type OTG_FORCE_COUNT_NOW_OCCURRED;\
+ type OTG_TRIGA_SOURCE_SELECT;\
+ type OTG_TRIGA_SOURCE_PIPE_SELECT;\
+ type OTG_TRIGA_RISING_EDGE_DETECT_CNTL;\
+ type OTG_TRIGA_FALLING_EDGE_DETECT_CNTL;\
+ type OTG_STATIC_SCREEN_EVENT_MASK;\
+ type OTG_STATIC_SCREEN_FRAME_COUNT;\
+ type OTG_FRAME_COUNT;\
+ type OTG_V_BLANK;\
+ type OTG_V_ACTIVE_DISP;\
+ type OTG_HORZ_COUNT;\
+ type OTG_VERT_COUNT;\
+ type OTG_VERT_COUNT_NOM;\
+ type OTG_BLACK_COLOR_B_CB;\
+ type OTG_BLACK_COLOR_G_Y;\
+ type OTG_BLACK_COLOR_R_CR;\
+ type OTG_TEST_PATTERN_INC0;\
+ type OTG_TEST_PATTERN_INC1;\
+ type OTG_TEST_PATTERN_VRES;\
+ type OTG_TEST_PATTERN_HRES;\
+ type OTG_TEST_PATTERN_RAMP0_OFFSET;\
+ type OTG_TEST_PATTERN_EN;\
+ type OTG_TEST_PATTERN_MODE;\
+ type OTG_TEST_PATTERN_DYNAMIC_RANGE;\
+ type OTG_TEST_PATTERN_COLOR_FORMAT;\
+ type OTG_TEST_PATTERN_MASK;\
+ type OTG_TEST_PATTERN_DATA;\
+ type OTG_BUSY;\
+ type OTG_CLOCK_EN;\
+ type OTG_CLOCK_ON;\
+ type OTG_CLOCK_GATE_DIS;\
+ type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
+ type OTG_VERTICAL_INTERRUPT2_LINE_START;\
+ type OPTC_INPUT_CLK_EN;\
+ type OPTC_INPUT_CLK_ON;\
+ type OPTC_INPUT_CLK_GATE_DIS;\
+ type OPTC_SRC_SEL;\
+ type OPTC_SEG0_SRC_SEL;\
+ type OPTC_UNDERFLOW_OCCURRED_STATUS;\
+ type OPPBUF_ACTIVE_WIDTH;\
+ type OPPBUF_3D_VACT_SPACE1_SIZE;\
+ type VTG0_ENABLE;\
+ type VTG0_FP2;\
+ type VTG0_VCOUNT_INIT;
+
+struct dcn_tg_shift {
+ TG_REG_FIELD_LIST(uint8_t)
+};
+
+struct dcn_tg_mask {
+ TG_REG_FIELD_LIST(uint32_t)
+};
+
+struct dcn10_timing_generator {
+ struct timing_generator base;
+
+ const struct dcn_tg_registers *tg_regs;
+ const struct dcn_tg_shift *tg_shift;
+ const struct dcn_tg_mask *tg_mask;
+
+ enum controller_id controller_id;
+
+ uint32_t max_h_total;
+ uint32_t max_v_total;
+
+ uint32_t min_h_blank;
+
+ uint32_t min_h_sync_width;
+ uint32_t min_v_sync_width;
+ uint32_t min_v_blank;
+ uint32_t min_v_blank_interlace;
+};
+
+void dcn10_timing_generator_init(struct dcn10_timing_generator *tg);
+
+struct dcn_otg_state {
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t v_sync_a_pol;
+ uint32_t v_total;
+ uint32_t v_total_max;
+ uint32_t v_total_min;
+ uint32_t v_sync_a_start;
+ uint32_t v_sync_a_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ uint32_t h_sync_a_start;
+ uint32_t h_sync_a_end;
+ uint32_t h_sync_a_pol;
+ uint32_t h_total;
+ uint32_t underflow_occurred_status;
+ uint32_t otg_enabled;
+};
+
+void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+ struct dcn_otg_state *s);
+
+#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
new file mode 100644
index 000000000000..ab88f07772a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * This file defines helper functions provided by the Display Manager to
+ * Display Core.
+ */
+#ifndef __DM_HELPERS__
+#define __DM_HELPERS__
+
+#include "dc_types.h"
+#include "dc.h"
+
+struct dp_mst_stream_allocation_table;
+
+enum dc_edid_status dm_helpers_parse_edid_caps(
+ struct dc_context *ctx,
+ const struct dc_edid *edid,
+ struct dc_edid_caps *edid_caps);
+
+/*
+ * Writes payload allocation table in immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ struct dp_mst_stream_allocation_table *proposed_table,
+ bool enable);
+
+/*
+ * Polls for ACT (allocation change trigger) handled and
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
+/*
+ * Sends ALLOCATE_PAYLOAD message.
+ */
+bool dm_helpers_dp_mst_send_payload_allocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ bool enable);
+
+bool dm_helpers_dp_mst_start_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ bool boot);
+
+void dm_helpers_dp_mst_stop_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link);
+/**
+ * OS specific aux read callback.
+ */
+bool dm_helpers_dp_read_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+
+/**
+ * OS specific aux write callback.
+ */
+bool dm_helpers_dp_write_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+
+bool dm_helpers_submit_i2c(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ struct i2c_command *cmd);
+
+enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+
+#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
new file mode 100644
index 000000000000..bbfa83252fc1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_PP_SMU_IF__H
+#define DM_PP_SMU_IF__H
+
+/*
+ * interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
+ */
+
+
+struct pp_smu {
+ struct dc_context *ctx;
+};
+
+enum wm_set_id {
+ WM_A,
+ WM_B,
+ WM_C,
+ WM_D,
+ WM_COUNT,
+};
+
+struct pp_smu_wm_set_range {
+ enum wm_set_id wm_inst;
+ uint32_t min_fill_clk_khz;
+ uint32_t max_fill_clk_khz;
+ uint32_t min_drain_clk_khz;
+ uint32_t max_drain_clk_khz;
+};
+
+struct pp_smu_wm_range_sets {
+ uint32_t num_reader_wm_sets;
+ struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
+
+ uint32_t num_writer_wm_sets;
+ struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
+};
+
+struct pp_smu_display_requirement_rv {
+ /* PPSMC_MSG_SetDisplayCount: count
+ * 0 triggers S0i2 optimization
+ */
+ unsigned int display_count;
+
+ /* PPSMC_MSG_SetHardMinFclkByFreq: khz
+ * FCLK will vary with DPM, but never below requested hard min
+ */
+ unsigned int hard_min_fclk_khz;
+
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+ * fixed clock at requested freq, either from FCH bypass or DFS
+ */
+ unsigned int hard_min_dcefclk_khz;
+
+ /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
+ * when DF is in cstate, dcf clock is further divided down
+ * to just above given frequency
+ */
+ unsigned int min_deep_sleep_dcefclk_mhz;
+};
+
+struct pp_smu_funcs_rv {
+ struct pp_smu pp_smu;
+
+ void (*set_display_requirement)(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req);
+
+ /* which SMU message? are reader and writer WM separate SMU msg? */
+ void (*set_wm_ranges)(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges);
+
+};
+
+#if 0
+struct pp_smu_funcs_rv {
+
+ /* PPSMC_MSG_SetDisplayCount
+ * 0 triggers S0i2 optimization
+ */
+ void (*set_display_count)(struct pp_smu *pp, int count);
+
+ /* PPSMC_MSG_SetHardMinFclkByFreq
+ * FCLK will vary with DPM, but never below requested hard min
+ */
+ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq
+ * fixed clock at requested freq, either from FCH bypass or DFS
+ */
+ void (*set_hard_min_dcefclk_by_freq)(struct pp_smu *pp, int khz);
+
+ /* PPSMC_MSG_SetMinDeepSleepDcefclk
+ * when DF is in cstate, dcf clock is further divided down
+ * to just above given frequency
+ */
+ void (*set_min_deep_sleep_dcefclk)(struct pp_smu *pp, int mhz);
+
+ /* todo: aesthetic
+ * watermark range table
+ */
+
+ /* todo: functional/feature
+ * PPSMC_MSG_SetHardMinSocclkByFreq: required to support DWB
+ */
+};
+#endif
+
+#endif /* DM_PP_SMU_IF__H */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
new file mode 100644
index 000000000000..d4917037ac42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * This file defines external dependencies of Display Core.
+ */
+
+#ifndef __DM_SERVICES_H__
+
+#define __DM_SERVICES_H__
+
+/* TODO: remove when DC is complete. */
+#include "dm_services_types.h"
+#include "logger_interface.h"
+#include "link_service_types.h"
+
+#undef DEPRECATED
+
+irq_handler_idx dm_register_interrupt(
+ struct dc_context *ctx,
+ struct dc_interrupt_params *int_params,
+ interrupt_handler ih,
+ void *handler_args);
+
+
+/*
+ *
+ * GPU registers access
+ *
+ */
+
+/* enable for debugging new code, this adds 50k to the driver size. */
+/* #define DM_CHECK_ADDR_0 */
+
+#define dm_read_reg(ctx, address) \
+ dm_read_reg_func(ctx, address, __func__)
+
+static inline uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ return value;
+}
+
+#define dm_write_reg(ctx, address, value) \
+ dm_write_reg_func(ctx, address, value, __func__)
+
+static inline void dm_write_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ uint32_t value,
+ const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register write. address = 0");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+}
+
+static inline uint32_t dm_read_index_reg(
+ const struct dc_context *ctx,
+ enum cgs_ind_reg addr_space,
+ uint32_t index)
+{
+ return cgs_read_ind_register(ctx->cgs_device, addr_space, index);
+}
+
+static inline void dm_write_index_reg(
+ const struct dc_context *ctx,
+ enum cgs_ind_reg addr_space,
+ uint32_t index,
+ uint32_t value)
+{
+ cgs_write_ind_register(ctx->cgs_device, addr_space, index, value);
+}
+
+static inline uint32_t get_reg_field_value_ex(
+ uint32_t reg_value,
+ uint32_t mask,
+ uint8_t shift)
+{
+ return (mask & reg_value) >> shift;
+}
+
+#define get_reg_field_value(reg_value, reg_name, reg_field)\
+ get_reg_field_value_ex(\
+ (reg_value),\
+ reg_name ## __ ## reg_field ## _MASK,\
+ reg_name ## __ ## reg_field ## __SHIFT)
+
+static inline uint32_t set_reg_field_value_ex(
+ uint32_t reg_value,
+ uint32_t value,
+ uint32_t mask,
+ uint8_t shift)
+{
+ ASSERT(mask != 0);
+ return (reg_value & ~mask) | (mask & (value << shift));
+}
+
+#define set_reg_field_value(reg_value, value, reg_name, reg_field)\
+ (reg_value) = set_reg_field_value_ex(\
+ (reg_value),\
+ (value),\
+ reg_name ## __ ## reg_field ## _MASK,\
+ reg_name ## __ ## reg_field ## __SHIFT)
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
+#define FD(reg_field) reg_field ## __SHIFT, \
+ reg_field ## _MASK
+
+/*
+ * return number of poll before condition is met
+ * return 0 if condition is not meet after specified time out tries
+ */
+unsigned int generic_reg_wait(const struct dc_context *ctx,
+ uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
+ unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+ const char *func_name, int line);
+
+
+/* These macros need to be used with soc15 registers in order to retrieve
+ * the actual offset.
+ */
+#define dm_write_reg_soc15(ctx, reg, inst_offset, value) \
+ dm_write_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, value, __func__)
+
+#define dm_read_reg_soc15(ctx, reg, inst_offset) \
+ dm_read_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, __func__)
+
+#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
+ generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
+ dm_read_reg_func(ctx, mm##reg_name + DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + inst_offset, __func__), \
+ n, __VA_ARGS__)
+
+#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
+ generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
+ n, __VA_ARGS__)
+
+#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
+ get_reg_field_value_ex(\
+ (reg_value),\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
+
+#define set_reg_field_value_soc15(reg_value, value, block, reg_num, reg_name, reg_field)\
+ (reg_value) = set_reg_field_value_ex(\
+ (reg_value),\
+ (value),\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
+
+/**************************************
+ * Power Play (PP) interfaces
+ **************************************/
+
+/* DAL calls this function to notify PP about clocks it needs for the Mode Set.
+ * This is done *before* it changes DCE clock.
+ *
+ * If required clock is higher than current, then PP will increase the voltage.
+ *
+ * If required clock is lower than current, then PP will defer reduction of
+ * voltage until the call to dc_service_pp_post_dce_clock_change().
+ *
+ * \input - Contains clocks needed for Mode Set.
+ *
+ * \output - Contains clocks adjusted by PP which DAL should use for Mode Set.
+ * Valid only if function returns zero.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dm_pp_pre_dce_clock_change(
+ struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *requested_state,
+ struct dm_pp_gpu_clock_range *actual_state);
+
+/* The returned clocks range are 'static' system clocks which will be used for
+ * mode validation purposes.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dc_service_get_system_clocks_range(
+ const struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *sys_clks);
+
+/* Gets valid clocks levels from pplib
+ *
+ * input: clk_type - display clk / sclk / mem clk
+ *
+ * output: array of valid clock levels for given type in ascending order,
+ * with invalid levels filtered out
+ *
+ */
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clk_level_info);
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info);
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info);
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
+
+void dm_pp_get_funcs_rv(struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs);
+
+/* DAL calls this function to notify PP about completion of Mode Set.
+ * For PP it means that current DCE clocks are those which were returned
+ * by dc_service_pp_pre_dce_clock_change(), in the 'output' parameter.
+ *
+ * If the clocks are higher than before, then PP does nothing.
+ *
+ * If the clocks are lower than before, then PP reduces the voltage.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg);
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req);
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req);
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info);
+
+/****** end of PP interfaces ******/
+
+struct persistent_data_flag {
+ bool save_per_link;
+ bool save_per_edid;
+};
+
+/* Call to write data in registry editor for persistent data storage.
+ *
+ * \inputs sink - identify edid/link for registry folder creation
+ * module name - identify folders for registry
+ * key name - identify keys within folders for registry
+ * params - value to write in defined folder/key
+ * size - size of the input params
+ * flag - determine whether to save by link or edid
+ *
+ * \returns true - call is successful
+ * false - call failed
+ *
+ * sink module key
+ * -----------------------------------------------------------------------------
+ * NULL NULL NULL - failure
+ * NULL NULL - - create key with param value
+ * under base folder
+ * NULL - NULL - create module folder under base folder
+ * - NULL NULL - failure
+ * NULL - - - create key under module folder
+ * with no edid/link identification
+ * - NULL - - create key with param value
+ * under base folder
+ * - - NULL - create module folder under base folder
+ * - - - - create key under module folder
+ * with edid/link identification
+ */
+bool dm_write_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag);
+
+
+/* Call to read data in registry editor for persistent data storage.
+ *
+ * \inputs sink - identify edid/link for registry folder creation
+ * module name - identify folders for registry
+ * key name - identify keys within folders for registry
+ * size - size of the output params
+ * flag - determine whether it was save by link or edid
+ *
+ * \returns params - value read from defined folder/key
+ * true - call is successful
+ * false - call failed
+ *
+ * sink module key
+ * -----------------------------------------------------------------------------
+ * NULL NULL NULL - failure
+ * NULL NULL - - read key under base folder
+ * NULL - NULL - failure
+ * - NULL NULL - failure
+ * NULL - - - read key under module folder
+ * with no edid/link identification
+ * - NULL - - read key under base folder
+ * - - NULL - failure
+ * - - - - read key under module folder
+ * with edid/link identification
+ */
+bool dm_read_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag);
+
+bool dm_query_extended_brightness_caps
+ (struct dc_context *ctx, enum dm_acpi_display_type display,
+ struct dm_acpi_atif_backlight_caps *pCaps);
+
+bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
+
+/*
+ *
+ * print-out services
+ *
+ */
+#define dm_log_to_buffer(buffer, size, fmt, args)\
+ vsnprintf(buffer, size, fmt, args)
+
+unsigned long long dm_get_timestamp(struct dc_context *ctx);
+
+/*
+ * Debug and verification hooks
+ */
+bool dm_helpers_dc_conn_log(
+ struct dc_context *ctx,
+ struct log_entry *entry,
+ enum dc_log_type event);
+
+void dm_dtn_log_begin(struct dc_context *ctx);
+void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...);
+void dm_dtn_log_end(struct dc_context *ctx);
+
+#endif /* __DM_SERVICES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
new file mode 100644
index 000000000000..fa26cf488b3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DM_SERVICES_TYPES_H__
+#define __DM_SERVICES_TYPES_H__
+
+#include "os_types.h"
+#include "dc_types.h"
+
+#include "dm_pp_smu.h"
+
+struct dm_pp_clock_range {
+ int min_khz;
+ int max_khz;
+};
+
+enum dm_pp_clocks_state {
+ DM_PP_CLOCKS_STATE_INVALID,
+ DM_PP_CLOCKS_STATE_ULTRA_LOW,
+ DM_PP_CLOCKS_STATE_LOW,
+ DM_PP_CLOCKS_STATE_NOMINAL,
+ DM_PP_CLOCKS_STATE_PERFORMANCE,
+
+ /* Starting from DCE11, Max 8 levels of DPM state supported. */
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_INVALID = DM_PP_CLOCKS_STATE_INVALID,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_0,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_1,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_2,
+ /* to be backward compatible */
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_3,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_4,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_5,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_6,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_7,
+
+ DM_PP_CLOCKS_MAX_STATES
+};
+
+struct dm_pp_gpu_clock_range {
+ enum dm_pp_clocks_state clock_state;
+ struct dm_pp_clock_range sclk;
+ struct dm_pp_clock_range mclk;
+ struct dm_pp_clock_range eclk;
+ struct dm_pp_clock_range dclk;
+};
+
+enum dm_pp_clock_type {
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK = 1,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK, /* System clock */
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ DM_PP_CLOCK_TYPE_DCFCLK,
+ DM_PP_CLOCK_TYPE_DCEFCLK,
+ DM_PP_CLOCK_TYPE_SOCCLK,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ DM_PP_CLOCK_TYPE_DPPCLK,
+ DM_PP_CLOCK_TYPE_FCLK,
+};
+
+#define DC_DECODE_PP_CLOCK_TYPE(clk_type) \
+ (clk_type) == DM_PP_CLOCK_TYPE_DISPLAY_CLK ? "Display" : \
+ (clk_type) == DM_PP_CLOCK_TYPE_ENGINE_CLK ? "Engine" : \
+ (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : "Invalid"
+
+#define DM_PP_MAX_CLOCK_LEVELS 8
+
+struct dm_pp_clock_levels {
+ uint32_t num_levels;
+ uint32_t clocks_in_khz[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_clock_with_latency {
+ uint32_t clocks_in_khz;
+ uint32_t latency_in_us;
+};
+
+struct dm_pp_clock_levels_with_latency {
+ uint32_t num_levels;
+ struct dm_pp_clock_with_latency data[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_clock_with_voltage {
+ uint32_t clocks_in_khz;
+ uint32_t voltage_in_mv;
+};
+
+struct dm_pp_clock_levels_with_voltage {
+ uint32_t num_levels;
+ struct dm_pp_clock_with_voltage data[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_single_disp_config {
+ enum signal_type signal;
+ uint8_t transmitter;
+ uint8_t ddi_channel_mapping;
+ uint8_t pipe_idx;
+ uint32_t src_height;
+ uint32_t src_width;
+ uint32_t v_refresh;
+ uint32_t sym_clock; /* HDMI only */
+ struct dc_link_settings link_settings; /* DP only */
+};
+
+#define MAX_WM_SETS 4
+
+enum dm_pp_wm_set_id {
+ WM_SET_A = 0,
+ WM_SET_B,
+ WM_SET_C,
+ WM_SET_D,
+ WM_SET_INVALID = 0xffff,
+};
+
+struct dm_pp_clock_range_for_wm_set {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_eng_clk_in_khz;
+ uint32_t wm_max_eng_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_wm_sets_with_clock_ranges {
+ uint32_t num_wm_sets;
+ struct dm_pp_clock_range_for_wm_set wm_clk_ranges[MAX_WM_SETS];
+};
+
+struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_dcfclk_clk_in_khz;
+ uint32_t wm_max_dcfclk_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_socclk_clk_in_khz;
+ uint32_t wm_max_socclk_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_wm_sets_with_clock_ranges_soc15 {
+ uint32_t num_wm_dmif_sets;
+ uint32_t num_wm_mcif_sets;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15
+ wm_dmif_clocks_ranges[MAX_WM_SETS];
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15
+ wm_mcif_clocks_ranges[MAX_WM_SETS];
+};
+
+#define MAX_DISPLAY_CONFIGS 6
+
+struct dm_pp_display_configuration {
+ bool nb_pstate_switch_disable;/* controls NB PState switch */
+ bool cpu_cc6_disable; /* controls CPU CState switch ( on or off) */
+ bool cpu_pstate_disable;
+ uint32_t cpu_pstate_separation_time;
+
+ uint32_t min_memory_clock_khz;
+ uint32_t min_engine_clock_khz;
+ uint32_t min_engine_clock_deep_sleep_khz;
+
+ uint32_t avail_mclk_switch_time_us;
+ uint32_t avail_mclk_switch_time_in_disp_active_us;
+ uint32_t min_dcfclock_khz;
+ uint32_t min_dcfc_deep_sleep_clock_khz;
+
+ uint32_t disp_clk_khz;
+
+ bool all_displays_in_sync;
+
+ uint8_t display_count;
+ struct dm_pp_single_disp_config disp_configs[MAX_DISPLAY_CONFIGS];
+
+ /*Controller Index of primary display - used in MCLK SMC switching hang
+ * SW Workaround*/
+ uint8_t crtc_index;
+ /*htotal*1000/pixelclk - used in MCLK SMC switching hang SW Workaround*/
+ uint32_t line_time_in_us;
+};
+
+struct dm_bl_data_point {
+ /* Brightness level in percentage */
+ uint8_t luminance;
+ /* Brightness level as effective value in range 0-255,
+ * corresponding to above percentage
+ */
+ uint8_t signalLevel;
+};
+
+/* Total size of the structure should not exceed 256 bytes */
+struct dm_acpi_atif_backlight_caps {
+
+
+ uint16_t size; /* Bytes 0-1 (2 bytes) */
+ uint16_t flags; /* Byted 2-3 (2 bytes) */
+ uint8_t errorCode; /* Byte 4 */
+ uint8_t acLevelPercentage; /* Byte 5 */
+ uint8_t dcLevelPercentage; /* Byte 6 */
+ uint8_t minInputSignal; /* Byte 7 */
+ uint8_t maxInputSignal; /* Byte 8 */
+ uint8_t numOfDataPoints; /* Byte 9 */
+ struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+};
+
+enum dm_acpi_display_type {
+ AcpiDisplayType_LCD1 = 0,
+ AcpiDisplayType_CRT1 = 1,
+ AcpiDisplayType_DFP1 = 3,
+ AcpiDisplayType_CRT2 = 4,
+ AcpiDisplayType_LCD2 = 5,
+ AcpiDisplayType_DFP2 = 7,
+ AcpiDisplayType_DFP3 = 9,
+ AcpiDisplayType_DFP4 = 10,
+ AcpiDisplayType_DFP5 = 11,
+ AcpiDisplayType_DFP6 = 12
+};
+
+enum dm_pp_power_level {
+ DM_PP_POWER_LEVEL_INVALID,
+ DM_PP_POWER_LEVEL_ULTRA_LOW,
+ DM_PP_POWER_LEVEL_LOW,
+ DM_PP_POWER_LEVEL_NOMINAL,
+ DM_PP_POWER_LEVEL_PERFORMANCE,
+
+ DM_PP_POWER_LEVEL_0 = DM_PP_POWER_LEVEL_ULTRA_LOW,
+ DM_PP_POWER_LEVEL_1 = DM_PP_POWER_LEVEL_LOW,
+ DM_PP_POWER_LEVEL_2 = DM_PP_POWER_LEVEL_NOMINAL,
+ DM_PP_POWER_LEVEL_3 = DM_PP_POWER_LEVEL_PERFORMANCE,
+ DM_PP_POWER_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
+ DM_PP_POWER_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
+ DM_PP_POWER_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
+ DM_PP_POWER_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
+};
+
+struct dm_pp_power_level_change_request {
+ enum dm_pp_power_level power_level;
+};
+
+struct dm_pp_clock_for_voltage_req {
+ enum dm_pp_clock_type clk_type;
+ uint32_t clocks_in_khz;
+};
+
+struct dm_pp_static_clock_info {
+ uint32_t max_sclk_khz;
+ uint32_t max_mclk_khz;
+
+ /* max possible display block clocks state */
+ enum dm_pp_clocks_state max_clocks_state;
+};
+
+struct dtn_min_clk_info {
+ uint32_t disp_clk_khz;
+ uint32_t min_engine_clock_khz;
+ uint32_t min_memory_clock_khz;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
new file mode 100644
index 000000000000..3488af2b5786
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -0,0 +1,43 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'utils' sub-component of DAL.
+# It provides the general basic services required by other DAL
+# subcomponents.
+
+CFLAGS_display_mode_vba.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_mode_lib.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_pipe_clocks.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dml1_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_rq_dlg_helpers.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_soc_bounding_box.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dml_common_defs.o := -mhard-float -msse -mpreferred-stack-boundary=4
+
+
+DML = display_mode_lib.o display_rq_dlg_calc.o \
+ display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
+ soc_bounding_box.o dml_common_defs.o display_mode_vba.o
+
+AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
new file mode 100644
index 000000000000..ea4cde952f4f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DC_FEATURES_H__
+#define __DC_FEATURES_H__
+
+// local features
+#define DC__PRESENT 1
+#define DC__PRESENT__1 1
+#define DC__NUM_DPP 4
+#define DC__VOLTAGE_STATES 7
+#define DC__NUM_DPP__4 1
+#define DC__NUM_DPP__0_PRESENT 1
+#define DC__NUM_DPP__1_PRESENT 1
+#define DC__NUM_DPP__2_PRESENT 1
+#define DC__NUM_DPP__3_PRESENT 1
+#define DC__NUM_DPP__MAX 8
+#define DC__NUM_DPP__MAX__8 1
+#define DC__PIPE_10BIT 0
+#define DC__PIPE_10BIT__0 1
+#define DC__PIPE_10BIT__MAX 1
+#define DC__PIPE_10BIT__MAX__1 1
+#define DC__NUM_OPP 4
+#define DC__NUM_OPP__4 1
+#define DC__NUM_OPP__0_PRESENT 1
+#define DC__NUM_OPP__1_PRESENT 1
+#define DC__NUM_OPP__2_PRESENT 1
+#define DC__NUM_OPP__3_PRESENT 1
+#define DC__NUM_OPP__MAX 6
+#define DC__NUM_OPP__MAX__6 1
+#define DC__NUM_DSC 0
+#define DC__NUM_DSC__0 1
+#define DC__NUM_DSC__MAX 6
+#define DC__NUM_DSC__MAX__6 1
+#define DC__NUM_ABM 1
+#define DC__NUM_ABM__1 1
+#define DC__NUM_ABM__0_PRESENT 1
+#define DC__NUM_ABM__MAX 2
+#define DC__NUM_ABM__MAX__2 1
+#define DC__ODM_PRESENT 0
+#define DC__ODM_PRESENT__0 1
+#define DC__NUM_OTG 4
+#define DC__NUM_OTG__4 1
+#define DC__NUM_OTG__0_PRESENT 1
+#define DC__NUM_OTG__1_PRESENT 1
+#define DC__NUM_OTG__2_PRESENT 1
+#define DC__NUM_OTG__3_PRESENT 1
+#define DC__NUM_OTG__MAX 6
+#define DC__NUM_OTG__MAX__6 1
+#define DC__NUM_DWB 2
+#define DC__NUM_DWB__2 1
+#define DC__NUM_DWB__0_PRESENT 1
+#define DC__NUM_DWB__1_PRESENT 1
+#define DC__NUM_DWB__MAX 2
+#define DC__NUM_DWB__MAX__2 1
+#define DC__NUM_DIG 4
+#define DC__NUM_DIG__4 1
+#define DC__NUM_DIG__0_PRESENT 1
+#define DC__NUM_DIG__1_PRESENT 1
+#define DC__NUM_DIG__2_PRESENT 1
+#define DC__NUM_DIG__3_PRESENT 1
+#define DC__NUM_DIG__MAX 6
+#define DC__NUM_DIG__MAX__6 1
+#define DC__NUM_AUX 4
+#define DC__NUM_AUX__4 1
+#define DC__NUM_AUX__0_PRESENT 1
+#define DC__NUM_AUX__1_PRESENT 1
+#define DC__NUM_AUX__2_PRESENT 1
+#define DC__NUM_AUX__3_PRESENT 1
+#define DC__NUM_AUX__MAX 6
+#define DC__NUM_AUX__MAX__6 1
+#define DC__NUM_AUDIO_STREAMS 4
+#define DC__NUM_AUDIO_STREAMS__4 1
+#define DC__NUM_AUDIO_STREAMS__0_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__1_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__2_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__3_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__MAX 8
+#define DC__NUM_AUDIO_STREAMS__MAX__8 1
+#define DC__NUM_AUDIO_ENDPOINTS 6
+#define DC__NUM_AUDIO_ENDPOINTS__6 1
+#define DC__NUM_AUDIO_ENDPOINTS__0_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__1_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__2_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__3_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__4_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__5_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__MAX 8
+#define DC__NUM_AUDIO_ENDPOINTS__MAX__8 1
+#define DC__NUM_AUDIO_INPUT_STREAMS 0
+#define DC__NUM_AUDIO_INPUT_STREAMS__0 1
+#define DC__NUM_AUDIO_INPUT_STREAMS__MAX 8
+#define DC__NUM_AUDIO_INPUT_STREAMS__MAX__8 1
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS 0
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__0 1
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__MAX 8
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__MAX__8 1
+#define DC__NUM_CURSOR 1
+#define DC__NUM_CURSOR__1 1
+#define DC__NUM_CURSOR__0_PRESENT 1
+#define DC__NUM_CURSOR__MAX 2
+#define DC__NUM_CURSOR__MAX__2 1
+#define DC__DIGITAL_BYPASS_PRESENT 0
+#define DC__DIGITAL_BYPASS_PRESENT__0 1
+#define DC__HCID_HWMAJVER 1
+#define DC__HCID_HWMAJVER__1 1
+#define DC__HCID_HWMINVER 0
+#define DC__HCID_HWMINVER__0 1
+#define DC__HCID_HWREV 0
+#define DC__HCID_HWREV__0 1
+#define DC__ROMSTRAP_PRESENT 0
+#define DC__ROMSTRAP_PRESENT__0 1
+#define DC__NUM_RBBMIF_DECODES 30
+#define DC__NUM_RBBMIF_DECODES__30 1
+#define DC__NUM_DBG_REGS 36
+#define DC__NUM_DBG_REGS__36 1
+#define DC__NUM_PIPES_UNDERLAY 0
+#define DC__NUM_PIPES_UNDERLAY__0 1
+#define DC__NUM_PIPES_UNDERLAY__MAX 2
+#define DC__NUM_PIPES_UNDERLAY__MAX__2 1
+#define DC__NUM_VCE_ENGINE 1
+#define DC__NUM_VCE_ENGINE__1 1
+#define DC__NUM_VCE_ENGINE__0_PRESENT 1
+#define DC__NUM_VCE_ENGINE__MAX 2
+#define DC__NUM_VCE_ENGINE__MAX__2 1
+#define DC__OTG_EXTERNAL_SYNC_PRESENT 0
+#define DC__OTG_EXTERNAL_SYNC_PRESENT__0 1
+#define DC__OTG_CRC_PRESENT 1
+#define DC__OTG_CRC_PRESENT__1 1
+#define DC__VIP_PRESENT 0
+#define DC__VIP_PRESENT__0 1
+#define DC__DTMTEST_PRESENT 0
+#define DC__DTMTEST_PRESENT__0 1
+#define DC__POWER_GATE_PRESENT 1
+#define DC__POWER_GATE_PRESENT__1 1
+#define DC__MEM_PG 1
+#define DC__MEM_PG__1 1
+#define DC__FMT_SRC_SEL_PRESENT 0
+#define DC__FMT_SRC_SEL_PRESENT__0 1
+#define DC__DIG_FEATURES__HDMI_PRESENT 1
+#define DC__DIG_FEATURES__HDMI_PRESENT__1 1
+#define DC__DIG_FEATURES__DP_PRESENT 1
+#define DC__DIG_FEATURES__DP_PRESENT__1 1
+#define DC__DIG_FEATURES__DP_MST_PRESENT 1
+#define DC__DIG_FEATURES__DP_MST_PRESENT__1 1
+#define DC__DIG_LP_FEATURES__HDMI_PRESENT 0
+#define DC__DIG_LP_FEATURES__HDMI_PRESENT__0 1
+#define DC__DIG_LP_FEATURES__DP_PRESENT 1
+#define DC__DIG_LP_FEATURES__DP_PRESENT__1 1
+#define DC__DIG_LP_FEATURES__DP_MST_PRESENT 0
+#define DC__DIG_LP_FEATURES__DP_MST_PRESENT__0 1
+#define DC__DIG_RESYNC_FIFO_SIZE 14
+#define DC__DIG_RESYNC_FIFO_SIZE__14 1
+#define DC__DIG_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__12_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__13_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DIG_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__DAC_RESYNC_FIFO_SIZE 12
+#define DC__DAC_RESYNC_FIFO_SIZE__12 1
+#define DC__DAC_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DAC_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__DVO_RESYNC_FIFO_SIZE 12
+#define DC__DVO_RESYNC_FIFO_SIZE__12 1
+#define DC__DVO_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DVO_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__MEM_CDC_PRESENT 1
+#define DC__MEM_CDC_PRESENT__1 1
+#define DC__NUM_HPD 4
+#define DC__NUM_HPD__4 1
+#define DC__NUM_HPD__0_PRESENT 1
+#define DC__NUM_HPD__1_PRESENT 1
+#define DC__NUM_HPD__2_PRESENT 1
+#define DC__NUM_HPD__3_PRESENT 1
+#define DC__NUM_HPD__MAX 6
+#define DC__NUM_HPD__MAX__6 1
+#define DC__NUM_DDC_PAIRS 4
+#define DC__NUM_DDC_PAIRS__4 1
+#define DC__NUM_DDC_PAIRS__0_PRESENT 1
+#define DC__NUM_DDC_PAIRS__1_PRESENT 1
+#define DC__NUM_DDC_PAIRS__2_PRESENT 1
+#define DC__NUM_DDC_PAIRS__3_PRESENT 1
+#define DC__NUM_DDC_PAIRS__MAX 6
+#define DC__NUM_DDC_PAIRS__MAX__6 1
+#define DC__NUM_AUDIO_PLL 0
+#define DC__NUM_AUDIO_PLL__0 1
+#define DC__NUM_AUDIO_PLL__MAX 2
+#define DC__NUM_AUDIO_PLL__MAX__2 1
+#define DC__NUM_PIXEL_PLL 1
+#define DC__NUM_PIXEL_PLL__1 1
+#define DC__NUM_PIXEL_PLL__0_PRESENT 1
+#define DC__NUM_PIXEL_PLL__MAX 4
+#define DC__NUM_PIXEL_PLL__MAX__4 1
+#define DC__NUM_CASCADED_PLL 0
+#define DC__NUM_CASCADED_PLL__0 1
+#define DC__NUM_CASCADED_PLL__MAX 3
+#define DC__NUM_CASCADED_PLL__MAX__3 1
+#define DC__PIXCLK_FROM_PHYPLL 1
+#define DC__PIXCLK_FROM_PHYPLL__1 1
+#define DC__NB_STUTTER_MODE_PRESENT 0
+#define DC__NB_STUTTER_MODE_PRESENT__0 1
+#define DC__I2S0_AND_SPDIF0_PRESENT 0
+#define DC__I2S0_AND_SPDIF0_PRESENT__0 1
+#define DC__I2S1_PRESENT 0
+#define DC__I2S1_PRESENT__0 1
+#define DC__SPDIF1_PRESENT 0
+#define DC__SPDIF1_PRESENT__0 1
+#define DC__DSI_PRESENT 0
+#define DC__DSI_PRESENT__0 1
+#define DC__DACA_PRESENT 0
+#define DC__DACA_PRESENT__0 1
+#define DC__DACB_PRESENT 0
+#define DC__DACB_PRESENT__0 1
+#define DC__NUM_PIPES 4
+#define DC__NUM_PIPES__4 1
+#define DC__NUM_PIPES__0_PRESENT 1
+#define DC__NUM_PIPES__1_PRESENT 1
+#define DC__NUM_PIPES__2_PRESENT 1
+#define DC__NUM_PIPES__3_PRESENT 1
+#define DC__NUM_PIPES__MAX 6
+#define DC__NUM_PIPES__MAX__6 1
+#define DC__NUM_DIG_LP 0
+#define DC__NUM_DIG_LP__0 1
+#define DC__NUM_DIG_LP__MAX 2
+#define DC__NUM_DIG_LP__MAX__2 1
+#define DC__DPDEBUG_PRESENT 0
+#define DC__DPDEBUG_PRESENT__0 1
+#define DC__DISPLAY_WB_PRESENT 1
+#define DC__DISPLAY_WB_PRESENT__1 1
+#define DC__NUM_CWB 0
+#define DC__NUM_CWB__0 1
+#define DC__NUM_CWB__MAX 2
+#define DC__NUM_CWB__MAX__2 1
+#define DC__MVP_PRESENT 0
+#define DC__MVP_PRESENT__0 1
+#define DC__DVO_PRESENT 0
+#define DC__DVO_PRESENT__0 1
+#define DC__ABM_PRESENT 0
+#define DC__ABM_PRESENT__0 1
+#define DC__BPHYC_PLL_PRESENT 0
+#define DC__BPHYC_PLL_PRESENT__0 1
+#define DC__BPHYC_UNIPHY_PRESENT 0
+#define DC__BPHYC_UNIPHY_PRESENT__0 1
+#define DC__PHY_BROADCAST_PRESENT 0
+#define DC__PHY_BROADCAST_PRESENT__0 1
+#define DC__NUM_OF_DCRX_SD 0
+#define DC__NUM_OF_DCRX_SD__0 1
+#define DC__DVO_17BIT_MAPPING 0
+#define DC__DVO_17BIT_MAPPING__0 1
+#define DC__AVSYNC_PRESENT 0
+#define DC__AVSYNC_PRESENT__0 1
+#define DC__NUM_OF_DCRX_PORTS 0
+#define DC__NUM_OF_DCRX_PORTS__0 1
+#define DC__NUM_OF_DCRX_PORTS__MAX 1
+#define DC__NUM_OF_DCRX_PORTS__MAX__1 1
+#define DC__NUM_PHY 4
+#define DC__NUM_PHY__4 1
+#define DC__NUM_PHY__0_PRESENT 1
+#define DC__NUM_PHY__1_PRESENT 1
+#define DC__NUM_PHY__2_PRESENT 1
+#define DC__NUM_PHY__3_PRESENT 1
+#define DC__NUM_PHY__MAX 7
+#define DC__NUM_PHY__MAX__7 1
+#define DC__NUM_PHY_LP 0
+#define DC__NUM_PHY_LP__0 1
+#define DC__NUM_PHY_LP__MAX 2
+#define DC__NUM_PHY_LP__MAX__2 1
+#define DC__SYNC_CELL vid_sync_gf14lpp
+#define DC__SYNC_CELL__VID_SYNC_GF14LPP 1
+#define DC__USE_NEW_VSS 1
+#define DC__USE_NEW_VSS__1 1
+#define DC__SYNC_CELL_DISPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DISPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DVOCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DVOCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_PIXCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_PIXCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_SYMCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_SYMCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DPPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DPPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DPREFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DPREFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_REFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_REFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_PCIE_REFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_PCIE_REFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_MVPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_MVPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_SCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_SCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DCEFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DCEFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_AMCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_AMCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DSICLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DSICLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_BYTECLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_BYTECLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_ESCCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_ESCCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DB_CLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DB_CLK_NUM_LATCHES__6 1
+#define UNIPHYA_PRESENT 1
+#define UNIPHYA_PRESENT__1 1
+#define DC__UNIPHYA_PRESENT 1
+#define DC__UNIPHYA_PRESENT__1 1
+#define UNIPHYB_PRESENT 1
+#define UNIPHYB_PRESENT__1 1
+#define DC__UNIPHYB_PRESENT 1
+#define DC__UNIPHYB_PRESENT__1 1
+#define UNIPHYC_PRESENT 1
+#define UNIPHYC_PRESENT__1 1
+#define DC__UNIPHYC_PRESENT 1
+#define DC__UNIPHYC_PRESENT__1 1
+#define UNIPHYD_PRESENT 1
+#define UNIPHYD_PRESENT__1 1
+#define DC__UNIPHYD_PRESENT 1
+#define DC__UNIPHYD_PRESENT__1 1
+#define UNIPHYE_PRESENT 0
+#define UNIPHYE_PRESENT__0 1
+#define DC__UNIPHYE_PRESENT 0
+#define DC__UNIPHYE_PRESENT__0 1
+#define UNIPHYF_PRESENT 0
+#define UNIPHYF_PRESENT__0 1
+#define DC__UNIPHYF_PRESENT 0
+#define DC__UNIPHYF_PRESENT__0 1
+#define UNIPHYG_PRESENT 0
+#define UNIPHYG_PRESENT__0 1
+#define DC__UNIPHYG_PRESENT 0
+#define DC__UNIPHYG_PRESENT__0 1
+#define DC__TMDS_LINK tmds_link_dual
+#define DC__TMDS_LINK__TMDS_LINK_DUAL 1
+#define DC__WBSCL_PIXBW 8
+#define DC__WBSCL_PIXBW__8 1
+#define DC__DWB_CSC_PRESENT 0
+#define DC__DWB_CSC_PRESENT__0 1
+#define DC__DWB_LUMA_SCL_PRESENT 0
+#define DC__DWB_LUMA_SCL_PRESENT__0 1
+#define DC__DENTIST_INTERFACE_PRESENT 1
+#define DC__DENTIST_INTERFACE_PRESENT__1 1
+#define DC__GENERICA_PRESENT 1
+#define DC__GENERICA_PRESENT__1 1
+#define DC__GENERICB_PRESENT 1
+#define DC__GENERICB_PRESENT__1 1
+#define DC__GENERICC_PRESENT 0
+#define DC__GENERICC_PRESENT__0 1
+#define DC__GENERICD_PRESENT 0
+#define DC__GENERICD_PRESENT__0 1
+#define DC__GENERICE_PRESENT 0
+#define DC__GENERICE_PRESENT__0 1
+#define DC__GENERICF_PRESENT 0
+#define DC__GENERICF_PRESENT__0 1
+#define DC__GENERICG_PRESENT 0
+#define DC__GENERICG_PRESENT__0 1
+#define DC__UNIPHY_VOLTAGE_MODE 1
+#define DC__UNIPHY_VOLTAGE_MODE__1 1
+#define DC__BLON_TYPE dedicated
+#define DC__BLON_TYPE__DEDICATED 1
+#define DC__UNIPHY_STAGGER_CH_PRESENT 1
+#define DC__UNIPHY_STAGGER_CH_PRESENT__1 1
+#define DC__XDMA_PRESENT 0
+#define DC__XDMA_PRESENT__0 1
+#define XDMA__PRESENT 0
+#define XDMA__PRESENT__0 1
+#define DC__DP_MEM_PG 0
+#define DC__DP_MEM_PG__0 1
+#define DP__MEM_PG 0
+#define DP__MEM_PG__0 1
+#define DC__AFMT_MEM_PG 0
+#define DC__AFMT_MEM_PG__0 1
+#define AFMT__MEM_PG 0
+#define AFMT__MEM_PG__0 1
+#define DC__HDMI_MEM_PG 0
+#define DC__HDMI_MEM_PG__0 1
+#define HDMI__MEM_PG 0
+#define HDMI__MEM_PG__0 1
+#define DC__I2C_MEM_PG 0
+#define DC__I2C_MEM_PG__0 1
+#define I2C__MEM_PG 0
+#define I2C__MEM_PG__0 1
+#define DC__DSCL_MEM_PG 0
+#define DC__DSCL_MEM_PG__0 1
+#define DSCL__MEM_PG 0
+#define DSCL__MEM_PG__0 1
+#define DC__CM_MEM_PG 0
+#define DC__CM_MEM_PG__0 1
+#define CM__MEM_PG 0
+#define CM__MEM_PG__0 1
+#define DC__OBUF_MEM_PG 0
+#define DC__OBUF_MEM_PG__0 1
+#define OBUF__MEM_PG 0
+#define OBUF__MEM_PG__0 1
+#define DC__WBIF_MEM_PG 1
+#define DC__WBIF_MEM_PG__1 1
+#define WBIF__MEM_PG 1
+#define WBIF__MEM_PG__1 1
+#define DC__VGA_MEM_PG 0
+#define DC__VGA_MEM_PG__0 1
+#define VGA__MEM_PG 0
+#define VGA__MEM_PG__0 1
+#define DC__FMT_MEM_PG 0
+#define DC__FMT_MEM_PG__0 1
+#define FMT__MEM_PG 0
+#define FMT__MEM_PG__0 1
+#define DC__ODM_MEM_PG 0
+#define DC__ODM_MEM_PG__0 1
+#define ODM__MEM_PG 0
+#define ODM__MEM_PG__0 1
+#define DC__DSI_MEM_PG 0
+#define DC__DSI_MEM_PG__0 1
+#define DSI__MEM_PG 0
+#define DSI__MEM_PG__0 1
+#define DC__AZ_MEM_PG 1
+#define DC__AZ_MEM_PG__1 1
+#define AZ__MEM_PG 1
+#define AZ__MEM_PG__1 1
+#define DC__WBSCL_MEM1P1024X64QS_MEM_PG 1
+#define DC__WBSCL_MEM1P1024X64QS_MEM_PG__1 1
+#define WBSCL_MEM1P1024X64QS__MEM_PG 1
+#define WBSCL_MEM1P1024X64QS__MEM_PG__1 1
+#define DC__WBSCL_MEM1P528X64QS_MEM_PG 1
+#define DC__WBSCL_MEM1P528X64QS_MEM_PG__1 1
+#define WBSCL_MEM1P528X64QS__MEM_PG 1
+#define WBSCL_MEM1P528X64QS__MEM_PG__1 1
+#define DC__DMCU_MEM1P1024X32BQS_MEM_PG 1
+#define DC__DMCU_MEM1P1024X32BQS_MEM_PG__1 1
+#define DMCU_MEM1P1024X32BQS__MEM_PG 1
+#define DMCU_MEM1P1024X32BQS__MEM_PG__1 1
+#define DC__HUBBUB_SDP_TAG_INT_MEM_PG 0
+#define DC__HUBBUB_SDP_TAG_INT_MEM_PG__0 1
+#define HUBBUB_SDP_TAG_INT__MEM_PG 0
+#define HUBBUB_SDP_TAG_INT__MEM_PG__0 1
+#define DC__HUBBUB_SDP_TAG_EXT_MEM_PG 0
+#define DC__HUBBUB_SDP_TAG_EXT_MEM_PG__0 1
+#define HUBBUB_SDP_TAG_EXT__MEM_PG 0
+#define HUBBUB_SDP_TAG_EXT__MEM_PG__0 1
+#define DC__HUBBUB_RET_ZERO_MEM_PG 0
+#define DC__HUBBUB_RET_ZERO_MEM_PG__0 1
+#define HUBBUB_RET_ZERO__MEM_PG 0
+#define HUBBUB_RET_ZERO__MEM_PG__0 1
+#define DC__HUBBUB_RET_ROB_MEM_PG 0
+#define DC__HUBBUB_RET_ROB_MEM_PG__0 1
+#define HUBBUB_RET_ROB__MEM_PG 0
+#define HUBBUB_RET_ROB__MEM_PG__0 1
+#define DC__HUBPRET_CUR_ROB_MEM_PG 0
+#define DC__HUBPRET_CUR_ROB_MEM_PG__0 1
+#define HUBPRET_CUR_ROB__MEM_PG 0
+#define HUBPRET_CUR_ROB__MEM_PG__0 1
+#define DC__HUBPRET_CUR_CDC_MEM_PG 0
+#define DC__HUBPRET_CUR_CDC_MEM_PG__0 1
+#define HUBPRET_CUR_CDC__MEM_PG 0
+#define HUBPRET_CUR_CDC__MEM_PG__0 1
+#define DC__HUBPREQ_MPTE_MEM_PG 0
+#define DC__HUBPREQ_MPTE_MEM_PG__0 1
+#define HUBPREQ_MPTE__MEM_PG 0
+#define HUBPREQ_MPTE__MEM_PG__0 1
+#define DC__HUBPREQ_META_MEM_PG 0
+#define DC__HUBPREQ_META_MEM_PG__0 1
+#define HUBPREQ_META__MEM_PG 0
+#define HUBPREQ_META__MEM_PG__0 1
+#define DC__HUBPREQ_DPTE_MEM_PG 0
+#define DC__HUBPREQ_DPTE_MEM_PG__0 1
+#define HUBPREQ_DPTE__MEM_PG 0
+#define HUBPREQ_DPTE__MEM_PG__0 1
+#define DC__HUBPRET_DET_MEM_PG 0
+#define DC__HUBPRET_DET_MEM_PG__0 1
+#define HUBPRET_DET__MEM_PG 0
+#define HUBPRET_DET__MEM_PG__0 1
+#define DC__HUBPRET_PIX_CDC_MEM_PG 0
+#define DC__HUBPRET_PIX_CDC_MEM_PG__0 1
+#define HUBPRET_PIX_CDC__MEM_PG 0
+#define HUBPRET_PIX_CDC__MEM_PG__0 1
+#define DC__TOP_BLKS__DCCG 1
+#define DC__TOP_BLKS__DCHUBBUB 1
+#define DC__TOP_BLKS__DCHUBP 1
+#define DC__TOP_BLKS__HDA 1
+#define DC__TOP_BLKS__DIO 1
+#define DC__TOP_BLKS__DCIO 1
+#define DC__TOP_BLKS__DMU 1
+#define DC__TOP_BLKS__DPP 1
+#define DC__TOP_BLKS__MPC 1
+#define DC__TOP_BLKS__OPP 1
+#define DC__TOP_BLKS__OPTC 1
+#define DC__TOP_BLKS__MMHUBBUB 1
+#define DC__TOP_BLKS__WB 1
+#define DC__TOP_BLKS__MAX 13
+#define DC__TOP_BLKS__MAX__13 1
+#define DC__DCHUBP_DPP_SF_PIXEL_CREDITS 9
+#define DC__DCHUBP_DPP_SF_PIXEL_CREDITS__9 1
+#define DC__DPP_MPC_SF_PIXEL_CREDITS 9
+#define DC__DPP_MPC_SF_PIXEL_CREDITS__9 1
+#define DC__MPC_OPP_SF_PIXEL_CREDITS 8
+#define DC__MPC_OPP_SF_PIXEL_CREDITS__8 1
+#define DC__OPP_OPTC_SF_PIXEL_CREDITS 8
+#define DC__OPP_OPTC_SF_PIXEL_CREDITS__8 1
+#define DC__SFR_SFT_ROUND_TRIP_DELAY 5
+#define DC__SFR_SFT_ROUND_TRIP_DELAY__5 1
+#define DC__REPEATER_PROJECT_MAX 8
+#define DC__REPEATER_PROJECT_MAX__8 1
+#define DC__SURFACE_422_CAPABLE 0
+#define DC__SURFACE_422_CAPABLE__0 1
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
new file mode 100644
index 000000000000..b1ad3553f900
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_ENUMS_H__
+#define __DISPLAY_MODE_ENUMS_H__
+
+enum output_encoder_class {
+ dm_dp = 0, dm_hdmi = 1, dm_wb = 2, dm_edp
+};
+enum output_format_class {
+ dm_444 = 0, dm_420 = 1, dm_n422, dm_s422
+};
+enum source_format_class {
+ dm_444_16 = 0,
+ dm_444_32 = 1,
+ dm_444_64 = 2,
+ dm_420_8 = 3,
+ dm_420_10 = 4,
+ dm_422_8 = 5,
+ dm_422_10 = 6,
+ dm_444_8 = 7,
+ dm_mono_8,
+ dm_mono_16
+};
+enum output_bpc_class {
+ dm_out_6 = 0, dm_out_8 = 1, dm_out_10 = 2, dm_out_12 = 3, dm_out_16 = 4
+};
+enum scan_direction_class {
+ dm_horz = 0, dm_vert = 1
+};
+enum dm_swizzle_mode {
+ dm_sw_linear = 0,
+ dm_sw_256b_s = 1,
+ dm_sw_256b_d = 2,
+ dm_sw_SPARE_0 = 3,
+ dm_sw_SPARE_1 = 4,
+ dm_sw_4kb_s = 5,
+ dm_sw_4kb_d = 6,
+ dm_sw_SPARE_2 = 7,
+ dm_sw_SPARE_3 = 8,
+ dm_sw_64kb_s = 9,
+ dm_sw_64kb_d = 10,
+ dm_sw_SPARE_4 = 11,
+ dm_sw_SPARE_5 = 12,
+ dm_sw_var_s = 13,
+ dm_sw_var_d = 14,
+ dm_sw_SPARE_6 = 15,
+ dm_sw_SPARE_7 = 16,
+ dm_sw_64kb_s_t = 17,
+ dm_sw_64kb_d_t = 18,
+ dm_sw_SPARE_10 = 19,
+ dm_sw_SPARE_11 = 20,
+ dm_sw_4kb_s_x = 21,
+ dm_sw_4kb_d_x = 22,
+ dm_sw_SPARE_12 = 23,
+ dm_sw_SPARE_13 = 24,
+ dm_sw_64kb_s_x = 25,
+ dm_sw_64kb_d_x = 26,
+ dm_sw_SPARE_14 = 27,
+ dm_sw_SPARE_15 = 28,
+ dm_sw_var_s_x = 29,
+ dm_sw_var_d_x = 30,
+ dm_sw_64kb_r_x,
+ dm_sw_gfx7_2d_thin_lvp,
+ dm_sw_gfx7_2d_thin_gl
+};
+enum lb_depth {
+ dm_lb_10 = 0, dm_lb_8 = 1, dm_lb_6 = 2, dm_lb_12 = 3, dm_lb_16
+};
+enum voltage_state {
+ dm_vmin = 0, dm_vmid = 1, dm_vnom = 2, dm_vmax = 3
+};
+enum source_macro_tile_size {
+ dm_4k_tile = 0, dm_64k_tile = 1, dm_256k_tile = 2
+};
+enum cursor_bpp {
+ dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2
+};
+enum clock_change_support {
+ dm_dram_clock_change_uninitialized = 0,
+ dm_dram_clock_change_vactive,
+ dm_dram_clock_change_vblank,
+ dm_dram_clock_change_unsupported
+};
+
+enum output_standard {
+ dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
new file mode 100644
index 000000000000..4c31fa54af39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "dc_features.h"
+
+static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
+{
+ if (project == DML_PROJECT_RAVEN1) {
+ soc->sr_exit_time_us = 9.0;
+ soc->sr_enter_plus_exit_time_us = 11.0;
+ soc->urgent_latency_us = 4.0;
+ soc->writeback_latency_us = 12.0;
+ soc->ideal_dram_bw_after_urgent_percent = 80.0;
+ soc->max_request_size_bytes = 256;
+
+ soc->vmin.dcfclk_mhz = 300.0;
+ soc->vmin.dispclk_mhz = 608.0;
+ soc->vmin.dppclk_mhz = 435.0;
+ soc->vmin.dram_bw_per_chan_gbps = 12.8;
+ soc->vmin.phyclk_mhz = 540.0;
+ soc->vmin.socclk_mhz = 208.0;
+
+ soc->vmid.dcfclk_mhz = 600.0;
+ soc->vmid.dispclk_mhz = 661.0;
+ soc->vmid.dppclk_mhz = 661.0;
+ soc->vmid.dram_bw_per_chan_gbps = 12.8;
+ soc->vmid.phyclk_mhz = 540.0;
+ soc->vmid.socclk_mhz = 208.0;
+
+ soc->vnom.dcfclk_mhz = 600.0;
+ soc->vnom.dispclk_mhz = 661.0;
+ soc->vnom.dppclk_mhz = 661.0;
+ soc->vnom.dram_bw_per_chan_gbps = 38.4;
+ soc->vnom.phyclk_mhz = 810;
+ soc->vnom.socclk_mhz = 208.0;
+
+ soc->vmax.dcfclk_mhz = 600.0;
+ soc->vmax.dispclk_mhz = 1086.0;
+ soc->vmax.dppclk_mhz = 661.0;
+ soc->vmax.dram_bw_per_chan_gbps = 38.4;
+ soc->vmax.phyclk_mhz = 810.0;
+ soc->vmax.socclk_mhz = 208.0;
+
+ soc->downspread_percent = 0.5;
+ soc->dram_page_open_time_ns = 50.0;
+ soc->dram_rw_turnaround_time_ns = 17.5;
+ soc->dram_return_buffer_per_channel_bytes = 8192;
+ soc->round_trip_ping_latency_dcfclk_cycles = 128;
+ soc->urgent_out_of_order_return_per_channel_bytes = 256;
+ soc->channel_interleave_bytes = 256;
+ soc->num_banks = 8;
+ soc->num_chans = 2;
+ soc->vmm_page_size_bytes = 4096;
+ soc->dram_clock_change_latency_us = 17.0;
+ soc->writeback_dram_clock_change_latency_us = 23.0;
+ soc->return_bus_width_bytes = 64;
+ } else {
+ BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ }
+}
+
+static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+{
+ if (project == DML_PROJECT_RAVEN1) {
+ ip->rob_buffer_size_kbytes = 64;
+ ip->det_buffer_size_kbytes = 164;
+ ip->dpte_buffer_size_in_pte_reqs = 42;
+ ip->dpp_output_buffer_pixels = 2560;
+ ip->opp_output_buffer_lines = 1;
+ ip->pixel_chunk_size_kbytes = 8;
+ ip->pte_enable = 1;
+ ip->pte_chunk_size_kbytes = 2;
+ ip->meta_chunk_size_kbytes = 2;
+ ip->writeback_chunk_size_kbytes = 2;
+ ip->line_buffer_size_bits = 589824;
+ ip->max_line_buffer_lines = 12;
+ ip->IsLineBufferBppFixed = 0;
+ ip->LineBufferFixedBpp = -1;
+ ip->writeback_luma_buffer_size_kbytes = 12;
+ ip->writeback_chroma_buffer_size_kbytes = 8;
+ ip->max_num_dpp = 4;
+ ip->max_num_wb = 2;
+ ip->max_dchub_pscl_bw_pix_per_clk = 4;
+ ip->max_pscl_lb_bw_pix_per_clk = 2;
+ ip->max_lb_vscl_bw_pix_per_clk = 4;
+ ip->max_vscl_hscl_bw_pix_per_clk = 4;
+ ip->max_hscl_ratio = 4;
+ ip->max_vscl_ratio = 4;
+ ip->hscl_mults = 4;
+ ip->vscl_mults = 4;
+ ip->max_hscl_taps = 8;
+ ip->max_vscl_taps = 8;
+ ip->dispclk_ramp_margin_percent = 1;
+ ip->underscan_factor = 1.10;
+ ip->min_vblank_lines = 14;
+ ip->dppclk_delay_subtotal = 90;
+ ip->dispclk_delay_subtotal = 42;
+ ip->dcfclk_cstate_latency = 10;
+ ip->max_inter_dcn_tile_repeaters = 8;
+ ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0;
+ ip->bug_forcing_LC_req_same_size_fixed = 0;
+ } else {
+ BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ }
+}
+
+void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
+{
+ if (lib->project != project) {
+ set_soc_bounding_box(&lib->soc, project);
+ set_ip_params(&lib->ip, project);
+ lib->project = project;
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
new file mode 100644
index 000000000000..26f4f2a3d90d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_LIB_H__
+#define __DISPLAY_MODE_LIB_H__
+
+
+#include "dml_common_defs.h"
+#include "soc_bounding_box.h"
+#include "display_mode_vba.h"
+#include "display_rq_dlg_calc.h"
+#include "dml1_display_rq_dlg_calc.h"
+
+enum dml_project {
+ DML_PROJECT_UNDEFINED,
+ DML_PROJECT_RAVEN1
+};
+
+struct display_mode_lib {
+ struct _vcs_dpi_ip_params_st ip;
+ struct _vcs_dpi_soc_bounding_box_st soc;
+ enum dml_project project;
+ struct vba_vars_st vba;
+ struct dal_logger *logger;
+};
+
+void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
new file mode 100644
index 000000000000..baf182177736
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -0,0 +1,557 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_STRUCTS_H__
+#define __DISPLAY_MODE_STRUCTS_H__
+
+typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
+typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
+typedef struct _vcs_dpi_ip_params_st ip_params_st;
+typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
+typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
+typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
+typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
+typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
+typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
+typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
+typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
+typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
+typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
+typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
+typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
+typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
+typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
+typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
+typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
+typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st;
+typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st;
+typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
+typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
+typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
+typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
+typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
+typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
+typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
+
+struct _vcs_dpi_voltage_scaling_st {
+ int state;
+ double dscclk_mhz;
+ double dcfclk_mhz;
+ double socclk_mhz;
+ double dram_speed_mhz;
+ double fabricclk_mhz;
+ double dispclk_mhz;
+ double dram_bw_per_chan_gbps;
+ double phyclk_mhz;
+ double dppclk_mhz;
+};
+
+struct _vcs_dpi_soc_bounding_box_st {
+ double sr_exit_time_us;
+ double sr_enter_plus_exit_time_us;
+ double urgent_latency_us;
+ double writeback_latency_us;
+ double ideal_dram_bw_after_urgent_percent;
+ unsigned int max_request_size_bytes;
+ struct _vcs_dpi_voltage_scaling_st vmin;
+ struct _vcs_dpi_voltage_scaling_st vmid;
+ struct _vcs_dpi_voltage_scaling_st vnom;
+ struct _vcs_dpi_voltage_scaling_st vmax;
+ double downspread_percent;
+ double dram_page_open_time_ns;
+ double dram_rw_turnaround_time_ns;
+ double dram_return_buffer_per_channel_bytes;
+ double dram_channel_width_bytes;
+ double fabric_datapath_to_dcn_data_return_bytes;
+ double dcn_downspread_percent;
+ double dispclk_dppclk_vco_speed_mhz;
+ double dfs_vco_period_ps;
+ unsigned int round_trip_ping_latency_dcfclk_cycles;
+ unsigned int urgent_out_of_order_return_per_channel_bytes;
+ unsigned int channel_interleave_bytes;
+ unsigned int num_banks;
+ unsigned int num_chans;
+ unsigned int vmm_page_size_bytes;
+ double dram_clock_change_latency_us;
+ double writeback_dram_clock_change_latency_us;
+ unsigned int return_bus_width_bytes;
+ unsigned int voltage_override;
+ double xfc_bus_transport_time_us;
+ double xfc_xbuf_latency_tolerance_us;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+};
+
+struct _vcs_dpi_ip_params_st {
+ unsigned int max_inter_dcn_tile_repeaters;
+ unsigned int num_dsc;
+ unsigned int odm_capable;
+ unsigned int rob_buffer_size_kbytes;
+ unsigned int det_buffer_size_kbytes;
+ unsigned int dpte_buffer_size_in_pte_reqs;
+ unsigned int pde_proc_buffer_size_64k_reqs;
+ unsigned int dpp_output_buffer_pixels;
+ unsigned int opp_output_buffer_lines;
+ unsigned int pixel_chunk_size_kbytes;
+ unsigned char pte_enable;
+ unsigned int pte_chunk_size_kbytes;
+ unsigned int meta_chunk_size_kbytes;
+ unsigned int writeback_chunk_size_kbytes;
+ unsigned int line_buffer_size_bits;
+ unsigned int max_line_buffer_lines;
+ unsigned int writeback_luma_buffer_size_kbytes;
+ unsigned int writeback_chroma_buffer_size_kbytes;
+ unsigned int writeback_chroma_line_buffer_width_pixels;
+ unsigned int max_page_table_levels;
+ unsigned int max_num_dpp;
+ unsigned int max_num_otg;
+ unsigned int cursor_chunk_size;
+ unsigned int cursor_buffer_size;
+ unsigned int max_num_wb;
+ unsigned int max_dchub_pscl_bw_pix_per_clk;
+ unsigned int max_pscl_lb_bw_pix_per_clk;
+ unsigned int max_lb_vscl_bw_pix_per_clk;
+ unsigned int max_vscl_hscl_bw_pix_per_clk;
+ double max_hscl_ratio;
+ double max_vscl_ratio;
+ unsigned int hscl_mults;
+ unsigned int vscl_mults;
+ unsigned int max_hscl_taps;
+ unsigned int max_vscl_taps;
+ unsigned int xfc_supported;
+ unsigned int ptoi_supported;
+ unsigned int xfc_fill_constant_bytes;
+ double dispclk_ramp_margin_percent;
+ double xfc_fill_bw_overhead_percent;
+ double underscan_factor;
+ unsigned int min_vblank_lines;
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int dcfclk_cstate_latency;
+ unsigned int dppclk_delay_scl;
+ unsigned int dppclk_delay_scl_lb_only;
+ unsigned int dppclk_delay_cnvc_formatter;
+ unsigned int dppclk_delay_cnvc_cursor;
+ unsigned int is_line_buffer_bpp_fixed;
+ unsigned int line_buffer_fixed_bpp;
+ unsigned int dcc_supported;
+
+ unsigned int IsLineBufferBppFixed;
+ unsigned int LineBufferFixedBpp;
+ unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ unsigned int bug_forcing_LC_req_same_size_fixed;
+};
+
+struct _vcs_dpi_display_xfc_params_st {
+ double xfc_tslv_vready_offset_us;
+ double xfc_tslv_vupdate_width_us;
+ double xfc_tslv_vupdate_offset_us;
+ int xfc_slv_chunk_size_bytes;
+};
+
+struct _vcs_dpi_display_pipe_source_params_st {
+ int source_format;
+ unsigned char dcc;
+ unsigned int dcc_override;
+ unsigned int dcc_rate;
+ unsigned char dcc_use_global;
+ unsigned char vm;
+ unsigned char vm_levels_force_en;
+ unsigned int vm_levels_force;
+ int source_scan;
+ int sw_mode;
+ int macro_tile_size;
+ unsigned char is_display_sw;
+ unsigned int viewport_width;
+ unsigned int viewport_height;
+ unsigned int viewport_y_y;
+ unsigned int viewport_y_c;
+ unsigned int viewport_width_c;
+ unsigned int viewport_height_c;
+ unsigned int data_pitch;
+ unsigned int data_pitch_c;
+ unsigned int meta_pitch;
+ unsigned int meta_pitch_c;
+ unsigned int cur0_src_width;
+ int cur0_bpp;
+ unsigned int cur1_src_width;
+ int cur1_bpp;
+ int num_cursors;
+ unsigned char is_hsplit;
+ unsigned char dynamic_metadata_enable;
+ unsigned int dynamic_metadata_lines_before_active;
+ unsigned int dynamic_metadata_xmit_bytes;
+ unsigned int hsplit_grp;
+ unsigned char xfc_enable;
+ unsigned char xfc_slave;
+ struct _vcs_dpi_display_xfc_params_st xfc_params;
+};
+struct writeback_st {
+ int wb_src_height;
+ int wb_dst_width;
+ int wb_dst_height;
+ int wb_pixel_format;
+ int wb_htaps_luma;
+ int wb_vtaps_luma;
+ int wb_htaps_chroma;
+ int wb_vtaps_chroma;
+ int wb_hratio;
+ int wb_vratio;
+};
+
+struct _vcs_dpi_display_output_params_st {
+ int dp_lanes;
+ int output_bpp;
+ int dsc_enable;
+ int wb_enable;
+ int output_bpc;
+ int output_type;
+ int output_format;
+ int output_standard;
+ int dsc_slices;
+ struct writeback_st wb;
+};
+
+struct _vcs_dpi_display_bandwidth_st {
+ double total_bw_consumed_gbps;
+ double guaranteed_urgent_return_bw_gbps;
+};
+
+struct _vcs_dpi_scaler_ratio_depth_st {
+ double hscl_ratio;
+ double vscl_ratio;
+ double hscl_ratio_c;
+ double vscl_ratio_c;
+ double vinit;
+ double vinit_c;
+ double vinit_bot;
+ double vinit_bot_c;
+ int lb_depth;
+ int scl_enable;
+};
+
+struct _vcs_dpi_scaler_taps_st {
+ unsigned int htaps;
+ unsigned int vtaps;
+ unsigned int htaps_c;
+ unsigned int vtaps_c;
+};
+
+struct _vcs_dpi_display_pipe_dest_params_st {
+ unsigned int recout_width;
+ unsigned int recout_height;
+ unsigned int full_recout_width;
+ unsigned int full_recout_height;
+ unsigned int hblank_start;
+ unsigned int hblank_end;
+ unsigned int vblank_start;
+ unsigned int vblank_end;
+ unsigned int htotal;
+ unsigned int vtotal;
+ unsigned int vactive;
+ unsigned int hactive;
+ unsigned int vstartup_start;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+ unsigned char interlaced;
+ unsigned char underscan;
+ double pixel_rate_mhz;
+ unsigned char synchronized_vblank_all_planes;
+ unsigned char otg_inst;
+ unsigned char odm_split_cnt;
+ unsigned char odm_combine;
+};
+
+struct _vcs_dpi_display_pipe_params_st {
+ display_pipe_source_params_st src;
+ display_pipe_dest_params_st dest;
+ scaler_ratio_depth_st scale_ratio_depth;
+ scaler_taps_st scale_taps;
+};
+
+struct _vcs_dpi_display_clocks_and_cfg_st {
+ int voltage;
+ double dppclk_mhz;
+ double refclk_mhz;
+ double dispclk_mhz;
+ double dcfclk_mhz;
+ double socclk_mhz;
+};
+
+struct _vcs_dpi_display_e2e_pipe_params_st {
+ display_pipe_params_st pipe;
+ display_output_params_st dout;
+ display_clocks_and_cfg_st clks_cfg;
+};
+
+struct _vcs_dpi_dchub_buffer_sizing_st {
+ unsigned int swath_width_y;
+ unsigned int swath_height_y;
+ unsigned int swath_height_c;
+ unsigned int detail_buffer_size_y;
+};
+
+struct _vcs_dpi_watermarks_perf_st {
+ double stutter_eff_in_active_region_percent;
+ double urgent_latency_supported_us;
+ double non_urgent_latency_supported_us;
+ double dram_clock_change_margin_us;
+ double dram_access_eff_percent;
+};
+
+struct _vcs_dpi_cstate_pstate_watermarks_st {
+ double cstate_exit_us;
+ double cstate_enter_plus_exit_us;
+ double pstate_change_us;
+};
+
+struct _vcs_dpi_wm_calc_pipe_params_st {
+ unsigned int num_dpp;
+ int voltage;
+ int output_type;
+ double dcfclk_mhz;
+ double socclk_mhz;
+ double dppclk_mhz;
+ double pixclk_mhz;
+ unsigned char interlace_en;
+ unsigned char pte_enable;
+ unsigned char dcc_enable;
+ double dcc_rate;
+ double bytes_per_pixel_c;
+ double bytes_per_pixel_y;
+ unsigned int swath_width_y;
+ unsigned int swath_height_y;
+ unsigned int swath_height_c;
+ unsigned int det_buffer_size_y;
+ double h_ratio;
+ double v_ratio;
+ unsigned int h_taps;
+ unsigned int h_total;
+ unsigned int v_total;
+ unsigned int v_active;
+ unsigned int e2e_index;
+ double display_pipe_line_delivery_time;
+ double read_bw;
+ unsigned int lines_in_det_y;
+ unsigned int lines_in_det_y_rounded_down_to_swath;
+ double full_det_buffering_time;
+ double dcfclk_deepsleep_mhz_per_plane;
+};
+
+struct _vcs_dpi_vratio_pre_st {
+ double vratio_pre_l;
+ double vratio_pre_c;
+};
+
+struct _vcs_dpi_display_data_rq_misc_params_st {
+ unsigned int full_swath_bytes;
+ unsigned int stored_swath_bytes;
+ unsigned int blk256_height;
+ unsigned int blk256_width;
+ unsigned int req_height;
+ unsigned int req_width;
+};
+
+struct _vcs_dpi_display_data_rq_sizing_params_st {
+ unsigned int chunk_bytes;
+ unsigned int min_chunk_bytes;
+ unsigned int meta_chunk_bytes;
+ unsigned int min_meta_chunk_bytes;
+ unsigned int mpte_group_bytes;
+ unsigned int dpte_group_bytes;
+};
+
+struct _vcs_dpi_display_data_rq_dlg_params_st {
+ unsigned int swath_width_ub;
+ unsigned int swath_height;
+ unsigned int req_per_swath_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ unsigned int dpte_req_per_row_ub;
+ unsigned int dpte_groups_per_row_ub;
+ unsigned int dpte_row_height;
+ unsigned int dpte_bytes_per_row_ub;
+ unsigned int meta_chunks_per_row_ub;
+ unsigned int meta_req_per_row_ub;
+ unsigned int meta_row_height;
+ unsigned int meta_bytes_per_row_ub;
+};
+
+struct _vcs_dpi_display_cur_rq_dlg_params_st {
+ unsigned char enable;
+ unsigned int swath_height;
+ unsigned int req_per_line;
+};
+
+struct _vcs_dpi_display_rq_dlg_params_st {
+ display_data_rq_dlg_params_st rq_l;
+ display_data_rq_dlg_params_st rq_c;
+ display_cur_rq_dlg_params_st rq_cur0;
+};
+
+struct _vcs_dpi_display_rq_sizing_params_st {
+ display_data_rq_sizing_params_st rq_l;
+ display_data_rq_sizing_params_st rq_c;
+};
+
+struct _vcs_dpi_display_rq_misc_params_st {
+ display_data_rq_misc_params_st rq_l;
+ display_data_rq_misc_params_st rq_c;
+};
+
+struct _vcs_dpi_display_rq_params_st {
+ unsigned char yuv420;
+ unsigned char yuv420_10bpc;
+ display_rq_misc_params_st misc;
+ display_rq_sizing_params_st sizing;
+ display_rq_dlg_params_st dlg;
+};
+
+struct _vcs_dpi_display_dlg_regs_st {
+ unsigned int refcyc_h_blank_end;
+ unsigned int dlg_vblank_end;
+ unsigned int min_dst_y_next_start;
+ unsigned int refcyc_per_htotal;
+ unsigned int refcyc_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ unsigned int dst_y_prefetch;
+ unsigned int dst_y_per_vm_vblank;
+ unsigned int dst_y_per_row_vblank;
+ unsigned int dst_y_per_vm_flip;
+ unsigned int dst_y_per_row_flip;
+ unsigned int ref_freq_to_pix_freq;
+ unsigned int vratio_prefetch;
+ unsigned int vratio_prefetch_c;
+ unsigned int refcyc_per_pte_group_vblank_l;
+ unsigned int refcyc_per_pte_group_vblank_c;
+ unsigned int refcyc_per_meta_chunk_vblank_l;
+ unsigned int refcyc_per_meta_chunk_vblank_c;
+ unsigned int refcyc_per_pte_group_flip_l;
+ unsigned int refcyc_per_pte_group_flip_c;
+ unsigned int refcyc_per_meta_chunk_flip_l;
+ unsigned int refcyc_per_meta_chunk_flip_c;
+ unsigned int dst_y_per_pte_row_nom_l;
+ unsigned int dst_y_per_pte_row_nom_c;
+ unsigned int refcyc_per_pte_group_nom_l;
+ unsigned int refcyc_per_pte_group_nom_c;
+ unsigned int dst_y_per_meta_row_nom_l;
+ unsigned int dst_y_per_meta_row_nom_c;
+ unsigned int refcyc_per_meta_chunk_nom_l;
+ unsigned int refcyc_per_meta_chunk_nom_c;
+ unsigned int refcyc_per_line_delivery_pre_l;
+ unsigned int refcyc_per_line_delivery_pre_c;
+ unsigned int refcyc_per_line_delivery_l;
+ unsigned int refcyc_per_line_delivery_c;
+ unsigned int chunk_hdl_adjust_cur0;
+ unsigned int chunk_hdl_adjust_cur1;
+ unsigned int vready_after_vcount0;
+ unsigned int dst_y_offset_cur0;
+ unsigned int dst_y_offset_cur1;
+ unsigned int xfc_reg_transfer_delay;
+ unsigned int xfc_reg_precharge_delay;
+ unsigned int xfc_reg_remote_surface_flip_latency;
+ unsigned int xfc_reg_prefetch_margin;
+ unsigned int dst_y_delta_drq_limit;
+};
+
+struct _vcs_dpi_display_ttu_regs_st {
+ unsigned int qos_level_low_wm;
+ unsigned int qos_level_high_wm;
+ unsigned int min_ttu_vblank;
+ unsigned int qos_level_flip;
+ unsigned int refcyc_per_req_delivery_l;
+ unsigned int refcyc_per_req_delivery_c;
+ unsigned int refcyc_per_req_delivery_cur0;
+ unsigned int refcyc_per_req_delivery_cur1;
+ unsigned int refcyc_per_req_delivery_pre_l;
+ unsigned int refcyc_per_req_delivery_pre_c;
+ unsigned int refcyc_per_req_delivery_pre_cur0;
+ unsigned int refcyc_per_req_delivery_pre_cur1;
+ unsigned int qos_level_fixed_l;
+ unsigned int qos_level_fixed_c;
+ unsigned int qos_level_fixed_cur0;
+ unsigned int qos_level_fixed_cur1;
+ unsigned int qos_ramp_disable_l;
+ unsigned int qos_ramp_disable_c;
+ unsigned int qos_ramp_disable_cur0;
+ unsigned int qos_ramp_disable_cur1;
+};
+
+struct _vcs_dpi_display_data_rq_regs_st {
+ unsigned int chunk_size;
+ unsigned int min_chunk_size;
+ unsigned int meta_chunk_size;
+ unsigned int min_meta_chunk_size;
+ unsigned int dpte_group_size;
+ unsigned int mpte_group_size;
+ unsigned int swath_height;
+ unsigned int pte_row_height_linear;
+};
+
+struct _vcs_dpi_display_rq_regs_st {
+ display_data_rq_regs_st rq_regs_l;
+ display_data_rq_regs_st rq_regs_c;
+ unsigned int drq_expansion_mode;
+ unsigned int prq_expansion_mode;
+ unsigned int mrq_expansion_mode;
+ unsigned int crq_expansion_mode;
+ unsigned int plane1_base_address;
+};
+
+struct _vcs_dpi_display_dlg_sys_params_st {
+ double t_mclk_wm_us;
+ double t_urg_wm_us;
+ double t_sr_wm_us;
+ double t_extra_us;
+ double mem_trip_us;
+ double t_srx_delay_us;
+ double deepsleep_dcfclk_mhz;
+ double total_flip_bw;
+ unsigned int total_flip_bytes;
+};
+
+struct _vcs_dpi_display_dlg_prefetch_param_st {
+ double prefetch_bw;
+ unsigned int flip_bytes;
+};
+
+struct _vcs_dpi_display_pipe_clock_st {
+ double dcfclk_mhz;
+ double dispclk_mhz;
+ double socclk_mhz;
+ double dscclk_mhz[6];
+ double dppclk_mhz[6];
+};
+
+struct _vcs_dpi_display_arb_params_st {
+ int max_req_outstanding;
+ int min_req_outstanding;
+ int sat_level_us;
+};
+
+#endif /*__DISPLAY_MODE_STRUCTS_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
new file mode 100644
index 000000000000..ea661ee44674
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -0,0 +1,6124 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "display_mode_vba.h"
+
+#include "dml_inline_defs.h"
+
+static const unsigned int NumberOfStates = DC__VOLTAGE_STATES;
+
+static void fetch_socbb_params(struct display_mode_lib *mode_lib);
+static void fetch_ip_params(struct display_mode_lib *mode_lib);
+static void fetch_pipe_params(struct display_mode_lib *mode_lib);
+static void recalculate_params(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+static void recalculate(struct display_mode_lib *mode_lib);
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN);
+static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib);
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat);
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
+// Super monster function with some 45 argument
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFClkDeepSleep,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool VirtualMemoryEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ unsigned int *VUpdateWidthPix,
+ unsigned int *VReadyOffsetPix);
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath);
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidthY,
+ bool VirtualMemoryEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height);
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime);
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk);
+static double CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ double WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackChromaLineBufferWidth);
+static void CalculateActiveRowBandwidth(
+ bool VirtualMemoryEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw);
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int MaxPageTableLevels,
+ bool VirtualMemoryEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double Tno_bw,
+ double VRatio,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe);
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth);
+static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib);
+static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp);
+static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
+
+void set_prefetch_mode(
+ struct display_mode_lib *mode_lib,
+ bool cstate_en,
+ bool pstate_en,
+ bool ignore_viewport_pos,
+ bool immediate_flip_support)
+{
+ unsigned int prefetch_mode;
+
+ if (cstate_en && pstate_en)
+ prefetch_mode = 0;
+ else if (cstate_en)
+ prefetch_mode = 1;
+ else
+ prefetch_mode = 2;
+ if (prefetch_mode != mode_lib->vba.PrefetchMode
+ || ignore_viewport_pos != mode_lib->vba.IgnoreViewportPositioning
+ || immediate_flip_support != mode_lib->vba.ImmediateFlipSupport) {
+ DTRACE(
+ " Prefetch mode has changed from %i to %i. Recalculating.",
+ prefetch_mode,
+ mode_lib->vba.PrefetchMode);
+ mode_lib->vba.PrefetchMode = prefetch_mode;
+ mode_lib->vba.IgnoreViewportPositioning = ignore_viewport_pos;
+ mode_lib->vba.ImmediateFlipSupport = immediate_flip_support;
+ recalculate(mode_lib);
+ }
+}
+
+unsigned int dml_get_voltage_level(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ bool need_recalculate = memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
+ || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
+ || num_pipes != mode_lib->vba.cache_num_pipes
+ || memcmp(pipes, mode_lib->vba.cache_pipes,
+ sizeof(display_e2e_pipe_params_st) * num_pipes) != 0;
+
+ mode_lib->vba.soc = mode_lib->soc;
+ mode_lib->vba.ip = mode_lib->ip;
+ memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
+ mode_lib->vba.cache_num_pipes = num_pipes;
+
+ if (need_recalculate && pipes[0].clks_cfg.dppclk_mhz != 0)
+ recalculate(mode_lib);
+ else {
+ fetch_socbb_params(mode_lib);
+ fetch_ip_params(mode_lib);
+ fetch_pipe_params(mode_lib);
+ }
+ ModeSupportAndSystemConfigurationFull(mode_lib);
+
+ return mode_lib->vba.VoltageLevel;
+}
+
+#define dml_get_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes) \
+{ \
+ recalculate_params(mode_lib, pipes, num_pipes); \
+ return var; \
+}
+
+dml_get_attr_func(clk_dcf_deepsleep, mode_lib->vba.DCFClkDeepSleep);
+dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark);
+dml_get_attr_func(wm_memory_trip, mode_lib->vba.MemoryTripWatermark);
+dml_get_attr_func(wm_writeback_urgent, mode_lib->vba.WritebackUrgentWatermark);
+dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
+dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
+dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
+dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
+dml_get_attr_func(wm_xfc_underflow, mode_lib->vba.UrgentWatermark); // xfc_underflow maps to urgent
+dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
+dml_get_attr_func(stutter_efficiency_no_vblank, mode_lib->vba.StutterEfficiencyNotIncludingVBlank);
+dml_get_attr_func(urgent_latency, mode_lib->vba.MinUrgentLatencySupportUs);
+dml_get_attr_func(urgent_extra_latency, mode_lib->vba.UrgentExtraLatency);
+dml_get_attr_func(nonurgent_latency, mode_lib->vba.NonUrgentLatencyTolerance);
+dml_get_attr_func(
+ dram_clock_change_latency,
+ mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
+dml_get_attr_func(dispclk_calculated, mode_lib->vba.DISPCLK_calculated);
+dml_get_attr_func(total_data_read_bw, mode_lib->vba.TotalDataReadBandwidth);
+dml_get_attr_func(return_bw, mode_lib->vba.ReturnBW);
+dml_get_attr_func(tcalc, mode_lib->vba.TCalc);
+
+#define dml_get_pipe_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe) \
+{\
+ unsigned int which_plane; \
+ recalculate_params(mode_lib, pipes, num_pipes); \
+ which_plane = mode_lib->vba.pipe_plane[which_pipe]; \
+ return var[which_plane]; \
+}
+
+dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay);
+dml_get_pipe_attr_func(dppclk_calculated, mode_lib->vba.DPPCLK_calculated);
+dml_get_pipe_attr_func(dscclk_calculated, mode_lib->vba.DSCCLK_calculated);
+dml_get_pipe_attr_func(min_ttu_vblank, mode_lib->vba.MinTTUVBlank);
+dml_get_pipe_attr_func(vratio_prefetch_l, mode_lib->vba.VRatioPrefetchY);
+dml_get_pipe_attr_func(vratio_prefetch_c, mode_lib->vba.VRatioPrefetchC);
+dml_get_pipe_attr_func(dst_x_after_scaler, mode_lib->vba.DSTXAfterScaler);
+dml_get_pipe_attr_func(dst_y_after_scaler, mode_lib->vba.DSTYAfterScaler);
+dml_get_pipe_attr_func(dst_y_per_vm_vblank, mode_lib->vba.DestinationLinesToRequestVMInVBlank);
+dml_get_pipe_attr_func(dst_y_per_row_vblank, mode_lib->vba.DestinationLinesToRequestRowInVBlank);
+dml_get_pipe_attr_func(dst_y_prefetch, mode_lib->vba.DestinationLinesForPrefetch);
+dml_get_pipe_attr_func(dst_y_per_vm_flip, mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip);
+dml_get_pipe_attr_func(
+ dst_y_per_row_flip,
+ mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
+
+dml_get_pipe_attr_func(xfc_transfer_delay, mode_lib->vba.XFCTransferDelay);
+dml_get_pipe_attr_func(xfc_precharge_delay, mode_lib->vba.XFCPrechargeDelay);
+dml_get_pipe_attr_func(xfc_remote_surface_flip_latency, mode_lib->vba.XFCRemoteSurfaceFlipLatency);
+dml_get_pipe_attr_func(xfc_prefetch_margin, mode_lib->vba.XFCPrefetchMargin);
+
+unsigned int get_vstartup_calculated(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes,
+ unsigned int which_pipe)
+{
+ unsigned int which_plane;
+
+ recalculate_params(mode_lib, pipes, num_pipes);
+ which_plane = mode_lib->vba.pipe_plane[which_pipe];
+ return mode_lib->vba.VStartup[which_plane];
+}
+
+double get_total_immediate_flip_bytes(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ recalculate_params(mode_lib, pipes, num_pipes);
+ return mode_lib->vba.TotImmediateFlipBytes;
+}
+
+double get_total_immediate_flip_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ recalculate_params(mode_lib, pipes, num_pipes);
+ return mode_lib->vba.ImmediateFlipBW;
+}
+
+double get_total_prefetch_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ unsigned int k;
+ double total_prefetch_bw = 0.0;
+
+ recalculate_params(mode_lib, pipes, num_pipes);
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ total_prefetch_bw += mode_lib->vba.PrefetchBandwidth[k];
+ return total_prefetch_bw;
+}
+
+static void fetch_socbb_params(struct display_mode_lib *mode_lib)
+{
+ soc_bounding_box_st *soc = &mode_lib->vba.soc;
+ unsigned int i;
+
+ // SOC Bounding Box Parameters
+ mode_lib->vba.ReturnBusWidth = soc->return_bus_width_bytes;
+ mode_lib->vba.NumberOfChannels = soc->num_chans;
+ mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency =
+ soc->ideal_dram_bw_after_urgent_percent; // there's always that one bastard variable that's so long it throws everything out of alignment!
+ mode_lib->vba.UrgentLatency = soc->urgent_latency_us;
+ mode_lib->vba.RoundTripPingLatencyCycles = soc->round_trip_ping_latency_dcfclk_cycles;
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel =
+ soc->urgent_out_of_order_return_per_channel_bytes;
+ mode_lib->vba.WritebackLatency = soc->writeback_latency_us;
+ mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
+ mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
+ mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
+ mode_lib->vba.Downspreading = soc->downspread_percent;
+ mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
+ mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
+ mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
+ // Set the voltage scaling clocks as the defaults. Most of these will
+ // be set to different values by the test
+ for (i = 0; i < DC__VOLTAGE_STATES; i++)
+ if (soc->clock_limits[i].state == mode_lib->vba.VoltageLevel)
+ break;
+
+ mode_lib->vba.DCFCLK = soc->clock_limits[i].dcfclk_mhz;
+ mode_lib->vba.SOCCLK = soc->clock_limits[i].socclk_mhz;
+ mode_lib->vba.DRAMSpeed = soc->clock_limits[i].dram_speed_mhz;
+ mode_lib->vba.FabricClock = soc->clock_limits[i].fabricclk_mhz;
+
+ mode_lib->vba.XFCBusTransportTime = soc->xfc_bus_transport_time_us;
+ mode_lib->vba.XFCXBUFLatencyTolerance = soc->xfc_xbuf_latency_tolerance_us;
+
+ mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp = false;
+ mode_lib->vba.MaxHSCLRatio = 4;
+ mode_lib->vba.MaxVSCLRatio = 4;
+ mode_lib->vba.MaxNumWriteback = 0; /*TODO*/
+ mode_lib->vba.WritebackLumaAndChromaScalingSupported = true;
+ mode_lib->vba.Cursor64BppSupport = true;
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.DCFCLKPerState[i] = soc->clock_limits[i].dcfclk_mhz;
+ mode_lib->vba.FabricClockPerState[i] = soc->clock_limits[i].fabricclk_mhz;
+ mode_lib->vba.SOCCLKPerState[i] = soc->clock_limits[i].socclk_mhz;
+ mode_lib->vba.PHYCLKPerState[i] = soc->clock_limits[i].phyclk_mhz;
+ mode_lib->vba.MaxDppclk[i] = soc->clock_limits[i].dppclk_mhz;
+ mode_lib->vba.MaxDSCCLK[i] = soc->clock_limits[i].dscclk_mhz;
+ mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
+ mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
+ }
+}
+
+static void fetch_ip_params(struct display_mode_lib *mode_lib)
+{
+ ip_params_st *ip = &mode_lib->vba.ip;
+
+ // IP Parameters
+ mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
+ mode_lib->vba.MaxNumOTG = ip->max_num_otg;
+ mode_lib->vba.CursorChunkSize = ip->cursor_chunk_size;
+ mode_lib->vba.CursorBufferSize = ip->cursor_buffer_size;
+
+ mode_lib->vba.MaxDCHUBToPSCLThroughput = ip->max_dchub_pscl_bw_pix_per_clk;
+ mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
+ mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
+ mode_lib->vba.DETBufferSizeInKByte = ip->det_buffer_size_kbytes;
+ mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
+ mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
+ mode_lib->vba.PTEChunkSize = ip->pte_chunk_size_kbytes;
+ mode_lib->vba.WritebackChunkSize = ip->writeback_chunk_size_kbytes;
+ mode_lib->vba.LineBufferSize = ip->line_buffer_size_bits;
+ mode_lib->vba.MaxLineBufferLines = ip->max_line_buffer_lines;
+ mode_lib->vba.PTEBufferSizeInRequests = ip->dpte_buffer_size_in_pte_reqs;
+ mode_lib->vba.DPPOutputBufferPixels = ip->dpp_output_buffer_pixels;
+ mode_lib->vba.OPPOutputBufferLines = ip->opp_output_buffer_lines;
+ mode_lib->vba.WritebackInterfaceLumaBufferSize = ip->writeback_luma_buffer_size_kbytes;
+ mode_lib->vba.WritebackInterfaceChromaBufferSize = ip->writeback_chroma_buffer_size_kbytes;
+ mode_lib->vba.WritebackChromaLineBufferWidth =
+ ip->writeback_chroma_line_buffer_width_pixels;
+ mode_lib->vba.MaxPageTableLevels = ip->max_page_table_levels;
+ mode_lib->vba.MaxInterDCNTileRepeaters = ip->max_inter_dcn_tile_repeaters;
+ mode_lib->vba.NumberOfDSC = ip->num_dsc;
+ mode_lib->vba.ODMCapability = ip->odm_capable;
+ mode_lib->vba.DISPCLKRampingMargin = ip->dispclk_ramp_margin_percent;
+
+ mode_lib->vba.XFCSupported = ip->xfc_supported;
+ mode_lib->vba.XFCFillBWOverhead = ip->xfc_fill_bw_overhead_percent;
+ mode_lib->vba.XFCFillConstant = ip->xfc_fill_constant_bytes;
+ mode_lib->vba.DPPCLKDelaySubtotal = ip->dppclk_delay_subtotal;
+ mode_lib->vba.DPPCLKDelaySCL = ip->dppclk_delay_scl;
+ mode_lib->vba.DPPCLKDelaySCLLBOnly = ip->dppclk_delay_scl_lb_only;
+ mode_lib->vba.DPPCLKDelayCNVCFormater = ip->dppclk_delay_cnvc_formatter;
+ mode_lib->vba.DPPCLKDelayCNVCCursor = ip->dppclk_delay_cnvc_cursor;
+ mode_lib->vba.DISPCLKDelaySubtotal = ip->dispclk_delay_subtotal;
+
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP = ip->ptoi_supported;
+
+ mode_lib->vba.PDEProcessingBufIn64KBReqs = ip->pde_proc_buffer_size_64k_reqs;
+}
+
+static void fetch_pipe_params(struct display_mode_lib *mode_lib)
+{
+ display_e2e_pipe_params_st *pipes = mode_lib->vba.cache_pipes;
+ ip_params_st *ip = &mode_lib->vba.ip;
+
+ unsigned int OTGInstPlane[DC__NUM_DPP__MAX];
+ unsigned int j, k;
+ bool PlaneVisited[DC__NUM_DPP__MAX];
+ bool visited[DC__NUM_DPP__MAX];
+
+ // Convert Pipes to Planes
+ for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k)
+ visited[k] = false;
+
+ mode_lib->vba.NumberOfActivePlanes = 0;
+ for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {
+ display_pipe_source_params_st *src = &pipes[j].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[j].pipe.dest;
+ scaler_ratio_depth_st *scl = &pipes[j].pipe.scale_ratio_depth;
+ scaler_taps_st *taps = &pipes[j].pipe.scale_taps;
+ display_output_params_st *dout = &pipes[j].dout;
+ display_clocks_and_cfg_st *clks = &pipes[j].clks_cfg;
+
+ if (visited[j])
+ continue;
+ visited[j] = true;
+
+ mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
+
+ mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
+ mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
+ (enum scan_direction_class) (src->source_scan);
+ mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_width;
+ mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_height;
+ mode_lib->vba.ViewportYStartY[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_y_y;
+ mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_y_c;
+ mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
+ mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
+ mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
+ mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
+ mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] = scl->vscl_ratio;
+ mode_lib->vba.ScalerEnabled[mode_lib->vba.NumberOfActivePlanes] = scl->scl_enable;
+ mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes] = dst->interlaced;
+ if (mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes])
+ mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] *= 2.0;
+ mode_lib->vba.htaps[mode_lib->vba.NumberOfActivePlanes] = taps->htaps;
+ mode_lib->vba.vtaps[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps;
+ mode_lib->vba.HTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->htaps_c;
+ mode_lib->vba.VTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps_c;
+ mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
+ mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
+ mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
+ src->dcc_use_global ?
+ ip->dcc_supported : src->dcc && ip->dcc_supported;
+ mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
+ mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum source_format_class) (src->source_format);
+ mode_lib->vba.HActive[mode_lib->vba.NumberOfActivePlanes] = dst->hactive;
+ mode_lib->vba.VActive[mode_lib->vba.NumberOfActivePlanes] = dst->vactive;
+ mode_lib->vba.SurfaceTiling[mode_lib->vba.NumberOfActivePlanes] =
+ (enum dm_swizzle_mode) (src->sw_mode);
+ mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] =
+ dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
+ mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
+ dst->odm_combine;
+ mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum output_format_class) (dout->output_format);
+ mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
+ (enum output_encoder_class) (dout->output_type);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
+ mode_lib->vba.OutputLinkDPLanes[mode_lib->vba.NumberOfActivePlanes] =
+ dout->dp_lanes;
+ mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
+ dout->dsc_slices;
+ mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpc == 0 ? 12 : dout->output_bpc;
+ mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
+ mode_lib->vba.WritebackSourceHeight[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_src_height;
+ mode_lib->vba.WritebackDestinationWidth[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_dst_width;
+ mode_lib->vba.WritebackDestinationHeight[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_dst_height;
+ mode_lib->vba.WritebackPixelFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum source_format_class) (dout->wb.wb_pixel_format);
+ mode_lib->vba.WritebackLumaHTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_htaps_luma;
+ mode_lib->vba.WritebackLumaVTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vtaps_luma;
+ mode_lib->vba.WritebackChromaHTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_htaps_chroma;
+ mode_lib->vba.WritebackChromaVTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vtaps_chroma;
+ mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_hratio;
+ mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vratio;
+
+ mode_lib->vba.DynamicMetadataEnable[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_enable;
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_lines_before_active;
+ mode_lib->vba.DynamicMetadataTransmittedBytes[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_xmit_bytes;
+
+ mode_lib->vba.XFCEnabled[mode_lib->vba.NumberOfActivePlanes] = src->xfc_enable
+ && ip->xfc_supported;
+ mode_lib->vba.XFCSlvChunkSize = src->xfc_params.xfc_slv_chunk_size_bytes;
+ mode_lib->vba.XFCTSlvVupdateOffset = src->xfc_params.xfc_tslv_vupdate_offset_us;
+ mode_lib->vba.XFCTSlvVupdateWidth = src->xfc_params.xfc_tslv_vupdate_width_us;
+ mode_lib->vba.XFCTSlvVreadyOffset = src->xfc_params.xfc_tslv_vready_offset_us;
+ mode_lib->vba.PixelClock[mode_lib->vba.NumberOfActivePlanes] = dst->pixel_rate_mhz;
+ mode_lib->vba.DPPCLK[mode_lib->vba.NumberOfActivePlanes] = clks->dppclk_mhz;
+ if (ip->is_line_buffer_bpp_fixed)
+ mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] =
+ ip->line_buffer_fixed_bpp;
+ else {
+ unsigned int lb_depth;
+
+ switch (scl->lb_depth) {
+ case dm_lb_6:
+ lb_depth = 18;
+ break;
+ case dm_lb_8:
+ lb_depth = 24;
+ break;
+ case dm_lb_10:
+ lb_depth = 30;
+ break;
+ case dm_lb_12:
+ lb_depth = 36;
+ break;
+ case dm_lb_16:
+ lb_depth = 48;
+ break;
+ default:
+ lb_depth = 36;
+ }
+ mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] = lb_depth;
+ }
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes] = 0;
+ // The DML spreadsheet assumes that the two cursors utilize the same amount of bandwidth. We'll
+ // calculate things a little more accurately
+ for (k = 0; k < DC__NUM_CURSOR__MAX; ++k) {
+ switch (k) {
+ case 0:
+ mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][0] =
+ CursorBppEnumToBits(
+ (enum cursor_bpp) (src->cur0_bpp));
+ mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][0] =
+ src->cur0_src_width;
+ if (src->cur0_src_width > 0)
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
+ break;
+ case 1:
+ mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][1] =
+ CursorBppEnumToBits(
+ (enum cursor_bpp) (src->cur1_bpp));
+ mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][1] =
+ src->cur1_src_width;
+ if (src->cur1_src_width > 0)
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
+ break;
+ default:
+ dml_print(
+ "ERROR: Number of cursors specified exceeds supported maximum\n")
+ ;
+ }
+ }
+
+ OTGInstPlane[mode_lib->vba.NumberOfActivePlanes] = dst->otg_inst;
+
+ if (dst->odm_combine && !src->is_hsplit)
+ dml_print(
+ "ERROR: ODM Combine is specified but is_hsplit has not be specified for pipe %i\n",
+ j);
+
+ if (src->is_hsplit) {
+ for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
+ display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
+ display_output_params_st *dout_k = &pipes[k].dout;
+
+ if (src_k->is_hsplit && !visited[k]
+ && src->hsplit_grp == src_k->hsplit_grp) {
+ mode_lib->vba.pipe_plane[k] =
+ mode_lib->vba.NumberOfActivePlanes;
+ mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
+ if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
+ == dm_horz)
+ mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_width;
+ else
+ mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_height;
+
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
+ dout_k->dsc_slices;
+ visited[k] = true;
+ }
+ }
+ }
+
+ mode_lib->vba.NumberOfActivePlanes++;
+ }
+
+ // handle overlays through dml_ml->vba.BlendingAndTiming
+ // dml_ml->vba.BlendingAndTiming tells you which instance to look at to get timing, the so called 'master'
+
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ PlaneVisited[j] = false;
+
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ for (k = j + 1; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!PlaneVisited[k] && OTGInstPlane[j] == OTGInstPlane[k]) {
+ // doesn't matter, so choose the smaller one
+ mode_lib->vba.BlendingAndTiming[j] = j;
+ PlaneVisited[j] = true;
+ mode_lib->vba.BlendingAndTiming[k] = j;
+ PlaneVisited[k] = true;
+ }
+ }
+
+ if (!PlaneVisited[j]) {
+ mode_lib->vba.BlendingAndTiming[j] = j;
+ PlaneVisited[j] = true;
+ }
+ }
+
+ // TODO: dml_ml->vba.ODMCombineEnabled => 2 * dml_ml->vba.DPPPerPlane...actually maybe not since all pipes are specified
+ // Do we want the dscclk to automatically be halved? Guess not since the value is specified
+
+ mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
+ for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k)
+ ASSERT(mode_lib->vba.SynchronizedVBlank == pipes[k].pipe.dest.synchronized_vblank_all_planes);
+
+ mode_lib->vba.VirtualMemoryEnable = false;
+ mode_lib->vba.OverridePageTableLevels = 0;
+
+ for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
+ mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable
+ || !!pipes[k].pipe.src.vm;
+ mode_lib->vba.OverridePageTableLevels =
+ (pipes[k].pipe.src.vm_levels_force_en
+ && mode_lib->vba.OverridePageTableLevels
+ < pipes[k].pipe.src.vm_levels_force) ?
+ pipes[k].pipe.src.vm_levels_force :
+ mode_lib->vba.OverridePageTableLevels;
+ }
+
+ if (mode_lib->vba.OverridePageTableLevels)
+ mode_lib->vba.MaxPageTableLevels = mode_lib->vba.OverridePageTableLevels;
+
+ mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable && !!ip->pte_enable;
+
+ mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
+ mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn)
+ / 1000.0;
+
+ // TODO: Must be consistent across all pipes
+ // DCCProgrammingAssumesScanDirectionUnknown = src.dcc_scan_dir_unknown;
+}
+
+static void recalculate(struct display_mode_lib *mode_lib)
+{
+ ModeSupportAndSystemConfiguration(mode_lib);
+ PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
+ DisplayPipeConfiguration(mode_lib);
+ DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
+}
+
+// in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs
+// rather than working them out as in recalculate_ms
+static void recalculate_params(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ // This is only safe to use memcmp because there are non-POD types in struct display_mode_lib
+ if (memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
+ || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
+ || num_pipes != mode_lib->vba.cache_num_pipes
+ || memcmp(
+ pipes,
+ mode_lib->vba.cache_pipes,
+ sizeof(display_e2e_pipe_params_st) * num_pipes) != 0) {
+ mode_lib->vba.soc = mode_lib->soc;
+ mode_lib->vba.ip = mode_lib->ip;
+ memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
+ mode_lib->vba.cache_num_pipes = num_pipes;
+ recalculate(mode_lib);
+ }
+}
+
+static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
+{
+ soc_bounding_box_st *soc = &mode_lib->vba.soc;
+ unsigned int i, k;
+ unsigned int total_pipes = 0;
+
+ mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
+ for (i = 1; i < mode_lib->vba.cache_num_pipes; ++i)
+ ASSERT(mode_lib->vba.VoltageLevel == -1 || mode_lib->vba.VoltageLevel == mode_lib->vba.cache_pipes[i].clks_cfg.voltage);
+
+ mode_lib->vba.DCFCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dcfclk_mhz;
+ mode_lib->vba.SOCCLK = mode_lib->vba.cache_pipes[0].clks_cfg.socclk_mhz;
+
+ if (mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz > 0.0)
+ mode_lib->vba.DISPCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz;
+ else
+ mode_lib->vba.DISPCLK = soc->clock_limits[mode_lib->vba.VoltageLevel].dispclk_mhz;
+
+ fetch_socbb_params(mode_lib);
+ fetch_ip_params(mode_lib);
+ fetch_pipe_params(mode_lib);
+
+ // Total Available Pipes Support Check
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ total_pipes += mode_lib->vba.DPPPerPlane[k];
+ ASSERT(total_pipes <= DC__NUM_DPP__MAX);
+}
+
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN)
+{
+ double CriticalCompression;
+
+ if (DCCEnabledAnyPlane
+ && ReturnBandwidthToDCN
+ > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ ReturnBandwidthToDCN * 4
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ / ReturnBandwidthToDCN
+ - mode_lib->vba.DCFCLK
+ * mode_lib->vba.ReturnBusWidth
+ / 4)
+ + mode_lib->vba.UrgentLatency));
+
+ CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatency
+ / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024);
+
+ if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ 4.0 * ReturnBandwidthToDCN
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatency
+ / dml_pow(
+ (ReturnBandwidthToDCN
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024),
+ 2));
+
+ return ReturnBW;
+}
+
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat)
+{
+ // valid bpc = source bits per component in the set of {8, 10, 12}
+ // valid bpp = increments of 1/16 of a bit
+ // min = 6/7/8 in N420/N422/444, respectively
+ // max = such that compression is 1:1
+ //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
+ //valid numSlices = number of slices in the horiziontal direction per DSC engine in the set of {1, 2, 3, 4}
+ //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
+
+ // fixed value
+ unsigned int rcModelSize = 8192;
+
+ // N422/N420 operate at 2 pixels per clock
+ unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
+ Delay, pixels;
+
+ if (pixelFormat == dm_n422 || pixelFormat == dm_420)
+ pixelsPerClock = 2;
+ // #all other modes operate at 1 pixel per clock
+ else
+ pixelsPerClock = 1;
+
+ //initial transmit delay as per PPS
+ initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
+
+ //compute ssm delay
+ if (bpc == 8)
+ D = 81;
+ else if (bpc == 10)
+ D = 89;
+ else
+ D = 113;
+
+ //divide by pixel per cycle to compute slice width as seen by DSC
+ w = sliceWidth / pixelsPerClock;
+
+ //422 mode has an additional cycle of delay
+ if (pixelFormat == dm_s422)
+ s = 1;
+ else
+ s = 0;
+
+ //main calculation for the dscce
+ ix = initalXmitDelay + 45;
+ wx = (w + 2) / 3;
+ p = 3 * wx - w;
+ l0 = ix / w;
+ a = ix + p * l0;
+ ax = (a + 2) / 3 + D + 6 + 1;
+ l = (ax + wx - 1) / wx;
+ if ((ix % w) == 0 && p != 0)
+ lstall = 1;
+ else
+ lstall = 0;
+ Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
+
+ //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
+ pixels = Delay * 3 * pixelsPerClock;
+ return pixels;
+}
+
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
+{
+ unsigned int Delay = 0;
+
+ if (pixelFormat == dm_420) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 2;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 13;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 3;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else if (pixelFormat == dm_n422) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 1;
+ // dscc - input deserializer
+ Delay = Delay + 5;
+ // dscc - input cdc fifo
+ Delay = Delay + 25;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 10;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // sft
+ Delay = Delay + 1;
+ }
+
+ return Delay;
+}
+
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFClkDeepSleep,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool VirtualMemoryEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ unsigned int *VUpdateWidthPix,
+ unsigned int *VReadyOffsetPix)
+{
+ bool MyError = false;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
+ double Tdm, LineTime, Tsetup;
+ double dst_y_prefetch_equ;
+ double Tsw_oto;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tpre_oto;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+
+ if (ScalerEnabled)
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
+ else
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
+
+ DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
+
+ DISPCLKCycles = DISPCLKDelaySubtotal;
+
+ if (DPPCLK == 0.0 || DISPCLK == 0.0)
+ return true;
+
+ *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
+ + DSCDelay;
+
+ if (DPPPerPlane > 1)
+ *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
+
+ if (OutputFormat == dm_420 || (InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
+ *DSTYAfterScaler = 1;
+ else
+ *DSTYAfterScaler = 0;
+
+ DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
+
+ *VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
+ TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
+ *VUpdateWidthPix = (14.0 / DCFClkDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
+ * PixelClock;
+
+ *VReadyOffsetPix = dml_max(
+ 150.0 / DPPCLK,
+ TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / DPPCLK)
+ * PixelClock;
+
+ Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
+
+ LineTime = (double) HTotal / PixelClock;
+
+ if (DynamicMetadataEnable) {
+ double Tdmbf, Tdmec, Tdmsks;
+
+ Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
+ Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
+ Tdmec = LineTime;
+ if (DynamicMetadataLinesBeforeActiveRequired == 0)
+ Tdmsks = VBlank * LineTime / 2.0;
+ else
+ Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
+ if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
+ Tdmsks = Tdmsks / 2;
+ if (VStartup * LineTime
+ < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
+ MyError = true;
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
+ + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
+ } else
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
+ } else
+ Tdm = 0;
+
+ if (VirtualMemoryEnable) {
+ if (PageTableLevels == 4)
+ *Tno_bw = UrgentExtraLatency + UrgentLatency;
+ else if (PageTableLevels == 3)
+ *Tno_bw = UrgentExtraLatency;
+ else
+ *Tno_bw = 0;
+ } else if (DCCEnable)
+ *Tno_bw = LineTime;
+ else
+ *Tno_bw = LineTime / 4;
+
+ dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
+ - (Tsetup + Tdm) / LineTime
+ - (*DSTYAfterScaler + *DSTXAfterScaler / HTotal);
+
+ Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+
+ prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
+ / Tsw_oto;
+
+ if (VirtualMemoryEnable == true) {
+ Tvm_oto =
+ dml_max(
+ *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (PageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else
+ Tvm_oto = LineTime / 4.0;
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ Tr0_oto = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
+ dml_max(UrgentLatency, dml_max(LineTime - Tvm_oto, LineTime / 4)));
+ } else
+ Tr0_oto = LineTime - Tvm_oto;
+
+ Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
+
+ dst_y_prefetch_oto = Tpre_oto / LineTime;
+
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ)
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ else
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+
+ *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
+ / 4;
+
+ dml_print("DML: VStartup: %d\n", VStartup);
+ dml_print("DML: TCalc: %f\n", TCalc);
+ dml_print("DML: TWait: %f\n", TWait);
+ dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
+ dml_print("DML: LineTime: %f\n", LineTime);
+ dml_print("DML: Tsetup: %f\n", Tsetup);
+ dml_print("DML: Tdm: %f\n", Tdm);
+ dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
+ dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
+ dml_print("DML: HTotal: %d\n", HTotal);
+
+ *PrefetchBandwidth = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ if (*DestinationLinesForPrefetch > 1) {
+ *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
+ + 2 * PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2
+ * dml_ceil(BytePerPixelDETC, 2))
+ / (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
+ if (VirtualMemoryEnable) {
+ TimeForFetchingMetaPTE =
+ dml_max(
+ *Tno_bw
+ + (double) PDEAndMetaPTEBytesFrame
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (PageTableLevels
+ - 1),
+ LineTime / 4));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingMetaPTE = LineTime / 4;
+ else
+ TimeForFetchingMetaPTE = 0.0;
+ }
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ TimeForFetchingRowInVBlank =
+ dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentLatency,
+ dml_max(
+ LineTime
+ - TimeForFetchingMetaPTE,
+ LineTime
+ / 4.0)));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
+ else
+ TimeForFetchingRowInVBlank = 0.0;
+ }
+
+ *DestinationLinesToRequestVMInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
+ 1) / 4.0;
+
+ *DestinationLinesToRequestRowInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
+ 1) / 4.0;
+
+ LinesToRequestPrefetchPixelData =
+ *DestinationLinesForPrefetch
+ - ((NumberOfCursors > 0 || VirtualMemoryEnable
+ || DCCEnable) ?
+ (*DestinationLinesToRequestVMInVBlank
+ + *DestinationLinesToRequestRowInVBlank) :
+ 0.0);
+
+ if (LinesToRequestPrefetchPixelData > 0) {
+
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ *VRatioPrefetchY =
+ dml_max(
+ (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData,
+ (double) MaxNumSwathY
+ * SwathHeightY
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillY
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ }
+ }
+
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+
+ if ((SwathHeightC > 4)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ *VRatioPrefetchC =
+ dml_max(
+ *VRatioPrefetchC,
+ (double) MaxNumSwathC
+ * SwathHeightC
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillC
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchC = 0;
+ }
+ }
+
+ *RequiredPrefetchPixDataBW =
+ DPPPerPlane
+ * ((double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETY,
+ 1)
+ + (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETC,
+ 2)
+ / 2)
+ * SwathWidthY / LineTime;
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ } else {
+ MyError = true;
+ }
+
+ if (MyError) {
+ *PrefetchBandwidth = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *DestinationLinesForPrefetch = 0;
+ LinesToRequestPrefetchPixelData = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ return MyError;
+}
+
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
+}
+
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
+}
+
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath)
+{
+ unsigned int MaxPartialSwath;
+
+ if (ProgressiveToInterlaceUnitInOPP)
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
+ else
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
+
+ if (!mode_lib->vba.IgnoreViewportPositioning) {
+
+ *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
+ % SwathHeight;
+ MaxPartialSwath = dml_max(1U, MaxPartialSwath);
+
+ } else {
+
+ if (ViewportYStart != 0)
+ dml_print(
+ "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
+
+ *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
+ % SwathHeight;
+ }
+
+ return *MaxNumSwath * SwathHeight + MaxPartialSwath;
+}
+
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidth,
+ bool VirtualMemoryEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height)
+{
+ unsigned int MetaRequestHeight;
+ unsigned int MetaRequestWidth;
+ unsigned int MetaSurfWidth;
+ unsigned int MetaSurfHeight;
+ unsigned int MPDEBytesFrame;
+ unsigned int MetaPTEBytesFrame;
+ unsigned int DCCMetaSurfaceBytes;
+
+ unsigned int MacroTileSizeBytes;
+ unsigned int MacroTileHeight;
+ unsigned int DPDE0BytesFrame;
+ unsigned int ExtraDPDEBytesFrame;
+ unsigned int PDEAndMetaPTEBytesFrame;
+
+ if (DCCEnable == true) {
+ MetaRequestHeight = 8 * BlockHeight256Bytes;
+ MetaRequestWidth = 8 * BlockWidth256Bytes;
+ if (ScanDirection == dm_horz) {
+ *meta_row_height = MetaRequestHeight;
+ MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
+ + MetaRequestWidth;
+ *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
+ } else {
+ *meta_row_height = MetaRequestWidth;
+ MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
+ + MetaRequestHeight;
+ *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
+ }
+ if (ScanDirection == dm_horz) {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ } else {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(
+ (double) ViewportHeight - 1,
+ 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ }
+ if (VirtualMemoryEnable == true) {
+ MetaPTEBytesFrame = (dml_ceil(
+ (double) (DCCMetaSurfaceBytes - VMMPageSize)
+ / (8 * VMMPageSize),
+ 1) + 1) * 64;
+ MPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 1);
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ }
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ *MetaRowByte = 0;
+ }
+
+ if (SurfaceTiling == dm_sw_linear) {
+ MacroTileSizeBytes = 256;
+ MacroTileHeight = 1;
+ } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
+ || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
+ MacroTileSizeBytes = 4096;
+ MacroTileHeight = 4 * BlockHeight256Bytes;
+ } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
+ || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
+ || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
+ || SurfaceTiling == dm_sw_64kb_r_x) {
+ MacroTileSizeBytes = 65536;
+ MacroTileHeight = 16 * BlockHeight256Bytes;
+ } else {
+ MacroTileSizeBytes = 262144;
+ MacroTileHeight = 32 * BlockHeight256Bytes;
+ }
+ *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
+
+ if (VirtualMemoryEnable == true && mode_lib->vba.MaxPageTableLevels > 1) {
+ if (ScanDirection == dm_horz) {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ ViewportHeight
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ } else {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ (double) SwathWidth
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ }
+ ExtraDPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 2);
+ } else {
+ DPDE0BytesFrame = 0;
+ ExtraDPDEBytesFrame = 0;
+ }
+
+ PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
+ + ExtraDPDEBytesFrame;
+
+ if (VirtualMemoryEnable == true) {
+ unsigned int PTERequestSize;
+ unsigned int PixelPTEReqHeight;
+ unsigned int PixelPTEReqWidth;
+ double FractionOfPTEReturnDrop;
+ unsigned int EffectivePDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ PixelPTEReqHeight = 1;
+ PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ } else if (MacroTileSizeBytes == 4096) {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ if (ScanDirection == dm_horz)
+ FractionOfPTEReturnDrop = 0;
+ else
+ FractionOfPTEReturnDrop = 7 / 8;
+ } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
+ PixelPTEReqHeight = 16 * BlockHeight256Bytes;
+ PixelPTEReqWidth = 16 * BlockWidth256Bytes;
+ PTERequestSize = 128;
+ FractionOfPTEReturnDrop = 0;
+ } else {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ }
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
+ else
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ *dpte_row_height =
+ dml_min(
+ 128,
+ 1
+ << (unsigned int) dml_floor(
+ dml_log2(
+ dml_min(
+ (double) PTEBufferSizeInRequests
+ * PixelPTEReqWidth,
+ EffectivePDEProcessingBufIn64KBReqs
+ * 65536.0
+ / BytePerPixel)
+ / Pitch),
+ 1));
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ (double) (Pitch * *dpte_row_height - 1)
+ / PixelPTEReqWidth,
+ 1) + 1);
+ } else if (ScanDirection == dm_horz) {
+ *dpte_row_height = PixelPTEReqHeight;
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
+ + 1);
+ } else {
+ *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ ((double) SwathWidth - 1)
+ / PixelPTEReqHeight,
+ 1) + 1);
+ }
+ if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
+ <= 64 * PTEBufferSizeInRequests) {
+ *PTEBufferSizeNotExceeded = true;
+ } else {
+ *PTEBufferSizeNotExceeded = false;
+ }
+ } else {
+ *PixelPTEBytesPerRow = 0;
+ *PTEBufferSizeNotExceeded = true;
+ }
+
+ return PDEAndMetaPTEBytesFrame;
+}
+
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib)
+{
+ unsigned int j, k;
+
+ mode_lib->vba.WritebackDISPCLK = 0.0;
+ mode_lib->vba.DISPCLKWithRamping = 0;
+ mode_lib->vba.DISPCLKWithoutRamping = 0;
+ mode_lib->vba.GlobalDPPCLK = 0.0;
+
+ // dml_ml->vba.DISPCLK and dml_ml->vba.DPPCLK Calculation
+ //
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k]) {
+ mode_lib->vba.WritebackDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPLuma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma;
+ } else {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPChroma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
+ * mode_lib->vba.PixelClock[k];
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma,
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k)
+ continue;
+ if (mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ } else if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ }
+ }
+
+ mode_lib->vba.DISPCLKWithRamping = dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.WritebackDISPCLK);
+ mode_lib->vba.DISPCLKWithoutRamping = dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.WritebackDISPCLK);
+
+ ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.soc.clock_limits[NumberOfStates - 1].dispclk_mhz,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
+ } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
+ } else {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
+ }
+ DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.DPPPerPlane[k]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ mode_lib->vba.GlobalDPPCLK = dml_max(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DPPCLK_calculated[k]);
+ }
+ mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
+ * dml_ceil(
+ mode_lib->vba.DPPCLK_calculated[k] * 255
+ / mode_lib->vba.GlobalDPPCLK,
+ 1);
+ DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
+ }
+
+ // Urgent Watermark
+ mode_lib->vba.DCCEnabledAnyPlane = false;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.DCCEnabledAnyPlane = true;
+
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000)
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency / 100;
+
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ // Let's do this calculation again??
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000);
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
+ DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
+ DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz)
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
+ else
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ MainPlaneDoesODMCombine = true;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[j] == true)
+ MainPlaneDoesODMCombine = true;
+
+ if (MainPlaneDoesODMCombine == true)
+ mode_lib->vba.SwathWidthY[k] = dml_min(
+ (double) mode_lib->vba.SwathWidthSingleDPPY[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]));
+ else
+ mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ mode_lib->vba.BytePerPixelDETY[k] = 8;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ mode_lib->vba.BytePerPixelDETY[k] = 4;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ mode_lib->vba.BytePerPixelDETY[k] = 2;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 2;
+ } else { // dm_420_10
+ mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
+ mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
+ }
+ }
+
+ mode_lib->vba.TotalDataReadBandwidth = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k] / 2;
+ DTRACE(
+ " read_bw[%i] = %fBps",
+ k,
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]);
+ mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k];
+ }
+
+ mode_lib->vba.TotalDCCActiveDPP = 0;
+ mode_lib->vba.TotalActiveDPP = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ }
+
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBW;
+
+ mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double DataFabricLineDeliveryTimeLuma, DataFabricLineDeliveryTimeChroma;
+
+ if (mode_lib->vba.VRatio[k] <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+
+ DataFabricLineDeliveryTimeLuma = mode_lib->vba.SwathWidthSingleDPPY[k]
+ * mode_lib->vba.SwathHeightY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.ReturnBW * mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / mode_lib->vba.TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(
+ mode_lib->vba.LastPixelOfLineExtraWatermark,
+ DataFabricLineDeliveryTimeLuma
+ - mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]);
+
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
+ else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ * mode_lib->vba.DPPPerPlane[k]
+ / (mode_lib->vba.HRatio[k] / 2.0)
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / mode_lib->vba.DPPCLK[k];
+
+ DataFabricLineDeliveryTimeChroma = mode_lib->vba.SwathWidthSingleDPPY[k] / 2.0
+ * mode_lib->vba.SwathHeightC[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.ReturnBW
+ * mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / mode_lib->vba.TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark =
+ dml_max(
+ mode_lib->vba.LastPixelOfLineExtraWatermark,
+ DataFabricLineDeliveryTimeChroma
+ - mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
+ }
+
+ mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
+ + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalDCCActiveDPP
+ * mode_lib->vba.MetaChunkSize) * 1024.0
+ / mode_lib->vba.ReturnBW;
+
+ if (mode_lib->vba.VirtualMemoryEnable)
+ mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
+ * mode_lib->vba.PTEChunkSize * 1024.0 / mode_lib->vba.ReturnBW;
+
+ mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatency
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
+ DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
+
+ mode_lib->vba.MemoryTripWatermark = mode_lib->vba.UrgentLatency;
+
+ mode_lib->vba.TotalActiveWriteback = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k])
+ mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
+
+ // NB P-State/DRAM Clock Change Watermark
+ mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.UrgentWatermark;
+
+ DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
+
+ DTRACE(" calculating wb pstate watermark");
+ DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
+ DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
+
+ // Stutter Efficiency
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
+ / mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETY[k],
+ mode_lib->vba.SwathHeightY[k]);
+ mode_lib->vba.FullDETBufferingTimeY[k] =
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k];
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
+ / mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETC[k],
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.FullDETBufferingTimeC[k] =
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2);
+ } else {
+ mode_lib->vba.LinesInDETC[k] = 0;
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
+ mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
+ }
+ }
+
+ mode_lib->vba.MinFullDETBufferingTime = 999999.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.FullDETBufferingTimeY[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeY[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ if (mode_lib->vba.FullDETBufferingTimeC[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeC[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ }
+
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000;
+ } else {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000;
+ }
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 256
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 256;
+ }
+ if (mode_lib->vba.VirtualMemoryEnable) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 512
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 512;
+ }
+ }
+
+ mode_lib->vba.PartOfBurstThatFitsInROB =
+ dml_min(
+ mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth,
+ mode_lib->vba.ROBBufferSizeInKByte * 1024
+ * mode_lib->vba.TotalDataReadBandwidth
+ / (mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ * 1000));
+ mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
+ * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
+ / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
+ + (mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth
+ - mode_lib->vba.PartOfBurstThatFitsInROB)
+ / (mode_lib->vba.DCFCLK * 64);
+ if (mode_lib->vba.TotalActiveWriteback == 0) {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
+ - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
+ / mode_lib->vba.MinFullDETBufferingTime) * 100;
+ } else {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
+ }
+
+ mode_lib->vba.SmallestVBlank = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.VBlankTime = 0;
+ }
+ mode_lib->vba.SmallestVBlank = dml_min(
+ mode_lib->vba.SmallestVBlank,
+ mode_lib->vba.VBlankTime);
+ }
+
+ mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
+ * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
+ - mode_lib->vba.SmallestVBlank)
+ + mode_lib->vba.SmallestVBlank)
+ / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
+
+ // dml_ml->vba.DCFCLK Deep Sleep
+ mode_lib->vba.DCFClkDeepSleep = 8.0;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane =
+ dml_max(
+ 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
+ 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
+ } else
+ mode_lib->vba.DCFCLKDeepSleepPerPlane = 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
+ mode_lib->vba.DCFCLKDeepSleepPerPlane = dml_max(
+ mode_lib->vba.DCFCLKDeepSleepPerPlane,
+ mode_lib->vba.PixelClock[k] / 16.0);
+ mode_lib->vba.DCFClkDeepSleep = dml_max(
+ mode_lib->vba.DCFClkDeepSleep,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane);
+
+ DTRACE(
+ " dcfclk_deepsleep_per_plane[%i] = %fMHz",
+ k,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane);
+ }
+
+ DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFClkDeepSleep);
+
+ // Stutter Watermark
+ mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFClkDeepSleep;
+ mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
+ DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
+
+ // Urgent Latency Supported
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.EffectiveDETPlusLBLinesLuma =
+ dml_floor(
+ mode_lib->vba.LinesInDETY[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETY[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
+ mode_lib->vba.SwathHeightY[k]);
+
+ mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma =
+ dml_floor(
+ mode_lib->vba.LinesInDETC[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETC[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETC[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.UrgentLatencySupportUsChroma =
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2)
+ - mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.SwathWidthY[k]
+ / 2)
+ * mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+ mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
+ mode_lib->vba.UrgentLatencySupportUsLuma,
+ mode_lib->vba.UrgentLatencySupportUsChroma);
+ } else {
+ mode_lib->vba.UrgentLatencySupportUs[k] =
+ mode_lib->vba.UrgentLatencySupportUsLuma;
+ }
+ }
+
+ mode_lib->vba.MinUrgentLatencySupportUs = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
+ mode_lib->vba.MinUrgentLatencySupportUs,
+ mode_lib->vba.UrgentLatencySupportUs[k]);
+ }
+
+ // Non-Urgent Latency Tolerance
+ mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
+ - mode_lib->vba.UrgentWatermark;
+
+ // DSCCLK
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
+ mode_lib->vba.DSCCLK_calculated[k] = 0.0;
+ } else {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k] == dm_n422)
+ mode_lib->vba.DSCFormatFactor = 2;
+ else
+ mode_lib->vba.DSCFormatFactor = 1;
+ if (mode_lib->vba.ODMCombineEnabled[k])
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 6
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ else
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 3
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ }
+ }
+
+ // DSC Delay
+ // TODO
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double bpp = mode_lib->vba.OutputBpp[k];
+ unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
+
+ if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
+ if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DSCDelay[k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ mode_lib->vba.DSCDelay[k] =
+ 2
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices / 2.0,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ mode_lib->vba.DSCDelay[k] = 0;
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
+ if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.DSCEnabled[j])
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
+
+ // Prefetch
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PixelPTEBytesPerRowY;
+ unsigned int MetaRowByteY;
+ unsigned int MetaRowByteC;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int PixelPTEBytesPerRowC;
+
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
+ &mode_lib->vba.BlockHeight256BytesY[k],
+ &mode_lib->vba.BlockHeight256BytesC[k],
+ &mode_lib->vba.BlockWidth256BytesY[k],
+ &mode_lib->vba.BlockWidth256BytesC[k]);
+ PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesY[k],
+ mode_lib->vba.BlockWidth256BytesY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &MetaRowByteY,
+ &PixelPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.VInitPreFillY[k],
+ &mode_lib->vba.MaxNumSwathY[k]);
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
+ PDEAndMetaPTEBytesFrameC =
+ CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesC[k],
+ mode_lib->vba.BlockWidth256BytesC[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2,
+ mode_lib->vba.ViewportHeight[k] / 2,
+ mode_lib->vba.SwathWidthY[k] / 2,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &MetaRowByteC,
+ &PixelPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightC[k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.VInitPreFillC[k],
+ &mode_lib->vba.MaxNumSwathC[k]);
+ } else {
+ PixelPTEBytesPerRowC = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC = 0;
+ mode_lib->vba.MaxNumSwathC[k] = 0;
+ mode_lib->vba.PrefetchSourceLinesC[k] = 0;
+ }
+
+ mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ + PDEAndMetaPTEBytesFrameC;
+ mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
+
+ CalculateActiveRowBandwidth(
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ MetaRowByteY,
+ MetaRowByteC,
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.meta_row_height_chroma[k],
+ PixelPTEBytesPerRowY,
+ PixelPTEBytesPerRowC,
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_bw[k],
+ &mode_lib->vba.dpte_row_bw[k],
+ &mode_lib->vba.qual_row_bw[k]);
+ }
+
+ mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFClkDeepSleep;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.DISPCLK;
+ } else
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ dml_max(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.DISPCLK);
+ }
+ }
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j)
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
+
+ mode_lib->vba.VStartupLines = 13;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MaxVStartupLines[k] =
+ mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(
+ 1.0,
+ dml_ceil(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1));
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ mode_lib->vba.MaximumMaxVStartupLines = dml_max(
+ mode_lib->vba.MaximumMaxVStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.cursor_bw[k] = 0.0;
+ for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
+ mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
+ * mode_lib->vba.CursorBPP[k][j] / 8.0
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ }
+
+ do {
+ double MaxTotalRDBandwidth = 0;
+ bool DestinationLineTimesForPrefetchLessThan2 = false;
+ bool VRatioPrefetchMoreThan4 = false;
+ bool prefetch_vm_bw_valid = true;
+ bool prefetch_row_bw_valid = true;
+ double TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
+ }
+ mode_lib->vba.ErrorResult[k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.DPPCLK[k],
+ mode_lib->vba.DISPCLK,
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.DCFClkDeepSleep,
+ mode_lib->vba.DSCDelay[k],
+ mode_lib->vba.DPPPerPlane[k],
+ mode_lib->vba.ScalerEnabled[k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ (unsigned int) (mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.HRatio[k]),
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]),
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.TCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.PrefetchSourceLinesY[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.BytePerPixelDETY[k],
+ mode_lib->vba.VInitPreFillY[k],
+ mode_lib->vba.MaxNumSwathY[k],
+ mode_lib->vba.PrefetchSourceLinesC[k],
+ mode_lib->vba.BytePerPixelDETC[k],
+ mode_lib->vba.VInitPreFillC[k],
+ mode_lib->vba.MaxNumSwathC[k],
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &mode_lib->vba.DSTXAfterScaler[k],
+ &mode_lib->vba.DSTYAfterScaler[k],
+ &mode_lib->vba.DestinationLinesForPrefetch[k],
+ &mode_lib->vba.PrefetchBandwidth[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
+ &mode_lib->vba.VRatioPrefetchY[k],
+ &mode_lib->vba.VRatioPrefetchC[k],
+ &mode_lib->vba.RequiredPrefetchPixDataBW[k],
+ &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.VStartup[k] = dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+ if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
+ != 0) {
+ mode_lib->vba.VStartup[k] =
+ mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+ }
+ } else {
+ mode_lib->vba.VStartup[k] =
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+
+ if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_vm_bw[k] =
+ (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ prefetch_vm_bw_valid = false;
+ }
+ if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
+ == 0)
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_row_bw[k] =
+ (double) (mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k])
+ / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ prefetch_row_bw_valid = false;
+ }
+
+ MaxTotalRDBandwidth =
+ MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBW[k])
+ + mode_lib->vba.meta_row_bw[k]
+ + mode_lib->vba.dpte_row_bw[k]));
+
+ if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
+ DestinationLineTimesForPrefetchLessThan2 = true;
+ if (mode_lib->vba.VRatioPrefetchY[k] > 4
+ || mode_lib->vba.VRatioPrefetchC[k] > 4)
+ VRatioPrefetchMoreThan4 = true;
+ }
+
+ if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
+ && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
+ && !DestinationLineTimesForPrefetchLessThan2)
+ mode_lib->vba.PrefetchModeSupported = true;
+ else {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
+ }
+
+ if (mode_lib->vba.PrefetchModeSupported == true) {
+ double final_flip_bw[DC__NUM_DPP__MAX];
+ unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
+ double total_dcn_read_bw_with_flip = 0;
+
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ + mode_lib->vba.qual_row_bw[k],
+ mode_lib->vba.PrefetchBandwidth[k]);
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ ImmediateFlipBytes[k] = 0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ + mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + ImmediateFlipBytes[k];
+ }
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ total_dcn_read_bw_with_flip =
+ total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBW[k])));
+ }
+ mode_lib->vba.ImmediateFlipSupported = true;
+ if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ErrorResult[k]) {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
+ }
+ }
+
+ mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
+ } while (!((mode_lib->vba.PrefetchModeSupported
+ && (!mode_lib->vba.ImmediateFlipSupport
+ || mode_lib->vba.ImmediateFlipSupported))
+ || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
+
+ //Display Pipeline Delivery Time in Prefetch
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ }
+ }
+
+ // Min TTUVBlank
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode == 0) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.DRAMClockChangeWatermark,
+ dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark));
+ } else if (mode_lib->vba.PrefetchMode == 1) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark);
+ } else {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
+ }
+ if (!mode_lib->vba.DynamicMetadataEnable[k])
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
+ + mode_lib->vba.MinTTUVBlank[k];
+ }
+
+ // DCC Configuration
+ mode_lib->vba.ActiveDPPs = 0;
+ // NB P-State/DRAM Clock Change Support
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double DPPOutputBufferLinesY;
+ double DPPOutputBufferLinesC;
+ double DPPOPPBufferingY;
+ double MaxDETBufferingTimeY;
+ double ActiveDRAMClockChangeLatencyMarginY;
+
+ mode_lib->vba.LBLatencyHidingSourceLinesY =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)),
+ 1)) - (mode_lib->vba.vtaps[k] - 1);
+
+ mode_lib->vba.LBLatencyHidingSourceLinesC =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / 2.0
+ / dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2,
+ 1.0)),
+ 1))
+ - (mode_lib->vba.VTAPsChroma[k] - 1);
+
+ EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
+ / mode_lib->vba.VRatio[k]
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
+ / (mode_lib->vba.VRatio[k] / 2)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
+ / mode_lib->vba.SwathWidthY[k];
+ } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = 0.5;
+ } else {
+ DPPOutputBufferLinesY = 1;
+ }
+
+ if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = 0.5;
+ } else {
+ DPPOutputBufferLinesC = 1;
+ }
+
+ DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
+ MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
+ + (mode_lib->vba.LinesInDETY[k]
+ - mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+
+ ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
+ + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginY =
+ ActiveDRAMClockChangeLatencyMarginY
+ - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
+ * mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesC
+ + mode_lib->vba.OPPOutputBufferLines);
+ double MaxDETBufferingTimeC =
+ mode_lib->vba.FullDETBufferingTimeC[k]
+ + (mode_lib->vba.LinesInDETC[k]
+ - mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
+ + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
+ - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginC =
+ ActiveDRAMClockChangeLatencyMarginC
+ - (1
+ - 1
+ / (mode_lib->vba.ActiveDPPs
+ - 1))
+ * mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ ActiveDRAMClockChangeLatencyMarginY,
+ ActiveDRAMClockChangeLatencyMarginC);
+ } else {
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
+ ActiveDRAMClockChangeLatencyMarginY;
+ }
+
+ if (mode_lib->vba.WritebackEnable[k]) {
+ double WritebackDRAMClockChangeLatencyMargin;
+
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ WritebackDRAMClockChangeLatencyMargin =
+ (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * 4)
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize
+ * 8.0 / 10,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize
+ * 8 / 10)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
+ mode_lib->vba.MinActiveDRAMClockChangeMargin
+ + mode_lib->vba.DRAMClockChangeLatency;
+
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vactive;
+ } else {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vblank;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
+ mode_lib->vba.DRAMClockChangeSupport =
+ dm_dram_clock_change_unsupported;
+ }
+ }
+ } else {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
+ }
+ }
+
+ //XFC Parameters:
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ double TWait;
+
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
+ TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
+ dml_floor(
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCTransferDelay[k] =
+ dml_ceil(
+ mode_lib->vba.XFCBusTransportTime
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCPrechargeDelay[k] =
+ dml_ceil(
+ (mode_lib->vba.XFCBusTransportTime
+ + mode_lib->vba.TInitXFill
+ + mode_lib->vba.TslvChk)
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
+ * mode_lib->vba.SrcActiveDrainRate;
+ mode_lib->vba.FinalFillMargin =
+ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.XFCFillConstant;
+ mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.FinalFillMargin;
+ mode_lib->vba.RemainingFillLevel = dml_max(
+ 0.0,
+ mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
+ mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
+ / (mode_lib->vba.SrcActiveDrainRate
+ * mode_lib->vba.XFCFillBWOverhead / 100);
+ mode_lib->vba.XFCPrefetchMargin[k] =
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ + mode_lib->vba.TFinalxFill
+ + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
+ mode_lib->vba.XFCPrechargeDelay[k] = 0;
+ mode_lib->vba.XFCTransferDelay[k] = 0;
+ mode_lib->vba.XFCPrefetchMargin[k] = 0;
+ }
+ }
+}
+
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
+{
+ double BytePerPixDETY;
+ double BytePerPixDETC;
+ double Read256BytesBlockHeightY;
+ double Read256BytesBlockHeightC;
+ double Read256BytesBlockWidthY;
+ double Read256BytesBlockWidthC;
+ double MaximumSwathHeightY;
+ double MaximumSwathHeightC;
+ double MinimumSwathHeightY;
+ double MinimumSwathHeightC;
+ double SwathWidth;
+ double SwathWidthGranularityY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesY;
+ double RoundedUpMaxSwathSizeBytesC;
+ unsigned int j, k;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ BytePerPixDETY = 8;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ BytePerPixDETY = 4;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ BytePerPixDETY = 2;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 2;
+ } else {
+ BytePerPixDETY = 4.0 / 3.0;
+ BytePerPixDETC = 8.0 / 3.0;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ Read256BytesBlockHeightY = 4;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ Read256BytesBlockHeightY = 8;
+ } else {
+ Read256BytesBlockHeightY = 16;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockHeightC = 0;
+ Read256BytesBlockWidthC = 0;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ Read256BytesBlockHeightC = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ Read256BytesBlockHeightY = 16;
+ Read256BytesBlockHeightC = 8;
+ } else {
+ Read256BytesBlockHeightY = 8;
+ Read256BytesBlockHeightC = 8;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
+ / Read256BytesBlockHeightC;
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ MaximumSwathHeightY = Read256BytesBlockHeightY;
+ MaximumSwathHeightC = Read256BytesBlockHeightC;
+ } else {
+ MaximumSwathHeightY = Read256BytesBlockWidthY;
+ MaximumSwathHeightC = Read256BytesBlockWidthC;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
+ && mode_lib->vba.SourceScan[k] != dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ }
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ }
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ SwathWidth = mode_lib->vba.ViewportWidth[k];
+ } else {
+ SwathWidth = mode_lib->vba.ViewportHeight[k];
+ }
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ MainPlaneDoesODMCombine = true;
+ }
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ MainPlaneDoesODMCombine = true;
+ }
+ }
+
+ if (MainPlaneDoesODMCombine == true) {
+ SwathWidth = dml_min(
+ SwathWidth,
+ mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
+ } else {
+ SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
+ RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ (double) (SwathWidth - 1),
+ SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
+ * MaximumSwathHeightY;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ + 256;
+ }
+ if (MaximumSwathHeightC > 0) {
+ SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
+ / MaximumSwathHeightC;
+ RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ (double) (SwathWidth / 2.0 - 1),
+ SwathWidthGranularityC) + SwathWidthGranularityC)
+ * BytePerPixDETC * MaximumSwathHeightC;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ RoundedUpMaxSwathSizeBytesC,
+ 256) + 256;
+ }
+ } else
+ RoundedUpMaxSwathSizeBytesC = 0.0;
+
+ if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
+ } else {
+ mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
+ }
+
+ if (mode_lib->vba.SwathHeightC[k] == 0) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024;
+ mode_lib->vba.DETBufferSizeC[k] = 0;
+ } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ } else {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 * 2 / 3;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 3;
+ }
+ }
+}
+
+bool Calculate256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC)
+{
+ if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
+ || SourcePixelFormat == dm_444_16
+ || SourcePixelFormat == dm_444_8)) {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ } else if (SourcePixelFormat == dm_444_64) {
+ *BlockHeight256BytesY = 4;
+ } else if (SourcePixelFormat == dm_444_8) {
+ *BlockHeight256BytesY = 16;
+ } else {
+ *BlockHeight256BytesY = 8;
+ }
+ *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
+ *BlockHeight256BytesC = 0;
+ *BlockWidth256BytesC = 0;
+ } else {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ *BlockHeight256BytesC = 1;
+ } else if (SourcePixelFormat == dm_420_8) {
+ *BlockHeight256BytesY = 16;
+ *BlockHeight256BytesC = 8;
+ } else {
+ *BlockHeight256BytesY = 8;
+ *BlockHeight256BytesC = 8;
+ }
+ *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
+ *BlockWidth256BytesC = 256 / BytePerPixelC / *BlockHeight256BytesC;
+ }
+ return true;
+}
+
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime)
+{
+ if (PrefetchMode == 0) {
+ return dml_max(
+ DRAMClockChangeLatency + UrgentLatency,
+ dml_max(SREnterPlusExitTime, UrgentLatency));
+ } else if (PrefetchMode == 1) {
+ return dml_max(SREnterPlusExitTime, UrgentLatency);
+ } else {
+ return UrgentLatency;
+ }
+}
+
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk)
+{
+ double TSlvSetup, AvgfillRate, result;
+
+ *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
+ TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
+ *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
+ AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
+ *TslvChk = XFCSlvChunkSize / AvgfillRate;
+ dml_print(
+ "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
+ *SrcActiveDrainRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
+ result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
+ return result;
+}
+
+static double CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ double WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackChromaLineBufferWidth)
+{
+ double CalculateWriteBackDISPCLK =
+ 1.01 * PixelClock
+ * dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1)
+ / WritebackHRatio,
+ dml_max(
+ (WritebackLumaVTaps
+ * dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1))
+ / (double) HTotal
+ + dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1)
+ + 4.0)
+ / (double) HTotal,
+ dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * WritebackDestinationWidth
+ / (double) HTotal));
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDISPCLK =
+ dml_max(
+ CalculateWriteBackDISPCLK,
+ 1.01 * PixelClock
+ * dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ dml_max(
+ (WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / WritebackChromaLineBufferWidth,
+ 1))
+ / HTotal
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)
+ / HTotal,
+ dml_ceil(
+ 1.0
+ / (2
+ * WritebackVRatio),
+ 1)
+ * WritebackDestinationWidth
+ / 2.0
+ / HTotal)));
+ }
+ return CalculateWriteBackDISPCLK;
+}
+
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth)
+{
+ double CalculateWriteBackDelay =
+ dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
+ WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(1.0 / WritebackVRatio, 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1) + 4));
+
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDelay =
+ dml_max(
+ CalculateWriteBackDelay,
+ dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)));
+ }
+ return CalculateWriteBackDelay;
+}
+
+static void CalculateActiveRowBandwidth(
+ bool VirtualMemoryEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw)
+{
+ if (DCCEnable != true) {
+ *meta_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ + VRatio / 2 * MetaRowByteChroma
+ / (meta_row_height_chroma * LineTime);
+ } else {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
+ }
+
+ if (VirtualMemoryEnable != true) {
+ *dpte_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ + VRatio / 2 * PixelPTEBytesPerRowChroma
+ / (dpte_row_height_chroma * LineTime);
+ } else {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
+ }
+
+ if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
+ *qual_row_bw = *meta_row_bw + *dpte_row_bw;
+ } else {
+ *qual_row_bw = 0;
+ }
+}
+
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int MaxPageTableLevels,
+ bool VirtualMemoryEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double Tno_bw,
+ double VRatio,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe)
+{
+ double min_row_time = 0.0;
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *DestinationLinesToRequestVMInImmediateFlip = 0.0;
+ *DestinationLinesToRequestRowInImmediateFlip = 0.0;
+ *final_flip_bw = qual_row_bw;
+ *ImmediateFlipSupportedForPipe = true;
+ } else {
+ double TimeForFetchingMetaPTEImmediateFlip;
+ double TimeForFetchingRowInVBlankImmediateFlip;
+
+ if (VirtualMemoryEnable == true) {
+ mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingMetaPTEImmediateFlip =
+ dml_max(
+ Tno_bw
+ + PDEAndMetaPTEBytesFrame
+ / mode_lib->vba.ImmediateFlipBW,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (MaxPageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestVMInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / mode_lib->vba.ImmediateFlipBW,
+ dml_max(UrgentLatency, LineTime / 4.0));
+ } else {
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestRowInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if (VirtualMemoryEnable == true) {
+ *final_flip_bw =
+ dml_max(
+ PDEAndMetaPTEBytesFrame
+ / (*DestinationLinesToRequestVMInImmediateFlip
+ * LineTime),
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip
+ * LineTime));
+ } else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
+ *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
+ } else {
+ *final_flip_bw = 0;
+ }
+
+ if (VirtualMemoryEnable && !DCCEnable)
+ min_row_time = dpte_row_height * LineTime / VRatio;
+ else if (!VirtualMemoryEnable && DCCEnable)
+ min_row_time = meta_row_height * LineTime / VRatio;
+ else
+ min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
+ / VRatio;
+
+ if (*DestinationLinesToRequestVMInImmediateFlip >= 8
+ || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ || TimeForFetchingMetaPTEImmediateFlip
+ + 2 * TimeForFetchingRowInVBlankImmediateFlip
+ > min_row_time)
+ *ImmediateFlipSupportedForPipe = false;
+ else
+ *ImmediateFlipSupportedForPipe = true;
+ }
+}
+
+static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib)
+{
+ unsigned int k;
+
+ //Progressive To dml_ml->vba.Interlace Unit Effect
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.PixelClockBackEnd[k] = mode_lib->vba.PixelClock[k];
+ if (mode_lib->vba.Interlace[k] == 1
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true) {
+ mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClock[k];
+ }
+ }
+}
+
+static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp)
+{
+ switch (ebpp) {
+ case dm_cur_2bit:
+ return 2;
+ case dm_cur_32bit:
+ return 32;
+ case dm_cur_64bit:
+ return 64;
+ default:
+ return 0;
+ }
+}
+
+static unsigned int TruncToValidBPP(
+ double DecimalBPP,
+ bool DSCEnabled,
+ enum output_encoder_class Output,
+ enum output_format_class Format,
+ unsigned int DSCInputBitPerComponent)
+{
+ if (Output == dm_hdmi) {
+ if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return 0;
+ } else if (Format == dm_444) {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else
+ return 0;
+ } else {
+ if (DecimalBPP / 1.5 >= 24)
+ return 24;
+ else if (DecimalBPP / 1.5 >= 20)
+ return 20;
+ else if (DecimalBPP / 1.5 >= 16)
+ return 16;
+ else
+ return 0;
+ }
+ } else {
+ if (DSCEnabled) {
+ if (Format == dm_420) {
+ if (DecimalBPP < 6)
+ return 0;
+ else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
+ return 1.5 * DSCInputBitPerComponent - 1 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else if (Format == dm_n422) {
+ if (DecimalBPP < 7)
+ return 0;
+ else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
+ return 2 * DSCInputBitPerComponent - 1 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else {
+ if (DecimalBPP < 8)
+ return 0;
+ else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
+ return 3 * DSCInputBitPerComponent - 1 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ }
+ } else if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return 0;
+ } else if (Format == dm_s422 || Format == dm_n422) {
+ if (DecimalBPP >= 24)
+ return 24;
+ else if (DecimalBPP >= 20)
+ return 20;
+ else if (DecimalBPP >= 16)
+ return 16;
+ else
+ return 0;
+ } else {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else
+ return 0;
+ }
+ }
+}
+
+static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
+{
+ int i;
+ unsigned int j, k;
+ /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+
+ /*Scale Ratio, taps Support Check*/
+
+ mode_lib->vba.ScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ScalerEnabled[k] == false
+ && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
+ || mode_lib->vba.HRatio[k] != 1.0
+ || mode_lib->vba.htaps[k] != 1.0
+ || mode_lib->vba.VRatio[k] != 1.0
+ || mode_lib->vba.vtaps[k] != 1.0)) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
+ || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
+ || (mode_lib->vba.htaps[k] > 1.0
+ && (mode_lib->vba.htaps[k] % 2) == 1)
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
+ || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
+ && (mode_lib->vba.HRatio[k] / 2.0
+ > mode_lib->vba.HTAPsChroma[k]
+ || mode_lib->vba.VRatio[k] / 2.0
+ > mode_lib->vba.VTAPsChroma[k]))) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ }
+ }
+ /*Source Format, Pixel Format and Scan Support Check*/
+
+ mode_lib->vba.SourceFormatPixelAndScanSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ && mode_lib->vba.SourceScan[k] != dm_horz)
+ || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
+ || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10))
+ || (((mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_gl
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_lvp)
+ && !((mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_32)
+ && mode_lib->vba.SourceScan[k]
+ == dm_horz
+ && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
+ == true
+ && mode_lib->vba.DCCEnable[k]
+ == false))
+ || (mode_lib->vba.DCCEnable[k] == true
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_linear
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10)))) {
+ mode_lib->vba.SourceFormatPixelAndScanSupport = false;
+ }
+ }
+ /*Bandwidth Support Check*/
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
+ } else {
+ mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 8.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 4.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 2.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 2.0;
+ } else {
+ mode_lib->vba.BytePerPixelInDETY[k] = 4.0 / 3;
+ mode_lib->vba.BytePerPixelInDETC[k] = 8.0 / 3;
+ }
+ }
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.SwathWidthYSingleDPP[k]
+ * (dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
+ * mode_lib->vba.VRatio[k]
+ + dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
+ / 2.0 * mode_lib->vba.VRatio[k] / 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1 / 256);
+ }
+ if (mode_lib->vba.VirtualMemoryEnable == true
+ && mode_lib->vba.SourceScan[k] != dm_horz
+ && (mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x)) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1 / 64);
+ } else if (mode_lib->vba.VirtualMemoryEnable == true
+ && mode_lib->vba.SourceScan[k] == dm_horz
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32)
+ && (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x)) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1 / 256);
+ } else if (mode_lib->vba.VirtualMemoryEnable == true) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1 / 512);
+ }
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.ReadBandwidth[k] / 1000.0;
+ }
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 4.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 3.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 1.5;
+ } else {
+ mode_lib->vba.WriteBandwidth[k] = 0.0;
+ }
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.WriteBandwidth[k] / 1000.0;
+ }
+ mode_lib->vba.TotalBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond;
+ mode_lib->vba.DCCEnabledInAnyPlane = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.DCCEnabledInAnyPlane = true;
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] = dml_min(
+ mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClockPerState[i]
+ * mode_lib->vba.FabricDatapathToDCNDataReturn)
+ / 1000;
+ mode_lib->vba.ReturnBWToDCNPerState = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0)
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
+ / 100;
+ mode_lib->vba.ReturnBWPerState[i] = mode_lib->vba.ReturnBWToDCNPerState;
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true
+ && mode_lib->vba.ReturnBWToDCNPerState
+ > mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.ReturnBWToDCNPerState * 4.0
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ - mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0)
+ + mode_lib->vba.UrgentLatency)));
+ }
+ mode_lib->vba.CriticalPoint =
+ 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
+ && mode_lib->vba.CriticalPoint < 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ dml_pow(
+ 4.0
+ * mode_lib->vba.ReturnBWToDCNPerState
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0),
+ 2));
+ }
+ mode_lib->vba.ReturnBWToDCNPerState = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true
+ && mode_lib->vba.ReturnBWToDCNPerState
+ > mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.ReturnBWToDCNPerState * 4.0
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ - mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0)
+ + mode_lib->vba.UrgentLatency)));
+ }
+ mode_lib->vba.CriticalPoint =
+ 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
+ && mode_lib->vba.CriticalPoint < 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ dml_pow(
+ 4.0
+ * mode_lib->vba.ReturnBWToDCNPerState
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0),
+ 2));
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if ((mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond * 1000.0
+ <= mode_lib->vba.ReturnBWPerState[i])
+ && (mode_lib->vba.TotalBandwidthConsumedGBytePerSecond * 1000.0
+ <= mode_lib->vba.FabricAndDRAMBandwidthPerState[i]
+ * 1000.0
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
+ / 100.0)) {
+ mode_lib->vba.BandwidthSupport[i] = true;
+ } else {
+ mode_lib->vba.BandwidthSupport[i] = false;
+ }
+ }
+ /*Writeback Latency support check*/
+
+ mode_lib->vba.WritebackLatencySupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ if (mode_lib->vba.WriteBandwidth[k]
+ > (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ } else {
+ if (mode_lib->vba.WriteBandwidth[k]
+ > 1.5
+ * dml_min(
+ mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ }
+ }
+ }
+ /*Re-ordering Buffer Support Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32.0)
+ / mode_lib->vba.DCFCLKPerState[i]
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBWPerState[i];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0 / mode_lib->vba.ReturnBWPerState[i]
+ > mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
+ mode_lib->vba.ROBSupport[i] = true;
+ } else {
+ mode_lib->vba.ROBSupport[i] = false;
+ }
+ }
+ /*Writeback Mode Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveWriteback = 0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.TotalNumberOfActiveWriteback =
+ mode_lib->vba.TotalNumberOfActiveWriteback + 1;
+ }
+ }
+ mode_lib->vba.WritebackModeSupport = true;
+ if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.Writeback10bpc420Supported != true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ }
+ /*Writeback Scale Ratio and Taps Support Check*/
+
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
+ && (mode_lib->vba.WritebackHRatio[k] != 1.0
+ || mode_lib->vba.WritebackVRatio[k] != 1.0)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackMaxVSCLRatio
+ || mode_lib->vba.WritebackHRatio[k]
+ < mode_lib->vba.WritebackMinHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ < mode_lib->vba.WritebackMinVSCLRatio
+ || mode_lib->vba.WritebackLumaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackLumaHTaps[k]
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackLumaVTaps[k]
+ || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
+ == 1))
+ || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
+ && (mode_lib->vba.WritebackChromaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || 2.0
+ * mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackChromaHTaps[k]
+ || 2.0
+ * mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackChromaVTaps[k]
+ || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
+ mode_lib->vba.WritebackLumaVExtra =
+ dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
+ } else {
+ mode_lib->vba.WritebackLumaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ + mode_lib->vba.WritebackLineBufferChromaBufferSize)
+ / 3.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
+ mode_lib->vba.WritebackChromaVExtra = 0.0;
+ } else {
+ mode_lib->vba.WritebackChromaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ }
+ }
+ /*Maximum DISPCLK/DPPCLK Support check*/
+
+ mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackRequiredDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackRequiredDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.HRatio[k] > 1.0) {
+ mode_lib->vba.PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] = 0.0;
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max3(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_FACTOR[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
+ && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ } else {
+ if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max5(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_FACTOR[k],
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2.0),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4.0
+ / mode_lib->vba.PSCL_FACTOR_CHROMA[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
+ || mode_lib->vba.HTAPsChroma[k] > 6.0
+ || mode_lib->vba.VTAPsChroma[k] > 6.0)
+ && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
+ dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
+ &mode_lib->vba.Read256BlockHeightY[k],
+ &mode_lib->vba.Read256BlockHeightC[k],
+ &mode_lib->vba.Read256BlockWidthY[k],
+ &mode_lib->vba.Read256BlockWidthC[k]);
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockHeightY[k];
+ mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockHeightC[k];
+ } else {
+ mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockWidthY[k];
+ mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockWidthC[k];
+ }
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ } else {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
+ / 2.0;
+ }
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
+ / 2.0;
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k]
+ / 2.0;
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ } else {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ }
+ }
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
+ } else {
+ mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
+ }
+ mode_lib->vba.MaximumSwathWidthInDETBuffer =
+ dml_min(
+ mode_lib->vba.MaximumSwathWidthSupport,
+ mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0
+ / (mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.MinSwathHeightY[k]
+ + mode_lib->vba.BytePerPixelInDETC[k]
+ / 2.0
+ * mode_lib->vba.MinSwathHeightC[k]));
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ mode_lib->vba.LineBufferSize
+ * dml_max(mode_lib->vba.HRatio[k], 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0));
+ } else {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ dml_min(
+ mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0)),
+ 2.0 * mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2.0,
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.VTAPsChroma[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k]
+ / 2.0,
+ 1.0)
+ - 2,
+ 0.0)));
+ }
+ mode_lib->vba.MaximumSwathWidth[k] = dml_min(
+ mode_lib->vba.MaximumSwathWidthInDETBuffer,
+ mode_lib->vba.MaximumSwathWidthInLineBuffer);
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDppclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * (1.0
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100.0);
+ if (mode_lib->vba.ODMCapability == true
+ && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ / 2.0;
+ } else {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ }
+ if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]
+ && mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k] / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ if ((mode_lib->vba.MaxDispclk[i] == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
+ && mode_lib->vba.MaxDppclk[i]
+ == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])
+ && (mode_lib->vba.TotalNumberOfActiveDPP[i]
+ > mode_lib->vba.MaxNumDPP
+ || mode_lib->vba.DISPCLK_DPPCLK_Support[i] == false)) {
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ if (mode_lib->vba.ODMCapability == true
+ && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ / 2.0;
+ } else {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ }
+ if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]
+ && mode_lib->vba.ODMCombineEnablePerState[i][k]
+ == false) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveDPP[i] > mode_lib->vba.MaxNumDPP) {
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ if (mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ if (!(mode_lib->vba.MaxDispclk[i]
+ == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
+ && mode_lib->vba.MaxDppclk[i]
+ == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])) {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * (1.0
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100.0);
+ } else {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.WritebackRequiredDISPCLK);
+ if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
+ < mode_lib->vba.WritebackRequiredDISPCLK) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ /*Viewport Size Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.ViewportSizeSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ if (dml_min(
+ mode_lib->vba.SwathWidthYSingleDPP[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]))
+ > mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.ViewportSizeSupport[i] = false;
+ }
+ } else {
+ if (mode_lib->vba.SwathWidthYSingleDPP[k] / 2.0
+ > mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.ViewportSizeSupport[i] = false;
+ }
+ }
+ }
+ }
+ /*Total Available Pipes Support Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if (mode_lib->vba.TotalNumberOfActiveDPP[i] <= mode_lib->vba.MaxNumDPP) {
+ mode_lib->vba.TotalAvailablePipesSupport[i] = true;
+ } else {
+ mode_lib->vba.TotalAvailablePipesSupport[i] = false;
+ }
+ }
+ /*Total Available OTG Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ + 1.0;
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
+ mode_lib->vba.NumberOfOTGSupport = true;
+ } else {
+ mode_lib->vba.NumberOfOTGSupport = false;
+ }
+ /*Display IO and DSC Support Check*/
+
+ mode_lib->vba.NonsupportedDSCInputBPC = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
+ mode_lib->vba.NonsupportedDSCInputBPC = true;
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.RequiresDSC[i][k] = 0;
+ mode_lib->vba.RequiresFEC[i][k] = 0;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.Output[k] == dm_hdmi) {
+ mode_lib->vba.RequiresDSC[i][k] = 0;
+ mode_lib->vba.RequiresFEC[i][k] = 0;
+ mode_lib->vba.OutputBppPerState[i][k] =
+ TruncToValidBPP(
+ dml_min(
+ 600.0,
+ mode_lib->vba.PHYCLKPerState[i])
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 24,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ } else if (mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp) {
+ if (mode_lib->vba.Output[k] == dm_edp) {
+ mode_lib->vba.EffectiveFECOverhead = 0.0;
+ } else {
+ mode_lib->vba.EffectiveFECOverhead =
+ mode_lib->vba.FECOverhead;
+ }
+ if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == 0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == 0
+ && mode_lib->vba.PHYCLKPerState[i]
+ >= 810.0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true
+ || mode_lib->vba.Outbpp == 0) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ }
+ } else {
+ mode_lib->vba.OutputBppPerState[i][k] = 0;
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.DIOSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.OutputBppPerState[i][k] == 0
+ || (mode_lib->vba.OutputFormat[k] == dm_420
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP
+ == true)) {
+ mode_lib->vba.DIOSupport[i] = false;
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if ((mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp)) {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k]
+ == dm_n422) {
+ mode_lib->vba.DSCFormatFactor = 2;
+ } else {
+ mode_lib->vba.DSCFormatFactor = 1;
+ }
+ if (mode_lib->vba.RequiresDSC[i][k] == true) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k]
+ == true) {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 6.0
+ / mode_lib->vba.DSCFormatFactor
+ > (1.0
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * mode_lib->vba.MaxDSCCLK[i]) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ } else {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 3.0
+ / mode_lib->vba.DSCFormatFactor
+ > (1.0
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * mode_lib->vba.MaxDSCCLK[i]) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.NotEnoughDSCUnits[i] = false;
+ mode_lib->vba.TotalDSCUnitsRequired = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.RequiresDSC[i][k] == true) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 2.0;
+ } else {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 1.0;
+ }
+ }
+ }
+ if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
+ mode_lib->vba.NotEnoughDSCUnits[i] = true;
+ }
+ }
+ /*DSC Delay per state*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k) {
+ mode_lib->vba.slices = 0;
+ } else if (mode_lib->vba.RequiresDSC[i][k] == 0
+ || mode_lib->vba.RequiresDSC[i][k] == false) {
+ mode_lib->vba.slices = 0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
+ mode_lib->vba.slices = dml_ceil(
+ mode_lib->vba.PixelClockBackEnd[k] / 400.0,
+ 4.0);
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
+ mode_lib->vba.slices = 8.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
+ mode_lib->vba.slices = 4.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
+ mode_lib->vba.slices = 2.0;
+ } else {
+ mode_lib->vba.slices = 1.0;
+ }
+ if (mode_lib->vba.OutputBppPerState[i][k] == 0
+ || mode_lib->vba.OutputBppPerState[i][k] == 0) {
+ mode_lib->vba.bpp = 0.0;
+ } else {
+ mode_lib->vba.bpp = mode_lib->vba.OutputBppPerState[i][k];
+ }
+ if (mode_lib->vba.RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ 2.0
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices
+ / 2,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ mode_lib->vba.DSCDelayPerState[i][k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ mode_lib->vba.DSCDelayPerState[i][k] = 0.0;
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.RequiresDSC[i][j] == true) {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ mode_lib->vba.DSCDelayPerState[i][j];
+ }
+ }
+ }
+ }
+ /*Urgent Latency Support Check*/
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ mode_lib->vba.SwathWidthYPerState[i][k] =
+ dml_min(
+ mode_lib->vba.SwathWidthYSingleDPP[k],
+ dml_round(
+ mode_lib->vba.HActive[k]
+ / 2.0
+ * mode_lib->vba.HRatio[k]));
+ } else {
+ mode_lib->vba.SwathWidthYPerState[i][k] =
+ mode_lib->vba.SwathWidthYSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k];
+ }
+ mode_lib->vba.SwathWidthGranularityY = 256.0
+ / dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
+ / mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ mode_lib->vba.SwathWidthYPerState[i][k] - 1.0,
+ mode_lib->vba.SwathWidthGranularityY)
+ + mode_lib->vba.SwathWidthGranularityY)
+ * mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.MaxSwathHeightY[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY = dml_ceil(
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY,
+ 256.0) + 256;
+ }
+ if (mode_lib->vba.MaxSwathHeightC[k] > 0.0) {
+ mode_lib->vba.SwathWidthGranularityC = 256.0
+ / dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
+ / mode_lib->vba.MaxSwathHeightC[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ mode_lib->vba.SwathWidthYPerState[i][k] / 2.0 - 1.0,
+ mode_lib->vba.SwathWidthGranularityC)
+ + mode_lib->vba.SwathWidthGranularityC)
+ * mode_lib->vba.BytePerPixelInDETC[k]
+ * mode_lib->vba.MaxSwathHeightC[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC,
+ 256.0) + 256;
+ }
+ } else {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0;
+ }
+ if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY
+ + mode_lib->vba.RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightYPerState[i][k] =
+ mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.SwathHeightCPerState[i][k] =
+ mode_lib->vba.MaxSwathHeightC[k];
+ } else {
+ mode_lib->vba.SwathHeightYPerState[i][k] =
+ mode_lib->vba.MinSwathHeightY[k];
+ mode_lib->vba.SwathHeightCPerState[i][k] =
+ mode_lib->vba.MinSwathHeightC[k];
+ }
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = 0.0;
+ } else if (mode_lib->vba.SwathHeightYPerState[i][k]
+ <= mode_lib->vba.SwathHeightCPerState[i][k]) {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETC[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
+ } else {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 * 2.0 / 3.0
+ / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 3.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
+ }
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ dml_floor(
+ mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k]
+ / dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)),
+ 1.0))
+ - (mode_lib->vba.vtaps[k] - 1.0);
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ dml_floor(
+ mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k]
+ / 2.0
+ / dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2.0,
+ 1.0)),
+ 1.0))
+ - (mode_lib->vba.VTAPsChroma[k] - 1.0);
+ mode_lib->vba.EffectiveDETLBLinesLuma =
+ dml_floor(
+ mode_lib->vba.LinesInDETLuma
+ + dml_min(
+ mode_lib->vba.LinesInDETLuma
+ * mode_lib->vba.RequiredDISPCLK[i]
+ * mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.PSCL_FACTOR[k]
+ / mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
+ mode_lib->vba.SwathHeightYPerState[i][k]);
+ mode_lib->vba.EffectiveDETLBLinesChroma =
+ dml_floor(
+ mode_lib->vba.LinesInDETChroma
+ + dml_min(
+ mode_lib->vba.LinesInDETChroma
+ * mode_lib->vba.RequiredDISPCLK[i]
+ * mode_lib->vba.BytePerPixelInDETC[k]
+ * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
+ / mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
+ mode_lib->vba.SwathHeightCPerState[i][k]);
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
+ mode_lib->vba.EffectiveDETLBLinesLuma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETLBLinesLuma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
+ dml_min(
+ mode_lib->vba.EffectiveDETLBLinesLuma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETLBLinesLuma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]),
+ mode_lib->vba.EffectiveDETLBLinesChroma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k]
+ / 2.0)
+ - mode_lib->vba.EffectiveDETLBLinesChroma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ / 2.0
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]));
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.UrgentLatencySupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.UrgentLatencySupportUsPerState[i][k]
+ < mode_lib->vba.UrgentLatency / 1.0) {
+ mode_lib->vba.UrgentLatencySupport[i] = false;
+ }
+ }
+ }
+ /*Prefetch Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep = 8.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.PixelClock[k] / 16.0);
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ } else {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MetaRowBytesY,
+ &mode_lib->vba.DPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceededY[i][k],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightYPerState[i][k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.PrefillY[k],
+ &mode_lib->vba.MaxNumSwY[k]);
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0,
+ mode_lib->vba.ViewportHeight[k] / 2.0,
+ mode_lib->vba.SwathWidthYPerState[i][k] / 2.0,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0.0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &mode_lib->vba.MetaRowBytesC,
+ &mode_lib->vba.DPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceededC[i][k],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2.0,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightCPerState[i][k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.PrefillC[k],
+ &mode_lib->vba.MaxNumSwC[k]);
+ } else {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
+ mode_lib->vba.MetaRowBytesC = 0.0;
+ mode_lib->vba.DPTEBytesPerRowC = 0.0;
+ mode_lib->vba.PrefetchLinesC[k] = 0.0;
+ mode_lib->vba.PTEBufferSizeNotExceededC[i][k] = true;
+ }
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY
+ + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
+ mode_lib->vba.MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY
+ + mode_lib->vba.MetaRowBytesC;
+ mode_lib->vba.DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY
+ + mode_lib->vba.DPTEBytesPerRowC;
+ }
+ mode_lib->vba.ExtraLatency =
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
+ + (mode_lib->vba.TotalNumberOfActiveDPP[i]
+ * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
+ * mode_lib->vba.MetaChunkSize)
+ * 1024.0
+ / mode_lib->vba.ReturnBWPerState[i];
+ if (mode_lib->vba.VirtualMemoryEnable == true) {
+ mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ + mode_lib->vba.TotalNumberOfActiveDPP[i]
+ * mode_lib->vba.PTEChunkSize * 1024.0
+ / mode_lib->vba.ReturnBWPerState[i];
+ }
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.RequiredDISPCLK[i];
+ } else {
+ mode_lib->vba.WritebackDelay[i][k] = 0.0;
+ }
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j]
+ == true) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ dml_max(
+ mode_lib->vba.WritebackDelay[i][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.RequiredDISPCLK[i]);
+ }
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ mode_lib->vba.WritebackDelay[i][j];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.MaximumVStartup[k] =
+ mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(
+ 1.0,
+ dml_ceil(
+ mode_lib->vba.WritebackDelay[i][k]
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1.0));
+ }
+ mode_lib->vba.TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
+ }
+ mode_lib->vba.IsErrorResult[i][k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.RequiredDPPCLK[i][k],
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.DSCDelayPerState[i][k],
+ mode_lib->vba.NoOfDPP[i][k],
+ mode_lib->vba.ScalerEnabled[k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ mode_lib->vba.SwathWidthYPerState[i][k]
+ / mode_lib->vba.HRatio[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
+ mode_lib->vba.MetaRowBytes[k],
+ mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ mode_lib->vba.BytePerPixelInDETY[k],
+ mode_lib->vba.PrefillY[k],
+ mode_lib->vba.MaxNumSwY[k],
+ mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.BytePerPixelInDETC[k],
+ mode_lib->vba.PrefillC[k],
+ mode_lib->vba.MaxNumSwC[k],
+ mode_lib->vba.SwathHeightYPerState[i][k],
+ mode_lib->vba.SwathHeightCPerState[i][k],
+ mode_lib->vba.TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.DSTXAfterScaler,
+ mode_lib->vba.DSTYAfterScaler,
+ &mode_lib->vba.LineTimesForPrefetch[k],
+ &mode_lib->vba.PrefetchBW[k],
+ &mode_lib->vba.LinesForMetaPTE[k],
+ &mode_lib->vba.LinesForMetaAndDPTERow[k],
+ &mode_lib->vba.VRatioPreY[i][k],
+ &mode_lib->vba.VRatioPreC[i][k],
+ &mode_lib->vba.RequiredPrefetchPixelDataBW[i][k],
+ &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.cursor_bw[k] = mode_lib->vba.NumberOfCursors[k]
+ * mode_lib->vba.CursorWidth[k][0]
+ * mode_lib->vba.CursorBPP[k][0] / 8.0
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ }
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
+ mode_lib->vba.prefetch_vm_bw_valid = true;
+ mode_lib->vba.prefetch_row_bw_valid = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] == 0.0) {
+ mode_lib->vba.prefetch_vm_bw[k] = 0.0;
+ } else if (mode_lib->vba.LinesForMetaPTE[k] > 0.0) {
+ mode_lib->vba.prefetch_vm_bw[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
+ / (mode_lib->vba.LinesForMetaPTE[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_vm_bw[k] = 0.0;
+ mode_lib->vba.prefetch_vm_bw_valid = false;
+ }
+ if (mode_lib->vba.MetaRowBytes[k] + mode_lib->vba.DPTEBytesPerRow[k]
+ == 0.0) {
+ mode_lib->vba.prefetch_row_bw[k] = 0.0;
+ } else if (mode_lib->vba.LinesForMetaAndDPTERow[k] > 0.0) {
+ mode_lib->vba.prefetch_row_bw[k] = (mode_lib->vba.MetaRowBytes[k]
+ + mode_lib->vba.DPTEBytesPerRow[k])
+ / (mode_lib->vba.LinesForMetaAndDPTERow[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_row_bw[k] = 0.0;
+ mode_lib->vba.prefetch_row_bw_valid = false;
+ }
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch =
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max4(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]);
+ }
+ mode_lib->vba.PrefetchSupported[i] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ > mode_lib->vba.ReturnBWPerState[i]
+ || mode_lib->vba.prefetch_vm_bw_valid == false
+ || mode_lib->vba.prefetch_row_bw_valid == false) {
+ mode_lib->vba.PrefetchSupported[i] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.LineTimesForPrefetch[k] < 2.0
+ || mode_lib->vba.LinesForMetaPTE[k] >= 8.0
+ || mode_lib->vba.LinesForMetaAndDPTERow[k] >= 16.0
+ || mode_lib->vba.IsErrorResult[i][k] == true) {
+ mode_lib->vba.PrefetchSupported[i] = false;
+ }
+ }
+ mode_lib->vba.VRatioInPrefetchSupported[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.VRatioPreY[i][k] > 4.0
+ || mode_lib->vba.VRatioPreC[i][k] > 4.0
+ || mode_lib->vba.IsErrorResult[i][k] == true) {
+ mode_lib->vba.VRatioInPrefetchSupported[i] = false;
+ }
+ }
+ if (mode_lib->vba.PrefetchSupported[i] == true
+ && mode_lib->vba.VRatioInPrefetchSupported[i] == true) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.ReturnBWPerState[i];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.PrefetchBW[k]);
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
+ + mode_lib->vba.MetaRowBytes[k]
+ + mode_lib->vba.DPTEBytesPerRow[k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + mode_lib->vba.ImmediateFlipBytes[k];
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
+ mode_lib->vba.MetaRowBytes[k],
+ mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &mode_lib->vba.final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.total_dcn_read_bw_with_flip =
+ mode_lib->vba.total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max3(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ mode_lib->vba.final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]));
+ }
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = true;
+ if (mode_lib->vba.total_dcn_read_bw_with_flip
+ > mode_lib->vba.ReturnBWPerState[i]) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ }
+ /*PTE Buffer Size Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.PTEBufferSizeNotExceeded[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.PTEBufferSizeNotExceededY[i][k] == false
+ || mode_lib->vba.PTEBufferSizeNotExceededC[i][k] == false) {
+ mode_lib->vba.PTEBufferSizeNotExceeded[i] = false;
+ }
+ }
+ }
+ /*Cursor Support Check*/
+
+ mode_lib->vba.CursorSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
+ if (dml_floor(
+ dml_floor(
+ mode_lib->vba.CursorBufferSize
+ - mode_lib->vba.CursorChunkSize,
+ mode_lib->vba.CursorChunkSize) * 1024.0
+ / (mode_lib->vba.CursorWidth[k][0]
+ * mode_lib->vba.CursorBPP[k][0]
+ / 8.0),
+ 1.0)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatency
+ || (mode_lib->vba.CursorBPP[k][0] == 64.0
+ && mode_lib->vba.Cursor64BppSupport == false)) {
+ mode_lib->vba.CursorSupport = false;
+ }
+ }
+ }
+ /*Valid Pitch Check*/
+
+ mode_lib->vba.PitchSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.AlignedYPitch[k] = dml_ceil(
+ dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
+ mode_lib->vba.MacroTileWidthY[k]);
+ if (mode_lib->vba.AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.AlignedDCCMetaPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.DCCMetaPitchY[k],
+ mode_lib->vba.ViewportWidth[k]),
+ 64.0 * mode_lib->vba.Read256BlockWidthY[k]);
+ } else {
+ mode_lib->vba.AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
+ }
+ if (mode_lib->vba.AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
+ mode_lib->vba.AlignedCPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.PitchC[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0),
+ mode_lib->vba.MacroTileWidthC[k]);
+ } else {
+ mode_lib->vba.AlignedCPitch[k] = mode_lib->vba.PitchC[k];
+ }
+ if (mode_lib->vba.AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ }
+ /*Mode Support, Voltage State and SOC Configuration*/
+
+ for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
+ if (mode_lib->vba.ScaleRatioAndTapsSupport == true
+ && mode_lib->vba.SourceFormatPixelAndScanSupport == true
+ && mode_lib->vba.ViewportSizeSupport[i] == true
+ && mode_lib->vba.BandwidthSupport[i] == true
+ && mode_lib->vba.DIOSupport[i] == true
+ && mode_lib->vba.NotEnoughDSCUnits[i] == false
+ && mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] == false
+ && mode_lib->vba.UrgentLatencySupport[i] == true
+ && mode_lib->vba.ROBSupport[i] == true
+ && mode_lib->vba.DISPCLK_DPPCLK_Support[i] == true
+ && mode_lib->vba.TotalAvailablePipesSupport[i] == true
+ && mode_lib->vba.NumberOfOTGSupport == true
+ && mode_lib->vba.WritebackModeSupport == true
+ && mode_lib->vba.WritebackLatencySupport == true
+ && mode_lib->vba.WritebackScaleRatioAndTapsSupport == true
+ && mode_lib->vba.CursorSupport == true
+ && mode_lib->vba.PitchSupport == true
+ && mode_lib->vba.PrefetchSupported[i] == true
+ && mode_lib->vba.VRatioInPrefetchSupported[i] == true
+ && mode_lib->vba.PTEBufferSizeNotExceeded[i] == true
+ && mode_lib->vba.NonsupportedDSCInputBPC == false) {
+ mode_lib->vba.ModeSupport[i] = true;
+ } else {
+ mode_lib->vba.ModeSupport[i] = false;
+ }
+ }
+ for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
+ if (i == DC__VOLTAGE_STATES || mode_lib->vba.ModeSupport[i] == true) {
+ mode_lib->vba.VoltageLevel = i;
+ }
+ }
+ mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricAndDRAMBandwidth =
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ImmediateFlipSupport =
+ mode_lib->vba.ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DPPPerPlane[k] = mode_lib->vba.NoOfDPP[mode_lib->vba.VoltageLevel][k];
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.ODMCombineEnabled[k] =
+ mode_lib->vba.ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
+ } else {
+ mode_lib->vba.ODMCombineEnabled[k] = 0;
+ }
+ mode_lib->vba.DSCEnabled[k] =
+ mode_lib->vba.RequiresDSC[mode_lib->vba.VoltageLevel][k];
+ mode_lib->vba.OutputBpp[k] =
+ mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
new file mode 100644
index 000000000000..4112409cd974
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML2_DISPLAY_MODE_VBA_H__
+#define __DML2_DISPLAY_MODE_VBA_H__
+
+#include "dml_common_defs.h"
+
+struct display_mode_lib;
+
+void set_prefetch_mode(struct display_mode_lib *mode_lib,
+ bool cstate_en,
+ bool pstate_en,
+ bool ignore_viewport_pos,
+ bool immediate_flip_support);
+
+#define dml_get_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes)
+
+dml_get_attr_decl(clk_dcf_deepsleep);
+dml_get_attr_decl(wm_urgent);
+dml_get_attr_decl(wm_memory_trip);
+dml_get_attr_decl(wm_writeback_urgent);
+dml_get_attr_decl(wm_stutter_exit);
+dml_get_attr_decl(wm_stutter_enter_exit);
+dml_get_attr_decl(wm_dram_clock_change);
+dml_get_attr_decl(wm_writeback_dram_clock_change);
+dml_get_attr_decl(wm_xfc_underflow);
+dml_get_attr_decl(stutter_efficiency_no_vblank);
+dml_get_attr_decl(stutter_efficiency);
+dml_get_attr_decl(urgent_latency);
+dml_get_attr_decl(urgent_extra_latency);
+dml_get_attr_decl(nonurgent_latency);
+dml_get_attr_decl(dram_clock_change_latency);
+dml_get_attr_decl(dispclk_calculated);
+dml_get_attr_decl(total_data_read_bw);
+dml_get_attr_decl(return_bw);
+dml_get_attr_decl(tcalc);
+
+#define dml_get_pipe_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe)
+
+dml_get_pipe_attr_decl(dsc_delay);
+dml_get_pipe_attr_decl(dppclk_calculated);
+dml_get_pipe_attr_decl(dscclk_calculated);
+dml_get_pipe_attr_decl(min_ttu_vblank);
+dml_get_pipe_attr_decl(vratio_prefetch_l);
+dml_get_pipe_attr_decl(vratio_prefetch_c);
+dml_get_pipe_attr_decl(dst_x_after_scaler);
+dml_get_pipe_attr_decl(dst_y_after_scaler);
+dml_get_pipe_attr_decl(dst_y_per_vm_vblank);
+dml_get_pipe_attr_decl(dst_y_per_row_vblank);
+dml_get_pipe_attr_decl(dst_y_prefetch);
+dml_get_pipe_attr_decl(dst_y_per_vm_flip);
+dml_get_pipe_attr_decl(dst_y_per_row_flip);
+dml_get_pipe_attr_decl(xfc_transfer_delay);
+dml_get_pipe_attr_decl(xfc_precharge_delay);
+dml_get_pipe_attr_decl(xfc_remote_surface_flip_latency);
+dml_get_pipe_attr_decl(xfc_prefetch_margin);
+
+unsigned int get_vstartup_calculated(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes,
+ unsigned int which_pipe);
+
+double get_total_immediate_flip_bytes(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+double get_total_immediate_flip_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+double get_total_prefetch_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+
+unsigned int dml_get_voltage_level(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+
+bool Calculate256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC);
+
+
+struct vba_vars_st {
+ ip_params_st ip;
+ soc_bounding_box_st soc;
+
+ unsigned int MaximumMaxVStartupLines;
+ double cursor_bw[DC__NUM_DPP__MAX];
+ double meta_row_bw[DC__NUM_DPP__MAX];
+ double dpte_row_bw[DC__NUM_DPP__MAX];
+ double qual_row_bw[DC__NUM_DPP__MAX];
+ double WritebackDISPCLK;
+ double PSCL_THROUGHPUT_LUMA[DC__NUM_DPP__MAX];
+ double PSCL_THROUGHPUT_CHROMA[DC__NUM_DPP__MAX];
+ double DPPCLKUsingSingleDPPLuma;
+ double DPPCLKUsingSingleDPPChroma;
+ double DPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
+ double DISPCLKWithRamping;
+ double DISPCLKWithoutRamping;
+ double GlobalDPPCLK;
+ double DISPCLKWithRampingRoundedToDFSGranularity;
+ double DISPCLKWithoutRampingRoundedToDFSGranularity;
+ double MaxDispclkRoundedToDFSGranularity;
+ bool DCCEnabledAnyPlane;
+ double ReturnBandwidthToDCN;
+ unsigned int SwathWidthY[DC__NUM_DPP__MAX];
+ unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
+ double BytePerPixelDETY[DC__NUM_DPP__MAX];
+ double BytePerPixelDETC[DC__NUM_DPP__MAX];
+ double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
+ double ReadBandwidthPlaneChroma[DC__NUM_DPP__MAX];
+ unsigned int TotalActiveDPP;
+ unsigned int TotalDCCActiveDPP;
+ double UrgentRoundTripAndOutOfOrderLatency;
+ double DisplayPipeLineDeliveryTimeLuma[DC__NUM_DPP__MAX]; // WM
+ double DisplayPipeLineDeliveryTimeChroma[DC__NUM_DPP__MAX]; // WM
+ double LinesInDETY[DC__NUM_DPP__MAX]; // WM
+ double LinesInDETC[DC__NUM_DPP__MAX]; // WM
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
+ double FullDETBufferingTimeY[DC__NUM_DPP__MAX]; // WM
+ double FullDETBufferingTimeC[DC__NUM_DPP__MAX]; // WM
+ double MinFullDETBufferingTime;
+ double FrameTimeForMinFullDETBufferingTime;
+ double AverageReadBandwidthGBytePerSecond;
+ double PartOfBurstThatFitsInROB;
+ double StutterBurstTime;
+ //unsigned int NextPrefetchMode;
+ double VBlankTime;
+ double SmallestVBlank;
+ double DCFCLKDeepSleepPerPlane;
+ double EffectiveDETPlusLBLinesLuma;
+ double EffectiveDETPlusLBLinesChroma;
+ double UrgentLatencySupportUsLuma;
+ double UrgentLatencySupportUsChroma;
+ double UrgentLatencySupportUs[DC__NUM_DPP__MAX];
+ unsigned int DSCFormatFactor;
+ unsigned int BlockHeight256BytesY[DC__NUM_DPP__MAX];
+ unsigned int BlockHeight256BytesC[DC__NUM_DPP__MAX];
+ unsigned int BlockWidth256BytesY[DC__NUM_DPP__MAX];
+ unsigned int BlockWidth256BytesC[DC__NUM_DPP__MAX];
+ double VInitPreFillY[DC__NUM_DPP__MAX];
+ double VInitPreFillC[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwathY[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwathC[DC__NUM_DPP__MAX];
+ double PrefetchSourceLinesY[DC__NUM_DPP__MAX];
+ double PrefetchSourceLinesC[DC__NUM_DPP__MAX];
+ double PixelPTEBytesPerRow[DC__NUM_DPP__MAX];
+ double MetaRowByte[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma[DC__NUM_DPP__MAX];
+ unsigned int meta_row_height[DC__NUM_DPP__MAX];
+ unsigned int meta_row_height_chroma[DC__NUM_DPP__MAX];
+
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MaxVStartupLines[DC__NUM_DPP__MAX];
+ double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PrefetchModeSupported;
+ bool AllowDRAMClockChangeDuringVBlank[DC__NUM_DPP__MAX];
+ bool AllowDRAMSelfRefreshDuringVBlank[DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixDataBW[DC__NUM_DPP__MAX];
+ double XFCRemoteSurfaceFlipDelay;
+ double TInitXFill;
+ double TslvChk;
+ double SrcActiveDrainRate;
+ double Tno_bw[DC__NUM_DPP__MAX];
+ bool ImmediateFlipSupported;
+
+ double prefetch_vm_bw[DC__NUM_DPP__MAX];
+ double prefetch_row_bw[DC__NUM_DPP__MAX];
+ bool ImmediateFlipSupportedForPipe[DC__NUM_DPP__MAX];
+ unsigned int VStartupLines;
+ double DisplayPipeLineDeliveryTimeLumaPrefetch[DC__NUM_DPP__MAX];
+ double DisplayPipeLineDeliveryTimeChromaPrefetch[DC__NUM_DPP__MAX];
+ unsigned int ActiveDPPs;
+ unsigned int LBLatencyHidingSourceLinesY;
+ unsigned int LBLatencyHidingSourceLinesC;
+ double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double MinActiveDRAMClockChangeMargin;
+ double XFCSlaveVUpdateOffset[DC__NUM_DPP__MAX];
+ double XFCSlaveVupdateWidth[DC__NUM_DPP__MAX];
+ double XFCSlaveVReadyOffset[DC__NUM_DPP__MAX];
+ double InitFillLevel;
+ double FinalFillMargin;
+ double FinalFillLevel;
+ double RemainingFillLevel;
+ double TFinalxFill;
+
+
+ //
+ // SOC Bounding Box Parameters
+ //
+ double SRExitTime;
+ double SREnterPlusExitTime;
+ double UrgentLatency;
+ double WritebackLatency;
+ double PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency;
+ double NumberOfChannels;
+ double DRAMChannelWidth;
+ double FabricDatapathToDCNDataReturn;
+ double ReturnBusWidth;
+ double Downspreading;
+ double DISPCLKDPPCLKDSCCLKDownSpreading;
+ double DISPCLKDPPCLKVCOSpeed;
+ double RoundTripPingLatencyCycles;
+ double UrgentOutOfOrderReturnPerChannel;
+ unsigned int VMMPageSize;
+ double DRAMClockChangeLatency;
+ double XFCBusTransportTime;
+ double XFCXBUFLatencyTolerance;
+
+ //
+ // IP Parameters
+ //
+ unsigned int ROBBufferSizeInKByte;
+ double DETBufferSizeInKByte;
+ unsigned int DPPOutputBufferPixels;
+ unsigned int OPPOutputBufferLines;
+ unsigned int PixelChunkSizeInKByte;
+ double ReturnBW;
+ bool VirtualMemoryEnable;
+ unsigned int MaxPageTableLevels;
+ unsigned int OverridePageTableLevels;
+ unsigned int PTEChunkSize;
+ unsigned int MetaChunkSize;
+ unsigned int WritebackChunkSize;
+ bool ODMCapability;
+ unsigned int NumberOfDSC;
+ unsigned int LineBufferSize;
+ unsigned int MaxLineBufferLines;
+ unsigned int WritebackInterfaceLumaBufferSize;
+ unsigned int WritebackInterfaceChromaBufferSize;
+ unsigned int WritebackChromaLineBufferWidth;
+ double MaxDCHUBToPSCLThroughput;
+ double MaxPSCLToLBThroughput;
+ unsigned int PTEBufferSizeInRequests;
+ double DISPCLKRampingMargin;
+ unsigned int MaxInterDCNTileRepeaters;
+ bool XFCSupported;
+ double XFCSlvChunkSize;
+ double XFCFillBWOverhead;
+ double XFCFillConstant;
+ double XFCTSlvVupdateOffset;
+ double XFCTSlvVupdateWidth;
+ double XFCTSlvVreadyOffset;
+ double DPPCLKDelaySubtotal;
+ double DPPCLKDelaySCL;
+ double DPPCLKDelaySCLLBOnly;
+ double DPPCLKDelayCNVCFormater;
+ double DPPCLKDelayCNVCCursor;
+ double DISPCLKDelaySubtotal;
+ bool ProgressiveToInterlaceUnitInOPP;
+ unsigned int PDEProcessingBufIn64KBReqs;
+
+ // Pipe/Plane Parameters
+ int VoltageLevel;
+ double FabricAndDRAMBandwidth;
+ double FabricClock;
+ double DRAMSpeed;
+ double DISPCLK;
+ double SOCCLK;
+ double DCFCLK;
+
+ unsigned int NumberOfActivePlanes;
+ unsigned int ViewportWidth[DC__NUM_DPP__MAX];
+ unsigned int ViewportHeight[DC__NUM_DPP__MAX];
+ unsigned int ViewportYStartY[DC__NUM_DPP__MAX];
+ unsigned int ViewportYStartC[DC__NUM_DPP__MAX];
+ unsigned int PitchY[DC__NUM_DPP__MAX];
+ unsigned int PitchC[DC__NUM_DPP__MAX];
+ double HRatio[DC__NUM_DPP__MAX];
+ double VRatio[DC__NUM_DPP__MAX];
+ unsigned int htaps[DC__NUM_DPP__MAX];
+ unsigned int vtaps[DC__NUM_DPP__MAX];
+ unsigned int HTAPsChroma[DC__NUM_DPP__MAX];
+ unsigned int VTAPsChroma[DC__NUM_DPP__MAX];
+ unsigned int HTotal[DC__NUM_DPP__MAX];
+ unsigned int VTotal[DC__NUM_DPP__MAX];
+ unsigned int DPPPerPlane[DC__NUM_DPP__MAX];
+ double PixelClock[DC__NUM_DPP__MAX];
+ double PixelClockBackEnd[DC__NUM_DPP__MAX];
+ double DPPCLK[DC__NUM_DPP__MAX];
+ bool DCCEnable[DC__NUM_DPP__MAX];
+ unsigned int DCCMetaPitchY[DC__NUM_DPP__MAX];
+ enum scan_direction_class SourceScan[DC__NUM_DPP__MAX];
+ enum source_format_class SourcePixelFormat[DC__NUM_DPP__MAX];
+ bool WritebackEnable[DC__NUM_DPP__MAX];
+ double WritebackDestinationWidth[DC__NUM_DPP__MAX];
+ double WritebackDestinationHeight[DC__NUM_DPP__MAX];
+ double WritebackSourceHeight[DC__NUM_DPP__MAX];
+ enum source_format_class WritebackPixelFormat[DC__NUM_DPP__MAX];
+ unsigned int WritebackLumaHTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackLumaVTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackChromaHTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackChromaVTaps[DC__NUM_DPP__MAX];
+ double WritebackHRatio[DC__NUM_DPP__MAX];
+ double WritebackVRatio[DC__NUM_DPP__MAX];
+ unsigned int HActive[DC__NUM_DPP__MAX];
+ unsigned int VActive[DC__NUM_DPP__MAX];
+ bool Interlace[DC__NUM_DPP__MAX];
+ enum dm_swizzle_mode SurfaceTiling[DC__NUM_DPP__MAX];
+ unsigned int ScalerRecoutWidth[DC__NUM_DPP__MAX];
+ bool DynamicMetadataEnable[DC__NUM_DPP__MAX];
+ unsigned int DynamicMetadataLinesBeforeActiveRequired[DC__NUM_DPP__MAX];
+ unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
+ double DCCRate[DC__NUM_DPP__MAX];
+ bool ODMCombineEnabled[DC__NUM_DPP__MAX];
+ double OutputBpp[DC__NUM_DPP__MAX];
+ unsigned int NumberOfDSCSlices[DC__NUM_DPP__MAX];
+ bool DSCEnabled[DC__NUM_DPP__MAX];
+ unsigned int DSCDelay[DC__NUM_DPP__MAX];
+ unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
+ enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
+ enum output_encoder_class Output[DC__NUM_DPP__MAX];
+ unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
+ bool SynchronizedVBlank;
+ unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
+ unsigned int CursorWidth[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
+ unsigned int CursorBPP[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
+ bool XFCEnabled[DC__NUM_DPP__MAX];
+ bool ScalerEnabled[DC__NUM_DPP__MAX];
+
+ // Intermediates/Informational
+ bool ImmediateFlipSupport;
+ unsigned int SwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int SwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int DETBufferSizeY[DC__NUM_DPP__MAX];
+ unsigned int DETBufferSizeC[DC__NUM_DPP__MAX];
+ unsigned int LBBitPerPixel[DC__NUM_DPP__MAX];
+ double LastPixelOfLineExtraWatermark;
+ double TotalDataReadBandwidth;
+ unsigned int TotalActiveWriteback;
+ unsigned int EffectiveLBLatencyHidingSourceLinesLuma;
+ unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
+ double BandwidthAvailableForImmediateFlip;
+ unsigned int PrefetchMode;
+ bool IgnoreViewportPositioning;
+ double PrefetchBandwidth[DC__NUM_DPP__MAX];
+ bool ErrorResult[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesFrame[DC__NUM_DPP__MAX];
+
+ //
+ // Calculated dml_ml->vba.Outputs
+ //
+ double DCFClkDeepSleep;
+ double UrgentWatermark;
+ double UrgentExtraLatency;
+ double MemoryTripWatermark;
+ double WritebackUrgentWatermark;
+ double StutterExitWatermark;
+ double StutterEnterPlusExitWatermark;
+ double DRAMClockChangeWatermark;
+ double WritebackDRAMClockChangeWatermark;
+ double StutterEfficiency;
+ double StutterEfficiencyNotIncludingVBlank;
+ double MinUrgentLatencySupportUs;
+ double NonUrgentLatencyTolerance;
+ double MinActiveDRAMClockChangeLatencySupported;
+ enum clock_change_support DRAMClockChangeSupport;
+
+ // These are the clocks calcuated by the library but they are not actually
+ // used explicitly. They are fetched by tests and then possibly used. The
+ // ultimate values to use are the ones specified by the parameters to DML
+ double DISPCLK_calculated;
+ double DSCCLK_calculated[DC__NUM_DPP__MAX];
+ double DPPCLK_calculated[DC__NUM_DPP__MAX];
+
+ unsigned int VStartup[DC__NUM_DPP__MAX];
+ unsigned int VUpdateOffsetPix[DC__NUM_DPP__MAX];
+ unsigned int VUpdateWidthPix[DC__NUM_DPP__MAX];
+ unsigned int VReadyOffsetPix[DC__NUM_DPP__MAX];
+ unsigned int VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+
+ double ImmediateFlipBW;
+ unsigned int TotImmediateFlipBytes;
+ double TCalc;
+ double MinTTUVBlank[DC__NUM_DPP__MAX];
+ double VRatioPrefetchY[DC__NUM_DPP__MAX];
+ double VRatioPrefetchC[DC__NUM_DPP__MAX];
+ double DSTXAfterScaler[DC__NUM_DPP__MAX];
+ double DSTYAfterScaler[DC__NUM_DPP__MAX];
+
+ double DestinationLinesToRequestVMInVBlank[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestRowInVBlank[DC__NUM_DPP__MAX];
+ double DestinationLinesForPrefetch[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestRowInImmediateFlip[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestVMInImmediateFlip[DC__NUM_DPP__MAX];
+
+ double XFCTransferDelay[DC__NUM_DPP__MAX];
+ double XFCPrechargeDelay[DC__NUM_DPP__MAX];
+ double XFCRemoteSurfaceFlipLatency[DC__NUM_DPP__MAX];
+ double XFCPrefetchMargin[DC__NUM_DPP__MAX];
+
+ display_e2e_pipe_params_st cache_pipes[DC__NUM_DPP__MAX];
+ unsigned int cache_num_pipes;
+ unsigned int pipe_plane[DC__NUM_DPP__MAX];
+
+ /* vba mode support */
+ /*inputs*/
+ bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
+ double MaxHSCLRatio;
+ double MaxVSCLRatio;
+ unsigned int MaxNumWriteback;
+ bool WritebackLumaAndChromaScalingSupported;
+ bool Cursor64BppSupport;
+ double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
+ double FabricClockPerState[DC__VOLTAGE_STATES + 1];
+ double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
+ double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
+ double MaxDppclk[DC__VOLTAGE_STATES + 1];
+ double MaxDSCCLK[DC__VOLTAGE_STATES + 1];
+ double DRAMSpeedPerState[DC__VOLTAGE_STATES + 1];
+ double MaxDispclk[DC__VOLTAGE_STATES + 1];
+
+ /*outputs*/
+ bool ScaleRatioAndTapsSupport;
+ bool SourceFormatPixelAndScanSupport;
+ unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
+ double BytePerPixelInDETY[DC__NUM_DPP__MAX];
+ double BytePerPixelInDETC[DC__NUM_DPP__MAX];
+ double TotalReadBandwidthConsumedGBytePerSecond;
+ double ReadBandwidth[DC__NUM_DPP__MAX];
+ double TotalWriteBandwidthConsumedGBytePerSecond;
+ double WriteBandwidth[DC__NUM_DPP__MAX];
+ double TotalBandwidthConsumedGBytePerSecond;
+ bool DCCEnabledInAnyPlane;
+ bool WritebackLatencySupport;
+ bool WritebackModeSupport;
+ bool Writeback10bpc420Supported;
+ bool BandwidthSupport[DC__VOLTAGE_STATES + 1];
+ unsigned int TotalNumberOfActiveWriteback;
+ double CriticalPoint;
+ double ReturnBWToDCNPerState;
+ double FabricAndDRAMBandwidthPerState[DC__VOLTAGE_STATES + 1];
+ double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
+ double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
+ bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool DISPCLK_DPPCLK_Support[DC__VOLTAGE_STATES + 1];
+ bool TotalAvailablePipesSupport[DC__VOLTAGE_STATES + 1];
+ bool UrgentLatencySupport[DC__VOLTAGE_STATES + 1];
+ bool ModeSupport[DC__VOLTAGE_STATES + 1];
+ bool DIOSupport[DC__VOLTAGE_STATES + 1];
+ bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
+ bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
+ bool ROBSupport[DC__VOLTAGE_STATES + 1];
+ bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1];
+ bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool IsErrorResult[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
+ bool prefetch_vm_bw_valid;
+ bool prefetch_row_bw_valid;
+ bool NumberOfOTGSupport;
+ bool NonsupportedDSCInputBPC;
+ bool WritebackScaleRatioAndTapsSupport;
+ bool CursorSupport;
+ bool PitchSupport;
+
+ double WritebackLineBufferLumaBufferSize;
+ double WritebackLineBufferChromaBufferSize;
+ double WritebackMinHSCLRatio;
+ double WritebackMinVSCLRatio;
+ double WritebackMaxHSCLRatio;
+ double WritebackMaxVSCLRatio;
+ double WritebackMaxHSCLTaps;
+ double WritebackMaxVSCLTaps;
+ unsigned int MaxNumDPP;
+ unsigned int MaxNumOTG;
+ double CursorBufferSize;
+ double CursorChunkSize;
+ unsigned int Mode;
+ unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double OutputLinkDPLanes[DC__NUM_DPP__MAX];
+ double SwathWidthYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathHeightYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathHeightCPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double UrgentLatencySupportUsPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double VRatioPreY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double VRatioPreC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixelDataBW[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredDPPCLK[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredDISPCLK[DC__VOLTAGE_STATES + 1];
+ double TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1];
+ double TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1];
+ double PrefetchBW[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
+ double MetaRowBytes[DC__NUM_DPP__MAX];
+ double DPTEBytesPerRow[DC__NUM_DPP__MAX];
+ double PrefetchLinesY[DC__NUM_DPP__MAX];
+ double PrefetchLinesC[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
+ double PrefillY[DC__NUM_DPP__MAX];
+ double PrefillC[DC__NUM_DPP__MAX];
+ double LineTimesForPrefetch[DC__NUM_DPP__MAX];
+ double LinesForMetaPTE[DC__NUM_DPP__MAX];
+ double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
+ double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
+ double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ unsigned int OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockWidthC[DC__NUM_DPP__MAX];
+ unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
+ double MaxSwathHeightY[DC__NUM_DPP__MAX];
+ double MaxSwathHeightC[DC__NUM_DPP__MAX];
+ double MinSwathHeightY[DC__NUM_DPP__MAX];
+ double MinSwathHeightC[DC__NUM_DPP__MAX];
+ double PSCL_FACTOR[DC__NUM_DPP__MAX];
+ double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
+ double MaximumVStartup[DC__NUM_DPP__MAX];
+ double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
+ double AlignedYPitch[DC__NUM_DPP__MAX];
+ double AlignedCPitch[DC__NUM_DPP__MAX];
+ double MaximumSwathWidth[DC__NUM_DPP__MAX];
+ double final_flip_bw[DC__NUM_DPP__MAX];
+ double ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1];
+
+ double WritebackLumaVExtra;
+ double WritebackChromaVExtra;
+ double WritebackRequiredDISPCLK;
+ double MaximumSwathWidthSupport;
+ double MaximumSwathWidthInDETBuffer;
+ double MaximumSwathWidthInLineBuffer;
+ double MaxDispclkRoundedDownToDFSGranularity;
+ double MaxDppclkRoundedDownToDFSGranularity;
+ double PlaneRequiredDISPCLKWithoutODMCombine;
+ double PlaneRequiredDISPCLK;
+ double TotalNumberOfActiveOTG;
+ double FECOverhead;
+ double EffectiveFECOverhead;
+ unsigned int Outbpp;
+ unsigned int OutbppDSC;
+ double TotalDSCUnitsRequired;
+ double bpp;
+ unsigned int slices;
+ double SwathWidthGranularityY;
+ double RoundedUpMaxSwathSizeBytesY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesC;
+ double LinesInDETLuma;
+ double LinesInDETChroma;
+ double EffectiveDETLBLinesLuma;
+ double EffectiveDETLBLinesChroma;
+ double ProjectedDCFCLKDeepSleep;
+ double PDEAndMetaPTEBytesPerFrameY;
+ double PDEAndMetaPTEBytesPerFrameC;
+ unsigned int MetaRowBytesY;
+ unsigned int MetaRowBytesC;
+ unsigned int DPTEBytesPerRowC;
+ unsigned int DPTEBytesPerRowY;
+ double ExtraLatency;
+ double TimeCalc;
+ double TWait;
+ double MaximumReadBandwidthWithPrefetch;
+ double total_dcn_read_bw_with_flip;
+};
+
+#endif /* _DML2_DISPLAY_MODE_VBA_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
new file mode 100644
index 000000000000..8ba962df42e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
@@ -0,0 +1,1763 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "display_mode_vba.h"
+#include "display_rq_dlg_calc.h"
+
+static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp);
+
+#include "dml_inline_defs.h"
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ } else if (source_format == dm_444_8) {
+ ret_val = 1;
+ }
+ return ret_val;
+}
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = 0;
+
+ if ((source_format == dm_420_8) || (source_format == dm_420_10))
+ ret_val = 1;
+
+ return ret_val;
+}
+
+static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ bool odm_combine,
+ unsigned int recout_width,
+ unsigned int hactive,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ if (odm_combine)
+ refcyc_per_delivery = (double) refclk_freq_in_mhz
+ * dml_min((double) recout_width, (double) hactive / 2.0)
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ else
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
+
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
+ dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
+ dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
+ dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
+ display_data_rq_regs_st *rq_regs,
+ const display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
+
+static void extract_rq_regs(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ 1) - 3;
+
+ if (rq_param.yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ 1) - 3;
+ }
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ // FIXME: take the max between luma, chroma chunk size?
+ // okay for now, as we are setting chunk_bytes to 8kb anyways
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; // 2/3 to chroma
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
+
+static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = 0;
+ bool req128_c = 0;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
+ req128_l = 0;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { //128b request (for luma only for yuv420 8bpc)
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+ // Note: assumption, the config that pass in will fit into
+ // the detiled buffer.
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = 0;
+ else
+ req128_l = 1;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
+ dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
+ __func__,
+ full_swath_bytes_packed_l);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
+ __func__,
+ full_swath_bytes_packed_c);
+}
+
+static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ unsigned int data_pitch,
+ unsigned int meta_pitch,
+ unsigned int source_format,
+ unsigned int tiling,
+ unsigned int macro_tile_size,
+ unsigned int source_scan,
+ unsigned int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element;
+ unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
+ false);
+ unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
+ true);
+
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int blk256_width_y = 0;
+ unsigned int blk256_height_y = 0;
+ unsigned int blk256_width_c = 0;
+ unsigned int blk256_height_c = 0;
+ unsigned int log2_bytes_per_element;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+
+ //full sized meta chunk width in unit of data elements
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+ const unsigned int pde_proc_buffer_size_64k_reqs =
+ mode_lib->ip.pde_proc_buffer_size_64k_reqs;
+
+ unsigned int log2_vmpg_height = 0;
+ unsigned int log2_vmpg_width = 0;
+ unsigned int log2_dpte_req_height_ptes = 0;
+ unsigned int log2_dpte_req_height = 0;
+ unsigned int log2_dpte_req_width = 0;
+ unsigned int log2_dpte_row_height_linear = 0;
+ unsigned int log2_dpte_row_height = 0;
+ unsigned int log2_dpte_group_width = 0;
+ unsigned int dpte_row_width_ub = 0;
+ unsigned int dpte_req_height = 0;
+ unsigned int dpte_req_width = 0;
+ unsigned int dpte_group_width = 0;
+ unsigned int log2_dpte_group_bytes = 0;
+ unsigned int log2_dpte_group_length = 0;
+ unsigned int pde_buf_entries;
+ bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
+
+ Calculate256BBlockSizes((enum source_format_class)(source_format),
+ (enum dm_swizzle_mode)(tiling),
+ bytes_per_element_y,
+ bytes_per_element_c,
+ &blk256_height_y,
+ &blk256_height_c,
+ &blk256_width_y,
+ &blk256_width_c);
+
+ if (!is_chroma) {
+ blk256_width = blk256_width_y;
+ blk256_height = blk256_height_y;
+ bytes_per_element = bytes_per_element_y;
+ } else {
+ blk256_width = blk256_width_c;
+ blk256_height = blk256_height_c;
+ bytes_per_element = bytes_per_element_c;
+ }
+
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+
+ dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
+ dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
+ dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
+ dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ // remember log rule
+ // "+" in log is multiply
+ // "-" in log is divide
+ // "/2" is like square root
+ // blk is vertical biased
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; // blk height of 1
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
+ + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ // -------
+ // meta
+ // -------
+ log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
+
+ // each 64b meta request for dcn is 8x8 meta elements and
+ // a meta element covers one 256b block of the the data surface.
+ log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ // calculate upper bound of the meta_row_width
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ //full sized meta chunk width in unit of data elements
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
+ * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
+ dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
+ dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
+ dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_req_per_frame_ub);
+ dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ // ------
+ // dpte
+ // ------
+ if (surf_linear) {
+ log2_vmpg_height = 0; // one line high
+ } else {
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ }
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
+ if (surf_linear) { //one 64B PTE request returns 8 PTEs
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_vmpg_width + 3;
+ log2_dpte_req_height = 0;
+ } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
+ //one 64B req gives 8x1 PTEs for 4KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
+ //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
+ log2_dpte_req_height_ptes = 4;
+ log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
+ log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
+ } else { //64KB page size and must 64KB tile block
+ //one 64B req gives 8x1 PTEs for 64KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ }
+
+ // The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
+ // log2_vmpg_width is how much 1 pte represent, now calculating how much a 64b pte req represent
+ // That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
+ //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ // calculate pitch dpte row buffer can hold
+ // round the result down to a power of two.
+ pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
+ if (surf_linear) {
+ unsigned int dpte_row_height;
+
+ log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
+ / bytes_per_element,
+ dpte_buf_in_pte_reqs
+ * dpte_req_width)
+ / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
+ dpte_row_height = 1 << log2_dpte_row_height;
+ dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ // the upper bound of the dpte_row_width without dependency on viewport position follows.
+ // for tiled mode, row height is the same as req height and row store up to vp size upper bound
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ }
+ if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
+ else
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
+
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+
+ // the dpte_group_bytes is reduced for the specific case of vertical
+ // access of a tile surface that has dpte request of 8x1 ptes.
+ if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ //full size
+ rq_sizing_param->dpte_group_bytes = 2048;
+
+ //since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
+
+ // full sized data pte group width in elements
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
+ if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
+ log2_dpte_group_width = log2_dpte_group_width - 1;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ // the upper bound for the dpte groups per row is as follows.
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
+ 1);
+}
+
+static void get_surf_rq_param(struct display_mode_lib *mode_lib,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ const display_pipe_source_params_st pipe_src_param,
+ bool is_chroma)
+{
+ bool mode_422 = 0;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+
+ // FIXME check if ppe apply for both luma and chroma in 422 case
+ if (is_chroma) {
+ vp_width = pipe_src_param.viewport_width_c / ppe;
+ vp_height = pipe_src_param.viewport_height_c;
+ data_pitch = pipe_src_param.data_pitch_c;
+ meta_pitch = pipe_src_param.meta_pitch_c;
+ } else {
+ vp_width = pipe_src_param.viewport_width / ppe;
+ vp_height = pipe_src_param.viewport_height;
+ data_pitch = pipe_src_param.data_pitch;
+ meta_pitch = pipe_src_param.meta_pitch;
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ get_meta_and_pte_attr(mode_lib,
+ rq_dlg_param,
+ rq_misc_param,
+ rq_sizing_param,
+ vp_width,
+ vp_height,
+ data_pitch,
+ meta_pitch,
+ pipe_src_param.source_format,
+ pipe_src_param.sw_mode,
+ pipe_src_param.macro_tile_size,
+ pipe_src_param.source_scan,
+ is_chroma);
+}
+
+void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ // get param for luma surface
+ rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
+ || pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_src_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ // get param for chroma surface
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_src_param,
+ 1);
+ }
+
+ // calculate how to split the det buffer space between luma and chroma
+ handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
+void dml_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ display_rq_params_st rq_param = {0};
+
+ memset(rq_regs, 0, sizeof(*rq_regs));
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_src_param);
+ extract_rq_regs(mode_lib, rq_regs, rq_param);
+
+ print__rq_regs_st(mode_lib, *rq_regs);
+}
+
+// Note: currently taken in as is.
+// Nice to decouple code from hw register implement and extract code that are repeated for luma and chroma.
+void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
+ const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
+ const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
+ const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+ const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
+ const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
+
+ // -------------------------
+ // Section 1.15.2.1: OTG dependent Params
+ // -------------------------
+ // Timing
+ unsigned int htotal = dst->htotal;
+// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
+ unsigned int hblank_end = dst->hblank_end;
+ unsigned int vblank_start = dst->vblank_start;
+ unsigned int vblank_end = dst->vblank_end;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double dppclk_freq_in_mhz = clks->dppclk_mhz;
+ double dispclk_freq_in_mhz = clks->dispclk_mhz;
+ double refclk_freq_in_mhz = clks->refclk_mhz;
+ double pclk_freq_in_mhz = dst->pixel_rate_mhz;
+ bool interlaced = dst->interlaced;
+
+ double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+
+ // Scaling
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratio_l;
+ double hratio_c;
+ double vratio_l;
+ double vratio_c;
+ bool scl_enable;
+
+ double line_time_in_us;
+ // double vinit_l;
+ // double vinit_c;
+ // double vinit_bot_l;
+ // double vinit_bot_c;
+
+ // unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ // unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ // unsigned int meta_pte_bytes_per_frame_ub_l;
+ // unsigned int meta_bytes_per_row_ub_l;
+
+ // unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ // unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int meta_chunks_per_row_ub_c;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double dst_y_prefetch;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double dst_y_per_vm_flip;
+ double dst_y_per_row_flip;
+ double min_dst_y_per_vm_vblank;
+ double min_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int meta_row_height_c;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+
+ unsigned int full_recout_width;
+ double xfc_transfer_delay;
+ double xfc_precharge_delay;
+ double xfc_remote_surface_flip_latency;
+ double xfc_dst_y_delta_drq_limit;
+ double xfc_prefetch_margin;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ double refcyc_per_req_delivery_pre_cur1;
+ double refcyc_per_req_delivery_cur1;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
+ dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
+ dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
+ dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
+ dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
+
+ dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
+ ASSERT(ref_freq_to_pix_freq < 4.0);
+
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
+
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ set_prefetch_mode(mode_lib, cstate_en, pstate_en, ignore_viewport_pos, immediate_flip_support);
+ t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
+ min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
+
+ dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
+ __func__,
+ min_dcfclk_mhz);
+ dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
+ __func__,
+ min_ttu_vblank);
+ dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
+ __func__,
+ min_dst_y_ttu_vblank);
+ dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
+ __func__,
+ t_calc_us);
+ dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ // -------------------------
+ // Section 1.15.2.2: Prefetch, Active and TTU
+ // -------------------------
+ // Prefetch Calc
+ // Source
+// dcc_en = src.dcc;
+ dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
+ mode_422 = 0; // FIXME
+ access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
+// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+ vp_height_l = src->viewport_height;
+ vp_width_l = src->viewport_width;
+ vp_height_c = src->viewport_height_c;
+ vp_width_c = src->viewport_width_c;
+
+ // Scaling
+ htaps_l = taps->htaps;
+ htaps_c = taps->htaps_c;
+ hratio_l = scl->hscl_ratio;
+ hratio_c = scl->hscl_ratio_c;
+ vratio_l = scl->vscl_ratio;
+ vratio_c = scl->vscl_ratio_c;
+ scl_enable = scl->scl_enable;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+// vinit_l = scl.vinit;
+// vinit_c = scl.vinit_c;
+// vinit_bot_l = scl.vinit_bot;
+// vinit_bot_c = scl.vinit_bot_c;
+
+// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+
+// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ vupdate_offset = dst->vupdate_offset;
+ vupdate_width = dst->vupdate_width;
+ vready_offset = dst->vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+
+ if (scl_enable)
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
+ else
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
+
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
+
+ if (dout->dsc_enable) {
+ double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dispclk_delay_subtotal += dsc_delay;
+ }
+
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
+
+ vstartup_start = dst->vstartup_start;
+ if (interlaced) {
+ if (vstartup_start / 2.0
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end / 2.0)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ } else {
+ if (vstartup_start
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ }
+
+ // TODO: Where is this coming from?
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
+ if (vstartup_start >= min_vblank) {
+ dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
+ __func__,
+ vblank_start,
+ vblank_end);
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
+ dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
+ __func__,
+ pixel_rate_delay_subtotal);
+ dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
+ __func__,
+ dst_x_after_scaler);
+ dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
+ __func__,
+ dst_y_after_scaler);
+
+ // Lwait
+ line_wait = mode_lib->soc.urgent_latency_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_us,
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
+
+ dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
+
+ dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_per_vm_vblank = 8.0;
+ min_dst_y_per_row_vblank = 16.0;
+
+ // magic!
+ if (htotal <= 75) {
+ min_vblank = 300;
+ min_dst_y_per_vm_vblank = 100.0;
+ min_dst_y_per_row_vblank = 100.0;
+ }
+
+ dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
+
+ ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
+
+ vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
+ dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
+
+ // Active
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratio_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l * 2.0;
+ } else {
+ if (hratio_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratio_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c * 2.0;
+ } else {
+ if (hratio_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+
+ full_recout_width = 0;
+ // In ODM
+ if (src->is_hsplit) {
+ // This "hack" is only allowed (and valid) for MPC combine. In ODM
+ // combine, you MUST specify the full_recout_width...according to Oswin
+ if (dst->full_recout_width == 0 && !dst->odm_combine) {
+ dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
+ __func__);
+ full_recout_width = dst->recout_width * 2; // assume half split for dcn1
+ } else
+ full_recout_width = dst->full_recout_width;
+ } else
+ full_recout_width = dst->recout_width;
+
+ // mpc_combine and odm_combine are mutually exclusive
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: full_recout_width = %d\n",
+ __func__,
+ full_recout_width);
+ dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
+ __func__,
+ hscale_pixel_rate_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_c);
+ }
+
+ // TTU - Luma / Chroma
+ if (access_dir) { // vertical access
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ // XFC
+ xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
+ xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+
+ // TTU - Cursor
+ refcyc_per_req_delivery_pre_cur0 = 0.0;
+ refcyc_per_req_delivery_cur0 = 0.0;
+ if (src->num_cursors > 0) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur0,
+ &refcyc_per_req_delivery_cur0,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur0_src_width,
+ (enum cursor_bpp)(src->cur0_bpp));
+ }
+
+ refcyc_per_req_delivery_pre_cur1 = 0.0;
+ refcyc_per_req_delivery_cur1 = 0.0;
+ if (src->num_cursors > 1) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur1,
+ &refcyc_per_req_delivery_cur1,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur1_src_width,
+ (enum cursor_bpp)(src->cur1_bpp));
+ }
+
+ // TTU - Misc
+ // all hard-coded
+
+ // Assignment to register structures
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
+
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ < (unsigned int) dml_pow(2, 13));
+ }
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
+ }
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ if (dual_plane) {
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
+ dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
+ __func__,
+ disp_dlg_regs->dst_y_per_pte_row_nom_c,
+ (unsigned int) dml_pow(2, 17) - 1);
+ }
+ }
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_nom_c =
+ (unsigned int) ((double) dpte_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ // TODO: Is this the right calculation? Does htotal need to be halved?
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
+ (unsigned int) ((double) meta_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
+ }
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+ disp_dlg_regs->dst_y_offset_cur0 = 0;
+ disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
+ disp_dlg_regs->dst_y_offset_cur1 = 0;
+
+ disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
+ disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
+ disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
+ disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
+ 1);
+
+ // slave has to have this value also set to off
+ if (src->xfc_enable && !src->xfc_slave)
+ disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
+ else
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
+ * dml_pow(2, 10));
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
+
+void dml_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ display_rq_params_st rq_param = {0};
+ display_dlg_sys_params_st dlg_sys_param = {0};
+
+ // Get watermark and Tex.
+ dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
+ / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
+
+ print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+
+ // system parameter calculation done
+
+ dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml_rq_dlg_get_dlg_params(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ cstate_en,
+ pstate_en,
+ vm_en,
+ ignore_viewport_pos,
+ immediate_flip_support);
+ dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
+}
+
+void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
+{
+ memset(arb_param, 0, sizeof(*arb_param));
+ arb_param->max_req_outstanding = 256;
+ arb_param->min_req_outstanding = 68;
+ arb_param->sat_level_us = 60;
+}
+
+void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp)
+{
+ unsigned int cur_src_width = cur_width;
+ unsigned int cur_req_size = 0;
+ unsigned int cur_req_width = 0;
+ double cur_width_ub = 0.0;
+ double cur_req_per_width = 0.0;
+ double hactive_cur = 0.0;
+
+ ASSERT(cur_src_width <= 256);
+
+ *refcyc_per_req_delivery_pre_cur = 0.0;
+ *refcyc_per_req_delivery_cur = 0.0;
+ if (cur_src_width > 0) {
+ unsigned int cur_bit_per_pixel = 0;
+
+ if (cur_bpp == dm_cur_2bit) {
+ cur_req_size = 64; // byte
+ cur_bit_per_pixel = 2;
+ } else { // 32bit
+ cur_bit_per_pixel = 32;
+ if (cur_src_width >= 1 && cur_src_width <= 16)
+ cur_req_size = 64;
+ else if (cur_src_width >= 17 && cur_src_width <= 31)
+ cur_req_size = 128;
+ else
+ cur_req_size = 256;
+ }
+
+ cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
+ cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
+ * (double) cur_req_width;
+ cur_req_per_width = cur_width_ub / (double) cur_req_width;
+ hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+
+ if (vratio_pre_l <= 1.0) {
+ *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ dml_print("DML_DLG: %s: cur_req_width = %d\n",
+ __func__,
+ cur_req_width);
+ dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
+ __func__,
+ cur_width_ub);
+ dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
+ __func__,
+ cur_req_per_width);
+ dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
+ __func__,
+ hactive_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_pre_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_cur);
+
+ ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
+ }
+}
+
+unsigned int dml_rq_dlg_get_calculated_vstartup(struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx)
+{
+ unsigned int vstartup_pipe[DC__NUM_PIPES__MAX];
+ bool visited[DC__NUM_PIPES__MAX];
+ unsigned int pipe_inst = 0;
+ unsigned int i, j, k;
+
+ for (k = 0; k < num_pipes; ++k)
+ visited[k] = false;
+
+ for (i = 0; i < num_pipes; i++) {
+ if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
+ unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
+
+ for (j = i; j < num_pipes; j++) {
+ if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
+ && e2e_pipe_param[j].pipe.src.is_hsplit
+ && !visited[j]) {
+ vstartup_pipe[j] = get_vstartup_calculated(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_inst);
+ visited[j] = true;
+ }
+ }
+
+ pipe_inst++;
+ }
+
+ if (!visited[i]) {
+ vstartup_pipe[i] = get_vstartup_calculated(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_inst);
+ visited[i] = true;
+ pipe_inst++;
+ }
+ }
+
+ return vstartup_pipe[pipe_idx];
+
+}
+
+void dml_rq_dlg_get_row_heights(struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma)
+{
+ display_data_rq_dlg_params_st rq_dlg_param;
+ display_data_rq_misc_params_st rq_misc_param;
+ display_data_rq_sizing_params_st rq_sizing_param;
+
+ get_meta_and_pte_attr(mode_lib,
+ &rq_dlg_param,
+ &rq_misc_param,
+ &rq_sizing_param,
+ vp_width,
+ 0, // dummy
+ data_pitch,
+ 0, // dummy
+ source_format,
+ tiling,
+ macro_tile_size,
+ source_scan,
+ is_chroma);
+
+ *o_dpte_row_height = rq_dlg_param.dpte_row_height;
+ *o_meta_row_height = rq_dlg_param.meta_row_height;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
new file mode 100644
index 000000000000..efdd4c73d8f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML2_DISPLAY_RQ_DLG_CALC_H__
+#define __DML2_DISPLAY_RQ_DLG_CALC_H__
+
+#include "dml_common_defs.h"
+#include "display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+// Function: dml_rq_dlg_get_rq_params
+// Calculate requestor related parameters that register definition agnostic
+// (i.e. this layer does try to separate real values from register definition)
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
+//
+void dml_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param);
+
+// Function: dml_rq_dlg_get_rq_reg
+// Main entry point for test to get the register values out of this DML class.
+// This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+// and then populate the rq_regs struct
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_regs - struct that holds all the RQ registers field value.
+// See also: <display_rq_regs_st>
+void dml_rq_dlg_get_rq_reg(
+ struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_source_params_st pipe_src_param);
+
+// Function: dml_rq_dlg_get_dlg_params
+// Calculate deadline related parameters
+//
+void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+// Function: dml_rq_dlg_get_dlg_param_prefetch
+// For flip_bw programming guide change, now dml needs to calculate the flip_bytes and prefetch_bw
+// for ALL pipes and use this info to calculate the prefetch programming.
+// Output: prefetch_param.prefetch_bw and flip_bytes
+void dml_rq_dlg_get_dlg_params_prefetch(
+ struct display_mode_lib *mode_lib,
+ display_dlg_prefetch_param_st *prefetch_param,
+ display_rq_dlg_params_st rq_dlg_param,
+ display_dlg_sys_params_st dlg_sys_param,
+ display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en);
+
+// Function: dml_rq_dlg_get_dlg_reg
+// Calculate and return DLG and TTU register struct given the system setting
+// Output:
+// dlg_regs - output DLG register struct
+// ttu_regs - output DLG TTU register struct
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param that corresponding to this dlg
+// cstate - 0: when calculate min_ttu_vblank it is assumed cstate is not required. 1: Normal mode, cstate is considered.
+// Added for legacy or unrealistic timing tests.
+void dml_rq_dlg_get_dlg_reg(
+ struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+// Function: dml_rq_dlg_get_calculated_vstartup
+// Calculate and return vstartup
+// Output:
+// unsigned int vstartup
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param that corresponding to this dlg
+// NOTE: this MUST be called after setting the prefetch mode!
+unsigned int dml_rq_dlg_get_calculated_vstartup(
+ struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx);
+
+// Function: dml_rq_dlg_get_row_heights
+// Calculate dpte and meta row heights
+void dml_rq_dlg_get_row_heights(
+ struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma);
+
+// Function: dml_rq_dlg_get_arb_params
+void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
new file mode 100644
index 000000000000..189052e911fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_rq_dlg_helpers.h"
+
+void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
+{
+ dml_print("DML_RQ_DLG_CALC: ***************************\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n");
+ print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_c);
+
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_c);
+
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_c);
+ dml_print("DML_RQ_DLG_CALC: ***************************\n");
+}
+
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing.chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing.min_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing.meta_chunk_bytes);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n",
+ rq_sizing.min_meta_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing.mpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing.dpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: swath_width_ub = %0d\n",
+ rq_dlg_param.swath_width_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: swath_height = %0d\n",
+ rq_dlg_param.swath_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n",
+ rq_dlg_param.req_per_swath_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n",
+ rq_dlg_param.meta_pte_bytes_per_frame_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_req_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_groups_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_row_height = %0d\n",
+ rq_dlg_param.dpte_row_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_bytes_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n",
+ rq_dlg_param.meta_chunks_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n",
+ rq_dlg_param.meta_req_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_row_height = %0d\n",
+ rq_dlg_param.meta_row_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n",
+ rq_dlg_param.meta_bytes_per_row_ub);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: full_swath_bytes = %0d\n",
+ rq_misc_param.full_swath_bytes);
+ dml_print(
+ "DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n",
+ rq_misc_param.stored_swath_bytes);
+ dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param.blk256_width);
+ dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param.blk256_height);
+ dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param.req_width);
+ dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param.req_height);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_c);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param.t_mclk_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param.t_urg_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param.t_sr_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param.t_extra_us);
+ dml_print(
+ "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
+ dlg_sys_param.t_srx_delay_us);
+ dml_print(
+ "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
+ dlg_sys_param.deepsleep_dcfclk_mhz);
+ dml_print(
+ "DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n",
+ dlg_sys_param.total_flip_bw);
+ dml_print(
+ "DML_RQ_DLG_CALC: total_flip_bytes = %i\n",
+ dlg_sys_param.total_flip_bytes);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st rq_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n");
+ dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs.chunk_size);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs.min_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs.meta_chunk_size);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n",
+ rq_regs.min_meta_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs.dpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs.mpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs.swath_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n",
+ rq_regs.pte_row_height_linear);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_c);
+ dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs.drq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs.prq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs.mrq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs.crq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs.plane1_base_address);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n",
+ dlg_regs.refcyc_h_blank_end);
+ dml_print(
+ "DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n",
+ dlg_regs.dlg_vblank_end);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n",
+ dlg_regs.min_dst_y_next_start);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n",
+ dlg_regs.refcyc_per_htotal);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n",
+ dlg_regs.refcyc_x_after_scaler);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n",
+ dlg_regs.dst_y_after_scaler);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n",
+ dlg_regs.dst_y_prefetch);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n",
+ dlg_regs.dst_y_per_vm_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n",
+ dlg_regs.dst_y_per_row_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n",
+ dlg_regs.dst_y_per_vm_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n",
+ dlg_regs.dst_y_per_row_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n",
+ dlg_regs.ref_freq_to_pix_freq);
+ dml_print(
+ "DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n",
+ dlg_regs.vratio_prefetch);
+ dml_print(
+ "DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n",
+ dlg_regs.vratio_prefetch_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_vblank_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_vblank_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_vblank_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_vblank_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_flip_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_flip_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_flip_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_flip_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n",
+ dlg_regs.dst_y_per_pte_row_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n",
+ dlg_regs.dst_y_per_pte_row_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n",
+ dlg_regs.dst_y_per_meta_row_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n",
+ dlg_regs.dst_y_per_meta_row_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_pre_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_pre_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n",
+ dlg_regs.chunk_hdl_adjust_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n",
+ dlg_regs.dst_y_offset_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n",
+ dlg_regs.chunk_hdl_adjust_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n",
+ dlg_regs.vready_after_vcount0);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n",
+ dlg_regs.dst_y_delta_drq_limit);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n",
+ dlg_regs.xfc_reg_transfer_delay);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n",
+ dlg_regs.xfc_reg_precharge_delay);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
+ dlg_regs.xfc_reg_remote_surface_flip_latency);
+
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n",
+ ttu_regs.qos_level_low_wm);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n",
+ ttu_regs.qos_level_high_wm);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n",
+ ttu_regs.min_ttu_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n",
+ ttu_regs.qos_level_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n",
+ ttu_regs.qos_level_fixed_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n",
+ ttu_regs.qos_level_fixed_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n",
+ ttu_regs.qos_level_fixed_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n",
+ ttu_regs.qos_level_fixed_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_cur1);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
new file mode 100644
index 000000000000..1f24db830737
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_RQ_DLG_HELPERS_H__
+#define __DISPLAY_RQ_DLG_HELPERS_H__
+
+#include "dml_common_defs.h"
+#include "display_mode_lib.h"
+
+/* Function: Printer functions
+ * Print various struct
+ */
+void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param);
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing);
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param);
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param);
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param);
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param);
+
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st data_rq_regs);
+void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs);
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs);
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
new file mode 100644
index 000000000000..1e4b1e383401
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -0,0 +1,1905 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml1_display_rq_dlg_calc.h"
+#include "display_mode_lib.h"
+
+#include "dml_inline_defs.h"
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ }
+ return ret_val;
+}
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = 0;
+
+ if ((source_format == dm_420_8) || (source_format == dm_420_10))
+ ret_val = 1;
+
+ return ret_val;
+}
+
+static void get_blk256_size(
+ unsigned int *blk256_width,
+ unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
+static double get_refcyc_per_delivery(
+ struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ unsigned int recout_width,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
+
+ DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
+ DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
+ DTRACE("DLG: %s: recout_width = %d", __func__, recout_width);
+ DTRACE("DLG: %s: vratio = %3.2f", __func__, vratio);
+ DTRACE("DLG: %s: req_per_swath_ub = %d", __func__, req_per_swath_ub);
+ DTRACE("DLG: %s: refcyc_per_delivery= %3.2f", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static double get_vratio_pre(
+ struct display_mode_lib *mode_lib,
+ unsigned int max_num_sw,
+ unsigned int max_partial_sw,
+ unsigned int swath_height,
+ double vinit,
+ double l_sw)
+{
+ double prefill = dml_floor(vinit, 1);
+ double vratio_pre = 1.0;
+
+ vratio_pre = (max_num_sw * swath_height + max_partial_sw) / l_sw;
+
+ if (swath_height > 4) {
+ double tmp0 = (max_num_sw * swath_height) / (l_sw - (prefill - 3.0) / 2.0);
+
+ if (tmp0 > vratio_pre)
+ vratio_pre = tmp0;
+ }
+
+ DTRACE("DLG: %s: max_num_sw = %0d", __func__, max_num_sw);
+ DTRACE("DLG: %s: max_partial_sw = %0d", __func__, max_partial_sw);
+ DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
+ DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
+ DTRACE("DLG: %s: vratio_pre = %3.2f", __func__, vratio_pre);
+
+ if (vratio_pre < 1.0) {
+ DTRACE("WARNING_DLG: %s: vratio_pre=%3.2f < 1.0, set to 1.0", __func__, vratio_pre);
+ vratio_pre = 1.0;
+ }
+
+ if (vratio_pre > 4.0) {
+ DTRACE(
+ "WARNING_DLG: %s: vratio_pre=%3.2f > 4.0 (max scaling ratio). set to 4.0",
+ __func__,
+ vratio_pre);
+ vratio_pre = 4.0;
+ }
+
+ return vratio_pre;
+}
+
+static void get_swath_need(
+ struct display_mode_lib *mode_lib,
+ unsigned int *max_num_sw,
+ unsigned int *max_partial_sw,
+ unsigned int swath_height,
+ double vinit)
+{
+ double prefill = dml_floor(vinit, 1);
+ unsigned int max_partial_sw_int;
+
+ DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
+ DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
+
+ ASSERT(prefill > 0.0 && prefill <= 8.0);
+
+ *max_num_sw = (unsigned int) (dml_ceil((prefill - 1.0) / (double) swath_height, 1) + 1.0); /* prefill has to be >= 1 */
+ max_partial_sw_int =
+ (prefill == 1) ?
+ (swath_height - 1) :
+ ((unsigned int) (prefill - 2.0) % swath_height);
+ *max_partial_sw = (max_partial_sw_int < 1) ? 1 : max_partial_sw_int; /* ensure minimum of 1 is used */
+
+ DTRACE("DLG: %s: max_num_sw = %0d", __func__, *max_num_sw);
+ DTRACE("DLG: %s: max_partial_sw = %0d", __func__, *max_partial_sw);
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_data_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_data_rq_sizing_params_st rq_sizing)
+{
+ DTRACE("DLG: %s: rq_sizing param", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
+
+void dml1_extract_rq_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ if (rq_param.yuv420)
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ /* FIXME: take the max between luma, chroma chunk size?
+ * okay for now, as we are setting chunk_bytes to 8kb anyways
+ */
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple(
+ (unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; /* 2/3 to chroma */
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
+
+static void handle_det_buf_split(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = 0;
+ bool req128_c = 0;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(
+ rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(
+ rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { /*full 256b request */
+ req128_l = 0;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { /*128b request (for luma only for yuv420 8bpc) */
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+
+ /* Bug workaround, luma and chroma req size needs to be the same. (see: DEGVIDCN10-137)
+ * TODO: Remove after rtl fix
+ */
+ if (req128_l == 1) {
+ req128_c = 1;
+ DTRACE("DLG: %s: bug workaround DEGVIDCN10-137", __func__);
+ }
+
+ /* Note: assumption, the config that pass in will fit into
+ * the detiled buffer.
+ */
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = 0;
+ else
+ req128_l = 1;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ DTRACE("DLG: %s: req128_l = %0d", __func__, req128_l);
+ DTRACE("DLG: %s: req128_c = %0d", __func__, req128_c);
+ DTRACE("DLG: %s: full_swath_bytes_packed_l = %0d", __func__, full_swath_bytes_packed_l);
+ DTRACE("DLG: %s: full_swath_bytes_packed_c = %0d", __func__, full_swath_bytes_packed_c);
+}
+
+/* Need refactor. */
+static void dml1_rq_dlg_get_row_heights(
+ struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element = get_bytes_per_element(
+ (enum source_format_class) source_format,
+ is_chroma);
+ unsigned int log2_bytes_per_element = dml_log2(bytes_per_element);
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int log2_meta_row_height;
+ unsigned int log2_vmpg_bytes;
+ unsigned int dpte_buf_in_pte_reqs;
+ unsigned int log2_vmpg_height;
+ unsigned int log2_vmpg_width;
+ unsigned int log2_dpte_req_height_ptes;
+ unsigned int log2_dpte_req_width_ptes;
+ unsigned int log2_dpte_req_height;
+ unsigned int log2_dpte_req_width;
+ unsigned int log2_dpte_row_height_linear;
+ unsigned int log2_dpte_row_height;
+ unsigned int dpte_req_width;
+
+ if (surf_linear) {
+ blk256_width = 256;
+ blk256_height = 1;
+ } else {
+ get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
+ }
+
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ /* remember log rule
+ * "+" in log is multiply
+ * "-" in log is divide
+ * "/2" is like square root
+ * blk is vertical biased
+ */
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; /* blk height of 1 */
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
+
+ /* ------- */
+ /* meta */
+ /* ------- */
+ log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
+
+ /* each 64b meta request for dcn is 8x8 meta elements and
+ * a meta element covers one 256b block of the the data surface.
+ */
+ log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 */
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ log2_meta_row_height = 0;
+
+ /* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ * calculate upper bound of the meta_row_width
+ */
+ if (!surf_vert)
+ log2_meta_row_height = log2_meta_req_height;
+ else
+ log2_meta_row_height = log2_meta_req_width;
+
+ *o_meta_row_height = 1 << log2_meta_row_height;
+
+ /* ------ */
+ /* dpte */
+ /* ------ */
+ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+
+ log2_vmpg_height = 0;
+ log2_vmpg_width = 0;
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width_ptes = 0;
+ log2_dpte_req_height = 0;
+ log2_dpte_req_width = 0;
+ log2_dpte_row_height_linear = 0;
+ log2_dpte_row_height = 0;
+ dpte_req_width = 0; /* 64b dpte req width in data element */
+
+ if (surf_linear)
+ log2_vmpg_height = 0; /* one line high */
+ else
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ /* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
+ if (log2_blk_bytes <= log2_vmpg_bytes)
+ log2_dpte_req_height_ptes = 0;
+ else if (log2_blk_height - log2_vmpg_height >= 2)
+ log2_dpte_req_height_ptes = 2;
+ else
+ log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
+ log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
+
+ ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
+ (log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
+ (log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
+
+ /* the dpte request dimensions in data elements is dpte_req_width x dpte_req_height
+ * log2_wmpg_width is how much 1 pte represent, now trying to calculate how much 64b pte req represent
+ */
+ log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ /* calculate pitch dpte row buffer can hold
+ * round the result down to a power of two.
+ */
+ if (surf_linear) {
+ log2_dpte_row_height_linear = dml_floor(
+ dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ } else {
+ /* the upper bound of the dpte_row_width without dependency on viewport position follows. */
+ if (!surf_vert)
+ log2_dpte_row_height = log2_dpte_req_height;
+ else
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ }
+
+ /* From programming guide:
+ * There is a special case of saving only half of ptes returned due to buffer space limits.
+ * this case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
+ * when the pte request is 2x4 ptes (which happens when vmpg_bytes =4kb and tile blk_bytes >=64kb).
+ */
+ if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
+ && log2_blk_bytes >= 16)
+ log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
+
+ *o_dpte_row_height = 1 << log2_dpte_row_height;
+}
+
+static void get_surf_rq_param(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param,
+ struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param,
+ struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param,
+ bool is_chroma)
+{
+ bool mode_422 = 0;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+ bool surf_linear;
+ bool surf_vert;
+ unsigned int bytes_per_element;
+ unsigned int log2_bytes_per_element;
+ unsigned int blk256_width;
+ unsigned int blk256_height;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ unsigned int log2_vmpg_bytes;
+ unsigned int dpte_buf_in_pte_reqs;
+ unsigned int log2_vmpg_height;
+ unsigned int log2_vmpg_width;
+ unsigned int log2_dpte_req_height_ptes;
+ unsigned int log2_dpte_req_width_ptes;
+ unsigned int log2_dpte_req_height;
+ unsigned int log2_dpte_req_width;
+ unsigned int log2_dpte_row_height_linear;
+ unsigned int log2_dpte_row_height;
+ unsigned int log2_dpte_group_width;
+ unsigned int dpte_row_width_ub;
+ unsigned int dpte_row_height;
+ unsigned int dpte_req_height;
+ unsigned int dpte_req_width;
+ unsigned int dpte_group_width;
+ unsigned int log2_dpte_group_bytes;
+ unsigned int log2_dpte_group_length;
+ unsigned int func_meta_row_height, func_dpte_row_height;
+
+ /* FIXME check if ppe apply for both luma and chroma in 422 case */
+ if (is_chroma) {
+ vp_width = pipe_src_param.viewport_width_c / ppe;
+ vp_height = pipe_src_param.viewport_height_c;
+ data_pitch = pipe_src_param.data_pitch_c;
+ meta_pitch = pipe_src_param.meta_pitch_c;
+ } else {
+ vp_width = pipe_src_param.viewport_width / ppe;
+ vp_height = pipe_src_param.viewport_height;
+ data_pitch = pipe_src_param.data_pitch;
+ meta_pitch = pipe_src_param.meta_pitch;
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ surf_vert = (pipe_src_param.source_scan == dm_vert);
+
+ bytes_per_element = get_bytes_per_element(
+ (enum source_format_class) pipe_src_param.source_format,
+ is_chroma);
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+ blk256_width = 0;
+ blk256_height = 0;
+
+ if (surf_linear) {
+ blk256_width = 256 / bytes_per_element;
+ blk256_height = 1;
+ } else {
+ get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
+ }
+
+ DTRACE("DLG: %s: surf_linear = %d", __func__, surf_linear);
+ DTRACE("DLG: %s: surf_vert = %d", __func__, surf_vert);
+ DTRACE("DLG: %s: blk256_width = %d", __func__, blk256_width);
+ DTRACE("DLG: %s: blk256_height = %d", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes =
+ surf_linear ? 256 : get_blk_size_bytes(
+ (enum source_macro_tile_size) pipe_src_param.macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ /* remember log rule
+ * "+" in log is multiply
+ * "-" in log is divide
+ * "/2" is like square root
+ * blk is vertical biased
+ */
+ if (pipe_src_param.sw_mode != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; /* blk height of 1 */
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(
+ vp_height - 1,
+ blk256_height,
+ 1) + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ /* ------- */
+ /* meta */
+ /* ------- */
+ log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
+
+ /* each 64b meta request for dcn is 8x8 meta elements and
+ * a meta element covers one 256b block of the the data surface.
+ */
+ log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 byte, each byte represent 1 blk256 */
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ /* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ * calculate upper bound of the meta_row_width
+ */
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ /*full sized meta chunk width in unit of data elements */
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1)
+ + meta_blk_height) * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(
+ meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; /*64B mpte request */
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ DTRACE("DLG: %s: meta_blk_height = %d", __func__, meta_blk_height);
+ DTRACE("DLG: %s: meta_blk_width = %d", __func__, meta_blk_width);
+ DTRACE("DLG: %s: meta_surface_bytes = %d", __func__, meta_surface_bytes);
+ DTRACE("DLG: %s: meta_pte_req_per_frame_ub = %d", __func__, meta_pte_req_per_frame_ub);
+ DTRACE("DLG: %s: meta_pte_bytes_per_frame_ub = %d", __func__, meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ /* ------ */
+ /* dpte */
+ /* ------ */
+ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+
+ log2_vmpg_height = 0;
+ log2_vmpg_width = 0;
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width_ptes = 0;
+ log2_dpte_req_height = 0;
+ log2_dpte_req_width = 0;
+ log2_dpte_row_height_linear = 0;
+ log2_dpte_row_height = 0;
+ log2_dpte_group_width = 0;
+ dpte_row_width_ub = 0;
+ dpte_row_height = 0;
+ dpte_req_height = 0; /* 64b dpte req height in data element */
+ dpte_req_width = 0; /* 64b dpte req width in data element */
+ dpte_group_width = 0;
+ log2_dpte_group_bytes = 0;
+ log2_dpte_group_length = 0;
+
+ if (surf_linear)
+ log2_vmpg_height = 0; /* one line high */
+ else
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ /* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
+ if (log2_blk_bytes <= log2_vmpg_bytes)
+ log2_dpte_req_height_ptes = 0;
+ else if (log2_blk_height - log2_vmpg_height >= 2)
+ log2_dpte_req_height_ptes = 2;
+ else
+ log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
+ log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
+
+ /* Ensure we only have the 3 shapes */
+ ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
+ (log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
+ (log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
+
+ /* The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
+ * log2_vmpg_width is how much 1 pte represent, now calculating how much a 64b pte req represent
+ * That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
+ */
+ log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ /* calculate pitch dpte row buffer can hold
+ * round the result down to a power of two.
+ */
+ if (surf_linear) {
+ log2_dpte_row_height_linear = dml_floor(
+ dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+
+ /* For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ * the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
+ */
+ dpte_row_width_ub = dml_round_to_multiple(
+ data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ /* for tiled mode, row height is the same as req height and row store up to vp size upper bound */
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+ }
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64;
+
+ /* From programming guide:
+ * There is a special case of saving only half of ptes returned due to buffer space limits.
+ * this case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
+ * when the pte request is 2x4 ptes (which happens when vmpg_bytes =4kb and tile blk_bytes >=64kb).
+ */
+ if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
+ && log2_blk_bytes >= 16) {
+ log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+ }
+
+ /* the dpte_group_bytes is reduced for the specific case of vertical
+ * access of a tile surface that has dpte request of 8x1 ptes.
+ */
+ if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) /*reduced, in this case, will have page fault within a group */
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ /*full size */
+ rq_sizing_param->dpte_group_bytes = 2048;
+
+ /*since pte request size is 64byte, the number of data pte requests per full sized group is as follows. */
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; /*length in 64b requests */
+
+ /* full sized data pte group width in elements */
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ /* since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ * the upper bound for the dpte groups per row is as follows.
+ */
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil(
+ (double) dpte_row_width_ub / dpte_group_width,
+ 1);
+
+ dml1_rq_dlg_get_row_heights(
+ mode_lib,
+ &func_dpte_row_height,
+ &func_meta_row_height,
+ vp_width,
+ data_pitch,
+ pipe_src_param.source_format,
+ pipe_src_param.sw_mode,
+ pipe_src_param.macro_tile_size,
+ pipe_src_param.source_scan,
+ is_chroma);
+
+ /* Just a check to make sure this function and the new one give the same
+ * result. The standalone get_row_heights() function is based off of the
+ * code in this function so the same changes need to be made to both.
+ */
+ if (rq_dlg_param->meta_row_height != func_meta_row_height) {
+ DTRACE(
+ "MISMATCH: rq_dlg_param->meta_row_height = %d",
+ rq_dlg_param->meta_row_height);
+ DTRACE("MISMATCH: func_meta_row_height = %d", func_meta_row_height);
+ ASSERT(0);
+ }
+
+ if (rq_dlg_param->dpte_row_height != func_dpte_row_height) {
+ DTRACE(
+ "MISMATCH: rq_dlg_param->dpte_row_height = %d",
+ rq_dlg_param->dpte_row_height);
+ DTRACE("MISMATCH: func_dpte_row_height = %d", func_dpte_row_height);
+ ASSERT(0);
+ }
+}
+
+void dml1_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+{
+ /* get param for luma surface */
+ rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
+ || pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_src_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class) pipe_src_param.source_format)) {
+ /* get param for chroma surface */
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_src_param,
+ 1);
+ }
+
+ /* calculate how to split the det buffer space between luma and chroma */
+ handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
+/* Note: currently taken in as is.
+ * Nice to decouple code from hw register implement and extract code that are repeated for luma and chroma.
+ */
+void dml1_rq_dlg_get_dlg_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs,
+ const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool iflip_en)
+{
+ /* Timing */
+ unsigned int htotal = e2e_pipe_param.pipe.dest.htotal;
+ unsigned int hblank_end = e2e_pipe_param.pipe.dest.hblank_end;
+ unsigned int vblank_start = e2e_pipe_param.pipe.dest.vblank_start;
+ unsigned int vblank_end = e2e_pipe_param.pipe.dest.vblank_end;
+ bool interlaced = e2e_pipe_param.pipe.dest.interlaced;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double pclk_freq_in_mhz = e2e_pipe_param.pipe.dest.pixel_rate_mhz;
+ double refclk_freq_in_mhz = e2e_pipe_param.clks_cfg.refclk_mhz;
+ double dppclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dppclk_mhz;
+ double dispclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dispclk_mhz;
+
+ double ref_freq_to_pix_freq;
+ double prefetch_xy_calc_in_dcfclk;
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dcc_en;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int bytes_per_element_l;
+ unsigned int bytes_per_element_c;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratios_l;
+ double hratios_c;
+ double vratio_l;
+ double vratio_c;
+ double line_time_in_us;
+ double vinit_l;
+ double vinit_c;
+ double vinit_bot_l;
+ double vinit_bot_c;
+ unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ unsigned int meta_pte_bytes_per_frame_ub_l;
+ unsigned int meta_bytes_per_row_ub_l;
+ unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double line_o;
+ double line_setup;
+ double line_calc;
+ double dst_y_prefetch;
+ double t_pre_us;
+ unsigned int vm_bytes;
+ unsigned int meta_row_bytes;
+ unsigned int max_num_sw_l;
+ unsigned int max_num_sw_c;
+ unsigned int max_partial_sw_l;
+ unsigned int max_partial_sw_c;
+ double max_vinit_l;
+ double max_vinit_c;
+ unsigned int lsw_l;
+ unsigned int lsw_c;
+ unsigned int sw_bytes_ub_l;
+ unsigned int sw_bytes_ub_c;
+ unsigned int sw_bytes;
+ unsigned int dpte_row_bytes;
+ double prefetch_bw;
+ double flip_bw;
+ double t_vm_us;
+ double t_r0_us;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double min_dst_y_per_vm_vblank;
+ double min_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ unsigned int full_recout_width;
+ double hratios_cur0;
+ unsigned int cur0_src_width;
+ enum cursor_bpp cur0_bpp;
+ unsigned int cur0_req_size;
+ unsigned int cur0_req_width;
+ double cur0_width_ub;
+ double cur0_req_per_width;
+ double hactive_cur0;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ DTRACE("DLG: %s: cstate_en = %d", __func__, cstate_en);
+ DTRACE("DLG: %s: pstate_en = %d", __func__, pstate_en);
+ DTRACE("DLG: %s: vm_en = %d", __func__, vm_en);
+ DTRACE("DLG: %s: iflip_en = %d", __func__, iflip_en);
+
+ /* ------------------------- */
+ /* Section 1.5.2.1: OTG dependent Params */
+ /* ------------------------- */
+ DTRACE("DLG: %s: dppclk_freq_in_mhz = %3.2f", __func__, dppclk_freq_in_mhz);
+ DTRACE("DLG: %s: dispclk_freq_in_mhz = %3.2f", __func__, dispclk_freq_in_mhz);
+ DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
+ DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
+ DTRACE("DLG: %s: interlaced = %d", __func__, interlaced);
+
+ ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+ ASSERT(ref_freq_to_pix_freq < 4.0);
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
+
+ prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
+ min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
+ if (cstate_en)
+ min_ttu_vblank = dml_max(dlg_sys_param.t_sr_wm_us, min_ttu_vblank);
+ if (pstate_en)
+ min_ttu_vblank = dml_max(dlg_sys_param.t_mclk_wm_us, min_ttu_vblank);
+ min_ttu_vblank = min_ttu_vblank + t_calc_us;
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
+
+ DTRACE("DLG: %s: min_dcfclk_mhz = %3.2f", __func__, min_dcfclk_mhz);
+ DTRACE("DLG: %s: min_ttu_vblank = %3.2f", __func__, min_ttu_vblank);
+ DTRACE(
+ "DLG: %s: min_dst_y_ttu_vblank = %3.2f",
+ __func__,
+ min_dst_y_ttu_vblank);
+ DTRACE("DLG: %s: t_calc_us = %3.2f", __func__, t_calc_us);
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ DTRACE(
+ "DLG: %s: ref_freq_to_pix_freq = %3.2f",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ /* ------------------------- */
+ /* Section 1.5.2.2: Prefetch, Active and TTU */
+ /* ------------------------- */
+ /* Prefetch Calc */
+ /* Source */
+ dcc_en = e2e_pipe_param.pipe.src.dcc;
+ dual_plane = is_dual_plane(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format);
+ mode_422 = 0; /* FIXME */
+ access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
+ bytes_per_element_l = get_bytes_per_element(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ 0);
+ bytes_per_element_c = get_bytes_per_element(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ 1);
+ vp_height_l = e2e_pipe_param.pipe.src.viewport_height;
+ vp_width_l = e2e_pipe_param.pipe.src.viewport_width;
+ vp_height_c = e2e_pipe_param.pipe.src.viewport_height_c;
+ vp_width_c = e2e_pipe_param.pipe.src.viewport_width_c;
+
+ /* Scaling */
+ htaps_l = e2e_pipe_param.pipe.scale_taps.htaps;
+ htaps_c = e2e_pipe_param.pipe.scale_taps.htaps_c;
+ hratios_l = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
+ hratios_c = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio_c;
+ vratio_l = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio;
+ vratio_c = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio_c;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+ vinit_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit;
+ vinit_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_c;
+ vinit_bot_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot;
+ vinit_bot_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot_c;
+
+ swath_height_l = rq_dlg_param.rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+ dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+ meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
+ meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+
+ swath_height_c = rq_dlg_param.rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ vupdate_offset = e2e_pipe_param.pipe.dest.vupdate_offset;
+ vupdate_width = e2e_pipe_param.pipe.dest.vupdate_width;
+ vready_offset = e2e_pipe_param.pipe.dest.vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
+
+ vstartup_start = e2e_pipe_param.pipe.dest.vstartup_start;
+
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ if (vstartup_start >= min_vblank) {
+ DTRACE(
+ "WARNING_DLG: %s: vblank_start=%d vblank_end=%d",
+ __func__,
+ vblank_start,
+ vblank_end);
+ DTRACE(
+ "WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ DTRACE(
+ "WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = 0;
+ dst_y_after_scaler = 0;
+
+ if (e2e_pipe_param.pipe.src.is_hsplit)
+ dst_x_after_scaler = pixel_rate_delay_subtotal
+ + e2e_pipe_param.pipe.dest.recout_width;
+ else
+ dst_x_after_scaler = pixel_rate_delay_subtotal;
+
+ if (e2e_pipe_param.dout.output_format == dm_420)
+ dst_y_after_scaler = 1;
+ else
+ dst_y_after_scaler = 0;
+
+ if (dst_x_after_scaler >= htotal) {
+ dst_x_after_scaler = dst_x_after_scaler - htotal;
+ dst_y_after_scaler = dst_y_after_scaler + 1;
+ }
+
+ DTRACE("DLG: %s: htotal = %d", __func__, htotal);
+ DTRACE(
+ "DLG: %s: pixel_rate_delay_subtotal = %d",
+ __func__,
+ pixel_rate_delay_subtotal);
+ DTRACE("DLG: %s: dst_x_after_scaler = %d", __func__, dst_x_after_scaler);
+ DTRACE("DLG: %s: dst_y_after_scaler = %d", __func__, dst_y_after_scaler);
+
+ line_wait = mode_lib->soc.urgent_latency_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(
+ mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_us,
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
+
+ line_o = (double) dst_y_after_scaler + dst_x_after_scaler / (double) htotal;
+ line_setup = (double) (vupdate_offset + vupdate_width + vready_offset) / (double) htotal;
+ line_calc = t_calc_us / line_time_in_us;
+
+ DTRACE(
+ "DLG: %s: soc.sr_enter_plus_exit_time_us = %3.2f",
+ __func__,
+ (double) mode_lib->soc.sr_enter_plus_exit_time_us);
+ DTRACE(
+ "DLG: %s: soc.dram_clock_change_latency_us = %3.2f",
+ __func__,
+ (double) mode_lib->soc.dram_clock_change_latency_us);
+ DTRACE(
+ "DLG: %s: soc.urgent_latency_us = %3.2f",
+ __func__,
+ mode_lib->soc.urgent_latency_us);
+
+ DTRACE("DLG: %s: swath_height_l = %d", __func__, swath_height_l);
+ if (dual_plane)
+ DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
+
+ DTRACE(
+ "DLG: %s: t_srx_delay_us = %3.2f",
+ __func__,
+ (double) dlg_sys_param.t_srx_delay_us);
+ DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
+ DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
+ DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
+ DTRACE("DLG: %s: vready_offset = %d", __func__, vready_offset);
+ DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, line_time_in_us);
+ DTRACE("DLG: %s: line_wait = %3.2f", __func__, line_wait);
+ DTRACE("DLG: %s: line_o = %3.2f", __func__, line_o);
+ DTRACE("DLG: %s: line_setup = %3.2f", __func__, line_setup);
+ DTRACE("DLG: %s: line_calc = %3.2f", __func__, line_calc);
+
+ dst_y_prefetch = ((double) min_vblank - 1.0)
+ - (line_setup + line_calc + line_wait + line_o);
+ DTRACE("DLG: %s: dst_y_prefetch (before rnd) = %3.2f", __func__, dst_y_prefetch);
+ ASSERT(dst_y_prefetch >= 2.0);
+
+ dst_y_prefetch = dml_floor(4.0 * (dst_y_prefetch + 0.125), 1) / 4;
+ DTRACE("DLG: %s: dst_y_prefetch (after rnd) = %3.2f", __func__, dst_y_prefetch);
+
+ t_pre_us = dst_y_prefetch * line_time_in_us;
+ vm_bytes = 0;
+ meta_row_bytes = 0;
+
+ if (dcc_en && vm_en)
+ vm_bytes = meta_pte_bytes_per_frame_ub_l;
+ if (dcc_en)
+ meta_row_bytes = meta_bytes_per_row_ub_l;
+
+ max_num_sw_l = 0;
+ max_num_sw_c = 0;
+ max_partial_sw_l = 0;
+ max_partial_sw_c = 0;
+
+ max_vinit_l = interlaced ? dml_max(vinit_l, vinit_bot_l) : vinit_l;
+ max_vinit_c = interlaced ? dml_max(vinit_c, vinit_bot_c) : vinit_c;
+
+ get_swath_need(mode_lib, &max_num_sw_l, &max_partial_sw_l, swath_height_l, max_vinit_l);
+ if (dual_plane)
+ get_swath_need(
+ mode_lib,
+ &max_num_sw_c,
+ &max_partial_sw_c,
+ swath_height_c,
+ max_vinit_c);
+
+ lsw_l = max_num_sw_l * swath_height_l + max_partial_sw_l;
+ lsw_c = max_num_sw_c * swath_height_c + max_partial_sw_c;
+ sw_bytes_ub_l = lsw_l * swath_width_ub_l * bytes_per_element_l;
+ sw_bytes_ub_c = lsw_c * swath_width_ub_c * bytes_per_element_c;
+ sw_bytes = 0;
+ dpte_row_bytes = 0;
+
+ if (vm_en) {
+ if (dual_plane)
+ dpte_row_bytes = dpte_bytes_per_row_ub_l + dpte_bytes_per_row_ub_c;
+ else
+ dpte_row_bytes = dpte_bytes_per_row_ub_l;
+ } else {
+ dpte_row_bytes = 0;
+ }
+
+ if (dual_plane)
+ sw_bytes = sw_bytes_ub_l + sw_bytes_ub_c;
+ else
+ sw_bytes = sw_bytes_ub_l;
+
+ DTRACE("DLG: %s: sw_bytes_ub_l = %d", __func__, sw_bytes_ub_l);
+ DTRACE("DLG: %s: sw_bytes_ub_c = %d", __func__, sw_bytes_ub_c);
+ DTRACE("DLG: %s: sw_bytes = %d", __func__, sw_bytes);
+ DTRACE("DLG: %s: vm_bytes = %d", __func__, vm_bytes);
+ DTRACE("DLG: %s: meta_row_bytes = %d", __func__, meta_row_bytes);
+ DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes);
+
+ prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us;
+ flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param.total_flip_bw)
+ / (double) dlg_sys_param.total_flip_bytes;
+ t_vm_us = line_time_in_us / 4.0;
+ if (vm_en && dcc_en) {
+ t_vm_us = dml_max(
+ dlg_sys_param.t_extra_us,
+ dml_max((double) vm_bytes / prefetch_bw, t_vm_us));
+
+ if (iflip_en && !dual_plane) {
+ t_vm_us = dml_max(mode_lib->soc.urgent_latency_us, t_vm_us);
+ if (flip_bw > 0.)
+ t_vm_us = dml_max(vm_bytes / flip_bw, t_vm_us);
+ }
+ }
+
+ t_r0_us = dml_max(dlg_sys_param.t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
+
+ if (vm_en || dcc_en) {
+ t_r0_us = dml_max(
+ (double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw,
+ dlg_sys_param.t_extra_us);
+ t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us);
+
+ if (iflip_en && !dual_plane) {
+ t_r0_us = dml_max(mode_lib->soc.urgent_latency_us * 2.0, t_r0_us);
+ if (flip_bw > 0.)
+ t_r0_us = dml_max(
+ (dpte_row_bytes + meta_row_bytes) / flip_bw,
+ t_r0_us);
+ }
+ }
+
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; /* in terms of line */
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; /* in terms of refclk */
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->dst_y_after_scaler = 0x%0x",
+ __func__,
+ disp_dlg_regs->dst_y_after_scaler);
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->refcyc_x_after_scaler = 0x%0x",
+ __func__,
+ disp_dlg_regs->refcyc_x_after_scaler);
+
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->dst_y_prefetch = %d",
+ __func__,
+ disp_dlg_regs->dst_y_prefetch);
+
+ dst_y_per_vm_vblank = 0.0;
+ dst_y_per_row_vblank = 0.0;
+
+ dst_y_per_vm_vblank = t_vm_us / line_time_in_us;
+ dst_y_per_vm_vblank = dml_floor(4.0 * (dst_y_per_vm_vblank + 0.125), 1) / 4.0;
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+
+ dst_y_per_row_vblank = t_r0_us / line_time_in_us;
+ dst_y_per_row_vblank = dml_floor(4.0 * (dst_y_per_row_vblank + 0.125), 1) / 4.0;
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+
+ DTRACE("DLG: %s: lsw_l = %d", __func__, lsw_l);
+ DTRACE("DLG: %s: lsw_c = %d", __func__, lsw_c);
+ DTRACE("DLG: %s: dpte_bytes_per_row_ub_l = %d", __func__, dpte_bytes_per_row_ub_l);
+ DTRACE("DLG: %s: dpte_bytes_per_row_ub_c = %d", __func__, dpte_bytes_per_row_ub_c);
+
+ DTRACE("DLG: %s: prefetch_bw = %3.2f", __func__, prefetch_bw);
+ DTRACE("DLG: %s: flip_bw = %3.2f", __func__, flip_bw);
+ DTRACE("DLG: %s: t_pre_us = %3.2f", __func__, t_pre_us);
+ DTRACE("DLG: %s: t_vm_us = %3.2f", __func__, t_vm_us);
+ DTRACE("DLG: %s: t_r0_us = %3.2f", __func__, t_r0_us);
+ DTRACE("DLG: %s: dst_y_per_vm_vblank = %3.2f", __func__, dst_y_per_vm_vblank);
+ DTRACE("DLG: %s: dst_y_per_row_vblank = %3.2f", __func__, dst_y_per_row_vblank);
+ DTRACE("DLG: %s: dst_y_prefetch = %3.2f", __func__, dst_y_prefetch);
+
+ min_dst_y_per_vm_vblank = 8.0;
+ min_dst_y_per_row_vblank = 16.0;
+ if (htotal <= 75) {
+ min_vblank = 300;
+ min_dst_y_per_vm_vblank = 100.0;
+ min_dst_y_per_row_vblank = 100.0;
+ }
+
+ ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ DTRACE("DLG: %s: lsw = %3.2f", __func__, lsw);
+
+ vratio_pre_l = get_vratio_pre(
+ mode_lib,
+ max_num_sw_l,
+ max_partial_sw_l,
+ swath_height_l,
+ max_vinit_l,
+ lsw);
+ vratio_pre_c = 1.0;
+ if (dual_plane)
+ vratio_pre_c = get_vratio_pre(
+ mode_lib,
+ max_num_sw_c,
+ max_partial_sw_c,
+ swath_height_c,
+ max_vinit_c,
+ lsw);
+
+ DTRACE("DLG: %s: vratio_pre_l=%3.2f", __func__, vratio_pre_l);
+ DTRACE("DLG: %s: vratio_pre_c=%3.2f", __func__, vratio_pre_c);
+
+ ASSERT(vratio_pre_l <= 4.0);
+ if (vratio_pre_l >= 4.0)
+ disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 21) - 1;
+ else
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+
+ ASSERT(vratio_pre_c <= 4.0);
+ if (vratio_pre_c >= 4.0)
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) dml_pow(2, 21) - 1;
+ else
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
+
+ /* Active */
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_c < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; /* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; /* *2 for 2 pixel per element */
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratios_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratios_l * 2.0;
+ } else {
+ if (hratios_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratios_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratios_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratios_c * 2.0;
+ } else {
+ if (hratios_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratios_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+ refcyc_per_req_delivery_pre_cur0 = 0.;
+ refcyc_per_req_delivery_cur0 = 0.;
+
+ full_recout_width = 0;
+ if (e2e_pipe_param.pipe.src.is_hsplit) {
+ if (e2e_pipe_param.pipe.dest.full_recout_width == 0) {
+ DTRACE("DLG: %s: Warningfull_recout_width not set in hsplit mode", __func__);
+ full_recout_width = e2e_pipe_param.pipe.dest.recout_width * 2; /* assume half split for dcn1 */
+ } else
+ full_recout_width = e2e_pipe_param.pipe.dest.full_recout_width;
+ } else
+ full_recout_width = e2e_pipe_param.pipe.dest.recout_width;
+
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); /* per line */
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); /* per line */
+
+ DTRACE("DLG: %s: full_recout_width = %d", __func__, full_recout_width);
+ DTRACE("DLG: %s: hscale_pixel_rate_l = %3.2f", __func__, hscale_pixel_rate_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_l = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); /* per line */
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); /* per line */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_c = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_c);
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
+ }
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+
+ /* TTU - Luma / Chroma */
+ if (access_dir) { /* vertical access */
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); /* per req */
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); /* per req */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_l = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); /* per req */
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); /* per req */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_c = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c =
+ (unsigned int) (refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ /* TTU - Cursor */
+ hratios_cur0 = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
+ cur0_src_width = e2e_pipe_param.pipe.src.cur0_src_width; /* cursor source width */
+ cur0_bpp = (enum cursor_bpp) e2e_pipe_param.pipe.src.cur0_bpp;
+ cur0_req_size = 0;
+ cur0_req_width = 0;
+ cur0_width_ub = 0.0;
+ cur0_req_per_width = 0.0;
+ hactive_cur0 = 0.0;
+
+ ASSERT(cur0_src_width <= 256);
+
+ if (cur0_src_width > 0) {
+ unsigned int cur0_bit_per_pixel = 0;
+
+ if (cur0_bpp == dm_cur_2bit) {
+ cur0_req_size = 64; /* byte */
+ cur0_bit_per_pixel = 2;
+ } else { /* 32bit */
+ cur0_bit_per_pixel = 32;
+ if (cur0_src_width >= 1 && cur0_src_width <= 16)
+ cur0_req_size = 64;
+ else if (cur0_src_width >= 17 && cur0_src_width <= 31)
+ cur0_req_size = 128;
+ else
+ cur0_req_size = 256;
+ }
+
+ cur0_req_width = (double) cur0_req_size / ((double) cur0_bit_per_pixel / 8.0);
+ cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
+ * (double) cur0_req_width;
+ cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+
+ if (vratio_pre_l <= 1.0) {
+ refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
+ / (double) cur0_req_per_width;
+ } else {
+ refcyc_per_req_delivery_pre_cur0 = (double) refclk_freq_in_mhz
+ * (double) cur0_src_width / hscale_pixel_rate_l
+ / (double) cur0_req_per_width;
+ }
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ ASSERT(refcyc_per_req_delivery_pre_cur0 < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ refcyc_per_req_delivery_cur0 = hactive_cur0 * ref_freq_to_pix_freq
+ / (double) cur0_req_per_width;
+ } else {
+ refcyc_per_req_delivery_cur0 = (double) refclk_freq_in_mhz
+ * (double) cur0_src_width / hscale_pixel_rate_l
+ / (double) cur0_req_per_width;
+ }
+
+ DTRACE("DLG: %s: cur0_req_width = %d", __func__, cur0_req_width);
+ DTRACE(
+ "DLG: %s: cur0_width_ub = %3.2f",
+ __func__,
+ cur0_width_ub);
+ DTRACE(
+ "DLG: %s: cur0_req_per_width = %3.2f",
+ __func__,
+ cur0_req_per_width);
+ DTRACE(
+ "DLG: %s: hactive_cur0 = %3.2f",
+ __func__,
+ hactive_cur0);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_cur0);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_cur0);
+
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
+ ASSERT(refcyc_per_req_delivery_cur0 < dml_pow(2, 13));
+ } else {
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = 0;
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = 0;
+ }
+
+ /* TTU - Misc */
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
new file mode 100644
index 000000000000..987d7671cd0f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_RQ_DLG_CALC_H__
+#define __DISPLAY_RQ_DLG_CALC_H__
+
+#include "dml_common_defs.h"
+#include "display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+void dml1_extract_rq_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_rq_params_st rq_param);
+/* Function: dml_rq_dlg_get_rq_params
+ * Calculate requestor related parameters that register definition agnostic
+ * (i.e. this layer does try to separate real values from register definition)
+ * Input:
+ * pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+ * Output:
+ * rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
+ */
+void dml1_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param);
+
+
+/* Function: dml_rq_dlg_get_dlg_params
+ * Calculate deadline related parameters
+ */
+void dml1_rq_dlg_get_dlg_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool iflip_en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
new file mode 100644
index 000000000000..b953b02a1512
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml_common_defs.h"
+#include "../calcs/dcn_calc_math.h"
+
+#include "dml_inline_defs.h"
+
+double dml_round(double a)
+{
+ double round_pt = 0.5;
+ double ceil = dml_ceil(a, 1);
+ double floor = dml_floor(a, 1);
+
+ if (a - floor >= round_pt)
+ return ceil;
+ else
+ return floor;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
new file mode 100644
index 000000000000..b2847bc469fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMMON_DEFS_H__
+#define __DC_COMMON_DEFS_H__
+
+#include "dm_services.h"
+#include "dc_features.h"
+#include "display_mode_structs.h"
+#include "display_mode_enums.h"
+
+#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
+#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
+
+double dml_round(double a);
+
+#endif /* __DC_COMMON_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
new file mode 100644
index 000000000000..e68086b8a22f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML_INLINE_DEFS_H__
+#define __DML_INLINE_DEFS_H__
+
+#include "dml_common_defs.h"
+#include "../calcs/dcn_calc_math.h"
+
+static inline double dml_min(double a, double b)
+{
+ return (double) dcn_bw_min2(a, b);
+}
+
+static inline double dml_max(double a, double b)
+{
+ return (double) dcn_bw_max2(a, b);
+}
+
+static inline double dml_max3(double a, double b, double c)
+{
+ return dml_max(dml_max(a, b), c);
+}
+
+static inline double dml_max4(double a, double b, double c, double d)
+{
+ return dml_max(dml_max(a, b), dml_max(c, d));
+}
+
+static inline double dml_max5(double a, double b, double c, double d, double e)
+{
+ return dml_max(dml_max4(a, b, c, d), e);
+}
+
+static inline double dml_ceil(double a, double granularity)
+{
+ return (double) dcn_bw_ceil2(a, granularity);
+}
+
+static inline double dml_floor(double a, double granularity)
+{
+ return (double) dcn_bw_floor2(a, granularity);
+}
+
+static inline int dml_log2(double x)
+{
+ return dml_round((double)dcn_bw_log(x, 2));
+}
+
+static inline double dml_pow(double a, int exp)
+{
+ return (double) dcn_bw_pow(a, exp);
+}
+
+static inline double dml_fmod(double f, int val)
+{
+ return (double) dcn_bw_mod(f, val);
+}
+
+static inline double dml_ceil_2(double f)
+{
+ return (double) dcn_bw_ceil2(f, 2);
+}
+
+static inline double dml_ceil_ex(double x, double granularity)
+{
+ return (double) dcn_bw_ceil2(x, granularity);
+}
+
+static inline double dml_floor_ex(double x, double granularity)
+{
+ return (double) dcn_bw_floor2(x, granularity);
+}
+
+static inline double dml_log(double x, double base)
+{
+ return (double) dcn_bw_log(x, base);
+}
+
+static inline unsigned int dml_round_to_multiple(unsigned int num,
+ unsigned int multiple,
+ bool up)
+{
+ unsigned int remainder;
+
+ if (multiple == 0)
+ return num;
+
+ remainder = num % multiple;
+
+ if (remainder == 0)
+ return num;
+
+ if (up)
+ return (num + multiple - remainder);
+ else
+ return (num - remainder);
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
new file mode 100644
index 000000000000..bc7d8c707221
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "soc_bounding_box.h"
+#include "display_mode_lib.h"
+#include "dc_features.h"
+
+#include "dml_inline_defs.h"
+void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
+{
+ to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
+ to_box->sr_exit_time_us = from_box->sr_exit_time_us;
+ to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
+ to_box->urgent_latency_us = from_box->urgent_latency_us;
+ to_box->writeback_latency_us = from_box->writeback_latency_us;
+}
+
+voltage_scaling_st dml_socbb_voltage_scaling(
+ const soc_bounding_box_st *soc,
+ enum voltage_state voltage)
+{
+ const voltage_scaling_st *voltage_state;
+ const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
+
+ for (voltage_state = soc->clock_limits;
+ voltage_state < voltage_end && voltage_state->state != voltage;
+ voltage_state++) {
+ }
+
+ if (voltage_state < voltage_end)
+ return *voltage_state;
+ return soc->clock_limits[DC__VOLTAGE_STATES - 1];
+}
+
+double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
+{
+ double return_bw;
+
+ voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
+
+ return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
+ state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
+ * box->ideal_dram_bw_after_urgent_percent / 100.0);
+
+ return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
+
+ return return_bw;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
new file mode 100644
index 000000000000..7a65206a6d21
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __SOC_BOUNDING_BOX_H__
+#define __SOC_BOUNDING_BOX_H__
+
+#include "dml_common_defs.h"
+
+void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
+voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
+double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
new file mode 100644
index 000000000000..562ee189d780
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -0,0 +1,79 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'gpio' sub-component of DAL.
+# It provides the control and status of HW GPIO pins.
+
+GPIO = gpio_base.o gpio_service.o hw_factory.o \
+ hw_gpio.o hw_hpd.o hw_ddc.o hw_translate.o
+
+AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# all DCE8.x are derived from DCE8.0
+GPIO_DCE80 = hw_translate_dce80.o hw_factory_dce80.o
+
+AMD_DAL_GPIO_DCE80 = $(addprefix $(AMDDALPATH)/dc/gpio/dce80/,$(GPIO_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE80)
+
+###############################################################################
+# DCE 11x
+###############################################################################
+GPIO_DCE110 = hw_translate_dce110.o hw_factory_dce110.o
+
+AMD_DAL_GPIO_DCE110 = $(addprefix $(AMDDALPATH)/dc/gpio/dce110/,$(GPIO_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE110)
+
+###############################################################################
+# DCE 12x
+###############################################################################
+GPIO_DCE120 = hw_translate_dce120.o hw_factory_dce120.o
+
+AMD_DAL_GPIO_DCE120 = $(addprefix $(AMDDALPATH)/dc/gpio/dce120/,$(GPIO_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
+
+###############################################################################
+# DCN 1x
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
+
+AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10)
+endif
+
+###############################################################################
+# Diagnostics on FPGA
+###############################################################################
+GPIO_DIAG_FPGA = hw_translate_diag.o hw_factory_diag.o
+
+AMD_DAL_GPIO_DIAG_FPGA = $(addprefix $(AMDDALPATH)/dc/gpio/diagnostics/,$(GPIO_DIAG_FPGA))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DIAG_FPGA)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
new file mode 100644
index 000000000000..20d81bca119c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "hw_factory_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define REG(reg_name)\
+ mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ mm ## block ## id ## _ ## reg_name
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+
+/*
+ * dal_hw_factory_dce110_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dce110_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
new file mode 100644
index 000000000000..ecf06ed0d587
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE110_H__
+#define __DAL_HW_FACTORY_DCE110_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dce110_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
new file mode 100644
index 000000000000..ac4cddbba815
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "hw_translate_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case mmDC_GPIO_GENERIC_A:
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case mmDC_GPIO_HPD_A:
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case mmDC_GPIO_SYNCA_A:
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* mmDC_GPIO_GENLK_MASK */
+ case mmDC_GPIO_GENLK_A:
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC
+ * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case mmDC_GPIO_DDC1_A:
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case mmDC_GPIO_DDC2_A:
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case mmDC_GPIO_DDC3_A:
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case mmDC_GPIO_DDC4_A:
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case mmDC_GPIO_DDC5_A:
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case mmDC_GPIO_DDC6_A:
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case mmDC_GPIO_DDCVGA_A:
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case mmDC_GPIO_I2CPAD_A:
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case mmDC_GPIO_PWRSEQ_A:
+ case mmDC_GPIO_PAD_STRENGTH_1:
+ case mmDC_GPIO_PAD_STRENGTH_2:
+ case mmDC_GPIO_DEBUG:
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = mmDC_GPIO_GENERIC_A;
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = mmDC_GPIO_HPD_A;
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dce110_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dce110_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
new file mode 100644
index 000000000000..4d16e09853c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE110_H__
+#define __DAL_HW_TRANSLATE_DCE110_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dce110_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
new file mode 100644
index 000000000000..4ced9a7d63dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "hw_factory_dce120.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#define block HPD
+#define reg_num 0
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+
+/* fucntion table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+/*
+ * dal_hw_factory_dce120_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dce120_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h
new file mode 100644
index 000000000000..db260c351f73
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE120_H__
+#define __DAL_HW_FACTORY_DCE120_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dce120_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
new file mode 100644
index 000000000000..af3843a69652
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "hw_translate_dce120.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expend register list macro defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expend register list macro defined in HW object header file
+ * end *********************/
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case REG(DC_GPIO_SYNCA_A):
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* REG(DC_GPIO_GENLK_MASK */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC
+ * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDC6_A):
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case REG(DC_GPIO_I2CPAD_A):
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case REG(DC_GPIO_PWRSEQ_A):
+ case REG(DC_GPIO_PAD_STRENGTH_1):
+ case REG(DC_GPIO_PAD_STRENGTH_2):
+ case REG(DC_GPIO_DEBUG):
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dce120_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dce120_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h
new file mode 100644
index 000000000000..c21766894af3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE120_H__
+#define __DAL_HW_TRANSLATE_DCE120_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dce120_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
new file mode 100644
index 000000000000..48b67866377e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "hw_factory_dce80.h"
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#define REG(reg_name)\
+ mm ## reg_name
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define HPD_REG_LIST_DCE8(id) \
+ HPD_GPIO_REG_LIST(id), \
+ .int_status = mmDC_HPD ## id ## _INT_STATUS,\
+ .toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL
+
+#define HPD_MASK_SH_LIST_DCE8(mask_sh) \
+ .DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\
+ .DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\
+ .DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\
+ .DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST_DCE8(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5),
+ hpd_regs(6)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST_DCE8(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST_DCE8(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+
+void dal_hw_factory_dce80_init(
+ struct hw_factory *factory)
+{
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
new file mode 100644
index 000000000000..e78a8b36f35a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE80_H__
+#define __DAL_HW_FACTORY_DCE80_H__
+
+void dal_hw_factory_dce80_init(
+ struct hw_factory *factory);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
new file mode 100644
index 000000000000..fabb9da504be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "hw_translate_dce80.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+#include "smu/smu_7_0_1_d.h"
+
+/*
+ * @brief
+ * Returns index of first bit (starting with LSB) which is set
+ */
+static uint32_t index_from_vector(
+ uint32_t vector)
+{
+ uint32_t result = 0;
+ uint32_t mask = 1;
+
+ do {
+ if (vector == mask)
+ return result;
+
+ ++result;
+ mask <<= 1;
+ } while (mask);
+
+ BREAK_TO_DEBUGGER();
+
+ return GPIO_ENUM_UNKNOWN;
+}
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case mmDC_GPIO_GENERIC_A:
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* HPD */
+ case mmDC_GPIO_HPD_A:
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case mmDC_GPIO_SYNCA_A:
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* mmDC_GPIO_GENLK_MASK */
+ case mmDC_GPIO_GENLK_A:
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* GPIOPAD */
+ case mmGPIOPAD_A:
+ *id = GPIO_ID_GPIO_PAD;
+ *en = index_from_vector(mask);
+ return (*en <= GPIO_GPIO_PAD_MAX);
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC
+ * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case mmDC_GPIO_DDC1_A:
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case mmDC_GPIO_DDC2_A:
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case mmDC_GPIO_DDC3_A:
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case mmDC_GPIO_DDC4_A:
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case mmDC_GPIO_DDC5_A:
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case mmDC_GPIO_DDC6_A:
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case mmDC_GPIO_DDCVGA_A:
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case mmDC_GPIO_I2CPAD_A:
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case mmDC_GPIO_PWRSEQ_A:
+ case mmDC_GPIO_PAD_STRENGTH_1:
+ case mmDC_GPIO_PAD_STRENGTH_2:
+ case mmDC_GPIO_DEBUG:
+ return false;
+ /* UNEXPECTED */
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = mmDC_GPIO_GENERIC_A;
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = mmDC_GPIO_HPD_A;
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GPIO_PAD:
+ info->offset = mmGPIOPAD_A;
+ info->mask = (1 << en);
+ result = (info->mask <= GPIO_GPIO_PAD_MAX);
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+void dal_hw_translate_dce80_init(
+ struct hw_translate *translate)
+{
+ translate->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
new file mode 100644
index 000000000000..374f2f3282a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE80_H__
+#define __DAL_HW_TRANSLATE_DCE80_H__
+
+void dal_hw_translate_dce80_init(
+ struct hw_translate *tr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
new file mode 100644
index 000000000000..409763c70ce5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "hw_factory_dcn10.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#define block HPD
+#define reg_num 0
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+
+/* fucntion table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+/*
+ * dal_hw_factory_dcn10_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dcn10_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h
new file mode 100644
index 000000000000..2cc7a585b1f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCN10_H__
+#define __DAL_HW_FACTORY_DCN10_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dcn10_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
new file mode 100644
index 000000000000..64a6915b846b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "hw_translate_dcn10.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expend register list macro defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expend register list macro defined in HW object header file
+ * end *********************/
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case REG(DC_GPIO_SYNCA_A):
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* REG(DC_GPIO_GENLK_MASK */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC
+ * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDC6_A):
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case REG(DC_GPIO_I2CPAD_A):
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case REG(DC_GPIO_PWRSEQ_A):
+ case REG(DC_GPIO_PAD_STRENGTH_1):
+ case REG(DC_GPIO_PAD_STRENGTH_2):
+ case REG(DC_GPIO_DEBUG):
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dcn10_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dcn10_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h
new file mode 100644
index 000000000000..9edef53c80a0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCN10_H__
+#define __DAL_HW_TRANSLATE_DCN10_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dcn10_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
new file mode 100644
index 000000000000..9c4a56c738c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
+
+#include "gpio_regs.h"
+
+/****************************** new register headers */
+/*** following in header */
+
+#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \
+ .type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\
+ .type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_REG_LIST(cd,id) \
+ {\
+ DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ }
+
+#define DDC_REG_LIST(cd,id) \
+ DDC_GPIO_REG_LIST(cd,id),\
+ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
+
+#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
+ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
+ .type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_VGA_REG_LIST(cd) \
+ {\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\
+ }
+
+#define DDC_VGA_REG_LIST(cd) \
+ DDC_GPIO_VGA_REG_LIST(cd),\
+ .ddc_setup = mmDC_I2C_DDCVGA_SETUP
+
+#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \
+ .type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\
+ .type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_I2C_REG_LIST(cd) \
+ {\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\
+ }
+
+#define DDC_I2C_REG_LIST(cd) \
+ DDC_GPIO_I2C_REG_LIST(cd),\
+ .ddc_setup = 0
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
+
+
+struct ddc_registers {
+ struct gpio_registers gpio;
+ uint32_t ddc_setup;
+};
+
+struct ddc_sh_mask {
+ /* i2c_dd_setup */
+ uint32_t DC_I2C_DDC1_ENABLE;
+ uint32_t DC_I2C_DDC1_EDID_DETECT_ENABLE;
+ uint32_t DC_I2C_DDC1_EDID_DETECT_MODE;
+ /* ddc1_mask */
+ uint32_t DC_GPIO_DDC1DATA_PD_EN;
+ uint32_t DC_GPIO_DDC1CLK_PD_EN;
+ uint32_t AUX_PAD1_MODE;
+ /* i2cpad_mask */
+ uint32_t DC_GPIO_SDA_PD_DIS;
+ uint32_t DC_GPIO_SCL_PD_DIS;
+};
+
+
+
+/*** following in dc_resource */
+
+#define ddc_data_regs(id) \
+{\
+ DDC_REG_LIST(DATA,id)\
+}
+
+#define ddc_clk_regs(id) \
+{\
+ DDC_REG_LIST(CLK,id)\
+}
+
+#define ddc_vga_data_regs \
+{\
+ DDC_VGA_REG_LIST(DATA)\
+}
+
+#define ddc_vga_clk_regs \
+{\
+ DDC_VGA_REG_LIST(CLK)\
+}
+
+#define ddc_i2c_data_regs \
+{\
+ DDC_I2C_REG_LIST(SDA)\
+}
+
+#define ddc_i2c_clk_regs \
+{\
+ DDC_I2C_REG_LIST(SCL)\
+}
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
new file mode 100644
index 000000000000..26695b963c58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = NULL,
+ .create_ddc_clock = NULL,
+ .create_generic = NULL,
+ .create_hpd = NULL,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+};
+
+void dal_hw_factory_diag_fpga_init(struct hw_factory *factory)
+{
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
new file mode 100644
index 000000000000..8a74f6adb8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
+#define __DAL_HW_FACTORY_DIAG_FPGA_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
new file mode 100644
index 000000000000..bf9068846927
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+
+#include "../hw_translate.h"
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = NULL,
+ .id_to_offset = NULL,
+};
+
+void dal_hw_translate_diag_fpga_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
new file mode 100644
index 000000000000..4f053241fe96
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DIAG_FPGA_H__
+#define __DAL_HW_TRANSLATE_DIAG_FPGA_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_diag_fpga_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
new file mode 100644
index 000000000000..1d1efd72b291
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_interface.h"
+#include "include/gpio_service_interface.h"
+#include "hw_gpio.h"
+#include "hw_translate.h"
+#include "hw_factory.h"
+#include "gpio_service.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Public API
+ */
+
+enum gpio_result dal_gpio_open(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ return dal_gpio_open_ex(gpio, mode);
+}
+
+enum gpio_result dal_gpio_open_ex(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ if (gpio->pin) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_ALREADY_OPENED;
+ }
+
+ gpio->mode = mode;
+
+ return dal_gpio_service_open(
+ gpio->service, gpio->id, gpio->en, mode, &gpio->pin);
+}
+
+enum gpio_result dal_gpio_get_value(
+ const struct gpio *gpio,
+ uint32_t *value)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->get_value(gpio->pin, value);
+}
+
+enum gpio_result dal_gpio_set_value(
+ const struct gpio *gpio,
+ uint32_t value)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->set_value(gpio->pin, value);
+}
+
+enum gpio_mode dal_gpio_get_mode(
+ const struct gpio *gpio)
+{
+ return gpio->mode;
+}
+
+enum gpio_result dal_gpio_change_mode(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->change_mode(gpio->pin, mode);
+}
+
+enum gpio_id dal_gpio_get_id(
+ const struct gpio *gpio)
+{
+ return gpio->id;
+}
+
+uint32_t dal_gpio_get_enum(
+ const struct gpio *gpio)
+{
+ return gpio->en;
+}
+
+enum gpio_result dal_gpio_set_config(
+ struct gpio *gpio,
+ const struct gpio_config_data *config_data)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->set_config(gpio->pin, config_data);
+}
+
+enum gpio_result dal_gpio_get_pin_info(
+ const struct gpio *gpio,
+ struct gpio_pin_info *pin_info)
+{
+ return gpio->service->translate.funcs->id_to_offset(
+ gpio->id, gpio->en, pin_info) ?
+ GPIO_RESULT_OK : GPIO_RESULT_INVALID_DATA;
+}
+
+enum sync_source dal_gpio_get_sync_source(
+ const struct gpio *gpio)
+{
+ switch (gpio->id) {
+ case GPIO_ID_GENERIC:
+ switch (gpio->en) {
+ case GPIO_GENERIC_A:
+ return SYNC_SOURCE_IO_GENERIC_A;
+ case GPIO_GENERIC_B:
+ return SYNC_SOURCE_IO_GENERIC_B;
+ case GPIO_GENERIC_C:
+ return SYNC_SOURCE_IO_GENERIC_C;
+ case GPIO_GENERIC_D:
+ return SYNC_SOURCE_IO_GENERIC_D;
+ case GPIO_GENERIC_E:
+ return SYNC_SOURCE_IO_GENERIC_E;
+ case GPIO_GENERIC_F:
+ return SYNC_SOURCE_IO_GENERIC_F;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (gpio->en) {
+ case GPIO_SYNC_HSYNC_A:
+ return SYNC_SOURCE_IO_HSYNC_A;
+ case GPIO_SYNC_VSYNC_A:
+ return SYNC_SOURCE_IO_VSYNC_A;
+ case GPIO_SYNC_HSYNC_B:
+ return SYNC_SOURCE_IO_HSYNC_B;
+ case GPIO_SYNC_VSYNC_B:
+ return SYNC_SOURCE_IO_VSYNC_B;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_HPD:
+ switch (gpio->en) {
+ case GPIO_HPD_1:
+ return SYNC_SOURCE_IO_HPD1;
+ case GPIO_HPD_2:
+ return SYNC_SOURCE_IO_HPD2;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (gpio->en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ return SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ return SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC;
+ case GPIO_GSL_SWAPLOCK_A:
+ return SYNC_SOURCE_GSL_IO_SWAPLOCK_A;
+ case GPIO_GSL_SWAPLOCK_B:
+ return SYNC_SOURCE_GSL_IO_SWAPLOCK_B;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+}
+
+enum gpio_pin_output_state dal_gpio_get_output_state(
+ const struct gpio *gpio)
+{
+ return gpio->output_state;
+}
+
+void dal_gpio_close(
+ struct gpio *gpio)
+{
+ if (!gpio)
+ return;
+
+ dal_gpio_service_close(gpio->service, &gpio->pin);
+
+ gpio->mode = GPIO_MODE_UNKNOWN;
+}
+
+/*
+ * @brief
+ * Creation and destruction
+ */
+
+struct gpio *dal_gpio_create(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_pin_output_state output_state)
+{
+ struct gpio *gpio = kzalloc(sizeof(struct gpio), GFP_KERNEL);
+
+ if (!gpio) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ gpio->service = service;
+ gpio->pin = NULL;
+ gpio->id = id;
+ gpio->en = en;
+ gpio->mode = GPIO_MODE_UNKNOWN;
+ gpio->output_state = output_state;
+
+ return gpio;
+}
+
+void dal_gpio_destroy(
+ struct gpio **gpio)
+{
+ if (!gpio || !*gpio) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_gpio_close(*gpio);
+
+ kfree(*gpio);
+
+ *gpio = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
new file mode 100644
index 000000000000..5c5925299f8d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
+
+struct gpio_registers {
+ uint32_t MASK_reg;
+ uint32_t MASK_mask;
+ uint32_t MASK_shift;
+ uint32_t A_reg;
+ uint32_t A_mask;
+ uint32_t A_shift;
+ uint32_t EN_reg;
+ uint32_t EN_mask;
+ uint32_t EN_shift;
+ uint32_t Y_reg;
+ uint32_t Y_mask;
+ uint32_t Y_shift;
+};
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
new file mode 100644
index 000000000000..80038e0e610f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_interface.h"
+#include "include/gpio_service_interface.h"
+#include "hw_translate.h"
+#include "hw_factory.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "gpio_service.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "hw_gpio.h"
+
+/*
+ * @brief
+ * Public API.
+ */
+
+struct gpio_service *dal_gpio_service_create(
+ enum dce_version dce_version_major,
+ enum dce_version dce_version_minor,
+ struct dc_context *ctx)
+{
+ struct gpio_service *service;
+
+ uint32_t index_of_id;
+
+ service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+
+ if (!service) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!dal_hw_translate_init(&service->translate, dce_version_major,
+ dce_version_minor)) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ if (!dal_hw_factory_init(&service->factory, dce_version_major,
+ dce_version_minor)) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ /* allocate and initialize business storage */
+ {
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ index_of_id = 0;
+ service->ctx = ctx;
+
+ do {
+ uint32_t number_of_bits =
+ service->factory.number_of_pins[index_of_id];
+
+ uint32_t number_of_uints =
+ (number_of_bits + bits_per_uint - 1) /
+ bits_per_uint;
+
+ uint32_t *slot;
+
+ if (number_of_bits) {
+ uint32_t index_of_uint = 0;
+
+ slot = kzalloc(number_of_uints * sizeof(uint32_t),
+ GFP_KERNEL);
+
+ if (!slot) {
+ BREAK_TO_DEBUGGER();
+ goto failure_2;
+ }
+
+ do {
+ slot[index_of_uint] = 0;
+
+ ++index_of_uint;
+ } while (index_of_uint < number_of_uints);
+ } else
+ slot = NULL;
+
+ service->busyness[index_of_id] = slot;
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+ }
+
+ return service;
+
+failure_2:
+ while (index_of_id) {
+ uint32_t *slot;
+
+ --index_of_id;
+
+ slot = service->busyness[index_of_id];
+
+ kfree(slot);
+ }
+
+failure_1:
+ kfree(service);
+
+ return NULL;
+}
+
+struct gpio *dal_gpio_service_create_irq(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask)
+{
+ enum gpio_id id;
+ uint32_t en;
+
+ if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ return dal_gpio_create_irq(service, id, en);
+}
+
+void dal_gpio_service_destroy(
+ struct gpio_service **ptr)
+{
+ if (!ptr || !*ptr) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* free business storage */
+ {
+ uint32_t index_of_id = 0;
+
+ do {
+ uint32_t *slot = (*ptr)->busyness[index_of_id];
+
+ kfree(slot);
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+ }
+
+ kfree(*ptr);
+
+ *ptr = NULL;
+}
+
+/*
+ * @brief
+ * Private API.
+ */
+
+static bool is_pin_busy(
+ const struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
+
+ return 0 != (*slot & (1 << (en % bits_per_uint)));
+}
+
+static void set_pin_busy(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ service->busyness[id][en / bits_per_uint] |=
+ (1 << (en % bits_per_uint));
+}
+
+static void set_pin_free(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ service->busyness[id][en / bits_per_uint] &=
+ ~(1 << (en % bits_per_uint));
+}
+
+enum gpio_result dal_gpio_service_open(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_mode mode,
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_gpio_pin *pin;
+
+ if (!service->busyness[id]) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
+
+ if (is_pin_busy(service, id, en)) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_DEVICE_BUSY;
+ }
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ pin = service->factory.funcs->create_ddc_data(
+ service->ctx, id, en);
+ service->factory.funcs->define_ddc_registers(pin, en);
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ pin = service->factory.funcs->create_ddc_clock(
+ service->ctx, id, en);
+ service->factory.funcs->define_ddc_registers(pin, en);
+ break;
+ case GPIO_ID_GENERIC:
+ pin = service->factory.funcs->create_generic(
+ service->ctx, id, en);
+ break;
+ case GPIO_ID_HPD:
+ pin = service->factory.funcs->create_hpd(
+ service->ctx, id, en);
+ service->factory.funcs->define_hpd_registers(pin, en);
+ break;
+ case GPIO_ID_SYNC:
+ pin = service->factory.funcs->create_sync(
+ service->ctx, id, en);
+ break;
+ case GPIO_ID_GSL:
+ pin = service->factory.funcs->create_gsl(
+ service->ctx, id, en);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ if (!pin) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ if (!pin->funcs->open(pin, mode)) {
+ ASSERT_CRITICAL(false);
+ dal_gpio_service_close(service, &pin);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
+
+ set_pin_busy(service, id, en);
+ *ptr = pin;
+ return GPIO_RESULT_OK;
+}
+
+void dal_gpio_service_close(
+ struct gpio_service *service,
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_gpio_pin *pin;
+
+ if (!ptr) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ pin = *ptr;
+
+ if (pin) {
+ set_pin_free(service, pin->id, pin->en);
+
+ pin->funcs->close(pin);
+
+ pin->funcs->destroy(ptr);
+ }
+}
+
+
+enum dc_irq_source dal_irq_get_source(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1 +
+ dal_gpio_get_enum(irq));
+ case GPIO_ID_GPIO_PAD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_GPIOPAD0 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+enum dc_irq_source dal_irq_get_rx_source(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1RX +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+enum gpio_result dal_irq_setup_hpd_filter(
+ struct gpio *irq,
+ struct gpio_hpd_config *config)
+{
+ struct gpio_config_data config_data;
+
+ if (!config)
+ return GPIO_RESULT_INVALID_DATA;
+
+ config_data.type = GPIO_CONFIG_TYPE_HPD;
+ config_data.config.hpd = *config;
+
+ return dal_gpio_set_config(irq, &config_data);
+}
+
+/*
+ * @brief
+ * Creation and destruction
+ */
+
+struct gpio *dal_gpio_create_irq(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct gpio *irq;
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ case GPIO_ID_GPIO_PAD:
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ irq = dal_gpio_create(
+ service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (irq)
+ return irq;
+
+ ASSERT_CRITICAL(false);
+ return NULL;
+}
+
+void dal_gpio_destroy_irq(
+ struct gpio **irq)
+{
+ if (!irq || !*irq) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_gpio_close(*irq);
+ dal_gpio_destroy(irq);
+ kfree(*irq);
+
+ *irq = NULL;
+}
+
+struct ddc *dal_gpio_create_ddc(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask,
+ struct gpio_ddc_hw_info *info)
+{
+ enum gpio_id id;
+ uint32_t en;
+ struct ddc *ddc;
+
+ if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en))
+ return NULL;
+
+ ddc = kzalloc(sizeof(struct ddc), GFP_KERNEL);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ ddc->pin_data = dal_gpio_create(
+ service, GPIO_ID_DDC_DATA, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (!ddc->pin_data) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ ddc->pin_clock = dal_gpio_create(
+ service, GPIO_ID_DDC_CLOCK, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (!ddc->pin_clock) {
+ BREAK_TO_DEBUGGER();
+ goto failure_2;
+ }
+
+ ddc->hw_info = *info;
+
+ ddc->ctx = service->ctx;
+
+ return ddc;
+
+failure_2:
+ dal_gpio_destroy(&ddc->pin_data);
+
+failure_1:
+ kfree(ddc);
+
+ return NULL;
+}
+
+void dal_gpio_destroy_ddc(
+ struct ddc **ddc)
+{
+ if (!ddc || !*ddc) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ dal_ddc_close(*ddc);
+ dal_gpio_destroy(&(*ddc)->pin_data);
+ dal_gpio_destroy(&(*ddc)->pin_clock);
+ kfree(*ddc);
+
+ *ddc = NULL;
+}
+
+enum gpio_result dal_ddc_open(
+ struct ddc *ddc,
+ enum gpio_mode mode,
+ enum gpio_ddc_config_type config_type)
+{
+ enum gpio_result result;
+
+ struct gpio_config_data config_data;
+ struct hw_gpio *hw_data;
+ struct hw_gpio *hw_clock;
+
+ result = dal_gpio_open_ex(ddc->pin_data, mode);
+
+ if (result != GPIO_RESULT_OK) {
+ BREAK_TO_DEBUGGER();
+ return result;
+ }
+
+ result = dal_gpio_open_ex(ddc->pin_clock, mode);
+
+ if (result != GPIO_RESULT_OK) {
+ BREAK_TO_DEBUGGER();
+ goto failure;
+ }
+
+ /* DDC clock and data pins should belong
+ * to the same DDC block id,
+ * we use the data pin to set the pad mode. */
+
+ if (mode == GPIO_MODE_INPUT)
+ /* this is from detect_sink_type,
+ * we need extra delay there */
+ config_data.type = GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE;
+ else
+ config_data.type = GPIO_CONFIG_TYPE_DDC;
+
+ config_data.config.ddc.type = config_type;
+
+ hw_data = FROM_HW_GPIO_PIN(ddc->pin_data->pin);
+ hw_clock = FROM_HW_GPIO_PIN(ddc->pin_clock->pin);
+
+ config_data.config.ddc.data_en_bit_present = hw_data->store.en != 0;
+ config_data.config.ddc.clock_en_bit_present = hw_clock->store.en != 0;
+
+ result = dal_gpio_set_config(ddc->pin_data, &config_data);
+
+ if (result == GPIO_RESULT_OK)
+ return result;
+
+ BREAK_TO_DEBUGGER();
+
+ dal_gpio_close(ddc->pin_clock);
+
+failure:
+ dal_gpio_close(ddc->pin_data);
+
+ return result;
+}
+
+enum gpio_result dal_ddc_change_mode(
+ struct ddc *ddc,
+ enum gpio_mode mode)
+{
+ enum gpio_result result;
+
+ enum gpio_mode original_mode =
+ dal_gpio_get_mode(ddc->pin_data);
+
+ result = dal_gpio_change_mode(ddc->pin_data, mode);
+
+ /* [anaumov] DAL2 code returns GPIO_RESULT_NON_SPECIFIC_ERROR
+ * in case of failures;
+ * set_mode() is so that, in case of failure,
+ * we must explicitly set original mode */
+
+ if (result != GPIO_RESULT_OK)
+ goto failure;
+
+ result = dal_gpio_change_mode(ddc->pin_clock, mode);
+
+ if (result == GPIO_RESULT_OK)
+ return result;
+
+ dal_gpio_change_mode(ddc->pin_clock, original_mode);
+
+failure:
+ dal_gpio_change_mode(ddc->pin_data, original_mode);
+
+ return result;
+}
+
+enum gpio_ddc_line dal_ddc_get_line(
+ const struct ddc *ddc)
+{
+ return (enum gpio_ddc_line)dal_gpio_get_enum(ddc->pin_data);
+}
+
+enum gpio_result dal_ddc_set_config(
+ struct ddc *ddc,
+ enum gpio_ddc_config_type config_type)
+{
+ struct gpio_config_data config_data;
+
+ config_data.type = GPIO_CONFIG_TYPE_DDC;
+
+ config_data.config.ddc.type = config_type;
+ config_data.config.ddc.data_en_bit_present = false;
+ config_data.config.ddc.clock_en_bit_present = false;
+
+ return dal_gpio_set_config(ddc->pin_data, &config_data);
+}
+
+void dal_ddc_close(
+ struct ddc *ddc)
+{
+ dal_gpio_close(ddc->pin_clock);
+ dal_gpio_close(ddc->pin_data);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
new file mode 100644
index 000000000000..c7f3081f59cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_SERVICE_H__
+#define __DAL_GPIO_SERVICE_H__
+
+struct hw_translate;
+struct hw_factory;
+
+struct gpio_service {
+ struct dc_context *ctx;
+ struct hw_translate translate;
+ struct hw_factory factory;
+ /*
+ * @brief
+ * Business storage.
+ * For each member of 'enum gpio_id',
+ * store array of bits (packed into uint32_t slots),
+ * index individual bit by 'en' value */
+ uint32_t *busyness[GPIO_ID_COUNT];
+};
+
+enum gpio_result dal_gpio_service_open(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_mode mode,
+ struct hw_gpio_pin **ptr);
+
+void dal_gpio_service_close(
+ struct gpio_service *service,
+ struct hw_gpio_pin **ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
new file mode 100644
index 000000000000..dcfdd71b2304
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
+
+#include "gpio_regs.h"
+
+#define ONE_MORE_0 1
+#define ONE_MORE_1 2
+#define ONE_MORE_2 3
+#define ONE_MORE_3 4
+#define ONE_MORE_4 5
+#define ONE_MORE_5 6
+
+
+#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \
+ .type ## _reg = REG(DC_GPIO_HPD_## type),\
+ .type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT
+
+#define HPD_GPIO_REG_LIST(id) \
+ {\
+ HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ }
+
+#define HPD_REG_LIST(id) \
+ HPD_GPIO_REG_LIST(ONE_MORE_ ## id), \
+ .int_status = REGI(DC_HPD_INT_STATUS, HPD, id),\
+ .toggle_filt_cntl = REGI(DC_HPD_TOGGLE_FILT_CNTL, HPD, id)
+
+ #define HPD_MASK_SH_LIST(mask_sh) \
+ SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED, mask_sh),\
+ SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE, mask_sh),\
+ SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_CONNECT_INT_DELAY, mask_sh),\
+ SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_DISCONNECT_INT_DELAY, mask_sh)
+
+struct hpd_registers {
+ struct gpio_registers gpio;
+ uint32_t int_status;
+ uint32_t toggle_filt_cntl;
+};
+
+struct hpd_sh_mask {
+ /* int_status */
+ uint32_t DC_HPD_SENSE_DELAYED;
+ uint32_t DC_HPD_SENSE;
+ /* toggle_filt_cntl */
+ uint32_t DC_HPD_CONNECT_INT_DELAY;
+ uint32_t DC_HPD_DISCONNECT_INT_DELAY;
+};
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
new file mode 100644
index 000000000000..310f48965b27
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+#include "hw_ddc.h"
+
+#include "reg_helper.h"
+#include "gpio_regs.h"
+
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ddc->shifts->field_name, ddc->masks->field_name
+
+#define CTX \
+ ddc->base.base.ctx
+#define REG(reg)\
+ (ddc->regs->reg)
+
+static void destruct(
+ struct hw_ddc *pin)
+{
+ dal_hw_gpio_destruct(&pin->base);
+}
+
+static void destroy(
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
+
+ destruct(pin);
+
+ kfree(pin);
+
+ *ptr = NULL;
+}
+
+static enum gpio_result set_config(
+ struct hw_gpio_pin *ptr,
+ const struct gpio_config_data *config_data)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(ptr);
+ struct hw_gpio *hw_gpio = NULL;
+ uint32_t regval;
+ uint32_t ddc_data_pd_en = 0;
+ uint32_t ddc_clk_pd_en = 0;
+ uint32_t aux_pad_mode = 0;
+
+ hw_gpio = &ddc->base;
+
+ if (hw_gpio == NULL) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ regval = REG_GET_3(gpio.MASK_reg,
+ DC_GPIO_DDC1DATA_PD_EN, &ddc_data_pd_en,
+ DC_GPIO_DDC1CLK_PD_EN, &ddc_clk_pd_en,
+ AUX_PAD1_MODE, &aux_pad_mode);
+
+ switch (config_data->config.ddc.type) {
+ case GPIO_DDC_CONFIG_TYPE_MODE_I2C:
+ /* On plug-in, there is a transient level on the pad
+ * which must be discharged through the internal pull-down.
+ * Enable internal pull-down, 2.5msec discharge time
+ * is required for detection of AUX mode */
+ if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) {
+ if (!ddc_data_pd_en || !ddc_clk_pd_en) {
+
+ REG_SET_2(gpio.MASK_reg, regval,
+ DC_GPIO_DDC1DATA_PD_EN, 1,
+ DC_GPIO_DDC1CLK_PD_EN, 1);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+ } else {
+ uint32_t reg2;
+ uint32_t sda_pd_dis = 0;
+ uint32_t scl_pd_dis = 0;
+
+ reg2 = REG_GET_2(gpio.MASK_reg,
+ DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
+ DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
+
+ if (sda_pd_dis) {
+ REG_SET(gpio.MASK_reg, regval,
+ DC_GPIO_SDA_PD_DIS, 0);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+
+ if (!scl_pd_dis) {
+ REG_SET(gpio.MASK_reg, regval,
+ DC_GPIO_SCL_PD_DIS, 1);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+ }
+
+ if (aux_pad_mode) {
+ /* let pins to get de-asserted
+ * before setting pad to I2C mode */
+ if (config_data->config.ddc.data_en_bit_present ||
+ config_data->config.ddc.clock_en_bit_present)
+ /* [anaumov] in DAL2, there was
+ * dc_service_delay_in_microseconds(2000); */
+ msleep(2);
+
+ /* set the I2C pad mode */
+ /* read the register again,
+ * some bits may have been changed */
+ REG_UPDATE(gpio.MASK_reg,
+ AUX_PAD1_MODE, 0);
+ }
+
+ return GPIO_RESULT_OK;
+ case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
+ /* set the AUX pad mode */
+ if (!aux_pad_mode) {
+ REG_SET(gpio.MASK_reg, regval,
+ AUX_PAD1_MODE, 1);
+ }
+
+ return GPIO_RESULT_OK;
+ case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_3(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_MODE, 0);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ case GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_3(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_MODE, 1);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ case GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_2(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 0,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 0);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ }
+
+ BREAK_TO_DEBUGGER();
+
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+}
+
+static const struct hw_gpio_pin_funcs funcs = {
+ .destroy = destroy,
+ .open = dal_hw_gpio_open,
+ .get_value = dal_hw_gpio_get_value,
+ .set_value = dal_hw_gpio_set_value,
+ .set_config = set_config,
+ .change_mode = dal_hw_gpio_change_mode,
+ .close = dal_hw_gpio_close,
+};
+
+static void construct(
+ struct hw_ddc *ddc,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_gpio_construct(&ddc->base, id, en, ctx);
+ ddc->base.base.funcs = &funcs;
+}
+
+struct hw_gpio_pin *dal_hw_ddc_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct hw_ddc *pin;
+
+ if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ pin = kzalloc(sizeof(struct hw_ddc), GFP_KERNEL);
+ if (!pin) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(pin, id, en, ctx);
+ return &pin->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
new file mode 100644
index 000000000000..9690e2a885d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_DDC_H__
+#define __DAL_HW_DDC_H__
+
+#include "ddc_regs.h"
+
+struct hw_ddc {
+ struct hw_gpio base;
+ const struct ddc_registers *regs;
+ const struct ddc_sh_mask *shifts;
+ const struct ddc_sh_mask *masks;
+};
+
+#define HW_DDC_FROM_BASE(hw_gpio) \
+ container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_ddc, base)
+
+struct hw_gpio_pin *dal_hw_ddc_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
new file mode 100644
index 000000000000..87b580fa4bc9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "hw_factory.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce80/hw_factory_dce80.h"
+#include "dce110/hw_factory_dce110.h"
+#include "dce120/hw_factory_dce120.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/hw_factory_dcn10.h"
+#endif
+
+#include "diagnostics/hw_factory_diag.h"
+
+/*
+ * This unit
+ */
+
+bool dal_hw_factory_init(
+ struct hw_factory *factory,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment)
+{
+ if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
+ dal_hw_factory_diag_fpga_init(factory);
+ return true;
+ }
+
+ switch (dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ dal_hw_factory_dce80_init(factory);
+ return true;
+
+ case DCE_VERSION_10_0:
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_12_0:
+ dal_hw_factory_dce120_init(factory);
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ dal_hw_factory_dcn10_init(factory);
+ return true;
+#endif
+
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+void dal_hw_factory_destroy(
+ struct dc_context *ctx,
+ struct hw_factory **factory)
+{
+ if (!factory || !*factory) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ kfree(*factory);
+
+ *factory = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
new file mode 100644
index 000000000000..6e4dd3521935
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_H__
+#define __DAL_HW_FACTORY_H__
+
+struct hw_gpio_pin;
+struct hw_hpd;
+
+struct hw_factory {
+ uint32_t number_of_pins[GPIO_ID_COUNT];
+
+ const struct hw_factory_funcs {
+ struct hw_gpio_pin *(*create_ddc_data)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_ddc_clock)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_generic)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_hpd)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_sync)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gsl)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ void (*define_hpd_registers)(
+ struct hw_gpio_pin *pin,
+ uint32_t en);
+ void (*define_ddc_registers)(
+ struct hw_gpio_pin *pin,
+ uint32_t en);
+ } *funcs;
+};
+
+bool dal_hw_factory_init(
+ struct hw_factory *factory,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
new file mode 100644
index 000000000000..660510842ecf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+
+#include "reg_helper.h"
+#include "gpio_regs.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ gpio->regs->field_name ## _shift, gpio->regs->field_name ## _mask
+
+#define CTX \
+ gpio->base.ctx
+#define REG(reg)\
+ (gpio->regs->reg)
+
+static void store_registers(
+ struct hw_gpio *gpio)
+{
+ REG_GET(MASK_reg, MASK, &gpio->store.mask);
+ REG_GET(A_reg, A, &gpio->store.a);
+ REG_GET(EN_reg, EN, &gpio->store.en);
+ /* TODO store GPIO_MUX_CONTROL if we ever use it */
+}
+
+static void restore_registers(
+ struct hw_gpio *gpio)
+{
+ REG_UPDATE(MASK_reg, MASK, gpio->store.mask);
+ REG_UPDATE(A_reg, A, gpio->store.a);
+ REG_UPDATE(EN_reg, EN, gpio->store.en);
+ /* TODO restore GPIO_MUX_CONTROL if we ever use it */
+}
+
+bool dal_hw_gpio_open(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ store_registers(pin);
+
+ ptr->opened = (dal_hw_gpio_config_mode(pin, mode) == GPIO_RESULT_OK);
+
+ return ptr->opened;
+}
+
+enum gpio_result dal_hw_gpio_get_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t *value)
+{
+ const struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
+
+ enum gpio_result result = GPIO_RESULT_OK;
+
+ switch (ptr->mode) {
+ case GPIO_MODE_INPUT:
+ case GPIO_MODE_OUTPUT:
+ case GPIO_MODE_HARDWARE:
+ case GPIO_MODE_FAST_OUTPUT:
+ REG_GET(Y_reg, Y, value);
+ break;
+ default:
+ result = GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ return result;
+}
+
+enum gpio_result dal_hw_gpio_set_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t value)
+{
+ struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
+
+ /* This is the public interface
+ * where the input comes from client, not shifted yet
+ * (because client does not know the shifts). */
+
+ switch (ptr->mode) {
+ case GPIO_MODE_OUTPUT:
+ REG_UPDATE(A_reg, A, value);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_FAST_OUTPUT:
+ /* We use (EN) to faster switch (used in DDC GPIO).
+ * So (A) is grounded, output is driven by (EN = 0)
+ * to pull the line down (output == 0) and (EN=1)
+ * then output is tri-state */
+ REG_UPDATE(EN_reg, EN, ~value);
+ return GPIO_RESULT_OK;
+ default:
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+}
+
+enum gpio_result dal_hw_gpio_change_mode(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ return dal_hw_gpio_config_mode(pin, mode);
+}
+
+void dal_hw_gpio_close(
+ struct hw_gpio_pin *ptr)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ restore_registers(pin);
+
+ ptr->mode = GPIO_MODE_UNKNOWN;
+ ptr->opened = false;
+}
+
+enum gpio_result dal_hw_gpio_config_mode(
+ struct hw_gpio *gpio,
+ enum gpio_mode mode)
+{
+ gpio->base.mode = mode;
+
+ switch (mode) {
+ case GPIO_MODE_INPUT:
+ /* turn off output enable, act as input pin;
+ * program the pin as GPIO, mask out signal driven by HW */
+ REG_UPDATE(EN_reg, EN, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_OUTPUT:
+ /* turn on output enable, act as output pin;
+ * program the pin as GPIO, mask out signal driven by HW */
+ REG_UPDATE(A_reg, A, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_FAST_OUTPUT:
+ /* grounding the A register then use the EN register bit
+ * will have faster effect on the rise time */
+ REG_UPDATE(A_reg, A, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_HARDWARE:
+ /* program the pin as tri-state, pin is driven by HW */
+ REG_UPDATE(MASK_reg, MASK, 0);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_INTERRUPT:
+ /* Interrupt mode supported only by HPD (IrqGpio) pins. */
+ REG_UPDATE(MASK_reg, MASK, 0);
+ return GPIO_RESULT_OK;
+ default:
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+}
+
+void dal_hw_gpio_construct(
+ struct hw_gpio *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ pin->base.ctx = ctx;
+ pin->base.id = id;
+ pin->base.en = en;
+ pin->base.mode = GPIO_MODE_UNKNOWN;
+ pin->base.opened = false;
+
+ pin->store.mask = 0;
+ pin->store.a = 0;
+ pin->store.en = 0;
+ pin->store.mux = 0;
+
+ pin->mux_supported = false;
+}
+
+void dal_hw_gpio_destruct(
+ struct hw_gpio *pin)
+{
+ ASSERT(!pin->base.opened);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
new file mode 100644
index 000000000000..bca0cef18ff9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_GPIO_H__
+#define __DAL_HW_GPIO_H__
+
+#include "gpio_regs.h"
+
+#define FROM_HW_GPIO_PIN(ptr) \
+ container_of((ptr), struct hw_gpio, base)
+
+struct addr_mask {
+ uint32_t addr;
+ uint32_t mask;
+};
+
+struct hw_gpio_pin {
+ const struct hw_gpio_pin_funcs *funcs;
+ enum gpio_id id;
+ uint32_t en;
+ enum gpio_mode mode;
+ bool opened;
+ struct dc_context *ctx;
+};
+
+struct hw_gpio_pin_funcs {
+ void (*destroy)(
+ struct hw_gpio_pin **ptr);
+ bool (*open)(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+ enum gpio_result (*get_value)(
+ const struct hw_gpio_pin *pin,
+ uint32_t *value);
+ enum gpio_result (*set_value)(
+ const struct hw_gpio_pin *pin,
+ uint32_t value);
+ enum gpio_result (*set_config)(
+ struct hw_gpio_pin *pin,
+ const struct gpio_config_data *config_data);
+ enum gpio_result (*change_mode)(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+ void (*close)(
+ struct hw_gpio_pin *pin);
+};
+
+
+struct hw_gpio;
+
+/* Register indices are represented by member variables
+ * and are to be filled in by constructors of derived classes.
+ * These members permit the use of common code
+ * for programming registers, where the sequence is the same
+ * but register sets are different.
+ * Some GPIOs have HW mux which allows to choose
+ * what is the source of the signal in HW mode */
+
+struct hw_gpio_pin_reg {
+ struct addr_mask DC_GPIO_DATA_MASK;
+ struct addr_mask DC_GPIO_DATA_A;
+ struct addr_mask DC_GPIO_DATA_EN;
+ struct addr_mask DC_GPIO_DATA_Y;
+};
+
+struct hw_gpio_mux_reg {
+ struct addr_mask GPIO_MUX_CONTROL;
+ struct addr_mask GPIO_MUX_STEREO_SEL;
+};
+
+struct hw_gpio {
+ struct hw_gpio_pin base;
+
+ /* variables to save register value */
+ struct {
+ uint32_t mask;
+ uint32_t a;
+ uint32_t en;
+ uint32_t mux;
+ } store;
+
+ /* GPIO MUX support */
+ bool mux_supported;
+ const struct gpio_registers *regs;
+};
+
+#define HW_GPIO_FROM_BASE(hw_gpio_pin) \
+ container_of((hw_gpio_pin), struct hw_gpio, base)
+
+void dal_hw_gpio_construct(
+ struct hw_gpio *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx);
+
+bool dal_hw_gpio_open(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+
+enum gpio_result dal_hw_gpio_get_value(
+ const struct hw_gpio_pin *pin,
+ uint32_t *value);
+
+enum gpio_result dal_hw_gpio_config_mode(
+ struct hw_gpio *pin,
+ enum gpio_mode mode);
+
+void dal_hw_gpio_destruct(
+ struct hw_gpio *pin);
+
+enum gpio_result dal_hw_gpio_set_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t value);
+
+enum gpio_result dal_hw_gpio_change_mode(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode);
+
+void dal_hw_gpio_close(
+ struct hw_gpio_pin *ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
new file mode 100644
index 000000000000..784feccc5853
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+#include "hw_hpd.h"
+
+#include "reg_helper.h"
+#include "hpd_regs.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hpd->shifts->field_name, hpd->masks->field_name
+
+#define CTX \
+ hpd->base.base.ctx
+#define REG(reg)\
+ (hpd->regs->reg)
+
+static void dal_hw_hpd_construct(
+ struct hw_hpd *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_gpio_construct(&pin->base, id, en, ctx);
+}
+
+static void dal_hw_hpd_destruct(
+ struct hw_hpd *pin)
+{
+ dal_hw_gpio_destruct(&pin->base);
+}
+
+
+static void destruct(
+ struct hw_hpd *hpd)
+{
+ dal_hw_hpd_destruct(hpd);
+}
+
+static void destroy(
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
+
+ destruct(hpd);
+
+ kfree(hpd);
+
+ *ptr = NULL;
+}
+
+static enum gpio_result get_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t *value)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
+ uint32_t hpd_delayed = 0;
+
+ /* in Interrupt mode we ask for SENSE bit */
+
+ if (ptr->mode == GPIO_MODE_INTERRUPT) {
+
+ REG_GET(int_status,
+ DC_HPD_SENSE_DELAYED, &hpd_delayed);
+
+ *value = hpd_delayed;
+ return GPIO_RESULT_OK;
+ }
+
+ /* in any other modes, operate as normal GPIO */
+
+ return dal_hw_gpio_get_value(ptr, value);
+}
+
+static enum gpio_result set_config(
+ struct hw_gpio_pin *ptr,
+ const struct gpio_config_data *config_data)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
+
+ if (!config_data)
+ return GPIO_RESULT_INVALID_DATA;
+
+ REG_UPDATE_2(toggle_filt_cntl,
+ DC_HPD_CONNECT_INT_DELAY, config_data->config.hpd.delay_on_connect / 10,
+ DC_HPD_DISCONNECT_INT_DELAY, config_data->config.hpd.delay_on_disconnect / 10);
+
+ return GPIO_RESULT_OK;
+}
+
+static const struct hw_gpio_pin_funcs funcs = {
+ .destroy = destroy,
+ .open = dal_hw_gpio_open,
+ .get_value = get_value,
+ .set_value = dal_hw_gpio_set_value,
+ .set_config = set_config,
+ .change_mode = dal_hw_gpio_change_mode,
+ .close = dal_hw_gpio_close,
+};
+
+static void construct(
+ struct hw_hpd *hpd,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_hpd_construct(hpd, id, en, ctx);
+ hpd->base.base.funcs = &funcs;
+}
+
+struct hw_gpio_pin *dal_hw_hpd_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct hw_hpd *hpd;
+
+ if (id != GPIO_ID_HPD) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ if ((en < GPIO_HPD_MIN) || (en > GPIO_HPD_MAX)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ hpd = kzalloc(sizeof(struct hw_hpd), GFP_KERNEL);
+ if (!hpd) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(hpd, id, en, ctx);
+ return &hpd->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
new file mode 100644
index 000000000000..4ab7a208f781
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_HPD_H__
+#define __DAL_HW_HPD_H__
+
+#include "hpd_regs.h"
+
+struct hw_hpd {
+ struct hw_gpio base;
+ const struct hpd_registers *regs;
+ const struct hpd_sh_mask *shifts;
+ const struct hpd_sh_mask *masks;
+};
+
+#define HW_HPD_FROM_BASE(hw_gpio) \
+ container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_hpd, base)
+
+struct hw_gpio_pin *dal_hw_hpd_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
new file mode 100644
index 000000000000..0ae8ace25739
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "hw_translate.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce80/hw_translate_dce80.h"
+#include "dce110/hw_translate_dce110.h"
+#include "dce120/hw_translate_dce120.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/hw_translate_dcn10.h"
+#endif
+
+#include "diagnostics/hw_translate_diag.h"
+
+/*
+ * This unit
+ */
+
+bool dal_hw_translate_init(
+ struct hw_translate *translate,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment)
+{
+ if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
+ dal_hw_translate_diag_fpga_init(translate);
+ return true;
+ }
+
+ switch (dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ dal_hw_translate_dce80_init(translate);
+ return true;
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+ dal_hw_translate_dce110_init(translate);
+ return true;
+ case DCE_VERSION_12_0:
+ dal_hw_translate_dce120_init(translate);
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ dal_hw_translate_dcn10_init(translate);
+ return true;
+#endif
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
new file mode 100644
index 000000000000..3a7d89ca1605
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_H__
+#define __DAL_HW_TRANSLATE_H__
+
+struct hw_translate_funcs {
+ bool (*offset_to_id)(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en);
+ bool (*id_to_offset)(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info);
+};
+
+struct hw_translate {
+ const struct hw_translate_funcs *funcs;
+};
+
+bool dal_hw_translate_init(
+ struct hw_translate *translate,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
new file mode 100644
index 000000000000..352885cb4d07
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -0,0 +1,99 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'i2c' sub-component of DAL.
+# It provides the control and status of HW i2c engine of the adapter.
+
+I2CAUX = aux_engine.o engine_base.o i2caux.o i2c_engine.o \
+ i2c_generic_hw_engine.o i2c_hw_engine.o i2c_sw_engine.o
+
+AMD_DAL_I2CAUX = $(addprefix $(AMDDALPATH)/dc/i2caux/,$(I2CAUX))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX)
+
+###############################################################################
+# DCE 8x family
+###############################################################################
+I2CAUX_DCE80 = i2caux_dce80.o i2c_hw_engine_dce80.o \
+ i2c_sw_engine_dce80.o
+
+AMD_DAL_I2CAUX_DCE80 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce80/,$(I2CAUX_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE80)
+
+###############################################################################
+# DCE 100 family
+###############################################################################
+I2CAUX_DCE100 = i2caux_dce100.o
+
+AMD_DAL_I2CAUX_DCE100 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce100/,$(I2CAUX_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE100)
+
+###############################################################################
+# DCE 110 family
+###############################################################################
+I2CAUX_DCE110 = i2caux_dce110.o i2c_sw_engine_dce110.o i2c_hw_engine_dce110.o \
+ aux_engine_dce110.o
+
+AMD_DAL_I2CAUX_DCE110 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce110/,$(I2CAUX_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE110)
+
+###############################################################################
+# DCE 112 family
+###############################################################################
+I2CAUX_DCE112 = i2caux_dce112.o
+
+AMD_DAL_I2CAUX_DCE112 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce112/,$(I2CAUX_DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
+
+###############################################################################
+# DCN 1.0 family
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+I2CAUX_DCN1 = i2caux_dcn10.o
+
+AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCN1)
+endif
+
+###############################################################################
+# DCE 120 family
+###############################################################################
+I2CAUX_DCE120 = i2caux_dce120.o
+
+AMD_DAL_I2CAUX_DCE120 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce120/,$(I2CAUX_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE120)
+
+###############################################################################
+# Diagnostics on FPGA
+###############################################################################
+I2CAUX_DIAG = i2caux_diag.o
+
+AMD_DAL_I2CAUX_DIAG = $(addprefix $(AMDDALPATH)/dc/i2caux/diagnostics/,$(I2CAUX_DIAG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DIAG)
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
new file mode 100644
index 000000000000..fc7a7d4ebca5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "aux_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "include/link_service_types.h"
+
+/*
+ * This unit
+ */
+
+enum {
+ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+ AUX_TIMED_OUT_RETRY_COUNTER = 2,
+ AUX_DEFER_RETRY_COUNTER = 6
+};
+
+#define FROM_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine, base)
+
+enum i2caux_engine_type dal_aux_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+bool dal_aux_engine_acquire(
+ struct engine *engine,
+ struct ddc *ddc)
+{
+ struct aux_engine *aux_engine = FROM_ENGINE(engine);
+
+ enum gpio_result result;
+ if (aux_engine->funcs->is_engine_available) {
+ /*check whether SW could use the engine*/
+ if (!aux_engine->funcs->is_engine_available(aux_engine)) {
+ return false;
+ }
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ if (!aux_engine->funcs->acquire_engine(aux_engine)) {
+ dal_ddc_close(ddc);
+ return false;
+ }
+
+ engine->ddc = ddc;
+
+ return true;
+}
+
+struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+ uint32_t offset;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t invalid_reply_retry_aux_on_ack;
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+static void process_read_reply(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->defer_retry_aux = 0;
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static void process_read_request(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else {
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->buffer;
+
+ process_read_reply(engine, ctx);
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here */
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static bool read_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct read_command_context ctx;
+
+ ctx.buffer = request->payload.data;
+ ctx.current_read_length = request->payload.length;
+ ctx.offset = 0;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.invalid_reply_retry_aux_on_ack = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ do {
+ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+ ctx.request.data = ctx.buffer + ctx.offset;
+ ctx.request.length = ctx.current_read_length;
+
+ process_read_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ return ctx.operation_succeeded;
+}
+
+struct write_command_context {
+ bool mot;
+
+ uint8_t *buffer;
+ uint32_t current_write_length;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t max_defer_retry;
+ uint32_t ack_m_retry;
+
+ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+static void process_write_reply(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->operation_succeeded = true;
+
+ if (ctx->returned_byte) {
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ctx->current_write_length = 0;
+
+ ++ctx->ack_m_retry;
+
+ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(300);
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->defer_retry_aux = 0;
+ ctx->ack_m_retry = 0;
+ ctx->transaction_complete = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+ ctx->current_write_length = 0;
+
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static void process_write_request(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->reply_data;
+
+ process_write_reply(engine, ctx);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here */
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static bool write_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct write_command_context ctx;
+
+ ctx.mot = middle_of_transaction;
+ ctx.buffer = request->payload.data;
+ ctx.current_write_length = request->payload.length;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.ack_m_retry = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ ctx.max_defer_retry =
+ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+ do {
+ ctx.request.data = ctx.buffer;
+ ctx.request.length = ctx.current_write_length;
+
+ process_write_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ return ctx.operation_succeeded;
+}
+
+static bool end_of_transaction_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request)
+{
+ struct i2caux_transaction_request dummy_request;
+ uint8_t dummy_data;
+
+ /* [tcheng] We only need to send the stop (read with MOT = 0)
+ * for I2C-over-Aux, not native AUX */
+
+ if (request->payload.address_space !=
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+ return false;
+
+ dummy_request.operation = request->operation;
+ dummy_request.payload.address_space = request->payload.address_space;
+ dummy_request.payload.address = request->payload.address;
+
+ /*
+ * Add a dummy byte due to some receiver quirk
+ * where one byte is sent along with MOT = 0.
+ * Ideally this should be 0.
+ */
+
+ dummy_request.payload.length = 0;
+ dummy_request.payload.data = &dummy_data;
+
+ if (request->operation == I2CAUX_TRANSACTION_READ)
+ return read_command(engine, &dummy_request, false);
+ else
+ return write_command(engine, &dummy_request, false);
+
+ /* according Syed, it does not need now DoDummyMOT */
+}
+
+bool dal_aux_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct aux_engine *aux_engine = FROM_ENGINE(engine);
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+ result = read_command(aux_engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+ result = write_command(aux_engine, request, mot_used);
+ break;
+ default:
+ result = false;
+ }
+
+ /* [tcheng]
+ * need to send stop for the last transaction to free up the AUX
+ * if the above command fails, this would be the last transaction */
+
+ if (!middle_of_transaction || !result)
+ end_of_transaction_command(aux_engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+}
+
+void dal_aux_engine_construct(
+ struct aux_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct_engine(&engine->base, ctx);
+ engine->delay = 0;
+ engine->max_defer_write_retry = 0;
+}
+
+void dal_aux_engine_destruct(
+ struct aux_engine *engine)
+{
+ dal_i2caux_destruct_engine(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
new file mode 100644
index 000000000000..8e71324ccb10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+enum aux_transaction_type {
+ AUX_TRANSACTION_TYPE_DP,
+ AUX_TRANSACTION_TYPE_I2C
+};
+
+struct aux_request_transaction_data {
+ enum aux_transaction_type type;
+ enum i2caux_transaction_action action;
+ /* 20-bit AUX channel transaction address */
+ uint32_t address;
+ /* delay, in 100-microsecond units */
+ uint8_t delay;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_transaction_reply {
+ AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+ AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+ AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+ AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+ enum aux_transaction_reply status;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_channel_operation_result {
+ AUX_CHANNEL_OPERATION_SUCCEEDED,
+ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
+};
+
+struct aux_engine;
+
+struct aux_engine_funcs {
+ void (*destroy)(
+ struct aux_engine **ptr);
+ bool (*acquire_engine)(
+ struct aux_engine *engine);
+ void (*configure)(
+ struct aux_engine *engine,
+ union aux_config cfg);
+ void (*submit_channel_request)(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+ bool (*is_engine_available) (
+ struct aux_engine *engine);
+};
+
+struct aux_engine {
+ struct engine base;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+
+ bool acquire_reset;
+};
+
+void dal_aux_engine_construct(
+ struct aux_engine *engine,
+ struct dc_context *ctx);
+
+void dal_aux_engine_destruct(
+ struct aux_engine *engine);
+bool dal_aux_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+bool dal_aux_engine_acquire(
+ struct engine *ptr,
+ struct ddc *ddc);
+enum i2caux_engine_type dal_aux_engine_get_engine_type(
+ const struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
new file mode 100644
index 000000000000..e8d3781deaed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce100_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dce100_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE100(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE100(_MASK)
+};
+
+struct i2caux *dal_i2caux_dce100_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce100_aux_regs,
+ dce100_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
new file mode 100644
index 000000000000..2b508d3e0ef4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE100_H__
+#define __DAL_I2C_AUX_DCE100_H__
+
+struct i2caux *dal_i2caux_dce100_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
new file mode 100644
index 000000000000..81f9f3e34c10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../aux_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "aux_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+ aux110->base.base.ctx
+#define REG(reg_name)\
+ (aux110->regs->reg_name)
+#include "reg_helper.h"
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct aux_engine *'
+ * to 'struct aux_engine_dce110 *'
+ */
+#define FROM_AUX_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine_dce110, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct aux_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
+
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+static void destruct(
+ struct aux_engine_dce110 *engine);
+
+static void destroy(
+ struct aux_engine **aux_engine)
+{
+ struct aux_engine_dce110 *engine = FROM_AUX_ENGINE(*aux_engine);
+
+ destruct(engine);
+
+ kfree(engine);
+
+ *aux_engine = NULL;
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+ if (field == DMCU_CAN_ACCESS_AUX)
+ return false;
+ /* enable AUX before request SW to access AUX */
+ value = REG_READ(AUX_CONTROL);
+ field = get_reg_field_value(value,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (field == 0) {
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (REG(AUX_RESET_MASK)) {
+ /*DP_AUX block as part of the enable sequence*/
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_RESET);
+ }
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ if (REG(AUX_RESET_MASK)) {
+ /*poll HW to make sure reset it done*/
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+ 1, 11);
+
+ set_reg_field_value(
+ value,
+ 0,
+ AUX_CONTROL,
+ AUX_RESET);
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+ 1, 11);
+ }
+ } /*if (field)*/
+
+ /* request SW to access AUX */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+ value = REG_READ(AUX_ARB_CONTROL);
+ field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+ ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+ ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+ (0xFF & (address))
+
+static void submit_channel_request(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t value;
+ uint32_t length;
+
+ bool is_write =
+ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+ /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+
+ /* set the delay and the number of bytes to write */
+
+ /* The length include
+ * the 4 bit header and the 20 bit address
+ * (that is 3 byte).
+ * If the requested length is non zero this means
+ * an addition byte specifying the length is required. */
+
+ length = request->length ? 4 : 3;
+ if (is_write)
+ length += request->length;
+
+ REG_UPDATE_2(AUX_SW_CONTROL,
+ AUX_SW_START_DELAY, request->delay,
+ AUX_SW_WR_BYTES, length);
+
+ /* program action and address and payload data (if 'is_write') */
+ value = REG_UPDATE_4(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_DATA_RW, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+ value = REG_SET_2(AUX_SW_DATA, value,
+ AUX_SW_AUTOINCREMENT_DISABLE, 0,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+ if (request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->length - 1);
+ }
+
+ if (is_write) {
+ /* Load the HW buffer with the Data to be sent.
+ * This is relevant for write operation.
+ * For read, the data recived data will be
+ * processed in process_channel_reply(). */
+ uint32_t i = 0;
+
+ while (i < request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->data[i]);
+
+ ++i;
+ }
+ }
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
+
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ /* Need to do a read to get the number of bytes to process
+ * Alternatively, this information can be passed -
+ * but that causes coupling which isn't good either. */
+
+ uint32_t bytes_replied;
+ uint32_t value;
+
+ value = REG_GET(AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+
+ if (bytes_replied) {
+ uint32_t reply_result;
+
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA,
+ AUX_SW_DATA, &reply_result);
+
+ reply_result = reply_result >> 4;
+
+ switch (reply_result) {
+ case 0: /* ACK */ {
+ uint32_t i = 0;
+
+ /* first byte was already used
+ * to get the command status */
+ --bytes_replied;
+
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
+
+ REG_GET(AUX_SW_DATA,
+ AUX_SW_DATA, &aux_sw_data_val);
+
+ reply->data[i] = aux_sw_data_val;
+ ++i;
+ }
+
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+ }
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+ break;
+ case 2: /* DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+ break;
+ case 4: /* AUX ACK / I2C NACK */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+ break;
+ case 8: /* AUX ACK / I2C DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+ break;
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+ } else {
+ /* Need to handle an error case...
+ * hopefully, upper layer function won't call this function
+ * if the number of bytes in the reply was 0
+ * because there was surely an error that was asserted
+ * that should have been handled
+ * for hot plug case, this could happens*/
+ if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ ASSERT_CRITICAL(false);
+ }
+}
+
+static enum aux_channel_operation_result get_channel_status(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value;
+
+ if (returned_bytes == NULL) {
+ /*caller pass NULL pointer*/
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+ }
+ *returned_bytes = 0;
+
+ /* poll to make sure that SW_DONE is asserted */
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
+ /* Note that the following bits are set in 'status.bits'
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+ *
+ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+ * HW debugging bit and should be ignored. */
+ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+ (value &
+ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+ *returned_bytes = get_reg_field_value(value,
+ AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT);
+
+ if (*returned_bytes == 0)
+ return
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+ else {
+ *returned_bytes -= 1;
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+ /*time_elapsed >= aux_engine->timeout_period */
+ if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ ASSERT_CRITICAL(false);
+
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+ .destroy = destroy,
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .submit_request = dal_aux_engine_submit_request,
+ .get_engine_type = dal_aux_engine_get_engine_type,
+ .acquire = dal_aux_engine_acquire,
+};
+
+static void construct(
+ struct aux_engine_dce110 *engine,
+ const struct aux_engine_dce110_init_data *aux_init_data)
+{
+ dal_aux_engine_construct(&engine->base, aux_init_data->ctx);
+ engine->base.base.funcs = &engine_funcs;
+ engine->base.funcs = &aux_engine_funcs;
+
+ engine->timeout_period = aux_init_data->timeout_period;
+ engine->regs = aux_init_data->regs;
+}
+
+static void destruct(
+ struct aux_engine_dce110 *engine)
+{
+ struct aux_engine_dce110 *aux110 = engine;
+/*temp w/a, to do*/
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_DMCU_DONE_USING_AUX_REG, 1);
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+ dal_aux_engine_destruct(&engine->base);
+}
+
+struct aux_engine *dal_aux_engine_dce110_create(
+ const struct aux_engine_dce110_init_data *aux_init_data)
+{
+ struct aux_engine_dce110 *engine;
+
+ if (!aux_init_data) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+
+ if (!engine) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine, aux_init_data);
+ return &engine->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
new file mode 100644
index 000000000000..85ee82162590
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+
+#include "../aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_DATA, DP_AUX, id), \
+ SRI(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_STATUS, DP_AUX, id), \
+ SR(AUXN_IMPCAL), \
+ SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_ARB_CONTROL;
+ uint32_t AUX_SW_DATA;
+ uint32_t AUX_SW_CONTROL;
+ uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_SW_STATUS;
+ uint32_t AUXN_IMPCAL;
+ uint32_t AUXP_IMPCAL;
+
+ uint32_t AUX_RESET_MASK;
+};
+
+struct aux_engine_dce110 {
+ struct aux_engine base;
+ const struct dce110_aux_registers *regs;
+ struct {
+ uint32_t aux_control;
+ uint32_t aux_arb_control;
+ uint32_t aux_sw_data;
+ uint32_t aux_sw_control;
+ uint32_t aux_interrupt_control;
+ uint32_t aux_sw_status;
+ } addr;
+ uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+ uint32_t engine_id;
+ uint32_t timeout_period;
+ struct dc_context *ctx;
+ const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dal_aux_engine_dce110_create(
+ const struct aux_engine_dce110_init_data *aux_init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
new file mode 100644
index 000000000000..56e25b3d65fd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_hw_engine.h"
+#include "../i2c_generic_hw_engine.h"
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+#include "reg_helper.h"
+
+/*
+ * This unit
+ */
+
+enum dc_i2c_status {
+ DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+};
+
+enum dc_i2c_arbitration {
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
+};
+
+enum {
+ /* No timeout in HW
+ * (timeout implemented in SW by querying status) */
+ I2C_SETUP_TIME_LIMIT = 255,
+ I2C_HW_BUFFER_SIZE = 538
+};
+
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_hw_engine *'
+ * to pointer 'struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine_dce110, base)
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_engine *'
+ * to pointer to 'struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast pointer to 'struct engine *'
+ * to 'pointer to struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+#define CTX \
+ hw_engine->base.base.base.ctx
+
+#define REG(reg_name)\
+ (hw_engine->regs->reg_name)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hw_engine->i2c_shift->field_name, hw_engine->i2c_mask->field_name
+
+#include "reg_helper.h"
+
+static void disable_i2c_hw_engine(
+ struct i2c_hw_engine_dce110 *hw_engine)
+{
+ REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 0);
+}
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = NULL;
+ bool safe_to_reset;
+
+ base = &hw_engine->base.base;
+
+ /* Restore original HW engine speed */
+
+ base->funcs->set_speed(base, hw_engine->base.original_speed);
+
+ /* Release I2C */
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
+
+ /* Reset HW engine */
+ {
+ uint32_t i2c_sw_status = 0;
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ /* if used by SW, safe to reset */
+ safe_to_reset = (i2c_sw_status == 1);
+ }
+
+ if (safe_to_reset)
+ REG_UPDATE_2(
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET, 1,
+ DC_I2C_SW_STATUS_RESET, 1);
+ else
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
+
+ /* HW I2c engine - clock gating feature */
+ if (!hw_engine->engine_keep_power_up_count)
+ disable_i2c_hw_engine(hw_engine);
+}
+
+static bool setup_engine(
+ struct i2c_engine *i2c_engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+
+ /* Program pin select */
+ REG_UPDATE_6(
+ DC_I2C_CONTROL,
+ DC_I2C_GO, 0,
+ DC_I2C_SOFT_RESET, 0,
+ DC_I2C_SEND_RESET, 0,
+ DC_I2C_SW_STATUS_RESET, 1,
+ DC_I2C_TRANSACTION_COUNT, 0,
+ DC_I2C_DDC_SELECT, hw_engine->engine_id);
+
+ /* Program time limit */
+ REG_UPDATE_N(
+ SETUP, 2,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+
+ /* Program HW priority
+ * set to High - interrupt software I2C at any time
+ * Enable restart of SW I2C that was interrupted by HW
+ * disable queuing of software while I2C is in use by HW */
+ REG_UPDATE_2(
+ DC_I2C_ARBITRATION,
+ DC_I2C_NO_QUEUED_SW_GO, 0,
+ DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
+
+ return true;
+}
+
+static uint32_t get_speed(
+ const struct i2c_engine *i2c_engine)
+{
+ const struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t pre_scale = 0;
+
+ REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
+
+ /* [anaumov] it seems following is unnecessary */
+ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
+ return pre_scale ?
+ hw_engine->reference_frequency / pre_scale :
+ hw_engine->base.default_speed;
+}
+
+static void set_speed(
+ struct i2c_engine *i2c_engine,
+ uint32_t speed)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+
+ if (speed) {
+ if (hw_engine->i2c_mask->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+ REG_UPDATE_N(
+ SPEED, 3,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+ else
+ REG_UPDATE_N(
+ SPEED, 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
+ }
+}
+
+static inline void reset_hw_engine(struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+
+ REG_UPDATE_2(
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET, 1,
+ DC_I2C_SW_STATUS_RESET, 1);
+}
+
+static bool is_hw_busy(struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+ uint32_t i2c_sw_status = 0;
+
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
+ return false;
+
+ reset_hw_engine(engine);
+
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+}
+
+
+#define STOP_TRANS_PREDICAT \
+ ((hw_engine->transaction_count == 3) || \
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || \
+ (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ))
+
+#define SET_I2C_TRANSACTION(id) \
+ do { \
+ REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)), \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \
+ if (STOP_TRANS_PREDICAT) \
+ last_transaction = true; \
+ } while (false)
+
+
+static bool process_transaction(
+ struct i2c_hw_engine_dce110 *hw_engine,
+ struct i2c_request_transaction_data *request)
+{
+ uint32_t length = request->length;
+ uint8_t *buffer = request->data;
+ uint32_t value = 0;
+
+ bool last_transaction = false;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = hw_engine->base.base.base.ctx;
+
+
+
+ switch (hw_engine->transaction_count) {
+ case 0:
+ SET_I2C_TRANSACTION(0);
+ break;
+ case 1:
+ SET_I2C_TRANSACTION(1);
+ break;
+ case 2:
+ SET_I2C_TRANSACTION(2);
+ break;
+ case 3:
+ SET_I2C_TRANSACTION(3);
+ break;
+ default:
+ /* TODO Warning ? */
+ break;
+ }
+
+
+ /* Write the I2C address and I2C data
+ * into the hardware circular buffer, one byte per entry.
+ * As an example, the 7-bit I2C slave address for CRT monitor
+ * for reading DDC/EDID information is 0b1010001.
+ * For an I2C send operation, the LSB must be programmed to 0;
+ * for I2C receive operation, the LSB must be programmed to 1. */
+ if (hw_engine->transaction_count == 0) {
+ value = REG_SET_4(DC_I2C_DATA, 0,
+ DC_I2C_DATA_RW, false,
+ DC_I2C_DATA, request->address,
+ DC_I2C_INDEX, 0,
+ DC_I2C_INDEX_WRITE, 1);
+ hw_engine->buffer_used_write = 0;
+ } else
+ value = REG_SET_2(DC_I2C_DATA, 0,
+ DC_I2C_DATA_RW, false,
+ DC_I2C_DATA, request->address);
+
+ hw_engine->buffer_used_write++;
+
+ if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+ while (length) {
+ REG_SET_2(DC_I2C_DATA, value,
+ DC_I2C_INDEX_WRITE, 0,
+ DC_I2C_DATA, *buffer++);
+ hw_engine->buffer_used_write++;
+ --length;
+ }
+ }
+
+ ++hw_engine->transaction_count;
+ hw_engine->buffer_used_bytes += length + 1;
+
+ return last_transaction;
+}
+
+static void execute_transaction(
+ struct i2c_hw_engine_dce110 *hw_engine)
+{
+ REG_UPDATE_N(SETUP, 5,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
+
+
+ REG_UPDATE_5(DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET, 0,
+ DC_I2C_SW_STATUS_RESET, 0,
+ DC_I2C_SEND_RESET, 0,
+ DC_I2C_GO, 0,
+ DC_I2C_TRANSACTION_COUNT, hw_engine->transaction_count - 1);
+
+ /* start I2C transfer */
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
+
+ /* all transactions were executed and HW buffer became empty
+ * (even though it actually happens when status becomes DONE) */
+ hw_engine->transaction_count = 0;
+ hw_engine->buffer_used_bytes = 0;
+}
+
+static void submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ if (!process_transaction(FROM_I2C_ENGINE(engine), request))
+ return;
+
+ if (is_hw_busy(&engine->base)) {
+ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ return;
+ }
+
+ execute_transaction(FROM_I2C_ENGINE(engine));
+}
+
+static void process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+ uint32_t length = reply->length;
+ uint8_t *buffer = reply->data;
+
+ struct i2c_hw_engine_dce110 *hw_engine =
+ FROM_I2C_ENGINE(engine);
+
+
+ REG_SET_3(DC_I2C_DATA, 0,
+ DC_I2C_INDEX, hw_engine->buffer_used_write,
+ DC_I2C_DATA_RW, 1,
+ DC_I2C_INDEX_WRITE, 1);
+
+ while (length) {
+ /* after reading the status,
+ * if the I2C operation executed successfully
+ * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
+ * should read data bytes from I2C circular data buffer */
+
+ uint32_t i2c_data;
+
+ REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
+ *buffer++ = i2c_data;
+
+ --length;
+ }
+}
+
+static enum i2c_channel_operation_result get_channel_status(
+ struct i2c_engine *i2c_engine,
+ uint8_t *returned_bytes)
+{
+ uint32_t i2c_sw_status = 0;
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t value =
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_STOPPED_ON_NACK)
+ return I2C_CHANNEL_OPERATION_NO_RESPONSE;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_TIMEOUT)
+ return I2C_CHANNEL_OPERATION_TIMEOUT;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_ABORTED)
+ return I2C_CHANNEL_OPERATION_FAILED;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_DONE)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ /*
+ * this is the case when HW used for communication, I2C_SW_STATUS
+ * could be zero
+ */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+static uint32_t get_hw_buffer_available_size(
+ const struct i2c_hw_engine *engine)
+{
+ return I2C_HW_BUFFER_SIZE -
+ FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
+}
+
+static uint32_t get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ uint32_t speed = engine->base.funcs->get_speed(&engine->base);
+
+ uint32_t period_timeout;
+ uint32_t num_of_clock_stretches;
+
+ if (!speed)
+ return 0;
+
+ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
+
+ num_of_clock_stretches = 1 + (length << 3) + 1;
+ num_of_clock_stretches +=
+ (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
+ (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
+
+ return period_timeout * num_of_clock_stretches;
+}
+
+static void destroy(
+ struct i2c_engine **i2c_engine)
+{
+ struct i2c_hw_engine_dce110 *engine_dce110 =
+ FROM_I2C_ENGINE(*i2c_engine);
+
+ dal_i2c_hw_engine_destruct(&engine_dce110->base);
+
+ kfree(engine_dce110);
+
+ *i2c_engine = NULL;
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .destroy = destroy,
+ .get_speed = get_speed,
+ .set_speed = set_speed,
+ .setup_engine = setup_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .acquire_engine = dal_i2c_hw_engine_acquire_engine,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_hw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_hw_engine_submit_request,
+};
+
+static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
+ .get_hw_buffer_available_size = get_hw_buffer_available_size,
+ .get_transaction_timeout = get_transaction_timeout,
+ .wait_on_operation_result = dal_i2c_hw_engine_wait_on_operation_result,
+};
+
+static void construct(
+ struct i2c_hw_engine_dce110 *hw_engine,
+ const struct i2c_hw_engine_dce110_create_arg *arg)
+{
+ uint32_t xtal_ref_div = 0;
+
+ dal_i2c_hw_engine_construct(&hw_engine->base, arg->ctx);
+
+ hw_engine->base.base.base.funcs = &engine_funcs;
+ hw_engine->base.base.funcs = &i2c_engine_funcs;
+ hw_engine->base.funcs = &i2c_hw_engine_funcs;
+ hw_engine->base.default_speed = arg->default_speed;
+
+ hw_engine->regs = arg->regs;
+ hw_engine->i2c_shift = arg->i2c_shift;
+ hw_engine->i2c_mask = arg->i2c_mask;
+
+ hw_engine->engine_id = arg->engine_id;
+
+ hw_engine->buffer_used_bytes = 0;
+ hw_engine->transaction_count = 0;
+ hw_engine->engine_keep_power_up_count = 1;
+
+
+ REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+
+ if (xtal_ref_div == 0) {
+ dm_logger_write(
+ hw_engine->base.base.base.ctx->logger, LOG_WARNING,
+ "Invalid base timer divider\n",
+ __func__);
+ xtal_ref_div = 2;
+ }
+
+ /*Calculating Reference Clock by divding original frequency by
+ * XTAL_REF_DIV.
+ * At upper level, uint32_t reference_frequency =
+ * dal_i2caux_get_reference_clock(as) >> 1
+ * which already divided by 2. So we need x2 to get original
+ * reference clock from ppll_info
+ */
+ hw_engine->reference_frequency =
+ (arg->reference_frequency * 2) / xtal_ref_div;
+}
+
+struct i2c_engine *dal_i2c_hw_engine_dce110_create(
+ const struct i2c_hw_engine_dce110_create_arg *arg)
+{
+ struct i2c_hw_engine_dce110 *engine_dce10;
+
+ if (!arg) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+ if (!arg->reference_frequency) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine_dce10 = kzalloc(sizeof(struct i2c_hw_engine_dce110),
+ GFP_KERNEL);
+
+ if (!engine_dce10) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine_dce10, arg);
+ return &engine_dce10->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
new file mode 100644
index 000000000000..5bb04085f670
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_DCE110_H__
+#define __DAL_I2C_HW_ENGINE_DCE110_H__
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\
+ SRI(SETUP, DC_I2C_DDC, id),\
+ SRI(SPEED, DC_I2C_DDC, id),\
+ SR(DC_I2C_ARBITRATION),\
+ SR(DC_I2C_CONTROL),\
+ SR(DC_I2C_SW_STATUS),\
+ SR(DC_I2C_TRANSACTION0),\
+ SR(DC_I2C_TRANSACTION1),\
+ SR(DC_I2C_TRANSACTION2),\
+ SR(DC_I2C_TRANSACTION3),\
+ SR(DC_I2C_DATA),\
+ SR(MICROSECOND_TIME_BASE_DIV)
+
+#define I2C_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_DONE, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
+
+#define I2C_COMMON_MASK_SH_LIST_DCE100(mask_sh)\
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh)
+
+struct dce110_i2c_hw_engine_shift {
+ uint8_t DC_I2C_DDC1_ENABLE;
+ uint8_t DC_I2C_DDC1_TIME_LIMIT;
+ uint8_t DC_I2C_DDC1_DATA_DRIVE_EN;
+ uint8_t DC_I2C_DDC1_CLK_DRIVE_EN;
+ uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL;
+ uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
+ uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
+ uint8_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint8_t DC_I2C_NO_QUEUED_SW_GO;
+ uint8_t DC_I2C_SW_PRIORITY;
+ uint8_t DC_I2C_SOFT_RESET;
+ uint8_t DC_I2C_SW_STATUS_RESET;
+ uint8_t DC_I2C_GO;
+ uint8_t DC_I2C_SEND_RESET;
+ uint8_t DC_I2C_TRANSACTION_COUNT;
+ uint8_t DC_I2C_DDC_SELECT;
+ uint8_t DC_I2C_DDC1_PRESCALE;
+ uint8_t DC_I2C_DDC1_THRESHOLD;
+ uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
+ uint8_t DC_I2C_SW_STOPPED_ON_NACK;
+ uint8_t DC_I2C_SW_TIMEOUT;
+ uint8_t DC_I2C_SW_ABORTED;
+ uint8_t DC_I2C_SW_DONE;
+ uint8_t DC_I2C_SW_STATUS;
+ uint8_t DC_I2C_STOP_ON_NACK0;
+ uint8_t DC_I2C_START0;
+ uint8_t DC_I2C_RW0;
+ uint8_t DC_I2C_STOP0;
+ uint8_t DC_I2C_COUNT0;
+ uint8_t DC_I2C_DATA_RW;
+ uint8_t DC_I2C_DATA;
+ uint8_t DC_I2C_INDEX;
+ uint8_t DC_I2C_INDEX_WRITE;
+ uint8_t XTAL_REF_DIV;
+};
+
+struct dce110_i2c_hw_engine_mask {
+ uint32_t DC_I2C_DDC1_ENABLE;
+ uint32_t DC_I2C_DDC1_TIME_LIMIT;
+ uint32_t DC_I2C_DDC1_DATA_DRIVE_EN;
+ uint32_t DC_I2C_DDC1_CLK_DRIVE_EN;
+ uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL;
+ uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
+ uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
+ uint32_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint32_t DC_I2C_NO_QUEUED_SW_GO;
+ uint32_t DC_I2C_SW_PRIORITY;
+ uint32_t DC_I2C_SOFT_RESET;
+ uint32_t DC_I2C_SW_STATUS_RESET;
+ uint32_t DC_I2C_GO;
+ uint32_t DC_I2C_SEND_RESET;
+ uint32_t DC_I2C_TRANSACTION_COUNT;
+ uint32_t DC_I2C_DDC_SELECT;
+ uint32_t DC_I2C_DDC1_PRESCALE;
+ uint32_t DC_I2C_DDC1_THRESHOLD;
+ uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
+ uint32_t DC_I2C_SW_STOPPED_ON_NACK;
+ uint32_t DC_I2C_SW_TIMEOUT;
+ uint32_t DC_I2C_SW_ABORTED;
+ uint32_t DC_I2C_SW_DONE;
+ uint32_t DC_I2C_SW_STATUS;
+ uint32_t DC_I2C_STOP_ON_NACK0;
+ uint32_t DC_I2C_START0;
+ uint32_t DC_I2C_RW0;
+ uint32_t DC_I2C_STOP0;
+ uint32_t DC_I2C_COUNT0;
+ uint32_t DC_I2C_DATA_RW;
+ uint32_t DC_I2C_DATA;
+ uint32_t DC_I2C_INDEX;
+ uint32_t DC_I2C_INDEX_WRITE;
+ uint32_t XTAL_REF_DIV;
+};
+
+struct dce110_i2c_hw_engine_registers {
+ uint32_t SETUP;
+ uint32_t SPEED;
+ uint32_t DC_I2C_ARBITRATION;
+ uint32_t DC_I2C_CONTROL;
+ uint32_t DC_I2C_SW_STATUS;
+ uint32_t DC_I2C_TRANSACTION0;
+ uint32_t DC_I2C_TRANSACTION1;
+ uint32_t DC_I2C_TRANSACTION2;
+ uint32_t DC_I2C_TRANSACTION3;
+ uint32_t DC_I2C_DATA;
+ uint32_t MICROSECOND_TIME_BASE_DIV;
+};
+
+struct i2c_hw_engine_dce110 {
+ struct i2c_hw_engine base;
+ const struct dce110_i2c_hw_engine_registers *regs;
+ const struct dce110_i2c_hw_engine_shift *i2c_shift;
+ const struct dce110_i2c_hw_engine_mask *i2c_mask;
+ struct {
+ uint32_t DC_I2C_DDCX_SETUP;
+ uint32_t DC_I2C_DDCX_SPEED;
+ } addr;
+ uint32_t engine_id;
+ /* expressed in kilohertz */
+ uint32_t reference_frequency;
+ /* number of bytes currently used in HW buffer */
+ uint32_t buffer_used_bytes;
+ /* number of bytes used for write transaction in HW buffer
+ * - this will be used as the index to read from*/
+ uint32_t buffer_used_write;
+ /* number of pending transactions (before GO) */
+ uint32_t transaction_count;
+ uint32_t engine_keep_power_up_count;
+};
+
+struct i2c_hw_engine_dce110_create_arg {
+ uint32_t engine_id;
+ uint32_t reference_frequency;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+ const struct dce110_i2c_hw_engine_registers *regs;
+ const struct dce110_i2c_hw_engine_shift *i2c_shift;
+ const struct dce110_i2c_hw_engine_mask *i2c_mask;
+};
+
+struct i2c_engine *dal_i2c_hw_engine_dce110_create(
+ const struct i2c_hw_engine_dce110_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
new file mode 100644
index 000000000000..3aa7f791e523
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_sw_engine *'
+ * to 'struct i2c_sw_engine_dce110 *'
+ */
+#define FROM_I2C_SW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine_dce110, base)
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+}
+
+static void destruct(
+ struct i2c_sw_engine_dce110 *engine)
+{
+ dal_i2c_sw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **engine)
+{
+ struct i2c_sw_engine_dce110 *sw_engine = FROM_I2C_ENGINE(*engine);
+
+ destruct(sw_engine);
+
+ kfree(sw_engine);
+
+ *engine = NULL;
+}
+
+static bool acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle)
+{
+ return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+static void construct(
+ struct i2c_sw_engine_dce110 *engine_dce110,
+ const struct i2c_sw_engine_dce110_create_arg *arg_dce110)
+{
+ struct i2c_sw_engine_create_arg arg_base;
+
+ arg_base.ctx = arg_dce110->ctx;
+ arg_base.default_speed = arg_dce110->default_speed;
+
+ dal_i2c_sw_engine_construct(&engine_dce110->base, &arg_base);
+
+ /*struct engine struct engine_funcs*/
+ engine_dce110->base.base.base.funcs = &engine_funcs;
+ /*struct i2c_engine struct i2c_engine_funcs*/
+ engine_dce110->base.base.funcs = &i2c_engine_funcs;
+ engine_dce110->base.default_speed = arg_dce110->default_speed;
+ engine_dce110->engine_id = arg_dce110->engine_id;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_dce110_create(
+ const struct i2c_sw_engine_dce110_create_arg *arg)
+{
+ struct i2c_sw_engine_dce110 *engine_dce110;
+
+ if (!arg) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine_dce110 = kzalloc(sizeof(struct i2c_sw_engine_dce110),
+ GFP_KERNEL);
+
+ if (!engine_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine_dce110, arg);
+ return &engine_dce110->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
new file mode 100644
index 000000000000..c48c61f540a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_DCE110_H__
+#define __DAL_I2C_SW_ENGINE_DCE110_H__
+
+struct i2c_sw_engine_dce110 {
+ struct i2c_sw_engine base;
+ uint32_t engine_id;
+};
+
+struct i2c_sw_engine_dce110_create_arg {
+ uint32_t engine_id;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_sw_engine_dce110_create(
+ const struct i2c_sw_engine_dce110_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
new file mode 100644
index 000000000000..2a047f8ca0e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+#include "i2caux_dce110.h"
+
+#include "i2c_sw_engine_dce110.h"
+#include "i2c_hw_engine_dce110.h"
+#include "aux_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+/*cast pointer to struct i2caux TO pointer to struct i2caux_dce110*/
+#define FROM_I2C_AUX(ptr) \
+ container_of((ptr), struct i2caux_dce110, base)
+
+static void destruct(
+ struct i2caux_dce110 *i2caux_dce110)
+{
+ dal_i2caux_destruct(&i2caux_dce110->base);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(*i2c_engine);
+
+ destruct(i2caux_dce110);
+
+ kfree(i2caux_dce110);
+
+ *i2c_engine = NULL;
+}
+
+static struct i2c_engine *acquire_i2c_hw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
+
+ struct i2c_engine *engine = NULL;
+ /* generic hw engine is not used for EDID read
+ * It may be needed for external i2c device, like thermal chip,
+ * TODO will be implemented when needed.
+ * check dce80 bool non_generic for generic hw engine;
+ */
+
+ if (!ddc)
+ return NULL;
+
+ if (ddc->hw_info.hw_supported) {
+ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+
+ if (line < GPIO_DDC_LINE_COUNT)
+ engine = i2caux->i2c_hw_engines[line];
+ }
+
+ if (!engine)
+ return NULL;
+
+ if (!i2caux_dce110->i2c_hw_buffer_in_use &&
+ engine->base.funcs->acquire(&engine->base, ddc)) {
+ i2caux_dce110->i2c_hw_buffer_in_use = true;
+ return engine;
+ }
+
+ return NULL;
+}
+
+static void release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
+
+ if (engine->funcs->get_engine_type(engine) ==
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
+ i2caux_dce110->i2c_hw_buffer_in_use = false;
+
+ dal_i2caux_release_engine(i2caux, engine);
+}
+
+static const enum gpio_ddc_line hw_ddc_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+};
+
+static const enum gpio_ddc_line hw_aux_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+};
+
+/* function table */
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
+ .release_engine = release_engine,
+ .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
+ .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
+};
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce110_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+static const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
+ const struct dce110_aux_registers aux_regs[],
+ const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+ const struct dce110_i2c_hw_engine_mask *i2c_mask)
+{
+ uint32_t i = 0;
+ uint32_t reference_frequency = 0;
+ bool use_i2c_sw_engine = false;
+ struct i2caux *base = NULL;
+ /*TODO: For CZ bring up, if dal_i2caux_get_reference_clock
+ * does not return 48KHz, we need hard coded for 48Khz.
+ * Some BIOS setting incorrect cause this
+ * For production, we always get value from BIOS*/
+ reference_frequency =
+ dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
+
+ base = &i2caux_dce110->base;
+
+ dal_i2caux_construct(base, ctx);
+
+ i2caux_dce110->base.funcs = &i2caux_funcs;
+ i2caux_dce110->i2c_hw_buffer_in_use = false;
+ /* Create I2C engines (DDC lines per connector)
+ * different I2C/AUX usage cases, DDC, Generic GPIO, AUX.
+ */
+ do {
+ enum gpio_ddc_line line_id = hw_ddc_lines[i];
+
+ struct i2c_hw_engine_dce110_create_arg hw_arg_dce110;
+
+ if (use_i2c_sw_engine) {
+ struct i2c_sw_engine_dce110_create_arg sw_arg;
+
+ sw_arg.engine_id = i;
+ sw_arg.default_speed = base->default_i2c_sw_speed;
+ sw_arg.ctx = ctx;
+ base->i2c_sw_engines[line_id] =
+ dal_i2c_sw_engine_dce110_create(&sw_arg);
+ }
+
+ hw_arg_dce110.engine_id = i;
+ hw_arg_dce110.reference_frequency = reference_frequency;
+ hw_arg_dce110.default_speed = base->default_i2c_hw_speed;
+ hw_arg_dce110.ctx = ctx;
+ hw_arg_dce110.regs = &i2c_hw_engine_regs[i];
+ hw_arg_dce110.i2c_shift = i2c_shift;
+ hw_arg_dce110.i2c_mask = i2c_mask;
+
+ base->i2c_hw_engines[line_id] =
+ dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_ddc_lines));
+
+ /* Create AUX engines for all lines which has assisted HW AUX
+ * 'i' (loop counter) used as DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_aux_lines[i];
+
+ struct aux_engine_dce110_init_data aux_init_data;
+
+ aux_init_data.engine_id = i;
+ aux_init_data.timeout_period = base->aux_timeout_period;
+ aux_init_data.ctx = ctx;
+ aux_init_data.regs = &aux_regs[i];
+
+ base->aux_engines[line_id] =
+ dal_aux_engine_dce110_create(&aux_init_data);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_aux_lines));
+
+ /*TODO Generic I2C SW and HW*/
+}
+
+/*
+ * dal_i2caux_dce110_create
+ *
+ * @brief
+ * public interface to allocate memory for DCE11 I2CAUX
+ *
+ * @param
+ * struct adapter_service *as - [in]
+ * struct dc_context *ctx - [in]
+ *
+ * @return
+ * pointer to the base struct of DCE11 I2CAUX
+ */
+struct i2caux *dal_i2caux_dce110_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce110_aux_regs,
+ i2c_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
new file mode 100644
index 000000000000..1b1f71c60ac9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE110_H__
+#define __DAL_I2C_AUX_DCE110_H__
+
+#include "../i2caux.h"
+
+struct i2caux_dce110 {
+ struct i2caux base;
+ /* indicate the I2C HW circular buffer is in use */
+ bool i2c_hw_buffer_in_use;
+};
+
+struct dce110_aux_registers;
+struct dce110_i2c_hw_engine_registers;
+struct dce110_i2c_hw_engine_shift;
+struct dce110_i2c_hw_engine_mask;
+
+struct i2caux *dal_i2caux_dce110_create(
+ struct dc_context *ctx);
+
+void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
+ const struct dce110_aux_registers *aux_regs,
+ const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+ const struct dce110_i2c_hw_engine_mask *i2c_mask);
+
+#endif /* __DAL_I2C_AUX_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
new file mode 100644
index 000000000000..dafc1a727f7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/i2caux_dce110.h"
+#include "i2caux_dce112.h"
+
+#include "../dce110/aux_engine_dce110.h"
+
+#include "../dce110/i2c_hw_engine_dce110.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce112_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dce112_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static void construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx)
+{
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce112_aux_regs,
+ dce112_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+}
+
+/*
+ * dal_i2caux_dce110_create
+ *
+ * @brief
+ * public interface to allocate memory for DCE11 I2CAUX
+ *
+ * @param
+ * struct adapter_service *as - [in]
+ * struct dc_context *ctx - [in]
+ *
+ * @return
+ * pointer to the base struct of DCE11 I2CAUX
+ */
+struct i2caux *dal_i2caux_dce112_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(i2caux_dce110, ctx);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
new file mode 100644
index 000000000000..8d35453c25b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE112_H__
+#define __DAL_I2C_AUX_DCE112_H__
+
+struct i2caux *dal_i2caux_dce112_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
new file mode 100644
index 000000000000..668981a4c285
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expend register list macro defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+/* macros to expend register list macro defined in HW object header file
+ * end *********************/
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
+}
+
+static const struct dce110_aux_registers dce120_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_i2c_hw_engine_registers dce120_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+struct i2caux *dal_i2caux_dce120_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce120_aux_regs,
+ dce120_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h
new file mode 100644
index 000000000000..b6ac47617c70
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE120_H__
+#define __DAL_I2C_AUX_DCE120_H__
+
+struct i2caux *dal_i2caux_dce120_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
new file mode 100644
index 000000000000..fd0832dd2c75
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_hw_engine.h"
+#include "../i2c_generic_hw_engine.h"
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+/*
+ * This unit
+ */
+
+enum dc_i2c_status {
+ DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+};
+
+enum dc_i2c_arbitration {
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
+};
+
+enum {
+ /* No timeout in HW
+ * (timeout implemented in SW by querying status) */
+ I2C_SETUP_TIME_LIMIT = 255,
+ I2C_HW_BUFFER_SIZE = 144
+};
+
+/*
+ * @brief
+ * Cast 'struct i2c_hw_engine *'
+ * to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine_dce80, base)
+
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_engine *'
+ * to pointer to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast pointer to 'struct engine *'
+ * to 'pointer to struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void disable_i2c_hw_engine(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+ uint32_t value = 0;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_ENABLE);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct i2c_hw_engine_dce80 *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = NULL;
+ bool safe_to_reset;
+ uint32_t value = 0;
+
+ base = &hw_engine->base.base;
+
+ /* Restore original HW engine speed */
+
+ base->funcs->set_speed(base, hw_engine->base.original_speed);
+
+ /* Release I2C */
+ {
+ value = dm_read_reg(engine->ctx, mmDC_I2C_ARBITRATION);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_ARBITRATION,
+ DC_I2C_SW_DONE_USING_I2C_REG);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_ARBITRATION, value);
+ }
+
+ /* Reset HW engine */
+ {
+ uint32_t i2c_sw_status = 0;
+
+ value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+ /* if used by SW, safe to reset */
+ safe_to_reset = (i2c_sw_status == 1);
+ }
+ {
+ value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
+
+ if (safe_to_reset)
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
+ }
+
+ /* HW I2c engine - clock gating feature */
+ if (!hw_engine->engine_keep_power_up_count)
+ disable_i2c_hw_engine(hw_engine);
+}
+
+static void destruct(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ dal_i2c_hw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **i2c_engine)
+{
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(*i2c_engine);
+
+ destruct(engine);
+
+ kfree(engine);
+
+ *i2c_engine = NULL;
+}
+
+static bool setup_engine(
+ struct i2c_engine *i2c_engine)
+{
+ uint32_t value = 0;
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ /* Program pin select */
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SEND_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_TRANSACTION_COUNT);
+
+ set_reg_field_value(
+ value,
+ engine->engine_id,
+ DC_I2C_CONTROL,
+ DC_I2C_DDC_SELECT);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+
+ /* Program time limit */
+ {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+
+ value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ I2C_SETUP_TIME_LIMIT,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_TIME_LIMIT);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_ENABLE);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+
+ /* Program HW priority
+ * set to High - interrupt software I2C at any time
+ * Enable restart of SW I2C that was interrupted by HW
+ * disable queuing of software while I2C is in use by HW */
+ {
+ value = dm_read_reg(i2c_engine->base.ctx,
+ mmDC_I2C_ARBITRATION);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_ARBITRATION,
+ DC_I2C_NO_QUEUED_SW_GO);
+
+ set_reg_field_value(
+ value,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION,
+ DC_I2C_SW_PRIORITY);
+
+ dm_write_reg(i2c_engine->base.ctx,
+ mmDC_I2C_ARBITRATION, value);
+ }
+
+ return true;
+}
+
+static uint32_t get_speed(
+ const struct i2c_engine *i2c_engine)
+{
+ const struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
+
+ uint32_t pre_scale = 0;
+
+ uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ pre_scale = get_reg_field_value(
+ value,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_PRESCALE);
+
+ /* [anaumov] it seems following is unnecessary */
+ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
+
+ return pre_scale ?
+ engine->reference_frequency / pre_scale :
+ engine->base.default_speed;
+}
+
+static void set_speed(
+ struct i2c_engine *i2c_engine,
+ uint32_t speed)
+{
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ if (speed) {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
+
+ uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ engine->reference_frequency / speed,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_PRESCALE);
+
+ set_reg_field_value(
+ value,
+ 2,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_THRESHOLD);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+}
+
+static inline void reset_hw_engine(struct engine *engine)
+{
+ uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
+}
+
+static bool is_hw_busy(struct engine *engine)
+{
+ uint32_t i2c_sw_status = 0;
+
+ uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
+ return false;
+
+ reset_hw_engine(engine);
+
+ value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+}
+
+/*
+ * @brief
+ * DC_GPIO_DDC MM register offsets
+ */
+static const uint32_t transaction_addr[] = {
+ mmDC_I2C_TRANSACTION0,
+ mmDC_I2C_TRANSACTION1,
+ mmDC_I2C_TRANSACTION2,
+ mmDC_I2C_TRANSACTION3
+};
+
+static bool process_transaction(
+ struct i2c_hw_engine_dce80 *engine,
+ struct i2c_request_transaction_data *request)
+{
+ uint32_t length = request->length;
+ uint8_t *buffer = request->data;
+
+ bool last_transaction = false;
+ uint32_t value = 0;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ {
+ const uint32_t addr =
+ transaction_addr[engine->transaction_count];
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP_ON_NACK0);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_START0);
+
+ if ((engine->transaction_count == 3) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP0);
+
+ last_transaction = true;
+ } else
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP0);
+
+ set_reg_field_value(
+ value,
+ (0 != (request->action &
+ I2CAUX_TRANSACTION_ACTION_I2C_READ)),
+ DC_I2C_TRANSACTION0,
+ DC_I2C_RW0);
+
+ set_reg_field_value(
+ value,
+ length,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_COUNT0);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* Write the I2C address and I2C data
+ * into the hardware circular buffer, one byte per entry.
+ * As an example, the 7-bit I2C slave address for CRT monitor
+ * for reading DDC/EDID information is 0b1010001.
+ * For an I2C send operation, the LSB must be programmed to 0;
+ * for I2C receive operation, the LSB must be programmed to 1. */
+
+ {
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ false,
+ DC_I2C_DATA,
+ DC_I2C_DATA_RW);
+
+ set_reg_field_value(
+ value,
+ request->address,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ if (engine->transaction_count == 0) {
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DATA,
+ DC_I2C_INDEX);
+
+ /*enable index write*/
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+ }
+
+ dm_write_reg(ctx, mmDC_I2C_DATA, value);
+
+ if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+
+ while (length) {
+
+ set_reg_field_value(
+ value,
+ *buffer++,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ dm_write_reg(ctx, mmDC_I2C_DATA, value);
+ --length;
+ }
+ }
+ }
+
+ ++engine->transaction_count;
+ engine->buffer_used_bytes += length + 1;
+
+ return last_transaction;
+}
+
+static void execute_transaction(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ uint32_t value = 0;
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_DATA_DRIVE_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_CLK_DRIVE_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_DATA_DRIVE_SEL);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_INTRA_TRANSACTION_DELAY);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_INTRA_BYTE_DELAY);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SEND_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ set_reg_field_value(
+ value,
+ engine->transaction_count - 1,
+ DC_I2C_CONTROL,
+ DC_I2C_TRANSACTION_COUNT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* start I2C transfer */
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* all transactions were executed and HW buffer became empty
+ * (even though it actually happens when status becomes DONE) */
+ engine->transaction_count = 0;
+ engine->buffer_used_bytes = 0;
+}
+
+static void submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ if (!process_transaction(FROM_I2C_ENGINE(engine), request))
+ return;
+
+ if (is_hw_busy(&engine->base)) {
+ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ return;
+ }
+
+ execute_transaction(FROM_I2C_ENGINE(engine));
+}
+
+static void process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+ uint32_t length = reply->length;
+ uint8_t *buffer = reply->data;
+
+ uint32_t value = 0;
+
+ /*set index*/
+ set_reg_field_value(
+ value,
+ length - 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_DATA_RW);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+
+ dm_write_reg(engine->base.ctx, mmDC_I2C_DATA, value);
+
+ while (length) {
+ /* after reading the status,
+ * if the I2C operation executed successfully
+ * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
+ * should read data bytes from I2C circular data buffer */
+
+ value = dm_read_reg(engine->base.ctx, mmDC_I2C_DATA);
+
+ *buffer++ = get_reg_field_value(
+ value,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ --length;
+ }
+}
+
+static enum i2c_channel_operation_result get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes)
+{
+ uint32_t i2c_sw_status = 0;
+ uint32_t value = dm_read_reg(engine->base.ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK)
+ return I2C_CHANNEL_OPERATION_NO_RESPONSE;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK)
+ return I2C_CHANNEL_OPERATION_TIMEOUT;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK)
+ return I2C_CHANNEL_OPERATION_FAILED;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ /*
+ * this is the case when HW used for communication, I2C_SW_STATUS
+ * could be zero
+ */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+static uint32_t get_hw_buffer_available_size(
+ const struct i2c_hw_engine *engine)
+{
+ return I2C_HW_BUFFER_SIZE -
+ FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
+}
+
+static uint32_t get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ uint32_t speed = engine->base.funcs->get_speed(&engine->base);
+
+ uint32_t period_timeout;
+ uint32_t num_of_clock_stretches;
+
+ if (!speed)
+ return 0;
+
+ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
+
+ num_of_clock_stretches = 1 + (length << 3) + 1;
+ num_of_clock_stretches +=
+ (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
+ (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
+
+ return period_timeout * num_of_clock_stretches;
+}
+
+/*
+ * @brief
+ * DC_I2C_DDC1_SETUP MM register offsets
+ *
+ * @note
+ * The indices of this offset array are DDC engine IDs
+ */
+static const int32_t ddc_setup_offset[] = {
+
+ mmDC_I2C_DDC1_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 1 */
+ mmDC_I2C_DDC2_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 2 */
+ mmDC_I2C_DDC3_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 3 */
+ mmDC_I2C_DDC4_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 4 */
+ mmDC_I2C_DDC5_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 5 */
+ mmDC_I2C_DDC6_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 6 */
+ mmDC_I2C_DDCVGA_SETUP - mmDC_I2C_DDC1_SETUP /* DDC Engine 7 */
+};
+
+/*
+ * @brief
+ * DC_I2C_DDC1_SPEED MM register offsets
+ *
+ * @note
+ * The indices of this offset array are DDC engine IDs
+ */
+static const int32_t ddc_speed_offset[] = {
+ mmDC_I2C_DDC1_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 1 */
+ mmDC_I2C_DDC2_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 2 */
+ mmDC_I2C_DDC3_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 3 */
+ mmDC_I2C_DDC4_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 4 */
+ mmDC_I2C_DDC5_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 5 */
+ mmDC_I2C_DDC6_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 6 */
+ mmDC_I2C_DDCVGA_SPEED - mmDC_I2C_DDC1_SPEED /* DDC Engine 7 */
+};
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .destroy = destroy,
+ .get_speed = get_speed,
+ .set_speed = set_speed,
+ .setup_engine = setup_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .acquire_engine = dal_i2c_hw_engine_acquire_engine,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_hw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_hw_engine_submit_request,
+};
+
+static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
+ .get_hw_buffer_available_size =
+ get_hw_buffer_available_size,
+ .get_transaction_timeout =
+ get_transaction_timeout,
+ .wait_on_operation_result =
+ dal_i2c_hw_engine_wait_on_operation_result,
+};
+
+static void construct(
+ struct i2c_hw_engine_dce80 *engine,
+ const struct i2c_hw_engine_dce80_create_arg *arg)
+{
+ dal_i2c_hw_engine_construct(&engine->base, arg->ctx);
+
+ engine->base.base.base.funcs = &engine_funcs;
+ engine->base.base.funcs = &i2c_engine_funcs;
+ engine->base.funcs = &i2c_hw_engine_funcs;
+ engine->base.default_speed = arg->default_speed;
+ engine->addr.DC_I2C_DDCX_SETUP =
+ mmDC_I2C_DDC1_SETUP + ddc_setup_offset[arg->engine_id];
+ engine->addr.DC_I2C_DDCX_SPEED =
+ mmDC_I2C_DDC1_SPEED + ddc_speed_offset[arg->engine_id];
+
+ engine->engine_id = arg->engine_id;
+ engine->reference_frequency = arg->reference_frequency;
+ engine->buffer_used_bytes = 0;
+ engine->transaction_count = 0;
+ engine->engine_keep_power_up_count = 1;
+}
+
+struct i2c_engine *dal_i2c_hw_engine_dce80_create(
+ const struct i2c_hw_engine_dce80_create_arg *arg)
+{
+ struct i2c_hw_engine_dce80 *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if ((arg->engine_id >= sizeof(ddc_setup_offset) / sizeof(int32_t)) ||
+ (arg->engine_id >= sizeof(ddc_speed_offset) / sizeof(int32_t)) ||
+ !arg->reference_frequency) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_hw_engine_dce80), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(engine, arg);
+ return &engine->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
new file mode 100644
index 000000000000..5c6116fb5479
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_DCE80_H__
+#define __DAL_I2C_HW_ENGINE_DCE80_H__
+
+struct i2c_hw_engine_dce80 {
+ struct i2c_hw_engine base;
+ struct {
+ uint32_t DC_I2C_DDCX_SETUP;
+ uint32_t DC_I2C_DDCX_SPEED;
+ } addr;
+ uint32_t engine_id;
+ /* expressed in kilohertz */
+ uint32_t reference_frequency;
+ /* number of bytes currently used in HW buffer */
+ uint32_t buffer_used_bytes;
+ /* number of pending transactions (before GO) */
+ uint32_t transaction_count;
+ uint32_t engine_keep_power_up_count;
+};
+
+struct i2c_hw_engine_dce80_create_arg {
+ uint32_t engine_id;
+ uint32_t reference_frequency;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_hw_engine_dce80_create(
+ const struct i2c_hw_engine_dce80_create_arg *arg);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
new file mode 100644
index 000000000000..4853ee26096a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+/*
+ * This unit
+ */
+
+static const uint32_t ddc_hw_status_addr[] = {
+ mmDC_I2C_DDC1_HW_STATUS,
+ mmDC_I2C_DDC2_HW_STATUS,
+ mmDC_I2C_DDC3_HW_STATUS,
+ mmDC_I2C_DDC4_HW_STATUS,
+ mmDC_I2C_DDC5_HW_STATUS,
+ mmDC_I2C_DDC6_HW_STATUS,
+ mmDC_I2C_DDCVGA_HW_STATUS
+};
+
+/*
+ * @brief
+ * Cast 'struct i2c_sw_engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_I2C_SW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine_dce80, base)
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+
+}
+
+static void destruct(
+ struct i2c_sw_engine_dce80 *engine)
+{
+ dal_i2c_sw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **engine)
+{
+ struct i2c_sw_engine_dce80 *sw_engine = FROM_I2C_ENGINE(*engine);
+
+ destruct(sw_engine);
+
+ kfree(sw_engine);
+
+ *engine = NULL;
+}
+
+static bool acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle)
+{
+ return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+static void construct(
+ struct i2c_sw_engine_dce80 *engine,
+ const struct i2c_sw_engine_dce80_create_arg *arg)
+{
+ struct i2c_sw_engine_create_arg arg_base;
+
+ arg_base.ctx = arg->ctx;
+ arg_base.default_speed = arg->default_speed;
+
+ dal_i2c_sw_engine_construct(&engine->base, &arg_base);
+
+ engine->base.base.base.funcs = &engine_funcs;
+ engine->base.base.funcs = &i2c_engine_funcs;
+ engine->base.default_speed = arg->default_speed;
+ engine->engine_id = arg->engine_id;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_dce80_create(
+ const struct i2c_sw_engine_dce80_create_arg *arg)
+{
+ struct i2c_sw_engine_dce80 *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_sw_engine_dce80), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(engine, arg);
+ return &engine->base.base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
new file mode 100644
index 000000000000..26355c088746
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_DCE80_H__
+#define __DAL_I2C_SW_ENGINE_DCE80_H__
+
+struct i2c_sw_engine_dce80 {
+ struct i2c_sw_engine base;
+ uint32_t engine_id;
+};
+
+struct i2c_sw_engine_dce80_create_arg {
+ uint32_t engine_id;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_sw_engine_dce80_create(
+ const struct i2c_sw_engine_dce80_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
new file mode 100644
index 000000000000..ed48596dd2a5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2caux_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "i2c_sw_engine_dce80.h"
+#include "../i2c_hw_engine.h"
+#include "i2c_hw_engine_dce80.h"
+#include "../i2c_generic_hw_engine.h"
+#include "../aux_engine.h"
+
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers dce80_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+/*
+ * This unit
+ */
+
+#define FROM_I2C_AUX(ptr) \
+ container_of((ptr), struct i2caux_dce80, base)
+
+static void destruct(
+ struct i2caux_dce80 *i2caux_dce80)
+{
+ dal_i2caux_destruct(&i2caux_dce80->base);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(*i2c_engine);
+
+ destruct(i2caux_dce80);
+
+ kfree(i2caux_dce80);
+
+ *i2c_engine = NULL;
+}
+
+static struct i2c_engine *acquire_i2c_hw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(i2caux);
+
+ struct i2c_engine *engine = NULL;
+ bool non_generic;
+
+ if (!ddc)
+ return NULL;
+
+ if (ddc->hw_info.hw_supported) {
+ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+
+ if (line < GPIO_DDC_LINE_COUNT) {
+ non_generic = true;
+ engine = i2caux->i2c_hw_engines[line];
+ }
+ }
+
+ if (!engine) {
+ non_generic = false;
+ engine = i2caux->i2c_generic_hw_engine;
+ }
+
+ if (!engine)
+ return NULL;
+
+ if (non_generic) {
+ if (!i2caux_dce80->i2c_hw_buffer_in_use &&
+ engine->base.funcs->acquire(&engine->base, ddc)) {
+ i2caux_dce80->i2c_hw_buffer_in_use = true;
+ return engine;
+ }
+ } else {
+ if (engine->base.funcs->acquire(&engine->base, ddc))
+ return engine;
+ }
+
+ return NULL;
+}
+
+static void release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ if (engine->funcs->get_engine_type(engine) ==
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
+ FROM_I2C_AUX(i2caux)->i2c_hw_buffer_in_use = false;
+
+ dal_i2caux_release_engine(i2caux, engine);
+}
+
+static const enum gpio_ddc_line hw_ddc_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+ GPIO_DDC_LINE_DDC_VGA
+};
+
+static const enum gpio_ddc_line hw_aux_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6
+};
+
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
+ .release_engine = release_engine,
+ .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
+ .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
+};
+
+static void construct(
+ struct i2caux_dce80 *i2caux_dce80,
+ struct dc_context *ctx)
+{
+ /* Entire family have I2C engine reference clock frequency
+ * changed from XTALIN (27) to XTALIN/2 (13.5) */
+
+ struct i2caux *base = &i2caux_dce80->base;
+
+ uint32_t reference_frequency =
+ dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
+
+ /*bool use_i2c_sw_engine = dal_adapter_service_is_feature_supported(as,
+ FEATURE_RESTORE_USAGE_I2C_SW_ENGINE);*/
+
+ /* Use SWI2C for dce8 currently, sicne we have bug with hwi2c */
+ bool use_i2c_sw_engine = true;
+
+ uint32_t i;
+
+ dal_i2caux_construct(base, ctx);
+
+ i2caux_dce80->base.funcs = &i2caux_funcs;
+ i2caux_dce80->i2c_hw_buffer_in_use = false;
+
+ /* Create I2C HW engines (HW + SW pairs)
+ * for all lines which has assisted HW DDC
+ * 'i' (loop counter) used as DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_ddc_lines[i];
+
+ struct i2c_hw_engine_dce80_create_arg hw_arg;
+
+ if (use_i2c_sw_engine) {
+ struct i2c_sw_engine_dce80_create_arg sw_arg;
+
+ sw_arg.engine_id = i;
+ sw_arg.default_speed = base->default_i2c_sw_speed;
+ sw_arg.ctx = ctx;
+ base->i2c_sw_engines[line_id] =
+ dal_i2c_sw_engine_dce80_create(&sw_arg);
+ }
+
+ hw_arg.engine_id = i;
+ hw_arg.reference_frequency = reference_frequency;
+ hw_arg.default_speed = base->default_i2c_hw_speed;
+ hw_arg.ctx = ctx;
+
+ base->i2c_hw_engines[line_id] =
+ dal_i2c_hw_engine_dce80_create(&hw_arg);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_ddc_lines));
+
+ /* Create AUX engines for all lines which has assisted HW AUX
+ * 'i' (loop counter) used as DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_aux_lines[i];
+
+ struct aux_engine_dce110_init_data arg;
+
+ arg.engine_id = i;
+ arg.timeout_period = base->aux_timeout_period;
+ arg.ctx = ctx;
+ arg.regs = &dce80_aux_regs[i];
+
+ base->aux_engines[line_id] =
+ dal_aux_engine_dce110_create(&arg);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_aux_lines));
+
+ /* TODO Generic I2C SW and HW */
+}
+
+struct i2caux *dal_i2caux_dce80_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce80 *i2caux_dce80 =
+ kzalloc(sizeof(struct i2caux_dce80), GFP_KERNEL);
+
+ if (!i2caux_dce80) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(i2caux_dce80, ctx);
+ return &i2caux_dce80->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
new file mode 100644
index 000000000000..21908629e973
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE80_H__
+#define __DAL_I2C_AUX_DCE80_H__
+
+struct i2caux_dce80 {
+ struct i2caux base;
+ /* indicate the I2C HW circular buffer is in use */
+ bool i2c_hw_buffer_in_use;
+};
+
+struct i2caux *dal_i2caux_dce80_create(
+ struct dc_context *ctx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
new file mode 100644
index 000000000000..13b807d8aff8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expend register list macro defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+/* macros to expend register list macro defined in HW object header file
+ * end *********************/
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dcn10_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dcn10_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+struct i2caux *dal_i2caux_dcn10_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dcn10_aux_regs,
+ dcn10_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
new file mode 100644
index 000000000000..aeb4a86463d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCN10_H__
+#define __DAL_I2C_AUX_DCN10_H__
+
+struct i2caux *dal_i2caux_dcn10_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
new file mode 100644
index 000000000000..e6408f644086
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+#include "i2caux_diag.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+static void destruct(
+ struct i2caux *i2caux)
+{
+ dal_i2caux_destruct(i2caux);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ destruct(*i2c_engine);
+
+ kfree(*i2c_engine);
+
+ *i2c_engine = NULL;
+}
+
+/* function table */
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = NULL,
+ .release_engine = NULL,
+ .acquire_i2c_sw_engine = NULL,
+ .acquire_aux_engine = NULL,
+};
+
+static void construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct(i2caux, ctx);
+ i2caux->funcs = &i2caux_funcs;
+}
+
+struct i2caux *dal_i2caux_diag_fpga_create(
+ struct dc_context *ctx)
+{
+ struct i2caux *i2caux = kzalloc(sizeof(struct i2caux),
+ GFP_KERNEL);
+
+ if (!i2caux) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(i2caux, ctx);
+ return i2caux;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
new file mode 100644
index 000000000000..a83eeb748283
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DIAG_FPGA_H__
+#define __DAL_I2C_AUX_DIAG_FPGA_H__
+
+struct i2caux *dal_i2caux_diag_fpga_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
new file mode 100644
index 000000000000..33de8a8834dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_ENGINE_H__
+#define __DAL_ENGINE_H__
+
+enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+ enum i2caux_transaction_address_space address_space;
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+};
+
+struct i2caux_transaction_request {
+ enum i2caux_transaction_operation operation;
+ struct i2caux_transaction_payload payload;
+ enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+ I2CAUX_ENGINE_TYPE_AUX,
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+enum i2caux_transaction_action {
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+ I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+ I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
+
+struct engine;
+
+struct engine_funcs {
+ enum i2caux_engine_type (*get_engine_type)(
+ const struct engine *engine);
+ bool (*acquire)(
+ struct engine *engine,
+ struct ddc *ddc);
+ bool (*submit_request)(
+ struct engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+ void (*release_engine)(
+ struct engine *engine);
+};
+
+struct engine {
+ const struct engine_funcs *funcs;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+};
+
+void dal_i2caux_construct_engine(
+ struct engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2caux_destruct_engine(
+ struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
new file mode 100644
index 000000000000..5d155d36d353
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "engine.h"
+
+void dal_i2caux_construct_engine(
+ struct engine *engine,
+ struct dc_context *ctx)
+{
+ engine->ddc = NULL;
+ engine->ctx = ctx;
+}
+
+void dal_i2caux_destruct_engine(
+ struct engine *engine)
+{
+ /* nothing to do */
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
new file mode 100644
index 000000000000..70e20bd47ce4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+#define FROM_ENGINE(ptr) \
+ container_of((ptr), struct i2c_engine, base)
+
+bool dal_i2c_engine_acquire(
+ struct engine *engine,
+ struct ddc *ddc_handle)
+{
+ struct i2c_engine *i2c_engine = FROM_ENGINE(engine);
+
+ uint32_t counter = 0;
+ bool result;
+
+ do {
+ result = i2c_engine->funcs->acquire_engine(
+ i2c_engine, ddc_handle);
+
+ if (result)
+ break;
+
+ /* i2c_engine is busy by VBios, lets wait and retry */
+
+ udelay(10);
+
+ ++counter;
+ } while (counter < 2);
+
+ if (result) {
+ if (!i2c_engine->funcs->setup_engine(i2c_engine)) {
+ engine->funcs->release_engine(engine);
+ result = false;
+ }
+ }
+
+ return result;
+}
+
+bool dal_i2c_engine_setup_i2c_engine(
+ struct i2c_engine *engine)
+{
+ /* Derivative classes do not have to override this */
+
+ return true;
+}
+
+void dal_i2c_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+
+}
+
+void dal_i2c_engine_process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+
+}
+
+void dal_i2c_engine_construct(
+ struct i2c_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct_engine(&engine->base, ctx);
+ engine->timeout_delay = 0;
+}
+
+void dal_i2c_engine_destruct(
+ struct i2c_engine *engine)
+{
+ dal_i2caux_destruct_engine(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
new file mode 100644
index 000000000000..58fc0f25eceb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_ENGINE_H__
+#define __DAL_I2C_ENGINE_H__
+
+enum i2c_channel_operation_result {
+ I2C_CHANNEL_OPERATION_SUCCEEDED,
+ I2C_CHANNEL_OPERATION_FAILED,
+ I2C_CHANNEL_OPERATION_NOT_GRANTED,
+ I2C_CHANNEL_OPERATION_IS_BUSY,
+ I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED,
+ I2C_CHANNEL_OPERATION_CHANNEL_IN_USE,
+ I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED,
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY,
+ I2C_CHANNEL_OPERATION_TIMEOUT,
+ I2C_CHANNEL_OPERATION_NO_RESPONSE,
+ I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS,
+ I2C_CHANNEL_OPERATION_WRONG_PARAMETER,
+ I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES,
+ I2C_CHANNEL_OPERATION_NOT_STARTED
+};
+
+struct i2c_request_transaction_data {
+ enum i2caux_transaction_action action;
+ enum i2c_channel_operation_result status;
+ uint8_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+struct i2c_reply_transaction_data {
+ uint32_t length;
+ uint8_t *data;
+};
+
+struct i2c_engine;
+
+struct i2c_engine_funcs {
+ void (*destroy)(
+ struct i2c_engine **ptr);
+ uint32_t (*get_speed)(
+ const struct i2c_engine *engine);
+ void (*set_speed)(
+ struct i2c_engine *engine,
+ uint32_t speed);
+ bool (*acquire_engine)(
+ struct i2c_engine *engine,
+ struct ddc *ddc);
+ bool (*setup_engine)(
+ struct i2c_engine *engine);
+ void (*submit_channel_request)(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply);
+ enum i2c_channel_operation_result (*get_channel_status)(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes);
+};
+
+struct i2c_engine {
+ struct engine base;
+ const struct i2c_engine_funcs *funcs;
+ uint32_t timeout_delay;
+};
+
+void dal_i2c_engine_construct(
+ struct i2c_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_engine_destruct(
+ struct i2c_engine *engine);
+
+bool dal_i2c_engine_setup_i2c_engine(
+ struct i2c_engine *engine);
+
+void dal_i2c_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request);
+
+void dal_i2c_engine_process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply);
+
+bool dal_i2c_engine_acquire(
+ struct engine *ptr,
+ struct ddc *ddc_handle);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
new file mode 100644
index 000000000000..5a4295e0fae5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+#include "i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_generic_hw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_hw_engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_generic_hw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW;
+}
+
+/*
+ * @brief
+ * Single transaction handling.
+ * Since transaction may be bigger than HW buffer size,
+ * it divides transaction to sub-transactions
+ * and uses batch transaction feature of the engine.
+ */
+bool dal_i2c_generic_hw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_generic_hw_engine *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_hw_engine *base = &hw_engine->base;
+
+ uint32_t max_payload_size =
+ base->funcs->get_hw_buffer_available_size(base);
+
+ bool initial_stop_bit = !middle_of_transaction;
+
+ struct i2c_generic_transaction_attributes attributes;
+
+ enum i2c_channel_operation_result operation_result =
+ I2C_CHANNEL_OPERATION_FAILED;
+
+ bool result = false;
+
+ /* setup transaction initial properties */
+
+ uint8_t address = i2caux_request->payload.address;
+ uint8_t *current_payload = i2caux_request->payload.data;
+ uint32_t remaining_payload_size = i2caux_request->payload.length;
+
+ bool first_iteration = true;
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ return false;
+ }
+
+ /* Do batch transaction.
+ * Divide read/write data into payloads which fit HW buffer size.
+ * 1. Single transaction:
+ * start_bit = 1, stop_bit depends on session state, ack_on_read = 0;
+ * 2. Start of batch transaction:
+ * start_bit = 1, stop_bit = 0, ack_on_read = 1;
+ * 3. Middle of batch transaction:
+ * start_bit = 0, stop_bit = 0, ack_on_read = 1;
+ * 4. End of batch transaction:
+ * start_bit = 0, stop_bit depends on session state, ack_on_read = 0.
+ * Session stop bit is set if 'middle_of_transaction' = 0. */
+
+ while (remaining_payload_size) {
+ uint32_t current_transaction_size;
+ uint32_t current_payload_size;
+
+ bool last_iteration;
+ bool stop_bit;
+
+ /* Calculate current transaction size and payload size.
+ * Transaction size = total number of bytes in transaction,
+ * including slave's address;
+ * Payload size = number of data bytes in transaction. */
+
+ if (first_iteration) {
+ /* In the first sub-transaction we send slave's address
+ * thus we need to reserve one byte for it */
+ current_transaction_size =
+ (remaining_payload_size > max_payload_size - 1) ?
+ max_payload_size :
+ remaining_payload_size + 1;
+
+ current_payload_size = current_transaction_size - 1;
+ } else {
+ /* Second and further sub-transactions will have
+ * entire buffer reserved for data */
+ current_transaction_size =
+ (remaining_payload_size > max_payload_size) ?
+ max_payload_size :
+ remaining_payload_size;
+
+ current_payload_size = current_transaction_size;
+ }
+
+ last_iteration =
+ (remaining_payload_size == current_payload_size);
+
+ stop_bit = last_iteration ? initial_stop_bit : false;
+
+ /* write slave device address */
+
+ if (first_iteration)
+ hw_engine->funcs->write_address(hw_engine, address);
+
+ /* write current portion of data, if requested */
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ hw_engine->funcs->write_data(
+ hw_engine,
+ current_payload,
+ current_payload_size);
+
+ /* execute transaction */
+
+ attributes.start_bit = first_iteration;
+ attributes.stop_bit = stop_bit;
+ attributes.last_read = last_iteration;
+ attributes.transaction_size = current_transaction_size;
+
+ hw_engine->funcs->execute_transaction(hw_engine, &attributes);
+
+ /* wait until transaction is processed; if it fails - quit */
+
+ operation_result = base->funcs->wait_on_operation_result(
+ base,
+ base->funcs->get_transaction_timeout(
+ base, current_transaction_size),
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+
+ if (operation_result != I2C_CHANNEL_OPERATION_SUCCEEDED)
+ break;
+
+ /* read current portion of data, if requested */
+
+ /* the read offset should be 1 for first sub-transaction,
+ * and 0 for any next one */
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ hw_engine->funcs->read_data(hw_engine, current_payload,
+ current_payload_size, first_iteration ? 1 : 0);
+
+ /* update loop variables */
+
+ first_iteration = false;
+ current_payload += current_payload_size;
+ remaining_payload_size -= current_payload_size;
+ }
+
+ /* update transaction status */
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ result = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ }
+
+ return result;
+}
+
+/*
+ * @brief
+ * Returns number of microseconds to wait until timeout to be considered
+ */
+uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ const struct i2c_engine *base = &engine->base;
+
+ uint32_t speed = base->funcs->get_speed(base);
+
+ if (!speed)
+ return 0;
+
+ /* total timeout = period_timeout * (start + data bits count + stop) */
+
+ return ((1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed) *
+ (1 + (length << 3) + 1);
+}
+
+void dal_i2c_generic_hw_engine_construct(
+ struct i2c_generic_hw_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2c_hw_engine_construct(&engine->base, ctx);
+}
+
+void dal_i2c_generic_hw_engine_destruct(
+ struct i2c_generic_hw_engine *engine)
+{
+ dal_i2c_hw_engine_destruct(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
new file mode 100644
index 000000000000..1da0397b04a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_GENERIC_HW_ENGINE_H__
+#define __DAL_I2C_GENERIC_HW_ENGINE_H__
+
+struct i2c_generic_transaction_attributes {
+ enum i2caux_transaction_action action;
+ uint32_t transaction_size;
+ bool start_bit;
+ bool stop_bit;
+ bool last_read;
+};
+
+struct i2c_generic_hw_engine;
+
+struct i2c_generic_hw_engine_funcs {
+ void (*write_address)(
+ struct i2c_generic_hw_engine *engine,
+ uint8_t address);
+ void (*write_data)(
+ struct i2c_generic_hw_engine *engine,
+ const uint8_t *buffer,
+ uint32_t length);
+ void (*read_data)(
+ struct i2c_generic_hw_engine *engine,
+ uint8_t *buffer,
+ uint32_t length,
+ uint32_t offset);
+ void (*execute_transaction)(
+ struct i2c_generic_hw_engine *engine,
+ struct i2c_generic_transaction_attributes *attributes);
+};
+
+struct i2c_generic_hw_engine {
+ struct i2c_hw_engine base;
+ const struct i2c_generic_hw_engine_funcs *funcs;
+};
+
+void dal_i2c_generic_hw_engine_construct(
+ struct i2c_generic_hw_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_generic_hw_engine_destruct(
+ struct i2c_generic_hw_engine *engine);
+enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
+ const struct engine *engine);
+bool dal_i2c_generic_hw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
new file mode 100644
index 000000000000..4b54fcfb28ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_hw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_hw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_DDC_HW;
+}
+
+bool dal_i2c_hw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_hw_engine *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_request_transaction_data request;
+
+ uint32_t transaction_timeout;
+
+ enum i2c_channel_operation_result operation_result;
+
+ bool result = false;
+
+ /* We need following:
+ * transaction length will not exceed
+ * the number of free bytes in HW buffer (minus one for address)*/
+
+ if (i2caux_request->payload.length >=
+ hw_engine->funcs->get_hw_buffer_available_size(hw_engine)) {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW;
+ return false;
+ }
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ /* [anaumov] in DAL2, there was no "return false" */
+ return false;
+ }
+
+ request.address = (uint8_t)i2caux_request->payload.address;
+ request.length = i2caux_request->payload.length;
+ request.data = i2caux_request->payload.data;
+
+ /* obtain timeout value before submitting request */
+
+ transaction_timeout = hw_engine->funcs->get_transaction_timeout(
+ hw_engine, i2caux_request->payload.length + 1);
+
+ hw_engine->base.funcs->submit_channel_request(
+ &hw_engine->base, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
+ (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
+ return false;
+ }
+
+ /* wait until transaction proceed */
+
+ operation_result = hw_engine->funcs->wait_on_operation_result(
+ hw_engine,
+ transaction_timeout,
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+
+ /* update transaction status */
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ result = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ }
+
+ if (result && (i2caux_request->operation == I2CAUX_TRANSACTION_READ)) {
+ struct i2c_reply_transaction_data reply;
+
+ reply.data = i2caux_request->payload.data;
+ reply.length = i2caux_request->payload.length;
+
+ hw_engine->base.funcs->
+ process_channel_reply(&hw_engine->base, &reply);
+ }
+
+ return result;
+}
+
+bool dal_i2c_hw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc)
+{
+ enum gpio_result result;
+ uint32_t current_speed;
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ engine->base.ddc = ddc;
+
+ current_speed = engine->funcs->get_speed(engine);
+
+ if (current_speed)
+ FROM_I2C_ENGINE(engine)->original_speed = current_speed;
+
+ return true;
+}
+/*
+ * @brief
+ * Queries in a loop for current engine status
+ * until retrieved status matches 'expected_result', or timeout occurs.
+ * Timeout given in microseconds
+ * and the status query frequency is also one per microsecond.
+ */
+enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result)
+{
+ enum i2c_channel_operation_result result;
+ uint32_t i = 0;
+
+ if (!timeout)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ do {
+ result = engine->base.funcs->get_channel_status(
+ &engine->base, NULL);
+
+ if (result != expected_result)
+ break;
+
+ udelay(1);
+
+ ++i;
+ } while (i < timeout);
+
+ return result;
+}
+
+void dal_i2c_hw_engine_construct(
+ struct i2c_hw_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2c_engine_construct(&engine->base, ctx);
+ engine->original_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
+ engine->default_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
+}
+
+void dal_i2c_hw_engine_destruct(
+ struct i2c_hw_engine *engine)
+{
+ dal_i2c_engine_destruct(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
new file mode 100644
index 000000000000..8936a994804a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_H__
+#define __DAL_I2C_HW_ENGINE_H__
+
+enum {
+ TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32
+};
+
+struct i2c_hw_engine;
+
+struct i2c_hw_engine_funcs {
+ uint32_t (*get_hw_buffer_available_size)(
+ const struct i2c_hw_engine *engine);
+ enum i2c_channel_operation_result (*wait_on_operation_result)(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result);
+ uint32_t (*get_transaction_timeout)(
+ const struct i2c_hw_engine *engine,
+ uint32_t length);
+};
+
+struct i2c_hw_engine {
+ struct i2c_engine base;
+ const struct i2c_hw_engine_funcs *funcs;
+
+ /* Values below are in kilohertz */
+ uint32_t original_speed;
+ uint32_t default_speed;
+};
+
+void dal_i2c_hw_engine_construct(
+ struct i2c_hw_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_hw_engine_destruct(
+ struct i2c_hw_engine *engine);
+
+enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result);
+
+bool dal_i2c_hw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc);
+
+bool dal_i2c_hw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+
+enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
+ const struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
new file mode 100644
index 000000000000..8e19bb629394
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+#define SCL false
+#define SDA true
+
+static inline bool read_bit_from_ddc(
+ struct ddc *ddc,
+ bool data_nor_clock)
+{
+ uint32_t value = 0;
+
+ if (data_nor_clock)
+ dal_gpio_get_value(ddc->pin_data, &value);
+ else
+ dal_gpio_get_value(ddc->pin_clock, &value);
+
+ return (value != 0);
+}
+
+static inline void write_bit_to_ddc(
+ struct ddc *ddc,
+ bool data_nor_clock,
+ bool bit)
+{
+ uint32_t value = bit ? 1 : 0;
+
+ if (data_nor_clock)
+ dal_gpio_set_value(ddc->pin_data, value);
+ else
+ dal_gpio_set_value(ddc->pin_clock, value);
+}
+
+static bool wait_for_scl_high(
+ struct dc_context *ctx,
+ struct ddc *ddc,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t scl_retry = 0;
+ uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4;
+
+ udelay(clock_delay_div_4);
+
+ /* 3 milliseconds delay
+ * to wake up some displays from "low power" state.
+ */
+
+ do {
+ if (read_bit_from_ddc(ddc, SCL))
+ return true;
+
+ udelay(clock_delay_div_4);
+
+ ++scl_retry;
+ } while (scl_retry <= scl_retry_max);
+
+ return false;
+}
+
+static bool start_sync(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t retry = 0;
+
+ /* The I2C communications start signal is:
+ * the SDA going low from high, while the SCL is high. */
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ udelay(clock_delay_div_4);
+
+ do {
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ if (!read_bit_from_ddc(ddc_handle, SDA)) {
+ ++retry;
+ continue;
+ }
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ break;
+
+ write_bit_to_ddc(ddc_handle, SDA, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ return true;
+ } while (retry <= I2C_SW_RETRIES);
+
+ return false;
+}
+
+static bool stop_sync(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t retry = 0;
+
+ /* The I2C communications stop signal is:
+ * the SDA going high from low, while the SCL is high. */
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ do {
+ udelay(clock_delay_div_4);
+
+ if (read_bit_from_ddc(ddc_handle, SDA))
+ return true;
+
+ ++retry;
+ } while (retry <= 2);
+
+ return false;
+}
+
+static bool write_byte(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t byte)
+{
+ int32_t shift = 7;
+ bool ack;
+
+ /* bits are transmitted serially, starting from MSB */
+
+ do {
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ --shift;
+ } while (shift >= 0);
+
+ /* The display sends ACK by preventing the SDA from going high
+ * after the SCL pulse we use to send our last data bit.
+ * If the SDA goes high after that bit, it's a NACK */
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ /* read ACK bit */
+
+ ack = !read_bit_from_ddc(ddc_handle, SDA);
+
+ udelay(clock_delay_div_4 << 1);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4 << 1);
+
+ return ack;
+}
+
+static bool read_byte(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t *byte,
+ bool more)
+{
+ int32_t shift = 7;
+
+ uint8_t data = 0;
+
+ /* The data bits are read from MSB to LSB;
+ * bit is read while SCL is high */
+
+ do {
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ if (read_bit_from_ddc(ddc_handle, SDA))
+ data |= (1 << shift);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4 << 1);
+
+ --shift;
+ } while (shift >= 0);
+
+ /* read only whole byte */
+
+ *byte = data;
+
+ udelay(clock_delay_div_4);
+
+ /* send the acknowledge bit:
+ * SDA low means ACK, SDA high means NACK */
+
+ write_bit_to_ddc(ddc_handle, SDA, !more);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ udelay(clock_delay_div_4);
+
+ return true;
+}
+
+static bool i2c_write(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t address,
+ uint32_t length,
+ const uint8_t *data)
+{
+ uint32_t i = 0;
+
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
+ return false;
+
+ while (i < length) {
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, data[i]))
+ return false;
+ ++i;
+ }
+
+ return true;
+}
+
+static bool i2c_read(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t address,
+ uint32_t length,
+ uint8_t *data)
+{
+ uint32_t i = 0;
+
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
+ return false;
+
+ while (i < length) {
+ if (!read_byte(ctx, ddc_handle, clock_delay_div_4, data + i,
+ i < length - 1))
+ return false;
+ ++i;
+ }
+
+ return true;
+}
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_SW;
+}
+
+bool dal_i2c_sw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_sw_engine *sw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = &sw_engine->base;
+
+ struct i2c_request_transaction_data request;
+ bool operation_succeeded = false;
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ /* in DAL2, there was no "return false" */
+ return false;
+ }
+
+ request.address = (uint8_t)i2caux_request->payload.address;
+ request.length = i2caux_request->payload.length;
+ request.data = i2caux_request->payload.data;
+
+ base->funcs->submit_channel_request(base, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) ||
+ (request.status == I2C_CHANNEL_OPERATION_FAILED))
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
+ else {
+ enum i2c_channel_operation_result operation_result;
+
+ do {
+ operation_result =
+ base->funcs->get_channel_status(base, NULL);
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ operation_succeeded = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ break;
+ }
+ } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+ }
+
+ return operation_succeeded;
+}
+
+uint32_t dal_i2c_sw_engine_get_speed(
+ const struct i2c_engine *engine)
+{
+ return FROM_I2C_ENGINE(engine)->speed;
+}
+
+void dal_i2c_sw_engine_set_speed(
+ struct i2c_engine *engine,
+ uint32_t speed)
+{
+ struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
+
+ ASSERT(speed);
+
+ sw_engine->speed = speed ? speed : I2CAUX_DEFAULT_I2C_SW_SPEED;
+
+ sw_engine->clock_delay = 1000 / sw_engine->speed;
+
+ if (sw_engine->clock_delay < 12)
+ sw_engine->clock_delay = 12;
+}
+
+bool dal_i2caux_i2c_sw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc)
+{
+ enum gpio_result result;
+
+ result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ engine->base.ddc = ddc;
+
+ return true;
+}
+
+void dal_i2c_sw_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *req)
+{
+ struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
+
+ struct ddc *ddc = engine->base.ddc;
+ uint16_t clock_delay_div_4 = sw_engine->clock_delay >> 2;
+
+ /* send sync (start / repeated start) */
+
+ bool result = start_sync(engine->base.ctx, ddc, clock_delay_div_4);
+
+ /* process payload */
+
+ if (result) {
+ switch (req->action) {
+ case I2CAUX_TRANSACTION_ACTION_I2C_WRITE:
+ case I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT:
+ result = i2c_write(engine->base.ctx, ddc, clock_delay_div_4,
+ req->address, req->length, req->data);
+ break;
+ case I2CAUX_TRANSACTION_ACTION_I2C_READ:
+ case I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT:
+ result = i2c_read(engine->base.ctx, ddc, clock_delay_div_4,
+ req->address, req->length, req->data);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ }
+
+ /* send stop if not 'mot' or operation failed */
+
+ if (!result ||
+ (req->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (req->action == I2CAUX_TRANSACTION_ACTION_I2C_READ))
+ if (!stop_sync(engine->base.ctx, ddc, clock_delay_div_4))
+ result = false;
+
+ req->status = result ?
+ I2C_CHANNEL_OPERATION_SUCCEEDED :
+ I2C_CHANNEL_OPERATION_FAILED;
+}
+
+enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes)
+{
+ /* No arbitration with VBIOS is performed since DCE 6.0 */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+void dal_i2c_sw_engine_destruct(
+ struct i2c_sw_engine *engine)
+{
+ dal_i2c_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **ptr)
+{
+ dal_i2c_sw_engine_destruct(FROM_I2C_ENGINE(*ptr));
+
+ kfree(*ptr);
+ *ptr = NULL;
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = dal_i2caux_i2c_sw_engine_acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static void release_engine(
+ struct engine *engine)
+{
+
+}
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+void dal_i2c_sw_engine_construct(
+ struct i2c_sw_engine *engine,
+ const struct i2c_sw_engine_create_arg *arg)
+{
+ dal_i2c_engine_construct(&engine->base, arg->ctx);
+ dal_i2c_sw_engine_set_speed(&engine->base, arg->default_speed);
+ engine->base.funcs = &i2c_engine_funcs;
+ engine->base.base.funcs = &engine_funcs;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_create(
+ const struct i2c_sw_engine_create_arg *arg)
+{
+ struct i2c_sw_engine *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_sw_engine), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dal_i2c_sw_engine_construct(engine, arg);
+ return &engine->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
new file mode 100644
index 000000000000..546f15b0d3f1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_H__
+#define __DAL_I2C_SW_ENGINE_H__
+
+enum {
+ I2C_SW_RETRIES = 10,
+ I2C_SW_SCL_READ_RETRIES = 128,
+ /* following value is in microseconds */
+ I2C_SW_TIMEOUT_DELAY = 3000
+};
+
+struct i2c_sw_engine;
+
+struct i2c_sw_engine {
+ struct i2c_engine base;
+ uint32_t clock_delay;
+ /* Values below are in KHz */
+ uint32_t speed;
+ uint32_t default_speed;
+};
+
+struct i2c_sw_engine_create_arg {
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+void dal_i2c_sw_engine_construct(
+ struct i2c_sw_engine *engine,
+ const struct i2c_sw_engine_create_arg *arg);
+
+bool dal_i2caux_i2c_sw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle);
+
+void dal_i2c_sw_engine_destruct(
+ struct i2c_sw_engine *engine);
+
+struct i2c_engine *dal_i2c_sw_engine_create(
+ const struct i2c_sw_engine_create_arg *arg);
+enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
+ const struct engine *engine);
+bool dal_i2c_sw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+uint32_t dal_i2c_sw_engine_get_speed(
+ const struct i2c_engine *engine);
+void dal_i2c_sw_engine_set_speed(
+ struct i2c_engine *ptr,
+ uint32_t speed);
+void dal_i2c_sw_engine_submit_channel_request(
+ struct i2c_engine *ptr,
+ struct i2c_request_transaction_data *req);
+enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
new file mode 100644
index 000000000000..e1593ffe5a2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "dc_bios_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2caux.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "engine.h"
+#include "i2c_engine.h"
+#include "aux_engine.h"
+
+/*
+ * This unit
+ */
+
+#include "dce80/i2caux_dce80.h"
+
+#include "dce100/i2caux_dce100.h"
+
+#include "dce110/i2caux_dce110.h"
+
+#include "dce112/i2caux_dce112.h"
+
+#include "dce120/i2caux_dce120.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/i2caux_dcn10.h"
+#endif
+
+#include "diagnostics/i2caux_diag.h"
+
+/*
+ * @brief
+ * Plain API, available publicly
+ */
+
+struct i2caux *dal_i2caux_create(
+ struct dc_context *ctx)
+{
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ return dal_i2caux_diag_fpga_create(ctx);
+ }
+
+ switch (ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ return dal_i2caux_dce80_create(ctx);
+ case DCE_VERSION_11_2:
+ return dal_i2caux_dce112_create(ctx);
+ case DCE_VERSION_11_0:
+ return dal_i2caux_dce110_create(ctx);
+ case DCE_VERSION_10_0:
+ return dal_i2caux_dce100_create(ctx);
+ case DCE_VERSION_12_0:
+ return dal_i2caux_dce120_create(ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ return dal_i2caux_dcn10_create(ctx);
+#endif
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+bool dal_i2caux_submit_i2c_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct i2c_command *cmd)
+{
+ struct i2c_engine *engine;
+ uint8_t index_of_payload = 0;
+ bool result;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!cmd) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ /*
+ * default will be SW, however there is a feature flag in adapter
+ * service that determines whether SW i2c_engine will be available or
+ * not, if sw i2c is not available we will fallback to hw. This feature
+ * flag is set to not creating sw i2c engine for every dce except dce80
+ * currently
+ */
+ switch (cmd->engine) {
+ case I2C_COMMAND_ENGINE_DEFAULT:
+ case I2C_COMMAND_ENGINE_SW:
+ /* try to acquire SW engine first,
+ * acquire HW engine if SW engine not available */
+ engine = i2caux->funcs->acquire_i2c_sw_engine(i2caux, ddc);
+
+ if (!engine)
+ engine = i2caux->funcs->acquire_i2c_hw_engine(
+ i2caux, ddc);
+ break;
+ case I2C_COMMAND_ENGINE_HW:
+ default:
+ /* try to acquire HW engine first,
+ * acquire SW engine if HW engine not available */
+ engine = i2caux->funcs->acquire_i2c_hw_engine(i2caux, ddc);
+
+ if (!engine)
+ engine = i2caux->funcs->acquire_i2c_sw_engine(
+ i2caux, ddc);
+ }
+
+ if (!engine)
+ return false;
+
+ engine->funcs->set_speed(engine, cmd->speed);
+
+ result = true;
+
+ while (index_of_payload < cmd->number_of_payloads) {
+ bool mot = (index_of_payload != cmd->number_of_payloads - 1);
+
+ struct i2c_payload *payload = cmd->payloads + index_of_payload;
+
+ struct i2caux_transaction_request request = { 0 };
+
+ request.operation = payload->write ?
+ I2CAUX_TRANSACTION_WRITE :
+ I2CAUX_TRANSACTION_READ;
+
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
+ request.payload.address = (payload->address << 1) |
+ !payload->write;
+ request.payload.length = payload->length;
+ request.payload.data = payload->data;
+
+ if (!engine->base.funcs->submit_request(
+ &engine->base, &request, mot)) {
+ result = false;
+ break;
+ }
+
+ ++index_of_payload;
+ }
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+
+ return result;
+}
+
+bool dal_i2caux_submit_aux_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct aux_command *cmd)
+{
+ struct aux_engine *engine;
+ uint8_t index_of_payload = 0;
+ bool result;
+ bool mot;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!cmd) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc);
+
+ if (!engine)
+ return false;
+
+ engine->delay = cmd->defer_delay;
+ engine->max_defer_write_retry = cmd->max_defer_write_retry;
+
+ result = true;
+
+ while (index_of_payload < cmd->number_of_payloads) {
+ struct aux_payload *payload = cmd->payloads + index_of_payload;
+ struct i2caux_transaction_request request = { 0 };
+
+ if (cmd->mot == I2C_MOT_UNDEF)
+ mot = (index_of_payload != cmd->number_of_payloads - 1);
+ else
+ mot = (cmd->mot == I2C_MOT_TRUE);
+
+ request.operation = payload->write ?
+ I2CAUX_TRANSACTION_WRITE :
+ I2CAUX_TRANSACTION_READ;
+
+ if (payload->i2c_over_aux) {
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
+
+ request.payload.address = (payload->address << 1) |
+ !payload->write;
+ } else {
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD;
+
+ request.payload.address = payload->address;
+ }
+
+ request.payload.length = payload->length;
+ request.payload.data = payload->data;
+
+ if (!engine->base.funcs->submit_request(
+ &engine->base, &request, mot)) {
+ result = false;
+ break;
+ }
+
+ ++index_of_payload;
+ }
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+
+ return result;
+}
+
+static bool get_hw_supported_ddc_line(
+ struct ddc *ddc,
+ enum gpio_ddc_line *line)
+{
+ enum gpio_ddc_line line_found;
+
+ *line = GPIO_DDC_LINE_UNKNOWN;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!ddc->hw_info.hw_supported)
+ return false;
+
+ line_found = dal_ddc_get_line(ddc);
+
+ if (line_found >= GPIO_DDC_LINE_COUNT)
+ return false;
+
+ *line = line_found;
+
+ return true;
+}
+
+void dal_i2caux_configure_aux(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ union aux_config cfg)
+{
+ struct aux_engine *engine =
+ i2caux->funcs->acquire_aux_engine(i2caux, ddc);
+
+ if (!engine)
+ return;
+
+ engine->funcs->configure(engine, cfg);
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+}
+
+void dal_i2caux_destroy(
+ struct i2caux **i2caux)
+{
+ if (!i2caux || !*i2caux) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ (*i2caux)->funcs->destroy(i2caux);
+
+ *i2caux = NULL;
+}
+
+/*
+ * @brief
+ * An utility function used by 'struct i2caux' and its descendants
+ */
+
+uint32_t dal_i2caux_get_reference_clock(
+ struct dc_bios *bios)
+{
+ struct dc_firmware_info info = { { 0 } };
+
+ if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
+ return 0;
+
+ return info.pll_info.crystal_frequency;
+}
+
+/*
+ * @brief
+ * i2caux
+ */
+
+enum {
+ /* following are expressed in KHz */
+ DEFAULT_I2C_SW_SPEED = 50,
+ DEFAULT_I2C_HW_SPEED = 50,
+
+ DEFAULT_I2C_SW_SPEED_100KHZ = 100,
+ DEFAULT_I2C_HW_SPEED_100KHZ = 100,
+
+ /* This is the timeout as defined in DP 1.2a,
+ * 2.3.4 "Detailed uPacket TX AUX CH State Description". */
+ AUX_TIMEOUT_PERIOD = 400,
+
+ /* Ideally, the SW timeout should be just above 550usec
+ * which is programmed in HW.
+ * But the SW timeout of 600usec is not reliable,
+ * because on some systems, delay_in_microseconds()
+ * returns faster than it should.
+ * EPR #379763: by trial-and-error on different systems,
+ * 700usec is the minimum reliable SW timeout for polling
+ * the AUX_SW_STATUS.AUX_SW_DONE bit.
+ * This timeout expires *only* when there is
+ * AUX Error or AUX Timeout conditions - not during normal operation.
+ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+ * at most within ~240usec. That means,
+ * increasing this timeout will not affect normal operation,
+ * and we'll timeout after
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * This timeout is especially important for
+ * resume from S3 and CTS. */
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+
+struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ enum gpio_ddc_line line;
+ struct i2c_engine *engine = NULL;
+
+ if (get_hw_supported_ddc_line(ddc, &line))
+ engine = i2caux->i2c_sw_engines[line];
+
+ if (!engine)
+ engine = i2caux->i2c_generic_sw_engine;
+
+ if (!engine)
+ return NULL;
+
+ if (!engine->base.funcs->acquire(&engine->base, ddc))
+ return NULL;
+
+ return engine;
+}
+
+struct aux_engine *dal_i2caux_acquire_aux_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ enum gpio_ddc_line line;
+ struct aux_engine *engine;
+
+ if (!get_hw_supported_ddc_line(ddc, &line))
+ return NULL;
+
+ engine = i2caux->aux_engines[line];
+
+ if (!engine)
+ return NULL;
+
+ if (!engine->base.funcs->acquire(&engine->base, ddc))
+ return NULL;
+
+ return engine;
+}
+
+void dal_i2caux_release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ engine->funcs->release_engine(engine);
+
+ dal_ddc_close(engine->ddc);
+
+ engine->ddc = NULL;
+}
+
+void dal_i2caux_construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx)
+{
+ uint32_t i = 0;
+
+ i2caux->ctx = ctx;
+ do {
+ i2caux->i2c_sw_engines[i] = NULL;
+ i2caux->i2c_hw_engines[i] = NULL;
+ i2caux->aux_engines[i] = NULL;
+
+ ++i;
+ } while (i < GPIO_DDC_LINE_COUNT);
+
+ i2caux->i2c_generic_sw_engine = NULL;
+ i2caux->i2c_generic_hw_engine = NULL;
+
+ i2caux->aux_timeout_period =
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD;
+
+ if (ctx->dce_version >= DCE_VERSION_11_2) {
+ i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED_100KHZ;
+ i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED_100KHZ;
+ } else {
+ i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED;
+ i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED;
+ }
+}
+
+void dal_i2caux_destruct(
+ struct i2caux *i2caux)
+{
+ uint32_t i = 0;
+
+ if (i2caux->i2c_generic_hw_engine)
+ i2caux->i2c_generic_hw_engine->funcs->destroy(
+ &i2caux->i2c_generic_hw_engine);
+
+ if (i2caux->i2c_generic_sw_engine)
+ i2caux->i2c_generic_sw_engine->funcs->destroy(
+ &i2caux->i2c_generic_sw_engine);
+
+ do {
+ if (i2caux->aux_engines[i])
+ i2caux->aux_engines[i]->funcs->destroy(
+ &i2caux->aux_engines[i]);
+
+ if (i2caux->i2c_hw_engines[i])
+ i2caux->i2c_hw_engines[i]->funcs->destroy(
+ &i2caux->i2c_hw_engines[i]);
+
+ if (i2caux->i2c_sw_engines[i])
+ i2caux->i2c_sw_engines[i]->funcs->destroy(
+ &i2caux->i2c_sw_engines[i]);
+
+ ++i;
+ } while (i < GPIO_DDC_LINE_COUNT);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
new file mode 100644
index 000000000000..64f51bb06915
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_H__
+#define __DAL_I2C_AUX_H__
+
+uint32_t dal_i2caux_get_reference_clock(
+ struct dc_bios *bios);
+
+struct i2caux;
+
+struct engine;
+
+struct i2caux_funcs {
+ void (*destroy)(struct i2caux **ptr);
+ struct i2c_engine * (*acquire_i2c_sw_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ struct i2c_engine * (*acquire_i2c_hw_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ struct aux_engine * (*acquire_aux_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ void (*release_engine)(
+ struct i2caux *i2caux,
+ struct engine *engine);
+};
+
+struct i2c_engine;
+struct aux_engine;
+
+struct i2caux {
+ struct dc_context *ctx;
+ const struct i2caux_funcs *funcs;
+ /* On ASIC we have certain amount of lines with HW DDC engine
+ * (4, 6, or maybe more in the future).
+ * For every such line, we create separate HW DDC engine
+ * (since we have these engines in HW) and separate SW DDC engine
+ * (to allow concurrent use of few lines).
+ * In similar way we have AUX engines. */
+
+ /* I2C SW engines, per DDC line.
+ * Only lines with HW DDC support will be initialized */
+ struct i2c_engine *i2c_sw_engines[GPIO_DDC_LINE_COUNT];
+
+ /* I2C HW engines, per DDC line.
+ * Only lines with HW DDC support will be initialized */
+ struct i2c_engine *i2c_hw_engines[GPIO_DDC_LINE_COUNT];
+
+ /* AUX engines, per DDC line.
+ * Only lines with HW AUX support will be initialized */
+ struct aux_engine *aux_engines[GPIO_DDC_LINE_COUNT];
+
+ /* For all other lines, we can use
+ * single instance of generic I2C HW engine
+ * (since in HW, there is single instance of it)
+ * or single instance of generic I2C SW engine.
+ * AUX is not supported for other lines. */
+
+ /* General-purpose I2C SW engine.
+ * Can be assigned dynamically to any line per transaction */
+ struct i2c_engine *i2c_generic_sw_engine;
+
+ /* General-purpose I2C generic HW engine.
+ * Can be assigned dynamically to almost any line per transaction */
+ struct i2c_engine *i2c_generic_hw_engine;
+
+ /* [anaumov] in DAL2, there is a Mutex */
+
+ uint32_t aux_timeout_period;
+
+ /* expressed in KHz */
+ uint32_t default_i2c_sw_speed;
+ uint32_t default_i2c_hw_speed;
+};
+
+void dal_i2caux_construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx);
+
+void dal_i2caux_release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine);
+
+void dal_i2caux_destruct(
+ struct i2caux *i2caux);
+
+void dal_i2caux_destroy(
+ struct i2caux **ptr);
+
+struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+
+struct aux_engine *dal_i2caux_acquire_aux_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
new file mode 100644
index 000000000000..39ee8eba3c31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef BW_FIXED_H_
+#define BW_FIXED_H_
+
+#define BW_FIXED_BITS_PER_FRACTIONAL_PART 24
+
+#define BW_FIXED_GET_INTEGER_PART(x) ((x) >> BW_FIXED_BITS_PER_FRACTIONAL_PART)
+struct bw_fixed {
+ int64_t value;
+};
+
+#define BW_FIXED_MIN_I32 \
+ (int64_t)(-(1LL << (63 - BW_FIXED_BITS_PER_FRACTIONAL_PART)))
+
+#define BW_FIXED_MAX_I32 \
+ (int64_t)((1ULL << (63 - BW_FIXED_BITS_PER_FRACTIONAL_PART)) - 1)
+
+static inline struct bw_fixed bw_min2(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ return (arg1.value <= arg2.value) ? arg1 : arg2;
+}
+
+static inline struct bw_fixed bw_max2(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ return (arg2.value <= arg1.value) ? arg1 : arg2;
+}
+
+static inline struct bw_fixed bw_min3(struct bw_fixed v1,
+ struct bw_fixed v2,
+ struct bw_fixed v3)
+{
+ return bw_min2(bw_min2(v1, v2), v3);
+}
+
+static inline struct bw_fixed bw_max3(struct bw_fixed v1,
+ struct bw_fixed v2,
+ struct bw_fixed v3)
+{
+ return bw_max2(bw_max2(v1, v2), v3);
+}
+
+struct bw_fixed bw_int_to_fixed_nonconst(int64_t value);
+static inline struct bw_fixed bw_int_to_fixed(int64_t value)
+{
+ if (__builtin_constant_p(value)) {
+ struct bw_fixed res;
+ BUILD_BUG_ON(value > BW_FIXED_MAX_I32 || value < BW_FIXED_MIN_I32);
+ res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
+ return res;
+ } else
+ return bw_int_to_fixed_nonconst(value);
+}
+
+static inline int32_t bw_fixed_to_int(struct bw_fixed value)
+{
+ return BW_FIXED_GET_INTEGER_PART(value.value);
+}
+
+struct bw_fixed bw_frc_to_fixed(int64_t num, int64_t denum);
+
+static inline struct bw_fixed fixed31_32_to_bw_fixed(int64_t raw)
+{
+ struct bw_fixed result = { 0 };
+
+ if (raw < 0) {
+ raw = -raw;
+ result.value = -(raw >> (32 - BW_FIXED_BITS_PER_FRACTIONAL_PART));
+ } else {
+ result.value = raw >> (32 - BW_FIXED_BITS_PER_FRACTIONAL_PART);
+ }
+
+ return result;
+}
+
+static inline struct bw_fixed bw_add(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+static inline struct bw_fixed bw_sub(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2);
+static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return bw_frc_to_fixed(arg1.value, arg2.value);
+}
+
+static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+ div64_u64_rem(arg1.value, arg2.value, &res.value);
+ return res;
+}
+
+struct bw_fixed bw_floor2(const struct bw_fixed arg, const struct bw_fixed significance);
+struct bw_fixed bw_ceil2(const struct bw_fixed arg, const struct bw_fixed significance);
+
+static inline bool bw_equ(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+static inline bool bw_neq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value != arg2.value;
+}
+
+static inline bool bw_leq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+static inline bool bw_meq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value >= arg2.value;
+}
+
+static inline bool bw_ltn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+static inline bool bw_mtn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value > arg2.value;
+}
+
+#endif //BW_FIXED_H_
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
new file mode 100644
index 000000000000..ebcf67b5fc57
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_CLOCK_SOURCE_H__
+#define __DC_CLOCK_SOURCE_H__
+
+#include "dc_types.h"
+#include "include/grph_object_id.h"
+#include "include/bios_parser_types.h"
+
+struct clock_source;
+
+struct spread_spectrum_data {
+ uint32_t percentage; /*> In unit of 0.01% or 0.001%*/
+ uint32_t percentage_divider; /*> 100 or 1000 */
+ uint32_t freq_range_khz;
+ uint32_t modulation_freq_hz;
+
+ struct spread_spectrum_flags flags;
+};
+
+struct delta_sigma_data {
+ uint32_t feedback_amount;
+ uint32_t nfrac_amount;
+ uint32_t ds_frac_size;
+ uint32_t ds_frac_amount;
+};
+
+/**
+ * Pixel Clock Parameters structure
+ * These parameters are required as input
+ * when calculating Pixel Clock Dividers for requested Pixel Clock
+ */
+struct pixel_clk_flags {
+ uint32_t ENABLE_SS:1;
+ uint32_t DISPLAY_BLANKED:1;
+ uint32_t PROGRAM_PIXEL_CLOCK:1;
+ uint32_t PROGRAM_ID_CLOCK:1;
+ uint32_t SUPPORT_YCBCR420:1;
+};
+
+/**
+ * Display Port HW De spread of Reference Clock related Parameters structure
+ * Store it once at boot for later usage
+ */
+struct csdp_ref_clk_ds_params {
+ bool hw_dso_n_dp_ref_clk;
+/* Flag for HW De Spread enabled (if enabled SS on DP Reference Clock)*/
+ uint32_t avg_dp_ref_clk_khz;
+/* Average DP Reference clock (in KHz)*/
+ uint32_t ss_percentage_on_dp_ref_clk;
+/* DP Reference clock SS percentage
+ * (not to be mixed with DP IDCLK SS from PLL Settings)*/
+ uint32_t ss_percentage_divider;
+/* DP Reference clock SS percentage divider */
+};
+
+struct pixel_clk_params {
+ uint32_t requested_pix_clk; /* in KHz */
+/*> Requested Pixel Clock
+ * (based on Video Timing standard used for requested mode)*/
+ uint32_t requested_sym_clk; /* in KHz */
+/*> Requested Sym Clock (relevant only for display port)*/
+ uint32_t dp_ref_clk; /* in KHz */
+/*> DP reference clock - calculated only for DP signal for specific cases*/
+ struct graphics_object_id encoder_object_id;
+/*> Encoder object Id - needed by VBIOS Exec table*/
+ enum signal_type signal_type;
+/*> signalType -> Encoder Mode - needed by VBIOS Exec table*/
+ enum controller_id controller_id;
+/*> ControllerId - which controller using this PLL*/
+ enum dc_color_depth color_depth;
+ struct csdp_ref_clk_ds_params de_spread_params;
+/*> de-spread info, relevant only for on-the-fly tune-up pixel rate*/
+ enum dc_pixel_encoding pixel_encoding;
+ struct pixel_clk_flags flags;
+};
+
+/**
+ * Pixel Clock Dividers structure with desired Pixel Clock
+ * (adjusted after VBIOS exec table),
+ * with actually calculated Clock and reference Crystal frequency
+ */
+struct pll_settings {
+ uint32_t actual_pix_clk;
+ uint32_t adjusted_pix_clk;
+ uint32_t calculated_pix_clk;
+ uint32_t vco_freq;
+ uint32_t reference_freq;
+ uint32_t reference_divider;
+ uint32_t feedback_divider;
+ uint32_t fract_feedback_divider;
+ uint32_t pix_clk_post_divider;
+ uint32_t ss_percentage;
+ bool use_external_clk;
+};
+
+struct calc_pll_clock_source_init_data {
+ struct dc_bios *bp;
+ uint32_t min_pix_clk_pll_post_divider;
+ uint32_t max_pix_clk_pll_post_divider;
+ uint32_t min_pll_ref_divider;
+ uint32_t max_pll_ref_divider;
+ uint32_t min_override_input_pxl_clk_pll_freq_khz;
+/* if not 0, override the firmware info */
+
+ uint32_t max_override_input_pxl_clk_pll_freq_khz;
+/* if not 0, override the firmware info */
+
+ uint32_t num_fract_fb_divider_decimal_point;
+/* number of decimal point for fractional feedback divider value */
+
+ uint32_t num_fract_fb_divider_decimal_point_precision;
+/* number of decimal point to round off for fractional feedback divider value*/
+ struct dc_context *ctx;
+
+};
+
+struct calc_pll_clock_source {
+ uint32_t ref_freq_khz;
+ uint32_t min_pix_clock_pll_post_divider;
+ uint32_t max_pix_clock_pll_post_divider;
+ uint32_t min_pll_ref_divider;
+ uint32_t max_pll_ref_divider;
+
+ uint32_t max_vco_khz;
+ uint32_t min_vco_khz;
+ uint32_t min_pll_input_freq_khz;
+ uint32_t max_pll_input_freq_khz;
+
+ uint32_t fract_fb_divider_decimal_points_num;
+ uint32_t fract_fb_divider_factor;
+ uint32_t fract_fb_divider_precision;
+ uint32_t fract_fb_divider_precision_factor;
+ struct dc_context *ctx;
+};
+
+struct clock_source_funcs {
+ bool (*cs_power_down)(
+ struct clock_source *);
+ bool (*program_pix_clk)(struct clock_source *,
+ struct pixel_clk_params *, struct pll_settings *);
+ uint32_t (*get_pix_clk_dividers)(
+ struct clock_source *,
+ struct pixel_clk_params *,
+ struct pll_settings *);
+ uint32_t (*get_pix_rate_in_hz)(
+ struct clock_source *,
+ struct pixel_clk_params *,
+ struct pll_settings *);
+};
+
+struct clock_source {
+ const struct clock_source_funcs *funcs;
+ struct dc_context *ctx;
+ enum clock_source_id id;
+ bool dp_clk_src;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
new file mode 100644
index 000000000000..bcb18f5e1e60
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMPRESSOR_H__
+#define __DAL_COMPRESSOR_H__
+
+#include "include/grph_object_id.h"
+#include "bios_parser_interface.h"
+
+enum fbc_compress_ratio {
+ FBC_COMPRESS_RATIO_INVALID = 0,
+ FBC_COMPRESS_RATIO_1TO1 = 1,
+ FBC_COMPRESS_RATIO_2TO1 = 2,
+ FBC_COMPRESS_RATIO_4TO1 = 4,
+ FBC_COMPRESS_RATIO_8TO1 = 8,
+};
+
+union fbc_physical_address {
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ } addr;
+ uint64_t quad_part;
+};
+
+struct compr_addr_and_pitch_params {
+ /* enum controller_id controller_id; */
+ uint32_t inst;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+};
+
+enum fbc_hw_max_resolution_supported {
+ FBC_MAX_X = 3840,
+ FBC_MAX_Y = 2400,
+ FBC_MAX_X_SG = 1920,
+ FBC_MAX_Y_SG = 1080,
+};
+
+struct compressor;
+
+struct compressor_funcs {
+
+ void (*power_up_fbc)(struct compressor *cp);
+ void (*enable_fbc)(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+ void (*disable_fbc)(struct compressor *cp);
+ void (*set_fbc_invalidation_triggers)(struct compressor *cp,
+ uint32_t fbc_trigger);
+ void (*surface_address_and_pitch)(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+ bool (*is_fbc_enabled_in_hw)(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+};
+struct compressor {
+ struct dc_context *ctx;
+ uint32_t attached_inst;
+ bool is_enabled;
+ const struct compressor_funcs *funcs;
+ union {
+ uint32_t raw;
+ struct {
+ uint32_t FBC_SUPPORT:1;
+ uint32_t FB_POOL:1;
+ uint32_t DYNAMIC_ALLOC:1;
+ uint32_t LPT_SUPPORT:1;
+ uint32_t LPT_MC_CONFIG:1;
+ uint32_t DUMMY_BACKEND:1;
+ uint32_t CLK_GATING_DISABLED:1;
+
+ } bits;
+ } options;
+
+ union fbc_physical_address compr_surface_address;
+
+ uint32_t embedded_panel_h_size;
+ uint32_t embedded_panel_v_size;
+ uint32_t memory_bus_width;
+ uint32_t banks_num;
+ uint32_t raw_size;
+ uint32_t channel_interleave_size;
+ uint32_t dram_channels_num;
+
+ uint32_t allocated_size;
+ uint32_t preferred_requested_size;
+ uint32_t lpt_channels_num;
+ enum fbc_compress_ratio min_compress_ratio;
+};
+
+struct fbc_input_info {
+ bool dynamic_fbc_buffer_alloc;
+ unsigned int source_view_width;
+ unsigned int source_view_height;
+ unsigned int num_of_active_targets;
+};
+
+
+struct fbc_requested_compressed_size {
+ unsigned int preferred_size;
+ unsigned int preferred_size_alignment;
+ unsigned int min_size;
+ unsigned int min_size_alignment;
+ union {
+ struct {
+ /* Above preferedSize must be allocated in FB pool */
+ unsigned int preferred_must_be_framebuffer_pool : 1;
+ /* Above minSize must be allocated in FB pool */
+ unsigned int min_must_be_framebuffer_pool : 1;
+ } bits;
+ unsigned int flags;
+ };
+};
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index fbdff3e02aa3..94fc31080fda 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -19,20 +19,32 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-#include "eventmanager.h"
-#include "power_state.h"
-#include "hardwaremanager.h"
-int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id);
+#ifndef _CORE_STATUS_H_
+#define _CORE_STATUS_H_
-int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id);
+enum dc_status {
+ DC_OK = 1,
-int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id);
+ DC_NO_CONTROLLER_RESOURCE = 2,
+ DC_NO_STREAM_ENG_RESOURCE = 3,
+ DC_NO_CLOCK_SOURCE_RESOURCE = 4,
+ DC_FAIL_CONTROLLER_VALIDATE = 5,
+ DC_FAIL_ENC_VALIDATE = 6,
+ DC_FAIL_ATTACH_SURFACES = 7,
+ DC_FAIL_DETACH_SURFACES = 8,
+ DC_FAIL_SURFACE_VALIDATE = 9,
+ DC_NO_DP_LINK_BANDWIDTH = 10,
+ DC_EXCEED_DONGLE_CAP = 11,
+ DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
+ DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
+ DC_FAIL_SCALING = 14,
+ DC_FAIL_DP_LINK_TRAINING = 15,
-int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip);
+ DC_ERROR_UNEXPECTED = -1
+};
-int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip);
+#endif /* _CORE_STATUS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
new file mode 100644
index 000000000000..b69f321e2ab6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _CORE_TYPES_H_
+#define _CORE_TYPES_H_
+
+#include "dc.h"
+#include "dce_calcs.h"
+#include "dcn_calcs.h"
+#include "ddc_service_types.h"
+#include "dc_bios_types.h"
+#include "mem_input.h"
+#include "hubp.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "mpc.h"
+#endif
+
+#define MAX_CLOCK_SOURCES 7
+
+void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
+ uint32_t controller_id);
+
+#include "grph_object_id.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "clock_source.h"
+#include "audio.h"
+#include "dm_pp_smu.h"
+
+
+/************ link *****************/
+struct link_init_data {
+ const struct dc *dc;
+ struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
+ uint32_t connector_index; /* this will be mapped to the HPD pins */
+ uint32_t link_index; /* this is mapped to DAL display_index
+ TODO: remove it when DC is complete. */
+};
+
+enum {
+ FREE_ACQUIRED_RESOURCE = 0,
+ KEEP_ACQUIRED_RESOURCE = 1,
+};
+
+struct dc_link *link_create(const struct link_init_data *init_params);
+void link_destroy(struct dc_link **link);
+
+enum dc_status dc_link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+
+void core_link_resume(struct dc_link *link);
+
+void core_link_enable_stream(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+/********** DAL Core*********************/
+#include "display_clock.h"
+#include "transform.h"
+#include "dpp.h"
+
+struct resource_pool;
+struct dc_state;
+struct resource_context;
+
+struct resource_funcs {
+ void (*destroy)(struct resource_pool **pool);
+ struct link_encoder *(*link_enc_create)(
+ const struct encoder_init_data *init);
+
+ enum dc_status (*validate_guaranteed)(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context);
+
+ bool (*validate_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context);
+
+ enum dc_status (*validate_global)(
+ struct dc *dc,
+ struct dc_state *context);
+
+ struct pipe_ctx *(*acquire_idle_pipe_for_layer)(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream);
+
+ enum dc_status (*validate_plane)(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+
+ enum dc_status (*add_stream_to_ctx)(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+};
+
+struct audio_support{
+ bool dp_audio;
+ bool hdmi_audio_on_dongle;
+ bool hdmi_audio_native;
+};
+
+#define NO_UNDERLAY_PIPE -1
+
+struct resource_pool {
+ struct mem_input *mis[MAX_PIPES];
+ struct hubp *hubps[MAX_PIPES];
+ struct input_pixel_processor *ipps[MAX_PIPES];
+ struct transform *transforms[MAX_PIPES];
+ struct dpp *dpps[MAX_PIPES];
+ struct output_pixel_processor *opps[MAX_PIPES];
+ struct timing_generator *timing_generators[MAX_PIPES];
+ struct stream_encoder *stream_enc[MAX_PIPES * 2];
+
+ struct mpc *mpc;
+ struct pp_smu_funcs_rv *pp_smu;
+ struct pp_smu_display_requirement_rv pp_smu_req;
+
+ unsigned int pipe_count;
+ unsigned int underlay_pipe_index;
+ unsigned int stream_enc_count;
+ unsigned int ref_clock_inKhz;
+
+ /*
+ * reserved clock source for DP
+ */
+ struct clock_source *dp_clock_source;
+
+ struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
+ unsigned int clk_src_count;
+
+ struct audio *audios[MAX_PIPES];
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+ struct display_clock *display_clock;
+ struct irq_service *irqs;
+
+ struct abm *abm;
+ struct dmcu *dmcu;
+
+ const struct resource_funcs *funcs;
+ const struct resource_caps *res_cap;
+};
+
+struct stream_resource {
+ struct output_pixel_processor *opp;
+ struct timing_generator *tg;
+ struct stream_encoder *stream_enc;
+ struct audio *audio;
+
+ struct pixel_clk_params pix_clk_params;
+ struct encoder_info_frame encoder_info_frame;
+};
+
+struct plane_resource {
+ struct scaler_data scl_data;
+ struct hubp *hubp;
+ struct mem_input *mi;
+ struct input_pixel_processor *ipp;
+ struct transform *xfm;
+ struct dpp *dpp;
+};
+
+struct pipe_ctx {
+ struct dc_plane_state *plane_state;
+ struct dc_stream_state *stream;
+
+ struct plane_resource plane_res;
+ struct stream_resource stream_res;
+
+ struct clock_source *clock_source;
+
+ struct pll_settings pll_settings;
+
+ uint8_t pipe_idx;
+
+ struct pipe_ctx *top_pipe;
+ struct pipe_ctx *bottom_pipe;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct _vcs_dpi_display_dlg_regs_st dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
+#endif
+ struct dwbc *dwbc;
+};
+
+struct resource_context {
+ struct pipe_ctx pipe_ctx[MAX_PIPES];
+ bool is_stream_enc_acquired[MAX_PIPES * 2];
+ bool is_audio_acquired[MAX_PIPES];
+ uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
+ uint8_t dp_clock_source_ref_count;
+};
+
+struct dce_bw_output {
+ bool cpuc_state_change_enable;
+ bool cpup_state_change_enable;
+ bool stutter_mode_enable;
+ bool nbp_state_change_enable;
+ bool all_displays_in_sync;
+ struct dce_watermarks urgent_wm_ns[MAX_PIPES];
+ struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES];
+ struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES];
+ int sclk_khz;
+ int sclk_deep_sleep_khz;
+ int yclk_khz;
+ int dispclk_khz;
+ int blackout_recovery_time_us;
+};
+
+struct dcn_bw_clocks {
+ int dispclk_khz;
+ bool dppclk_div;
+ int dcfclk_khz;
+ int dcfclk_deep_sleep_khz;
+ int fclk_khz;
+ int dram_ccm_us;
+ int min_active_dram_ccm_us;
+};
+
+struct dcn_bw_output {
+ struct dcn_bw_clocks cur_clk;
+ struct dcn_bw_clocks calc_clk;
+ struct dcn_watermark_set watermarks;
+};
+
+union bw_context {
+ struct dcn_bw_output dcn;
+ struct dce_bw_output dce;
+};
+
+struct dc_state {
+ struct dc_stream_state *streams[MAX_PIPES];
+ struct dc_stream_status stream_status[MAX_PIPES];
+ uint8_t stream_count;
+
+ struct resource_context res_ctx;
+
+ /* The output from BW and WM calculations. */
+ union bw_context bw;
+
+ /* Note: these are big structures, do *not* put on stack! */
+ struct dm_pp_display_configuration pp_display_cfg;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_bw_internal_vars dcn_bw_vars;
+#endif
+
+ struct display_clock *dis_clk;
+
+ struct kref refcount;
+};
+
+#endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/custom_float.h b/drivers/gpu/drm/amd/display/dc/inc/custom_float.h
new file mode 100644
index 000000000000..f57239672216
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/custom_float.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef CUSTOM_FLOAT_H_
+#define CUSTOM_FLOAT_H_
+
+#include "bw_fixed.h"
+#include "hw_shared.h"
+#include "opp.h"
+
+
+bool convert_to_custom_float_format(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ uint32_t *result);
+
+
+#endif //CUSTOM_FLOAT_H_
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
new file mode 100644
index 000000000000..0bf73b742f1f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DDC_SERVICE_H__
+#define __DAL_DDC_SERVICE_H__
+
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+
+#define EDID_SEGMENT_SIZE 256
+
+/* Address range from 0x00 to 0x1F.*/
+#define DP_ADAPTOR_TYPE2_SIZE 0x20
+#define DP_ADAPTOR_TYPE2_REG_ID 0x10
+#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
+/* Identifies adaptor as Dual-mode adaptor */
+#define DP_ADAPTOR_TYPE2_ID 0xA0
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
+/* kHZ*/
+#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
+/* kHZ*/
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+
+#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
+
+struct ddc_service;
+struct graphics_object_id;
+enum ddc_result;
+struct av_sync_data;
+struct dp_receiver_id_info;
+
+struct i2c_payloads;
+struct aux_payloads;
+
+void dal_ddc_i2c_payloads_add(
+ struct i2c_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write);
+
+void dal_ddc_aux_payloads_add(
+ struct aux_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write);
+
+struct ddc_service_init_data {
+ struct graphics_object_id id;
+ struct dc_context *ctx;
+ struct dc_link *link;
+};
+
+struct ddc_service *dal_ddc_service_create(
+ struct ddc_service_init_data *ddc_init_data);
+
+void dal_ddc_service_destroy(struct ddc_service **ddc);
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc);
+
+void dal_ddc_service_set_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type);
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc);
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap);
+
+bool dal_ddc_service_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t len);
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t len);
+
+void dal_ddc_service_write_scdc_data(
+ struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble);
+
+void dal_ddc_service_read_scdc_data(
+ struct ddc_service *ddc_service);
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type);
+
+void dal_ddc_service_set_ddc_pin(
+ struct ddc_service *ddc_service,
+ struct ddc *ddc);
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service);
+
+uint32_t get_defer_delay(struct ddc_service *ddc);
+
+#endif /* __DAL_DDC_SERVICE_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
new file mode 100644
index 000000000000..616c73e2b0bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_H__
+#define __DC_LINK_DP_H__
+
+#define LINK_TRAINING_ATTEMPTS 4
+#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+
+struct dc_link;
+struct dc_stream_state;
+struct dc_link_settings;
+
+bool dp_hbr_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting);
+
+bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+
+void decide_link_settings(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+
+bool perform_link_training_with_retries(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts);
+
+bool is_mst_supported(struct dc_link *link);
+
+void detect_dp_sink_caps(struct dc_link *link);
+
+void detect_edp_sink_caps(struct dc_link *link);
+
+bool is_dp_active_dongle(const struct dc_link *link);
+
+void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
+
+#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
new file mode 100644
index 000000000000..ae2399f16d1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2015-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * Bandwidth and Watermark calculations interface.
+ * (Refer to "DCEx_mode_support.xlsm" from Perforce.)
+ */
+#ifndef __DCE_CALCS_H__
+#define __DCE_CALCS_H__
+
+#include "bw_fixed.h"
+
+struct pipe_ctx;
+struct dc;
+struct dc_state;
+struct dce_bw_output;
+
+enum bw_calcs_version {
+ BW_CALCS_VERSION_INVALID,
+ BW_CALCS_VERSION_CARRIZO,
+ BW_CALCS_VERSION_POLARIS10,
+ BW_CALCS_VERSION_POLARIS11,
+ BW_CALCS_VERSION_STONEY,
+ BW_CALCS_VERSION_VEGA10
+};
+
+/*******************************************************************************
+ * There are three types of input into Calculations:
+ * 1. per-DCE static values - these are "hardcoded" properties of the DCEIP
+ * 2. board-level values - these are generally coming from VBIOS parser
+ * 3. mode/configuration values - depending Mode, Scaling number of Displays etc.
+ ******************************************************************************/
+
+enum bw_defines {
+ //Common
+ bw_def_no = 0,
+ bw_def_none = 0,
+ bw_def_yes = 1,
+ bw_def_ok = 1,
+ bw_def_high = 2,
+ bw_def_mid = 1,
+ bw_def_low = 0,
+
+ //Internal
+ bw_defs_start = 255,
+ bw_def_underlay422,
+ bw_def_underlay420_luma,
+ bw_def_underlay420_chroma,
+ bw_def_underlay444,
+ bw_def_graphics,
+ bw_def_display_write_back420_luma,
+ bw_def_display_write_back420_chroma,
+ bw_def_portrait,
+ bw_def_hsr_mtn_4,
+ bw_def_hsr_mtn_h_taps,
+ bw_def_ceiling__h_taps_div_4___meq_hsr,
+ bw_def_invalid_linear_or_stereo_mode,
+ bw_def_invalid_rotation_or_bpp_or_stereo,
+ bw_def_vsr_mtn_v_taps,
+ bw_def_vsr_mtn_4,
+ bw_def_auto,
+ bw_def_manual,
+ bw_def_exceeded_allowed_maximum_sclk,
+ bw_def_exceeded_allowed_page_close_open,
+ bw_def_exceeded_allowed_outstanding_pte_req_queue_size,
+ bw_def_exceeded_allowed_maximum_bw,
+ bw_def_landscape,
+
+ //Panning and bezel
+ bw_def_any_lines,
+
+ //Underlay mode
+ bw_def_underlay_only,
+ bw_def_blended,
+ bw_def_blend,
+
+ //Stereo mode
+ bw_def_mono,
+ bw_def_side_by_side,
+ bw_def_top_bottom,
+
+ //Underlay surface type
+ bw_def_420,
+ bw_def_422,
+ bw_def_444,
+
+ //Tiling mode
+ bw_def_linear,
+ bw_def_tiled,
+ bw_def_array_linear_general,
+ bw_def_array_linear_aligned,
+ bw_def_rotated_micro_tiling,
+ bw_def_display_micro_tiling,
+
+ //Memory type
+ bw_def_gddr5,
+ bw_def_hbm,
+
+ //Voltage
+ bw_def_high_no_nbp_state_change,
+ bw_def_0_72,
+ bw_def_0_8,
+ bw_def_0_9,
+
+ bw_def_notok = -1,
+ bw_def_na = -1
+};
+
+struct bw_calcs_dceip {
+ enum bw_calcs_version version;
+ bool large_cursor;
+ uint32_t cursor_max_outstanding_group_num;
+ bool dmif_pipe_en_fbc_chunk_tracker;
+ struct bw_fixed dmif_request_buffer_size;
+ uint32_t lines_interleaved_into_lb;
+ uint32_t low_power_tiling_mode;
+ uint32_t chunk_width;
+ uint32_t number_of_graphics_pipes;
+ uint32_t number_of_underlay_pipes;
+ bool display_write_back_supported;
+ bool argb_compression_support;
+ struct bw_fixed underlay_vscaler_efficiency6_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency8_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency10_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency12_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency6_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency8_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency10_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency12_bit_per_component;
+ struct bw_fixed alpha_vscaler_efficiency;
+ uint32_t max_dmif_buffer_allocated;
+ uint32_t graphics_dmif_size;
+ uint32_t underlay_luma_dmif_size;
+ uint32_t underlay_chroma_dmif_size;
+ bool pre_downscaler_enabled;
+ bool underlay_downscale_prefetch_enabled;
+ struct bw_fixed lb_write_pixels_per_dispclk;
+ struct bw_fixed lb_size_per_component444;
+ bool graphics_lb_nodownscaling_multi_line_prefetching;
+ struct bw_fixed stutter_and_dram_clock_state_change_gated_before_cursor;
+ struct bw_fixed underlay420_luma_lb_size_per_component;
+ struct bw_fixed underlay420_chroma_lb_size_per_component;
+ struct bw_fixed underlay422_lb_size_per_component;
+ struct bw_fixed cursor_chunk_width;
+ struct bw_fixed cursor_dcp_buffer_lines;
+ struct bw_fixed underlay_maximum_width_efficient_for_tiling;
+ struct bw_fixed underlay_maximum_height_efficient_for_tiling;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+ struct bw_fixed minimum_outstanding_pte_request_limit;
+ struct bw_fixed maximum_total_outstanding_pte_requests_allowed_by_saw;
+ bool limit_excessive_outstanding_dmif_requests;
+ struct bw_fixed linear_mode_line_request_alternation_slice;
+ uint32_t scatter_gather_lines_of_pte_prefetching_in_linear_mode;
+ uint32_t display_write_back420_luma_mcifwr_buffer_size;
+ uint32_t display_write_back420_chroma_mcifwr_buffer_size;
+ struct bw_fixed request_efficiency;
+ struct bw_fixed dispclk_per_request;
+ struct bw_fixed dispclk_ramping_factor;
+ struct bw_fixed display_pipe_throughput_factor;
+ uint32_t scatter_gather_pte_request_rows_in_tiling_mode;
+ struct bw_fixed mcifwr_all_surfaces_burst_time;
+};
+
+struct bw_calcs_vbios {
+ enum bw_defines memory_type;
+ uint32_t dram_channel_width_in_bits;
+ uint32_t number_of_dram_channels;
+ uint32_t number_of_dram_banks;
+ struct bw_fixed low_yclk; /*m_hz*/
+ struct bw_fixed mid_yclk; /*m_hz*/
+ struct bw_fixed high_yclk; /*m_hz*/
+ struct bw_fixed low_sclk; /*m_hz*/
+ struct bw_fixed mid1_sclk; /*m_hz*/
+ struct bw_fixed mid2_sclk; /*m_hz*/
+ struct bw_fixed mid3_sclk; /*m_hz*/
+ struct bw_fixed mid4_sclk; /*m_hz*/
+ struct bw_fixed mid5_sclk; /*m_hz*/
+ struct bw_fixed mid6_sclk; /*m_hz*/
+ struct bw_fixed high_sclk; /*m_hz*/
+ struct bw_fixed low_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed mid_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed high_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed low_voltage_max_phyclk;
+ struct bw_fixed mid_voltage_max_phyclk;
+ struct bw_fixed high_voltage_max_phyclk;
+ struct bw_fixed data_return_bus_width;
+ struct bw_fixed trc;
+ struct bw_fixed dmifmc_urgent_latency;
+ struct bw_fixed stutter_self_refresh_exit_latency;
+ struct bw_fixed stutter_self_refresh_entry_latency;
+ struct bw_fixed nbp_state_change_latency;
+ struct bw_fixed mcifwrmc_urgent_latency;
+ bool scatter_gather_enable;
+ struct bw_fixed down_spread_percentage;
+ uint32_t cursor_width;
+ uint32_t average_compression_rate;
+ uint32_t number_of_request_slots_gmc_reserves_for_dmif_per_channel;
+ struct bw_fixed blackout_duration;
+ struct bw_fixed maximum_blackout_recovery_time;
+};
+
+/*******************************************************************************
+ * Temporary data structure(s).
+ ******************************************************************************/
+#define maximum_number_of_surfaces 12
+/*Units : MHz, us */
+
+struct bw_calcs_data {
+ /* data for all displays */
+ uint32_t number_of_displays;
+ enum bw_defines underlay_surface_type;
+ enum bw_defines panning_and_bezel_adjustment;
+ enum bw_defines graphics_tiling_mode;
+ uint32_t graphics_lb_bpc;
+ uint32_t underlay_lb_bpc;
+ enum bw_defines underlay_tiling_mode;
+ enum bw_defines d0_underlay_mode;
+ bool d1_display_write_back_dwb_enable;
+ enum bw_defines d1_underlay_mode;
+
+ bool cpup_state_change_enable;
+ bool cpuc_state_change_enable;
+ bool nbp_state_change_enable;
+ bool stutter_mode_enable;
+ uint32_t y_clk_level;
+ uint32_t sclk_level;
+ uint32_t number_of_underlay_surfaces;
+ uint32_t number_of_dram_wrchannels;
+ uint32_t chunk_request_delay;
+ uint32_t number_of_dram_channels;
+ enum bw_defines underlay_micro_tile_mode;
+ enum bw_defines graphics_micro_tile_mode;
+ struct bw_fixed max_phyclk;
+ struct bw_fixed dram_efficiency;
+ struct bw_fixed src_width_after_surface_type;
+ struct bw_fixed src_height_after_surface_type;
+ struct bw_fixed hsr_after_surface_type;
+ struct bw_fixed vsr_after_surface_type;
+ struct bw_fixed src_width_after_rotation;
+ struct bw_fixed src_height_after_rotation;
+ struct bw_fixed hsr_after_rotation;
+ struct bw_fixed vsr_after_rotation;
+ struct bw_fixed source_height_pixels;
+ struct bw_fixed hsr_after_stereo;
+ struct bw_fixed vsr_after_stereo;
+ struct bw_fixed source_width_in_lb;
+ struct bw_fixed lb_line_pitch;
+ struct bw_fixed underlay_maximum_source_efficient_for_tiling;
+ struct bw_fixed num_lines_at_frame_start;
+ struct bw_fixed min_dmif_size_in_time;
+ struct bw_fixed min_mcifwr_size_in_time;
+ struct bw_fixed total_requests_for_dmif_size;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting;
+ struct bw_fixed useful_pte_per_pte_request;
+ struct bw_fixed scatter_gather_pte_request_rows;
+ struct bw_fixed scatter_gather_row_height;
+ struct bw_fixed scatter_gather_pte_requests_in_vblank;
+ struct bw_fixed inefficient_linear_pitch_in_bytes;
+ struct bw_fixed cursor_total_data;
+ struct bw_fixed cursor_total_request_groups;
+ struct bw_fixed scatter_gather_total_pte_requests;
+ struct bw_fixed scatter_gather_total_pte_request_groups;
+ struct bw_fixed tile_width_in_pixels;
+ struct bw_fixed dmif_total_number_of_data_request_page_close_open;
+ struct bw_fixed mcifwr_total_number_of_data_request_page_close_open;
+ struct bw_fixed bytes_per_page_close_open;
+ struct bw_fixed mcifwr_total_page_close_open_time;
+ struct bw_fixed total_requests_for_adjusted_dmif_size;
+ struct bw_fixed total_dmifmc_urgent_trips;
+ struct bw_fixed total_dmifmc_urgent_latency;
+ struct bw_fixed total_display_reads_required_data;
+ struct bw_fixed total_display_reads_required_dram_access_data;
+ struct bw_fixed total_display_writes_required_data;
+ struct bw_fixed total_display_writes_required_dram_access_data;
+ struct bw_fixed display_reads_required_data;
+ struct bw_fixed display_reads_required_dram_access_data;
+ struct bw_fixed dmif_total_page_close_open_time;
+ struct bw_fixed min_cursor_memory_interface_buffer_size_in_time;
+ struct bw_fixed min_read_buffer_size_in_time;
+ struct bw_fixed display_reads_time_for_data_transfer;
+ struct bw_fixed display_writes_time_for_data_transfer;
+ struct bw_fixed dmif_required_dram_bandwidth;
+ struct bw_fixed mcifwr_required_dram_bandwidth;
+ struct bw_fixed required_dmifmc_urgent_latency_for_page_close_open;
+ struct bw_fixed required_mcifmcwr_urgent_latency;
+ struct bw_fixed required_dram_bandwidth_gbyte_per_second;
+ struct bw_fixed dram_bandwidth;
+ struct bw_fixed dmif_required_sclk;
+ struct bw_fixed mcifwr_required_sclk;
+ struct bw_fixed required_sclk;
+ struct bw_fixed downspread_factor;
+ struct bw_fixed v_scaler_efficiency;
+ struct bw_fixed scaler_limits_factor;
+ struct bw_fixed display_pipe_pixel_throughput;
+ struct bw_fixed total_dispclk_required_with_ramping;
+ struct bw_fixed total_dispclk_required_without_ramping;
+ struct bw_fixed total_read_request_bandwidth;
+ struct bw_fixed total_write_request_bandwidth;
+ struct bw_fixed dispclk_required_for_total_read_request_bandwidth;
+ struct bw_fixed total_dispclk_required_with_ramping_with_request_bandwidth;
+ struct bw_fixed total_dispclk_required_without_ramping_with_request_bandwidth;
+ struct bw_fixed dispclk;
+ struct bw_fixed blackout_recovery_time;
+ struct bw_fixed min_pixels_per_data_fifo_entry;
+ struct bw_fixed sclk_deep_sleep;
+ struct bw_fixed chunk_request_time;
+ struct bw_fixed cursor_request_time;
+ struct bw_fixed line_source_pixels_transfer_time;
+ struct bw_fixed dmifdram_access_efficiency;
+ struct bw_fixed mcifwrdram_access_efficiency;
+ struct bw_fixed total_average_bandwidth_no_compression;
+ struct bw_fixed total_average_bandwidth;
+ struct bw_fixed total_stutter_cycle_duration;
+ struct bw_fixed stutter_burst_time;
+ struct bw_fixed time_in_self_refresh;
+ struct bw_fixed stutter_efficiency;
+ struct bw_fixed worst_number_of_trips_to_memory;
+ struct bw_fixed immediate_flip_time;
+ struct bw_fixed latency_for_non_dmif_clients;
+ struct bw_fixed latency_for_non_mcifwr_clients;
+ struct bw_fixed dmifmc_urgent_latency_supported_in_high_sclk_and_yclk;
+ struct bw_fixed nbp_state_dram_speed_change_margin;
+ struct bw_fixed display_reads_time_for_data_transfer_and_urgent_latency;
+ struct bw_fixed dram_speed_change_margin;
+ struct bw_fixed min_vblank_dram_speed_change_margin;
+ struct bw_fixed min_stutter_refresh_duration;
+ uint32_t total_stutter_dmif_buffer_size;
+ uint32_t total_bytes_requested;
+ uint32_t min_stutter_dmif_buffer_size;
+ uint32_t num_stutter_bursts;
+ struct bw_fixed v_blank_nbp_state_dram_speed_change_latency_supported;
+ struct bw_fixed nbp_state_dram_speed_change_latency_supported;
+ bool fbc_en[maximum_number_of_surfaces];
+ bool lpt_en[maximum_number_of_surfaces];
+ bool displays_match_flag[maximum_number_of_surfaces];
+ bool use_alpha[maximum_number_of_surfaces];
+ bool orthogonal_rotation[maximum_number_of_surfaces];
+ bool enable[maximum_number_of_surfaces];
+ bool access_one_channel_only[maximum_number_of_surfaces];
+ bool scatter_gather_enable_for_pipe[maximum_number_of_surfaces];
+ bool interlace_mode[maximum_number_of_surfaces];
+ bool display_pstate_change_enable[maximum_number_of_surfaces];
+ bool line_buffer_prefetch[maximum_number_of_surfaces];
+ uint32_t bytes_per_pixel[maximum_number_of_surfaces];
+ uint32_t max_chunks_non_fbc_mode[maximum_number_of_surfaces];
+ uint32_t lb_bpc[maximum_number_of_surfaces];
+ uint32_t output_bpphdmi[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr2[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr3[maximum_number_of_surfaces];
+ enum bw_defines stereo_mode[maximum_number_of_surfaces];
+ struct bw_fixed dmif_buffer_transfer_time[maximum_number_of_surfaces];
+ struct bw_fixed displays_with_same_mode[maximum_number_of_surfaces];
+ struct bw_fixed stutter_dmif_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed stutter_refresh_duration[maximum_number_of_surfaces];
+ struct bw_fixed stutter_exit_watermark[maximum_number_of_surfaces];
+ struct bw_fixed stutter_entry_watermark[maximum_number_of_surfaces];
+ struct bw_fixed h_total[maximum_number_of_surfaces];
+ struct bw_fixed v_total[maximum_number_of_surfaces];
+ struct bw_fixed pixel_rate[maximum_number_of_surfaces];
+ struct bw_fixed src_width[maximum_number_of_surfaces];
+ struct bw_fixed pitch_in_pixels[maximum_number_of_surfaces];
+ struct bw_fixed pitch_in_pixels_after_surface_type[maximum_number_of_surfaces];
+ struct bw_fixed src_height[maximum_number_of_surfaces];
+ struct bw_fixed scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed h_taps[maximum_number_of_surfaces];
+ struct bw_fixed v_taps[maximum_number_of_surfaces];
+ struct bw_fixed h_scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed v_scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed rotation_angle[maximum_number_of_surfaces];
+ struct bw_fixed compression_rate[maximum_number_of_surfaces];
+ struct bw_fixed hsr[maximum_number_of_surfaces];
+ struct bw_fixed vsr[maximum_number_of_surfaces];
+ struct bw_fixed source_width_rounded_up_to_chunks[maximum_number_of_surfaces];
+ struct bw_fixed source_width_pixels[maximum_number_of_surfaces];
+ struct bw_fixed source_height_rounded_up_to_chunks[maximum_number_of_surfaces];
+ struct bw_fixed display_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed request_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed bytes_per_request[maximum_number_of_surfaces];
+ struct bw_fixed useful_bytes_per_request[maximum_number_of_surfaces];
+ struct bw_fixed lines_interleaved_in_mem_access[maximum_number_of_surfaces];
+ struct bw_fixed latency_hiding_lines[maximum_number_of_surfaces];
+ struct bw_fixed lb_partitions[maximum_number_of_surfaces];
+ struct bw_fixed lb_partitions_max[maximum_number_of_surfaces];
+ struct bw_fixed dispclk_required_with_ramping[maximum_number_of_surfaces];
+ struct bw_fixed dispclk_required_without_ramping[maximum_number_of_surfaces];
+ struct bw_fixed data_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed outstanding_chunk_request_limit[maximum_number_of_surfaces];
+ struct bw_fixed urgent_watermark[maximum_number_of_surfaces];
+ struct bw_fixed nbp_state_change_watermark[maximum_number_of_surfaces];
+ struct bw_fixed v_filter_init[maximum_number_of_surfaces];
+ struct bw_fixed stutter_cycle_duration[maximum_number_of_surfaces];
+ struct bw_fixed average_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed average_bandwidth_no_compression[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_pte_request_limit[maximum_number_of_surfaces];
+ struct bw_fixed lb_size_per_component[maximum_number_of_surfaces];
+ struct bw_fixed memory_chunk_size_in_bytes[maximum_number_of_surfaces];
+ struct bw_fixed pipe_chunk_size_in_bytes[maximum_number_of_surfaces];
+ struct bw_fixed number_of_trips_to_memory_for_getting_apte_row[maximum_number_of_surfaces];
+ struct bw_fixed adjusted_data_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed adjusted_data_buffer_size_in_memory[maximum_number_of_surfaces];
+ struct bw_fixed pixels_per_data_fifo_entry[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_pte_requests_in_row[maximum_number_of_surfaces];
+ struct bw_fixed pte_request_per_chunk[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_page_width[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_page_height[maximum_number_of_surfaces];
+ struct bw_fixed lb_lines_in_per_line_out_in_beginning_of_frame[maximum_number_of_surfaces];
+ struct bw_fixed lb_lines_in_per_line_out_in_middle_of_frame[maximum_number_of_surfaces];
+ struct bw_fixed cursor_width_pixels[maximum_number_of_surfaces];
+ struct bw_fixed minimum_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed maximum_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed minimum_latency_hiding_with_cursor[maximum_number_of_surfaces];
+ struct bw_fixed maximum_latency_hiding_with_cursor[maximum_number_of_surfaces];
+ struct bw_fixed src_pixels_for_first_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_pixels_for_last_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_data_for_first_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_data_for_last_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed active_time[maximum_number_of_surfaces];
+ struct bw_fixed horizontal_blank_and_chunk_granularity_factor[maximum_number_of_surfaces];
+ struct bw_fixed cursor_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed v_blank_dram_speed_change_margin[maximum_number_of_surfaces];
+ uint32_t num_displays_with_margin[3][8];
+ struct bw_fixed dmif_burst_time[3][8];
+ struct bw_fixed mcifwr_burst_time[3][8];
+ struct bw_fixed line_source_transfer_time[maximum_number_of_surfaces][3][8];
+ struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
+ struct bw_fixed min_dram_speed_change_margin[3][8];
+ struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
+ struct bw_fixed blackout_duration_margin[3][8];
+ struct bw_fixed dispclk_required_for_blackout_duration[3][8];
+ struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
+ struct bw_fixed dmif_required_sclk_for_urgent_latency[6];
+};
+
+/**
+ * Initialize structures with data which will NOT change at runtime.
+ */
+void bw_calcs_init(
+ struct bw_calcs_dceip *bw_dceip,
+ struct bw_calcs_vbios *bw_vbios,
+ struct hw_asic_id asic_id);
+
+/**
+ * Return:
+ * true - Display(s) configuration supported.
+ * In this case 'calcs_output' contains data for HW programming
+ * false - Display(s) configuration not supported (not enough bandwidth).
+ */
+bool bw_calcs(
+ struct dc_context *ctx,
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ const struct pipe_ctx *pipe,
+ int pipe_count,
+ struct dce_bw_output *calcs_output);
+
+#endif /* __BANDWIDTH_CALCS_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
new file mode 100644
index 000000000000..1e231f6de732
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * Bandwidth and Watermark calculations interface.
+ * (Refer to "DCEx_mode_support.xlsm" from Perforce.)
+ */
+#ifndef __DCN_CALCS_H__
+#define __DCN_CALCS_H__
+
+#include "bw_fixed.h"
+#include "display_clock.h"
+#include "../dml/display_mode_lib.h"
+
+struct dc;
+struct dc_state;
+
+/*******************************************************************************
+ * DCN data structures.
+ ******************************************************************************/
+
+#define number_of_planes 6
+#define number_of_planes_minus_one 5
+#define number_of_states 4
+#define number_of_states_plus_one 5
+
+#define ddr4_dram_width 64
+#define ddr4_dram_factor_single_Channel 16
+enum dcn_bw_defs {
+ dcn_bw_v_min0p65,
+ dcn_bw_v_mid0p72,
+ dcn_bw_v_nom0p8,
+ dcn_bw_v_max0p9,
+ dcn_bw_v_max0p91,
+ dcn_bw_no_support = 5,
+ dcn_bw_yes,
+ dcn_bw_hor,
+ dcn_bw_vert,
+ dcn_bw_override,
+ dcn_bw_rgb_sub_64,
+ dcn_bw_rgb_sub_32,
+ dcn_bw_rgb_sub_16,
+ dcn_bw_no,
+ dcn_bw_sw_linear,
+ dcn_bw_sw_4_kb_d,
+ dcn_bw_sw_4_kb_d_x,
+ dcn_bw_sw_64_kb_d,
+ dcn_bw_sw_64_kb_d_t,
+ dcn_bw_sw_64_kb_d_x,
+ dcn_bw_sw_var_d,
+ dcn_bw_sw_var_d_x,
+ dcn_bw_yuv420_sub_8,
+ dcn_bw_sw_4_kb_s,
+ dcn_bw_sw_4_kb_s_x,
+ dcn_bw_sw_64_kb_s,
+ dcn_bw_sw_64_kb_s_t,
+ dcn_bw_sw_64_kb_s_x,
+ dcn_bw_writeback,
+ dcn_bw_444,
+ dcn_bw_dp,
+ dcn_bw_420,
+ dcn_bw_hdmi,
+ dcn_bw_sw_var_s,
+ dcn_bw_sw_var_s_x,
+ dcn_bw_yuv420_sub_10,
+ dcn_bw_supported_in_v_active,
+ dcn_bw_supported_in_v_blank,
+ dcn_bw_not_supported,
+ dcn_bw_na,
+ dcn_bw_encoder_8bpc,
+ dcn_bw_encoder_10bpc,
+ dcn_bw_encoder_12bpc,
+ dcn_bw_encoder_16bpc,
+};
+
+/*bounding box parameters*/
+/*mode parameters*/
+/*system configuration*/
+/* display configuration*/
+struct dcn_bw_internal_vars {
+ float voltage[number_of_states_plus_one + 1];
+ float max_dispclk[number_of_states_plus_one + 1];
+ float max_dppclk[number_of_states_plus_one + 1];
+ float dcfclk_per_state[number_of_states_plus_one + 1];
+ float phyclk_per_state[number_of_states_plus_one + 1];
+ float fabric_and_dram_bandwidth_per_state[number_of_states_plus_one + 1];
+ float sr_exit_time;
+ float sr_enter_plus_exit_time;
+ float dram_clock_change_latency;
+ float urgent_latency;
+ float write_back_latency;
+ float percent_of_ideal_drambw_received_after_urg_latency;
+ float dcfclkv_max0p9;
+ float dcfclkv_nom0p8;
+ float dcfclkv_mid0p72;
+ float dcfclkv_min0p65;
+ float max_dispclk_vmax0p9;
+ float max_dppclk_vmax0p9;
+ float max_dispclk_vnom0p8;
+ float max_dppclk_vnom0p8;
+ float max_dispclk_vmid0p72;
+ float max_dppclk_vmid0p72;
+ float max_dispclk_vmin0p65;
+ float max_dppclk_vmin0p65;
+ float socclk;
+ float fabric_and_dram_bandwidth_vmax0p9;
+ float fabric_and_dram_bandwidth_vnom0p8;
+ float fabric_and_dram_bandwidth_vmid0p72;
+ float fabric_and_dram_bandwidth_vmin0p65;
+ float round_trip_ping_latency_cycles;
+ float urgent_out_of_order_return_per_channel;
+ float number_of_channels;
+ float vmm_page_size;
+ float return_bus_width;
+ float rob_buffer_size_in_kbyte;
+ float det_buffer_size_in_kbyte;
+ float dpp_output_buffer_pixels;
+ float opp_output_buffer_lines;
+ float pixel_chunk_size_in_kbyte;
+ float pte_chunk_size;
+ float meta_chunk_size;
+ float writeback_chunk_size;
+ enum dcn_bw_defs odm_capability;
+ enum dcn_bw_defs dsc_capability;
+ float line_buffer_size;
+ enum dcn_bw_defs is_line_buffer_bpp_fixed;
+ float line_buffer_fixed_bpp;
+ float max_line_buffer_lines;
+ float writeback_luma_buffer_size;
+ float writeback_chroma_buffer_size;
+ float max_num_dpp;
+ float max_num_writeback;
+ float max_dchub_topscl_throughput;
+ float max_pscl_tolb_throughput;
+ float max_lb_tovscl_throughput;
+ float max_vscl_tohscl_throughput;
+ float max_hscl_ratio;
+ float max_vscl_ratio;
+ float max_hscl_taps;
+ float max_vscl_taps;
+ float under_scan_factor;
+ float phyclkv_max0p9;
+ float phyclkv_nom0p8;
+ float phyclkv_mid0p72;
+ float phyclkv_min0p65;
+ float pte_buffer_size_in_requests;
+ float dispclk_ramping_margin;
+ float downspreading;
+ float max_inter_dcn_tile_repeaters;
+ enum dcn_bw_defs can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ enum dcn_bw_defs bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+ int mode;
+ float viewport_width[number_of_planes_minus_one + 1];
+ float htotal[number_of_planes_minus_one + 1];
+ float vtotal[number_of_planes_minus_one + 1];
+ float v_sync_plus_back_porch[number_of_planes_minus_one + 1];
+ float vactive[number_of_planes_minus_one + 1];
+ float pixel_clock[number_of_planes_minus_one + 1]; /*MHz*/
+ float viewport_height[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs dcc_enable[number_of_planes_minus_one + 1];
+ float dcc_rate[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_scan[number_of_planes_minus_one + 1];
+ float lb_bit_per_pixel[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_pixel_format[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_surface_mode[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output_format[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output_deep_color[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output[number_of_planes_minus_one + 1];
+ float scaler_rec_out_width[number_of_planes_minus_one + 1];
+ float scaler_recout_height[number_of_planes_minus_one + 1];
+ float underscan_output[number_of_planes_minus_one + 1];
+ float interlace_output[number_of_planes_minus_one + 1];
+ float override_hta_ps[number_of_planes_minus_one + 1];
+ float override_vta_ps[number_of_planes_minus_one + 1];
+ float override_hta_pschroma[number_of_planes_minus_one + 1];
+ float override_vta_pschroma[number_of_planes_minus_one + 1];
+ float urgent_latency_support_us[number_of_planes_minus_one + 1];
+ float h_ratio[number_of_planes_minus_one + 1];
+ float v_ratio[number_of_planes_minus_one + 1];
+ float htaps[number_of_planes_minus_one + 1];
+ float vtaps[number_of_planes_minus_one + 1];
+ float hta_pschroma[number_of_planes_minus_one + 1];
+ float vta_pschroma[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs pte_enable;
+ enum dcn_bw_defs synchronized_vblank;
+ enum dcn_bw_defs ta_pscalculation;
+ int voltage_override_level;
+ int number_of_active_planes;
+ int voltage_level;
+ enum dcn_bw_defs immediate_flip_supported;
+ float dcfclk;
+ float max_phyclk;
+ float fabric_and_dram_bandwidth;
+ float dpp_per_plane_per_ratio[1 + 1][number_of_planes_minus_one + 1];
+ enum dcn_bw_defs dispclk_dppclk_support_per_ratio[1 + 1];
+ float required_dispclk_per_ratio[1 + 1];
+ enum dcn_bw_defs error_message[1 + 1];
+ int dispclk_dppclk_ratio;
+ float dpp_per_plane[number_of_planes_minus_one + 1];
+ float det_buffer_size_y[number_of_planes_minus_one + 1];
+ float det_buffer_size_c[number_of_planes_minus_one + 1];
+ float swath_height_y[number_of_planes_minus_one + 1];
+ float swath_height_c[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs final_error_message;
+ float frequency;
+ float header_line;
+ float header;
+ enum dcn_bw_defs voltage_override;
+ enum dcn_bw_defs allow_different_hratio_vratio;
+ float acceptable_quality_hta_ps;
+ float acceptable_quality_vta_ps;
+ float no_of_dpp[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_width_yper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_height_yper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_height_cper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float urgent_latency_support_us_per_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_ywith_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_cwith_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float required_prefetch_pixel_data_bw_with_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_ywithout_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_cwithout_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float required_prefetch_pixel_data_bw_without_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ enum dcn_bw_defs prefetch_supported_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs prefetch_supported_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs v_ratio_in_prefetch_supported_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs v_ratio_in_prefetch_supported_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ float required_dispclk[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs dispclk_dppclk_support[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs total_available_pipes_support[number_of_states_plus_one + 1][1 + 1];
+ float total_number_of_active_dpp[number_of_states_plus_one + 1][1 + 1];
+ float total_number_of_dcc_active_dpp[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs urgent_latency_support[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs mode_support_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs mode_support_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ float return_bw_per_state[number_of_states_plus_one + 1];
+ enum dcn_bw_defs dio_support[number_of_states_plus_one + 1];
+ float urgent_round_trip_and_out_of_order_latency_per_state[number_of_states_plus_one + 1];
+ enum dcn_bw_defs rob_support[number_of_states_plus_one + 1];
+ enum dcn_bw_defs bandwidth_support[number_of_states_plus_one + 1];
+ float prefetch_bw[number_of_planes_minus_one + 1];
+ float meta_pte_bytes_per_frame[number_of_planes_minus_one + 1];
+ float meta_row_bytes[number_of_planes_minus_one + 1];
+ float dpte_bytes_per_row[number_of_planes_minus_one + 1];
+ float prefetch_lines_y[number_of_planes_minus_one + 1];
+ float prefetch_lines_c[number_of_planes_minus_one + 1];
+ float max_num_sw_y[number_of_planes_minus_one + 1];
+ float max_num_sw_c[number_of_planes_minus_one + 1];
+ float line_times_for_prefetch[number_of_planes_minus_one + 1];
+ float lines_for_meta_pte_with_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_pte_without_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_and_dpte_row_with_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_and_dpte_row_without_immediate_flip[number_of_planes_minus_one + 1];
+ float min_dppclk_using_single_dpp[number_of_planes_minus_one + 1];
+ float swath_width_ysingle_dpp[number_of_planes_minus_one + 1];
+ float byte_per_pixel_in_dety[number_of_planes_minus_one + 1];
+ float byte_per_pixel_in_detc[number_of_planes_minus_one + 1];
+ float number_of_dpp_required_for_det_and_lb_size[number_of_planes_minus_one + 1];
+ float required_phyclk[number_of_planes_minus_one + 1];
+ float read256_block_height_y[number_of_planes_minus_one + 1];
+ float read256_block_width_y[number_of_planes_minus_one + 1];
+ float read256_block_height_c[number_of_planes_minus_one + 1];
+ float read256_block_width_c[number_of_planes_minus_one + 1];
+ float max_swath_height_y[number_of_planes_minus_one + 1];
+ float max_swath_height_c[number_of_planes_minus_one + 1];
+ float min_swath_height_y[number_of_planes_minus_one + 1];
+ float min_swath_height_c[number_of_planes_minus_one + 1];
+ float read_bandwidth[number_of_planes_minus_one + 1];
+ float write_bandwidth[number_of_planes_minus_one + 1];
+ float pscl_factor[number_of_planes_minus_one + 1];
+ float pscl_factor_chroma[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs scale_ratio_support;
+ enum dcn_bw_defs source_format_pixel_and_scan_support;
+ float total_read_bandwidth_consumed_gbyte_per_second;
+ float total_write_bandwidth_consumed_gbyte_per_second;
+ float total_bandwidth_consumed_gbyte_per_second;
+ enum dcn_bw_defs dcc_enabled_in_any_plane;
+ float return_bw_todcn_per_state;
+ float critical_point;
+ enum dcn_bw_defs writeback_latency_support;
+ float required_output_bw;
+ float total_number_of_active_writeback;
+ enum dcn_bw_defs total_available_writeback_support;
+ float maximum_swath_width;
+ float number_of_dpp_required_for_det_size;
+ float number_of_dpp_required_for_lb_size;
+ float min_dispclk_using_single_dpp;
+ float min_dispclk_using_dual_dpp;
+ enum dcn_bw_defs viewport_size_support;
+ float swath_width_granularity_y;
+ float rounded_up_max_swath_size_bytes_y;
+ float swath_width_granularity_c;
+ float rounded_up_max_swath_size_bytes_c;
+ float lines_in_det_luma;
+ float lines_in_det_chroma;
+ float effective_lb_latency_hiding_source_lines_luma;
+ float effective_lb_latency_hiding_source_lines_chroma;
+ float effective_detlb_lines_luma;
+ float effective_detlb_lines_chroma;
+ float projected_dcfclk_deep_sleep;
+ float meta_req_height_y;
+ float meta_req_width_y;
+ float meta_surface_width_y;
+ float meta_surface_height_y;
+ float meta_pte_bytes_per_frame_y;
+ float meta_row_bytes_y;
+ float macro_tile_block_size_bytes_y;
+ float macro_tile_block_height_y;
+ float data_pte_req_height_y;
+ float data_pte_req_width_y;
+ float dpte_bytes_per_row_y;
+ float meta_req_height_c;
+ float meta_req_width_c;
+ float meta_surface_width_c;
+ float meta_surface_height_c;
+ float meta_pte_bytes_per_frame_c;
+ float meta_row_bytes_c;
+ float macro_tile_block_size_bytes_c;
+ float macro_tile_block_height_c;
+ float macro_tile_block_width_c;
+ float data_pte_req_height_c;
+ float data_pte_req_width_c;
+ float dpte_bytes_per_row_c;
+ float v_init_y;
+ float max_partial_sw_y;
+ float v_init_c;
+ float max_partial_sw_c;
+ float dst_x_after_scaler;
+ float dst_y_after_scaler;
+ float time_calc;
+ float v_update_offset[number_of_planes_minus_one + 1];
+ float total_repeater_delay;
+ float v_update_width[number_of_planes_minus_one + 1];
+ float v_ready_offset[number_of_planes_minus_one + 1];
+ float time_setup;
+ float extra_latency;
+ float maximum_vstartup;
+ float bw_available_for_immediate_flip;
+ float total_immediate_flip_bytes[number_of_planes_minus_one + 1];
+ float time_for_meta_pte_with_immediate_flip;
+ float time_for_meta_pte_without_immediate_flip;
+ float time_for_meta_and_dpte_row_with_immediate_flip;
+ float time_for_meta_and_dpte_row_without_immediate_flip;
+ float line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ float line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ float maximum_read_bandwidth_with_prefetch_with_immediate_flip;
+ float maximum_read_bandwidth_with_prefetch_without_immediate_flip;
+ float voltage_level_with_immediate_flip;
+ float voltage_level_without_immediate_flip;
+ float total_number_of_active_dpp_per_ratio[1 + 1];
+ float byte_per_pix_dety;
+ float byte_per_pix_detc;
+ float read256_bytes_block_height_y;
+ float read256_bytes_block_width_y;
+ float read256_bytes_block_height_c;
+ float read256_bytes_block_width_c;
+ float maximum_swath_height_y;
+ float maximum_swath_height_c;
+ float minimum_swath_height_y;
+ float minimum_swath_height_c;
+ float swath_width;
+ float prefetch_bandwidth[number_of_planes_minus_one + 1];
+ float v_init_pre_fill_y[number_of_planes_minus_one + 1];
+ float v_init_pre_fill_c[number_of_planes_minus_one + 1];
+ float max_num_swath_y[number_of_planes_minus_one + 1];
+ float max_num_swath_c[number_of_planes_minus_one + 1];
+ float prefill_y[number_of_planes_minus_one + 1];
+ float prefill_c[number_of_planes_minus_one + 1];
+ float v_startup[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs allow_dram_clock_change_during_vblank[number_of_planes_minus_one + 1];
+ float allow_dram_self_refresh_during_vblank[number_of_planes_minus_one + 1];
+ float v_ratio_prefetch_y[number_of_planes_minus_one + 1];
+ float v_ratio_prefetch_c[number_of_planes_minus_one + 1];
+ float destination_lines_for_prefetch[number_of_planes_minus_one + 1];
+ float destination_lines_to_request_vm_inv_blank[number_of_planes_minus_one + 1];
+ float destination_lines_to_request_row_in_vblank[number_of_planes_minus_one + 1];
+ float min_ttuv_blank[number_of_planes_minus_one + 1];
+ float byte_per_pixel_dety[number_of_planes_minus_one + 1];
+ float byte_per_pixel_detc[number_of_planes_minus_one + 1];
+ float swath_width_y[number_of_planes_minus_one + 1];
+ float lines_in_dety[number_of_planes_minus_one + 1];
+ float lines_in_dety_rounded_down_to_swath[number_of_planes_minus_one + 1];
+ float lines_in_detc[number_of_planes_minus_one + 1];
+ float lines_in_detc_rounded_down_to_swath[number_of_planes_minus_one + 1];
+ float full_det_buffering_time_y[number_of_planes_minus_one + 1];
+ float full_det_buffering_time_c[number_of_planes_minus_one + 1];
+ float active_dram_clock_change_latency_margin[number_of_planes_minus_one + 1];
+ float v_blank_dram_clock_change_latency_margin[number_of_planes_minus_one + 1];
+ float dcfclk_deep_sleep_per_plane[number_of_planes_minus_one + 1];
+ float read_bandwidth_plane_luma[number_of_planes_minus_one + 1];
+ float read_bandwidth_plane_chroma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_luma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_chroma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_luma_prefetch[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_chroma_prefetch[number_of_planes_minus_one + 1];
+ float pixel_pte_bytes_per_row[number_of_planes_minus_one + 1];
+ float meta_pte_bytes_frame[number_of_planes_minus_one + 1];
+ float meta_row_byte[number_of_planes_minus_one + 1];
+ float prefetch_source_lines_y[number_of_planes_minus_one + 1];
+ float prefetch_source_lines_c[number_of_planes_minus_one + 1];
+ float pscl_throughput[number_of_planes_minus_one + 1];
+ float pscl_throughput_chroma[number_of_planes_minus_one + 1];
+ float output_bpphdmi[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr2[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr3[number_of_planes_minus_one + 1];
+ float max_vstartup_lines[number_of_planes_minus_one + 1];
+ float dispclk_with_ramping;
+ float dispclk_without_ramping;
+ float dppclk_using_single_dpp_luma;
+ float dppclk_using_single_dpp;
+ float dppclk_using_single_dpp_chroma;
+ enum dcn_bw_defs odm_capable;
+ float dispclk;
+ float dppclk;
+ float return_bandwidth_to_dcn;
+ enum dcn_bw_defs dcc_enabled_any_plane;
+ float return_bw;
+ float critical_compression;
+ float total_data_read_bandwidth;
+ float total_active_dpp;
+ float total_dcc_active_dpp;
+ float urgent_round_trip_and_out_of_order_latency;
+ float last_pixel_of_line_extra_watermark;
+ float data_fabric_line_delivery_time_luma;
+ float data_fabric_line_delivery_time_chroma;
+ float urgent_extra_latency;
+ float urgent_watermark;
+ float ptemeta_urgent_watermark;
+ float dram_clock_change_watermark;
+ float total_active_writeback;
+ float writeback_dram_clock_change_watermark;
+ float min_full_det_buffering_time;
+ float frame_time_for_min_full_det_buffering_time;
+ float average_read_bandwidth_gbyte_per_second;
+ float part_of_burst_that_fits_in_rob;
+ float stutter_burst_time;
+ float stutter_efficiency_not_including_vblank;
+ float smallest_vblank;
+ float v_blank_time;
+ float stutter_efficiency;
+ float dcf_clk_deep_sleep;
+ float stutter_exit_watermark;
+ float stutter_enter_plus_exit_watermark;
+ float effective_det_plus_lb_lines_luma;
+ float urgent_latency_support_us_luma;
+ float effective_det_plus_lb_lines_chroma;
+ float urgent_latency_support_us_chroma;
+ float min_urgent_latency_support_us;
+ float non_urgent_latency_tolerance;
+ float block_height256_bytes_y;
+ float block_height256_bytes_c;
+ float meta_request_width_y;
+ float meta_surf_width_y;
+ float meta_surf_height_y;
+ float meta_pte_bytes_frame_y;
+ float meta_row_byte_y;
+ float macro_tile_size_byte_y;
+ float macro_tile_height_y;
+ float pixel_pte_req_height_y;
+ float pixel_pte_req_width_y;
+ float pixel_pte_bytes_per_row_y;
+ float meta_request_width_c;
+ float meta_surf_width_c;
+ float meta_surf_height_c;
+ float meta_pte_bytes_frame_c;
+ float meta_row_byte_c;
+ float macro_tile_size_bytes_c;
+ float macro_tile_height_c;
+ float pixel_pte_req_height_c;
+ float pixel_pte_req_width_c;
+ float pixel_pte_bytes_per_row_c;
+ float max_partial_swath_y;
+ float max_partial_swath_c;
+ float t_calc;
+ float next_prefetch_mode;
+ float v_startup_lines;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2;
+ enum dcn_bw_defs v_ratio_prefetch_more_than4;
+ enum dcn_bw_defs destination_line_times_for_prefetch_less_than2;
+ float prefetch_mode;
+ float dstx_after_scaler;
+ float dsty_after_scaler;
+ float v_update_offset_pix;
+ float total_repeater_delay_time;
+ float v_update_width_pix;
+ float v_ready_offset_pix;
+ float t_setup;
+ float t_wait;
+ float bandwidth_available_for_immediate_flip;
+ float tot_immediate_flip_bytes;
+ float max_rd_bandwidth;
+ float time_for_fetching_meta_pte;
+ float time_for_fetching_row_in_vblank;
+ float lines_to_request_prefetch_pixel_data;
+ float required_prefetch_pix_data_bw;
+ enum dcn_bw_defs prefetch_mode_supported;
+ float active_dp_ps;
+ float lb_latency_hiding_source_lines_y;
+ float lb_latency_hiding_source_lines_c;
+ float effective_lb_latency_hiding_y;
+ float effective_lb_latency_hiding_c;
+ float dpp_output_buffer_lines_y;
+ float dpp_output_buffer_lines_c;
+ float dppopp_buffering_y;
+ float max_det_buffering_time_y;
+ float active_dram_clock_change_latency_margin_y;
+ float dppopp_buffering_c;
+ float max_det_buffering_time_c;
+ float active_dram_clock_change_latency_margin_c;
+ float writeback_dram_clock_change_latency_margin;
+ float min_active_dram_clock_change_margin;
+ float v_blank_of_min_active_dram_clock_change_margin;
+ float second_min_active_dram_clock_change_margin;
+ float min_vblank_dram_clock_change_margin;
+ float dram_clock_change_margin;
+ float dram_clock_change_support;
+ float wr_bandwidth;
+ float max_used_bw;
+};
+
+struct dcn_soc_bounding_box {
+ float sr_exit_time; /*us*/
+ float sr_enter_plus_exit_time; /*us*/
+ float urgent_latency; /*us*/
+ float write_back_latency; /*us*/
+ float percent_of_ideal_drambw_received_after_urg_latency; /*%*/
+ int max_request_size; /*bytes*/
+ float dcfclkv_max0p9; /*MHz*/
+ float dcfclkv_nom0p8; /*MHz*/
+ float dcfclkv_mid0p72; /*MHz*/
+ float dcfclkv_min0p65; /*MHz*/
+ float max_dispclk_vmax0p9; /*MHz*/
+ float max_dispclk_vmid0p72; /*MHz*/
+ float max_dispclk_vnom0p8; /*MHz*/
+ float max_dispclk_vmin0p65; /*MHz*/
+ float max_dppclk_vmax0p9; /*MHz*/
+ float max_dppclk_vnom0p8; /*MHz*/
+ float max_dppclk_vmid0p72; /*MHz*/
+ float max_dppclk_vmin0p65; /*MHz*/
+ float socclk; /*MHz*/
+ float fabric_and_dram_bandwidth_vmax0p9; /*GB/s*/
+ float fabric_and_dram_bandwidth_vnom0p8; /*GB/s*/
+ float fabric_and_dram_bandwidth_vmid0p72; /*GB/s*/
+ float fabric_and_dram_bandwidth_vmin0p65; /*GB/s*/
+ float phyclkv_max0p9; /*MHz*/
+ float phyclkv_nom0p8; /*MHz*/
+ float phyclkv_mid0p72; /*MHz*/
+ float phyclkv_min0p65; /*MHz*/
+ float downspreading; /*%*/
+ int round_trip_ping_latency_cycles; /*DCFCLK Cycles*/
+ int urgent_out_of_order_return_per_channel; /*bytes*/
+ int number_of_channels;
+ int vmm_page_size; /*bytes*/
+ float dram_clock_change_latency; /*us*/
+ int return_bus_width; /*bytes*/
+ float percent_disp_bw_limit; /*%*/
+};
+extern const struct dcn_soc_bounding_box dcn10_soc_defaults;
+
+struct dcn_ip_params {
+ float rob_buffer_size_in_kbyte;
+ float det_buffer_size_in_kbyte;
+ float dpp_output_buffer_pixels;
+ float opp_output_buffer_lines;
+ float pixel_chunk_size_in_kbyte;
+ enum dcn_bw_defs pte_enable;
+ int pte_chunk_size; /*kbytes*/
+ int meta_chunk_size; /*kbytes*/
+ int writeback_chunk_size; /*kbytes*/
+ enum dcn_bw_defs odm_capability;
+ enum dcn_bw_defs dsc_capability;
+ int line_buffer_size; /*bit*/
+ int max_line_buffer_lines;
+ enum dcn_bw_defs is_line_buffer_bpp_fixed;
+ int line_buffer_fixed_bpp;
+ int writeback_luma_buffer_size; /*kbytes*/
+ int writeback_chroma_buffer_size; /*kbytes*/
+ int max_num_dpp;
+ int max_num_writeback;
+ int max_dchub_topscl_throughput; /*pixels/dppclk*/
+ int max_pscl_tolb_throughput; /*pixels/dppclk*/
+ int max_lb_tovscl_throughput; /*pixels/dppclk*/
+ int max_vscl_tohscl_throughput; /*pixels/dppclk*/
+ float max_hscl_ratio;
+ float max_vscl_ratio;
+ int max_hscl_taps;
+ int max_vscl_taps;
+ int pte_buffer_size_in_requests;
+ float dispclk_ramping_margin; /*%*/
+ float under_scan_factor;
+ int max_inter_dcn_tile_repeaters;
+ enum dcn_bw_defs can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ enum dcn_bw_defs bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+ int dcfclk_cstate_latency;
+};
+extern const struct dcn_ip_params dcn10_ip_defaults;
+
+bool dcn_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
+unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+ struct clocks_value *clocks);
+
+void dcn_bw_update_from_pplib(struct dc *dc);
+void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_bw_sync_calcs_and_dml(struct dc *dc);
+
+#endif /* __DCN_CALCS_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
new file mode 100644
index 000000000000..c93b9b9a817c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -0,0 +1,48 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_ABM_H__
+#define __DC_ABM_H__
+
+#include "dm_services_types.h"
+
+struct abm {
+ struct dc_context *ctx;
+ const struct abm_funcs *funcs;
+};
+
+struct abm_funcs {
+ void (*abm_init)(struct abm *abm);
+ bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
+ bool (*set_abm_immediate_disable)(struct abm *abm);
+ bool (*init_backlight)(struct abm *abm);
+ bool (*set_backlight_level)(struct abm *abm,
+ unsigned int backlight_level,
+ unsigned int frame_ramp,
+ unsigned int controller_id);
+ unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+ bool (*is_dmcu_initialized)(struct abm *abm);
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
new file mode 100644
index 000000000000..925204f49717
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUDIO_H__
+#define __DAL_AUDIO_H__
+
+#include "audio_types.h"
+
+struct audio;
+
+struct audio_funcs {
+
+ bool (*endpoint_valid)(struct audio *audio);
+
+ void (*hw_init)(struct audio *audio);
+
+ void (*az_enable)(struct audio *audio);
+
+ void (*az_disable)(struct audio *audio);
+
+ void (*az_configure)(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info);
+
+ void (*wall_dto_setup)(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info);
+
+ void (*destroy)(struct audio **audio);
+};
+
+struct audio {
+ const struct audio_funcs *funcs;
+ struct dc_context *ctx;
+ unsigned int inst;
+};
+
+#endif /* __DAL_AUDIO__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
new file mode 100644
index 000000000000..f5f69cd81f6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_CLOCK_H__
+#define __DISPLAY_CLOCK_H__
+
+#include "dm_services_types.h"
+
+
+struct clocks_value {
+ int dispclk_in_khz;
+ int max_pixelclk_in_khz;
+ int max_non_dp_phyclk_in_khz;
+ int max_dp_phyclk_in_khz;
+ bool dispclk_notify_pplib_done;
+ bool pixelclk_notify_pplib_done;
+ bool phyclk_notigy_pplib_done;
+ int dcfclock_in_khz;
+ int dppclk_in_khz;
+ int mclk_in_khz;
+ int phyclk_in_khz;
+ int common_vdd_level;
+};
+
+
+/* Structure containing all state-dependent clocks
+ * (dependent on "enum clocks_state") */
+struct state_dependent_clocks {
+ int display_clk_khz;
+ int pixel_clk_khz;
+};
+
+struct display_clock {
+ struct dc_context *ctx;
+ const struct display_clock_funcs *funcs;
+
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+ struct clocks_value cur_clocks_value;
+};
+
+struct display_clock_funcs {
+ int (*set_clock)(struct display_clock *disp_clk,
+ int requested_clock_khz);
+
+ enum dm_pp_clocks_state (*get_required_clocks_state)(
+ struct display_clock *disp_clk,
+ struct state_dependent_clocks *req_clocks);
+
+ bool (*set_min_clocks_state)(struct display_clock *disp_clk,
+ enum dm_pp_clocks_state dm_pp_clocks_state);
+
+ int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
+
+ bool (*apply_clock_voltage_request)(
+ struct display_clock *disp_clk,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz,
+ bool pre_mode_set,
+ bool update_dp_phyclk);
+};
+
+#endif /* __DISPLAY_CLOCK_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
new file mode 100644
index 000000000000..0574c29cc4a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -0,0 +1,50 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DMCU_H__
+#define __DC_DMCU_H__
+
+#include "dm_services_types.h"
+
+struct dmcu {
+ struct dc_context *ctx;
+ const struct dmcu_funcs *funcs;
+};
+
+struct dmcu_funcs {
+ bool (*load_iram)(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes);
+ void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
+ void (*setup_psr)(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context);
+ void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
+ void (*set_psr_wait_loop)(struct dmcu *dmcu,
+ unsigned int wait_loop_number);
+ void (*get_psr_wait_loop)(unsigned int *psr_wait_loop_number);
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
new file mode 100644
index 000000000000..83a68460edcd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DAL_DPP_H__
+#define __DAL_DPP_H__
+
+#include "transform.h"
+
+struct dpp {
+ const struct dpp_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ struct dpp_caps *caps;
+ struct pwl_params regamma_params;
+};
+
+struct dpp_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+};
+
+struct dpp_funcs {
+ void (*dpp_reset)(struct dpp *dpp);
+
+ void (*dpp_set_scaler)(struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+ void (*dpp_set_pixel_storage_depth)(
+ struct dpp *dpp,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params);
+
+ bool (*dpp_get_optimal_number_of_taps)(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+ void (*dpp_set_gamut_remap)(
+ struct dpp *dpp,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+ void (*opp_set_csc_default)(
+ struct dpp *dpp,
+ const struct default_adjustment *default_adjust);
+
+ void (*opp_set_csc_adjustment)(
+ struct dpp *dpp,
+ const struct out_csc_color_matrix *tbl_entry);
+
+ void (*opp_power_on_regamma_lut)(
+ struct dpp *dpp,
+ bool power_on);
+
+ void (*opp_program_regamma_lut)(
+ struct dpp *dpp,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+ void (*opp_configure_regamma_lut)(
+ struct dpp *dpp,
+ bool is_ram_a);
+
+ void (*opp_program_regamma_lutb_settings)(
+ struct dpp *dpp,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_luta_settings)(
+ struct dpp *dpp,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_pwl)(
+ struct dpp *dpp, const struct pwl_params *params);
+
+ void (*opp_set_regamma_mode)(
+ struct dpp *dpp_base,
+ enum opp_regamma mode);
+
+ void (*ipp_set_degamma)(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_input_lut)(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma);
+
+ void (*ipp_program_degamma_pwl)(struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+ void (*ipp_setup)(
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ void (*ipp_full_bypass)(struct dpp *dpp_base);
+
+ void (*set_cursor_attributes)(
+ struct dpp *dpp_base,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width
+ );
+
+};
+
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
new file mode 100644
index 000000000000..90d0148430fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_H__
+#define __DAL_GPIO_H__
+
+#include "gpio_types.h"
+
+struct gpio {
+ struct gpio_service *service;
+ struct hw_gpio_pin *pin;
+ enum gpio_id id;
+ uint32_t en;
+ enum gpio_mode mode;
+ /* when GPIO comes from VBIOS, it has defined output state */
+ enum gpio_pin_output_state output_state;
+};
+
+#if 0
+struct gpio_funcs {
+
+ struct hw_gpio_pin *(*create_ddc_data)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_ddc_clock)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_generic)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_hpd)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gpio_pad)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_sync)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gsl)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+ /* HW translation */
+ bool (*offset_to_id)(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en);
+ bool (*id_to_offset)(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info);
+};
+#endif
+
+#endif /* __DAL_GPIO__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
new file mode 100644
index 000000000000..0d186be24cf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HUBP_H__
+#define __DAL_HUBP_H__
+
+#include "mem_input.h"
+
+struct hubp {
+ struct hubp_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+ int inst;
+ int opp_id;
+ int mpcc_id;
+ struct dc_cursor_attributes curs_attr;
+};
+
+
+struct hubp_funcs {
+ void (*hubp_setup)(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+ void (*dcc_control)(struct hubp *hubp, bool enable,
+ bool independent_64b_blks);
+ void (*mem_program_viewport)(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+ bool (*hubp_program_surface_flip_and_addr)(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+ void (*hubp_program_pte_vm)(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation);
+
+ void (*hubp_set_vm_system_aperture_settings)(
+ struct hubp *hubp,
+ struct vm_system_aperture_param *apt);
+
+ void (*hubp_set_vm_context0_settings)(
+ struct hubp *hubp,
+ const struct vm_context0_param *vm0);
+
+ void (*hubp_program_surface_config)(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+ bool (*hubp_is_flip_pending)(struct hubp *hubp);
+
+ void (*hubp_update_dchub)(struct hubp *hubp,
+ struct dchub_init_data *dh_data);
+
+ void (*set_blank)(struct hubp *hubp, bool blank);
+ void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
+
+ void (*set_cursor_attributes)(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
new file mode 100644
index 000000000000..9602f261b614
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SHARED_H__
+#define __DAL_HW_SHARED_H__
+
+#include "os_types.h"
+#include "fixed31_32.h"
+#include "dc_hw_types.h"
+
+/******************************************************************************
+ * Data types shared between different Virtual HW blocks
+ ******************************************************************************/
+
+#define MAX_PIPES 6
+
+struct gamma_curve {
+ uint32_t offset;
+ uint32_t segments_num;
+};
+
+struct curve_points {
+ struct fixed31_32 x;
+ struct fixed31_32 y;
+ struct fixed31_32 offset;
+ struct fixed31_32 slope;
+
+ uint32_t custom_float_x;
+ uint32_t custom_float_y;
+ uint32_t custom_float_offset;
+ uint32_t custom_float_slope;
+};
+
+struct pwl_result_data {
+ struct fixed31_32 red;
+ struct fixed31_32 green;
+ struct fixed31_32 blue;
+
+ struct fixed31_32 delta_red;
+ struct fixed31_32 delta_green;
+ struct fixed31_32 delta_blue;
+
+ uint32_t red_reg;
+ uint32_t green_reg;
+ uint32_t blue_reg;
+
+ uint32_t delta_red_reg;
+ uint32_t delta_green_reg;
+ uint32_t delta_blue_reg;
+};
+
+struct pwl_params {
+ struct gamma_curve arr_curve_points[34];
+ struct curve_points arr_points[3];
+ struct pwl_result_data rgb_resulted[256 + 3];
+ uint32_t hw_points_num;
+};
+
+/* move to dpp
+ * while we are moving functionality out of opp to dpp to align
+ * HW programming to HW IP, we define these struct in hw_shared
+ * so we can still compile while refactoring
+ */
+
+enum lb_pixel_depth {
+ /* do not change the values because it is used as bit vector */
+ LB_PIXEL_DEPTH_18BPP = 1,
+ LB_PIXEL_DEPTH_24BPP = 2,
+ LB_PIXEL_DEPTH_30BPP = 4,
+ LB_PIXEL_DEPTH_36BPP = 8
+};
+
+enum graphics_csc_adjust_type {
+ GRAPHICS_CSC_ADJUST_TYPE_BYPASS = 0,
+ GRAPHICS_CSC_ADJUST_TYPE_HW, /* without adjustments */
+ GRAPHICS_CSC_ADJUST_TYPE_SW /*use adjustments */
+};
+
+enum ipp_degamma_mode {
+ IPP_DEGAMMA_MODE_BYPASS,
+ IPP_DEGAMMA_MODE_HW_sRGB,
+ IPP_DEGAMMA_MODE_HW_xvYCC,
+ IPP_DEGAMMA_MODE_USER_PWL
+};
+
+enum ipp_output_format {
+ IPP_OUTPUT_FORMAT_12_BIT_FIX,
+ IPP_OUTPUT_FORMAT_16_BIT_BYPASS,
+ IPP_OUTPUT_FORMAT_FLOAT
+};
+
+enum expansion_mode {
+ EXPANSION_MODE_DYNAMIC,
+ EXPANSION_MODE_ZERO
+};
+
+struct default_adjustment {
+ enum lb_pixel_depth lb_color_depth;
+ enum dc_color_space out_color_space;
+ enum dc_color_space in_color_space;
+ enum dc_color_depth color_depth;
+ enum pixel_format surface_pixel_format;
+ enum graphics_csc_adjust_type csc_adjust_type;
+ bool force_hw_default;
+};
+
+struct out_csc_color_matrix {
+ enum dc_color_space color_space;
+ uint16_t regval[12];
+};
+
+enum opp_regamma {
+ OPP_REGAMMA_BYPASS = 0,
+ OPP_REGAMMA_SRGB,
+ OPP_REGAMMA_3_6,
+ OPP_REGAMMA_USER
+};
+
+#endif /* __DAL_HW_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
new file mode 100644
index 000000000000..f11aa484f46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IPP_H__
+#define __DAL_IPP_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+
+#define MAXTRIX_COEFFICIENTS_NUMBER 12
+#define MAXTRIX_COEFFICIENTS_WRAP_NUMBER (MAXTRIX_COEFFICIENTS_NUMBER + 4)
+#define MAX_OVL_MATRIX_COUNT 12
+
+/* IPP RELATED */
+struct input_pixel_processor {
+ struct dc_context *ctx;
+ unsigned int inst;
+ const struct ipp_funcs *funcs;
+};
+
+enum ipp_prescale_mode {
+ IPP_PRESCALE_MODE_BYPASS,
+ IPP_PRESCALE_MODE_FIXED_SIGNED,
+ IPP_PRESCALE_MODE_FLOAT_SIGNED,
+ IPP_PRESCALE_MODE_FIXED_UNSIGNED,
+ IPP_PRESCALE_MODE_FLOAT_UNSIGNED
+};
+
+struct ipp_prescale_params {
+ enum ipp_prescale_mode mode;
+ uint16_t bias;
+ uint16_t scale;
+};
+
+
+
+enum ovl_color_space {
+ OVL_COLOR_SPACE_UNKNOWN = 0,
+ OVL_COLOR_SPACE_RGB,
+ OVL_COLOR_SPACE_YUV601,
+ OVL_COLOR_SPACE_YUV709
+};
+
+
+struct ipp_funcs {
+
+ /*** cursor ***/
+ void (*ipp_cursor_set_position)(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_position *position,
+ const struct dc_cursor_mi_param *param);
+
+ void (*ipp_cursor_set_attributes)(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_attributes *attributes);
+
+ /*** setup input pixel processing ***/
+
+ /* put the entire pixel processor to bypass */
+ void (*ipp_full_bypass)(
+ struct input_pixel_processor *ipp);
+
+ /* setup ipp to expand/convert input to pixel processor internal format */
+ void (*ipp_setup)(
+ struct input_pixel_processor *ipp,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ /* DCE function to setup IPP. TODO: see if we can consolidate to setup */
+ void (*ipp_program_prescale)(
+ struct input_pixel_processor *ipp,
+ struct ipp_prescale_params *params);
+
+ void (*ipp_program_input_lut)(
+ struct input_pixel_processor *ipp,
+ const struct dc_gamma *gamma);
+
+ /*** DEGAMMA RELATED ***/
+ void (*ipp_set_degamma)(
+ struct input_pixel_processor *ipp,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_degamma_pwl)(
+ struct input_pixel_processor *ipp,
+ const struct pwl_params *params);
+
+ void (*ipp_destroy)(struct input_pixel_processor **ipp);
+};
+
+#endif /* __DAL_IPP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
new file mode 100644
index 000000000000..498b7f05c5ca
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * link_encoder.h
+ *
+ * Created on: Oct 6, 2015
+ * Author: yonsun
+ */
+
+#ifndef LINK_ENCODER_H_
+#define LINK_ENCODER_H_
+
+#include "grph_object_defs.h"
+#include "signal_types.h"
+#include "dc_types.h"
+
+struct dc_context;
+struct encoder_set_dp_phy_pattern_param;
+struct link_mst_stream_allocation_table;
+struct dc_link_settings;
+struct link_training_settings;
+struct pipe_ctx;
+
+struct encoder_init_data {
+ enum channel_id channel;
+ struct graphics_object_id connector;
+ enum hpd_source_id hpd_source;
+ /* TODO: in DAL2, here was pointer to EventManagerInterface */
+ struct graphics_object_id encoder;
+ struct dc_context *ctx;
+ enum transmitter transmitter;
+};
+
+struct encoder_feature_support {
+ union {
+ struct {
+ uint32_t IS_HBR2_CAPABLE:1;
+ uint32_t IS_HBR3_CAPABLE:1;
+ uint32_t IS_TPS3_CAPABLE:1;
+ uint32_t IS_TPS4_CAPABLE:1;
+ uint32_t IS_YCBCR_CAPABLE:1;
+ uint32_t HDMI_6GB_EN:1;
+ } bits;
+ uint32_t raw;
+ } flags;
+
+ enum dc_color_depth max_hdmi_deep_color;
+ unsigned int max_hdmi_pixel_clock;
+ bool ycbcr420_supported;
+};
+
+union dpcd_psr_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
+ unsigned char CRC_VERIFICATION : 1;
+ unsigned char FRAME_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char LINE_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
+ unsigned char RESERVED : 2;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_error_status {
+ struct {
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char RFB_STORAGE_ERROR :1;
+ unsigned char RESERVED :6;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_sink_psr_status {
+ struct {
+ unsigned char SINK_SELF_REFRESH_STATUS :3;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct link_encoder {
+ const struct link_encoder_funcs *funcs;
+ int32_t aux_channel_offset;
+ struct dc_context *ctx;
+ struct graphics_object_id id;
+ struct graphics_object_id connector;
+ uint32_t output_signals;
+ enum engine_id preferred_engine;
+ struct encoder_feature_support features;
+ enum transmitter transmitter;
+ enum hpd_source_id hpd_source;
+};
+
+struct link_encoder_funcs {
+ bool (*validate_output_with_stream)(
+ struct link_encoder *enc, const struct dc_stream_state *stream);
+ void (*hw_init)(struct link_encoder *enc);
+ void (*setup)(struct link_encoder *enc,
+ enum signal_type signal);
+ void (*enable_tmds_output)(struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock);
+ void (*enable_dp_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+ void (*enable_dp_mst_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+ void (*disable_output)(struct link_encoder *link_enc,
+ enum signal_type signal, struct dc_link *link);
+ void (*dp_set_lane_settings)(struct link_encoder *enc,
+ const struct link_training_settings *link_settings);
+ void (*dp_set_phy_pattern)(struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *para);
+ void (*update_mst_stream_allocation_table)(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+ void (*psr_program_dp_dphy_fast_training)(struct link_encoder *enc,
+ bool exit_link_training_required);
+ void (*psr_program_secondary_packet)(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline);
+ void (*connect_dig_be_to_fe)(struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect);
+ void (*enable_hpd)(struct link_encoder *enc);
+ void (*disable_hpd)(struct link_encoder *enc);
+ void (*destroy)(struct link_encoder **enc);
+};
+
+#endif /* LINK_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
new file mode 100644
index 000000000000..3e1e7e6a8792
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_MEM_INPUT_H__
+#define __DAL_MEM_INPUT_H__
+
+#include "dc.h"
+#include "include/grph_object_id.h"
+
+#include "dml/display_mode_structs.h"
+
+struct dchub_init_data;
+struct cstate_pstate_watermarks_st {
+ uint32_t cstate_exit_ns;
+ uint32_t cstate_enter_plus_exit_ns;
+ uint32_t pstate_change_ns;
+};
+
+struct dcn_watermarks {
+ uint32_t pte_meta_urgent_ns;
+ uint32_t urgent_ns;
+ struct cstate_pstate_watermarks_st cstate_pstate;
+};
+
+struct dcn_watermark_set {
+ struct dcn_watermarks a;
+ struct dcn_watermarks b;
+ struct dcn_watermarks c;
+ struct dcn_watermarks d;
+};
+
+struct dce_watermarks {
+ int a_mark;
+ int b_mark;
+ int c_mark;
+ int d_mark;
+};
+
+struct stutter_modes {
+ bool enhanced;
+ bool quad_dmif_buffer;
+ bool watermark_nb_pstate;
+};
+
+struct mem_input {
+ struct mem_input_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+ int inst;
+ struct stutter_modes stutter_mode;
+};
+
+struct vm_system_aperture_param {
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct vm_context0_param {
+ PHYSICAL_ADDRESS_LOC pte_base;
+ PHYSICAL_ADDRESS_LOC pte_start;
+ PHYSICAL_ADDRESS_LOC pte_end;
+ PHYSICAL_ADDRESS_LOC fault_default;
+};
+
+struct mem_input_funcs {
+ void (*mem_input_setup)(
+ struct mem_input *mem_input,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+ void (*dcc_control)(struct mem_input *mem_input, bool enable,
+ bool independent_64b_blks);
+ void (*mem_program_viewport)(
+ struct mem_input *mem_input,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+ void (*mem_input_program_display_marks)(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns);
+
+ void (*mem_input_program_chroma_display_marks)(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns);
+
+ void (*allocate_mem_input)(
+ struct mem_input *mem_input,
+ uint32_t h_total,/* for current target */
+ uint32_t v_total,/* for current target */
+ uint32_t pix_clk_khz,/* for current target */
+ uint32_t total_streams_num);
+
+ void (*free_mem_input)(
+ struct mem_input *mem_input,
+ uint32_t paths_num);
+
+ bool (*mem_input_program_surface_flip_and_addr)(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+ void (*mem_input_program_pte_vm)(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation);
+
+ void (*mem_input_set_vm_system_aperture_settings)(
+ struct mem_input *mem_input,
+ struct vm_system_aperture_param *apt);
+
+ void (*mem_input_set_vm_context0_settings)(
+ struct mem_input *mem_input,
+ const struct vm_context0_param *vm0);
+
+ void (*mem_input_program_surface_config)(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+ bool (*mem_input_is_flip_pending)(struct mem_input *mem_input);
+
+ void (*mem_input_update_dchub)(struct mem_input *mem_input,
+ struct dchub_init_data *dh_data);
+
+ void (*set_blank)(struct mem_input *mi, bool blank);
+ void (*set_hubp_blank_en)(struct mem_input *mi, bool blank);
+
+ void (*set_cursor_attributes)(
+ struct mem_input *mem_input,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct mem_input *mem_input,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
new file mode 100644
index 000000000000..d4188b2c0626
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -0,0 +1,61 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_H__
+#define __DC_MPCC_H__
+
+#include "dc_hw_types.h"
+#include "opp.h"
+
+struct mpcc_cfg {
+ int dpp_id;
+ int opp_id;
+ struct mpc_tree_cfg *tree_cfg;
+ unsigned int z_index;
+
+ struct tg_color black_color;
+ bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
+};
+
+struct mpc {
+ const struct mpc_funcs *funcs;
+ struct dc_context *ctx;
+};
+
+struct mpc_funcs {
+ int (*add)(struct mpc *mpc, struct mpcc_cfg *cfg);
+
+ void (*remove)(struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int mpcc_inst);
+
+ void (*wait_for_idle)(struct mpc *mpc, int id);
+
+ void (*update_blend_mode)(struct mpc *mpc, struct mpcc_cfg *cfg);
+
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
new file mode 100644
index 000000000000..75adb8fec551
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_OPP_H__
+#define __DAL_OPP_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+#include "transform.h"
+
+struct fixed31_32;
+
+/* TODO: Need cleanup */
+enum clamping_range {
+ CLAMPING_FULL_RANGE = 0, /* No Clamping */
+ CLAMPING_LIMITED_RANGE_8BPC, /* 8 bpc: Clamping 1 to FE */
+ CLAMPING_LIMITED_RANGE_10BPC, /* 10 bpc: Clamping 4 to 3FB */
+ CLAMPING_LIMITED_RANGE_12BPC, /* 12 bpc: Clamping 10 to FEF */
+ /* Use programmable clampping value on FMT_CLAMP_COMPONENT_R/G/B. */
+ CLAMPING_LIMITED_RANGE_PROGRAMMABLE
+};
+
+struct clamping_and_pixel_encoding_params {
+ enum dc_pixel_encoding pixel_encoding; /* Pixel Encoding */
+ enum clamping_range clamping_level; /* Clamping identifier */
+ enum dc_color_depth c_depth; /* Deep color use. */
+};
+
+struct bit_depth_reduction_params {
+ struct {
+ /* truncate/round */
+ /* trunc/round enabled*/
+ uint32_t TRUNCATE_ENABLED:1;
+ /* 2 bits: 0=6 bpc, 1=8 bpc, 2 = 10bpc*/
+ uint32_t TRUNCATE_DEPTH:2;
+ /* truncate or round*/
+ uint32_t TRUNCATE_MODE:1;
+
+ /* spatial dither */
+ /* Spatial Bit Depth Reduction enabled*/
+ uint32_t SPATIAL_DITHER_ENABLED:1;
+ /* 2 bits: 0=6 bpc, 1 = 8 bpc, 2 = 10bpc*/
+ uint32_t SPATIAL_DITHER_DEPTH:2;
+ /* 0-3 to select patterns*/
+ uint32_t SPATIAL_DITHER_MODE:2;
+ /* Enable RGB random dithering*/
+ uint32_t RGB_RANDOM:1;
+ /* Enable Frame random dithering*/
+ uint32_t FRAME_RANDOM:1;
+ /* Enable HighPass random dithering*/
+ uint32_t HIGHPASS_RANDOM:1;
+
+ /* temporal dither*/
+ /* frame modulation enabled*/
+ uint32_t FRAME_MODULATION_ENABLED:1;
+ /* same as for trunc/spatial*/
+ uint32_t FRAME_MODULATION_DEPTH:2;
+ /* 2/4 gray levels*/
+ uint32_t TEMPORAL_LEVEL:1;
+ uint32_t FRC25:2;
+ uint32_t FRC50:2;
+ uint32_t FRC75:2;
+ } flags;
+
+ uint32_t r_seed_value;
+ uint32_t b_seed_value;
+ uint32_t g_seed_value;
+ enum dc_pixel_encoding pixel_encoding;
+};
+
+enum wide_gamut_regamma_mode {
+ /* 0x0 - BITS2:0 Bypass */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_BYPASS,
+ /* 0x1 - Fixed curve sRGB 2.4 */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_SRGB24,
+ /* 0x2 - Fixed curve xvYCC 2.22 */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_XYYCC22,
+ /* 0x3 - Programmable control A */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_A,
+ /* 0x4 - Programmable control B */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_B,
+ /* 0x0 - BITS6:4 Bypass */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_BYPASS,
+ /* 0x1 - Fixed curve sRGB 2.4 */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_SRGB24,
+ /* 0x2 - Fixed curve xvYCC 2.22 */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_XYYCC22,
+ /* 0x3 - Programmable control A */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_A,
+ /* 0x4 - Programmable control B */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_B
+};
+
+struct gamma_pixel {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+};
+
+enum channel_name {
+ CHANNEL_NAME_RED,
+ CHANNEL_NAME_GREEN,
+ CHANNEL_NAME_BLUE
+};
+
+struct custom_float_format {
+ uint32_t mantissa_bits;
+ uint32_t exponenta_bits;
+ bool sign;
+};
+
+struct custom_float_value {
+ uint32_t mantissa;
+ uint32_t exponenta;
+ uint32_t value;
+ bool negative;
+};
+
+struct hw_x_point {
+ uint32_t custom_float_x;
+ struct fixed31_32 x;
+ struct fixed31_32 regamma_y_red;
+ struct fixed31_32 regamma_y_green;
+ struct fixed31_32 regamma_y_blue;
+
+};
+
+struct pwl_float_data_ex {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+ struct fixed31_32 delta_r;
+ struct fixed31_32 delta_g;
+ struct fixed31_32 delta_b;
+};
+
+enum hw_point_position {
+ /* hw point sits between left and right sw points */
+ HW_POINT_POSITION_MIDDLE,
+ /* hw point lays left from left (smaller) sw point */
+ HW_POINT_POSITION_LEFT,
+ /* hw point lays stays from right (bigger) sw point */
+ HW_POINT_POSITION_RIGHT
+};
+
+struct gamma_point {
+ int32_t left_index;
+ int32_t right_index;
+ enum hw_point_position pos;
+ struct fixed31_32 coeff;
+};
+
+struct pixel_gamma_point {
+ struct gamma_point r;
+ struct gamma_point g;
+ struct gamma_point b;
+};
+
+struct gamma_coefficients {
+ struct fixed31_32 a0[3];
+ struct fixed31_32 a1[3];
+ struct fixed31_32 a2[3];
+ struct fixed31_32 a3[3];
+ struct fixed31_32 user_gamma[3];
+ struct fixed31_32 user_contrast;
+ struct fixed31_32 user_brightness;
+};
+
+struct pwl_float_data {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+};
+
+struct mpc_tree_cfg {
+ int num_pipes;
+ int dpp[MAX_PIPES];
+ int mpcc[MAX_PIPES];
+};
+
+struct output_pixel_processor {
+ struct dc_context *ctx;
+ uint32_t inst;
+ struct pwl_params regamma_params;
+ struct mpc_tree_cfg mpc_tree;
+ bool mpcc_disconnect_pending[MAX_PIPES];
+ const struct opp_funcs *funcs;
+};
+
+enum fmt_stereo_action {
+ FMT_STEREO_ACTION_ENABLE = 0,
+ FMT_STEREO_ACTION_DISABLE,
+ FMT_STEREO_ACTION_UPDATE_POLARITY
+};
+
+struct opp_grph_csc_adjustment {
+ //enum grph_color_adjust_option color_adjust_option;
+ enum dc_color_space c_space;
+ enum dc_color_depth color_depth; /* clean up to uint32_t */
+ enum graphics_csc_adjust_type csc_adjust_type;
+ int32_t adjust_divider;
+ int32_t grph_cont;
+ int32_t grph_sat;
+ int32_t grph_bright;
+ int32_t grph_hue;
+};
+
+/* Underlay related types */
+
+struct hw_adjustment_range {
+ int32_t hw_default;
+ int32_t min;
+ int32_t max;
+ int32_t step;
+ uint32_t divider; /* (actually HW range is min/divider; divider !=0) */
+};
+
+enum ovl_csc_adjust_item {
+ OVERLAY_BRIGHTNESS = 0,
+ OVERLAY_GAMMA,
+ OVERLAY_CONTRAST,
+ OVERLAY_SATURATION,
+ OVERLAY_HUE,
+ OVERLAY_ALPHA,
+ OVERLAY_ALPHA_PER_PIX,
+ OVERLAY_COLOR_TEMPERATURE
+};
+
+struct opp_funcs {
+
+
+ /* FORMATTER RELATED */
+
+ void (*opp_program_fmt)(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+ void (*opp_set_dyn_expansion)(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal);
+
+ void (*opp_program_bit_depth_reduction)(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params);
+
+ /* underlay related */
+ void (*opp_get_underlay_adjustment_range)(
+ struct output_pixel_processor *opp,
+ enum ovl_csc_adjust_item overlay_adjust_item,
+ struct hw_adjustment_range *range);
+
+ void (*opp_destroy)(struct output_pixel_processor **opp);
+
+ void (*opp_set_stereo_polarity)(
+ struct output_pixel_processor *opp,
+ bool enable,
+ bool rightEyePolarity);
+
+ void (*opp_set_test_pattern)(
+ struct output_pixel_processor *opp,
+ bool enable);
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
new file mode 100644
index 000000000000..b5db1692393c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * stream_encoder.h
+ *
+ */
+
+#ifndef STREAM_ENCODER_H_
+#define STREAM_ENCODER_H_
+
+#include "audio_types.h"
+
+struct dc_bios;
+struct dc_context;
+struct dc_crtc_timing;
+
+struct encoder_info_packet {
+ bool valid;
+ uint8_t hb0;
+ uint8_t hb1;
+ uint8_t hb2;
+ uint8_t hb3;
+ uint8_t sb[32];
+};
+
+struct encoder_info_frame {
+ /* auxiliary video information */
+ struct encoder_info_packet avi;
+ struct encoder_info_packet gamut;
+ struct encoder_info_packet vendor;
+ /* source product description */
+ struct encoder_info_packet spd;
+ /* video stream configuration */
+ struct encoder_info_packet vsc;
+ /* HDR Static MetaData */
+ struct encoder_info_packet hdrsmd;
+};
+
+struct encoder_unblank_param {
+ struct dc_link_settings link_settings;
+ unsigned int pixel_clk_khz;
+};
+
+struct encoder_set_dp_phy_pattern_param {
+ enum dp_test_pattern dp_phy_pattern;
+ const uint8_t *custom_pattern;
+ uint32_t custom_pattern_size;
+ enum dp_panel_mode dp_panel_mode;
+};
+
+struct stream_encoder {
+ const struct stream_encoder_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_bios *bp;
+ enum engine_id id;
+};
+
+struct stream_encoder_funcs {
+ void (*dp_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space);
+
+ void (*hdmi_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio);
+
+ void (*dvi_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link);
+
+ void (*set_mst_bandwidth)(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+ void (*update_hdmi_info_packets)(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_hdmi_info_packets)(
+ struct stream_encoder *enc);
+
+ void (*update_dp_info_packets)(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_dp_info_packets)(
+ struct stream_encoder *enc);
+
+ void (*dp_blank)(
+ struct stream_encoder *enc);
+
+ void (*dp_unblank)(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+
+ void (*audio_mute_control)(
+ struct stream_encoder *enc, bool mute);
+
+ void (*dp_audio_setup)(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+ void (*dp_audio_enable) (
+ struct stream_encoder *enc);
+
+ void (*dp_audio_disable) (
+ struct stream_encoder *enc);
+
+ void (*hdmi_audio_setup)(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info);
+
+ void (*hdmi_audio_disable) (
+ struct stream_encoder *enc);
+
+ void (*setup_stereo_sync) (
+ struct stream_encoder *enc,
+ int tg_inst,
+ bool enable);
+
+ void (*set_avmute)(
+ struct stream_encoder *enc, bool enable);
+};
+
+#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
new file mode 100644
index 000000000000..c6ab38c5b2be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TIMING_GENERATOR_TYPES_H__
+#define __DAL_TIMING_GENERATOR_TYPES_H__
+
+struct dc_bios;
+
+/* Contains CRTC vertical/horizontal pixel counters */
+struct crtc_position {
+ int32_t vertical_count;
+ int32_t horizontal_count;
+ int32_t nominal_vcount;
+};
+
+struct dcp_gsl_params {
+ int gsl_group;
+ int gsl_master;
+};
+
+/* define the structure of Dynamic Refresh Mode */
+struct drr_params {
+ uint32_t vertical_total_min;
+ uint32_t vertical_total_max;
+ bool immediate_flip;
+};
+
+#define LEFT_EYE_3D_PRIMARY_SURFACE 1
+#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
+
+enum test_pattern_dyn_range {
+ TEST_PATTERN_DYN_RANGE_VESA = 0,
+ TEST_PATTERN_DYN_RANGE_CEA
+};
+
+enum test_pattern_mode {
+ TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
+ TEST_PATTERN_MODE_VERTICALBARS,
+ TEST_PATTERN_MODE_HORIZONTALBARS,
+ TEST_PATTERN_MODE_SINGLERAMP_RGB,
+ TEST_PATTERN_MODE_DUALRAMP_RGB
+};
+
+enum test_pattern_color_format {
+ TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
+ TEST_PATTERN_COLOR_FORMAT_BPC_8,
+ TEST_PATTERN_COLOR_FORMAT_BPC_10,
+ TEST_PATTERN_COLOR_FORMAT_BPC_12
+};
+
+enum controller_dp_test_pattern {
+ CONTROLLER_DP_TEST_PATTERN_D102 = 0,
+ CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
+ CONTROLLER_DP_TEST_PATTERN_PRBS7,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
+};
+
+enum crtc_state {
+ CRTC_STATE_VBLANK = 0,
+ CRTC_STATE_VACTIVE
+};
+
+struct _dlg_otg_param {
+ int vstartup_start;
+ int vupdate_offset;
+ int vupdate_width;
+ int vready_offset;
+ enum signal_type signal;
+};
+
+struct crtc_stereo_flags {
+ uint8_t PROGRAM_STEREO : 1;
+ uint8_t PROGRAM_POLARITY : 1;
+ uint8_t RIGHT_EYE_POLARITY : 1;
+ uint8_t FRAME_PACKED : 1;
+ uint8_t DISABLE_STEREO_DP_SYNC : 1;
+};
+
+struct timing_generator {
+ const struct timing_generator_funcs *funcs;
+ struct dc_bios *bp;
+ struct dc_context *ctx;
+ struct _dlg_otg_param dlg_otg_param;
+ int inst;
+};
+
+struct dc_crtc_timing;
+
+struct drr_params;
+
+struct timing_generator_funcs {
+ bool (*validate_timing)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+ void (*program_timing)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios);
+ bool (*enable_crtc)(struct timing_generator *tg);
+ bool (*disable_crtc)(struct timing_generator *tg);
+ bool (*is_counter_moving)(struct timing_generator *tg);
+ void (*get_position)(struct timing_generator *tg,
+ struct crtc_position *position);
+
+ uint32_t (*get_frame_count)(struct timing_generator *tg);
+ void (*get_scanoutpos)(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+ void (*set_early_control)(struct timing_generator *tg,
+ uint32_t early_cntl);
+ void (*wait_for_state)(struct timing_generator *tg,
+ enum crtc_state state);
+ void (*set_blank)(struct timing_generator *tg,
+ bool enable_blanking);
+ bool (*is_blanked)(struct timing_generator *tg);
+ void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
+ void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
+ void (*set_colors)(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color);
+
+ void (*disable_vga)(struct timing_generator *tg);
+ bool (*did_triggered_reset_occur)(struct timing_generator *tg);
+ void (*setup_global_swap_lock)(struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params);
+ void (*unlock)(struct timing_generator *tg);
+ void (*lock)(struct timing_generator *tg);
+ void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst);
+ void (*disable_reset_trigger)(struct timing_generator *tg);
+ void (*tear_down_global_swap_lock)(struct timing_generator *tg);
+ void (*enable_advanced_request)(struct timing_generator *tg,
+ bool enable, const struct dc_crtc_timing *timing);
+ void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
+ void (*set_static_screen_control)(struct timing_generator *tg,
+ uint32_t value);
+ void (*set_test_pattern)(
+ struct timing_generator *tg,
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth);
+
+ bool (*arm_vert_intr)(struct timing_generator *tg, uint8_t width);
+
+ void (*program_global_sync)(struct timing_generator *tg);
+ void (*enable_optc_clock)(struct timing_generator *tg, bool enable);
+ void (*program_stereo)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
+ bool (*is_stereo_left_eye)(struct timing_generator *tg);
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
new file mode 100644
index 000000000000..ea88997e1bbd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TRANSFORM_H__
+#define __DAL_TRANSFORM_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+#include "fixed31_32.h"
+
+#define CSC_TEMPERATURE_MATRIX_SIZE 9
+
+struct bit_depth_reduction_params;
+
+struct transform {
+ const struct transform_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ struct dpp_caps *caps;
+ struct pwl_params regamma_params;
+};
+
+/* Colorimetry */
+enum colorimetry {
+ COLORIMETRY_NO_DATA = 0,
+ COLORIMETRY_ITU601 = 1,
+ COLORIMETRY_ITU709 = 2,
+ COLORIMETRY_EXTENDED = 3
+};
+
+enum colorimetry_ext {
+ COLORIMETRYEX_XVYCC601 = 0,
+ COLORIMETRYEX_XVYCC709 = 1,
+ COLORIMETRYEX_SYCC601 = 2,
+ COLORIMETRYEX_ADOBEYCC601 = 3,
+ COLORIMETRYEX_ADOBERGB = 4,
+ COLORIMETRYEX_BT2020YCC = 5,
+ COLORIMETRYEX_BT2020RGBYCBCR = 6,
+ COLORIMETRYEX_RESERVED = 7
+};
+
+enum active_format_info {
+ ACTIVE_FORMAT_NO_DATA = 0,
+ ACTIVE_FORMAT_VALID = 1
+};
+
+/* Active format aspect ratio */
+enum active_format_aspect_ratio {
+ ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE = 8,
+ ACTIVE_FORMAT_ASPECT_RATIO_4_3 = 9,
+ ACTIVE_FORMAT_ASPECT_RATIO_16_9 = 0XA,
+ ACTIVE_FORMAT_ASPECT_RATIO_14_9 = 0XB
+};
+
+enum bar_info {
+ BAR_INFO_NOT_VALID = 0,
+ BAR_INFO_VERTICAL_VALID = 1,
+ BAR_INFO_HORIZONTAL_VALID = 2,
+ BAR_INFO_BOTH_VALID = 3
+};
+
+enum picture_scaling {
+ PICTURE_SCALING_UNIFORM = 0,
+ PICTURE_SCALING_HORIZONTAL = 1,
+ PICTURE_SCALING_VERTICAL = 2,
+ PICTURE_SCALING_BOTH = 3
+};
+
+/* RGB quantization range */
+enum rgb_quantization_range {
+ RGB_QUANTIZATION_DEFAULT_RANGE = 0,
+ RGB_QUANTIZATION_LIMITED_RANGE = 1,
+ RGB_QUANTIZATION_FULL_RANGE = 2,
+ RGB_QUANTIZATION_RESERVED = 3
+};
+
+/* YYC quantization range */
+enum yyc_quantization_range {
+ YYC_QUANTIZATION_LIMITED_RANGE = 0,
+ YYC_QUANTIZATION_FULL_RANGE = 1,
+ YYC_QUANTIZATION_RESERVED2 = 2,
+ YYC_QUANTIZATION_RESERVED3 = 3
+};
+
+enum graphics_gamut_adjust_type {
+ GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS = 0,
+ GRAPHICS_GAMUT_ADJUST_TYPE_HW, /* without adjustments */
+ GRAPHICS_GAMUT_ADJUST_TYPE_SW /* use adjustments */
+};
+
+enum lb_memory_config {
+ /* Enable all 3 pieces of memory */
+ LB_MEMORY_CONFIG_0 = 0,
+
+ /* Enable only the first piece of memory */
+ LB_MEMORY_CONFIG_1 = 1,
+
+ /* Enable only the second piece of memory */
+ LB_MEMORY_CONFIG_2 = 2,
+
+ /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the
+ * last piece of chroma memory used for the luma storage
+ */
+ LB_MEMORY_CONFIG_3 = 3
+};
+
+struct xfm_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+};
+
+struct overscan_info {
+ int left;
+ int right;
+ int top;
+ int bottom;
+};
+
+struct scaling_ratios {
+ struct fixed31_32 horz;
+ struct fixed31_32 vert;
+ struct fixed31_32 horz_c;
+ struct fixed31_32 vert_c;
+};
+
+struct sharpness_adj {
+ int horz;
+ int vert;
+};
+
+struct line_buffer_params {
+ bool alpha_en;
+ bool pixel_expan_mode;
+ bool interleave_en;
+ int dynamic_pixel_depth;
+ enum lb_pixel_depth depth;
+};
+
+struct scl_inits {
+ struct fixed31_32 h;
+ struct fixed31_32 h_c;
+ struct fixed31_32 v;
+ struct fixed31_32 v_bot;
+ struct fixed31_32 v_c;
+ struct fixed31_32 v_c_bot;
+};
+
+struct scaler_data {
+ int h_active;
+ int v_active;
+ struct scaling_taps taps;
+ struct rect viewport;
+ struct rect viewport_c;
+ struct rect recout;
+ struct scaling_ratios ratios;
+ struct scl_inits inits;
+ struct sharpness_adj sharpness;
+ enum pixel_format format;
+ struct line_buffer_params lb_params;
+};
+
+struct transform_funcs {
+ void (*transform_reset)(struct transform *xfm);
+
+ void (*transform_set_scaler)(struct transform *xfm,
+ const struct scaler_data *scl_data);
+
+ void (*transform_set_pixel_storage_depth)(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params);
+
+ bool (*transform_get_optimal_number_of_taps)(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+ void (*transform_set_gamut_remap)(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust);
+
+ void (*opp_set_csc_default)(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+ void (*opp_set_csc_adjustment)(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+ void (*opp_power_on_regamma_lut)(
+ struct transform *xfm,
+ bool power_on);
+
+ void (*opp_program_regamma_lut)(
+ struct transform *xfm,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+ void (*opp_configure_regamma_lut)(
+ struct transform *xfm,
+ bool is_ram_a);
+
+ void (*opp_program_regamma_lutb_settings)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_luta_settings)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_pwl)(
+ struct transform *xfm, const struct pwl_params *params);
+
+ void (*opp_set_regamma_mode)(
+ struct transform *xfm_base,
+ enum opp_regamma mode);
+
+ void (*ipp_set_degamma)(
+ struct transform *xfm_base,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_input_lut)(
+ struct transform *xfm_base,
+ const struct dc_gamma *gamma);
+
+ void (*ipp_program_degamma_pwl)(struct transform *xfm_base,
+ const struct pwl_params *params);
+
+ void (*ipp_setup)(
+ struct transform *xfm_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ void (*ipp_full_bypass)(struct transform *xfm_base);
+
+ void (*set_cursor_attributes)(
+ struct transform *xfm_base,
+ const struct dc_cursor_attributes *attr);
+
+};
+
+const uint16_t *get_filter_2tap_16p(void);
+const uint16_t *get_filter_2tap_64p(void);
+const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio);
+const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio);
+const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio);
+
+
+/* Defines the pixel processing capability of the DSCL */
+enum dscl_data_processing_format {
+ DSCL_DATA_PRCESSING_FIXED_FORMAT, /* The DSCL processes pixel data in fixed format */
+ DSCL_DATA_PRCESSING_FLOAT_FORMAT, /* The DSCL processes pixel data in float format */
+};
+
+/*
+ * The DPP capabilities structure contains enumerations to specify the
+ * HW processing features and an associated function pointers to
+ * provide the function interface that can be overloaded for implementations
+ * based on different capabilities
+ */
+struct dpp_caps {
+ /* DSCL processing pixel data in fixed or float format */
+ enum dscl_data_processing_format dscl_data_proc_format;
+
+ /* Calculates the number of partitions in the line buffer.
+ * The implementation of this function is overloaded for
+ * different versions of DSCL LB.
+ */
+ void (*dscl_calc_lb_num_partitions)(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
new file mode 100644
index 000000000000..8734689a9245
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HW_SEQUENCER_H__
+#define __DC_HW_SEQUENCER_H__
+#include "dc_types.h"
+#include "clock_source.h"
+#include "inc/hw/timing_generator.h"
+#include "inc/hw/link_encoder.h"
+#include "core_status.h"
+
+enum pipe_gating_control {
+ PIPE_GATING_CONTROL_DISABLE = 0,
+ PIPE_GATING_CONTROL_ENABLE,
+ PIPE_GATING_CONTROL_INIT
+};
+
+struct dce_hwseq_wa {
+ bool blnd_crtc_trigger;
+};
+
+struct dce_hwseq {
+ struct dc_context *ctx;
+ const struct dce_hwseq_registers *regs;
+ const struct dce_hwseq_shift *shifts;
+ const struct dce_hwseq_mask *masks;
+ struct dce_hwseq_wa wa;
+};
+
+struct pipe_ctx;
+struct dc_state;
+struct dchub_init_data;
+struct dc_static_screen_events;
+struct resource_pool;
+struct resource_context;
+
+struct hw_sequencer_funcs {
+
+ void (*init_hw)(struct dc *dc);
+
+ enum dc_status (*apply_ctx_to_hw)(
+ struct dc *dc, struct dc_state *context);
+
+ void (*reset_hw_ctx_wrap)(
+ struct dc *dc, struct dc_state *context);
+
+ void (*apply_ctx_for_surface)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context);
+
+ void (*set_plane_config)(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx);
+
+ void (*program_gamut_remap)(
+ struct pipe_ctx *pipe_ctx);
+
+ void (*program_csc_matrix)(
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix);
+
+ void (*update_plane_addr)(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+ void (*update_dchub)(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data);
+
+ void (*update_pending_status)(
+ struct pipe_ctx *pipe_ctx);
+
+ bool (*set_input_transfer_func)(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+
+ bool (*set_output_transfer_func)(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+
+ void (*power_down)(struct dc *dc);
+
+ void (*enable_accelerated_mode)(struct dc *dc);
+
+ void (*enable_timing_synchronization)(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+
+ void (*enable_display_pipe_clock_gating)(
+ struct dc_context *ctx,
+ bool clock_gating);
+
+ bool (*enable_display_power_gating)(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+
+ void (*power_down_front_end)(struct dc *dc, int fe_idx);
+
+ void (*power_on_front_end)(struct dc *dc,
+ struct pipe_ctx *pipe,
+ struct dc_state *context);
+
+ void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
+
+ void (*enable_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*disable_stream)(struct pipe_ctx *pipe_ctx,
+ int option);
+
+ void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+
+ void (*pipe_control_lock)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+
+ void (*set_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
+ void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ int vmin, int vmax);
+
+ void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ struct crtc_position *position);
+
+ void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events);
+
+ enum dc_status (*prog_pixclk_crtc_otg)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+
+ void (*setup_stereo)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc *dc);
+
+ void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+
+ void (*log_hw_state)(struct dc *dc);
+
+ void (*wait_for_mpcc_disconnect)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx);
+
+ void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
+ void (*optimize_shared_resources)(struct dc *dc);
+ void (*edp_power_control)(
+ struct link_encoder *enc,
+ bool enable);
+ void (*edp_backlight_control)(
+ struct dc_link *link,
+ bool enable);
+};
+
+void color_space_to_black_color(
+ const struct dc *dc,
+ enum dc_color_space colorspace,
+ struct tg_color *black_color);
+
+bool hwss_wait_for_blank_complete(
+ struct timing_generator *tg);
+
+#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
new file mode 100644
index 000000000000..f2b8c9a376d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_HWSS_H__
+#define __DC_LINK_HWSS_H__
+
+#include "inc/core_status.h"
+
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
+
+void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern);
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_settings);
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size);
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_HWSS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
new file mode 100644
index 000000000000..77eb72874e90
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
+
+#include "dm_services.h"
+
+/* macro for register read/write
+ * user of macro need to define
+ *
+ * CTX ==> macro to ptr to dc_context
+ * eg. aud110->base.ctx
+ *
+ * REG ==> macro to location of register offset
+ * eg. aud110->regs->reg
+ */
+#define REG_READ(reg_name) \
+ dm_read_reg(CTX, REG(reg_name))
+
+#define REG_WRITE(reg_name, value) \
+ dm_write_reg(CTX, REG(reg_name), value)
+
+#ifdef REG_SET
+#undef REG_SET
+#endif
+
+#ifdef REG_GET
+#undef REG_GET
+#endif
+
+/* macro to set register fields. */
+#define REG_SET_N(reg_name, n, initial_val, ...) \
+ generic_reg_update_ex(CTX, \
+ REG(reg_name), \
+ initial_val, \
+ n, __VA_ARGS__)
+
+#define FN(reg_name, field) \
+ FD(reg_name##__##field)
+
+#define REG_SET(reg_name, initial_val, field, val) \
+ REG_SET_N(reg_name, 1, initial_val, \
+ FN(reg_name, field), val)
+
+#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \
+ REG_SET_N(reg, 2, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \
+ REG_SET_N(reg, 3, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3)
+
+#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_SET_N(reg, 4, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4)
+
+#define REG_SET_5(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5) \
+ REG_SET_N(reg, 5, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5)
+
+#define REG_SET_6(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6) \
+ REG_SET_N(reg, 6, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6)
+
+#define REG_SET_7(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6, f7, v7) \
+ REG_SET_N(reg, 7, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6,\
+ FN(reg, f7), v7)
+
+#define REG_SET_8(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6, f7, v7, f8, v8) \
+ REG_SET_N(reg, 8, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6,\
+ FN(reg, f7), v7,\
+ FN(reg, f8), v8)
+
+#define REG_SET_9(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, f5, \
+ v5, f6, v6, f7, v7, f8, v8, f9, v9) \
+ REG_SET_N(reg, 9, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9)
+
+#define REG_SET_10(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, f5, \
+ v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10) \
+ REG_SET_N(reg, 10, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10)
+
+/* macro to get register fields
+ * read given register and fill in field value in output parameter */
+#define REG_GET(reg_name, field, val) \
+ generic_reg_get(CTX, REG(reg_name), \
+ FN(reg_name, field), val)
+
+#define REG_GET_2(reg_name, f1, v1, f2, v2) \
+ generic_reg_get2(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2)
+
+#define REG_GET_3(reg_name, f1, v1, f2, v2, f3, v3) \
+ generic_reg_get3(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3)
+
+#define REG_GET_4(reg_name, f1, v1, f2, v2, f3, v3, f4, v4) \
+ generic_reg_get4(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4)
+
+#define REG_GET_5(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
+ generic_reg_get5(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4, \
+ FN(reg_name, f5), v5)
+
+/* macro to poll and wait for a register field to read back given value */
+
+#define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \
+ generic_reg_wait(CTX, \
+ REG(reg_name), FN(reg_name, field), val,\
+ delay_between_poll_us, max_try, __func__, __LINE__)
+
+/* macro to update (read, modify, write) register fields
+ */
+#define REG_UPDATE_N(reg_name, n, ...) \
+ generic_reg_update_ex(CTX, \
+ REG(reg_name), \
+ REG_READ(reg_name), \
+ n, __VA_ARGS__)
+
+#define REG_UPDATE(reg_name, field, val) \
+ REG_UPDATE_N(reg_name, 1, \
+ FN(reg_name, field), val)
+
+#define REG_UPDATE_2(reg, f1, v1, f2, v2) \
+ REG_UPDATE_N(reg, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \
+ REG_UPDATE_N(reg, 3, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3)
+
+#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_UPDATE_N(reg, 4, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4)
+
+#define REG_UPDATE_5(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
+ REG_UPDATE_N(reg, 5, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5)
+
+#define REG_UPDATE_6(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
+ REG_UPDATE_N(reg, 6, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6)
+
+#define REG_UPDATE_7(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
+ REG_UPDATE_N(reg, 7, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7)
+
+#define REG_UPDATE_8(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
+ REG_UPDATE_N(reg, 8, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8)
+
+#define REG_UPDATE_9(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9) \
+ REG_UPDATE_N(reg, 9, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9)
+
+#define REG_UPDATE_10(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10)\
+ REG_UPDATE_N(reg, 10, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10)
+
+#define REG_UPDATE_14(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14)\
+ REG_UPDATE_N(reg, 14, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14)
+
+#define REG_UPDATE_19(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14, f15, v15, f16, v16, f17, v17, f18, v18, f19, v19)\
+ REG_UPDATE_N(reg, 19, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14, \
+ FN(reg, f15), v15, \
+ FN(reg, f16), v16, \
+ FN(reg, f17), v17, \
+ FN(reg, f18), v18, \
+ FN(reg, f19), v19)
+
+#define REG_UPDATE_20(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14, f15, v15, f16, v16, f17, v17, f18, v18, f19, v19, f20, v20)\
+ REG_UPDATE_N(reg, 20, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14, \
+ FN(reg, f15), v15, \
+ FN(reg, f16), v16, \
+ FN(reg, f17), v17, \
+ FN(reg, f18), v18, \
+ FN(reg, f19), v19, \
+ FN(reg, f20), v20)
+/* macro to update a register field to specified values in given sequences.
+ * useful when toggling bits
+ */
+#define REG_UPDATE_SEQ(reg, field, value1, value2) \
+{ uint32_t val = REG_UPDATE(reg, field, value1); \
+ REG_SET(reg, val, field, value2); }
+
+/* macro to update fields in register 1 field at a time in given order */
+#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
+{ uint32_t val = REG_UPDATE(reg, f1, v1); \
+ REG_SET(reg, val, f2, v2); }
+
+#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
+{ uint32_t val = REG_UPDATE(reg, f1, v1); \
+ val = REG_SET(reg, val, f2, v2); \
+ REG_SET(reg, val, f3, v3); }
+
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift, uint32_t mask, uint32_t *field_value);
+
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2);
+
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3);
+
+uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4);
+
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
new file mode 100644
index 000000000000..5467332faf7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
+
+#include "core_types.h"
+#include "core_status.h"
+#include "dal_asic_id.h"
+#include "dm_pp_smu.h"
+
+/* TODO unhardcode, 4 for CZ*/
+#define MEMORY_TYPE_MULTIPLIER 4
+
+enum dce_version resource_parse_asic_id(
+ struct hw_asic_id asic_id);
+
+struct resource_caps {
+ int num_timing_generator;
+ int num_video_plane;
+ int num_audio;
+ int num_stream_encoder;
+ int num_pll;
+ int num_dwb;
+};
+
+struct resource_straps {
+ uint32_t hdmi_disable;
+ uint32_t dc_pinstraps_audio;
+ uint32_t audio_stream_number;
+};
+
+struct resource_create_funcs {
+ void (*read_dce_straps)(
+ struct dc_context *ctx, struct resource_straps *straps);
+
+ struct audio *(*create_audio)(
+ struct dc_context *ctx, unsigned int inst);
+
+ struct stream_encoder *(*create_stream_encoder)(
+ enum engine_id eng_id, struct dc_context *ctx);
+
+ struct dce_hwseq *(*create_hwseq)(
+ struct dc_context *ctx);
+};
+
+bool resource_construct(
+ unsigned int num_virtual_links,
+ struct dc *dc,
+ struct resource_pool *pool,
+ const struct resource_create_funcs *create_funcs);
+
+struct resource_pool *dc_create_resource_pool(
+ struct dc *dc,
+ int num_virtual_links,
+ enum dce_version dc_version,
+ struct hw_asic_id asic_id);
+
+void dc_destroy_resource_pool(struct dc *dc);
+
+enum dc_status resource_map_pool_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx);
+
+enum dc_status resource_build_scaling_params_for_context(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx);
+
+void resource_unreference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
+void resource_reference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
+bool resource_are_streams_timing_synchronizable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2);
+
+struct clock_source *resource_find_used_clk_src_for_sharing(
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx);
+
+struct clock_source *dc_resource_find_first_free_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream);
+
+bool resource_attach_surfaces_to_context(
+ struct dc_plane_state *const *plane_state,
+ int surface_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context,
+ const struct resource_pool *pool);
+
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+bool resource_is_stream_unchanged(
+ struct dc_state *old_context, struct dc_stream_state *stream);
+
+bool resource_validate_attach_surfaces(
+ const struct dc_validation_set set[],
+ int set_count,
+ const struct dc_state *old_context,
+ struct dc_state *context,
+ const struct resource_pool *pool);
+
+void validate_guaranteed_copy_streams(
+ struct dc_state *context,
+ int max_streams);
+
+void resource_validate_ctx_update_pointer_after_copy(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx);
+
+enum dc_status resource_map_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+enum dc_status resource_map_phy_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+bool pipe_need_reprogram(
+ struct pipe_ctx *pipe_ctx_old,
+ struct pipe_ctx *pipe_ctx);
+
+void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth);
+
+void update_audio_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct audio *audio,
+ bool acquired);
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
new file mode 100644
index 000000000000..498515aad4a5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -0,0 +1,69 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'audio' sub-component of DAL.
+# It provides the control and status of HW adapter resources,
+# that are global for the ASIC and sharable between pipes.
+
+IRQ = irq_service.o
+
+AMD_DAL_IRQ = $(addprefix $(AMDDALPATH)/dc/irq/,$(IRQ))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+IRQ_DCE80 = irq_service_dce80.o
+
+AMD_DAL_IRQ_DCE80 = $(addprefix $(AMDDALPATH)/dc/irq/dce80/,$(IRQ_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE80)
+
+###############################################################################
+# DCE 11x
+###############################################################################
+IRQ_DCE11 = irq_service_dce110.o
+
+AMD_DAL_IRQ_DCE11 = $(addprefix $(AMDDALPATH)/dc/irq/dce110/,$(IRQ_DCE11))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE11)
+
+###############################################################################
+# DCE 12x
+###############################################################################
+IRQ_DCE12 = irq_service_dce120.o
+
+AMD_DAL_IRQ_DCE12 = $(addprefix $(AMDDALPATH)/dc/irq/dce120/,$(IRQ_DCE12))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
+
+###############################################################################
+# DCN 1x
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+IRQ_DCN1 = irq_service_dcn10.o
+
+AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
new file mode 100644
index 000000000000..f7e40b292dfb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include "dc.h"
+#include "core_types.h"
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
+ ~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\
+ },\
+ .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
+ .ack_value = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
+ .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
+ ~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\
+ .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
+ .ack_value = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
+ .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ .enable_value = {\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .status_reg = mmDCP ## reg_num ##_GRPH_INTERRUPT_STATUS,\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ .enable_value = {\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
+ .ack_mask =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .enable_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ .enable_value = {\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .ack_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+bool dal_irq_service_dummy_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: called for non-implemented irq source\n",
+ __func__);
+ return false;
+}
+
+bool dal_irq_service_dummy_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: called for non-implemented irq source\n",
+ __func__);
+ return false;
+}
+
+
+bool dce110_vblank_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ struct dc_context *dc_ctx = irq_service->ctx;
+ struct dc *core_dc = irq_service->ctx->dc;
+ enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(
+ irq_service->ctx->dc,
+ info->src_id,
+ info->ext_id);
+ uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+
+ struct timing_generator *tg =
+ core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+
+ if (enable) {
+ if (!tg->funcs->arm_vert_intr(tg, 2)) {
+ DC_ERROR("Failed to get VBLANK!\n");
+ return false;
+ }
+ }
+
+ dal_irq_service_set_generic(irq_service, info, enable);
+ return true;
+
+}
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce110[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+
+};
+
+enum dc_irq_source to_dal_irq_source_dce110(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case VISLANDS30_IV_SRCID_D2_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case VISLANDS30_IV_SRCID_D3_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case VISLANDS30_IV_SRCID_D4_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case VISLANDS30_IV_SRCID_D5_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP6;
+
+ case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A:
+ return DC_IRQ_SOURCE_HPD1;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B:
+ return DC_IRQ_SOURCE_HPD2;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C:
+ return DC_IRQ_SOURCE_HPD3;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D:
+ return DC_IRQ_SOURCE_HPD4;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E:
+ return DC_IRQ_SOURCE_HPD5;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F:
+ return DC_IRQ_SOURCE_HPD6;
+ case VISLANDS30_IV_EXTID_HPD_RX_A:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_B:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_C:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_D:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_E:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_F:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static const struct irq_service_funcs irq_service_funcs_dce110 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce110;
+ irq_service->funcs = &irq_service_funcs_dce110;
+}
+
+struct irq_service *dal_irq_service_dce110_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
new file mode 100644
index 000000000000..9237646c0959
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE110_H__
+#define __DAL_IRQ_SERVICE_DCE110_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce110_create(
+ struct irq_service_init_data *init_data);
+
+enum dc_irq_source to_dal_irq_source_dce110(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+
+bool dal_irq_service_dummy_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+bool dal_irq_service_dummy_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool dce110_vblank_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
new file mode 100644
index 000000000000..2ad56b1a4099
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce120.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(DCP, reg_num, \
+ GRPH_INTERRUPT_CONTROL, GRPH_PFLIP_INT_MASK, \
+ GRPH_INTERRUPT_STATUS, GRPH_PFLIP_INT_CLEAR),\
+ .status_reg = SRI(GRPH_INTERRUPT_STATUS, DCP, reg_num),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(CRTC, reg_num,\
+ CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
+ CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(CRTC, reg_num,\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_CLEAR),\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce120[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dce120 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce120;
+ irq_service->funcs = &irq_service_funcs_dce120;
+}
+
+struct irq_service *dal_irq_service_dce120_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h
new file mode 100644
index 000000000000..420c96e8fefc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE120_H__
+#define __DAL_IRQ_SERVICE_DCE120_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce120_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
new file mode 100644
index 000000000000..8a2066c313fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce80.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include "dc_types.h"
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_INVALID + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
+ },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD6 + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ .enable_value = {\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .status_reg = mmDCP ## reg_num ##_GRPH_INTERRUPT_STATUS,\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ .enable_value = {\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
+ .ack_mask =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .enable_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ .enable_value = {\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .ack_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce80[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_int_entry(6),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ hpd_rx_int_entry(6),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dce80 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce80;
+ irq_service->funcs = &irq_service_funcs_dce80;
+}
+
+struct irq_service *dal_irq_service_dce80_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
new file mode 100644
index 000000000000..3dd1013576ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE80_H__
+#define __DAL_IRQ_SERVICE_DCE80_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce80_create(
+ struct irq_service_init_data *init_data);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
new file mode 100644
index 000000000000..74ad24714f6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "../dce110/irq_service_dce110.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "irq_service_dcn10.h"
+
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+enum dc_irq_source to_dal_irq_source_dcn10(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dcn10 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn10
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn10;
+ irq_service->funcs = &irq_service_funcs_dcn10;
+}
+
+struct irq_service *dal_irq_service_dcn10_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h
new file mode 100644
index 000000000000..fd2ca4d0c316
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCN10_H__
+#define __DAL_IRQ_SERVICE_DCN10_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn10_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
new file mode 100644
index 000000000000..b106513fc2dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/irq_service_interface.h"
+#include "include/logger_interface.h"
+
+#include "dce110/irq_service_dce110.h"
+
+
+#include "dce80/irq_service_dce80.h"
+
+#include "dce120/irq_service_dce120.h"
+
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/irq_service_dcn10.h"
+#endif
+
+#include "reg_helper.h"
+#include "irq_service.h"
+
+
+
+#define CTX \
+ irq_service->ctx
+
+void dal_irq_service_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ if (!init_data || !init_data->ctx) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ irq_service->ctx = init_data->ctx;
+}
+
+void dal_irq_service_destroy(struct irq_service **irq_service)
+{
+ if (!irq_service || !*irq_service) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ kfree(*irq_service);
+
+ *irq_service = NULL;
+}
+
+const struct irq_source_info *find_irq_source_info(
+ struct irq_service *irq_service,
+ enum dc_irq_source source)
+{
+ if (source > DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ return NULL;
+
+ return &irq_service->info[source];
+}
+
+void dal_irq_service_set_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ uint32_t addr = info->enable_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+
+ value = (value & ~info->enable_mask) |
+ (info->enable_value[enable ? 0 : 1] & info->enable_mask);
+ dm_write_reg(irq_service->ctx, addr, value);
+}
+
+bool dal_irq_service_set(
+ struct irq_service *irq_service,
+ enum dc_irq_source source,
+ bool enable)
+{
+ const struct irq_source_info *info =
+ find_irq_source_info(irq_service, source);
+
+ if (!info) {
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: cannot find irq info table entry for %d\n",
+ __func__,
+ source);
+ return false;
+ }
+
+ dal_irq_service_ack(irq_service, source);
+
+ if (info->funcs->set)
+ return info->funcs->set(irq_service, info, enable);
+
+ dal_irq_service_set_generic(irq_service, info, enable);
+
+ return true;
+}
+
+void dal_irq_service_ack_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->ack_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+
+ value = (value & ~info->ack_mask) |
+ (info->ack_value & info->ack_mask);
+ dm_write_reg(irq_service->ctx, addr, value);
+}
+
+bool dal_irq_service_ack(
+ struct irq_service *irq_service,
+ enum dc_irq_source source)
+{
+ const struct irq_source_info *info =
+ find_irq_source_info(irq_service, source);
+
+ if (!info) {
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: cannot find irq info table entry for %d\n",
+ __func__,
+ source);
+ return false;
+ }
+
+ if (info->funcs->ack)
+ return info->funcs->ack(irq_service, info);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ return true;
+}
+
+enum dc_irq_source dal_irq_service_to_irq_source(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return irq_service->funcs->to_dal_irq_source(
+ irq_service,
+ src_id,
+ ext_id);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
new file mode 100644
index 000000000000..dbfcb096eedd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_H__
+#define __DAL_IRQ_SERVICE_H__
+
+#include "include/irq_service_interface.h"
+
+#include "irq_types.h"
+
+struct irq_service;
+struct irq_source_info;
+
+struct irq_source_info_funcs {
+ bool (*set)(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+ bool (*ack)(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+};
+
+struct irq_source_info {
+ uint32_t src_id;
+ uint32_t ext_id;
+ uint32_t enable_reg;
+ uint32_t enable_mask;
+ uint32_t enable_value[2];
+ uint32_t ack_reg;
+ uint32_t ack_mask;
+ uint32_t ack_value;
+ uint32_t status_reg;
+ const struct irq_source_info_funcs *funcs;
+};
+
+struct irq_service_funcs {
+ enum dc_irq_source (*to_dal_irq_source)(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+};
+
+struct irq_service {
+ struct dc_context *ctx;
+ const struct irq_source_info *info;
+ const struct irq_service_funcs *funcs;
+};
+
+void dal_irq_service_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data);
+
+void dal_irq_service_ack_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+void dal_irq_service_set_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
new file mode 100644
index 000000000000..a506c2e939f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_TYPES_H__
+#define __DAL_IRQ_TYPES_H__
+
+struct dc_context;
+
+typedef void (*interrupt_handler)(void *);
+
+typedef void *irq_handler_idx;
+#define DAL_INVALID_IRQ_HANDLER_IDX NULL
+
+/* The order of the IRQ sources is important and MUST match the one's
+of base driver */
+enum dc_irq_source {
+ /* Use as mask to specify invalid irq source */
+ DC_IRQ_SOURCE_INVALID = 0,
+
+ DC_IRQ_SOURCE_HPD1,
+ DC_IRQ_SOURCE_HPD2,
+ DC_IRQ_SOURCE_HPD3,
+ DC_IRQ_SOURCE_HPD4,
+ DC_IRQ_SOURCE_HPD5,
+ DC_IRQ_SOURCE_HPD6,
+
+ DC_IRQ_SOURCE_HPD1RX,
+ DC_IRQ_SOURCE_HPD2RX,
+ DC_IRQ_SOURCE_HPD3RX,
+ DC_IRQ_SOURCE_HPD4RX,
+ DC_IRQ_SOURCE_HPD5RX,
+ DC_IRQ_SOURCE_HPD6RX,
+
+ DC_IRQ_SOURCE_I2C_DDC1,
+ DC_IRQ_SOURCE_I2C_DDC2,
+ DC_IRQ_SOURCE_I2C_DDC3,
+ DC_IRQ_SOURCE_I2C_DDC4,
+ DC_IRQ_SOURCE_I2C_DDC5,
+ DC_IRQ_SOURCE_I2C_DDC6,
+
+ DC_IRQ_SOURCE_DPSINK1,
+ DC_IRQ_SOURCE_DPSINK2,
+ DC_IRQ_SOURCE_DPSINK3,
+ DC_IRQ_SOURCE_DPSINK4,
+ DC_IRQ_SOURCE_DPSINK5,
+ DC_IRQ_SOURCE_DPSINK6,
+
+ DC_IRQ_SOURCE_TIMER,
+
+ DC_IRQ_SOURCE_PFLIP_FIRST,
+ DC_IRQ_SOURCE_PFLIP1 = DC_IRQ_SOURCE_PFLIP_FIRST,
+ DC_IRQ_SOURCE_PFLIP2,
+ DC_IRQ_SOURCE_PFLIP3,
+ DC_IRQ_SOURCE_PFLIP4,
+ DC_IRQ_SOURCE_PFLIP5,
+ DC_IRQ_SOURCE_PFLIP6,
+ DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
+ DC_IRQ_SOURCE_PFLIP_LAST = DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
+
+ DC_IRQ_SOURCE_GPIOPAD0,
+ DC_IRQ_SOURCE_GPIOPAD1,
+ DC_IRQ_SOURCE_GPIOPAD2,
+ DC_IRQ_SOURCE_GPIOPAD3,
+ DC_IRQ_SOURCE_GPIOPAD4,
+ DC_IRQ_SOURCE_GPIOPAD5,
+ DC_IRQ_SOURCE_GPIOPAD6,
+ DC_IRQ_SOURCE_GPIOPAD7,
+ DC_IRQ_SOURCE_GPIOPAD8,
+ DC_IRQ_SOURCE_GPIOPAD9,
+ DC_IRQ_SOURCE_GPIOPAD10,
+ DC_IRQ_SOURCE_GPIOPAD11,
+ DC_IRQ_SOURCE_GPIOPAD12,
+ DC_IRQ_SOURCE_GPIOPAD13,
+ DC_IRQ_SOURCE_GPIOPAD14,
+ DC_IRQ_SOURCE_GPIOPAD15,
+ DC_IRQ_SOURCE_GPIOPAD16,
+ DC_IRQ_SOURCE_GPIOPAD17,
+ DC_IRQ_SOURCE_GPIOPAD18,
+ DC_IRQ_SOURCE_GPIOPAD19,
+ DC_IRQ_SOURCE_GPIOPAD20,
+ DC_IRQ_SOURCE_GPIOPAD21,
+ DC_IRQ_SOURCE_GPIOPAD22,
+ DC_IRQ_SOURCE_GPIOPAD23,
+ DC_IRQ_SOURCE_GPIOPAD24,
+ DC_IRQ_SOURCE_GPIOPAD25,
+ DC_IRQ_SOURCE_GPIOPAD26,
+ DC_IRQ_SOURCE_GPIOPAD27,
+ DC_IRQ_SOURCE_GPIOPAD28,
+ DC_IRQ_SOURCE_GPIOPAD29,
+ DC_IRQ_SOURCE_GPIOPAD30,
+
+ DC_IRQ_SOURCE_DC1UNDERFLOW,
+ DC_IRQ_SOURCE_DC2UNDERFLOW,
+ DC_IRQ_SOURCE_DC3UNDERFLOW,
+ DC_IRQ_SOURCE_DC4UNDERFLOW,
+ DC_IRQ_SOURCE_DC5UNDERFLOW,
+ DC_IRQ_SOURCE_DC6UNDERFLOW,
+
+ DC_IRQ_SOURCE_DMCU_SCP,
+ DC_IRQ_SOURCE_VBIOS_SW,
+
+ DC_IRQ_SOURCE_VUPDATE1,
+ DC_IRQ_SOURCE_VUPDATE2,
+ DC_IRQ_SOURCE_VUPDATE3,
+ DC_IRQ_SOURCE_VUPDATE4,
+ DC_IRQ_SOURCE_VUPDATE5,
+ DC_IRQ_SOURCE_VUPDATE6,
+
+ DC_IRQ_SOURCE_VBLANK1,
+ DC_IRQ_SOURCE_VBLANK2,
+ DC_IRQ_SOURCE_VBLANK3,
+ DC_IRQ_SOURCE_VBLANK4,
+ DC_IRQ_SOURCE_VBLANK5,
+ DC_IRQ_SOURCE_VBLANK6,
+
+ DAL_IRQ_SOURCES_NUMBER
+};
+
+enum irq_type
+{
+ IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
+ IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
+ IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
+};
+
+#define DAL_VALID_IRQ_SRC_NUM(src) \
+ ((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+
+/* Number of Page Flip IRQ Sources. */
+#define DAL_PFLIP_IRQ_SRC_NUM \
+ (DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1)
+
+/* the number of contexts may be expanded in the future based on needs */
+enum dc_interrupt_context {
+ INTERRUPT_LOW_IRQ_CONTEXT = 0,
+ INTERRUPT_HIGH_IRQ_CONTEXT,
+ INTERRUPT_CONTEXT_NUMBER
+};
+
+enum dc_interrupt_porlarity {
+ INTERRUPT_POLARITY_DEFAULT = 0,
+ INTERRUPT_POLARITY_LOW = INTERRUPT_POLARITY_DEFAULT,
+ INTERRUPT_POLARITY_HIGH,
+ INTERRUPT_POLARITY_BOTH
+};
+
+#define DC_DECODE_INTERRUPT_POLARITY(int_polarity) \
+ (int_polarity == INTERRUPT_POLARITY_LOW) ? "Low" : \
+ (int_polarity == INTERRUPT_POLARITY_HIGH) ? "High" : \
+ (int_polarity == INTERRUPT_POLARITY_BOTH) ? "Both" : "Invalid"
+
+struct dc_timer_interrupt_params {
+ uint32_t micro_sec_interval;
+ enum dc_interrupt_context int_context;
+};
+
+struct dc_interrupt_params {
+ /* The polarity *change* which will trigger an interrupt.
+ * If 'requested_polarity == INTERRUPT_POLARITY_BOTH', then
+ * 'current_polarity' must be initialised. */
+ enum dc_interrupt_porlarity requested_polarity;
+ /* If 'requested_polarity == INTERRUPT_POLARITY_BOTH',
+ * 'current_polarity' should contain the current state, which means
+ * the interrupt will be triggered when state changes from what is,
+ * in 'current_polarity'. */
+ enum dc_interrupt_porlarity current_polarity;
+ enum dc_irq_source irq_source;
+ enum dc_interrupt_context int_context;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
new file mode 100644
index 000000000000..a87c0329541f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _OS_TYPES_H_
+#define _OS_TYPES_H_
+
+#if defined __KERNEL__
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#include <linux/kref.h>
+
+#include "cgs_linux.h"
+
+#if defined(__BIG_ENDIAN) && !defined(BIGENDIAN_CPU)
+#define BIGENDIAN_CPU
+#elif defined(__LITTLE_ENDIAN) && !defined(LITTLEENDIAN_CPU)
+#define LITTLEENDIAN_CPU
+#endif
+
+#undef READ
+#undef WRITE
+#undef FRAME_SIZE
+
+#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
+
+#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
+
+#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
+
+#define dm_vlog(fmt, args) vprintk(fmt, args)
+
+#endif
+
+/*
+ *
+ * general debug capabilities
+ *
+ */
+#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ kgdb_breakpoint(); \
+ } \
+} while (0)
+#else
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ ; \
+ } \
+} while (0)
+#endif
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+#define ASSERT(expr) ASSERT_CRITICAL(expr)
+
+#else
+#define ASSERT(expr) WARN_ON(!(expr))
+#endif
+
+#define BREAK_TO_DEBUGGER() ASSERT(0)
+
+#define DC_ERR(...) do { \
+ dm_error(__VA_ARGS__); \
+ BREAK_TO_DEBUGGER(); \
+} while (0)
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include <asm/fpu/api.h>
+#endif
+
+#endif /* _OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
new file mode 100644
index 000000000000..07326d244d50
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
@@ -0,0 +1,30 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the virtual sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o
+
+AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_VIRTUAL)
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
new file mode 100644
index 000000000000..88c2bde3f039
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_services_types.h"
+
+#include "virtual_link_encoder.h"
+
+static bool virtual_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream) { return true; }
+
+static void virtual_link_encoder_hw_init(struct link_encoder *enc) {}
+
+static void virtual_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal) {}
+
+static void virtual_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock) {}
+
+static void virtual_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source) {}
+
+static void virtual_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source) {}
+
+static void virtual_link_encoder_disable_output(
+ struct link_encoder *link_enc,
+ enum signal_type signal,
+ struct dc_link *link) {}
+
+static void virtual_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings) {}
+
+static void virtual_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param) {}
+
+static void virtual_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table) {}
+
+static void virtual_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect) {}
+
+static void virtual_link_encoder_destroy(struct link_encoder **enc)
+{
+ kfree(*enc);
+ *enc = NULL;
+}
+
+
+static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ virtual_link_encoder_validate_output_with_stream,
+ .hw_init = virtual_link_encoder_hw_init,
+ .setup = virtual_link_encoder_setup,
+ .enable_tmds_output = virtual_link_encoder_enable_tmds_output,
+ .enable_dp_output = virtual_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
+ .disable_output = virtual_link_encoder_disable_output,
+ .dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ virtual_link_encoder_update_mst_stream_allocation_table,
+ .connect_dig_be_to_fe = virtual_link_encoder_connect_dig_be_to_fe,
+ .destroy = virtual_link_encoder_destroy
+};
+
+bool virtual_link_encoder_construct(
+ struct link_encoder *enc, const struct encoder_init_data *init_data)
+{
+ enc->funcs = &virtual_lnk_enc_funcs;
+ enc->ctx = init_data->ctx;
+ enc->id = init_data->encoder;
+
+ enc->hpd_source = init_data->hpd_source;
+ enc->connector = init_data->connector;
+
+ enc->transmitter = init_data->transmitter;
+
+ enc->output_signals = SIGNAL_TYPE_VIRTUAL;
+
+ enc->preferred_engine = ENGINE_ID_VIRTUAL;
+
+ return true;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
new file mode 100644
index 000000000000..eb1a94fb8a9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_VIRTUAL_LINK_ENCODER_H__
+#define __DC_VIRTUAL_LINK_ENCODER_H__
+
+#include "link_encoder.h"
+
+bool virtual_link_encoder_construct(
+ struct link_encoder *enc, const struct encoder_init_data *init_data);
+
+#endif /* __DC_VIRTUAL_LINK_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
new file mode 100644
index 000000000000..3dc1733eea20
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "virtual_stream_encoder.h"
+
+static void virtual_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space) {}
+
+static void virtual_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio) {}
+
+static void virtual_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link) {}
+
+static void virtual_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp) {}
+
+static void virtual_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame) {}
+
+static void virtual_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable) {}
+static void virtual_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame) {}
+
+static void virtual_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_dp_blank(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param) {}
+
+static void virtual_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute) {}
+
+static const struct stream_encoder_funcs virtual_str_enc_funcs = {
+ .dp_set_stream_attribute =
+ virtual_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ virtual_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ virtual_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ virtual_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ virtual_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ virtual_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ virtual_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ virtual_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ virtual_stream_encoder_dp_blank,
+ .dp_unblank =
+ virtual_stream_encoder_dp_unblank,
+
+ .audio_mute_control = virtual_audio_mute_control,
+ .set_avmute = virtual_stream_encoder_set_avmute,
+};
+
+bool virtual_stream_encoder_construct(
+ struct stream_encoder *enc,
+ struct dc_context *ctx,
+ struct dc_bios *bp)
+{
+ if (!enc)
+ return false;
+ if (!bp)
+ return false;
+
+ enc->funcs = &virtual_str_enc_funcs;
+ enc->ctx = ctx;
+ enc->id = ENGINE_ID_VIRTUAL;
+ enc->bp = bp;
+
+ return true;
+}
+
+struct stream_encoder *virtual_stream_encoder_create(
+ struct dc_context *ctx, struct dc_bios *bp)
+{
+ struct stream_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL);
+
+ if (!enc)
+ return NULL;
+
+ if (virtual_stream_encoder_construct(enc, ctx, bp))
+ return enc;
+
+ BREAK_TO_DEBUGGER();
+ kfree(enc);
+ return NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
new file mode 100644
index 000000000000..bf3422c66976
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_VIRTUAL_STREAM_ENCODER_H__
+#define __DC_VIRTUAL_STREAM_ENCODER_H__
+
+#include "stream_encoder.h"
+
+struct stream_encoder *virtual_stream_encoder_create(
+ struct dc_context *ctx, struct dc_bios *bp);
+
+bool virtual_stream_encoder_construct(
+ struct stream_encoder *enc,
+ struct dc_context *ctx,
+ struct dc_bios *bp);
+
+#endif /* __DC_VIRTUAL_STREAM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
new file mode 100644
index 000000000000..6364fbc24cfe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AUDIO_TYPES_H__
+#define __AUDIO_TYPES_H__
+
+#include "signal_types.h"
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+#define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
+#define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
+
+
+struct audio_crtc_info {
+ uint32_t h_total;
+ uint32_t h_active;
+ uint32_t v_active;
+ uint32_t pixel_repetition;
+ uint32_t requested_pixel_clock; /* in KHz */
+ uint32_t calculated_pixel_clock; /* in KHz */
+ uint32_t refresh_rate;
+ enum dc_color_depth color_depth;
+ bool interlaced;
+};
+struct azalia_clock_info {
+ uint32_t pixel_clock_in_10khz;
+ uint32_t audio_dto_phase;
+ uint32_t audio_dto_module;
+ uint32_t audio_dto_wall_clock_ratio;
+};
+
+enum audio_dto_source {
+ DTO_SOURCE_UNKNOWN = 0,
+ DTO_SOURCE_ID0,
+ DTO_SOURCE_ID1,
+ DTO_SOURCE_ID2,
+ DTO_SOURCE_ID3,
+ DTO_SOURCE_ID4,
+ DTO_SOURCE_ID5
+};
+
+/* PLL information required for AZALIA DTO calculation */
+
+struct audio_pll_info {
+ uint32_t dp_dto_source_clock_in_khz;
+ uint32_t feed_back_divider;
+ enum audio_dto_source dto_source;
+ bool ss_enabled;
+ uint32_t ss_percentage;
+ uint32_t ss_percentage_divider;
+};
+
+struct audio_channel_associate_info {
+ union {
+ struct {
+ uint32_t ALL_CHANNEL_FL:4;
+ uint32_t ALL_CHANNEL_FR:4;
+ uint32_t ALL_CHANNEL_FC:4;
+ uint32_t ALL_CHANNEL_Sub:4;
+ uint32_t ALL_CHANNEL_SL:4;
+ uint32_t ALL_CHANNEL_SR:4;
+ uint32_t ALL_CHANNEL_BL:4;
+ uint32_t ALL_CHANNEL_BR:4;
+ } bits;
+ uint32_t u32all;
+ };
+};
+
+struct audio_output {
+ /* Front DIG id. */
+ enum engine_id engine_id;
+ /* encoder output signal */
+ enum signal_type signal;
+ /* video timing */
+ struct audio_crtc_info crtc_info;
+ /* PLL for audio */
+ struct audio_pll_info pll_info;
+};
+
+enum audio_payload {
+ CHANNEL_SPLIT_MAPPINGCHANG = 0x9,
+};
+
+#endif /* __AUDIO_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_interface.h b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
new file mode 100644
index 000000000000..d51101c5c6b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_INTERFACE_H__
+#define __DAL_BIOS_PARSER_INTERFACE_H__
+
+#include "dc_bios_types.h"
+
+struct bios_parser;
+
+struct bp_init_data {
+ struct dc_context *ctx;
+ uint8_t *bios;
+};
+
+struct dc_bios *dal_bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+void dal_bios_parser_destroy(struct dc_bios **dcb);
+
+#endif /* __DAL_BIOS_PARSER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
new file mode 100644
index 000000000000..0840f69cde99
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_H__
+
+#define __DAL_BIOS_PARSER_TYPES_H__
+
+#include "dm_services.h"
+#include "include/signal_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/gpio_types.h"
+#include "include/link_service_types.h"
+
+/* TODO: include signal_types.h and remove this enum */
+enum as_signal_type {
+ AS_SIGNAL_TYPE_NONE = 0L, /* no signal */
+ AS_SIGNAL_TYPE_DVI,
+ AS_SIGNAL_TYPE_HDMI,
+ AS_SIGNAL_TYPE_LVDS,
+ AS_SIGNAL_TYPE_DISPLAY_PORT,
+ AS_SIGNAL_TYPE_GPU_PLL,
+ AS_SIGNAL_TYPE_UNKNOWN
+};
+
+enum bp_result {
+ BP_RESULT_OK = 0, /* There was no error */
+ BP_RESULT_BADINPUT, /*Bad input parameter */
+ BP_RESULT_BADBIOSTABLE, /* Bad BIOS table */
+ BP_RESULT_UNSUPPORTED, /* BIOS Table is not supported */
+ BP_RESULT_NORECORD, /* Record can't be found */
+ BP_RESULT_FAILURE
+};
+
+enum bp_encoder_control_action {
+ /* direct VBIOS translation! Just to simplify the translation */
+ ENCODER_CONTROL_DISABLE = 0,
+ ENCODER_CONTROL_ENABLE,
+ ENCODER_CONTROL_SETUP,
+ ENCODER_CONTROL_INIT
+};
+
+enum bp_transmitter_control_action {
+ /* direct VBIOS translation! Just to simplify the translation */
+ TRANSMITTER_CONTROL_DISABLE = 0,
+ TRANSMITTER_CONTROL_ENABLE,
+ TRANSMITTER_CONTROL_BACKLIGHT_OFF,
+ TRANSMITTER_CONTROL_BACKLIGHT_ON,
+ TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS,
+ TRANSMITTER_CONTROL_LCD_SETF_TEST_START,
+ TRANSMITTER_CONTROL_LCD_SELF_TEST_STOP,
+ TRANSMITTER_CONTROL_INIT,
+ TRANSMITTER_CONTROL_DEACTIVATE,
+ TRANSMITTER_CONTROL_ACTIAVATE,
+ TRANSMITTER_CONTROL_SETUP,
+ TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS,
+ /* ATOM_TRANSMITTER_ACTION_POWER_ON. This action is for eDP only
+ * (power up the panel)
+ */
+ TRANSMITTER_CONTROL_POWER_ON,
+ /* ATOM_TRANSMITTER_ACTION_POWER_OFF. This action is for eDP only
+ * (power down the panel)
+ */
+ TRANSMITTER_CONTROL_POWER_OFF
+};
+
+enum bp_external_encoder_control_action {
+ EXTERNAL_ENCODER_CONTROL_DISABLE = 0,
+ EXTERNAL_ENCODER_CONTROL_ENABLE = 1,
+ EXTERNAL_ENCODER_CONTROL_INIT = 0x7,
+ EXTERNAL_ENCODER_CONTROL_SETUP = 0xf,
+ EXTERNAL_ENCODER_CONTROL_UNBLANK = 0x10,
+ EXTERNAL_ENCODER_CONTROL_BLANK = 0x11,
+};
+
+enum bp_pipe_control_action {
+ ASIC_PIPE_DISABLE = 0,
+ ASIC_PIPE_ENABLE,
+ ASIC_PIPE_INIT
+};
+
+struct bp_encoder_control {
+ enum bp_encoder_control_action action;
+ enum engine_id engine_id;
+ enum transmitter transmitter;
+ enum signal_type signal;
+ enum dc_lane_count lanes_number;
+ enum dc_color_depth color_depth;
+ bool enable_dp_audio;
+ uint32_t pixel_clock; /* khz */
+};
+
+struct bp_external_encoder_control {
+ enum bp_external_encoder_control_action action;
+ enum engine_id engine_id;
+ enum dc_link_rate link_rate;
+ enum dc_lane_count lanes_number;
+ enum signal_type signal;
+ enum dc_color_depth color_depth;
+ bool coherent;
+ struct graphics_object_id encoder_id;
+ struct graphics_object_id connector_obj_id;
+ uint32_t pixel_clock; /* in KHz */
+};
+
+struct bp_crtc_source_select {
+ enum engine_id engine_id;
+ enum controller_id controller_id;
+ /* from GPU Tx aka asic_signal */
+ enum signal_type signal;
+ /* sink_signal may differ from asicSignal if Translator encoder */
+ enum signal_type sink_signal;
+ enum display_output_bit_depth display_output_bit_depth;
+ bool enable_dp_audio;
+};
+
+struct bp_transmitter_control {
+ enum bp_transmitter_control_action action;
+ enum engine_id engine_id;
+ enum transmitter transmitter; /* PhyId */
+ enum dc_lane_count lanes_number;
+ enum clock_source_id pll_id; /* needed for DCE 4.0 */
+ enum signal_type signal;
+ enum dc_color_depth color_depth; /* not used for DCE6.0 */
+ enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCe6.0 */
+ struct graphics_object_id connector_obj_id;
+ /* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should
+ * be pixel clock * deep_color_ratio (in KHz)
+ */
+ uint32_t pixel_clock;
+ uint32_t lane_select;
+ uint32_t lane_settings;
+ bool coherent;
+ bool multi_path;
+ bool single_pll_mode;
+};
+
+struct bp_hw_crtc_timing_parameters {
+ enum controller_id controller_id;
+ /* horizontal part */
+ uint32_t h_total;
+ uint32_t h_addressable;
+ uint32_t h_overscan_left;
+ uint32_t h_overscan_right;
+ uint32_t h_sync_start;
+ uint32_t h_sync_width;
+
+ /* vertical part */
+ uint32_t v_total;
+ uint32_t v_addressable;
+ uint32_t v_overscan_top;
+ uint32_t v_overscan_bottom;
+ uint32_t v_sync_start;
+ uint32_t v_sync_width;
+
+ struct timing_flags {
+ uint32_t INTERLACE:1;
+ uint32_t PIXEL_REPETITION:4;
+ uint32_t HSYNC_POSITIVE_POLARITY:1;
+ uint32_t VSYNC_POSITIVE_POLARITY:1;
+ uint32_t HORZ_COUNT_BY_TWO:1;
+ } flags;
+};
+
+struct bp_adjust_pixel_clock_parameters {
+ /* Input: Signal Type - to be converted to Encoder mode */
+ enum signal_type signal_type;
+ /* Input: Encoder object id */
+ struct graphics_object_id encoder_object_id;
+ /* Input: Pixel Clock (requested Pixel clock based on Video timing
+ * standard used) in KHz
+ */
+ uint32_t pixel_clock;
+ /* Output: Adjusted Pixel Clock (after VBIOS exec table) in KHz */
+ uint32_t adjusted_pixel_clock;
+ /* Output: If non-zero, this refDiv value should be used to calculate
+ * other ppll params */
+ uint32_t reference_divider;
+ /* Output: If non-zero, this postDiv value should be used to calculate
+ * other ppll params */
+ uint32_t pixel_clock_post_divider;
+ /* Input: Enable spread spectrum */
+ bool ss_enable;
+};
+
+struct bp_pixel_clock_parameters {
+ enum controller_id controller_id; /* (Which CRTC uses this PLL) */
+ enum clock_source_id pll_id; /* Clock Source Id */
+ /* signal_type -> Encoder Mode - needed by VBIOS Exec table */
+ enum signal_type signal_type;
+ /* Adjusted Pixel Clock (after VBIOS exec table)
+ * that becomes Target Pixel Clock (KHz) */
+ uint32_t target_pixel_clock;
+ /* Calculated Reference divider of Display PLL */
+ uint32_t reference_divider;
+ /* Calculated Feedback divider of Display PLL */
+ uint32_t feedback_divider;
+ /* Calculated Fractional Feedback divider of Display PLL */
+ uint32_t fractional_feedback_divider;
+ /* Calculated Pixel Clock Post divider of Display PLL */
+ uint32_t pixel_clock_post_divider;
+ struct graphics_object_id encoder_object_id; /* Encoder object id */
+ /* VBIOS returns a fixed display clock when DFS-bypass feature
+ * is enabled (KHz) */
+ uint32_t dfs_bypass_display_clock;
+ /* color depth to support HDMI deep color */
+ enum transmitter_color_depth color_depth;
+
+ struct program_pixel_clock_flags {
+ uint32_t FORCE_PROGRAMMING_OF_PLL:1;
+ /* Use Engine Clock as source for Display Clock when
+ * programming PLL */
+ uint32_t USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK:1;
+ /* Use external reference clock (refDivSrc for PLL) */
+ uint32_t SET_EXTERNAL_REF_DIV_SRC:1;
+ /* Force program PHY PLL only */
+ uint32_t PROGRAM_PHY_PLL_ONLY:1;
+ /* Support for YUV420 */
+ uint32_t SUPPORT_YUV_420:1;
+ /* Use XTALIN reference clock source */
+ uint32_t SET_XTALIN_REF_SRC:1;
+ /* Use GENLK reference clock source */
+ uint32_t SET_GENLOCK_REF_DIV_SRC:1;
+ } flags;
+};
+
+enum bp_dce_clock_type {
+ DCECLOCK_TYPE_DISPLAY_CLOCK = 0,
+ DCECLOCK_TYPE_DPREFCLK = 1
+};
+
+/* DCE Clock Parameters structure for SetDceClock Exec command table */
+struct bp_set_dce_clock_parameters {
+ enum clock_source_id pll_id; /* Clock Source Id */
+ /* Display clock or DPREFCLK value */
+ uint32_t target_clock_frequency;
+ /* Clock to set: =0: DISPCLK =1: DPREFCLK =2: PIXCLK */
+ enum bp_dce_clock_type clock_type;
+
+ struct set_dce_clock_flags {
+ uint32_t USE_GENERICA_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use XTALIN reference clock source */
+ uint32_t USE_XTALIN_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use PCIE reference clock source */
+ uint32_t USE_PCIE_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use GENLK reference clock source */
+ uint32_t USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK:1;
+ } flags;
+};
+
+struct spread_spectrum_flags {
+ /* 1 = Center Spread; 0 = down spread */
+ uint32_t CENTER_SPREAD:1;
+ /* 1 = external; 0 = internal */
+ uint32_t EXTERNAL_SS:1;
+ /* 1 = delta-sigma type parameter; 0 = ver1 */
+ uint32_t DS_TYPE:1;
+};
+
+struct bp_spread_spectrum_parameters {
+ enum clock_source_id pll_id;
+ uint32_t percentage;
+ uint32_t ds_frac_amount;
+
+ union {
+ struct {
+ uint32_t step;
+ uint32_t delay;
+ uint32_t range; /* In Hz unit */
+ } ver1;
+ struct {
+ uint32_t feedback_amount;
+ uint32_t nfrac_amount;
+ uint32_t ds_frac_size;
+ } ds;
+ };
+
+ struct spread_spectrum_flags flags;
+};
+
+struct bp_encoder_cap_info {
+ uint32_t DP_HBR2_CAP:1;
+ uint32_t DP_HBR2_EN:1;
+ uint32_t DP_HBR3_EN:1;
+ uint32_t HDMI_6GB_EN:1;
+ uint32_t RESERVED:30;
+};
+
+#endif /*__DAL_BIOS_PARSER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
new file mode 100644
index 000000000000..7abe663ecc6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_ASIC_ID_H__
+#define __DAL_ASIC_ID_H__
+
+/*
+ * ASIC internal revision ID
+ */
+
+/* DCE80 (based on ci_id.h in Perforce) */
+#define CI_BONAIRE_M_A0 0x14
+#define CI_BONAIRE_M_A1 0x15
+#define CI_HAWAII_P_A0 0x28
+
+#define CI_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_BONAIRE_M(rev) \
+ ((rev >= CI_BONAIRE_M_A0) && (rev < CI_HAWAII_P_A0))
+
+#define ASIC_REV_IS_HAWAII_P(rev) \
+ (rev >= CI_HAWAII_P_A0)
+
+/* KV1 with Spectre GFX core, 8-8-1-2 (CU-Pix-Primitive-RB) */
+#define KV_SPECTRE_A0 0x01
+
+/* KV2 with Spooky GFX core, including downgraded from Spectre core,
+ * 3-4-1-1 (CU-Pix-Primitive-RB) */
+#define KV_SPOOKY_A0 0x41
+
+/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define KB_KALINDI_A0 0x81
+
+/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define KB_KALINDI_A1 0x82
+
+/* BV with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define BV_KALINDI_A2 0x85
+
+/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define ML_GODAVARI_A0 0xA1
+
+/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define ML_GODAVARI_A1 0xA2
+
+#define KV_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_KALINDI(rev) \
+ ((rev >= KB_KALINDI_A0) && (rev < KV_UNKNOWN))
+
+#define ASIC_REV_IS_BHAVANI(rev) \
+ ((rev >= BV_KALINDI_A2) && (rev < ML_GODAVARI_A0))
+
+#define ASIC_REV_IS_GODAVARI(rev) \
+ ((rev >= ML_GODAVARI_A0) && (rev < KV_UNKNOWN))
+
+/* VI Family */
+/* DCE10 */
+#define VI_TONGA_P_A0 20
+#define VI_TONGA_P_A1 21
+#define VI_FIJI_P_A0 60
+
+/* DCE112 */
+#define VI_POLARIS10_P_A0 80
+#define VI_POLARIS11_M_A0 90
+#define VI_POLARIS12_V_A0 100
+
+#define VI_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_TONGA_P(eChipRev) ((eChipRev >= VI_TONGA_P_A0) && \
+ (eChipRev < 40))
+#define ASIC_REV_IS_FIJI_P(eChipRev) ((eChipRev >= VI_FIJI_P_A0) && \
+ (eChipRev < 80))
+
+#define ASIC_REV_IS_POLARIS10_P(eChipRev) ((eChipRev >= VI_POLARIS10_P_A0) && \
+ (eChipRev < VI_POLARIS11_M_A0))
+#define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \
+ (eChipRev < VI_POLARIS12_V_A0))
+#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
+
+/* DCE11 */
+#define CZ_CARRIZO_A0 0x01
+
+#define STONEY_A0 0x61
+#define CZ_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_STONEY(rev) \
+ ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
+
+/* DCN1_0 */
+#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
+#define RAVEN_A0 0x01
+#define RAVEN_B0 0x21
+#define RAVEN_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
+#define RAVEN1_F0 0xF0
+#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
+
+
+#define FAMILY_RV 142 /* DCN 1*/
+
+/*
+ * ASIC chip ID
+ */
+/* DCE80 */
+#define DEVICE_ID_KALINDI_9834 0x9834
+#define DEVICE_ID_TEMASH_9839 0x9839
+#define DEVICE_ID_TEMASH_983D 0x983D
+
+/* Asic Family IDs for different asic family. */
+#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
+#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
+#define FAMILY_VI 130 /* Volcanic Islands: Iceland (V), Tonga (M) */
+#define FAMILY_CZ 135 /* Carrizo */
+
+#define FAMILY_AI 141
+
+#define FAMILY_UNKNOWN 0xFF
+
+#endif /* __DAL_ASIC_ID_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
new file mode 100644
index 000000000000..fa543965feb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TYPES_H__
+#define __DAL_TYPES_H__
+
+#include "signal_types.h"
+#include "dc_types.h"
+
+struct dal_logger;
+struct dc_bios;
+
+enum dce_version {
+ DCE_VERSION_UNKNOWN = (-1),
+ DCE_VERSION_8_0,
+ DCE_VERSION_8_1,
+ DCE_VERSION_8_3,
+ DCE_VERSION_10_0,
+ DCE_VERSION_11_0,
+ DCE_VERSION_11_2,
+ DCE_VERSION_12_0,
+ DCE_VERSION_MAX,
+ DCN_VERSION_1_0,
+ DCN_VERSION_MAX
+};
+
+#endif /* __DAL_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
new file mode 100644
index 000000000000..0ff2a899b8f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_DDC_SERVICE_TYPES_H__
+#define __DAL_DDC_SERVICE_TYPES_H__
+
+#define DP_BRANCH_DEVICE_ID_1 0x0010FA
+#define DP_BRANCH_DEVICE_ID_2 0x0022B9
+#define DP_SINK_DEVICE_ID_1 0x4CE000
+#define DP_BRANCH_DEVICE_ID_3 0x00001A
+#define DP_BRANCH_DEVICE_ID_4 0x0080e1
+#define DP_BRANCH_DEVICE_ID_5 0x006037
+#define DP_SINK_DEVICE_ID_2 0x001CF8
+
+
+enum ddc_result {
+ DDC_RESULT_UNKNOWN = 0,
+ DDC_RESULT_SUCESSFULL,
+ DDC_RESULT_FAILED_CHANNEL_BUSY,
+ DDC_RESULT_FAILED_TIMEOUT,
+ DDC_RESULT_FAILED_PROTOCOL_ERROR,
+ DDC_RESULT_FAILED_NACK,
+ DDC_RESULT_FAILED_INCOMPLETE,
+ DDC_RESULT_FAILED_OPERATION,
+ DDC_RESULT_FAILED_INVALID_OPERATION,
+ DDC_RESULT_FAILED_BUFFER_OVERFLOW
+};
+
+enum ddc_service_type {
+ DDC_SERVICE_TYPE_CONNECTOR,
+ DDC_SERVICE_TYPE_DISPLAY_PORT_MST,
+};
+
+/**
+ * display sink capability
+ */
+struct display_sink_capability {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+
+ /**********************************************************
+ capabilities going INTO SINK DEVICE (stream capabilities)
+ **********************************************************/
+ /* Dongle's downstream count. */
+ uint32_t downstrm_sink_count;
+ /* Is dongle's downstream count info field (downstrm_sink_count)
+ * valid. */
+ bool downstrm_sink_count_valid;
+
+ /* Maximum additional audio delay in microsecond (us) */
+ uint32_t additional_audio_delay;
+ /* Audio latency value in microsecond (us) */
+ uint32_t audio_latency;
+ /* Interlace video latency value in microsecond (us) */
+ uint32_t video_latency_interlace;
+ /* Progressive video latency value in microsecond (us) */
+ uint32_t video_latency_progressive;
+ /* Dongle caps: Maximum pixel clock supported over dongle for HDMI */
+ uint32_t max_hdmi_pixel_clock;
+ /* Dongle caps: Maximum deep color supported over dongle for HDMI */
+ enum dc_color_depth max_hdmi_deep_color;
+
+ /************************************************************
+ capabilities going OUT OF SOURCE DEVICE (link capabilities)
+ ************************************************************/
+ /* support for Spread Spectrum(SS) */
+ bool ss_supported;
+ /* DP link settings (laneCount, linkRate, Spread) */
+ uint32_t dp_link_lane_count;
+ uint32_t dp_link_rate;
+ uint32_t dp_link_spead;
+
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
+ bool is_dp_hdmi_s3d_converter;
+ /* to check if we have queried the display capability
+ * for eDP panel already. */
+ bool is_edp_sink_cap_valid;
+
+ enum ddc_transaction_type transaction_type;
+ enum signal_type signal;
+};
+
+struct av_sync_data {
+ uint8_t av_granularity;/* DPCD 00023h */
+ uint8_t aud_dec_lat1;/* DPCD 00024h */
+ uint8_t aud_dec_lat2;/* DPCD 00025h */
+ uint8_t aud_pp_lat1;/* DPCD 00026h */
+ uint8_t aud_pp_lat2;/* DPCD 00027h */
+ uint8_t vid_inter_lat;/* DPCD 00028h */
+ uint8_t vid_prog_lat;/* DPCD 00029h */
+ uint8_t aud_del_ins1;/* DPCD 0002Bh */
+ uint8_t aud_del_ins2;/* DPCD 0002Ch */
+ uint8_t aud_del_ins3;/* DPCD 0002Dh */
+};
+
+/*DP to VGA converter*/
+static const uint8_t DP_VGA_CONVERTER_ID_1[] = "mVGAa";
+/*DP to Dual link DVI converter*/
+static const uint8_t DP_DVI_CONVERTER_ID_1[] = "m2DVIa";
+/*Travis*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
+/*Nutmeg*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
+/*DP to VGA converter*/
+static const uint8_t DP_VGA_CONVERTER_ID_4[] = "DpVga";
+/*DP to Dual link DVI converter*/
+static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
+/*DP to Dual link DVI converter 2*/
+static const uint8_t DP_DVI_CONVERTER_ID_42[] = "v2DVIa";
+
+static const uint8_t DP_SINK_DEV_STRING_ID2_REV0[] = "\0\0\0\0\0\0";
+
+/* Identifies second generation PSR TCON from Parade: Device ID string:
+ * yy-xx-**-**-**-**
+ */
+/* xx - Hw ID high byte */
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_HIGH_BYTE =
+ 0x06;
+
+/* yy - HW ID low byte, the same silicon has several package/feature flavors */
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE1 =
+ 0x61;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE2 =
+ 0x62;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE3 =
+ 0x63;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE4 =
+ 0x72;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE5 =
+ 0x73;
+
+#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
new file mode 100644
index 000000000000..d8e52e3b8e3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DPCD_DEFS_H__
+#define __DAL_DPCD_DEFS_H__
+
+#include <drm/drm_dp_helper.h>
+
+enum dpcd_revision {
+ DPCD_REV_10 = 0x10,
+ DPCD_REV_11 = 0x11,
+ DPCD_REV_12 = 0x12,
+ DPCD_REV_13 = 0x13,
+ DPCD_REV_14 = 0x14
+};
+
+/* these are the types stored at DOWNSTREAMPORT_PRESENT */
+enum dpcd_downstream_port_type {
+ DOWNSTREAM_DP = 0,
+ DOWNSTREAM_VGA,
+ DOWNSTREAM_DVI_HDMI,
+ DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
+};
+
+enum dpcd_link_test_patterns {
+ LINK_TEST_PATTERN_NONE = 0,
+ LINK_TEST_PATTERN_COLOR_RAMP,
+ LINK_TEST_PATTERN_VERTICAL_BARS,
+ LINK_TEST_PATTERN_COLOR_SQUARES
+};
+
+enum dpcd_test_color_format {
+ TEST_COLOR_FORMAT_RGB = 0,
+ TEST_COLOR_FORMAT_YCBCR422,
+ TEST_COLOR_FORMAT_YCBCR444
+};
+
+enum dpcd_test_bit_depth {
+ TEST_BIT_DEPTH_6 = 0,
+ TEST_BIT_DEPTH_8,
+ TEST_BIT_DEPTH_10,
+ TEST_BIT_DEPTH_12,
+ TEST_BIT_DEPTH_16
+};
+
+/* PHY (encoder) test patterns
+The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248)
+*/
+enum dpcd_phy_test_patterns {
+ PHY_TEST_PATTERN_NONE = 0,
+ PHY_TEST_PATTERN_D10_2,
+ PHY_TEST_PATTERN_SYMBOL_ERROR,
+ PHY_TEST_PATTERN_PRBS7,
+ PHY_TEST_PATTERN_80BIT_CUSTOM,/* For DP1.2 only */
+ PHY_TEST_PATTERN_CP2520_1,
+ PHY_TEST_PATTERN_CP2520_2,
+ PHY_TEST_PATTERN_CP2520_3, /* same as TPS4 */
+};
+
+enum dpcd_test_dyn_range {
+ TEST_DYN_RANGE_VESA = 0,
+ TEST_DYN_RANGE_CEA
+};
+
+enum dpcd_audio_test_pattern {
+ AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0,/* direct HW translation */
+ AUDIO_TEST_PATTERN_SAWTOOTH
+};
+
+enum dpcd_audio_sampling_rate {
+ AUDIO_SAMPLING_RATE_32KHZ = 0,/* direct HW translation */
+ AUDIO_SAMPLING_RATE_44_1KHZ,
+ AUDIO_SAMPLING_RATE_48KHZ,
+ AUDIO_SAMPLING_RATE_88_2KHZ,
+ AUDIO_SAMPLING_RATE_96KHZ,
+ AUDIO_SAMPLING_RATE_176_4KHZ,
+ AUDIO_SAMPLING_RATE_192KHZ
+};
+
+enum dpcd_audio_channels {
+ AUDIO_CHANNELS_1 = 0,/* direct HW translation */
+ AUDIO_CHANNELS_2,
+ AUDIO_CHANNELS_3,
+ AUDIO_CHANNELS_4,
+ AUDIO_CHANNELS_5,
+ AUDIO_CHANNELS_6,
+ AUDIO_CHANNELS_7,
+ AUDIO_CHANNELS_8,
+
+ AUDIO_CHANNELS_COUNT
+};
+
+enum dpcd_audio_test_pattern_periods {
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_NOTUSED = 0,/* direct HW translation */
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_3,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_6,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_12,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_24,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_48,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_96,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_192,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_384,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_768,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_1536
+};
+
+/* This enum is for programming DPCD TRAINING_PATTERN_SET */
+enum dpcd_training_patterns {
+ DPCD_TRAINING_PATTERN_VIDEOIDLE = 0,/* direct HW translation! */
+ DPCD_TRAINING_PATTERN_1,
+ DPCD_TRAINING_PATTERN_2,
+ DPCD_TRAINING_PATTERN_3,
+ DPCD_TRAINING_PATTERN_4 = 7
+};
+
+/* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus
+It defines the possible PSR states. */
+enum dpcd_psr_sink_states {
+ PSR_SINK_STATE_INACTIVE = 0,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SOURCE_TIMING = 1,
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB = 2,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SINK_TIMING = 3,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_TIMING_RESYNC = 4,
+ PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
+};
+
+#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
new file mode 100644
index 000000000000..3248f699daf2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_FIXED31_32_H__
+#define __DAL_FIXED31_32_H__
+
+#include "os_types.h"
+
+#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+
+/*
+ * @brief
+ * Arithmetic operations on real numbers
+ * represented as fixed-point numbers.
+ * There are: 1 bit for sign,
+ * 31 bit for integer part,
+ * 32 bits for fractional part.
+ *
+ * @note
+ * Currently, overflows and underflows are asserted;
+ * no special result returned.
+ */
+
+struct fixed31_32 {
+ int64_t value;
+};
+
+/*
+ * @brief
+ * Useful constants
+ */
+
+static const struct fixed31_32 dal_fixed31_32_zero = { 0 };
+static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL };
+static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL };
+static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL };
+
+static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL };
+static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL };
+static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL };
+static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL };
+static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
+
+/*
+ * @brief
+ * Initialization routines
+ */
+
+/*
+ * @brief
+ * result = numerator / denominator
+ */
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator);
+
+/*
+ * @brief
+ * result = arg
+ */
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(int64_t arg);
+static inline struct fixed31_32 dal_fixed31_32_from_int(int64_t arg)
+{
+ if (__builtin_constant_p(arg)) {
+ struct fixed31_32 res;
+ BUILD_BUG_ON((LONG_MIN > arg) || (arg > LONG_MAX));
+ res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+ return res;
+ } else
+ return dal_fixed31_32_from_int_nonconst(arg);
+}
+
+/*
+ * @brief
+ * Unary operators
+ */
+
+/*
+ * @brief
+ * result = -arg
+ */
+static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ res.value = -arg.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return dal_fixed31_32_neg(arg);
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary relational operators
+ */
+
+/*
+ * @brief
+ * result = arg1 < arg2
+ */
+static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 <= arg2
+ */
+static inline bool dal_fixed31_32_le(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 == arg2
+ */
+static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+/*
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg1;
+ else
+ return arg2;
+}
+
+/*
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg2;
+ else
+ return arg1;
+}
+
+/*
+ * @brief
+ * | min_value, when arg <= min_value
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+static inline struct fixed31_32 dal_fixed31_32_clamp(
+ struct fixed31_32 arg,
+ struct fixed31_32 min_value,
+ struct fixed31_32 max_value)
+{
+ if (dal_fixed31_32_le(arg, min_value))
+ return min_value;
+ else if (dal_fixed31_32_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary shift operators
+ */
+
+/*
+ * @brief
+ * result = arg << shift
+ */
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift);
+
+/*
+ * @brief
+ * result = arg >> shift
+ */
+static inline struct fixed31_32 dal_fixed31_32_shr(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+ res.value = arg.value >> shift;
+ return res;
+}
+
+/*
+ * @brief
+ * Binary additive operators
+ */
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_add(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_sub(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+
+/*
+ * @brief
+ * Binary multiplicative operators
+ */
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_mul(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
+ int64_t arg2)
+{
+ return dal_fixed31_32_from_fraction(arg1.value,
+ dal_fixed31_32_from_int(arg2).value);
+}
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_from_fraction(arg1.value,
+ arg2.value);
+}
+
+/*
+ * @brief
+ * Reciprocal function
+ */
+
+/*
+ * @brief
+ * result = reciprocal(arg) := 1 / arg
+ *
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Trigonometric functions
+ */
+
+/*
+ * @brief
+ * result = sinc(arg) := sin(arg) / arg
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = sin(arg)
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = cos(arg)
+ *
+ * @note
+ * Argument specified in radians
+ * and should be in [-2pi...2pi] range -
+ * passing arguments outside that range
+ * will cause incorrect result!
+ */
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Transcendent functions
+ */
+
+/*
+ * @brief
+ * result = exp(arg)
+ *
+ * @note
+ * Currently, function is verified for abs(arg) <= 1.
+ */
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = log(arg)
+ *
+ * @note
+ * Currently, abs(arg) should be less than 1.
+ * No normalization is done.
+ * Currently, no special actions taken
+ * in case of invalid argument(s). Take care!
+ */
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Power function
+ */
+
+/*
+ * @brief
+ * result = pow(arg1, arg2)
+ *
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * Rounding functions
+ */
+
+/*
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg);
+
+/* the following two function are used in scaler hw programming to convert fixed
+ * point value to format 2 bits from integer part and 19 bits from fractional
+ * part. The same applies for u0d19, 0 bits from integer part and 19 bits from
+ * fractional
+ */
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/fixed32_32.h b/drivers/gpu/drm/amd/display/include/fixed32_32.h
new file mode 100644
index 000000000000..9c70341fe026
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed32_32.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DAL_FIXED32_32_H__
+#define __DAL_FIXED32_32_H__
+
+#include "os_types.h"
+
+struct fixed32_32 {
+ uint64_t value;
+};
+
+static const struct fixed32_32 dal_fixed32_32_zero = { 0 };
+static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL };
+static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL };
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d);
+static inline struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
+{
+ struct fixed32_32 fx;
+
+ fx.value = (uint64_t)value<<32;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_add_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_sub_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_mul_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_div_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+
+static inline struct fixed32_32 dal_fixed32_32_min(struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value < rhs.value) ? lhs : rhs;
+}
+
+static inline struct fixed32_32 dal_fixed32_32_max(struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value > rhs.value) ? lhs : rhs;
+}
+
+static inline bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value > rhs.value;
+}
+
+static inline bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value > ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value < rhs.value;
+}
+
+static inline bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value < ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value <= rhs.value;
+}
+
+static inline bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value <= ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value == rhs.value;
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 value);
+static inline uint32_t dal_fixed32_32_floor(struct fixed32_32 value)
+{
+ return value.value>>32;
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 value);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/gpio_interface.h b/drivers/gpu/drm/amd/display/include/gpio_interface.h
new file mode 100644
index 000000000000..e4fd31024b92
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_interface.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_INTERFACE_H__
+#define __DAL_GPIO_INTERFACE_H__
+
+#include "gpio_types.h"
+#include "grph_object_defs.h"
+
+struct gpio;
+
+/* Open the handle for future use */
+enum gpio_result dal_gpio_open(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+enum gpio_result dal_gpio_open_ex(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+/* Get high or low from the pin */
+enum gpio_result dal_gpio_get_value(
+ const struct gpio *gpio,
+ uint32_t *value);
+
+/* Set pin high or low */
+enum gpio_result dal_gpio_set_value(
+ const struct gpio *gpio,
+ uint32_t value);
+
+/* Get current mode */
+enum gpio_mode dal_gpio_get_mode(
+ const struct gpio *gpio);
+
+/* Change mode of the handle */
+enum gpio_result dal_gpio_change_mode(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+/* Get the GPIO id */
+enum gpio_id dal_gpio_get_id(
+ const struct gpio *gpio);
+
+/* Get the GPIO enum */
+uint32_t dal_gpio_get_enum(
+ const struct gpio *gpio);
+
+/* Set the GPIO pin configuration */
+enum gpio_result dal_gpio_set_config(
+ struct gpio *gpio,
+ const struct gpio_config_data *config_data);
+
+/* Obtain GPIO pin info */
+enum gpio_result dal_gpio_get_pin_info(
+ const struct gpio *gpio,
+ struct gpio_pin_info *pin_info);
+
+/* Obtain GPIO sync source */
+enum sync_source dal_gpio_get_sync_source(
+ const struct gpio *gpio);
+
+/* Obtain GPIO pin output state (active low or active high) */
+enum gpio_pin_output_state dal_gpio_get_output_state(
+ const struct gpio *gpio);
+
+/* Close the handle */
+void dal_gpio_close(
+ struct gpio *gpio);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
new file mode 100644
index 000000000000..f40259bade40
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_SERVICE_INTERFACE_H__
+#define __DAL_GPIO_SERVICE_INTERFACE_H__
+
+#include "gpio_types.h"
+#include "gpio_interface.h"
+#include "hw/gpio.h"
+
+struct gpio_service;
+
+struct gpio *dal_gpio_create(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_pin_output_state output_state);
+
+void dal_gpio_destroy(
+ struct gpio **ptr);
+
+struct gpio_service *dal_gpio_service_create(
+ enum dce_version dce_version_major,
+ enum dce_version dce_version_minor,
+ struct dc_context *ctx);
+
+struct gpio *dal_gpio_service_create_irq(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask);
+
+struct ddc *dal_gpio_create_ddc(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask,
+ struct gpio_ddc_hw_info *info);
+
+
+void dal_gpio_destroy_ddc(
+ struct ddc **ddc);
+
+void dal_gpio_service_destroy(
+ struct gpio_service **ptr);
+
+enum dc_irq_source dal_irq_get_source(
+ const struct gpio *irq);
+
+enum dc_irq_source dal_irq_get_rx_source(
+ const struct gpio *irq);
+
+enum gpio_result dal_irq_setup_hpd_filter(
+ struct gpio *irq,
+ struct gpio_hpd_config *config);
+
+struct gpio *dal_gpio_create_irq(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en);
+
+void dal_gpio_destroy_irq(
+ struct gpio **ptr);
+
+
+enum gpio_result dal_ddc_open(
+ struct ddc *ddc,
+ enum gpio_mode mode,
+ enum gpio_ddc_config_type config_type);
+
+enum gpio_result dal_ddc_change_mode(
+ struct ddc *ddc,
+ enum gpio_mode mode);
+
+enum gpio_ddc_line dal_ddc_get_line(
+ const struct ddc *ddc);
+
+enum gpio_result dal_ddc_set_config(
+ struct ddc *ddc,
+ enum gpio_ddc_config_type config_type);
+
+void dal_ddc_close(
+ struct ddc *ddc);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/gpio_types.h b/drivers/gpu/drm/amd/display/include/gpio_types.h
new file mode 100644
index 000000000000..8dd46ed799e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_types.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_TYPES_H__
+#define __DAL_GPIO_TYPES_H__
+
+#define BUNDLE_A_MASK 0x00FFF000L
+#define BUNDLE_B_MASK 0x00000FFFL
+
+/*
+ * gpio_result
+ *
+ * @brief
+ * The possible return codes that the GPIO object can return.
+ * These return codes can be generated
+ * directly by the GPIO object or from the GPIOPin object.
+ */
+enum gpio_result {
+ GPIO_RESULT_OK,
+ GPIO_RESULT_NULL_HANDLE,
+ GPIO_RESULT_INVALID_DATA,
+ GPIO_RESULT_DEVICE_BUSY,
+ GPIO_RESULT_OPEN_FAILED,
+ GPIO_RESULT_ALREADY_OPENED,
+ GPIO_RESULT_NON_SPECIFIC_ERROR
+};
+
+/*
+ * @brief
+ * Used to identify the specific GPIO device
+ *
+ * @notes
+ * These constants are used as indices in a vector.
+ * Thus they should start from zero and be contiguous.
+ */
+enum gpio_id {
+ GPIO_ID_UNKNOWN = (-1),
+ GPIO_ID_DDC_DATA,
+ GPIO_ID_DDC_CLOCK,
+ GPIO_ID_GENERIC,
+ GPIO_ID_HPD,
+ GPIO_ID_GPIO_PAD,
+ GPIO_ID_VIP_PAD,
+ GPIO_ID_SYNC,
+ GPIO_ID_GSL, /* global swap lock */
+ GPIO_ID_COUNT,
+ GPIO_ID_MIN = GPIO_ID_DDC_DATA,
+ GPIO_ID_MAX = GPIO_ID_GSL
+};
+
+#define GPIO_ENUM_UNKNOWN \
+ 32
+
+struct gpio_pin_info {
+ uint32_t offset;
+ uint32_t offset_y;
+ uint32_t offset_en;
+ uint32_t offset_mask;
+
+ uint32_t mask;
+ uint32_t mask_y;
+ uint32_t mask_en;
+ uint32_t mask_mask;
+};
+
+enum gpio_pin_output_state {
+ GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW,
+ GPIO_PIN_OUTPUT_STATE_ACTIVE_HIGH,
+ GPIO_PIN_OUTPUT_STATE_DEFAULT = GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW
+};
+
+enum gpio_generic {
+ GPIO_GENERIC_UNKNOWN = (-1),
+ GPIO_GENERIC_A,
+ GPIO_GENERIC_B,
+ GPIO_GENERIC_C,
+ GPIO_GENERIC_D,
+ GPIO_GENERIC_E,
+ GPIO_GENERIC_F,
+ GPIO_GENERIC_G,
+ GPIO_GENERIC_COUNT,
+ GPIO_GENERIC_MIN = GPIO_GENERIC_A,
+ GPIO_GENERIC_MAX = GPIO_GENERIC_B
+};
+
+enum gpio_hpd {
+ GPIO_HPD_UNKNOWN = (-1),
+ GPIO_HPD_1,
+ GPIO_HPD_2,
+ GPIO_HPD_3,
+ GPIO_HPD_4,
+ GPIO_HPD_5,
+ GPIO_HPD_6,
+ GPIO_HPD_COUNT,
+ GPIO_HPD_MIN = GPIO_HPD_1,
+ GPIO_HPD_MAX = GPIO_HPD_6
+};
+
+enum gpio_gpio_pad {
+ GPIO_GPIO_PAD_UNKNOWN = (-1),
+ GPIO_GPIO_PAD_0,
+ GPIO_GPIO_PAD_1,
+ GPIO_GPIO_PAD_2,
+ GPIO_GPIO_PAD_3,
+ GPIO_GPIO_PAD_4,
+ GPIO_GPIO_PAD_5,
+ GPIO_GPIO_PAD_6,
+ GPIO_GPIO_PAD_7,
+ GPIO_GPIO_PAD_8,
+ GPIO_GPIO_PAD_9,
+ GPIO_GPIO_PAD_10,
+ GPIO_GPIO_PAD_11,
+ GPIO_GPIO_PAD_12,
+ GPIO_GPIO_PAD_13,
+ GPIO_GPIO_PAD_14,
+ GPIO_GPIO_PAD_15,
+ GPIO_GPIO_PAD_16,
+ GPIO_GPIO_PAD_17,
+ GPIO_GPIO_PAD_18,
+ GPIO_GPIO_PAD_19,
+ GPIO_GPIO_PAD_20,
+ GPIO_GPIO_PAD_21,
+ GPIO_GPIO_PAD_22,
+ GPIO_GPIO_PAD_23,
+ GPIO_GPIO_PAD_24,
+ GPIO_GPIO_PAD_25,
+ GPIO_GPIO_PAD_26,
+ GPIO_GPIO_PAD_27,
+ GPIO_GPIO_PAD_28,
+ GPIO_GPIO_PAD_29,
+ GPIO_GPIO_PAD_30,
+ GPIO_GPIO_PAD_COUNT,
+ GPIO_GPIO_PAD_MIN = GPIO_GPIO_PAD_0,
+ GPIO_GPIO_PAD_MAX = GPIO_GPIO_PAD_30
+};
+
+enum gpio_vip_pad {
+ GPIO_VIP_PAD_UNKNOWN = (-1),
+ /* following never used -
+ * GPIO_ID_DDC_CLOCK::GPIO_DDC_LINE_VIP_PAD defined instead */
+ GPIO_VIP_PAD_SCL,
+ /* following never used -
+ * GPIO_ID_DDC_DATA::GPIO_DDC_LINE_VIP_PAD defined instead */
+ GPIO_VIP_PAD_SDA,
+ GPIO_VIP_PAD_VHAD,
+ GPIO_VIP_PAD_VPHCTL,
+ GPIO_VIP_PAD_VIPCLK,
+ GPIO_VIP_PAD_VID,
+ GPIO_VIP_PAD_VPCLK0,
+ GPIO_VIP_PAD_DVALID,
+ GPIO_VIP_PAD_PSYNC,
+ GPIO_VIP_PAD_COUNT,
+ GPIO_VIP_PAD_MIN = GPIO_VIP_PAD_SCL,
+ GPIO_VIP_PAD_MAX = GPIO_VIP_PAD_PSYNC
+};
+
+enum gpio_sync {
+ GPIO_SYNC_UNKNOWN = (-1),
+ GPIO_SYNC_HSYNC_A,
+ GPIO_SYNC_VSYNC_A,
+ GPIO_SYNC_HSYNC_B,
+ GPIO_SYNC_VSYNC_B,
+ GPIO_SYNC_COUNT,
+ GPIO_SYNC_MIN = GPIO_SYNC_HSYNC_A,
+ GPIO_SYNC_MAX = GPIO_SYNC_VSYNC_B
+};
+
+enum gpio_gsl {
+ GPIO_GSL_UNKNOWN = (-1),
+ GPIO_GSL_GENLOCK_CLOCK,
+ GPIO_GSL_GENLOCK_VSYNC,
+ GPIO_GSL_SWAPLOCK_A,
+ GPIO_GSL_SWAPLOCK_B,
+ GPIO_GSL_COUNT,
+ GPIO_GSL_MIN = GPIO_GSL_GENLOCK_CLOCK,
+ GPIO_GSL_MAX = GPIO_GSL_SWAPLOCK_B
+};
+
+/*
+ * @brief
+ * Unique Id for DDC handle.
+ * Values are meaningful (used as indexes to array)
+ */
+enum gpio_ddc_line {
+ GPIO_DDC_LINE_UNKNOWN = (-1),
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+ GPIO_DDC_LINE_DDC_VGA,
+ GPIO_DDC_LINE_VIP_PAD,
+ GPIO_DDC_LINE_I2C_PAD = GPIO_DDC_LINE_VIP_PAD,
+ GPIO_DDC_LINE_COUNT,
+ GPIO_DDC_LINE_MIN = GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_MAX = GPIO_DDC_LINE_I2C_PAD
+};
+
+/*
+ * @brief
+ * Identifies the mode of operation to open a GPIO device.
+ * A GPIO device (pin) can be programmed in only one of these modes at a time.
+ */
+enum gpio_mode {
+ GPIO_MODE_UNKNOWN = (-1),
+ GPIO_MODE_INPUT,
+ GPIO_MODE_OUTPUT,
+ GPIO_MODE_FAST_OUTPUT,
+ GPIO_MODE_HARDWARE,
+ GPIO_MODE_INTERRUPT
+};
+
+/*
+ * @brief
+ * Identifies the source of the signal when GPIO is in HW mode.
+ * get_signal_source() will return GPIO_SYGNAL_SOURCE__UNKNOWN
+ * when one of the following holds:
+ * 1. GPIO is input GPIO
+ * 2. GPIO is not opened in HW mode
+ * 3. GPIO does not have fixed signal source
+ * (like DC_GenericA have mux instead fixed)
+ */
+enum gpio_signal_source {
+ GPIO_SIGNAL_SOURCE_UNKNOWN = (-1),
+ GPIO_SIGNAL_SOURCE_DACA_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_DACB_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_DACA_HSYNC,
+ GPIO_SIGNAL_SOURCE_DACB_HSYNC,
+ GPIO_SIGNAL_SOURCE_DACA_VSYNC,
+ GPIO_SIGNAL_SOURCE_DACB_VSYNC,
+};
+
+enum gpio_stereo_source {
+ GPIO_STEREO_SOURCE_UNKNOWN = (-1),
+ GPIO_STEREO_SOURCE_D1,
+ GPIO_STEREO_SOURCE_D2,
+ GPIO_STEREO_SOURCE_D3,
+ GPIO_STEREO_SOURCE_D4,
+ GPIO_STEREO_SOURCE_D5,
+ GPIO_STEREO_SOURCE_D6
+};
+
+/*
+ * GPIO config
+ */
+
+enum gpio_config_type {
+ GPIO_CONFIG_TYPE_NONE,
+ GPIO_CONFIG_TYPE_DDC,
+ GPIO_CONFIG_TYPE_HPD,
+ GPIO_CONFIG_TYPE_GENERIC_MUX,
+ GPIO_CONFIG_TYPE_GSL_MUX,
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE
+};
+
+/* DDC configuration */
+
+enum gpio_ddc_config_type {
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C,
+ GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT,
+ GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT,
+ GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING
+};
+
+struct gpio_ddc_config {
+ enum gpio_ddc_config_type type;
+ bool data_en_bit_present;
+ bool clock_en_bit_present;
+};
+
+/* HPD configuration */
+
+struct gpio_hpd_config {
+ uint32_t delay_on_connect; /* milliseconds */
+ uint32_t delay_on_disconnect; /* milliseconds */
+};
+
+struct gpio_generic_mux_config {
+ bool enable_output_from_mux;
+ enum gpio_signal_source mux_select;
+ enum gpio_stereo_source stereo_select;
+};
+
+enum gpio_gsl_mux_config_type {
+ GPIO_GSL_MUX_CONFIG_TYPE_DISABLE,
+ GPIO_GSL_MUX_CONFIG_TYPE_TIMING_SYNC,
+ GPIO_GSL_MUX_CONFIG_TYPE_FLIP_SYNC
+};
+
+struct gpio_gsl_mux_config {
+ enum gpio_gsl_mux_config_type type;
+ /* Actually sync_source type,
+ * however we want to avoid inter-component includes here */
+ uint32_t gsl_group;
+};
+
+struct gpio_config_data {
+ enum gpio_config_type type;
+ union {
+ struct gpio_ddc_config ddc;
+ struct gpio_hpd_config hpd;
+ struct gpio_generic_mux_config generic_mux;
+ struct gpio_gsl_mux_config gsl_mux;
+ } config;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
new file mode 100644
index 000000000000..7a9b43f84a31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_CTRL_DEFS_H__
+#define __DAL_GRPH_OBJECT_CTRL_DEFS_H__
+
+#include "grph_object_defs.h"
+
+/*
+ * #####################################################
+ * #####################################################
+ *
+ * These defines shared between asic_control/bios_parser and other
+ * DAL components
+ *
+ * #####################################################
+ * #####################################################
+ */
+
+enum display_output_bit_depth {
+ PANEL_UNDEFINE = 0,
+ PANEL_6BIT_COLOR = 1,
+ PANEL_8BIT_COLOR = 2,
+ PANEL_10BIT_COLOR = 3,
+ PANEL_12BIT_COLOR = 4,
+ PANEL_16BIT_COLOR = 5,
+};
+
+
+/* Device type as abstracted by ATOM BIOS */
+enum dal_device_type {
+ DEVICE_TYPE_UNKNOWN = 0,
+ DEVICE_TYPE_LCD,
+ DEVICE_TYPE_CRT,
+ DEVICE_TYPE_DFP,
+ DEVICE_TYPE_CV,
+ DEVICE_TYPE_TV,
+ DEVICE_TYPE_CF,
+ DEVICE_TYPE_WIRELESS
+};
+
+/* Device ID as abstracted by ATOM BIOS */
+struct device_id {
+ enum dal_device_type device_type:16;
+ uint32_t enum_id:16; /* 1 based enum */
+ uint16_t raw_device_tag;
+};
+
+struct graphics_object_i2c_info {
+ struct gpio_info {
+ uint32_t clk_mask_register_index;
+ uint32_t clk_en_register_index;
+ uint32_t clk_y_register_index;
+ uint32_t clk_a_register_index;
+ uint32_t data_mask_register_index;
+ uint32_t data_en_register_index;
+ uint32_t data_y_register_index;
+ uint32_t data_a_register_index;
+
+ uint32_t clk_mask_shift;
+ uint32_t clk_en_shift;
+ uint32_t clk_y_shift;
+ uint32_t clk_a_shift;
+ uint32_t data_mask_shift;
+ uint32_t data_en_shift;
+ uint32_t data_y_shift;
+ uint32_t data_a_shift;
+ } gpio_info;
+
+ bool i2c_hw_assist;
+ uint32_t i2c_line;
+ uint32_t i2c_engine_id;
+ uint32_t i2c_slave_address;
+};
+
+struct graphics_object_hpd_info {
+ uint8_t hpd_int_gpio_uid;
+ uint8_t hpd_active;
+};
+
+struct connector_device_tag_info {
+ uint32_t acpi_device;
+ struct device_id dev_id;
+};
+
+struct device_timing {
+ struct misc_info {
+ uint32_t HORIZONTAL_CUT_OFF:1;
+ /* 0=Active High, 1=Active Low */
+ uint32_t H_SYNC_POLARITY:1;
+ /* 0=Active High, 1=Active Low */
+ uint32_t V_SYNC_POLARITY:1;
+ uint32_t VERTICAL_CUT_OFF:1;
+ uint32_t H_REPLICATION_BY2:1;
+ uint32_t V_REPLICATION_BY2:1;
+ uint32_t COMPOSITE_SYNC:1;
+ uint32_t INTERLACE:1;
+ uint32_t DOUBLE_CLOCK:1;
+ uint32_t RGB888:1;
+ uint32_t GREY_LEVEL:2;
+ uint32_t SPATIAL:1;
+ uint32_t TEMPORAL:1;
+ uint32_t API_ENABLED:1;
+ } misc_info;
+
+ uint32_t pixel_clk; /* in KHz */
+ uint32_t horizontal_addressable;
+ uint32_t horizontal_blanking_time;
+ uint32_t vertical_addressable;
+ uint32_t vertical_blanking_time;
+ uint32_t horizontal_sync_offset;
+ uint32_t horizontal_sync_width;
+ uint32_t vertical_sync_offset;
+ uint32_t vertical_sync_width;
+ uint32_t horizontal_border;
+ uint32_t vertical_border;
+};
+
+struct supported_refresh_rate {
+ uint32_t REFRESH_RATE_30HZ:1;
+ uint32_t REFRESH_RATE_40HZ:1;
+ uint32_t REFRESH_RATE_48HZ:1;
+ uint32_t REFRESH_RATE_50HZ:1;
+ uint32_t REFRESH_RATE_60HZ:1;
+};
+
+struct embedded_panel_info {
+ struct device_timing lcd_timing;
+ uint32_t ss_id;
+ struct supported_refresh_rate supported_rr;
+ uint32_t drr_enabled;
+ uint32_t min_drr_refresh_rate;
+ bool realtek_eDPToLVDS;
+};
+
+struct dc_firmware_info {
+ struct pll_info {
+ uint32_t crystal_frequency; /* in KHz */
+ uint32_t min_input_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t max_input_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t min_output_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t max_output_pxl_clk_pll_frequency; /* in KHz */
+ } pll_info;
+
+ struct firmware_feature {
+ uint32_t memory_clk_ss_percentage;
+ uint32_t engine_clk_ss_percentage;
+ } feature;
+
+ uint32_t default_display_engine_pll_frequency; /* in KHz */
+ uint32_t external_clock_source_frequency_for_dp; /* in KHz */
+ uint32_t smu_gpu_pll_output_freq; /* in KHz */
+ uint8_t min_allowed_bl_level;
+ uint8_t remote_display_config;
+ uint32_t default_memory_clk; /* in KHz */
+ uint32_t default_engine_clk; /* in KHz */
+ uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */
+ uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */
+
+
+};
+
+struct step_and_delay_info {
+ uint32_t step;
+ uint32_t delay;
+ uint32_t recommended_ref_div;
+};
+
+struct spread_spectrum_info {
+ struct spread_spectrum_type {
+ bool CENTER_MODE:1;
+ bool EXTERNAL:1;
+ bool STEP_AND_DELAY_INFO:1;
+ } type;
+
+ /* in unit of 0.01% (spreadPercentageDivider = 100),
+ otherwise in 0.001% units (spreadPercentageDivider = 1000); */
+ uint32_t spread_spectrum_percentage;
+ uint32_t spread_percentage_divider; /* 100 or 1000 */
+ uint32_t spread_spectrum_range; /* modulation freq (HZ)*/
+
+ union {
+ struct step_and_delay_info step_and_delay_info;
+ /* For mem/engine/uvd, Clock Out frequence (VCO ),
+ in unit of kHz. For TMDS/HDMI/LVDS, it is pixel clock,
+ for DP, it is link clock ( 270000 or 162000 ) */
+ uint32_t target_clock_range; /* in KHz */
+ };
+
+};
+
+struct graphics_object_encoder_cap_info {
+ uint32_t dp_hbr2_cap:1;
+ uint32_t dp_hbr2_validated:1;
+ /*
+ * TODO: added MST and HDMI 6G capable flags
+ */
+ uint32_t reserved:15;
+};
+
+struct din_connector_info {
+ uint32_t gpio_id;
+ bool gpio_tv_active_state;
+};
+
+/* Invalid channel mapping */
+enum { INVALID_DDI_CHANNEL_MAPPING = 0x0 };
+
+/**
+ * DDI PHY channel mapping reflecting XBAR setting
+ */
+union ddi_channel_mapping {
+ struct mapping {
+ uint8_t lane0:2; /* Mapping for lane 0 */
+ uint8_t lane1:2; /* Mapping for lane 1 */
+ uint8_t lane2:2; /* Mapping for lane 2 */
+ uint8_t lane3:2; /* Mapping for lane 3 */
+ } mapping;
+ uint8_t raw;
+};
+
+/**
+* Transmitter output configuration description
+*/
+struct transmitter_configuration_info {
+ /* DDI PHY ID for the transmitter */
+ enum transmitter transmitter_phy_id;
+ /* DDI PHY channel mapping reflecting crossbar setting */
+ union ddi_channel_mapping output_channel_mapping;
+};
+
+struct transmitter_configuration {
+ /* Configuration for the primary transmitter */
+ struct transmitter_configuration_info primary_transmitter_config;
+ /* Secondary transmitter configuration for Dual-link DVI */
+ struct transmitter_configuration_info secondary_transmitter_config;
+};
+
+/* These size should be sufficient to store info coming from BIOS */
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+#define NUMBER_OF_CSR_M3_ARB 10
+#define NUMBER_OF_DISP_CLK_VOLTAGE 4
+#define NUMBER_OF_AVAILABLE_SCLK 5
+
+struct i2c_reg_info {
+ unsigned char i2c_reg_index;
+ unsigned char i2c_reg_val;
+};
+
+struct ext_hdmi_settings {
+ unsigned char slv_addr;
+ unsigned char reg_num;
+ struct i2c_reg_info reg_settings[9];
+ unsigned char reg_num_6g;
+ struct i2c_reg_info reg_settings_6g[3];
+};
+
+
+/* V6 */
+struct integrated_info {
+ struct clock_voltage_caps {
+ /* The Voltage Index indicated by FUSE, same voltage index
+ shared with SCLK DPM fuse table */
+ uint32_t voltage_index;
+ /* Maximum clock supported with specified voltage index */
+ uint32_t max_supported_clk; /* in KHz */
+ } disp_clk_voltage[NUMBER_OF_DISP_CLK_VOLTAGE];
+
+ struct display_connection_info {
+ struct external_display_path {
+ /* A bit vector to show what devices are supported */
+ uint32_t device_tag;
+ /* 16bit device ACPI id. */
+ uint32_t device_acpi_enum;
+ /* A physical connector for displays to plug in,
+ using object connector definitions */
+ struct graphics_object_id device_connector_id;
+ /* An index into external AUX/DDC channel LUT */
+ uint8_t ext_aux_ddc_lut_index;
+ /* An index into external HPD pin LUT */
+ uint8_t ext_hpd_pin_lut_index;
+ /* external encoder object id */
+ struct graphics_object_id ext_encoder_obj_id;
+ /* XBAR mapping of the PHY channels */
+ union ddi_channel_mapping channel_mapping;
+
+ unsigned short caps;
+ } path[MAX_NUMBER_OF_EXT_DISPLAY_PATH];
+
+ uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID];
+ uint8_t checksum;
+ } ext_disp_conn_info; /* exiting long long time */
+
+ struct available_s_clk_list {
+ /* Maximum clock supported with specified voltage index */
+ uint32_t supported_s_clk; /* in KHz */
+ /* The Voltage Index indicated by FUSE for specified SCLK */
+ uint32_t voltage_index;
+ /* The Voltage ID indicated by FUSE for specified SCLK */
+ uint32_t voltage_id;
+ } avail_s_clk[NUMBER_OF_AVAILABLE_SCLK];
+
+ uint8_t memory_type;
+ uint8_t ma_channel_number;
+ uint32_t boot_up_engine_clock; /* in KHz */
+ uint32_t dentist_vco_freq; /* in KHz */
+ uint32_t boot_up_uma_clock; /* in KHz */
+ uint32_t boot_up_req_display_vector;
+ uint32_t other_display_misc;
+ uint32_t gpu_cap_info;
+ uint32_t sb_mmio_base_addr;
+ uint32_t system_config;
+ uint32_t cpu_cap_info;
+ uint32_t max_nb_voltage;
+ uint32_t min_nb_voltage;
+ uint32_t boot_up_nb_voltage;
+ uint32_t ext_disp_conn_info_offset;
+ uint32_t csr_m3_arb_cntl_default[NUMBER_OF_CSR_M3_ARB];
+ uint32_t csr_m3_arb_cntl_uvd[NUMBER_OF_CSR_M3_ARB];
+ uint32_t csr_m3_arb_cntl_fs3d[NUMBER_OF_CSR_M3_ARB];
+ uint32_t gmc_restore_reset_time;
+ uint32_t minimum_n_clk;
+ uint32_t idle_n_clk;
+ uint32_t ddr_dll_power_up_time;
+ uint32_t ddr_pll_power_up_time;
+ /* start for V6 */
+ uint32_t pcie_clk_ss_type;
+ uint32_t lvds_ss_percentage;
+ uint32_t lvds_sspread_rate_in_10hz;
+ uint32_t hdmi_ss_percentage;
+ uint32_t hdmi_sspread_rate_in_10hz;
+ uint32_t dvi_ss_percentage;
+ uint32_t dvi_sspread_rate_in_10_hz;
+ uint32_t sclk_dpm_boost_margin;
+ uint32_t sclk_dpm_throttle_margin;
+ uint32_t sclk_dpm_tdp_limit_pg;
+ uint32_t sclk_dpm_tdp_limit_boost;
+ uint32_t boost_engine_clock;
+ uint32_t boost_vid_2bit;
+ uint32_t enable_boost;
+ uint32_t gnb_tdp_limit;
+ /* Start from V7 */
+ uint32_t max_lvds_pclk_freq_in_single_link;
+ uint32_t lvds_misc;
+ uint32_t lvds_pwr_on_seq_dig_on_to_de_in_4ms;
+ uint32_t lvds_pwr_on_seq_de_to_vary_bl_in_4ms;
+ uint32_t lvds_pwr_off_seq_vary_bl_to_de_in4ms;
+ uint32_t lvds_pwr_off_seq_de_to_dig_on_in4ms;
+ uint32_t lvds_off_to_on_delay_in_4ms;
+ uint32_t lvds_pwr_on_seq_vary_bl_to_blon_in_4ms;
+ uint32_t lvds_pwr_off_seq_blon_to_vary_bl_in_4ms;
+ uint32_t lvds_reserved1;
+ uint32_t lvds_bit_depth_control_val;
+ //Start from V9
+ unsigned char dp0_ext_hdmi_slv_addr;
+ unsigned char dp0_ext_hdmi_reg_num;
+ struct i2c_reg_info dp0_ext_hdmi_reg_settings[9];
+ unsigned char dp0_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp0_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp1_ext_hdmi_slv_addr;
+ unsigned char dp1_ext_hdmi_reg_num;
+ struct i2c_reg_info dp1_ext_hdmi_reg_settings[9];
+ unsigned char dp1_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp1_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp2_ext_hdmi_slv_addr;
+ unsigned char dp2_ext_hdmi_reg_num;
+ struct i2c_reg_info dp2_ext_hdmi_reg_settings[9];
+ unsigned char dp2_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp2_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp3_ext_hdmi_slv_addr;
+ unsigned char dp3_ext_hdmi_reg_num;
+ struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
+ unsigned char dp3_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+};
+
+/**
+* Power source ids.
+*/
+enum power_source {
+ POWER_SOURCE_AC = 0,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_LIMITED_POWER,
+ POWER_SOURCE_LIMITED_POWER_2,
+ POWER_SOURCE_MAX
+};
+
+struct bios_event_info {
+ uint32_t thermal_state;
+ uint32_t backlight_level;
+ enum power_source powerSource;
+ bool has_thermal_state_changed;
+ bool has_power_source_changed;
+ bool has_forced_mode_changed;
+ bool forced_mode;
+ bool backlight_changed;
+};
+
+enum {
+ HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
+ TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
+};
+
+/*
+ * DFS-bypass flag
+ */
+/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
+enum {
+ DFS_BYPASS_ENABLE = 0x10
+};
+
+enum {
+ INVALID_BACKLIGHT = -1
+};
+
+struct panel_backlight_boundaries {
+ uint32_t min_signal_level;
+ uint32_t max_signal_level;
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
new file mode 100644
index 000000000000..2941b882b0b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_DEFS_H__
+#define __DAL_GRPH_OBJECT_DEFS_H__
+
+#include "grph_object_id.h"
+
+/* ********************************************************************
+ * ********************************************************************
+ *
+ * These defines shared between All Graphics Objects
+ *
+ * ********************************************************************
+ * ********************************************************************
+ */
+
+/* HPD unit id - HW direct translation */
+enum hpd_source_id {
+ HPD_SOURCEID1 = 0,
+ HPD_SOURCEID2,
+ HPD_SOURCEID3,
+ HPD_SOURCEID4,
+ HPD_SOURCEID5,
+ HPD_SOURCEID6,
+
+ HPD_SOURCEID_COUNT,
+ HPD_SOURCEID_UNKNOWN
+};
+
+/* DDC unit id - HW direct translation */
+enum channel_id {
+ CHANNEL_ID_UNKNOWN = 0,
+ CHANNEL_ID_DDC1,
+ CHANNEL_ID_DDC2,
+ CHANNEL_ID_DDC3,
+ CHANNEL_ID_DDC4,
+ CHANNEL_ID_DDC5,
+ CHANNEL_ID_DDC6,
+ CHANNEL_ID_DDC_VGA,
+ CHANNEL_ID_I2C_PAD,
+ CHANNEL_ID_COUNT
+};
+
+#define DECODE_CHANNEL_ID(ch_id) \
+ (ch_id) == CHANNEL_ID_DDC1 ? "CHANNEL_ID_DDC1" : \
+ (ch_id) == CHANNEL_ID_DDC2 ? "CHANNEL_ID_DDC2" : \
+ (ch_id) == CHANNEL_ID_DDC3 ? "CHANNEL_ID_DDC3" : \
+ (ch_id) == CHANNEL_ID_DDC4 ? "CHANNEL_ID_DDC4" : \
+ (ch_id) == CHANNEL_ID_DDC5 ? "CHANNEL_ID_DDC5" : \
+ (ch_id) == CHANNEL_ID_DDC6 ? "CHANNEL_ID_DDC6" : \
+ (ch_id) == CHANNEL_ID_DDC_VGA ? "CHANNEL_ID_DDC_VGA" : \
+ (ch_id) == CHANNEL_ID_I2C_PAD ? "CHANNEL_ID_I2C_PAD" : "Invalid"
+
+enum transmitter {
+ TRANSMITTER_UNKNOWN = (-1L),
+ TRANSMITTER_UNIPHY_A,
+ TRANSMITTER_UNIPHY_B,
+ TRANSMITTER_UNIPHY_C,
+ TRANSMITTER_UNIPHY_D,
+ TRANSMITTER_UNIPHY_E,
+ TRANSMITTER_UNIPHY_F,
+ TRANSMITTER_NUTMEG_CRT,
+ TRANSMITTER_TRAVIS_CRT,
+ TRANSMITTER_TRAVIS_LCD,
+ TRANSMITTER_UNIPHY_G,
+ TRANSMITTER_COUNT
+};
+
+/* Generic source of the synchronisation input/output signal */
+/* Can be used for flow control, stereo sync, timing sync, frame sync, etc */
+enum sync_source {
+ SYNC_SOURCE_NONE = 0,
+
+ /* Source based on controllers */
+ SYNC_SOURCE_CONTROLLER0,
+ SYNC_SOURCE_CONTROLLER1,
+ SYNC_SOURCE_CONTROLLER2,
+ SYNC_SOURCE_CONTROLLER3,
+ SYNC_SOURCE_CONTROLLER4,
+ SYNC_SOURCE_CONTROLLER5,
+
+ /* Source based on GSL group */
+ SYNC_SOURCE_GSL_GROUP0,
+ SYNC_SOURCE_GSL_GROUP1,
+ SYNC_SOURCE_GSL_GROUP2,
+
+ /* Source based on GSL IOs */
+ /* These IOs normally used as GSL input/output */
+ SYNC_SOURCE_GSL_IO_FIRST,
+ SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK = SYNC_SOURCE_GSL_IO_FIRST,
+ SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC,
+ SYNC_SOURCE_GSL_IO_SWAPLOCK_A,
+ SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
+ SYNC_SOURCE_GSL_IO_LAST = SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
+
+ /* Source based on regular IOs */
+ SYNC_SOURCE_IO_FIRST,
+ SYNC_SOURCE_IO_GENERIC_A = SYNC_SOURCE_IO_FIRST,
+ SYNC_SOURCE_IO_GENERIC_B,
+ SYNC_SOURCE_IO_GENERIC_C,
+ SYNC_SOURCE_IO_GENERIC_D,
+ SYNC_SOURCE_IO_GENERIC_E,
+ SYNC_SOURCE_IO_GENERIC_F,
+ SYNC_SOURCE_IO_HPD1,
+ SYNC_SOURCE_IO_HPD2,
+ SYNC_SOURCE_IO_HSYNC_A,
+ SYNC_SOURCE_IO_VSYNC_A,
+ SYNC_SOURCE_IO_HSYNC_B,
+ SYNC_SOURCE_IO_VSYNC_B,
+ SYNC_SOURCE_IO_LAST = SYNC_SOURCE_IO_VSYNC_B,
+
+ /* Misc. flow control sources */
+ SYNC_SOURCE_DUAL_GPU_PIN
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
new file mode 100644
index 000000000000..5eb2b4dc7b9c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_ID_H__
+#define __DAL_GRPH_OBJECT_ID_H__
+
+/* Types of graphics objects */
+enum object_type {
+ OBJECT_TYPE_UNKNOWN = 0,
+
+ /* Direct ATOM BIOS translation */
+ OBJECT_TYPE_GPU,
+ OBJECT_TYPE_ENCODER,
+ OBJECT_TYPE_CONNECTOR,
+ OBJECT_TYPE_ROUTER,
+ OBJECT_TYPE_GENERIC,
+
+ /* Driver specific */
+ OBJECT_TYPE_AUDIO,
+ OBJECT_TYPE_CONTROLLER,
+ OBJECT_TYPE_CLOCK_SOURCE,
+ OBJECT_TYPE_ENGINE,
+
+ OBJECT_TYPE_COUNT
+};
+
+/* Enumeration inside one type of graphics objects */
+enum object_enum_id {
+ ENUM_ID_UNKNOWN = 0,
+ ENUM_ID_1,
+ ENUM_ID_2,
+ ENUM_ID_3,
+ ENUM_ID_4,
+ ENUM_ID_5,
+ ENUM_ID_6,
+ ENUM_ID_7,
+
+ ENUM_ID_COUNT
+};
+
+/* Generic object ids */
+enum generic_id {
+ GENERIC_ID_UNKNOWN = 0,
+ GENERIC_ID_MXM_OPM,
+ GENERIC_ID_GLSYNC,
+ GENERIC_ID_STEREO,
+
+ GENERIC_ID_COUNT
+};
+
+/* Controller object ids */
+enum controller_id {
+ CONTROLLER_ID_UNDEFINED = 0,
+ CONTROLLER_ID_D0,
+ CONTROLLER_ID_D1,
+ CONTROLLER_ID_D2,
+ CONTROLLER_ID_D3,
+ CONTROLLER_ID_D4,
+ CONTROLLER_ID_D5,
+ CONTROLLER_ID_UNDERLAY0,
+ CONTROLLER_ID_MAX = CONTROLLER_ID_UNDERLAY0
+};
+
+#define IS_UNDERLAY_CONTROLLER(ctrlr_id) (ctrlr_id >= CONTROLLER_ID_UNDERLAY0)
+
+/*
+ * ClockSource object ids.
+ * We maintain the order matching (more or less) ATOM BIOS
+ * to improve optimized acquire
+ */
+enum clock_source_id {
+ CLOCK_SOURCE_ID_UNDEFINED = 0,
+ CLOCK_SOURCE_ID_PLL0,
+ CLOCK_SOURCE_ID_PLL1,
+ CLOCK_SOURCE_ID_PLL2,
+ CLOCK_SOURCE_ID_EXTERNAL, /* ID (Phy) ref. clk. for DP */
+ CLOCK_SOURCE_ID_DCPLL,
+ CLOCK_SOURCE_ID_DFS, /* DENTIST */
+ CLOCK_SOURCE_ID_VCE, /* VCE does not need a real PLL */
+ /* Used to distinguish between programming pixel clock and ID (Phy) clock */
+ CLOCK_SOURCE_ID_DP_DTO,
+
+ CLOCK_SOURCE_COMBO_PHY_PLL0, /*combo PHY PLL defines (DC 11.2 and up)*/
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0
+};
+
+/* Encoder object ids */
+enum encoder_id {
+ ENCODER_ID_UNKNOWN = 0,
+
+ /* Radeon Class Display Hardware */
+ ENCODER_ID_INTERNAL_LVDS,
+ ENCODER_ID_INTERNAL_TMDS1,
+ ENCODER_ID_INTERNAL_TMDS2,
+ ENCODER_ID_INTERNAL_DAC1,
+ ENCODER_ID_INTERNAL_DAC2, /* TV/CV DAC */
+
+ /* External Third Party Encoders */
+ ENCODER_ID_INTERNAL_LVTM1, /* not used for Radeon */
+ ENCODER_ID_INTERNAL_HDMI,
+
+ /* Kaledisope (KLDSCP) Class Display Hardware */
+ ENCODER_ID_INTERNAL_KLDSCP_TMDS1,
+ ENCODER_ID_INTERNAL_KLDSCP_DAC1,
+ ENCODER_ID_INTERNAL_KLDSCP_DAC2, /* Shared with CV/TV and CRT */
+ /* External TMDS (dual link) */
+ ENCODER_ID_EXTERNAL_MVPU_FPGA, /* MVPU FPGA chip */
+ ENCODER_ID_INTERNAL_DDI,
+ ENCODER_ID_INTERNAL_UNIPHY,
+ ENCODER_ID_INTERNAL_KLDSCP_LVTMA,
+ ENCODER_ID_INTERNAL_UNIPHY1,
+ ENCODER_ID_INTERNAL_UNIPHY2,
+ ENCODER_ID_EXTERNAL_NUTMEG,
+ ENCODER_ID_EXTERNAL_TRAVIS,
+
+ ENCODER_ID_INTERNAL_WIRELESS, /* Internal wireless display encoder */
+ ENCODER_ID_INTERNAL_UNIPHY3,
+ ENCODER_ID_INTERNAL_VIRTUAL,
+};
+
+/* Connector object ids */
+enum connector_id {
+ CONNECTOR_ID_UNKNOWN = 0,
+ CONNECTOR_ID_SINGLE_LINK_DVII = 1,
+ CONNECTOR_ID_DUAL_LINK_DVII = 2,
+ CONNECTOR_ID_SINGLE_LINK_DVID = 3,
+ CONNECTOR_ID_DUAL_LINK_DVID = 4,
+ CONNECTOR_ID_VGA = 5,
+ CONNECTOR_ID_HDMI_TYPE_A = 12,
+ CONNECTOR_ID_LVDS = 14,
+ CONNECTOR_ID_PCIE = 16,
+ CONNECTOR_ID_HARDCODE_DVI = 18,
+ CONNECTOR_ID_DISPLAY_PORT = 19,
+ CONNECTOR_ID_EDP = 20,
+ CONNECTOR_ID_MXM = 21,
+ CONNECTOR_ID_WIRELESS = 22,
+ CONNECTOR_ID_MIRACAST = 23,
+
+ CONNECTOR_ID_VIRTUAL = 100
+};
+
+/* Audio object ids */
+enum audio_id {
+ AUDIO_ID_UNKNOWN = 0,
+ AUDIO_ID_INTERNAL_AZALIA
+};
+
+/* Engine object ids */
+enum engine_id {
+ ENGINE_ID_DIGA,
+ ENGINE_ID_DIGB,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGE,
+ ENGINE_ID_DIGF,
+ ENGINE_ID_DIGG,
+ ENGINE_ID_DACA,
+ ENGINE_ID_DACB,
+ ENGINE_ID_VCE, /* wireless display pseudo-encoder */
+ ENGINE_ID_VIRTUAL,
+
+ ENGINE_ID_COUNT,
+ ENGINE_ID_UNKNOWN = (-1L)
+};
+
+enum transmitter_color_depth {
+ TRANSMITTER_COLOR_DEPTH_24 = 0, /* 8 bits */
+ TRANSMITTER_COLOR_DEPTH_30, /* 10 bits */
+ TRANSMITTER_COLOR_DEPTH_36, /* 12 bits */
+ TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
+};
+
+/*
+ *****************************************************************************
+ * graphics_object_id struct
+ *
+ * graphics_object_id is a very simple struct wrapping 32bit Graphics
+ * Object identication
+ *
+ * This struct should stay very simple
+ * No dependencies at all (no includes)
+ * No debug messages or asserts
+ * No #ifndef and preprocessor directives
+ * No grow in space (no more data member)
+ *****************************************************************************
+ */
+
+struct graphics_object_id {
+ uint32_t id:8;
+ uint32_t enum_id:4;
+ uint32_t type:4;
+ uint32_t reserved:16; /* for padding. total size should be u32 */
+};
+
+/* some simple functions for convenient graphics_object_id handle */
+
+static inline struct graphics_object_id dal_graphics_object_id_init(
+ uint32_t id,
+ enum object_enum_id enum_id,
+ enum object_type type)
+{
+ struct graphics_object_id result = {
+ id, enum_id, type, 0
+ };
+
+ return result;
+}
+
+bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2);
+
+/* Based on internal data members memory layout */
+static inline uint32_t dal_graphics_object_id_to_uint(
+ struct graphics_object_id id)
+{
+ return id.id + (id.enum_id << 0x8) + (id.type << 0xc);
+}
+
+static inline enum controller_id dal_graphics_object_id_get_controller_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CONTROLLER)
+ return id.id;
+ return CONTROLLER_ID_UNDEFINED;
+}
+
+static inline enum clock_source_id dal_graphics_object_id_get_clock_source_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CLOCK_SOURCE)
+ return id.id;
+ return CLOCK_SOURCE_ID_UNDEFINED;
+}
+
+static inline enum encoder_id dal_graphics_object_id_get_encoder_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_ENCODER)
+ return id.id;
+ return ENCODER_ID_UNKNOWN;
+}
+
+static inline enum connector_id dal_graphics_object_id_get_connector_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CONNECTOR)
+ return id.id;
+ return CONNECTOR_ID_UNKNOWN;
+}
+
+static inline enum audio_id dal_graphics_object_id_get_audio_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_AUDIO)
+ return id.id;
+ return AUDIO_ID_UNKNOWN;
+}
+
+static inline enum engine_id dal_graphics_object_id_get_engine_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_ENGINE)
+ return id.id;
+ return ENGINE_ID_UNKNOWN;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
new file mode 100644
index 000000000000..13a3c82d118f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2CAUX_INTERFACE_H__
+#define __DAL_I2CAUX_INTERFACE_H__
+
+#include "dc_types.h"
+#include "gpio_service_interface.h"
+
+
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+#define AUX_MAX_DEFER_WRITE_RETRY 20
+
+struct aux_payload {
+ /* set following flag to read/write I2C data,
+ * reset it to read/write DPCD data */
+ bool i2c_over_aux;
+ /* set following flag to write data,
+ * reset it to read data */
+ bool write;
+ uint32_t address;
+ uint8_t length;
+ uint8_t *data;
+};
+
+struct aux_command {
+ struct aux_payload *payloads;
+ uint8_t number_of_payloads;
+
+ /* expressed in milliseconds
+ * zero means "use default value" */
+ uint32_t defer_delay;
+
+ /* zero means "use default value" */
+ uint32_t max_defer_write_retry;
+
+ enum i2c_mot_mode mot;
+};
+
+union aux_config {
+ struct {
+ uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
+ } bits;
+ uint32_t raw;
+};
+
+struct i2caux;
+
+struct i2caux *dal_i2caux_create(
+ struct dc_context *ctx);
+
+bool dal_i2caux_submit_i2c_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct i2c_command *cmd);
+
+bool dal_i2caux_submit_aux_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct aux_command *cmd);
+
+void dal_i2caux_configure_aux(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ union aux_config cfg);
+
+void dal_i2caux_destroy(
+ struct i2caux **ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/irq_service_interface.h b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
new file mode 100644
index 000000000000..d6ebed524daf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_INTERFACE_H__
+#define __DAL_IRQ_SERVICE_INTERFACE_H__
+
+struct irq_service_init_data {
+ struct dc_context *ctx;
+};
+
+struct irq_service;
+
+void dal_irq_service_destroy(struct irq_service **irq_service);
+
+bool dal_irq_service_set(
+ struct irq_service *irq_service,
+ enum dc_irq_source source,
+ bool enable);
+
+bool dal_irq_service_ack(
+ struct irq_service *irq_service,
+ enum dc_irq_source source);
+
+enum dc_irq_source dal_irq_service_to_irq_source(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
new file mode 100644
index 000000000000..adea1a59f620
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LINK_SERVICE_TYPES_H__
+#define __DAL_LINK_SERVICE_TYPES_H__
+
+#include "grph_object_id.h"
+#include "dal_types.h"
+#include "irq_types.h"
+
+/*struct mst_mgr_callback_object;*/
+struct ddc;
+struct irq_manager;
+
+enum {
+ MAX_CONTROLLER_NUM = 6
+};
+
+enum dp_power_state {
+ DP_POWER_STATE_D0 = 1,
+ DP_POWER_STATE_D3
+};
+
+enum edp_revision {
+ /* eDP version 1.1 or lower */
+ EDP_REVISION_11 = 0x00,
+ /* eDP version 1.2 */
+ EDP_REVISION_12 = 0x01,
+ /* eDP version 1.3 */
+ EDP_REVISION_13 = 0x02
+};
+
+enum {
+ LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/
+};
+
+enum link_training_result {
+ LINK_TRAINING_SUCCESS,
+ LINK_TRAINING_CR_FAIL,
+ /* CR DONE bit is cleared during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR,
+ /* other failure during EQ step */
+ LINK_TRAINING_EQ_FAIL_EQ,
+};
+
+struct link_training_settings {
+ struct dc_link_settings link_settings;
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+ bool allow_invalid_msa_timing_param;
+};
+
+enum hw_dp_training_pattern {
+ HW_DP_TRAINING_PATTERN_1 = 0,
+ HW_DP_TRAINING_PATTERN_2,
+ HW_DP_TRAINING_PATTERN_3,
+ HW_DP_TRAINING_PATTERN_4
+};
+
+/*TODO: Move this enum test harness*/
+/* Test patterns*/
+enum dp_test_pattern {
+ /* Input data is pass through Scrambler
+ * and 8b10b Encoder straight to output*/
+ DP_TEST_PATTERN_VIDEO_MODE = 0,
+
+ /* phy test patterns*/
+ DP_TEST_PATTERN_PHY_PATTERN_BEGIN,
+ DP_TEST_PATTERN_D102 = DP_TEST_PATTERN_PHY_PATTERN_BEGIN,
+ DP_TEST_PATTERN_SYMBOL_ERROR,
+ DP_TEST_PATTERN_PRBS7,
+ DP_TEST_PATTERN_80BIT_CUSTOM,
+ DP_TEST_PATTERN_CP2520_1,
+ DP_TEST_PATTERN_CP2520_2,
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2,
+ DP_TEST_PATTERN_CP2520_3,
+
+ /* Link Training Patterns */
+ DP_TEST_PATTERN_TRAINING_PATTERN1,
+ DP_TEST_PATTERN_TRAINING_PATTERN2,
+ DP_TEST_PATTERN_TRAINING_PATTERN3,
+ DP_TEST_PATTERN_TRAINING_PATTERN4,
+ DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4,
+
+ /* link test patterns*/
+ DP_TEST_PATTERN_COLOR_SQUARES,
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA,
+ DP_TEST_PATTERN_VERTICAL_BARS,
+ DP_TEST_PATTERN_HORIZONTAL_BARS,
+ DP_TEST_PATTERN_COLOR_RAMP,
+
+ /* audio test patterns*/
+ DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED,
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH,
+
+ DP_TEST_PATTERN_UNSUPPORTED
+};
+
+enum dp_panel_mode {
+ /* not required */
+ DP_PANEL_MODE_DEFAULT,
+ /* standard mode for eDP */
+ DP_PANEL_MODE_EDP,
+ /* external chips specific settings */
+ DP_PANEL_MODE_SPECIAL
+};
+
+/* DPCD_ADDR_TRAINING_LANEx_SET registers value */
+union dpcd_training_lane_set {
+ struct {
+#if defined(LITTLEENDIAN_CPU)
+ uint8_t VOLTAGE_SWING_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ /* following is reserved in DP 1.1 */
+ uint8_t POST_CURSOR2_SET:2;
+#elif defined(BIGENDIAN_CPU)
+ uint8_t POST_CURSOR2_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t VOLTAGE_SWING_SET:2;
+#else
+ #error ARCH not defined!
+#endif
+ } bits;
+
+ uint8_t raw;
+};
+
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct dp_mst_stream_allocation {
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in
+ * transport packet */
+ uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct dp_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+#endif /*__DAL_LINK_SERVICE_TYPES_H__*/
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
new file mode 100644
index 000000000000..8e1fe70097be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_INTERFACE_H__
+#define __DAL_LOGGER_INTERFACE_H__
+
+#include "logger_types.h"
+
+struct dc_context;
+struct dc_link;
+struct dc_surface_update;
+struct resource_context;
+struct dc_state;
+
+/*
+ *
+ * DAL logger functionality
+ *
+ */
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
+
+uint32_t dal_logger_destroy(struct dal_logger **logger);
+
+void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
+
+void dm_logger_write(
+ struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ ...);
+
+void dm_logger_append(
+ struct log_entry *entry,
+ const char *msg,
+ ...);
+
+void dm_logger_open(
+ struct dal_logger *logger,
+ struct log_entry *entry,
+ enum dc_log_type log_type);
+
+void dm_logger_close(struct log_entry *entry);
+
+void dc_conn_log(struct dc_context *ctx,
+ const struct dc_link *link,
+ uint8_t *hex_data,
+ int hex_data_count,
+ enum dc_log_type event,
+ const char *msg,
+ ...);
+
+void logger_write(struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ void *paralist);
+
+void pre_surface_trace(
+ struct dc *dc,
+ const struct dc_plane_state *const *plane_states,
+ int surface_count);
+
+void update_surface_trace(
+ struct dc *dc,
+ const struct dc_surface_update *updates,
+ int surface_count);
+
+void post_surface_trace(struct dc *dc);
+
+void context_timing_trace(
+ struct dc *dc,
+ struct resource_context *res_ctx);
+
+void context_clock_trace(
+ struct dc *dc,
+ struct dc_state *context);
+
+/* Any function which is empty or have incomplete implementation should be
+ * marked by this macro.
+ * Note that the message will be printed exactly once for every function
+ * it is used in order to avoid repeating of the same message. */
+#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
+{ \
+ static bool print_not_impl = true; \
+\
+ if (print_not_impl == true) { \
+ print_not_impl = false; \
+ dm_logger_write(ctx->logger, LOG_WARNING, \
+ "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+ } \
+}
+
+/******************************************************************************
+ * Convenience macros to save on typing.
+ *****************************************************************************/
+
+#define DC_ERROR(...) \
+ dm_logger_write(dc_ctx->logger, LOG_ERROR, \
+ __VA_ARGS__)
+
+#define DC_SYNC_INFO(...) \
+ dm_logger_write(dc_ctx->logger, LOG_SYNC, \
+ __VA_ARGS__)
+
+/* Connectivity log format:
+ * [time stamp] [drm] [Major_minor] [connector name] message.....
+ * eg:
+ * [ 26.590965] [drm] [Conn_LKTN] [DP-1] HBRx4 pass VS=0, PE=0^
+ * [ 26.881060] [drm] [Conn_Mode] [DP-1] {2560x1080, 2784x1111@185580Khz}^
+ */
+
+#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
+ dc_conn_log(link->ctx, link, hex_data, hex_len, \
+ LOG_EVENT_DETECTION, ##__VA_ARGS__)
+
+#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
+ dc_conn_log(link->ctx, link, hex_data, hex_len, \
+ LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
+
+#define CONN_MSG_LT(link, ...) \
+ dc_conn_log(link->ctx, link, NULL, 0, \
+ LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
+
+#define CONN_MSG_MODE(link, ...) \
+ dc_conn_log(link->ctx, link, NULL, 0, \
+ LOG_EVENT_MODE_SET, ##__VA_ARGS__)
+
+/*
+ * Display Test Next logging
+ */
+#define DTN_INFO_BEGIN() \
+ dm_dtn_log_begin(dc_ctx)
+
+#define DTN_INFO(msg, ...) \
+ dm_dtn_log_append_v(dc_ctx, msg, ##__VA_ARGS__)
+
+#define DTN_INFO_END() \
+ dm_dtn_log_end(dc_ctx)
+
+#define PERFORMANCE_TRACE_START() \
+ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
+ unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
+ unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
+ if (dc->debug.performance_trace) {\
+ dm_logger_flush_buffer(dc->ctx->logger, false);\
+ dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
+ dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
+ dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
+ }
+
+#define PERFORMANCE_TRACE_END() do {\
+ unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
+ if (dc->debug.performance_trace) {\
+ dm_logger_write(dc->ctx->logger, \
+ LOG_PERF_TRACE, \
+ "%s duration: %d ticks\n", __func__,\
+ perf_trc_end_stmp - perf_trc_start_stmp); \
+ if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
+ dc->ctx->logger->mask = perf_trc_start_log_msk;\
+ dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
+ dm_logger_flush_buffer(dc->ctx->logger, false);\
+ } \
+ } \
+} while (0)
+
+#endif /* __DAL_LOGGER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
new file mode 100644
index 000000000000..e2ff8cd423d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_TYPES_H__
+#define __DAL_LOGGER_TYPES_H__
+
+#include "os_types.h"
+
+#define MAX_NAME_LEN 32
+
+struct dal_logger;
+
+enum dc_log_type {
+ LOG_ERROR = 0,
+ LOG_WARNING,
+ LOG_DEBUG,
+ LOG_DC,
+ LOG_DTN,
+ LOG_SURFACE,
+ LOG_HW_HOTPLUG,
+ LOG_HW_LINK_TRAINING,
+ LOG_HW_SET_MODE,
+ LOG_HW_RESUME_S3,
+ LOG_HW_AUDIO,
+ LOG_HW_HPD_IRQ,
+ LOG_MST,
+ LOG_SCALER,
+ LOG_BIOS,
+ LOG_BANDWIDTH_CALCS,
+ LOG_BANDWIDTH_VALIDATION,
+ LOG_I2C_AUX,
+ LOG_SYNC,
+ LOG_BACKLIGHT,
+ LOG_FEATURE_OVERRIDE,
+ LOG_DETECTION_EDID_PARSER,
+ LOG_DETECTION_DP_CAPS,
+ LOG_RESOURCE,
+ LOG_DML,
+ LOG_EVENT_MODE_SET,
+ LOG_EVENT_DETECTION,
+ LOG_EVENT_LINK_TRAINING,
+ LOG_EVENT_LINK_LOSS,
+ LOG_EVENT_UNDERFLOW,
+ LOG_IF_TRACE,
+ LOG_PERF_TRACE,
+
+ LOG_SECTION_TOTAL_COUNT
+};
+
+#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
+ (1 << LOG_DETECTION_EDID_PARSER))
+
+#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
+ (1 << LOG_WARNING) | \
+ (1 << LOG_EVENT_MODE_SET) | \
+ (1 << LOG_EVENT_DETECTION) | \
+ (1 << LOG_EVENT_LINK_TRAINING) | \
+ (1 << LOG_EVENT_LINK_LOSS) | \
+ (1 << LOG_EVENT_UNDERFLOW) | \
+ (1 << LOG_RESOURCE) | \
+ (1 << LOG_FEATURE_OVERRIDE) | \
+ (1 << LOG_DETECTION_EDID_PARSER) | \
+ (1 << LOG_DC) | \
+ (1 << LOG_HW_HOTPLUG) | \
+ (1 << LOG_HW_SET_MODE) | \
+ (1 << LOG_HW_RESUME_S3) | \
+ (1 << LOG_HW_HPD_IRQ) | \
+ (1 << LOG_SYNC) | \
+ (1 << LOG_BANDWIDTH_VALIDATION) | \
+ (1 << LOG_MST) | \
+ (1 << LOG_DETECTION_DP_CAPS) | \
+ (1 << LOG_BACKLIGHT)) | \
+ (1 << LOG_I2C_AUX) | \
+ (1 << LOG_IF_TRACE) | \
+ (1 << LOG_DTN) /* | \
+ (1 << LOG_DEBUG) | \
+ (1 << LOG_BIOS) | \
+ (1 << LOG_SURFACE) | \
+ (1 << LOG_SCALER) | \
+ (1 << LOG_DML) | \
+ (1 << LOG_HW_LINK_TRAINING) | \
+ (1 << LOG_HW_AUDIO)| \
+ (1 << LOG_BANDWIDTH_CALCS)*/
+
+union logger_flags {
+ struct {
+ uint32_t ENABLE_CONSOLE:1; /* Print to console */
+ uint32_t ENABLE_BUFFER:1; /* Print to buffer */
+ uint32_t RESERVED:30;
+ } bits;
+ uint32_t value;
+};
+
+struct log_entry {
+ struct dal_logger *logger;
+ enum dc_log_type type;
+
+ char *buf;
+ uint32_t buf_offset;
+ uint32_t max_buf_bytes;
+};
+
+/**
+* Structure for enumerating log types
+*/
+struct dc_log_type_info {
+ enum dc_log_type type;
+ char name[MAX_NAME_LEN];
+};
+
+/* Structure for keeping track of offsets, buffer, etc */
+
+#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
+
+/*Connectivity log needs to output EDID, which needs at lease 256x3 bytes,
+ * change log line size to 896 to meet the request.
+ */
+#define LOG_MAX_LINE_SIZE 896
+
+struct dal_logger {
+
+ /* How far into the circular buffer has been read by dsat
+ * Read offset should never cross write offset. Write \0's to
+ * read data just to be sure?
+ */
+ uint32_t buffer_read_offset;
+
+ /* How far into the circular buffer we have written
+ * Write offset should never cross read offset
+ */
+ uint32_t buffer_write_offset;
+
+ uint32_t open_count;
+
+ char *log_buffer; /* Pointer to malloc'ed buffer */
+ uint32_t log_buffer_size; /* Size of circular buffer */
+
+ uint32_t mask; /*array of masks for major elements*/
+
+ union logger_flags flags;
+ struct dc_context *ctx;
+};
+
+#endif /* __DAL_LOGGER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/set_mode_types.h b/drivers/gpu/drm/amd/display/include/set_mode_types.h
new file mode 100644
index 000000000000..fee2b6ffcfc1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/set_mode_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_SET_MODE_TYPES_H__
+#define __DAL_SET_MODE_TYPES_H__
+
+#include "dc_types.h"
+#include <linux/hdmi.h>
+
+/* Info frame packet status */
+enum info_frame_flag {
+ INFO_PACKET_PACKET_INVALID = 0,
+ INFO_PACKET_PACKET_VALID = 1,
+ INFO_PACKET_PACKET_RESET = 2,
+ INFO_PACKET_PACKET_UPDATE_SCAN_TYPE = 8
+};
+
+struct hdmi_info_frame_header {
+ uint8_t info_frame_type;
+ uint8_t version;
+ uint8_t length;
+};
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct info_packet_raw_data {
+ uint8_t hb0;
+ uint8_t hb1;
+ uint8_t hb2;
+ uint8_t sb[28]; /* sb0~sb27 */
+};
+
+union hdmi_info_packet {
+ struct avi_info_frame {
+ struct hdmi_info_frame_header header;
+
+ uint8_t CHECK_SUM:8;
+
+ uint8_t S0_S1:2;
+ uint8_t B0_B1:2;
+ uint8_t A0:1;
+ uint8_t Y0_Y1_Y2:3;
+
+ uint8_t R0_R3:4;
+ uint8_t M0_M1:2;
+ uint8_t C0_C1:2;
+
+ uint8_t SC0_SC1:2;
+ uint8_t Q0_Q1:2;
+ uint8_t EC0_EC2:3;
+ uint8_t ITC:1;
+
+ uint8_t VIC0_VIC7:8;
+
+ uint8_t PR0_PR3:4;
+ uint8_t CN0_CN1:2;
+ uint8_t YQ0_YQ1:2;
+
+ uint16_t bar_top;
+ uint16_t bar_bottom;
+ uint16_t bar_left;
+ uint16_t bar_right;
+
+ uint8_t reserved[14];
+ } bits;
+
+ struct info_packet_raw_data packet_raw_data;
+};
+
+struct info_packet {
+ enum info_frame_flag flags;
+ union hdmi_info_packet info_packet_hdmi;
+};
+
+struct info_frame {
+ struct info_packet avi_info_packet;
+ struct info_packet gamut_packet;
+ struct info_packet vendor_info_packet;
+ struct info_packet spd_info_packet;
+};
+
+#pragma pack(pop)
+
+#endif /* __DAL_SET_MODE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
new file mode 100644
index 000000000000..b5ebde642207
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_SIGNAL_TYPES_H__
+#define __DC_SIGNAL_TYPES_H__
+
+enum signal_type {
+ SIGNAL_TYPE_NONE = 0L, /* no signal */
+ SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
+ SIGNAL_TYPE_DVI_DUAL_LINK = (1 << 1),
+ SIGNAL_TYPE_HDMI_TYPE_A = (1 << 2),
+ SIGNAL_TYPE_LVDS = (1 << 3),
+ SIGNAL_TYPE_RGB = (1 << 4),
+ SIGNAL_TYPE_DISPLAY_PORT = (1 << 5),
+ SIGNAL_TYPE_DISPLAY_PORT_MST = (1 << 6),
+ SIGNAL_TYPE_EDP = (1 << 7),
+ SIGNAL_TYPE_VIRTUAL = (1 << 9), /* Virtual Display */
+};
+
+/* help functions for signal types manipulation */
+static inline bool dc_is_hdmi_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
+}
+
+static inline bool dc_is_dp_sst_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_EDP);
+}
+
+static inline bool dc_is_dp_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_EDP ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+}
+
+static inline bool dc_is_embedded_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS);
+}
+
+static inline bool dc_is_dvi_signal(enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return true;
+ break;
+ default:
+ return false;
+ }
+}
+
+static inline bool dc_is_dvi_single_link_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
+}
+
+static inline bool dc_is_dual_link_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DVI_DUAL_LINK);
+}
+
+static inline bool dc_is_audio_capable_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ dc_is_hdmi_signal(signal));
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/vector.h b/drivers/gpu/drm/amd/display/include/vector.h
new file mode 100644
index 000000000000..8233b7c22a07
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/vector.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_VECTOR_H__
+#define __DAL_VECTOR_H__
+
+struct vector {
+ uint8_t *container;
+ uint32_t struct_size;
+ uint32_t count;
+ uint32_t capacity;
+ struct dc_context *ctx;
+};
+
+bool dal_vector_construct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size);
+
+struct vector *dal_vector_create(
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size);
+
+/* 'initial_value' is optional. If initial_value not supplied,
+ * each "structure" in the vector will contain zeros by default. */
+struct vector *dal_vector_presized_create(
+ struct dc_context *ctx,
+ uint32_t size,
+ void *initial_value,
+ uint32_t struct_size);
+
+void dal_vector_destruct(
+ struct vector *vector);
+
+void dal_vector_destroy(
+ struct vector **vector);
+
+uint32_t dal_vector_get_count(
+ const struct vector *vector);
+
+/* dal_vector_insert_at
+ * reallocate container if necessary
+ * then shell items at right and insert
+ * return if the container modified
+ * do not check that index belongs to container
+ * since the function is private and index is going to be calculated
+ * either with by function or as get_count+1 */
+bool dal_vector_insert_at(
+ struct vector *vector,
+ const void *what,
+ uint32_t position);
+
+bool dal_vector_append(
+ struct vector *vector,
+ const void *item);
+
+/* operator[] */
+void *dal_vector_at_index(
+ const struct vector *vector,
+ uint32_t index);
+
+void dal_vector_set_at_index(
+ const struct vector *vector,
+ const void *what,
+ uint32_t index);
+
+/* create a clone (copy) of a vector */
+struct vector *dal_vector_clone(
+ const struct vector *vector_other);
+
+/* dal_vector_remove_at_index
+ * Shifts elements on the right from remove position to the left,
+ * removing an element at position by overwrite means*/
+bool dal_vector_remove_at_index(
+ struct vector *vector,
+ uint32_t index);
+
+uint32_t dal_vector_capacity(const struct vector *vector);
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity);
+
+void dal_vector_clear(struct vector *vector);
+
+/***************************************************************************
+ * Macro definitions of TYPE-SAFE versions of vector set/get functions.
+ ***************************************************************************/
+
+#define DAL_VECTOR_INSERT_AT(vector_type, type_t) \
+ static bool vector_type##_vector_insert_at( \
+ struct vector *vector, \
+ type_t what, \
+ uint32_t position) \
+{ \
+ return dal_vector_insert_at(vector, what, position); \
+}
+
+#define DAL_VECTOR_APPEND(vector_type, type_t) \
+ static bool vector_type##_vector_append( \
+ struct vector *vector, \
+ type_t item) \
+{ \
+ return dal_vector_append(vector, item); \
+}
+
+/* Note: "type_t" is the ONLY token accepted by "checkpatch.pl" and by
+ * "checkcommit" as *return type*.
+ * For uniformity reasons "type_t" is used for all type-safe macro
+ * definitions here. */
+#define DAL_VECTOR_AT_INDEX(vector_type, type_t) \
+ static type_t vector_type##_vector_at_index( \
+ const struct vector *vector, \
+ uint32_t index) \
+{ \
+ return dal_vector_at_index(vector, index); \
+}
+
+#define DAL_VECTOR_SET_AT_INDEX(vector_type, type_t) \
+ static void vector_type##_vector_set_at_index( \
+ const struct vector *vector, \
+ type_t what, \
+ uint32_t index) \
+{ \
+ dal_vector_set_at_index(vector, what, index); \
+}
+
+#endif /* __DAL_VECTOR_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/Makefile b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
new file mode 100644
index 000000000000..fb9a499780e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'freesync' sub-module of DAL.
+#
+
+FREESYNC = freesync.o
+
+AMD_DAL_FREESYNC = $(addprefix $(AMDDALPATH)/modules/freesync/,$(FREESYNC))
+#$(info ************ DAL-FREE SYNC_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_FREESYNC)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
new file mode 100644
index 000000000000..4d7db4aa28e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -0,0 +1,1483 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "mod_freesync.h"
+#include "core_types.h"
+
+#define MOD_FREESYNC_MAX_CONCURRENT_STREAMS 32
+
+/* Refresh rate ramp at a fixed rate of 65 Hz/second */
+#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
+/* Number of elements in the render times cache array */
+#define RENDER_TIMES_MAX_COUNT 20
+/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_EXIT_MARGIN 2000
+/* Number of consecutive frames to check before entering/exiting fixed refresh*/
+#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
+#define FIXED_REFRESH_EXIT_FRAME_COUNT 5
+
+#define FREESYNC_REGISTRY_NAME "freesync_v1"
+
+#define FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY "DalFreeSyncNoStaticForExternalDp"
+
+#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
+
+struct gradual_static_ramp {
+ bool ramp_is_active;
+ bool ramp_direction_is_up;
+ unsigned int ramp_current_frame_duration_in_ns;
+};
+
+struct time_cache {
+ /* video (48Hz feature) related */
+ unsigned int update_duration_in_ns;
+
+ /* BTR/fixed refresh related */
+ unsigned int prev_time_stamp_in_us;
+
+ unsigned int min_render_time_in_us;
+ unsigned int max_render_time_in_us;
+
+ unsigned int render_times_index;
+ unsigned int render_times[RENDER_TIMES_MAX_COUNT];
+};
+
+struct below_the_range {
+ bool btr_active;
+ bool program_btr;
+
+ unsigned int mid_point_in_us;
+
+ unsigned int inserted_frame_duration_in_us;
+ unsigned int frames_to_insert;
+ unsigned int frame_counter;
+};
+
+struct fixed_refresh {
+ bool fixed_active;
+ bool program_fixed;
+ unsigned int frame_counter;
+};
+
+struct freesync_range {
+ unsigned int min_refresh;
+ unsigned int max_frame_duration;
+ unsigned int vmax;
+
+ unsigned int max_refresh;
+ unsigned int min_frame_duration;
+ unsigned int vmin;
+};
+
+struct freesync_state {
+ bool fullscreen;
+ bool static_screen;
+ bool video;
+
+ unsigned int nominal_refresh_rate_in_micro_hz;
+ bool windowed_fullscreen;
+
+ struct time_cache time;
+
+ struct gradual_static_ramp static_ramp;
+ struct below_the_range btr;
+ struct fixed_refresh fixed_refresh;
+ struct freesync_range freesync_range;
+};
+
+struct freesync_entity {
+ struct dc_stream_state *stream;
+ struct mod_freesync_caps *caps;
+ struct freesync_state state;
+ struct mod_freesync_user_enable user_enable;
+};
+
+struct freesync_registry_options {
+ bool drr_external_supported;
+ bool drr_internal_supported;
+};
+
+struct core_freesync {
+ struct mod_freesync public;
+ struct dc *dc;
+ struct freesync_entity *map;
+ int num_entities;
+ struct freesync_registry_options opts;
+};
+
+#define MOD_FREESYNC_TO_CORE(mod_freesync)\
+ container_of(mod_freesync, struct core_freesync, public)
+
+static bool check_dc_support(const struct dc *dc)
+{
+ if (dc->stream_funcs.adjust_vmin_vmax == NULL)
+ return false;
+
+ return true;
+}
+
+struct mod_freesync *mod_freesync_create(struct dc *dc)
+{
+ struct core_freesync *core_freesync =
+ kzalloc(sizeof(struct core_freesync), GFP_KERNEL);
+
+
+ struct persistent_data_flag flag;
+
+ int i, data = 0;
+
+ if (core_freesync == NULL)
+ goto fail_alloc_context;
+
+ core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
+ GFP_KERNEL);
+
+ if (core_freesync->map == NULL)
+ goto fail_alloc_map;
+
+ for (i = 0; i < MOD_FREESYNC_MAX_CONCURRENT_STREAMS; i++)
+ core_freesync->map[i].stream = NULL;
+
+ core_freesync->num_entities = 0;
+
+ if (dc == NULL)
+ goto fail_construct;
+
+ core_freesync->dc = dc;
+
+ if (!check_dc_support(dc))
+ goto fail_construct;
+
+ /* Create initial module folder in registry for freesync enable data */
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+ dm_write_persistent_data(dc->ctx, NULL, FREESYNC_REGISTRY_NAME,
+ NULL, NULL, 0, &flag);
+ flag.save_per_edid = false;
+ flag.save_per_link = false;
+
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_internal_supported =
+ (data & 1) ? false : true;
+ }
+
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_external_supported =
+ (data & 1) ? false : true;
+ }
+
+ return &core_freesync->public;
+
+fail_construct:
+ kfree(core_freesync->map);
+
+fail_alloc_map:
+ kfree(core_freesync);
+
+fail_alloc_context:
+ return NULL;
+}
+
+void mod_freesync_destroy(struct mod_freesync *mod_freesync)
+{
+ if (mod_freesync != NULL) {
+ int i;
+ struct core_freesync *core_freesync =
+ MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (i = 0; i < core_freesync->num_entities; i++)
+ if (core_freesync->map[i].stream)
+ dc_stream_release(core_freesync->map[i].stream);
+
+ kfree(core_freesync->map);
+
+ kfree(core_freesync);
+ }
+}
+
+/* Given a specific dc_stream* this function finds its equivalent
+ * on the core_freesync->map and returns the corresponding index
+ */
+static unsigned int map_index_from_stream(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index = 0;
+
+ for (index = 0; index < core_freesync->num_entities; index++) {
+ if (core_freesync->map[index].stream == stream) {
+ return index;
+ }
+ }
+ /* Could not find stream requested */
+ ASSERT(false);
+ return index;
+}
+
+bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream, struct mod_freesync_caps *caps)
+{
+ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+ int persistent_freesync_enable = 0;
+ struct persistent_data_flag flag;
+ unsigned int nom_refresh_rate_uhz;
+ unsigned long long temp;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+
+ if (core_freesync->num_entities < MOD_FREESYNC_MAX_CONCURRENT_STREAMS) {
+
+ dc_stream_retain(stream);
+
+ temp = stream->timing.pix_clk_khz;
+ temp *= 1000ULL * 1000ULL * 1000ULL;
+ temp = div_u64(temp, stream->timing.h_total);
+ temp = div_u64(temp, stream->timing.v_total);
+
+ nom_refresh_rate_uhz = (unsigned int) temp;
+
+ core_freesync->map[core_freesync->num_entities].stream = stream;
+ core_freesync->map[core_freesync->num_entities].caps = caps;
+
+ core_freesync->map[core_freesync->num_entities].state.
+ fullscreen = false;
+ core_freesync->map[core_freesync->num_entities].state.
+ static_screen = false;
+ core_freesync->map[core_freesync->num_entities].state.
+ video = false;
+ core_freesync->map[core_freesync->num_entities].state.time.
+ update_duration_in_ns = 0;
+ core_freesync->map[core_freesync->num_entities].state.
+ static_ramp.ramp_is_active = false;
+
+ /* get persistent data from registry */
+ if (dm_read_persistent_data(dc->ctx, stream->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable", &persistent_freesync_enable,
+ sizeof(int), &flag)) {
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_gaming =
+ (persistent_freesync_enable & 1) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_static =
+ (persistent_freesync_enable & 2) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_video =
+ (persistent_freesync_enable & 4) ? true : false;
+ } else {
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_gaming = false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_static = false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_video = false;
+ }
+
+ if (caps->supported &&
+ nom_refresh_rate_uhz >= caps->min_refresh_in_micro_hz &&
+ nom_refresh_rate_uhz <= caps->max_refresh_in_micro_hz)
+ stream->ignore_msa_timing_param = 1;
+
+ core_freesync->num_entities++;
+ return true;
+ }
+ return false;
+}
+
+bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream)
+{
+ int i = 0;
+ struct core_freesync *core_freesync = NULL;
+ unsigned int index = 0;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ dc_stream_release(core_freesync->map[index].stream);
+ core_freesync->map[index].stream = NULL;
+ /* To remove this entity, shift everything after down */
+ for (i = index; i < core_freesync->num_entities - 1; i++)
+ core_freesync->map[i] = core_freesync->map[i + 1];
+ core_freesync->num_entities--;
+ return true;
+}
+
+static void update_stream_freesync_context(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index;
+ struct freesync_context *ctx;
+
+ ctx = &stream->freesync_ctx;
+
+ index = map_index_from_stream(core_freesync, stream);
+
+ ctx->supported = core_freesync->map[index].caps->supported;
+ ctx->enabled = (core_freesync->map[index].user_enable.enable_for_gaming ||
+ core_freesync->map[index].user_enable.enable_for_video ||
+ core_freesync->map[index].user_enable.enable_for_static);
+ ctx->active = (core_freesync->map[index].state.fullscreen ||
+ core_freesync->map[index].state.video ||
+ core_freesync->map[index].state.static_ramp.ramp_is_active);
+ ctx->min_refresh_in_micro_hz =
+ core_freesync->map[index].caps->min_refresh_in_micro_hz;
+ ctx->nominal_refresh_in_micro_hz = core_freesync->
+ map[index].state.nominal_refresh_rate_in_micro_hz;
+
+}
+
+static void update_stream(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index = map_index_from_stream(core_freesync, stream);
+ if (core_freesync->map[index].caps->supported) {
+ stream->ignore_msa_timing_param = 1;
+ update_stream_freesync_context(core_freesync, stream);
+ }
+}
+
+static void calc_freesync_range(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream,
+ struct freesync_state *state,
+ unsigned int min_refresh_in_uhz,
+ unsigned int max_refresh_in_uhz)
+{
+ unsigned int min_frame_duration_in_ns = 0, max_frame_duration_in_ns = 0;
+ unsigned int index = map_index_from_stream(core_freesync, stream);
+ uint32_t vtotal = stream->timing.v_total;
+
+ if ((min_refresh_in_uhz == 0) || (max_refresh_in_uhz == 0)) {
+ state->freesync_range.min_refresh =
+ state->nominal_refresh_rate_in_micro_hz;
+ state->freesync_range.max_refresh =
+ state->nominal_refresh_rate_in_micro_hz;
+
+ state->freesync_range.max_frame_duration = 0;
+ state->freesync_range.min_frame_duration = 0;
+
+ state->freesync_range.vmax = vtotal;
+ state->freesync_range.vmin = vtotal;
+
+ return;
+ }
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ max_refresh_in_uhz)));
+ max_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ min_refresh_in_uhz)));
+
+ state->freesync_range.min_refresh = min_refresh_in_uhz;
+ state->freesync_range.max_refresh = max_refresh_in_uhz;
+
+ state->freesync_range.max_frame_duration = max_frame_duration_in_ns;
+ state->freesync_range.min_frame_duration = min_frame_duration_in_ns;
+
+ state->freesync_range.vmax = div64_u64(div64_u64(((unsigned long long)(
+ max_frame_duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+ state->freesync_range.vmin = div64_u64(div64_u64(((unsigned long long)(
+ min_frame_duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+
+ /* vmin/vmax cannot be less than vtotal */
+ if (state->freesync_range.vmin < vtotal) {
+ /* Error of 1 is permissible */
+ ASSERT((state->freesync_range.vmin + 1) >= vtotal);
+ state->freesync_range.vmin = vtotal;
+ }
+
+ if (state->freesync_range.vmax < vtotal) {
+ /* Error of 1 is permissible */
+ ASSERT((state->freesync_range.vmax + 1) >= vtotal);
+ state->freesync_range.vmax = vtotal;
+ }
+
+ /* Determine whether BTR can be supported */
+ if (max_frame_duration_in_ns >=
+ 2 * min_frame_duration_in_ns)
+ core_freesync->map[index].caps->btr_supported = true;
+ else
+ core_freesync->map[index].caps->btr_supported = false;
+
+ /* Cache the time variables */
+ state->time.max_render_time_in_us =
+ max_frame_duration_in_ns / 1000;
+ state->time.min_render_time_in_us =
+ min_frame_duration_in_ns / 1000;
+ state->btr.mid_point_in_us =
+ (max_frame_duration_in_ns +
+ min_frame_duration_in_ns) / 2000;
+}
+
+static void calc_v_total_from_duration(struct dc_stream_state *stream,
+ unsigned int duration_in_ns, int *v_total_nominal)
+{
+ *v_total_nominal = div64_u64(div64_u64(((unsigned long long)(
+ duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+}
+
+static void calc_v_total_for_static_ramp(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream,
+ unsigned int index, int *v_total)
+{
+ unsigned int frame_duration = 0;
+
+ struct gradual_static_ramp *static_ramp_variables =
+ &core_freesync->map[index].state.static_ramp;
+
+ /* Calc ratio between new and current frame duration with 3 digit */
+ unsigned int frame_duration_ratio = div64_u64(1000000,
+ (1000 + div64_u64(((unsigned long long)(
+ STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) *
+ static_ramp_variables->ramp_current_frame_duration_in_ns),
+ 1000000000)));
+
+ /* Calculate delta between new and current frame duration in ns */
+ unsigned int frame_duration_delta = div64_u64(((unsigned long long)(
+ static_ramp_variables->ramp_current_frame_duration_in_ns) *
+ (1000 - frame_duration_ratio)), 1000);
+
+ /* Adjust frame duration delta based on ratio between current and
+ * standard frame duration (frame duration at 60 Hz refresh rate).
+ */
+ unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)(
+ frame_duration_delta) * static_ramp_variables->
+ ramp_current_frame_duration_in_ns), 16666666);
+
+ /* Going to a higher refresh rate (lower frame duration) */
+ if (static_ramp_variables->ramp_direction_is_up) {
+ /* reduce frame duration */
+ static_ramp_variables->ramp_current_frame_duration_in_ns -=
+ ramp_rate_interpolated;
+
+ /* min frame duration */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ core_freesync->map[index].state.
+ nominal_refresh_rate_in_micro_hz)));
+
+ /* adjust for frame duration below min */
+ if (static_ramp_variables->ramp_current_frame_duration_in_ns <=
+ frame_duration) {
+
+ static_ramp_variables->ramp_is_active = false;
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns =
+ frame_duration;
+ }
+ /* Going to a lower refresh rate (larger frame duration) */
+ } else {
+ /* increase frame duration */
+ static_ramp_variables->ramp_current_frame_duration_in_ns +=
+ ramp_rate_interpolated;
+
+ /* max frame duration */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ core_freesync->map[index].caps->min_refresh_in_micro_hz)));
+
+ /* adjust for frame duration above max */
+ if (static_ramp_variables->ramp_current_frame_duration_in_ns >=
+ frame_duration) {
+
+ static_ramp_variables->ramp_is_active = false;
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns =
+ frame_duration;
+ }
+ }
+
+ calc_v_total_from_duration(stream, static_ramp_variables->
+ ramp_current_frame_duration_in_ns, v_total);
+}
+
+static void reset_freesync_state_variables(struct freesync_state* state)
+{
+ state->static_ramp.ramp_is_active = false;
+ if (state->nominal_refresh_rate_in_micro_hz)
+ state->static_ramp.ramp_current_frame_duration_in_ns =
+ ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ state->btr.btr_active = false;
+ state->btr.frame_counter = 0;
+ state->btr.frames_to_insert = 0;
+ state->btr.inserted_frame_duration_in_us = 0;
+ state->btr.program_btr = false;
+
+ state->fixed_refresh.fixed_active = false;
+ state->fixed_refresh.program_fixed = false;
+}
+/*
+ * Sets freesync mode on a stream depending on current freesync state.
+ */
+static bool set_freesync_on_streams(struct core_freesync *core_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ int v_total_nominal = 0, v_total_min = 0, v_total_max = 0;
+ unsigned int stream_idx, map_index = 0;
+ struct freesync_state *state;
+
+ if (num_streams == 0 || streams == NULL || num_streams > 1)
+ return false;
+
+ for (stream_idx = 0; stream_idx < num_streams; stream_idx++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_idx]);
+
+ state = &core_freesync->map[map_index].state;
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ /* Fullscreen has the topmost priority. If the
+ * fullscreen bit is set, we are in a fullscreen
+ * application where it should not matter if it is
+ * static screen. We should not check the static_screen
+ * or video bit.
+ *
+ * Special cases of fullscreen include btr and fixed
+ * refresh. We program btr on every flip and involves
+ * programming full range right before the last inserted frame.
+ * However, we do not want to program the full freesync range
+ * when fixed refresh is active, because we only program
+ * that logic once and this will override it.
+ */
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_gaming == true &&
+ state->fullscreen == true &&
+ state->fixed_refresh.fixed_active == false) {
+ /* Enable freesync */
+
+ v_total_min = state->freesync_range.vmin;
+ v_total_max = state->freesync_range.vmax;
+
+ /* Update the freesync context for the stream */
+ update_stream_freesync_context(core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(core_freesync->dc, streams,
+ num_streams, v_total_min,
+ v_total_max);
+
+ return true;
+
+ } else if (core_freesync->map[map_index].user_enable.
+ enable_for_video && state->video == true) {
+ /* Enable 48Hz feature */
+
+ calc_v_total_from_duration(streams[stream_idx],
+ state->time.update_duration_in_ns,
+ &v_total_nominal);
+
+ /* Program only if v_total_nominal is in range*/
+ if (v_total_nominal >=
+ streams[stream_idx]->timing.v_total) {
+
+ /* Update the freesync context for
+ * the stream
+ */
+ update_stream_freesync_context(
+ core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+ }
+ return true;
+
+ } else {
+ /* Disable freesync */
+ v_total_nominal = streams[stream_idx]->
+ timing.v_total;
+
+ /* Update the freesync context for
+ * the stream
+ */
+ update_stream_freesync_context(
+ core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+
+ /* Reset the cached variables */
+ reset_freesync_state_variables(state);
+
+ return true;
+ }
+ } else {
+ /* Disable freesync */
+ v_total_nominal = streams[stream_idx]->
+ timing.v_total;
+ /*
+ * we have to reset drr always even sink does
+ * not support freesync because a former stream has
+ * be programmed
+ */
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+ /* Reset the cached variables */
+ reset_freesync_state_variables(state);
+ }
+
+ }
+
+ return false;
+}
+
+static void set_static_ramp_variables(struct core_freesync *core_freesync,
+ unsigned int index, bool enable_static_screen)
+{
+ unsigned int frame_duration = 0;
+ unsigned int nominal_refresh_rate = core_freesync->map[index].state.
+ nominal_refresh_rate_in_micro_hz;
+ unsigned int min_refresh_rate= core_freesync->map[index].caps->
+ min_refresh_in_micro_hz;
+ struct gradual_static_ramp *static_ramp_variables =
+ &core_freesync->map[index].state.static_ramp;
+
+ /* If we are ENABLING static screen, refresh rate should go DOWN.
+ * If we are DISABLING static screen, refresh rate should go UP.
+ */
+ if (enable_static_screen)
+ static_ramp_variables->ramp_direction_is_up = false;
+ else
+ static_ramp_variables->ramp_direction_is_up = true;
+
+ /* If ramp is not active, set initial frame duration depending on
+ * whether we are enabling/disabling static screen mode. If the ramp is
+ * already active, ramp should continue in the opposite direction
+ * starting with the current frame duration
+ */
+ if (!static_ramp_variables->ramp_is_active) {
+ if (enable_static_screen == true) {
+ /* Going to lower refresh rate, so start from max
+ * refresh rate (min frame duration)
+ */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ nominal_refresh_rate)));
+ } else {
+ /* Going to higher refresh rate, so start from min
+ * refresh rate (max frame duration)
+ */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ min_refresh_rate)));
+ }
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns = frame_duration;
+
+ static_ramp_variables->ramp_is_active = true;
+ }
+}
+
+void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ unsigned int index, v_total, inserted_frame_v_total = 0;
+ unsigned int min_frame_duration_in_ns, vmax, vmin = 0;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ if (core_freesync->num_entities == 0)
+ return;
+
+ index = map_index_from_stream(core_freesync,
+ streams[0]);
+
+ if (core_freesync->map[index].caps->supported == false)
+ return;
+
+ state = &core_freesync->map[index].state;
+
+ /* Below the Range Logic */
+
+ /* Only execute if in fullscreen mode */
+ if (state->fullscreen == true &&
+ core_freesync->map[index].user_enable.enable_for_gaming &&
+ core_freesync->map[index].caps->btr_supported &&
+ state->btr.btr_active) {
+
+ /* TODO: pass in flag for Pre-DCE12 ASIC
+ * in order for frame variable duration to take affect,
+ * it needs to be done one VSYNC early, which is at
+ * frameCounter == 1.
+ * For DCE12 and newer updates to V_TOTAL_MIN/MAX
+ * will take affect on current frame
+ */
+ if (state->btr.frames_to_insert == state->btr.frame_counter) {
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ vmin = state->freesync_range.vmin;
+
+ inserted_frame_v_total = vmin;
+
+ if (min_frame_duration_in_ns / 1000)
+ inserted_frame_v_total =
+ state->btr.inserted_frame_duration_in_us *
+ vmin / (min_frame_duration_in_ns / 1000);
+
+ /* Set length of inserted frames as v_total_max*/
+ vmax = inserted_frame_v_total;
+ vmin = inserted_frame_v_total;
+
+ /* Program V_TOTAL */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, vmin, vmax);
+ }
+
+ if (state->btr.frame_counter > 0)
+ state->btr.frame_counter--;
+
+ /* Restore FreeSync */
+ if (state->btr.frame_counter == 0)
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+ }
+
+ /* If in fullscreen freesync mode or in video, do not program
+ * static screen ramp values
+ */
+ if (state->fullscreen == true || state->video == true) {
+
+ state->static_ramp.ramp_is_active = false;
+
+ return;
+ }
+
+ /* Gradual Static Screen Ramping Logic */
+
+ /* Execute if ramp is active and user enabled freesync static screen*/
+ if (state->static_ramp.ramp_is_active &&
+ core_freesync->map[index].user_enable.enable_for_static) {
+
+ calc_v_total_for_static_ramp(core_freesync, streams[0],
+ index, &v_total);
+
+ /* Update the freesync context for the stream */
+ update_stream_freesync_context(core_freesync, streams[0]);
+
+ /* Program static screen ramp values */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total,
+ v_total);
+
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+ }
+}
+
+void mod_freesync_update_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_params *freesync_params)
+{
+ bool freesync_program_required = false;
+ unsigned int stream_index;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ if (core_freesync->num_entities == 0)
+ return;
+
+ for(stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ unsigned int map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ bool is_embedded = dc_is_embedded_signal(
+ streams[stream_index]->sink->sink_signal);
+
+ struct freesync_registry_options *opts = &core_freesync->opts;
+
+ state = &core_freesync->map[map_index].state;
+
+ switch (freesync_params->state){
+ case FREESYNC_STATE_FULLSCREEN:
+ state->fullscreen = freesync_params->enable;
+ freesync_program_required = true;
+ state->windowed_fullscreen =
+ freesync_params->windowed_fullscreen;
+ break;
+ case FREESYNC_STATE_STATIC_SCREEN:
+ /* Static screen ramp is disabled by default, but can
+ * be enabled through regkey.
+ */
+ if ((is_embedded && opts->drr_internal_supported) ||
+ (!is_embedded && opts->drr_external_supported))
+
+ if (state->static_screen !=
+ freesync_params->enable) {
+
+ /* Change the state flag */
+ state->static_screen =
+ freesync_params->enable;
+
+ /* Update static screen ramp */
+ set_static_ramp_variables(core_freesync,
+ map_index,
+ freesync_params->enable);
+ }
+ /* We program the ramp starting next VUpdate */
+ break;
+ case FREESYNC_STATE_VIDEO:
+ /* Change core variables only if there is a change*/
+ if(freesync_params->update_duration_in_ns !=
+ state->time.update_duration_in_ns) {
+
+ state->video = freesync_params->enable;
+ state->time.update_duration_in_ns =
+ freesync_params->update_duration_in_ns;
+
+ freesync_program_required = true;
+ }
+ break;
+ case FREESYNC_STATE_NONE:
+ /* handle here to avoid warning */
+ break;
+ }
+ }
+
+ /* Update mask */
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+
+ if (freesync_program_required)
+ /* Program freesync according to current state*/
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+}
+
+
+bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_params *freesync_params)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ if (core_freesync->map[index].state.fullscreen) {
+ freesync_params->state = FREESYNC_STATE_FULLSCREEN;
+ freesync_params->enable = true;
+ } else if (core_freesync->map[index].state.static_screen) {
+ freesync_params->state = FREESYNC_STATE_STATIC_SCREEN;
+ freesync_params->enable = true;
+ } else if (core_freesync->map[index].state.video) {
+ freesync_params->state = FREESYNC_STATE_VIDEO;
+ freesync_params->enable = true;
+ } else {
+ freesync_params->state = FREESYNC_STATE_NONE;
+ freesync_params->enable = false;
+ }
+
+ freesync_params->update_duration_in_ns =
+ core_freesync->map[index].state.time.update_duration_in_ns;
+
+ freesync_params->windowed_fullscreen =
+ core_freesync->map[index].state.windowed_fullscreen;
+
+ return true;
+}
+
+bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_user_enable *user_enable)
+{
+ unsigned int stream_index, map_index;
+ int persistent_data = 0;
+ struct persistent_data_flag flag;
+ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+
+ for(stream_index = 0; stream_index < num_streams;
+ stream_index++){
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ core_freesync->map[map_index].user_enable = *user_enable;
+
+ /* Write persistent data in registry*/
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_gaming)
+ persistent_data = persistent_data | 1;
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_static)
+ persistent_data = persistent_data | 2;
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_video)
+ persistent_data = persistent_data | 4;
+
+ dm_write_persistent_data(dc->ctx,
+ streams[stream_index]->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable",
+ &persistent_data,
+ sizeof(int),
+ &flag);
+ }
+
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+
+ return true;
+}
+
+bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_user_enable *user_enable)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *user_enable = core_freesync->map[index].user_enable;
+
+ return true;
+}
+
+bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ bool *is_ramp_active)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *is_ramp_active =
+ core_freesync->map[index].state.static_ramp.ramp_is_active;
+
+ return true;
+}
+
+bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *streams,
+ unsigned int min_refresh,
+ unsigned int max_refresh,
+ struct mod_freesync_caps *caps)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync;
+ struct freesync_state *state;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, streams);
+ state = &core_freesync->map[index].state;
+
+ if (max_refresh == 0)
+ max_refresh = state->nominal_refresh_rate_in_micro_hz;
+
+ if (min_refresh == 0) {
+ /* Restore defaults */
+ calc_freesync_range(core_freesync, streams, state,
+ core_freesync->map[index].caps->
+ min_refresh_in_micro_hz,
+ state->nominal_refresh_rate_in_micro_hz);
+ } else {
+ calc_freesync_range(core_freesync, streams,
+ state,
+ min_refresh,
+ max_refresh);
+
+ /* Program vtotal min/max */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, &streams, 1,
+ state->freesync_range.vmin,
+ state->freesync_range.vmax);
+ }
+
+ if (min_refresh != 0 &&
+ dc_is_embedded_signal(streams->sink->sink_signal) &&
+ (max_refresh - min_refresh >= 10000000)) {
+ caps->supported = true;
+ caps->min_refresh_in_micro_hz = min_refresh;
+ caps->max_refresh_in_micro_hz = max_refresh;
+ }
+
+ /* Update the stream */
+ update_stream(core_freesync, streams);
+
+ return true;
+}
+
+bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *min_refresh,
+ unsigned int *max_refresh)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *min_refresh =
+ core_freesync->map[index].state.freesync_range.min_refresh;
+ *max_refresh =
+ core_freesync->map[index].state.freesync_range.max_refresh;
+
+ return true;
+}
+
+bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *vmin,
+ unsigned int *vmax)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *vmin =
+ core_freesync->map[index].state.freesync_range.vmin;
+ *vmax =
+ core_freesync->map[index].state.freesync_range.vmax;
+
+ return true;
+}
+
+bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *nom_v_pos,
+ unsigned int *v_pos)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+ struct crtc_position position;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ if (core_freesync->dc->stream_funcs.get_crtc_position(
+ core_freesync->dc, &stream, 1,
+ &position.vertical_count, &position.nominal_vcount)) {
+
+ *nom_v_pos = position.nominal_vcount;
+ *v_pos = position.vertical_count;
+
+ return true;
+ }
+
+ return false;
+}
+
+void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ unsigned int stream_index, map_index;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+ unsigned long long temp = 0;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ state = &core_freesync->map[map_index].state;
+
+ /* Update the field rate for new timing */
+ temp = streams[stream_index]->timing.pix_clk_khz;
+ temp *= 1000ULL * 1000ULL * 1000ULL;
+ temp = div_u64(temp,
+ streams[stream_index]->timing.h_total);
+ temp = div_u64(temp,
+ streams[stream_index]->timing.v_total);
+ state->nominal_refresh_rate_in_micro_hz =
+ (unsigned int) temp;
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ /* Update the stream */
+ update_stream(core_freesync, streams[stream_index]);
+
+ /* Calculate vmin/vmax and refresh rate for
+ * current mode
+ */
+ calc_freesync_range(core_freesync, *streams, state,
+ core_freesync->map[map_index].caps->
+ min_refresh_in_micro_hz,
+ state->nominal_refresh_rate_in_micro_hz);
+
+ /* Update mask */
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+ }
+ }
+
+ /* Program freesync according to current state*/
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+}
+
+/* Add the timestamps to the cache and determine whether BTR programming
+ * is required, depending on the times calculated
+ */
+static void update_timestamps(struct core_freesync *core_freesync,
+ const struct dc_stream_state *stream, unsigned int map_index,
+ unsigned int last_render_time_in_us)
+{
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ state->time.render_times[state->time.render_times_index] =
+ last_render_time_in_us;
+ state->time.render_times_index++;
+
+ if (state->time.render_times_index >= RENDER_TIMES_MAX_COUNT)
+ state->time.render_times_index = 0;
+
+ if (last_render_time_in_us + BTR_EXIT_MARGIN <
+ state->time.max_render_time_in_us) {
+
+ /* Exit Below the Range */
+ if (state->btr.btr_active) {
+
+ state->btr.program_btr = true;
+ state->btr.btr_active = false;
+ state->btr.frame_counter = 0;
+
+ /* Exit Fixed Refresh mode */
+ } else if (state->fixed_refresh.fixed_active) {
+
+ state->fixed_refresh.frame_counter++;
+
+ if (state->fixed_refresh.frame_counter >
+ FIXED_REFRESH_EXIT_FRAME_COUNT) {
+ state->fixed_refresh.frame_counter = 0;
+ state->fixed_refresh.program_fixed = true;
+ state->fixed_refresh.fixed_active = false;
+ }
+ }
+
+ } else if (last_render_time_in_us > state->time.max_render_time_in_us) {
+
+ /* Enter Below the Range */
+ if (!state->btr.btr_active &&
+ core_freesync->map[map_index].caps->btr_supported) {
+
+ state->btr.program_btr = true;
+ state->btr.btr_active = true;
+
+ /* Enter Fixed Refresh mode */
+ } else if (!state->fixed_refresh.fixed_active &&
+ !core_freesync->map[map_index].caps->btr_supported) {
+
+ state->fixed_refresh.frame_counter++;
+
+ if (state->fixed_refresh.frame_counter >
+ FIXED_REFRESH_ENTER_FRAME_COUNT) {
+ state->fixed_refresh.frame_counter = 0;
+ state->fixed_refresh.program_fixed = true;
+ state->fixed_refresh.fixed_active = true;
+ }
+ }
+ }
+
+ /* When Below the Range is active, must react on every frame */
+ if (state->btr.btr_active)
+ state->btr.program_btr = true;
+}
+
+static void apply_below_the_range(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream, unsigned int map_index,
+ unsigned int last_render_time_in_us)
+{
+ unsigned int inserted_frame_duration_in_us = 0;
+ unsigned int mid_point_frames_ceil = 0;
+ unsigned int mid_point_frames_floor = 0;
+ unsigned int frame_time_in_us = 0;
+ unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
+ unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
+ unsigned int frames_to_insert = 0;
+ unsigned int min_frame_duration_in_ns = 0;
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ if (!state->btr.program_btr)
+ return;
+
+ state->btr.program_btr = false;
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ /* Program BTR */
+
+ /* BTR set to "not active" so disengage */
+ if (!state->btr.btr_active)
+
+ /* Restore FreeSync */
+ set_freesync_on_streams(core_freesync, &stream, 1);
+
+ /* BTR set to "active" so engage */
+ else {
+
+ /* Calculate number of midPoint frames that could fit within
+ * the render time interval- take ceil of this value
+ */
+ mid_point_frames_ceil = (last_render_time_in_us +
+ state->btr.mid_point_in_us- 1) /
+ state->btr.mid_point_in_us;
+
+ if (mid_point_frames_ceil > 0) {
+
+ frame_time_in_us = last_render_time_in_us /
+ mid_point_frames_ceil;
+ delta_from_mid_point_in_us_1 =
+ (state->btr.mid_point_in_us >
+ frame_time_in_us) ?
+ (state->btr.mid_point_in_us - frame_time_in_us):
+ (frame_time_in_us - state->btr.mid_point_in_us);
+ }
+
+ /* Calculate number of midPoint frames that could fit within
+ * the render time interval- take floor of this value
+ */
+ mid_point_frames_floor = last_render_time_in_us /
+ state->btr.mid_point_in_us;
+
+ if (mid_point_frames_floor > 0) {
+
+ frame_time_in_us = last_render_time_in_us /
+ mid_point_frames_floor;
+ delta_from_mid_point_in_us_2 =
+ (state->btr.mid_point_in_us >
+ frame_time_in_us) ?
+ (state->btr.mid_point_in_us - frame_time_in_us):
+ (frame_time_in_us - state->btr.mid_point_in_us);
+ }
+
+ /* Choose number of frames to insert based on how close it
+ * can get to the mid point of the variable range.
+ */
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2)
+ frames_to_insert = mid_point_frames_ceil;
+ else
+ frames_to_insert = mid_point_frames_floor;
+
+ /* Either we've calculated the number of frames to insert,
+ * or we need to insert min duration frames
+ */
+ if (frames_to_insert > 0)
+ inserted_frame_duration_in_us = last_render_time_in_us /
+ frames_to_insert;
+
+ if (inserted_frame_duration_in_us <
+ state->time.min_render_time_in_us)
+
+ inserted_frame_duration_in_us =
+ state->time.min_render_time_in_us;
+
+ /* Cache the calculated variables */
+ state->btr.inserted_frame_duration_in_us =
+ inserted_frame_duration_in_us;
+ state->btr.frames_to_insert = frames_to_insert;
+ state->btr.frame_counter = frames_to_insert;
+
+ }
+}
+
+static void apply_fixed_refresh(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream, unsigned int map_index)
+{
+ unsigned int vmin = 0, vmax = 0;
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ if (!state->fixed_refresh.program_fixed)
+ return;
+
+ state->fixed_refresh.program_fixed = false;
+
+ /* Program Fixed Refresh */
+
+ /* Fixed Refresh set to "not active" so disengage */
+ if (!state->fixed_refresh.fixed_active) {
+ set_freesync_on_streams(core_freesync, &stream, 1);
+
+ /* Fixed Refresh set to "active" so engage (fix to max) */
+ } else {
+
+ vmin = state->freesync_range.vmin;
+
+ vmax = vmin;
+
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, &stream,
+ 1, vmin,
+ vmax);
+ }
+}
+
+void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int curr_time_stamp_in_us)
+{
+ unsigned int stream_index, map_index, last_render_time_in_us = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ last_render_time_in_us = curr_time_stamp_in_us -
+ core_freesync->map[map_index].state.time.
+ prev_time_stamp_in_us;
+
+ /* Add the timestamps to the cache and determine
+ * whether BTR program is required
+ */
+ update_timestamps(core_freesync, streams[stream_index],
+ map_index, last_render_time_in_us);
+
+ if (core_freesync->map[map_index].state.fullscreen &&
+ core_freesync->map[map_index].user_enable.
+ enable_for_gaming) {
+
+ if (core_freesync->map[map_index].caps->btr_supported) {
+
+ apply_below_the_range(core_freesync,
+ streams[stream_index], map_index,
+ last_render_time_in_us);
+ } else {
+ apply_fixed_refresh(core_freesync,
+ streams[stream_index], map_index);
+ }
+ }
+
+ core_freesync->map[map_index].state.time.
+ prev_time_stamp_in_us = curr_time_stamp_in_us;
+ }
+
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
new file mode 100644
index 000000000000..84b53425f2c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+
+
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_FREESYNC_H_
+#define MOD_FREESYNC_H_
+
+#include "dm_services.h"
+
+struct mod_freesync *mod_freesync_create(struct dc *dc);
+void mod_freesync_destroy(struct mod_freesync *mod_freesync);
+
+struct mod_freesync {
+ int dummy;
+};
+
+enum mod_freesync_state {
+ FREESYNC_STATE_NONE,
+ FREESYNC_STATE_FULLSCREEN,
+ FREESYNC_STATE_STATIC_SCREEN,
+ FREESYNC_STATE_VIDEO
+};
+
+enum mod_freesync_user_enable_mask {
+ FREESYNC_USER_ENABLE_STATIC = 0x1,
+ FREESYNC_USER_ENABLE_VIDEO = 0x2,
+ FREESYNC_USER_ENABLE_GAMING = 0x4
+};
+
+struct mod_freesync_user_enable {
+ bool enable_for_static;
+ bool enable_for_video;
+ bool enable_for_gaming;
+};
+
+struct mod_freesync_caps {
+ bool supported;
+ unsigned int min_refresh_in_micro_hz;
+ unsigned int max_refresh_in_micro_hz;
+
+ bool btr_supported;
+};
+
+struct mod_freesync_params {
+ enum mod_freesync_state state;
+ bool enable;
+ unsigned int update_duration_in_ns;
+ bool windowed_fullscreen;
+};
+
+/*
+ * Add stream to be tracked by module
+ */
+bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream, struct mod_freesync_caps *caps);
+
+/*
+ * Remove stream to be tracked by module
+ */
+bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream);
+
+/*
+ * Update the freesync state flags for each display and program
+ * freesync accordingly
+ */
+void mod_freesync_update_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_params *freesync_params);
+
+bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_params *freesync_params);
+
+bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_user_enable *user_enable);
+
+bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_user_enable *user_enable);
+
+bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ bool *is_ramp_active);
+
+bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *streams,
+ unsigned int min_refresh,
+ unsigned int max_refresh,
+ struct mod_freesync_caps *caps);
+
+bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *min_refresh,
+ unsigned int *max_refresh);
+
+bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *vmin,
+ unsigned int *vmax);
+
+bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *nom_v_pos,
+ unsigned int *v_pos);
+
+void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams);
+
+void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams);
+
+void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int curr_time_stamp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 3a49fbd8baf8..b72f8a43d86b 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -25,6 +25,8 @@
#include <drm/amd_asic_type.h>
+struct seq_file;
+
#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
/*
@@ -119,6 +121,12 @@ enum amd_fan_ctrl_mode {
AMD_FAN_CTRL_AUTO = 2,
};
+enum pp_clock_type {
+ PP_SCLK,
+ PP_MCLK,
+ PP_PCIE,
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
@@ -224,4 +232,96 @@ struct amd_ip_funcs {
void (*get_clockgating_state)(void *handle, u32 *flags);
};
+
+enum amd_pp_task;
+enum amd_pp_clock_type;
+struct pp_states_info;
+struct amd_pp_simple_clock_info;
+struct amd_pp_display_configuration;
+struct amd_pp_clock_info;
+struct pp_display_clock_request;
+struct pp_wm_sets_with_clock_ranges_soc15;
+struct pp_clock_levels_with_voltage;
+struct pp_clock_levels_with_latency;
+struct amd_pp_clocks;
+
+struct amd_pm_funcs {
+/* export for dpm on ci and si */
+ int (*pre_set_power_state)(void *handle);
+ int (*set_power_state)(void *handle);
+ void (*post_set_power_state)(void *handle);
+ void (*display_configuration_changed)(void *handle);
+ void (*print_power_state)(void *handle, void *ps);
+ bool (*vblank_too_short)(void *handle);
+ void (*enable_bapm)(void *handle, bool enable);
+ int (*check_state_equal)(void *handle,
+ void *cps,
+ void *rps,
+ bool *equal);
+/* export for sysfs */
+ int (*get_temperature)(void *handle);
+ void (*set_fan_control_mode)(void *handle, u32 mode);
+ u32 (*get_fan_control_mode)(void *handle);
+ int (*set_fan_speed_percent)(void *handle, u32 speed);
+ int (*get_fan_speed_percent)(void *handle, u32 *speed);
+ int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+ int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
+ int (*get_sclk_od)(void *handle);
+ int (*set_sclk_od)(void *handle, uint32_t value);
+ int (*get_mclk_od)(void *handle);
+ int (*set_mclk_od)(void *handle, uint32_t value);
+ int (*read_sensor)(void *handle, int idx, void *value, int *size);
+ enum amd_dpm_forced_level (*get_performance_level)(void *handle);
+ enum amd_pm_state_type (*get_current_power_state)(void *handle);
+ int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
+ int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+ int (*get_pp_table)(void *handle, char **table);
+ int (*set_pp_table)(void *handle, const char *buf, size_t size);
+ void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
+
+ int (*reset_power_profile_state)(void *handle,
+ struct amd_pp_profile *request);
+ int (*get_power_profile_state)(void *handle,
+ struct amd_pp_profile *query);
+ int (*set_power_profile_state)(void *handle,
+ struct amd_pp_profile *request);
+ int (*switch_power_profile)(void *handle,
+ enum amd_pp_profile_type type);
+/* export to amdgpu */
+ void (*powergate_uvd)(void *handle, bool gate);
+ void (*powergate_vce)(void *handle, bool gate);
+ struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx);
+ int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
+ void *input, void *output);
+ int (*load_firmware)(void *handle);
+ int (*wait_for_fw_loading_complete)(void *handle);
+ int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
+/* export to DC */
+ u32 (*get_sclk)(void *handle, bool low);
+ u32 (*get_mclk)(void *handle, bool low);
+ int (*display_configuration_change)(void *handle,
+ const struct amd_pp_display_configuration *input);
+ int (*get_display_power_level)(void *handle,
+ struct amd_pp_simple_clock_info *output);
+ int (*get_current_clocks)(void *handle,
+ struct amd_pp_clock_info *clocks);
+ int (*get_clock_by_type)(void *handle,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+ int (*get_clock_by_type_with_latency)(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks);
+ int (*get_clock_by_type_with_voltage)(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+ int (*set_watermarks_for_clocks_ranges)(void *handle,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ int (*display_clock_voltage_request)(void *handle,
+ struct pp_display_clock_request *clock);
+ int (*get_display_mode_validation_clocks)(void *handle,
+ struct amd_pp_simple_clock_info *clocks);
+};
+
+
#endif /* __AMD_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
index b39fb6821faa..4ccf9681c45d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
@@ -2283,6 +2283,10 @@
#define mmDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
#define mmDCHUBBUB_SPARE 0x0534
#define mmDCHUBBUB_SPARE_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_INDEX 0x053a
+#define mmDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_DATA 0x053b
+#define mmDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dchubbub_dchubbub_dcperfmon_dc_perfmon_dispdec
@@ -10361,6 +10365,8 @@
#define mmUNIPHYG_CHANNEL_XBAR_CNTL_BASE_IDX 2
#define mmDCIO_WRCMD_DELAY 0x287e
#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2880
+#define mmDC_PINSTRAPS_BASE_IDX 2
#define mmDC_DVODATA_CONFIG 0x2882
#define mmDC_DVODATA_CONFIG_BASE_IDX 2
#define mmLVTMA_PWRSEQ_CNTL 0x2883
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
index 1e98ce86ed19..b28d4b64c05d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
@@ -9361,12 +9361,14 @@
#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
@@ -39956,6 +39958,9 @@
#define DCIO_WRCMD_DELAY__DPHY_DELAY_MASK 0x00000F00L
#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY_MASK 0x0000F000L
#define DCIO_WRCMD_DELAY__ZCAL_DELAY_MASK 0x000F0000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
//DC_DVODATA_CONFIG
#define DC_DVODATA_CONFIG__VIP_MUX_EN__SHIFT 0x13
#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN__SHIFT 0x14
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 34c6ff52710e..6af9f0217b34 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -5454,5 +5454,7 @@
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en_MASK 0x1
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en__SHIFT 0
#endif /* SMU_7_0_1_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index 378f4b6b43da..344237256d02 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -36,6 +36,16 @@
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
#define mmUVD_POWER_STATUS_U 0x3bfd
#define mmUVD_NO_OP 0x3bff
+#define mmUVD_RB_BASE_LO2 0x3c21
+#define mmUVD_RB_BASE_HI2 0x3c22
+#define mmUVD_RB_SIZE2 0x3c23
+#define mmUVD_RB_RPTR2 0x3c24
+#define mmUVD_RB_WPTR2 0x3c25
+#define mmUVD_RB_BASE_LO 0x3c26
+#define mmUVD_RB_BASE_HI 0x3c27
+#define mmUVD_RB_SIZE 0x3c28
+#define mmUVD_RB_RPTR 0x3c29
+#define mmUVD_RB_WPTR 0x3c2a
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67
@@ -43,6 +53,11 @@
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x3c5f
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x3c5e
#define mmUVD_SEMA_CNTL 0x3d00
+#define mmUVD_RB_WPTR3 0x3d1c
+#define mmUVD_RB_RPTR3 0x3d1b
+#define mmUVD_RB_BASE_LO3 0x3d1d
+#define mmUVD_RB_BASE_HI3 0x3d1e
+#define mmUVD_RB_SIZE3 0x3d1f
#define mmUVD_LMI_EXT40_ADDR 0x3d26
#define mmUVD_CTX_INDEX 0x3d28
#define mmUVD_CTX_DATA 0x3d29
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
index 75b660d57bdf..f730d0629020 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
@@ -1841,6 +1841,10 @@
#define mmUNIPHYG_CHANNEL_XBAR_CNTL_BASE_IDX 2
#define mmDCIO_WRCMD_DELAY 0x2094
#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2096
+#define mmDC_PINSTRAPS_BASE_IDX 2
+#define mmCC_DC_MISC_STRAPS 0x2097
+#define mmCC_DC_MISC_STRAPS_BASE_IDX 2
#define mmDC_DVODATA_CONFIG 0x2098
#define mmDC_DVODATA_CONFIG_BASE_IDX 2
#define mmLVTMA_PWRSEQ_CNTL 0x2099
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
index d8ad862b3a74..6d3162c42957 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
@@ -2447,6 +2447,14 @@
//DCCG_CBUS_WRCMD_DELAY
#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY__SHIFT 0x0
#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY_MASK 0x0000000FL
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+//CC_DC_MISC_STRAPS
+#define CC_DC_MISC_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_MISC_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#define CC_DC_MISC_STRAPS__HDMI_DISABLE_MASK 0x00000040L
+#define CC_DC_MISC_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x00000700L
//DCCG_DS_DTO_INCR
#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 181a2c3c6362..f696bbb643ef 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4292,6 +4292,7 @@ typedef struct _ATOM_DPCD_INFO
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
+#define ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION 0x2
/***********************************************************************************/
// Structure used in VRAM_UsageByFirmwareTable
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 837296db9628..7c92f4707085 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1017,6 +1017,19 @@ struct atom_14nm_combphy_tmds_vs_set
uint8_t margin_deemph_lane0__deemph_sel_val;
};
+struct atom_i2c_reg_info {
+ uint8_t ucI2cRegIndex;
+ uint8_t ucI2cRegVal;
+};
+
+struct atom_hdmi_retimer_redriver_set {
+ uint8_t HdmiSlvAddr;
+ uint8_t HdmiRegNum;
+ uint8_t Hdmi6GRegNum;
+ struct atom_i2c_reg_info HdmiRegSetting[9]; //For non 6G Hz use
+ struct atom_i2c_reg_info Hdmi6GhzRegSetting[3]; //For 6G Hz use.
+};
+
struct atom_integrated_system_info_v1_11
{
struct atom_common_table_header table_header;
@@ -1052,7 +1065,11 @@ struct atom_integrated_system_info_v1_11
struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
struct atom_camera_data camera_info;
- uint32_t reserved[138];
+ struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
+ struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
+ struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
+ struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
+ uint32_t reserved[108];
};
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 0214f63f52fc..675988d56392 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -100,6 +100,7 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_GFX_SE_INFO,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
+ CGS_SYSTEM_INFO_PCIE_BUS_DEVFN,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@@ -193,8 +194,6 @@ struct cgs_acpi_method_info {
* @type: memory type
* @size: size in bytes
* @align: alignment in bytes
- * @min_offset: minimum offset from start of heap
- * @max_offset: maximum offset from start of heap
* @handle: memory handle (output)
*
* The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
@@ -216,7 +215,6 @@ struct cgs_acpi_method_info {
*/
typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
- uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle);
/**
@@ -310,6 +308,22 @@ typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum
typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index, uint32_t value);
+#define CGS_REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+#define CGS_REG_FIELD_MASK(reg, field) reg##__##field##_MASK
+
+#define CGS_REG_SET_FIELD(orig_val, reg, field, field_val) \
+ (((orig_val) & ~CGS_REG_FIELD_MASK(reg, field)) | \
+ (CGS_REG_FIELD_MASK(reg, field) & ((field_val) << CGS_REG_FIELD_SHIFT(reg, field))))
+
+#define CGS_REG_GET_FIELD(value, reg, field) \
+ (((value) & CGS_REG_FIELD_MASK(reg, field)) >> CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD(device, reg, field, val) \
+ cgs_write_register(device, mm##reg, (cgs_read_register(device, mm##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \
+ cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
/**
* cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
* @cgs_device: opaque device handle
@@ -409,6 +423,10 @@ typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
+struct amd_pp_init;
+typedef void* (*cgs_register_pp_handle)(struct cgs_device *cgs_device,
+ int (*call_back_func)(struct amd_pp_init *, void **));
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_alloc_gpu_mem_t alloc_gpu_mem;
@@ -445,6 +463,7 @@ struct cgs_ops {
cgs_is_virtualization_enabled_t is_virtualization_enabled;
cgs_enter_safe_mode enter_safe_mode;
cgs_lock_grbm_idx lock_grbm_idx;
+ cgs_register_pp_handle register_pp_handle;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -463,8 +482,8 @@ struct cgs_device
#define CGS_OS_CALL(func,dev,...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
-#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
- CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,handle) \
+ CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle)
#define cgs_free_gpu_mem(dev,handle) \
CGS_CALL(free_gpu_mem,dev,handle)
#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
@@ -523,4 +542,7 @@ struct cgs_device
#define cgs_lock_grbm_idx(cgs_device, lock) \
CGS_CALL(lock_grbm_idx, cgs_device, lock)
+#define cgs_register_pp_handle(cgs_device, call_back_func) \
+ CGS_CALL(register_pp_handle, cgs_device, call_back_func)
+
#endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 94277cb734d2..f516fd10e6ba 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -112,6 +112,9 @@ struct tile_config {
*
* @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
*
+ * @alloc_pasid: Allocate a PASID
+ * @free_pasid: Free a PASID
+ *
* @program_sh_mem_settings: A function that should initiate the memory
* properties such as main aperture memory type (cache / non cached) and
* secondary aperture base address, size and memory type.
@@ -160,6 +163,9 @@ struct kfd2kgd_calls {
uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
+ int (*alloc_pasid)(unsigned int bits);
+ void (*free_pasid)(unsigned int pasid);
+
/* Register access functions */
void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
diff --git a/drivers/gpu/drm/amd/include/linux/chash.h b/drivers/gpu/drm/amd/include/linux/chash.h
new file mode 100644
index 000000000000..6dc159924ed1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/linux/chash.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _LINUX_CHASH_H
+#define _LINUX_CHASH_H
+
+#include <linux/types.h>
+#include <linux/hash.h>
+#include <linux/bug.h>
+#include <asm/bitsperlong.h>
+
+#if BITS_PER_LONG == 32
+# define _CHASH_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _CHASH_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
+struct __chash_table {
+ u8 bits;
+ u8 key_size;
+ unsigned int value_size;
+ u32 size_mask;
+ unsigned long *occup_bitmap, *valid_bitmap;
+ union {
+ u32 *keys32;
+ u64 *keys64;
+ };
+ u8 *values;
+
+#ifdef CONFIG_CHASH_STATS
+ u64 hits, hits_steps, hits_time_ns;
+ u64 miss, miss_steps, miss_time_ns;
+ u64 relocs, reloc_dist;
+#endif
+};
+
+#define __CHASH_BITMAP_SIZE(bits) \
+ (((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define __CHASH_ARRAY_SIZE(bits, size) \
+ ((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))
+
+#define __CHASH_DATA_SIZE(bits, key_size, value_size) \
+ (__CHASH_BITMAP_SIZE(bits) * 2 + \
+ __CHASH_ARRAY_SIZE(bits, key_size) + \
+ __CHASH_ARRAY_SIZE(bits, value_size))
+
+#define STRUCT_CHASH_TABLE(bits, key_size, value_size) \
+ struct { \
+ struct __chash_table table; \
+ unsigned long data \
+ [__CHASH_DATA_SIZE(bits, key_size, value_size)];\
+ }
+
+/**
+ * struct chash_table - Dynamically allocated closed hash table
+ *
+ * Use this struct for dynamically allocated hash tables (using
+ * chash_table_alloc and chash_table_free), where the size is
+ * determined at runtime.
+ */
+struct chash_table {
+ struct __chash_table table;
+ unsigned long *data;
+};
+
+/**
+ * DECLARE_CHASH_TABLE - macro to declare a closed hash table
+ * @table: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * This declares the hash table variable with a static size.
+ *
+ * The closed hash table stores key-value pairs with low memory and
+ * lookup overhead. In operation it performs no dynamic memory
+ * management. The data being stored does not require any
+ * list_heads. The hash table performs best with small @val_sz and as
+ * long as some space (about 50%) is left free in the table. But the
+ * table can still work reasonably efficiently even when filled up to
+ * about 90%. If bigger data items need to be stored and looked up,
+ * store the pointer to it as value in the hash table.
+ *
+ * @val_sz may be 0. This can be useful when all the stored
+ * information is contained in the key itself and the fact that it is
+ * in the hash table (or not).
+ */
+#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz) \
+ STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table
+
+#ifdef CONFIG_CHASH_STATS
+#define __CHASH_STATS_INIT(prefix), \
+ prefix.hits = 0, \
+ prefix.hits_steps = 0, \
+ prefix.hits_time_ns = 0, \
+ prefix.miss = 0, \
+ prefix.miss_steps = 0, \
+ prefix.miss_time_ns = 0, \
+ prefix.relocs = 0, \
+ prefix.reloc_dist = 0
+#else
+#define __CHASH_STATS_INIT(prefix)
+#endif
+
+#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz) \
+ prefix.bits = (bts), \
+ prefix.key_size = (key_sz), \
+ prefix.value_size = (val_sz), \
+ prefix.size_mask = ((1 << bts) - 1), \
+ prefix.occup_bitmap = &data[0], \
+ prefix.valid_bitmap = &data \
+ [__CHASH_BITMAP_SIZE(bts)], \
+ prefix.keys64 = (u64 *)&data \
+ [__CHASH_BITMAP_SIZE(bts) * 2], \
+ prefix.values = (u8 *)&data \
+ [__CHASH_BITMAP_SIZE(bts) * 2 + \
+ __CHASH_ARRAY_SIZE(bts, key_sz)] \
+ __CHASH_STATS_INIT(prefix)
+
+/**
+ * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * Note: the macro can be used for global and local hash table variables.
+ */
+#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
+ DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = { \
+ .table = { \
+ __CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
+ }, \
+ .data = {0} \
+ }
+
+/**
+ * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ */
+#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
+ __CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)
+
+int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
+ unsigned int value_size, gfp_t gfp_mask);
+void chash_table_free(struct chash_table *table);
+
+/**
+ * chash_table_dump_stats - Dump statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ *
+ * Dumps some performance statistics of the table gathered in operation
+ * in the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled,
+ * user must turn on messages for chash.c (file chash.c +p).
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)
+
+void __chash_table_dump_stats(struct __chash_table *table);
+#else
+#define chash_table_dump_stats(tbl)
+#endif
+
+/**
+ * chash_table_reset_stats - Reset statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)
+
+static inline void __chash_table_reset_stats(struct __chash_table *table)
+{
+ (void)table __CHASH_STATS_INIT((*table));
+}
+#else
+#define chash_table_reset_stats(tbl)
+#endif
+
+/**
+ * chash_table_copy_in - Copy a new value into the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to add or update
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @key already has an entry, its value is replaced. Otherwise a
+ * new entry is added. If @value is NULL, the value is left unchanged
+ * or uninitialized. Returns 1 if an entry already existed, 0 if a new
+ * entry was added or %-ENOMEM if there was no free space in the
+ * table.
+ */
+#define chash_table_copy_in(tbl, key, value) \
+ __chash_table_copy_in(&(*tbl).table, key, value)
+
+int __chash_table_copy_in(struct __chash_table *table, u64 key,
+ const void *value);
+
+/**
+ * chash_table_copy_out - Copy a value out of the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. Returns the slot index of the
+ * entry or %-EINVAL if @key was not found.
+ */
+#define chash_table_copy_out(tbl, key, value) \
+ __chash_table_copy_out(&(*tbl).table, key, value, false)
+
+int __chash_table_copy_out(struct __chash_table *table, u64 key,
+ void *value, bool remove);
+
+/**
+ * chash_table_remove - Remove an entry from the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. The entry is removed from the
+ * table. Returns the slot index of the removed entry or %-EINVAL if
+ * @key was not found.
+ */
+#define chash_table_remove(tbl, key, value) \
+ __chash_table_copy_out(&(*tbl).table, key, value, true)
+
+/*
+ * Low level iterator API used internally by the above functions.
+ */
+struct chash_iter {
+ struct __chash_table *table;
+ unsigned long mask;
+ int slot;
+};
+
+/**
+ * CHASH_ITER_INIT - Initialize a hash table iterator
+ * @tbl: Pointer to hash table to iterate over
+ * @s: Initial slot number
+ */
+#define CHASH_ITER_INIT(table, s) { \
+ table, \
+ 1UL << ((s) & (BITS_PER_LONG - 1)), \
+ s \
+ }
+/**
+ * CHASH_ITER_SET - Set hash table iterator to new slot
+ * @iter: Iterator
+ * @s: Slot number
+ */
+#define CHASH_ITER_SET(iter, s) \
+ (iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \
+ (iter).slot = (s)
+/**
+ * CHASH_ITER_INC - Increment hash table iterator
+ * @table: Hash table to iterate over
+ *
+ * Wraps around at the end.
+ */
+#define CHASH_ITER_INC(iter) do { \
+ (iter).mask = (iter).mask << 1 | \
+ (iter).mask >> (BITS_PER_LONG - 1); \
+ (iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
+ } while (0)
+
+static inline bool chash_iter_is_valid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
+ iter.mask);
+}
+static inline bool chash_iter_is_empty(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
+ iter.mask);
+}
+
+static inline void chash_iter_set_valid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
+ iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
+}
+static inline void chash_iter_set_invalid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
+}
+static inline void chash_iter_set_empty(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
+}
+
+static inline u32 chash_iter_key32(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 4);
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->keys32[iter.slot];
+}
+static inline u64 chash_iter_key64(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 8);
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->keys64[iter.slot];
+}
+static inline u64 chash_iter_key(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return (iter.table->key_size == 4) ?
+ iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot];
+}
+
+static inline u32 chash_iter_hash32(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 4);
+ return hash_32(chash_iter_key32(iter), iter.table->bits);
+}
+
+static inline u32 chash_iter_hash64(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 8);
+ return hash_64(chash_iter_key64(iter), iter.table->bits);
+}
+
+static inline u32 chash_iter_hash(const struct chash_iter iter)
+{
+ return (iter.table->key_size == 4) ?
+ hash_32(chash_iter_key32(iter), iter.table->bits) :
+ hash_64(chash_iter_key64(iter), iter.table->bits);
+}
+
+static inline void *chash_iter_value(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->values +
+ ((unsigned long)iter.slot * iter.table->value_size);
+}
+
+#endif /* _LINUX_CHASH_H */
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index 9a9e6c7e89ea..2fb25abaf7c8 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -284,8 +284,8 @@ struct v9_mqd {
uint32_t gds_save_mask_hi;
uint32_t ctx_save_base_addr_lo;
uint32_t ctx_save_base_addr_hi;
- uint32_t reserved_126;
- uint32_t reserved_127;
+ uint32_t dynamic_cu_mask_addr_lo;
+ uint32_t dynamic_cu_mask_addr_hi;
uint32_t cp_mqd_base_addr_lo;
uint32_t cp_mqd_base_addr_hi;
uint32_t cp_hqd_active;
@@ -672,6 +672,14 @@ struct v9_mqd {
uint32_t reserved_511;
};
+struct v9_mqd_allocation {
+ struct v9_mqd mqd;
+ uint32_t wptr_poll_mem;
+ uint32_t rptr_report_mem;
+ uint32_t dynamic_cu_mask;
+ uint32_t dynamic_rb_mask;
+};
+
/* from vega10 all CSA format is shifted to chain ib compatible mode */
struct v9_ce_ib_state {
/* section of non chained ib part */
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index 3e606a761d0e..20234820194b 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -423,265 +423,6 @@ struct vi_mqd_allocation {
uint32_t dynamic_rb_mask;
};
-struct cz_mqd {
- uint32_t header;
- uint32_t compute_dispatch_initiator;
- uint32_t compute_dim_x;
- uint32_t compute_dim_y;
- uint32_t compute_dim_z;
- uint32_t compute_start_x;
- uint32_t compute_start_y;
- uint32_t compute_start_z;
- uint32_t compute_num_thread_x;
- uint32_t compute_num_thread_y;
- uint32_t compute_num_thread_z;
- uint32_t compute_pipelinestat_enable;
- uint32_t compute_perfcount_enable;
- uint32_t compute_pgm_lo;
- uint32_t compute_pgm_hi;
- uint32_t compute_tba_lo;
- uint32_t compute_tba_hi;
- uint32_t compute_tma_lo;
- uint32_t compute_tma_hi;
- uint32_t compute_pgm_rsrc1;
- uint32_t compute_pgm_rsrc2;
- uint32_t compute_vmid;
- uint32_t compute_resource_limits;
- uint32_t compute_static_thread_mgmt_se0;
- uint32_t compute_static_thread_mgmt_se1;
- uint32_t compute_tmpring_size;
- uint32_t compute_static_thread_mgmt_se2;
- uint32_t compute_static_thread_mgmt_se3;
- uint32_t compute_restart_x;
- uint32_t compute_restart_y;
- uint32_t compute_restart_z;
- uint32_t compute_thread_trace_enable;
- uint32_t compute_misc_reserved;
- uint32_t compute_dispatch_id;
- uint32_t compute_threadgroup_id;
- uint32_t compute_relaunch;
- uint32_t compute_wave_restore_addr_lo;
- uint32_t compute_wave_restore_addr_hi;
- uint32_t compute_wave_restore_control;
- uint32_t reserved_39;
- uint32_t reserved_40;
- uint32_t reserved_41;
- uint32_t reserved_42;
- uint32_t reserved_43;
- uint32_t reserved_44;
- uint32_t reserved_45;
- uint32_t reserved_46;
- uint32_t reserved_47;
- uint32_t reserved_48;
- uint32_t reserved_49;
- uint32_t reserved_50;
- uint32_t reserved_51;
- uint32_t reserved_52;
- uint32_t reserved_53;
- uint32_t reserved_54;
- uint32_t reserved_55;
- uint32_t reserved_56;
- uint32_t reserved_57;
- uint32_t reserved_58;
- uint32_t reserved_59;
- uint32_t reserved_60;
- uint32_t reserved_61;
- uint32_t reserved_62;
- uint32_t reserved_63;
- uint32_t reserved_64;
- uint32_t compute_user_data_0;
- uint32_t compute_user_data_1;
- uint32_t compute_user_data_2;
- uint32_t compute_user_data_3;
- uint32_t compute_user_data_4;
- uint32_t compute_user_data_5;
- uint32_t compute_user_data_6;
- uint32_t compute_user_data_7;
- uint32_t compute_user_data_8;
- uint32_t compute_user_data_9;
- uint32_t compute_user_data_10;
- uint32_t compute_user_data_11;
- uint32_t compute_user_data_12;
- uint32_t compute_user_data_13;
- uint32_t compute_user_data_14;
- uint32_t compute_user_data_15;
- uint32_t cp_compute_csinvoc_count_lo;
- uint32_t cp_compute_csinvoc_count_hi;
- uint32_t reserved_83;
- uint32_t reserved_84;
- uint32_t reserved_85;
- uint32_t cp_mqd_query_time_lo;
- uint32_t cp_mqd_query_time_hi;
- uint32_t cp_mqd_connect_start_time_lo;
- uint32_t cp_mqd_connect_start_time_hi;
- uint32_t cp_mqd_connect_end_time_lo;
- uint32_t cp_mqd_connect_end_time_hi;
- uint32_t cp_mqd_connect_end_wf_count;
- uint32_t cp_mqd_connect_end_pq_rptr;
- uint32_t cp_mqd_connect_end_pq_wptr;
- uint32_t cp_mqd_connect_end_ib_rptr;
- uint32_t reserved_96;
- uint32_t reserved_97;
- uint32_t cp_mqd_save_start_time_lo;
- uint32_t cp_mqd_save_start_time_hi;
- uint32_t cp_mqd_save_end_time_lo;
- uint32_t cp_mqd_save_end_time_hi;
- uint32_t cp_mqd_restore_start_time_lo;
- uint32_t cp_mqd_restore_start_time_hi;
- uint32_t cp_mqd_restore_end_time_lo;
- uint32_t cp_mqd_restore_end_time_hi;
- uint32_t reserved_106;
- uint32_t reserved_107;
- uint32_t gds_cs_ctxsw_cnt0;
- uint32_t gds_cs_ctxsw_cnt1;
- uint32_t gds_cs_ctxsw_cnt2;
- uint32_t gds_cs_ctxsw_cnt3;
- uint32_t reserved_112;
- uint32_t reserved_113;
- uint32_t cp_pq_exe_status_lo;
- uint32_t cp_pq_exe_status_hi;
- uint32_t cp_packet_id_lo;
- uint32_t cp_packet_id_hi;
- uint32_t cp_packet_exe_status_lo;
- uint32_t cp_packet_exe_status_hi;
- uint32_t gds_save_base_addr_lo;
- uint32_t gds_save_base_addr_hi;
- uint32_t gds_save_mask_lo;
- uint32_t gds_save_mask_hi;
- uint32_t ctx_save_base_addr_lo;
- uint32_t ctx_save_base_addr_hi;
- uint32_t reserved_126;
- uint32_t reserved_127;
- uint32_t cp_mqd_base_addr_lo;
- uint32_t cp_mqd_base_addr_hi;
- uint32_t cp_hqd_active;
- uint32_t cp_hqd_vmid;
- uint32_t cp_hqd_persistent_state;
- uint32_t cp_hqd_pipe_priority;
- uint32_t cp_hqd_queue_priority;
- uint32_t cp_hqd_quantum;
- uint32_t cp_hqd_pq_base_lo;
- uint32_t cp_hqd_pq_base_hi;
- uint32_t cp_hqd_pq_rptr;
- uint32_t cp_hqd_pq_rptr_report_addr_lo;
- uint32_t cp_hqd_pq_rptr_report_addr_hi;
- uint32_t cp_hqd_pq_wptr_poll_addr_lo;
- uint32_t cp_hqd_pq_wptr_poll_addr_hi;
- uint32_t cp_hqd_pq_doorbell_control;
- uint32_t cp_hqd_pq_wptr;
- uint32_t cp_hqd_pq_control;
- uint32_t cp_hqd_ib_base_addr_lo;
- uint32_t cp_hqd_ib_base_addr_hi;
- uint32_t cp_hqd_ib_rptr;
- uint32_t cp_hqd_ib_control;
- uint32_t cp_hqd_iq_timer;
- uint32_t cp_hqd_iq_rptr;
- uint32_t cp_hqd_dequeue_request;
- uint32_t cp_hqd_dma_offload;
- uint32_t cp_hqd_sema_cmd;
- uint32_t cp_hqd_msg_type;
- uint32_t cp_hqd_atomic0_preop_lo;
- uint32_t cp_hqd_atomic0_preop_hi;
- uint32_t cp_hqd_atomic1_preop_lo;
- uint32_t cp_hqd_atomic1_preop_hi;
- uint32_t cp_hqd_hq_status0;
- uint32_t cp_hqd_hq_control0;
- uint32_t cp_mqd_control;
- uint32_t cp_hqd_hq_status1;
- uint32_t cp_hqd_hq_control1;
- uint32_t cp_hqd_eop_base_addr_lo;
- uint32_t cp_hqd_eop_base_addr_hi;
- uint32_t cp_hqd_eop_control;
- uint32_t cp_hqd_eop_rptr;
- uint32_t cp_hqd_eop_wptr;
- uint32_t cp_hqd_eop_done_events;
- uint32_t cp_hqd_ctx_save_base_addr_lo;
- uint32_t cp_hqd_ctx_save_base_addr_hi;
- uint32_t cp_hqd_ctx_save_control;
- uint32_t cp_hqd_cntl_stack_offset;
- uint32_t cp_hqd_cntl_stack_size;
- uint32_t cp_hqd_wg_state_offset;
- uint32_t cp_hqd_ctx_save_size;
- uint32_t cp_hqd_gds_resource_state;
- uint32_t cp_hqd_error;
- uint32_t cp_hqd_eop_wptr_mem;
- uint32_t cp_hqd_eop_dones;
- uint32_t reserved_182;
- uint32_t reserved_183;
- uint32_t reserved_184;
- uint32_t reserved_185;
- uint32_t reserved_186;
- uint32_t reserved_187;
- uint32_t reserved_188;
- uint32_t reserved_189;
- uint32_t reserved_190;
- uint32_t reserved_191;
- uint32_t iqtimer_pkt_header;
- uint32_t iqtimer_pkt_dw0;
- uint32_t iqtimer_pkt_dw1;
- uint32_t iqtimer_pkt_dw2;
- uint32_t iqtimer_pkt_dw3;
- uint32_t iqtimer_pkt_dw4;
- uint32_t iqtimer_pkt_dw5;
- uint32_t iqtimer_pkt_dw6;
- uint32_t iqtimer_pkt_dw7;
- uint32_t iqtimer_pkt_dw8;
- uint32_t iqtimer_pkt_dw9;
- uint32_t iqtimer_pkt_dw10;
- uint32_t iqtimer_pkt_dw11;
- uint32_t iqtimer_pkt_dw12;
- uint32_t iqtimer_pkt_dw13;
- uint32_t iqtimer_pkt_dw14;
- uint32_t iqtimer_pkt_dw15;
- uint32_t iqtimer_pkt_dw16;
- uint32_t iqtimer_pkt_dw17;
- uint32_t iqtimer_pkt_dw18;
- uint32_t iqtimer_pkt_dw19;
- uint32_t iqtimer_pkt_dw20;
- uint32_t iqtimer_pkt_dw21;
- uint32_t iqtimer_pkt_dw22;
- uint32_t iqtimer_pkt_dw23;
- uint32_t iqtimer_pkt_dw24;
- uint32_t iqtimer_pkt_dw25;
- uint32_t iqtimer_pkt_dw26;
- uint32_t iqtimer_pkt_dw27;
- uint32_t iqtimer_pkt_dw28;
- uint32_t iqtimer_pkt_dw29;
- uint32_t iqtimer_pkt_dw30;
- uint32_t iqtimer_pkt_dw31;
- uint32_t reserved_225;
- uint32_t reserved_226;
- uint32_t reserved_227;
- uint32_t set_resources_header;
- uint32_t set_resources_dw1;
- uint32_t set_resources_dw2;
- uint32_t set_resources_dw3;
- uint32_t set_resources_dw4;
- uint32_t set_resources_dw5;
- uint32_t set_resources_dw6;
- uint32_t set_resources_dw7;
- uint32_t reserved_236;
- uint32_t reserved_237;
- uint32_t reserved_238;
- uint32_t reserved_239;
- uint32_t queue_doorbell_id0;
- uint32_t queue_doorbell_id1;
- uint32_t queue_doorbell_id2;
- uint32_t queue_doorbell_id3;
- uint32_t queue_doorbell_id4;
- uint32_t queue_doorbell_id5;
- uint32_t queue_doorbell_id6;
- uint32_t queue_doorbell_id7;
- uint32_t queue_doorbell_id8;
- uint32_t queue_doorbell_id9;
- uint32_t queue_doorbell_id10;
- uint32_t queue_doorbell_id11;
- uint32_t queue_doorbell_id12;
- uint32_t queue_doorbell_id13;
- uint32_t queue_doorbell_id14;
- uint32_t queue_doorbell_id15;
-};
-
struct vi_ce_ib_state {
uint32_t ce_ib_completion_status;
uint32_t ce_constegnine_count;
diff --git a/drivers/gpu/drm/amd/lib/Kconfig b/drivers/gpu/drm/amd/lib/Kconfig
new file mode 100644
index 000000000000..776ef3434c10
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/Kconfig
@@ -0,0 +1,28 @@
+menu "AMD Library routines"
+
+#
+# Closed hash table
+#
+config CHASH
+ tristate
+ default DRM_AMDGPU
+ help
+ Statically sized closed hash table implementation with low
+ memory and CPU overhead.
+
+config CHASH_STATS
+ bool "Closed hash table performance statistics"
+ depends on CHASH
+ default n
+ help
+ Enable collection of performance statistics for closed hash tables.
+
+config CHASH_SELFTEST
+ bool "Closed hash table self test"
+ depends on CHASH
+ default n
+ help
+ Runs a selftest during module load. Several module parameters
+ are available to modify the behaviour of the test.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
new file mode 100644
index 000000000000..690243001e1a
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for AMD library routines, which are used by AMD driver
+# components.
+#
+# This is for common library routines that can be shared between AMD
+# driver components or later moved to kernel/lib for sharing with
+# other drivers.
+
+ccflags-y := -I$(src)/../include
+
+obj-$(CONFIG_CHASH) += chash.o
diff --git a/drivers/gpu/drm/amd/lib/chash.c b/drivers/gpu/drm/amd/lib/chash.c
new file mode 100644
index 000000000000..b8e45f356a1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/chash.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/hash.h>
+#include <linux/bug.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sched/clock.h>
+#include <asm/div64.h>
+#include <linux/chash.h>
+
+/**
+ * chash_table_alloc - Allocate closed hash table
+ * @table: Pointer to the table structure
+ * @bits: Table size will be 2^bits entries
+ * @key_size: Size of hash keys in bytes, 4 or 8
+ * @value_size: Size of data values in bytes, can be 0
+ */
+int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
+ unsigned int value_size, gfp_t gfp_mask)
+{
+ if (bits > 31)
+ return -EINVAL;
+
+ if (key_size != 4 && key_size != 8)
+ return -EINVAL;
+
+ table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size),
+ sizeof(long), gfp_mask);
+ if (!table->data)
+ return -ENOMEM;
+
+ __CHASH_TABLE_INIT(table->table, table->data,
+ bits, key_size, value_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(chash_table_alloc);
+
+/**
+ * chash_table_free - Free closed hash table
+ * @table: Pointer to the table structure
+ */
+void chash_table_free(struct chash_table *table)
+{
+ kfree(table->data);
+}
+EXPORT_SYMBOL(chash_table_free);
+
+#ifdef CONFIG_CHASH_STATS
+
+#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do { \
+ u64 __nom = (nom); \
+ u64 __denom = (denom); \
+ u64 __quot, __frac; \
+ u32 __rem; \
+ \
+ while (__denom >> 32) { \
+ __nom >>= 1; \
+ __denom >>= 1; \
+ } \
+ __quot = __nom; \
+ __rem = do_div(__quot, __denom); \
+ __frac = __rem * (frac_digits) + (__denom >> 1); \
+ do_div(__frac, __denom); \
+ (quot) = __quot; \
+ (frac) = __frac; \
+ } while (0)
+
+void __chash_table_dump_stats(struct __chash_table *table)
+{
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+ u32 filled = 0, empty = 0, tombstones = 0;
+ u64 quot1, quot2;
+ u32 frac1, frac2;
+
+ do {
+ if (chash_iter_is_valid(iter))
+ filled++;
+ else if (chash_iter_is_empty(iter))
+ empty++;
+ else
+ tombstones++;
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ pr_debug("chash: key size %u, value size %u\n",
+ table->key_size, table->value_size);
+ pr_debug(" Slots total/filled/empty/tombstones: %u / %u / %u / %u\n",
+ 1 << table->bits, filled, empty, tombstones);
+ if (table->hits > 0) {
+ DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000);
+ DIV_FRAC(table->hits * 1000, table->hits_time_ns,
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Hits (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->hits, quot1, frac1, quot2, frac2);
+ if (table->miss > 0) {
+ DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000);
+ DIV_FRAC(table->miss * 1000, table->miss_time_ns,
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->miss, quot1, frac1, quot2, frac2);
+ if (table->hits + table->miss > 0) {
+ DIV_FRAC(table->hits_steps + table->miss_steps,
+ table->hits + table->miss, quot1, frac1, 1000);
+ DIV_FRAC((table->hits + table->miss) * 1000,
+ (table->hits_time_ns + table->miss_time_ns),
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Total (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->hits + table->miss, quot1, frac1, quot2, frac2);
+ if (table->relocs > 0) {
+ DIV_FRAC(table->hits + table->miss, table->relocs,
+ quot1, frac1, 1000);
+ DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000);
+ pr_debug(" Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n",
+ table->relocs, quot1, frac1, quot2, frac2);
+ } else {
+ pr_debug(" No relocations\n");
+ }
+}
+EXPORT_SYMBOL(__chash_table_dump_stats);
+
+#undef DIV_FRAC
+#endif
+
+#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
+#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
+#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
+#define CHASH_IN_RANGE(table, slot, first, last) \
+ (CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))
+
+/*#define CHASH_DEBUG Uncomment this to enable verbose debug output*/
+#ifdef CHASH_DEBUG
+static void chash_table_dump(struct __chash_table *table)
+{
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+
+ do {
+ if ((iter.slot & 3) == 0)
+ pr_debug("%04x: ", iter.slot);
+
+ if (chash_iter_is_valid(iter))
+ pr_debug("[%016llx] ", chash_iter_key(iter));
+ else if (chash_iter_is_empty(iter))
+ pr_debug("[ <empty> ] ");
+ else
+ pr_debug("[ <tombstone> ] ");
+
+ if ((iter.slot & 3) == 3)
+ pr_debug("\n");
+
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ if ((iter.slot & 3) != 0)
+ pr_debug("\n");
+}
+
+static int chash_table_check(struct __chash_table *table)
+{
+ u32 hash;
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+ struct chash_iter cur = CHASH_ITER_INIT(table, 0);
+
+ do {
+ if (!chash_iter_is_valid(iter)) {
+ CHASH_ITER_INC(iter);
+ continue;
+ }
+
+ hash = chash_iter_hash(iter);
+ CHASH_ITER_SET(cur, hash);
+ while (cur.slot != iter.slot) {
+ if (chash_iter_is_empty(cur)) {
+ pr_err("Path to element at %x with hash %x broken at slot %x\n",
+ iter.slot, hash, cur.slot);
+ chash_table_dump(table);
+ return -EINVAL;
+ }
+ CHASH_ITER_INC(cur);
+ }
+
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ return 0;
+}
+#endif
+
+static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
+{
+ BUG_ON(src.table == dst.table && src.slot == dst.slot);
+ BUG_ON(src.table->key_size != dst.table->key_size);
+ BUG_ON(src.table->value_size != dst.table->value_size);
+
+ if (dst.table->key_size == 4)
+ dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
+ else
+ dst.table->keys64[dst.slot] = src.table->keys64[src.slot];
+
+ if (dst.table->value_size)
+ memcpy(chash_iter_value(dst), chash_iter_value(src),
+ dst.table->value_size);
+
+ chash_iter_set_valid(dst);
+ chash_iter_set_invalid(src);
+
+#ifdef CONFIG_CHASH_STATS
+ if (src.table == dst.table) {
+ dst.table->relocs++;
+ dst.table->reloc_dist +=
+ CHASH_SUB(dst.table, src.slot, dst.slot);
+ }
+#endif
+}
+
+/**
+ * __chash_table_find - Helper for looking up a hash table entry
+ * @iter: Pointer to hash table iterator
+ * @key: Key of the entry to find
+ * @for_removal: set to true if the element will be removed soon
+ *
+ * Searches for an entry in the hash table with a given key. iter must
+ * be initialized by the caller to point to the home position of the
+ * hypothetical entry, i.e. it must be initialized with the hash table
+ * and the key's hash as the initial slot for the search.
+ *
+ * This function also does some local clean-up to speed up future
+ * look-ups by relocating entries to better slots and removing
+ * tombstones that are no longer needed.
+ *
+ * If @for_removal is true, the function avoids relocating the entry
+ * that is being returned.
+ *
+ * Returns 0 if the search is successful. In this case iter is updated
+ * to point to the found entry. Otherwise %-EINVAL is returned and the
+ * iter is updated to point to the first available slot for the given
+ * key. If the table is full, the slot is set to -1.
+ */
+static int chash_table_find(struct chash_iter *iter, u64 key,
+ bool for_removal)
+{
+#ifdef CONFIG_CHASH_STATS
+ u64 ts1 = local_clock();
+#endif
+ u32 hash = iter->slot;
+ struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1);
+ int first_avail = (for_removal ? -2 : -1);
+
+ while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) {
+ if (chash_iter_is_empty(*iter)) {
+ /* Found an empty slot, which ends the
+ * search. Clean up any preceding tombstones
+ * that are no longer needed because they lead
+ * to no-where
+ */
+ if ((int)first_redundant.slot < 0)
+ goto not_found;
+ while (first_redundant.slot != iter->slot) {
+ if (!chash_iter_is_valid(first_redundant))
+ chash_iter_set_empty(first_redundant);
+ CHASH_ITER_INC(first_redundant);
+ }
+#ifdef CHASH_DEBUG
+ chash_table_check(iter->table);
+#endif
+ goto not_found;
+ } else if (!chash_iter_is_valid(*iter)) {
+ /* Found a tombstone. Remember it as candidate
+ * for relocating the entry we're looking for
+ * or for adding a new entry with the given key
+ */
+ if (first_avail == -1)
+ first_avail = iter->slot;
+ /* Or mark it as the start of a series of
+ * potentially redundant tombstones
+ */
+ else if (first_redundant.slot == -1)
+ CHASH_ITER_SET(first_redundant, iter->slot);
+ } else if (first_redundant.slot >= 0) {
+ /* Found a valid, occupied slot with a
+ * preceding series of tombstones. Relocate it
+ * to a better position that no longer depends
+ * on those tombstones
+ */
+ u32 cur_hash = chash_iter_hash(*iter);
+
+ if (!CHASH_IN_RANGE(iter->table, cur_hash,
+ first_redundant.slot + 1,
+ iter->slot)) {
+ /* This entry has a hash at or before
+ * the first tombstone we found. We
+ * can relocate it to that tombstone
+ * and advance to the next tombstone
+ */
+ chash_iter_relocate(first_redundant, *iter);
+ do {
+ CHASH_ITER_INC(first_redundant);
+ } while (chash_iter_is_valid(first_redundant));
+ } else if (cur_hash != iter->slot) {
+ /* Relocate entry to its home position
+ * or as close as possible so it no
+ * longer depends on any preceding
+ * tombstones
+ */
+ struct chash_iter new_iter =
+ CHASH_ITER_INIT(iter->table, cur_hash);
+
+ while (new_iter.slot != iter->slot &&
+ chash_iter_is_valid(new_iter))
+ CHASH_ITER_INC(new_iter);
+
+ if (new_iter.slot != iter->slot)
+ chash_iter_relocate(new_iter, *iter);
+ }
+ }
+
+ CHASH_ITER_INC(*iter);
+ if (iter->slot == hash) {
+ iter->slot = -1;
+ goto not_found;
+ }
+ }
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->hits++;
+ iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1;
+#endif
+
+ if (first_avail >= 0) {
+ CHASH_ITER_SET(first_redundant, first_avail);
+ chash_iter_relocate(first_redundant, *iter);
+ iter->slot = first_redundant.slot;
+ iter->mask = first_redundant.mask;
+ }
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->hits_time_ns += local_clock() - ts1;
+#endif
+
+ return 0;
+
+not_found:
+#ifdef CONFIG_CHASH_STATS
+ iter->table->miss++;
+ iter->table->miss_steps += (iter->slot < 0) ?
+ (1 << iter->table->bits) :
+ CHASH_SUB(iter->table, iter->slot, hash) + 1;
+#endif
+
+ if (first_avail >= 0)
+ CHASH_ITER_SET(*iter, first_avail);
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->miss_time_ns += local_clock() - ts1;
+#endif
+
+ return -EINVAL;
+}
+
+int __chash_table_copy_in(struct __chash_table *table, u64 key,
+ const void *value)
+{
+ u32 hash = (table->key_size == 4) ?
+ hash_32(key, table->bits) : hash_64(key, table->bits);
+ struct chash_iter iter = CHASH_ITER_INIT(table, hash);
+ int r = chash_table_find(&iter, key, false);
+
+ /* Found an existing entry */
+ if (!r) {
+ if (value && table->value_size)
+ memcpy(chash_iter_value(iter), value,
+ table->value_size);
+ return 1;
+ }
+
+ /* Is there a place to add a new entry? */
+ if (iter.slot < 0) {
+ pr_err("Hash table overflow\n");
+ return -ENOMEM;
+ }
+
+ chash_iter_set_valid(iter);
+
+ if (table->key_size == 4)
+ table->keys32[iter.slot] = key;
+ else
+ table->keys64[iter.slot] = key;
+ if (value && table->value_size)
+ memcpy(chash_iter_value(iter), value, table->value_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(__chash_table_copy_in);
+
+int __chash_table_copy_out(struct __chash_table *table, u64 key,
+ void *value, bool remove)
+{
+ u32 hash = (table->key_size == 4) ?
+ hash_32(key, table->bits) : hash_64(key, table->bits);
+ struct chash_iter iter = CHASH_ITER_INIT(table, hash);
+ int r = chash_table_find(&iter, key, remove);
+
+ if (r < 0)
+ return r;
+
+ if (value && table->value_size)
+ memcpy(value, chash_iter_value(iter), table->value_size);
+
+ if (remove)
+ chash_iter_set_invalid(iter);
+
+ return iter.slot;
+}
+EXPORT_SYMBOL(__chash_table_copy_out);
+
+#ifdef CONFIG_CHASH_SELFTEST
+/**
+ * chash_self_test - Run a self-test of the hash table implementation
+ * @bits: Table size will be 2^bits entries
+ * @key_size: Size of hash keys in bytes, 4 or 8
+ * @min_fill: Minimum fill level during the test
+ * @max_fill: Maximum fill level during the test
+ * @iterations: Number of test iterations
+ *
+ * The test adds and removes entries from a hash table, cycling the
+ * fill level between min_fill and max_fill entries. Also tests lookup
+ * and value retrieval.
+ */
+static int __init chash_self_test(u8 bits, u8 key_size,
+ int min_fill, int max_fill,
+ u64 iterations)
+{
+ struct chash_table table;
+ int ret;
+ u64 add_count, rmv_count;
+ u64 value;
+
+ if (key_size == 4 && iterations > 0xffffffff)
+ return -EINVAL;
+ if (min_fill >= max_fill)
+ return -EINVAL;
+
+ ret = chash_table_alloc(&table, bits, key_size, sizeof(u64),
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("chash_table_alloc failed: %d\n", ret);
+ return ret;
+ }
+
+ for (add_count = 0, rmv_count = 0; add_count < iterations;
+ add_count++) {
+ /* When we hit the max_fill level, remove entries down
+ * to min_fill
+ */
+ if (add_count - rmv_count == max_fill) {
+ u64 find_count = rmv_count;
+
+ /* First try to find all entries that we're
+ * about to remove, confirm their value, test
+ * writing them back a second time.
+ */
+ for (; add_count - find_count > min_fill;
+ find_count++) {
+ ret = chash_table_copy_out(&table, find_count,
+ &value);
+ if (ret < 0) {
+ pr_err("chash_table_copy_out failed: %d\n",
+ ret);
+ goto out;
+ }
+ if (value != ~find_count) {
+ pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n",
+ find_count, ~find_count, value);
+#ifdef CHASH_DEBUG
+ chash_table_dump(&table.table);
+#endif
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = chash_table_copy_in(&table, find_count,
+ &value);
+ if (ret != 1) {
+ pr_err("copy_in second time returned %d, expected 1\n",
+ ret);
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ /* Remove them until we hit min_fill level */
+ for (; add_count - rmv_count > min_fill; rmv_count++) {
+ ret = chash_table_remove(&table, rmv_count,
+ NULL);
+ if (ret < 0) {
+ pr_err("chash_table_remove failed: %d\n",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ /* Add a new value */
+ value = ~add_count;
+ ret = chash_table_copy_in(&table, add_count, &value);
+ if (ret != 0) {
+ pr_err("copy_in first time returned %d, expected 0\n",
+ ret);
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ chash_table_dump_stats(&table);
+ chash_table_reset_stats(&table);
+
+out:
+ chash_table_free(&table);
+ return ret;
+}
+
+static unsigned int chash_test_bits = 10;
+MODULE_PARM_DESC(test_bits,
+ "Selftest number of hash bits ([4..20], default=10)");
+module_param_named(test_bits, chash_test_bits, uint, 0444);
+
+static unsigned int chash_test_keysize = 8;
+MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)");
+module_param_named(test_keysize, chash_test_keysize, uint, 0444);
+
+static unsigned int chash_test_minfill;
+MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)");
+module_param_named(test_minfill, chash_test_minfill, uint, 0444);
+
+static unsigned int chash_test_maxfill;
+MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)");
+module_param_named(test_maxfill, chash_test_maxfill, uint, 0444);
+
+static unsigned long chash_test_iters;
+MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)");
+module_param_named(test_iters, chash_test_iters, ulong, 0444);
+
+static int __init chash_init(void)
+{
+ int ret;
+ u64 ts1_ns;
+
+ /* Skip self test on user errors */
+ if (chash_test_bits < 4 || chash_test_bits > 20) {
+ pr_err("chash: test_bits out of range [4..20].\n");
+ return 0;
+ }
+ if (chash_test_keysize != 4 && chash_test_keysize != 8) {
+ pr_err("chash: test_keysize invalid. Must be 4 or 8.\n");
+ return 0;
+ }
+
+ if (!chash_test_minfill)
+ chash_test_minfill = (1 << chash_test_bits) / 2;
+ if (!chash_test_maxfill)
+ chash_test_maxfill = (1 << chash_test_bits) * 4 / 5;
+ if (!chash_test_iters)
+ chash_test_iters = (1 << chash_test_bits) * 1000;
+
+ if (chash_test_minfill >= (1 << chash_test_bits)) {
+ pr_err("chash: test_minfill too big. Must be < table size.\n");
+ return 0;
+ }
+ if (chash_test_maxfill >= (1 << chash_test_bits)) {
+ pr_err("chash: test_maxfill too big. Must be < table size.\n");
+ return 0;
+ }
+ if (chash_test_minfill >= chash_test_maxfill) {
+ pr_err("chash: test_minfill must be < test_maxfill.\n");
+ return 0;
+ }
+ if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
+ pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
+ return 0;
+ }
+
+ ts1_ns = local_clock();
+ ret = chash_self_test(chash_test_bits, chash_test_keysize,
+ chash_test_minfill, chash_test_maxfill,
+ chash_test_iters);
+ if (!ret) {
+ u64 ts_delta_us = local_clock() - ts1_ns;
+ u64 iters_per_second = (u64)chash_test_iters * 1000000;
+
+ do_div(ts_delta_us, 1000);
+ do_div(iters_per_second, ts_delta_us);
+ pr_info("chash: self test took %llu us, %llu iterations/s\n",
+ ts_delta_us, iters_per_second);
+ } else {
+ pr_err("chash: self test failed: %d\n", ret);
+ }
+
+ return ret;
+}
+
+module_init(chash_init);
+
+#endif /* CONFIG_CHASH_SELFTEST */
+
+MODULE_DESCRIPTION("Closed hash table");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 72d5f50508b6..231785a9e24c 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,16 +1,35 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/powerplay/inc/ \
-I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/powerplay/smumgr\
- -I$(FULL_AMD_PATH)/powerplay/hwmgr \
- -I$(FULL_AMD_PATH)/powerplay/eventmgr
+ -I$(FULL_AMD_PATH)/powerplay/hwmgr
AMD_PP_PATH = ../powerplay
-PP_LIBS = smumgr hwmgr eventmgr
+PP_LIBS = smumgr hwmgr
AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index f73e80c4bf33..c7e34128cbde 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -29,72 +29,98 @@
#include "amd_powerplay.h"
#include "pp_instance.h"
#include "power_state.h"
-#include "eventmanager.h"
+#define PP_DPM_DISABLED 0xCCCC
+
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+ void *input, void *output);
static inline int pp_check(struct pp_instance *handle)
{
- if (handle == NULL || handle->pp_valid != PP_VALID)
+ if (handle == NULL)
return -EINVAL;
- if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL)
+ if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
return -EINVAL;
if (handle->pm_en == 0)
return PP_DPM_DISABLED;
- if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL
- || handle->eventmgr == NULL)
+ if (handle->hwmgr->hwmgr_func == NULL)
return PP_DPM_DISABLED;
return 0;
}
+static int amd_powerplay_create(struct amd_pp_init *pp_init,
+ void **handle)
+{
+ struct pp_instance *instance;
+
+ if (pp_init == NULL || handle == NULL)
+ return -EINVAL;
+
+ instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
+ if (instance == NULL)
+ return -ENOMEM;
+
+ instance->chip_family = pp_init->chip_family;
+ instance->chip_id = pp_init->chip_id;
+ instance->pm_en = pp_init->pm_en;
+ instance->feature_mask = pp_init->feature_mask;
+ instance->device = pp_init->device;
+ mutex_init(&instance->pp_lock);
+ *handle = instance;
+ return 0;
+}
+
+static int amd_powerplay_destroy(void *handle)
+{
+ struct pp_instance *instance = (struct pp_instance *)handle;
+
+ kfree(instance->hwmgr->hardcode_pp_table);
+ instance->hwmgr->hardcode_pp_table = NULL;
+
+ kfree(instance->hwmgr);
+ instance->hwmgr = NULL;
+
+ kfree(instance);
+ instance = NULL;
+ return 0;
+}
+
static int pp_early_init(void *handle)
{
int ret;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_instance *pp_handle = NULL;
- ret = smum_early_init(pp_handle);
- if (ret)
- return ret;
+ pp_handle = cgs_register_pp_handle(handle, amd_powerplay_create);
- if ((pp_handle->pm_en == 0)
- || cgs_is_virtualization_enabled(pp_handle->device))
- return PP_DPM_DISABLED;
+ if (!pp_handle)
+ return -EINVAL;
ret = hwmgr_early_init(pp_handle);
- if (ret) {
- pp_handle->pm_en = 0;
- return PP_DPM_DISABLED;
- }
-
- ret = eventmgr_early_init(pp_handle);
- if (ret) {
- kfree(pp_handle->hwmgr);
- pp_handle->hwmgr = NULL;
- pp_handle->pm_en = 0;
- return PP_DPM_DISABLED;
- }
+ if (ret)
+ return -EINVAL;
return 0;
}
static int pp_sw_init(void *handle)
{
- struct pp_smumgr *smumgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
- if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ if (ret >= 0) {
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->smu_init == NULL)
+ if (hwmgr->smumgr_funcs->smu_init == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->smu_init(smumgr);
+ ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
pr_info("amdgpu: powerplay sw initialized\n");
}
@@ -103,84 +129,86 @@ static int pp_sw_init(void *handle)
static int pp_sw_fini(void *handle)
{
- struct pp_smumgr *smumgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
- if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ if (ret >= 0) {
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->smu_fini == NULL)
+ if (hwmgr->smumgr_funcs->smu_fini == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->smu_fini(smumgr);
+ ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
}
return ret;
}
static int pp_hw_init(void *handle)
{
- struct pp_smumgr *smumgr;
- struct pp_eventmgr *eventmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr;
ret = pp_check(pp_handle);
- if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ if (ret >= 0) {
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->start_smu == NULL)
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- if(smumgr->smumgr_funcs->start_smu(smumgr)) {
+ if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
pr_err("smc start failed\n");
- smumgr->smumgr_funcs->smu_fini(smumgr);
+ hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
return -EINVAL;;
}
if (ret == PP_DPM_DISABLED)
- return PP_DPM_DISABLED;
+ goto exit;
+ ret = hwmgr_hw_init(pp_handle);
+ if (ret)
+ goto exit;
}
-
- ret = hwmgr_hw_init(pp_handle);
- if (ret)
- goto err;
-
- eventmgr = pp_handle->eventmgr;
- if (eventmgr->pp_eventmgr_init == NULL ||
- eventmgr->pp_eventmgr_init(eventmgr))
- goto err;
-
- return 0;
-err:
+ return ret;
+exit:
pp_handle->pm_en = 0;
- kfree(pp_handle->eventmgr);
- kfree(pp_handle->hwmgr);
- pp_handle->hwmgr = NULL;
- pp_handle->eventmgr = NULL;
- return PP_DPM_DISABLED;
+ cgs_notify_dpm_enabled(hwmgr->device, false);
+ return 0;
+
}
static int pp_hw_fini(void *handle)
{
- struct pp_eventmgr *eventmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
ret = pp_check(pp_handle);
+ if (ret == 0)
+ hwmgr_hw_fini(pp_handle);
+
+ return 0;
+}
- if (ret == 0) {
- eventmgr = pp_handle->eventmgr;
+static int pp_late_init(void *handle)
+{
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (eventmgr->pp_eventmgr_fini != NULL)
- eventmgr->pp_eventmgr_fini(eventmgr);
+ ret = pp_check(pp_handle);
+ if (ret == 0)
+ pp_dpm_dispatch_tasks(pp_handle,
+ AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
- hwmgr_hw_fini(pp_handle);
- }
return 0;
}
+static void pp_late_fini(void *handle)
+{
+ amd_powerplay_destroy(handle);
+}
+
+
static bool pp_is_idle(void *handle)
{
return false;
@@ -196,28 +224,6 @@ static int pp_sw_reset(void *handle)
return 0;
}
-
-int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
-{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- int ret = 0;
-
- ret = pp_check(pp_handle);
-
- if (ret != 0)
- return ret;
-
- hwmgr = pp_handle->hwmgr;
-
- if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
- pr_info("%s was not implemented.\n", __func__);
- return 0;
- }
-
- return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-}
-
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -227,7 +233,7 @@ static int pp_set_powergating_state(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -244,67 +250,52 @@ static int pp_set_powergating_state(void *handle,
static int pp_suspend(void *handle)
{
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
ret = pp_check(pp_handle);
-
- if (ret == PP_DPM_DISABLED)
- return 0;
- else if (ret != 0)
- return ret;
-
- eventmgr = pp_handle->eventmgr;
- pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
-
+ if (ret == 0)
+ hwmgr_hw_suspend(pp_handle);
return 0;
}
static int pp_resume(void *handle)
{
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
- struct pp_smumgr *smumgr;
- int ret, ret1;
+ struct pp_hwmgr *hwmgr;
+ int ret;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret1 = pp_check(pp_handle);
+ ret = pp_check(pp_handle);
- if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
- return ret1;
+ if (ret < 0)
+ return ret;
- smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->start_smu == NULL)
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->start_smu(smumgr);
- if (ret) {
+ if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
pr_err("smc start failed\n");
- smumgr->smumgr_funcs->smu_fini(smumgr);
- return ret;
+ hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
+ return -EINVAL;
}
- if (ret1 == PP_DPM_DISABLED)
+ if (ret == PP_DPM_DISABLED)
return 0;
- eventmgr = pp_handle->eventmgr;
-
- pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
-
- return 0;
+ return hwmgr_hw_resume(pp_handle);
}
const struct amd_ip_funcs pp_ip_funcs = {
.name = "powerplay",
.early_init = pp_early_init,
- .late_init = NULL,
+ .late_init = pp_late_init,
.sw_init = pp_sw_init,
.sw_fini = pp_sw_fini,
.hw_init = pp_hw_init,
.hw_fini = pp_hw_fini,
+ .late_fini = pp_late_fini,
.suspend = pp_suspend,
.resume = pp_resume,
.is_idle = pp_is_idle,
@@ -324,6 +315,63 @@ static int pp_dpm_fw_loading_complete(void *handle)
return 0;
}
+static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
+{
+ struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
+
+ ret = pp_check(pp_handle);
+
+ if (ret)
+ return ret;
+
+ hwmgr = pp_handle->hwmgr;
+
+ if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+}
+
+static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level *level)
+{
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter umd pstate, save current level, disable gfx cg*/
+ if (*level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ hwmgr->en_umd_pstate = true;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ }
+ } else {
+ /* exit umd pstate, restore level, enable gfx cg*/
+ if (!(*level & profile_mode_mask)) {
+ if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ *level = hwmgr->saved_dpm_level;
+ hwmgr->en_umd_pstate = false;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+ }
+}
+
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
@@ -333,18 +381,27 @@ static int pp_dpm_force_performance_level(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
+ if (level == hwmgr->dpm_level)
+ return 0;
+
if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
+ pp_dpm_en_umd_pstate(hwmgr, &level);
+ hwmgr->request_dpm_level = level;
+ hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
+ if (!ret)
+ hwmgr->dpm_level = hwmgr->request_dpm_level;
+
mutex_unlock(&pp_handle->pp_lock);
return 0;
}
@@ -359,7 +416,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -369,15 +426,16 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
return level;
}
-static int pp_dpm_get_sclk(void *handle, bool low)
+static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t clk = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -387,20 +445,21 @@ static int pp_dpm_get_sclk(void *handle, bool low)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
+ clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return clk;
}
-static int pp_dpm_get_mclk(void *handle, bool low)
+static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t clk = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -410,12 +469,12 @@ static int pp_dpm_get_mclk(void *handle, bool low)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
+ clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return clk;
}
-static int pp_dpm_powergate_vce(void *handle, bool gate)
+static void pp_dpm_powergate_vce(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -423,22 +482,21 @@ static int pp_dpm_powergate_vce(void *handle, bool gate)
ret = pp_check(pp_handle);
- if (ret != 0)
- return ret;
+ if (ret)
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
+ hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
}
-static int pp_dpm_powergate_uvd(void *handle, bool gate)
+static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -446,75 +504,35 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate)
ret = pp_check(pp_handle);
- if (ret != 0)
- return ret;
+ if (ret)
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
+ hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
-}
-
-static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
-{
- switch (state) {
- case POWER_STATE_TYPE_BATTERY:
- return PP_StateUILabel_Battery;
- case POWER_STATE_TYPE_BALANCED:
- return PP_StateUILabel_Balanced;
- case POWER_STATE_TYPE_PERFORMANCE:
- return PP_StateUILabel_Performance;
- default:
- return PP_StateUILabel_None;
- }
}
-static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
void *input, void *output)
{
int ret = 0;
- struct pem_event_data data = { {0} };
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
- mutex_lock(&pp_handle->pp_lock);
- switch (event_id) {
- case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- case AMD_PP_EVENT_ENABLE_USER_STATE:
- {
- enum amd_pm_state_type ps;
-
- if (input == NULL) {
- ret = -EINVAL;
- break;
- }
- ps = *(unsigned long *)input;
- data.requested_ui_label = power_state_convert(ps);
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- }
- case AMD_PP_EVENT_COMPLETE_INIT:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- case AMD_PP_EVENT_READJUST_POWER_STATE:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- default:
- break;
- }
+ mutex_lock(&pp_handle->pp_lock);
+ ret = hwmgr_handle_task(pp_handle, task_id, input, output);
mutex_unlock(&pp_handle->pp_lock);
+
return ret;
}
@@ -528,7 +546,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -562,7 +580,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
return pm_type;
}
-static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -570,30 +588,30 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
ret = pp_check(pp_handle);
- if (ret != 0)
- return ret;
+ if (ret)
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
+ hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
}
-static int pp_dpm_get_fan_control_mode(void *handle)
+static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t mode = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -603,9 +621,9 @@ static int pp_dpm_get_fan_control_mode(void *handle)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+ mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return mode;
}
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
@@ -616,7 +634,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -639,7 +657,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -663,7 +681,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -685,7 +703,7 @@ static int pp_dpm_get_temperature(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -710,7 +728,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -755,7 +773,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -778,7 +796,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -820,7 +838,7 @@ static int pp_dpm_force_clock_level(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -844,7 +862,7 @@ static int pp_dpm_print_clock_levels(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -867,7 +885,7 @@ static int pp_dpm_get_sclk_od(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -890,7 +908,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -914,7 +932,7 @@ static int pp_dpm_get_mclk_od(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -937,7 +955,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -961,7 +979,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -987,7 +1005,7 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return NULL;
hwmgr = pp_handle->hwmgr;
@@ -1128,7 +1146,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-const struct amd_powerplay_funcs pp_dpm_funcs = {
+const struct amd_pm_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1160,81 +1178,27 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_power_profile_state = pp_dpm_get_power_profile_state,
.set_power_profile_state = pp_dpm_set_power_profile_state,
.switch_power_profile = pp_dpm_switch_power_profile,
+ .set_clockgating_by_smu = pp_set_clockgating_by_smu,
};
-int amd_powerplay_create(struct amd_pp_init *pp_init,
- void **handle)
-{
- struct pp_instance *instance;
-
- if (pp_init == NULL || handle == NULL)
- return -EINVAL;
-
- instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
- if (instance == NULL)
- return -ENOMEM;
-
- instance->pp_valid = PP_VALID;
- instance->chip_family = pp_init->chip_family;
- instance->chip_id = pp_init->chip_id;
- instance->pm_en = pp_init->pm_en;
- instance->feature_mask = pp_init->feature_mask;
- instance->device = pp_init->device;
- mutex_init(&instance->pp_lock);
- *handle = instance;
- return 0;
-}
-
-int amd_powerplay_destroy(void *handle)
-{
- struct pp_instance *instance = (struct pp_instance *)handle;
-
- if (instance->pm_en) {
- kfree(instance->eventmgr);
- kfree(instance->hwmgr);
- instance->hwmgr = NULL;
- instance->eventmgr = NULL;
- }
-
- kfree(instance->smu_mgr);
- instance->smu_mgr = NULL;
- kfree(instance);
- instance = NULL;
- return 0;
-}
-
int amd_powerplay_reset(void *handle)
{
struct pp_instance *instance = (struct pp_instance *)handle;
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
int ret;
- if (cgs_is_virtualization_enabled(instance->smu_mgr->device))
- return PP_DPM_DISABLED;
-
ret = pp_check(instance);
- if (ret != 0)
+ if (ret)
return ret;
- ret = pp_hw_fini(handle);
+ ret = pp_hw_fini(instance);
if (ret)
return ret;
ret = hwmgr_hw_init(instance);
if (ret)
- return PP_DPM_DISABLED;
-
- eventmgr = instance->eventmgr;
-
- if (eventmgr->pp_eventmgr_init == NULL)
- return PP_DPM_DISABLED;
-
- ret = eventmgr->pp_eventmgr_init(eventmgr);
- if (ret)
return ret;
- return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+ return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
}
/* export this function to DAL */
@@ -1248,7 +1212,7 @@ int amd_powerplay_display_configuration_change(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1267,7 +1231,7 @@ int amd_powerplay_get_display_power_level(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1292,7 +1256,7 @@ int amd_powerplay_get_current_clocks(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1309,7 +1273,7 @@ int amd_powerplay_get_current_clocks(void *handle,
ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
- if (ret != 0) {
+ if (ret) {
pr_info("Error in phm_get_clock_info \n");
mutex_unlock(&pp_handle->pp_lock);
return -EINVAL;
@@ -1343,7 +1307,7 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1366,7 +1330,7 @@ int amd_powerplay_get_clock_by_type_with_latency(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!clocks)
@@ -1388,7 +1352,7 @@ int amd_powerplay_get_clock_by_type_with_voltage(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!clocks)
@@ -1412,7 +1376,7 @@ int amd_powerplay_set_watermarks_for_clocks_ranges(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!wm_with_clock_ranges)
@@ -1436,7 +1400,7 @@ int amd_powerplay_display_clock_voltage_request(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!clock)
@@ -1460,7 +1424,7 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
deleted file mode 100644
index 7509e3850087..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the 'event manager' sub-component of powerplay.
-# It provides the event management services for the driver.
-
-EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \
- eventactionchains.o eventsubchains.o eventtasks.o psm.o
-
-AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR))
-
-AMD_POWERPLAY_FILES += $(AMD_PP_EVENT)
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
deleted file mode 100644
index 8cee4e0f9fde..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventactionchains.h"
-#include "eventsubchains.h"
-
-static const pem_event_action * const initialize_event[] = {
- block_adjust_power_state_tasks,
- power_budget_tasks,
- system_config_tasks,
- setup_asic_tasks,
- enable_dynamic_state_management_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- initialize_thermal_controller_tasks,
- conditionally_force_3d_performance_state_tasks,
- process_vbios_eventinfo_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain initialize_action_chain = {
- "Initialize",
- initialize_event
-};
-
-static const pem_event_action * const uninitialize_event[] = {
- ungate_all_display_phys_tasks,
- uninitialize_display_phy_access_tasks,
- disable_gfx_voltage_island_power_gating_tasks,
- disable_gfx_clock_gating_tasks,
- uninitialize_thermal_controller_tasks,
- set_boot_state_tasks,
- adjust_power_state_tasks,
- disable_dynamic_state_management_tasks,
- disable_clock_power_gatings_tasks,
- cleanup_asic_tasks,
- prepare_for_pnp_stop_tasks,
- NULL
-};
-
-const struct action_chain uninitialize_action_chain = {
- "Uninitialize",
- uninitialize_event
-};
-
-static const pem_event_action * const power_source_change_event_pp_enabled[] = {
- set_power_source_tasks,
- set_power_saving_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- set_nbmcu_state_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain power_source_change_action_chain_pp_enabled = {
- "Power source change - PowerPlay enabled",
- power_source_change_event_pp_enabled
-};
-
-static const pem_event_action * const power_source_change_event_pp_disabled[] = {
- set_power_source_tasks,
- set_nbmcu_state_tasks,
- NULL
-};
-
-const struct action_chain power_source_changes_action_chain_pp_disabled = {
- "Power source change - PowerPlay disabled",
- power_source_change_event_pp_disabled
-};
-
-static const pem_event_action * const power_source_change_event_hardware_dc[] = {
- set_power_source_tasks,
- set_power_saving_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- reset_hardware_dc_notification_tasks,
- set_nbmcu_state_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain power_source_change_action_chain_hardware_dc = {
- "Power source change - with Hardware DC switching",
- power_source_change_event_hardware_dc
-};
-
-static const pem_event_action * const suspend_event[] = {
- reset_display_phy_access_tasks,
- unregister_interrupt_tasks,
- disable_gfx_voltage_island_power_gating_tasks,
- disable_gfx_clock_gating_tasks,
- notify_smu_suspend_tasks,
- disable_smc_firmware_ctf_tasks,
- set_boot_state_tasks,
- adjust_power_state_tasks,
- disable_fps_tasks,
- vari_bright_suspend_tasks,
- reset_fan_speed_to_default_tasks,
- power_down_asic_tasks,
- disable_stutter_mode_tasks,
- set_connected_standby_tasks,
- block_hw_access_tasks,
- NULL
-};
-
-const struct action_chain suspend_action_chain = {
- "Suspend",
- suspend_event
-};
-
-static const pem_event_action * const resume_event[] = {
- unblock_hw_access_tasks,
- resume_connected_standby_tasks,
- notify_smu_resume_tasks,
- reset_display_configCounter_tasks,
- update_dal_configuration_tasks,
- vari_bright_resume_tasks,
- setup_asic_tasks,
- enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
- enable_dynamic_state_management_tasks,
- enable_disable_bapm_tasks,
- initialize_thermal_controller_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- notify_hw_power_source_tasks,
- process_vbios_event_info_tasks,
- enable_gfx_clock_gating_tasks,
- enable_gfx_voltage_island_power_gating_tasks,
- reset_clock_gating_tasks,
- notify_smu_vpu_recovery_end_tasks,
- disable_vpu_cap_tasks,
- execute_escape_sequence_tasks,
- NULL
-};
-
-
-const struct action_chain resume_action_chain = {
- "resume",
- resume_event
-};
-
-static const pem_event_action * const complete_init_event[] = {
- unblock_adjust_power_state_tasks,
- adjust_power_state_tasks,
- enable_gfx_clock_gating_tasks,
- enable_gfx_voltage_island_power_gating_tasks,
- notify_power_state_change_tasks,
- NULL
-};
-
-const struct action_chain complete_init_action_chain = {
- "complete init",
- complete_init_event
-};
-
-static const pem_event_action * const enable_gfx_clock_gating_event[] = {
- enable_gfx_clock_gating_tasks,
- NULL
-};
-
-const struct action_chain enable_gfx_clock_gating_action_chain = {
- "enable gfx clock gate",
- enable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const disable_gfx_clock_gating_event[] = {
- disable_gfx_clock_gating_tasks,
- NULL
-};
-
-const struct action_chain disable_gfx_clock_gating_action_chain = {
- "disable gfx clock gate",
- disable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const enable_cgpg_event[] = {
- enable_cgpg_tasks,
- NULL
-};
-
-const struct action_chain enable_cgpg_action_chain = {
- "eable cg pg",
- enable_cgpg_event
-};
-
-static const pem_event_action * const disable_cgpg_event[] = {
- disable_cgpg_tasks,
- NULL
-};
-
-const struct action_chain disable_cgpg_action_chain = {
- "disable cg pg",
- disable_cgpg_event
-};
-
-
-/* Enable user _2d performance and activate */
-
-static const pem_event_action * const enable_user_state_event[] = {
- create_new_user_performance_state_tasks,
- adjust_power_state_tasks,
- NULL
-};
-
-const struct action_chain enable_user_state_action_chain = {
- "Enable user state",
- enable_user_state_event
-};
-
-static const pem_event_action * const enable_user_2d_performance_event[] = {
- enable_user_2d_performance_tasks,
- add_user_2d_performance_state_tasks,
- set_performance_state_tasks,
- adjust_power_state_tasks,
- delete_user_2d_performance_state_tasks,
- NULL
-};
-
-const struct action_chain enable_user_2d_performance_action_chain = {
- "enable_user_2d_performance_event_activate",
- enable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const disable_user_2d_performance_event[] = {
- disable_user_2d_performance_tasks,
- delete_user_2d_performance_state_tasks,
- NULL
-};
-
-const struct action_chain disable_user_2d_performance_action_chain = {
- "disable_user_2d_performance_event",
- disable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const display_config_change_event[] = {
- /* countDisplayConfigurationChangeEventTasks, */
- unblock_adjust_power_state_tasks,
- set_cpu_power_state,
- notify_hw_power_source_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- /* updateDALConfigurationTasks,
- variBrightDisplayConfigurationChangeTasks, */
- adjust_power_state_tasks,
- /*enableDisableFPSTasks,
- setNBMCUStateTasks,
- notifyPCIEDeviceReadyTasks,*/
- NULL
-};
-
-const struct action_chain display_config_change_action_chain = {
- "Display configuration change",
- display_config_change_event
-};
-
-static const pem_event_action * const readjust_power_state_event[] = {
- adjust_power_state_tasks,
- NULL
-};
-
-const struct action_chain readjust_power_state_action_chain = {
- "re-adjust power state",
- readjust_power_state_event
-};
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
deleted file mode 100644
index f181e53cdcda..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_ACTION_CHAINS_H_
-#define _EVENT_ACTION_CHAINS_H_
-#include "eventmgr.h"
-
-extern const struct action_chain initialize_action_chain;
-
-extern const struct action_chain uninitialize_action_chain;
-
-extern const struct action_chain power_source_change_action_chain_pp_enabled;
-
-extern const struct action_chain power_source_changes_action_chain_pp_disabled;
-
-extern const struct action_chain power_source_change_action_chain_hardware_dc;
-
-extern const struct action_chain suspend_action_chain;
-
-extern const struct action_chain resume_action_chain;
-
-extern const struct action_chain complete_init_action_chain;
-
-extern const struct action_chain enable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain disable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain enable_cgpg_action_chain;
-
-extern const struct action_chain disable_cgpg_action_chain;
-
-extern const struct action_chain enable_user_2d_performance_action_chain;
-
-extern const struct action_chain disable_user_2d_performance_action_chain;
-
-extern const struct action_chain enable_user_state_action_chain;
-
-extern const struct action_chain readjust_power_state_action_chain;
-
-extern const struct action_chain display_config_change_action_chain;
-
-#endif /*_EVENT_ACTION_CHAINS_H_*/
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
deleted file mode 100644
index a3cd230d636d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "ppinterrupt.h"
-#include "hardwaremanager.h"
-
-void pem_init_feature_info(struct pp_eventmgr *eventmgr)
-{
-
- /* PowerPlay info */
- eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable =
- PP_StateUILabel_Performance;
-
- eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label =
- PP_StateUILabel_Performance;
-
- eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable =
- PP_StateUILabel_Battery;
-
- eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label =
- PP_StateUILabel_Battery;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) {
- eventmgr->features[PP_Feature_PowerPlay].supported = true;
- eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION;
- eventmgr->features[PP_Feature_PowerPlay].enabled_default = true;
- eventmgr->features[PP_Feature_PowerPlay].enabled = true;
- } else {
- eventmgr->features[PP_Feature_PowerPlay].supported = false;
- eventmgr->features[PP_Feature_PowerPlay].enabled = false;
- eventmgr->features[PP_Feature_PowerPlay].enabled_default = false;
- }
-
- eventmgr->features[PP_Feature_Force3DClock].supported = true;
- eventmgr->features[PP_Feature_Force3DClock].enabled = false;
- eventmgr->features[PP_Feature_Force3DClock].enabled_default = false;
- eventmgr->features[PP_Feature_Force3DClock].version = 1;
-
- /* over drive*/
- eventmgr->features[PP_Feature_User2DPerformance].version = 4;
- eventmgr->features[PP_Feature_User3DPerformance].version = 4;
- eventmgr->features[PP_Feature_OverdriveTest].version = 4;
-
- eventmgr->features[PP_Feature_OverDrive].version = 4;
- eventmgr->features[PP_Feature_OverDrive].enabled = false;
- eventmgr->features[PP_Feature_OverDrive].enabled_default = false;
-
- eventmgr->features[PP_Feature_User2DPerformance].supported = false;
- eventmgr->features[PP_Feature_User2DPerformance].enabled = false;
- eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false;
-
- eventmgr->features[PP_Feature_User3DPerformance].supported = false;
- eventmgr->features[PP_Feature_User3DPerformance].enabled = false;
- eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false;
-
- eventmgr->features[PP_Feature_OverdriveTest].supported = false;
- eventmgr->features[PP_Feature_OverdriveTest].enabled = false;
- eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false;
-
- eventmgr->features[PP_Feature_OverDrive].supported = false;
-
- eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false;
-
- /* Multi UVD States support */
- eventmgr->features[PP_Feature_MultiUVDState].supported = false;
- eventmgr->features[PP_Feature_MultiUVDState].enabled = false;
- eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false;
-
- /* Dynamic UVD States support */
- eventmgr->features[PP_Feature_DynamicUVDState].supported = false;
- eventmgr->features[PP_Feature_DynamicUVDState].enabled = false;
- eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false;
-
- /* VCE DPM support */
- eventmgr->features[PP_Feature_VCEDPM].supported = false;
- eventmgr->features[PP_Feature_VCEDPM].enabled = false;
- eventmgr->features[PP_Feature_VCEDPM].enabled_default = false;
-
- /* ACP PowerGating support */
- eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false;
- eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false;
- eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false;
-
- /* PPM support */
- eventmgr->features[PP_Feature_PPM].version = 1;
- eventmgr->features[PP_Feature_PPM].supported = false;
- eventmgr->features[PP_Feature_PPM].enabled = false;
-
- /* FFC support (enables fan and temp settings, Gemini needs temp settings) */
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) ||
- phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) {
- eventmgr->features[PP_Feature_FFC].version = 1;
- eventmgr->features[PP_Feature_FFC].supported = true;
- eventmgr->features[PP_Feature_FFC].enabled = true;
- eventmgr->features[PP_Feature_FFC].enabled_default = true;
- } else {
- eventmgr->features[PP_Feature_FFC].supported = false;
- eventmgr->features[PP_Feature_FFC].enabled = false;
- eventmgr->features[PP_Feature_FFC].enabled_default = false;
- }
-
- eventmgr->features[PP_Feature_VariBright].supported = false;
- eventmgr->features[PP_Feature_VariBright].enabled = false;
- eventmgr->features[PP_Feature_VariBright].enabled_default = false;
-
- eventmgr->features[PP_Feature_BACO].supported = false;
- eventmgr->features[PP_Feature_BACO].supported = false;
- eventmgr->features[PP_Feature_BACO].enabled_default = false;
-
- /* PowerDown feature support */
- eventmgr->features[PP_Feature_PowerDown].supported = false;
- eventmgr->features[PP_Feature_PowerDown].enabled = false;
- eventmgr->features[PP_Feature_PowerDown].enabled_default = false;
-
- eventmgr->features[PP_Feature_FPS].version = 1;
- eventmgr->features[PP_Feature_FPS].supported = false;
- eventmgr->features[PP_Feature_FPS].enabled_default = false;
- eventmgr->features[PP_Feature_FPS].enabled = false;
-
- eventmgr->features[PP_Feature_ViPG].version = 1;
- eventmgr->features[PP_Feature_ViPG].supported = false;
- eventmgr->features[PP_Feature_ViPG].enabled_default = false;
- eventmgr->features[PP_Feature_ViPG].enabled = false;
-}
-
-static int thermal_interrupt_callback(void *private_data,
- unsigned src_id, const uint32_t *iv_entry)
-{
- /* TO DO hanle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data*/
- pr_info("current thermal is out of range \n");
- return 0;
-}
-
-int pem_register_interrupts(struct pp_eventmgr *eventmgr)
-{
- int result = 0;
- struct pp_interrupt_registration_info info;
-
- info.call_back = thermal_interrupt_callback;
- info.context = eventmgr;
-
- result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info);
-
- /* TODO:
- * 2. Register CTF event interrupt
- * 3. Register for vbios events interrupt
- * 4. Register External Throttle Interrupt
- * 5. Register Smc To Host Interrupt
- * */
- return result;
-}
-
-
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr)
-{
- return 0;
-}
-
-
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr)
-{
- eventmgr->features[PP_Feature_MultiUVDState].supported = false;
- eventmgr->features[PP_Feature_VariBright].supported = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
- eventmgr->features[PP_Feature_OverDrive].supported = false;
- eventmgr->features[PP_Feature_OverdriveTest].supported = false;
- eventmgr->features[PP_Feature_User3DPerformance].supported = false;
- eventmgr->features[PP_Feature_User2DPerformance].supported = false;
- eventmgr->features[PP_Feature_PowerPlay].supported = false;
- eventmgr->features[PP_Feature_Force3DClock].supported = false;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
deleted file mode 100644
index cd1ca07ef7f7..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmanagement.h"
-#include "eventmgr.h"
-#include "eventactionchains.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr)
-{
- int i;
-
- for (i = 0; i < AMD_PP_EVENT_MAX; i++)
- eventmgr->event_chain[i] = NULL;
-
- eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr);
- return 0;
-}
-
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
-{
- const pem_event_action * const *paction_chain;
- const pem_event_action *psub_chain;
- int tmp_result = 0;
- int result = 0;
-
- if (eventmgr == NULL || event_chain == NULL || event_data == NULL)
- return -EINVAL;
-
- for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) {
- if (0 != result)
- return result;
-
- for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) {
- tmp_result = (*psub_chain)(eventmgr, event_data);
- if (0 == result)
- result = tmp_result;
- }
- }
-
- return result;
-}
-
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &suspend_action_chain;
-}
-
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &initialize_action_chain;
-}
-
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &uninitialize_action_chain;
-}
-
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &power_source_change_action_chain_pp_enabled; /* other case base on feature info*/
-}
-
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &resume_action_chain;
-}
-
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &disable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &disable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &complete_init_action_chain;
-}
-
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_user_state_action_chain;
-}
-
-const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &readjust_power_state_action_chain;
-}
-
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &display_config_change_action_chain;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
deleted file mode 100644
index 383d4b295aa9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_MANAGEMENT_H_
-#define _EVENT_MANAGEMENT_H_
-
-#include "eventmgr.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr);
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data);
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr);
-
-extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr);
-extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr);
-
-
-#endif /* _EVENT_MANAGEMENT_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
deleted file mode 100644
index 3e3ca03bd344..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "eventmgr.h"
-#include "hwmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-
-static int pem_init(struct pp_eventmgr *eventmgr)
-{
- int result = 0;
- struct pem_event_data event_data = { {0} };
-
- /* Initialize PowerPlay feature info */
- pem_init_feature_info(eventmgr);
-
- /* Initialize event action chains */
- pem_init_event_action_chains(eventmgr);
-
- /* Call initialization event */
- result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data);
-
- /* if (0 != result)
- return result; */
-
- /* Register interrupt callback functions */
- result = pem_register_interrupts(eventmgr);
- return 0;
-}
-
-static void pem_fini(struct pp_eventmgr *eventmgr)
-{
- struct pem_event_data event_data = { {0} };
-
- pem_uninit_featureInfo(eventmgr);
- pem_unregister_interrupts(eventmgr);
-
- pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-}
-
-int eventmgr_early_init(struct pp_instance *handle)
-{
- struct pp_eventmgr *eventmgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL);
- if (eventmgr == NULL)
- return -ENOMEM;
-
- eventmgr->hwmgr = handle->hwmgr;
- handle->eventmgr = eventmgr;
-
- eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor);
- eventmgr->pp_eventmgr_init = pem_init;
- eventmgr->pp_eventmgr_fini = pem_fini;
-
- return 0;
-}
-
-static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data)
-{
- if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL)
- return -EINVAL;
-
- return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data);
-}
-
-int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data)
-{
- int r = 0;
-
- r = pem_handle_event_unlocked(eventmgr, event, event_data);
-
- return r;
-}
-
-bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr)
-{
- return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr));
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
deleted file mode 100644
index b82c43af59ab..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "eventmgr.h"
-#include "eventsubchains.h"
-#include "eventtasks.h"
-#include "hardwaremanager.h"
-
-const pem_event_action reset_display_phy_access_tasks[] = {
- pem_task_reset_display_phys_access,
- NULL
-};
-
-const pem_event_action broadcast_power_policy_tasks[] = {
- /* PEM_Task_BroadcastPowerPolicyChange, */
- NULL
-};
-
-const pem_event_action unregister_interrupt_tasks[] = {
- pem_task_unregister_interrupts,
- NULL
-};
-
-/* Disable GFX Voltage Islands Power Gating */
-const pem_event_action disable_gfx_voltage_island_powergating_tasks[] = {
- pem_task_disable_voltage_island_power_gating,
- NULL
-};
-
-const pem_event_action disable_gfx_clockgating_tasks[] = {
- pem_task_disable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action block_adjust_power_state_tasks[] = {
- pem_task_block_adjust_power_state,
- NULL
-};
-
-
-const pem_event_action unblock_adjust_power_state_tasks[] = {
- pem_task_unblock_adjust_power_state,
- NULL
-};
-
-const pem_event_action set_performance_state_tasks[] = {
- pem_task_set_performance_state,
- NULL
-};
-
-const pem_event_action get_2d_performance_state_tasks[] = {
- pem_task_get_2D_performance_state_id,
- NULL
-};
-
-const pem_event_action conditionally_force3D_performance_state_tasks[] = {
- pem_task_conditionally_force_3d_performance_state,
- NULL
-};
-
-const pem_event_action process_vbios_eventinfo_tasks[] = {
- /* PEM_Task_ProcessVbiosEventInfo,*/
- NULL
-};
-
-const pem_event_action enable_dynamic_state_management_tasks[] = {
- /* PEM_Task_ResetBAPMPolicyChangedFlag,*/
- pem_task_get_boot_state_id,
- pem_task_enable_dynamic_state_management,
- pem_task_register_interrupts,
- NULL
-};
-
-const pem_event_action enable_clock_power_gatings_tasks[] = {
- pem_task_enable_clock_power_gatings_tasks,
- pem_task_powerdown_uvd_tasks,
- pem_task_powerdown_vce_tasks,
- NULL
-};
-
-const pem_event_action setup_asic_tasks[] = {
- pem_task_setup_asic,
- NULL
-};
-
-const pem_event_action power_budget_tasks[] = {
- /* TODO
- * PEM_Task_PowerBudgetWaiverAvailable,
- * PEM_Task_PowerBudgetWarningMessage,
- * PEM_Task_PruneStatesBasedOnPowerBudget,
- */
- NULL
-};
-
-const pem_event_action system_config_tasks[] = {
- /* PEM_Task_PruneStatesBasedOnSystemConfig,*/
- NULL
-};
-
-
-const pem_event_action conditionally_force_3d_performance_state_tasks[] = {
- pem_task_conditionally_force_3d_performance_state,
- NULL
-};
-
-const pem_event_action ungate_all_display_phys_tasks[] = {
- /* PEM_Task_GetDisplayPhyAccessInfo */
- NULL
-};
-
-const pem_event_action uninitialize_display_phy_access_tasks[] = {
- /* PEM_Task_UninitializeDisplayPhysAccess, */
- NULL
-};
-
-const pem_event_action disable_gfx_voltage_island_power_gating_tasks[] = {
- /* PEM_Task_DisableVoltageIslandPowerGating, */
- NULL
-};
-
-const pem_event_action disable_gfx_clock_gating_tasks[] = {
- pem_task_disable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action set_boot_state_tasks[] = {
- pem_task_get_boot_state_id,
- pem_task_set_boot_state,
- NULL
-};
-
-const pem_event_action adjust_power_state_tasks[] = {
- pem_task_notify_hw_mgr_display_configuration_change,
- pem_task_adjust_power_state,
- pem_task_notify_smc_display_config_after_power_state_adjustment,
- pem_task_update_allowed_performance_levels,
- /* to do pem_task_Enable_disable_bapm, */
- NULL
-};
-
-const pem_event_action disable_dynamic_state_management_tasks[] = {
- pem_task_unregister_interrupts,
- pem_task_get_boot_state_id,
- pem_task_disable_dynamic_state_management,
- NULL
-};
-
-const pem_event_action disable_clock_power_gatings_tasks[] = {
- pem_task_disable_clock_power_gatings_tasks,
- NULL
-};
-
-const pem_event_action cleanup_asic_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- pem_task_cleanup_asic,
- NULL
-};
-
-const pem_event_action prepare_for_pnp_stop_tasks[] = {
- /* PEM_Task_PrepareForPnpStop,*/
- NULL
-};
-
-const pem_event_action set_power_source_tasks[] = {
- pem_task_set_power_source,
- pem_task_notify_hw_of_power_source,
- NULL
-};
-
-const pem_event_action set_power_saving_state_tasks[] = {
- pem_task_reset_power_saving_state,
- pem_task_get_power_saving_state,
- pem_task_set_power_saving_state,
- /* PEM_Task_ResetODDCState,
- * PEM_Task_GetODDCState,
- * PEM_Task_SetODDCState,*/
- NULL
-};
-
-const pem_event_action enable_disable_fps_tasks[] = {
- /* PEM_Task_EnableDisableFPS,*/
- NULL
-};
-
-const pem_event_action set_nbmcu_state_tasks[] = {
- /* PEM_Task_NBMCUStateChange,*/
- NULL
-};
-
-const pem_event_action reset_hardware_dc_notification_tasks[] = {
- /* PEM_Task_ResetHardwareDCNotification,*/
- NULL
-};
-
-
-const pem_event_action notify_smu_suspend_tasks[] = {
- /* PEM_Task_NotifySMUSuspend,*/
- NULL
-};
-
-const pem_event_action disable_smc_firmware_ctf_tasks[] = {
- pem_task_disable_smc_firmware_ctf,
- NULL
-};
-
-const pem_event_action disable_fps_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- NULL
-};
-
-const pem_event_action vari_bright_suspend_tasks[] = {
- /* PEM_Task_VariBright_Suspend,*/
- NULL
-};
-
-const pem_event_action reset_fan_speed_to_default_tasks[] = {
- /* PEM_Task_ResetFanSpeedToDefault,*/
- NULL
-};
-
-const pem_event_action power_down_asic_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- pem_task_power_down_asic,
- NULL
-};
-
-const pem_event_action disable_stutter_mode_tasks[] = {
- /* PEM_Task_DisableStutterMode,*/
- NULL
-};
-
-const pem_event_action set_connected_standby_tasks[] = {
- /* PEM_Task_SetConnectedStandby,*/
- NULL
-};
-
-const pem_event_action block_hw_access_tasks[] = {
- pem_task_block_hw_access,
- NULL
-};
-
-const pem_event_action unblock_hw_access_tasks[] = {
- pem_task_un_block_hw_access,
- NULL
-};
-
-const pem_event_action resume_connected_standby_tasks[] = {
- /* PEM_Task_ResumeConnectedStandby,*/
- NULL
-};
-
-const pem_event_action notify_smu_resume_tasks[] = {
- /* PEM_Task_NotifySMUResume,*/
- NULL
-};
-
-const pem_event_action reset_display_configCounter_tasks[] = {
- pem_task_reset_display_phys_access,
- NULL
-};
-
-const pem_event_action update_dal_configuration_tasks[] = {
- /* PEM_Task_CheckVBlankTime,*/
- NULL
-};
-
-const pem_event_action vari_bright_resume_tasks[] = {
- /* PEM_Task_VariBright_Resume,*/
- NULL
-};
-
-const pem_event_action notify_hw_power_source_tasks[] = {
- pem_task_notify_hw_of_power_source,
- NULL
-};
-
-const pem_event_action process_vbios_event_info_tasks[] = {
- /* PEM_Task_ProcessVbiosEventInfo,*/
- NULL
-};
-
-const pem_event_action enable_gfx_clock_gating_tasks[] = {
- pem_task_enable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action enable_gfx_voltage_island_power_gating_tasks[] = {
- pem_task_enable_voltage_island_power_gating,
- NULL
-};
-
-const pem_event_action reset_clock_gating_tasks[] = {
- /* PEM_Task_ResetClockGating*/
- NULL
-};
-
-const pem_event_action notify_smu_vpu_recovery_end_tasks[] = {
- /* PEM_Task_NotifySmuVPURecoveryEnd,*/
- NULL
-};
-
-const pem_event_action disable_vpu_cap_tasks[] = {
- /* PEM_Task_DisableVPUCap,*/
- NULL
-};
-
-const pem_event_action execute_escape_sequence_tasks[] = {
- /* PEM_Task_ExecuteEscapesequence,*/
- NULL
-};
-
-const pem_event_action notify_power_state_change_tasks[] = {
- pem_task_notify_power_state_change,
- NULL
-};
-
-const pem_event_action enable_cgpg_tasks[] = {
- pem_task_enable_cgpg,
- NULL
-};
-
-const pem_event_action disable_cgpg_tasks[] = {
- pem_task_disable_cgpg,
- NULL
-};
-
-const pem_event_action enable_user_2d_performance_tasks[] = {
- /* PEM_Task_SetUser2DPerformanceFlag,*/
- /* PEM_Task_UpdateUser2DPerformanceEnableEvents,*/
- NULL
-};
-
-const pem_event_action add_user_2d_performance_state_tasks[] = {
- /* PEM_Task_Get2DPerformanceTemplate,*/
- /* PEM_Task_AllocateNewPowerStateMemory,*/
- /* PEM_Task_CopyNewPowerStateInfo,*/
- /* PEM_Task_UpdateNewPowerStateClocks,*/
- /* PEM_Task_UpdateNewPowerStateUser2DPerformanceFlag,*/
- /* PEM_Task_AddPowerState,*/
- /* PEM_Task_ReleaseNewPowerStateMemory,*/
- NULL
-};
-
-const pem_event_action delete_user_2d_performance_state_tasks[] = {
- /* PEM_Task_GetCurrentUser2DPerformanceStateID,*/
- /* PEM_Task_DeletePowerState,*/
- /* PEM_Task_SetCurrentUser2DPerformanceStateID,*/
- NULL
-};
-
-const pem_event_action disable_user_2d_performance_tasks[] = {
- /* PEM_Task_ResetUser2DPerformanceFlag,*/
- /* PEM_Task_UpdateUser2DPerformanceDisableEvents,*/
- NULL
-};
-
-const pem_event_action enable_stutter_mode_tasks[] = {
- pem_task_enable_stutter_mode,
- NULL
-};
-
-const pem_event_action enable_disable_bapm_tasks[] = {
- /*PEM_Task_EnableDisableBAPM,*/
- NULL
-};
-
-const pem_event_action reset_boot_state_tasks[] = {
- pem_task_reset_boot_state,
- NULL
-};
-
-const pem_event_action create_new_user_performance_state_tasks[] = {
- pem_task_create_user_performance_state,
- NULL
-};
-
-const pem_event_action initialize_thermal_controller_tasks[] = {
- pem_task_initialize_thermal_controller,
- NULL
-};
-
-const pem_event_action uninitialize_thermal_controller_tasks[] = {
- pem_task_uninitialize_thermal_controller,
- NULL
-};
-
-const pem_event_action set_cpu_power_state[] = {
- pem_task_set_cpu_power_state,
- NULL
-}; \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
deleted file mode 100644
index 7714cb927428..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENT_SUB_CHAINS_H_
-#define _EVENT_SUB_CHAINS_H_
-
-#include "eventmgr.h"
-
-extern const pem_event_action reset_display_phy_access_tasks[];
-extern const pem_event_action broadcast_power_policy_tasks[];
-extern const pem_event_action unregister_interrupt_tasks[];
-extern const pem_event_action disable_GFX_voltage_island_powergating_tasks[];
-extern const pem_event_action disable_GFX_clockgating_tasks[];
-extern const pem_event_action block_adjust_power_state_tasks[];
-extern const pem_event_action unblock_adjust_power_state_tasks[];
-extern const pem_event_action set_performance_state_tasks[];
-extern const pem_event_action get_2D_performance_state_tasks[];
-extern const pem_event_action conditionally_force3D_performance_state_tasks[];
-extern const pem_event_action process_vbios_eventinfo_tasks[];
-extern const pem_event_action enable_dynamic_state_management_tasks[];
-extern const pem_event_action enable_clock_power_gatings_tasks[];
-extern const pem_event_action conditionally_force3D_performance_state_tasks[];
-extern const pem_event_action setup_asic_tasks[];
-extern const pem_event_action power_budget_tasks[];
-extern const pem_event_action system_config_tasks[];
-extern const pem_event_action get_2d_performance_state_tasks[];
-extern const pem_event_action conditionally_force_3d_performance_state_tasks[];
-extern const pem_event_action ungate_all_display_phys_tasks[];
-extern const pem_event_action uninitialize_display_phy_access_tasks[];
-extern const pem_event_action disable_gfx_voltage_island_power_gating_tasks[];
-extern const pem_event_action disable_gfx_clock_gating_tasks[];
-extern const pem_event_action set_boot_state_tasks[];
-extern const pem_event_action adjust_power_state_tasks[];
-extern const pem_event_action disable_dynamic_state_management_tasks[];
-extern const pem_event_action disable_clock_power_gatings_tasks[];
-extern const pem_event_action cleanup_asic_tasks[];
-extern const pem_event_action prepare_for_pnp_stop_tasks[];
-extern const pem_event_action set_power_source_tasks[];
-extern const pem_event_action set_power_saving_state_tasks[];
-extern const pem_event_action enable_disable_fps_tasks[];
-extern const pem_event_action set_nbmcu_state_tasks[];
-extern const pem_event_action reset_hardware_dc_notification_tasks[];
-extern const pem_event_action notify_smu_suspend_tasks[];
-extern const pem_event_action disable_smc_firmware_ctf_tasks[];
-extern const pem_event_action disable_fps_tasks[];
-extern const pem_event_action vari_bright_suspend_tasks[];
-extern const pem_event_action reset_fan_speed_to_default_tasks[];
-extern const pem_event_action power_down_asic_tasks[];
-extern const pem_event_action disable_stutter_mode_tasks[];
-extern const pem_event_action set_connected_standby_tasks[];
-extern const pem_event_action block_hw_access_tasks[];
-extern const pem_event_action unblock_hw_access_tasks[];
-extern const pem_event_action resume_connected_standby_tasks[];
-extern const pem_event_action notify_smu_resume_tasks[];
-extern const pem_event_action reset_display_configCounter_tasks[];
-extern const pem_event_action update_dal_configuration_tasks[];
-extern const pem_event_action vari_bright_resume_tasks[];
-extern const pem_event_action notify_hw_power_source_tasks[];
-extern const pem_event_action process_vbios_event_info_tasks[];
-extern const pem_event_action enable_gfx_clock_gating_tasks[];
-extern const pem_event_action enable_gfx_voltage_island_power_gating_tasks[];
-extern const pem_event_action reset_clock_gating_tasks[];
-extern const pem_event_action notify_smu_vpu_recovery_end_tasks[];
-extern const pem_event_action disable_vpu_cap_tasks[];
-extern const pem_event_action execute_escape_sequence_tasks[];
-extern const pem_event_action notify_power_state_change_tasks[];
-extern const pem_event_action enable_cgpg_tasks[];
-extern const pem_event_action disable_cgpg_tasks[];
-extern const pem_event_action enable_user_2d_performance_tasks[];
-extern const pem_event_action add_user_2d_performance_state_tasks[];
-extern const pem_event_action delete_user_2d_performance_state_tasks[];
-extern const pem_event_action disable_user_2d_performance_tasks[];
-extern const pem_event_action enable_stutter_mode_tasks[];
-extern const pem_event_action enable_disable_bapm_tasks[];
-extern const pem_event_action reset_boot_state_tasks[];
-extern const pem_event_action create_new_user_performance_state_tasks[];
-extern const pem_event_action initialize_thermal_controller_tasks[];
-extern const pem_event_action uninitialize_thermal_controller_tasks[];
-extern const pem_event_action set_cpu_power_state[];
-#endif /* _EVENT_SUB_CHAINS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
deleted file mode 100644
index 8c4ebaae1e0c..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-#include "eventmanager.h"
-#include "hardwaremanager.h"
-#include "eventtasks.h"
-#include "power_state.h"
-#include "hwmgr.h"
-#include "amd_powerplay.h"
-#include "psm.h"
-
-#define TEMP_RANGE_MIN (90 * 1000)
-#define TEMP_RANGE_MAX (120 * 1000)
-
-int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
-
- if (eventmgr == NULL || eventmgr->hwmgr == NULL)
- return -EINVAL;
-
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level);
-
- return 0;
-}
-
-/* eventtasks_generic.c */
-int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct pp_hwmgr *hwmgr;
-
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- hwmgr = eventmgr->hwmgr;
- if (event_data->pnew_power_state != NULL)
- hwmgr->request_ps = event_data->pnew_power_state;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
- psm_adjust_power_state_dynamic(eventmgr, event_data->skip_state_adjust_rules);
- else
- psm_adjust_power_state_static(eventmgr, event_data->skip_state_adjust_rules);
-
- return 0;
-}
-
-int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_power_down_asic(eventmgr->hwmgr);
-}
-
-int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
- return psm_set_states(eventmgr, &(event_data->requested_state_id));
-
- return 0;
-}
-
-int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return pem_unregister_interrupts(eventmgr);
-}
-
-int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- int result;
-
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_Boot,
- &(event_data->requested_state_id)
- );
-
- if (0 == result)
- pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
- else
- pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
-
- return result;
-}
-
-int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_enable_dynamic_state_management(eventmgr->hwmgr);
-}
-
-int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_disable_dynamic_state_management(eventmgr->hwmgr);
-}
-
-int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_enable_clock_power_gatings(eventmgr->hwmgr);
-}
-
-int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_powerdown_uvd(eventmgr->hwmgr);
-}
-
-int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- phm_powergate_uvd(eventmgr->hwmgr, true);
- phm_powergate_vce(eventmgr->hwmgr, true);
- return 0;
-}
-
-int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- phm_disable_clock_power_gatings(eventmgr->hwmgr);
- return 0;
-}
-
-int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_disable_smc_firmware_ctf(eventmgr->hwmgr);
-}
-
-int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_setup_asic(eventmgr->hwmgr);
-}
-
-int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_store_dal_configuration(struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config)
-{
- /* TODO */
- return 0;
- /*phm_store_dal_configuration_data(eventmgr->hwmgr, display_config) */
-}
-
-int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- return phm_display_configuration_changed(eventmgr->hwmgr);
-}
-
-int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return 0;
-}
-
-int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- return phm_notify_smc_display_config_after_ps_adjustment(eventmgr->hwmgr);
-}
-
-int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- eventmgr->block_adjust_power_state = true;
- /* to do PHM_ResetIPSCounter(pEventMgr->pHwMgr);*/
- return 0;
-}
-
-int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- eventmgr->block_adjust_power_state = false;
- return 0;
-}
-
-int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_set_cpu_power_state(eventmgr->hwmgr);
-}
-
-/*powersaving*/
-
-int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_clock_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-
-int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-
-/* performance */
-int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
- return psm_set_states(eventmgr, &(event_data->requested_state_id));
-
- return 0;
-}
-
-int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- int result;
-
- if (eventmgr->features[PP_Feature_PowerPlay].supported &&
- !(eventmgr->features[PP_Feature_PowerPlay].enabled))
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_Boot,
- &(event_data->requested_state_id));
- else if (eventmgr->features[PP_Feature_User2DPerformance].enabled)
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_User2DPerformance,
- &(event_data->requested_state_id));
- else
- result = psm_get_ui_state(eventmgr, PP_StateUILabel_Performance,
- &(event_data->requested_state_id));
-
- if (0 == result)
- pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
- else
- pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
-
- return result;
-}
-
-int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
-restart_search:
- for (i = 0; i < table_entries; i++) {
- if (state->classification.ui_label & event_data->requested_ui_label) {
- event_data->pnew_power_state = state;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
-
- switch (event_data->requested_ui_label) {
- case PP_StateUILabel_Battery:
- case PP_StateUILabel_Balanced:
- event_data->requested_ui_label = PP_StateUILabel_Performance;
- goto restart_search;
- default:
- break;
- }
- return -1;
-}
-
-int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct PP_TemperatureRange range;
-
- range.max = TEMP_RANGE_MAX;
- range.min = TEMP_RANGE_MIN;
-
- if (eventmgr == NULL || eventmgr->platform_descriptor == NULL)
- return -EINVAL;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ThermalController))
- return phm_start_thermal_controller(eventmgr->hwmgr, &range);
-
- return 0;
-}
-
-int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_stop_thermal_controller(eventmgr->hwmgr);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
deleted file mode 100644
index 37e7ca5a58e0..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENT_TASKS_H_
-#define _EVENT_TASKS_H_
-#include "eventmgr.h"
-
-struct amd_display_configuration;
-
-/* eventtasks_generic.c */
-int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_store_dal_configuration (struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config);
-int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-/*powersaving*/
-
-int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-
-/* performance */
-int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-/*thermal */
-int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-
-#endif /* _EVENT_TASKS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
deleted file mode 100644
index 489908887e9c..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "psm.h"
-
-int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->classification.ui_label & ui_label) {
- *state_id = state->id;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->classification.flags & flag) {
- *state_id = state->id;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
-
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->id == *state_id) {
- memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
-{
-
- struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
- struct pp_hwmgr *hwmgr;
- bool equal;
-
- if (skip)
- return 0;
-
- hwmgr = eventmgr->hwmgr;
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- if (requested == NULL)
- return 0;
-
- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
-
- if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
- equal = false;
-
- if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
- phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
- memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
- }
- return 0;
-}
-
-int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index d13fdadbbf9e..a212c27f2e17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -1,16 +1,37 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for the 'hw manager' sub-component of powerplay.
# It provides the hardware management services for the driver.
-HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
+HARDWARE_MGR = hwmgr.o processpptables.o \
hardwaremanager.o pp_acpi.o cz_hwmgr.o \
cz_clockpowergating.o pppcielanes.o\
process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
smu7_clockpowergating.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
- vega10_thermal.o pp_overdriver.o rv_hwmgr.o
+ vega10_thermal.o rv_hwmgr.o pp_psm.o\
+ pp_overdriver.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b33935fcf428..44de0874629f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -103,16 +103,6 @@ int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
-{
- return 0;
-}
-
-static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
-{
- return 0;
-}
-
int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -123,12 +113,12 @@ int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
PHM_PlatformCaps_UVDDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= UVD_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
return 0;
@@ -144,12 +134,12 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
PHM_PlatformCaps_VCEDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= VCE_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
@@ -157,7 +147,7 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
}
-int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -183,10 +173,9 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cz_dpm_update_uvd_dpm(hwmgr, false);
}
- return 0;
}
-int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -215,29 +204,6 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_CG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
- return 0;
}
-
- return 0;
}
-
-static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
- /*we don't need an exit table here, because there is only D3 cold on Kv*/
- {
- .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating,
- .tableFunction = cz_tf_uvd_power_gating_initialize
- },
- {
- .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating,
- .tableFunction = cz_tf_vce_power_gating_initialize
- },
- /* to do { NULL, cz_tf_xdma_power_gating_enable }, */
- { }
-};
-
-const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_enable_clock_power_gatings_list
-};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
index 1954ceaed439..92f707bc46e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
@@ -29,8 +29,8 @@
extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index bc839ff0bdd0..ad1f6b57884b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -162,8 +162,8 @@ static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
if (cz_hwmgr->max_sclk_level == 0) {
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel);
- cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr->smumgr) + 1;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
+ cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
}
return cz_hwmgr->max_sclk_level;
@@ -440,14 +440,7 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_tf_reset_active_process_mask(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
-{
- return 0;
-}
-
-static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
struct SMU8_Fusion_ClkTable *clock_table;
int ret;
@@ -469,7 +462,7 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
if (!hwmgr->need_pp_table_upload)
return 0;
- ret = smum_download_powerplay_table(hwmgr->smumgr, &table);
+ ret = smum_download_powerplay_table(hwmgr, &table);
PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
"Fail to get clock table from SMU!", return -EINVAL;);
@@ -561,13 +554,12 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
(uint8_t)dividers.pll_post_divider;
}
- ret = smum_upload_powerplay_table(hwmgr->smumgr);
+ ret = smum_upload_powerplay_table(hwmgr);
return ret;
}
-static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -593,8 +585,7 @@ static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_uvd_clock_voltage_dependency_table *table =
@@ -607,8 +598,8 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->uvd_dpm.soft_min_clk = 0;
cz_hwmgr->uvd_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxUvdLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].vclk;
@@ -621,8 +612,7 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_vce_clock_voltage_dependency_table *table =
@@ -635,8 +625,8 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->vce_dpm.soft_min_clk = 0;
cz_hwmgr->vce_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxEclkLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].ecclk;
@@ -649,8 +639,7 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_acp_clock_voltage_dependency_table *table =
@@ -663,8 +652,8 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->acp_dpm.soft_min_clk = 0;
cz_hwmgr->acp_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxAclkLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].acpclk;
@@ -676,8 +665,7 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -686,22 +674,16 @@ static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->samu_power_gated = false;
cz_hwmgr->acp_power_gated = false;
cz_hwmgr->pgacpinit = true;
-
- return 0;
}
-static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->low_sclk_interrupt_threshold = 0;
-
- return 0;
}
-static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+
+static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -722,12 +704,12 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
- pr_info("min_core_set_clock not set\n");
+ pr_debug("min_core_set_clock not set\n");
if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkHardMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.hard_min_clk,
@@ -753,7 +735,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
cz_hwmgr->sclk_dpm.soft_min_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
@@ -764,7 +746,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
PHM_PlatformCaps_StablePState) &&
cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
cz_hwmgr->sclk_dpm.soft_max_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -774,9 +756,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
@@ -786,7 +766,7 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepSclk,
clks);
}
@@ -794,77 +774,84 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr =
(struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
cz_hwmgr->sclk_dpm.soft_max_clk);
return 0;
}
-static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
+ struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+ if (hw_data->is_nb_dpm_enabled) {
+ if (enable) {
+ PP_DBG_LOG("enable Low Memory PState.\n");
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableLowMemoryPstate,
+ (lock ? 1 : 0));
+ } else {
+ PP_DBG_LOG("disable Low Memory PState.\n");
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableLowMemoryPstate,
+ (lock ? 1 : 0));
+ }
+ }
+
return 0;
}
-
-static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
unsigned long dpm_features = 0;
- if (!cz_hwmgr->is_nb_dpm_enabled) {
- PP_DBG_LOG("enabling ALL SMU features.\n");
+ if (cz_hwmgr->is_nb_dpm_enabled) {
+ cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_EnableAllSmuFeatures,
+ hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures,
dpm_features);
if (ret == 0)
- cz_hwmgr->is_nb_dpm_enabled = true;
+ cz_hwmgr->is_nb_dpm_enabled = false;
}
return ret;
}
-static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
+static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
- if (hw_data->is_nb_dpm_enabled) {
- if (enable) {
- PP_DBG_LOG("enable Low Memory PState.\n");
+ int ret = 0;
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_EnableLowMemoryPstate,
- (lock ? 1 : 0));
- } else {
- PP_DBG_LOG("disable Low Memory PState.\n");
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ unsigned long dpm_features = 0;
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_DisableLowMemoryPstate,
- (lock ? 1 : 0));
- }
+ if (!cz_hwmgr->is_nb_dpm_enabled) {
+ PP_DBG_LOG("enabling ALL SMU features.\n");
+ dpm_features |= NB_DPM_MASK;
+ ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features);
+ if (ret == 0)
+ cz_hwmgr->is_nb_dpm_enabled = true;
}
- return 0;
+ return ret;
}
-static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
bool disable_switch;
bool enable_low_mem_state;
@@ -886,64 +873,64 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
return 0;
}
-static const struct phm_master_table_item cz_set_power_state_list[] = {
- { .tableFunction = cz_tf_update_sclk_limit },
- { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold },
- { .tableFunction = cz_tf_set_watermark_threshold },
- { .tableFunction = cz_tf_set_enabled_levels },
- { .tableFunction = cz_tf_enable_nb_dpm },
- { .tableFunction = cz_tf_update_low_mem_pstate },
- { }
-};
+static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ int ret = 0;
-static const struct phm_master_table_header cz_set_power_state_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_set_power_state_list
-};
+ cz_update_sclk_limit(hwmgr);
+ cz_set_deep_sleep_sclk_threshold(hwmgr);
+ cz_set_watermark_threshold(hwmgr);
+ ret = cz_enable_nb_dpm(hwmgr);
+ if (ret)
+ return ret;
+ cz_update_low_mem_pstate(hwmgr, input);
-static const struct phm_master_table_item cz_setup_asic_list[] = {
- { .tableFunction = cz_tf_reset_active_process_mask },
- { .tableFunction = cz_tf_upload_pptable_to_smu },
- { .tableFunction = cz_tf_init_sclk_limit },
- { .tableFunction = cz_tf_init_uvd_limit },
- { .tableFunction = cz_tf_init_vce_limit },
- { .tableFunction = cz_tf_init_acp_limit },
- { .tableFunction = cz_tf_init_power_gate_state },
- { .tableFunction = cz_tf_init_sclk_threshold },
- { }
+ return 0;
};
-static const struct phm_master_table_header cz_setup_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_setup_asic_list
-};
-static int cz_tf_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+
+ ret = cz_upload_pptable_to_smu(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_sclk_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_uvd_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_vce_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_acp_limit(hwmgr);
+ if (ret)
+ return ret;
+
+ cz_init_power_gate_state(hwmgr);
+ cz_init_sclk_threshold(hwmgr);
+
+ return 0;
+}
+
+static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
hw_data->disp_clk_bypass_pending = false;
hw_data->disp_clk_bypass = false;
-
- return 0;
}
-static int cz_tf_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
- hw_data->is_nb_dpm_enabled = false;
- return 0;
+ hw_data->is_nb_dpm_enabled = false;
}
-static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
@@ -951,63 +938,68 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
hw_data->cc6_settings.cpu_pstate_separation_time = 0;
hw_data->cc6_settings.cpu_cc6_disable = false;
hw_data->cc6_settings.cpu_pstate_disable = false;
-
- return 0;
}
-static const struct phm_master_table_item cz_power_down_asic_list[] = {
- { .tableFunction = cz_tf_power_up_display_clock_sys_pll },
- { .tableFunction = cz_tf_clear_nb_dpm_flag },
- { .tableFunction = cz_tf_reset_cc6_data },
- { }
-};
-
-static const struct phm_master_table_header cz_power_down_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_power_down_asic_list
+static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ cz_power_up_display_clock_sys_pll(hwmgr);
+ cz_clear_nb_dpm_flag(hwmgr);
+ cz_reset_cc6_data(hwmgr);
+ return 0;
};
-static int cz_tf_program_voting_clients(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
{
PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
- return 0;
}
-static int cz_tf_start_dpm(struct pp_hwmgr *hwmgr, void *input, void *output,
- void *storage, int result)
+static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int cz_start_dpm(struct pp_hwmgr *hwmgr)
{
- int res = 0xff;
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- unsigned long dpm_features = 0;
cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
- dpm_features |= SCLK_DPM_MASK;
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- dpm_features);
+ SCLK_DPM_MASK);
+}
+
+static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ unsigned long dpm_features = 0;
- return res;
+ if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
+ dpm_features |= SCLK_DPM_MASK;
+ cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features);
+ }
+ return ret;
}
-static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1016,13 +1008,11 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->acp_boot_level = 0xff;
- return 0;
}
static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
@@ -1031,67 +1021,52 @@ static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
int result;
unsigned long features;
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetFeatureStatus, 0);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
if (result == 0) {
- features = smum_get_argument(hwmgr->smumgr);
+ features = smum_get_argument(hwmgr);
if (features & check_feature)
return true;
}
- return result;
+ return false;
}
-static int cz_tf_check_for_dpm_disabled(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr)
{
if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return PP_Result_TableImmediateExit;
- return 0;
+ return true;
+ return false;
}
-static int cz_tf_enable_didt(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- /* TO DO */
- return 0;
-}
+ if (!cz_check_for_dpm_enabled(hwmgr)) {
+ pr_info("dpm has been disabled\n");
+ return 0;
+ }
+ cz_disable_nb_dpm(hwmgr);
-static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
-{
- if (!cz_dpm_check_smu_features(hwmgr,
- SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return PP_Result_TableImmediateExit;
- return 0;
-}
+ cz_clear_voting_clients(hwmgr);
+ if (cz_stop_dpm(hwmgr))
+ return -EINVAL;
-static const struct phm_master_table_item cz_disable_dpm_list[] = {
- { .tableFunction = cz_tf_check_for_dpm_enabled },
- { },
+ return 0;
};
+static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ if (cz_check_for_dpm_enabled(hwmgr)) {
+ pr_info("dpm has been enabled\n");
+ return 0;
+ }
-static const struct phm_master_table_header cz_disable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_disable_dpm_list
-};
-
-static const struct phm_master_table_item cz_enable_dpm_list[] = {
- { .tableFunction = cz_tf_check_for_dpm_disabled },
- { .tableFunction = cz_tf_program_voting_clients },
- { .tableFunction = cz_tf_start_dpm },
- { .tableFunction = cz_tf_program_bootup_state },
- { .tableFunction = cz_tf_enable_didt },
- { .tableFunction = cz_tf_reset_acp_boot_level },
- { },
-};
+ cz_program_voting_clients(hwmgr);
+ if (cz_start_dpm(hwmgr))
+ return -EINVAL;
+ cz_program_bootup_state(hwmgr);
+ cz_reset_acp_boot_level(hwmgr);
-static const struct phm_master_table_header cz_enable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_enable_dpm_list
+ return 0;
};
static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
@@ -1138,7 +1113,11 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cz_ps->action = cz_current_ps->action;
- if (!force_high && (cz_ps->action == FORCE_HIGH))
+ if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
+ else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
+ cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
+ else if (!force_high && (cz_ps->action == FORCE_HIGH))
cz_ps->action = CANCEL_FORCE_HIGH;
else if (force_high && (cz_ps->action != FORCE_HIGH))
cz_ps->action = FORCE_HIGH;
@@ -1173,62 +1152,16 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
cz_construct_boot_state(hwmgr);
- result = phm_construct_table(hwmgr, &cz_setup_asic_master,
- &(hwmgr->setup_asic));
- if (result != 0) {
- pr_err("Fail to construct setup ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &cz_power_down_asic_master,
- &(hwmgr->power_down_asic));
- if (result != 0) {
- pr_err("Fail to construct power down ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
- &(hwmgr->disable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to disable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
- &(hwmgr->enable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to enable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &cz_set_power_state_master,
- &(hwmgr->set_power_state));
- if (result != 0) {
- pr_err("Fail to construct set_power_state\n");
- return result;
- }
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
- result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
- if (result != 0) {
- pr_err("Fail to construct enable_clock_power_gatings\n");
- return result;
- }
return result;
}
static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
if (hwmgr != NULL) {
- phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings));
- phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
- phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
- phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
-
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
@@ -1240,13 +1173,13 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1278,13 +1211,13 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
cz_hwmgr->sclk_dpm.soft_max_clk = clock;
cz_hwmgr->sclk_dpm.hard_max_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1297,13 +1230,13 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMax));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
@@ -1312,106 +1245,25 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
-{
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- sclk,
- PPSMC_MSG_SetSclkSoftMin));
-
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- sclk,
- PPSMC_MSG_SetSclkSoftMax));
- return 0;
-}
-
-static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
-{
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
- int32_t tmp_sclk;
- int32_t count;
-
- tmp_sclk = table->entries[table->count-1].clk * 70 / 100;
-
- for (count = table->count-1; count >= 0; count--) {
- if (tmp_sclk >= table->entries[count].clk) {
- tmp_sclk = table->entries[count].clk;
- *sclk = tmp_sclk;
- break;
- }
- }
- if (count < 0)
- *sclk = table->entries[0].clk;
-
- return 0;
-}
-
static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
- uint32_t sclk = 0;
int ret = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = cz_phm_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = cz_phm_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = cz_phm_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = cz_get_profiling_clk(hwmgr, &sclk);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
- cz_phm_force_dpm_sclk(hwmgr, sclk);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -1422,27 +1274,18 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerOFF);
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
return 0;
}
int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDynamicPowerGating)) {
- return smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 1);
- } else {
- return smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 0);
- }
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
+ return smum_send_msg_to_smc_with_parameter(
+ hwmgr,
+ PPSMC_MSG_UVDPowerON,
+ PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
}
return 0;
@@ -1456,16 +1299,16 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
if (!bgate) {
/* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+ hwmgr->en_umd_pstate) {
cz_hwmgr->uvd_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].vclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetUvdHardMin,
- cz_get_uvd_level(hwmgr,
- cz_hwmgr->uvd_dpm.hard_min_clk,
- PPSMC_MSG_SetUvdHardMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUvdHardMin,
+ cz_get_uvd_level(hwmgr,
+ cz_hwmgr->uvd_dpm.hard_min_clk,
+ PPSMC_MSG_SetUvdHardMin));
cz_enable_disable_uvd_dpm(hwmgr, true);
} else {
@@ -1485,32 +1328,32 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+ hwmgr->en_umd_pstate) {
cz_hwmgr->vce_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
} else {
/*Program HardMin based on the vce_arbiter.ecclk */
if (hwmgr->vce_arbiter.ecclk == 0) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkSoftMin, 1);
} else {
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
}
}
return 0;
@@ -1518,30 +1361,28 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
-static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
return cz_hwmgr->sys_info.bootup_uma_clock;
}
-static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct cz_power_state *cz_ps;
@@ -1679,7 +1520,7 @@ static void cz_hw_print_display_cfg(
PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
data);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplaySizePowerParams,
data);
}
@@ -1744,10 +1585,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
mask);
break;
@@ -1989,7 +1830,7 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
if (0 == result) {
activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
@@ -2012,10 +1853,36 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
}
+static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrHiVirtual,
+ mc_addr_hi);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrLoVirtual,
+ mc_addr_low);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrHiPhysical,
+ virtual_addr_hi);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrLoPhysical,
+ virtual_addr_low);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramBufferSize,
+ size);
+ return 0;
+}
+
+
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
- .asic_setup = NULL,
.apply_state_adjust_rules = cz_apply_state_adjust_rules,
.force_dpm_level = cz_dpm_force_dpm_level,
.get_power_state_size = cz_get_power_state_size,
@@ -2036,7 +1903,14 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
.get_clock_by_type = cz_get_clock_by_type,
.get_max_high_clocks = cz_get_max_high_clocks,
+ .get_temperature = cz_thermal_get_temperature,
.read_sensor = cz_read_sensor,
+ .power_off_asic = cz_power_off_asic,
+ .asic_setup = cz_setup_asic_task,
+ .dynamic_state_management_enable = cz_enable_dpm_tasks,
+ .power_state_set = cz_set_power_state_tasks,
+ .dynamic_state_management_disable = cz_disable_dpm_tasks,
+ .notify_cac_buffer_info = cz_notify_cac_buffer_info,
};
int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
deleted file mode 100644
index bc7d8bd7e7cb..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "hwmgr.h"
-
-static int phm_run_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input,
- void *output,
- void *temp_storage)
-{
- int result = 0;
- phm_table_function *function;
-
- if (rt_table->function_list == NULL) {
- pr_debug("this function not implement!\n");
- return 0;
- }
-
- for (function = rt_table->function_list; NULL != *function; function++) {
- int tmp = (*function)(hwmgr, input, output, temp_storage, result);
-
- if (tmp == PP_Result_TableImmediateExit)
- break;
- if (tmp) {
- if (0 == result)
- result = tmp;
- if (rt_table->exit_error)
- break;
- }
- }
-
- return result;
-}
-
-int phm_dispatch_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input, void *output)
-{
- int result;
- void *temp_storage;
-
- if (hwmgr == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter!\n");
- return -EINVAL;
- }
-
- if (0 != rt_table->storage_size) {
- temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL);
- if (temp_storage == NULL) {
- pr_err("Could not allocate table temporary storage\n");
- return -ENOMEM;
- }
- } else {
- temp_storage = NULL;
- }
-
- result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
-
- kfree(temp_storage);
-
- return result;
-}
-
-int phm_construct_table(struct pp_hwmgr *hwmgr,
- const struct phm_master_table_header *master_table,
- struct phm_runtime_table_header *rt_table)
-{
- uint32_t function_count = 0;
- const struct phm_master_table_item *table_item;
- uint32_t size;
- phm_table_function *run_time_list;
- phm_table_function *rtf;
-
- if (hwmgr == NULL || master_table == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter!\n");
- return -EINVAL;
- }
-
- for (table_item = master_table->master_list;
- NULL != table_item->tableFunction; table_item++) {
- if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
- (table_item->isFunctionNeededInRuntimeTable(hwmgr)))
- function_count++;
- }
-
- size = (function_count + 1) * sizeof(phm_table_function);
- run_time_list = kzalloc(size, GFP_KERNEL);
-
- if (NULL == run_time_list)
- return -ENOMEM;
-
- rtf = run_time_list;
- for (table_item = master_table->master_list;
- NULL != table_item->tableFunction; table_item++) {
- if ((rtf - run_time_list) > function_count) {
- pr_err("Check function results have changed\n");
- kfree(run_time_list);
- return -EINVAL;
- }
-
- if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
- (table_item->isFunctionNeededInRuntimeTable(hwmgr))) {
- *(rtf++) = table_item->tableFunction;
- }
- }
-
- if ((rtf - run_time_list) > function_count) {
- pr_err("Check function results have changed\n");
- kfree(run_time_list);
- return -EINVAL;
- }
-
- *rtf = NULL;
- rt_table->function_list = run_time_list;
- rt_table->exit_error = (0 != (master_table->flags & PHM_MasterTableFlag_ExitOnError));
- rt_table->storage_size = master_table->storage_size;
- return 0;
-}
-
-int phm_destroy_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table)
-{
- if (hwmgr == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter\n");
- return -EINVAL;
- }
-
- if (NULL == rt_table->function_list)
- return 0;
-
- kfree(rt_table->function_list);
-
- rt_table->function_list = NULL;
- rt_table->storage_size = 0;
- rt_table->exit_error = false;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index fcc722ea7649..623cff90233d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -26,35 +26,22 @@
#include "hardwaremanager.h"
#include "power_state.h"
+
+#define TEMP_RANGE_MIN (0)
+#define TEMP_RANGE_MAX (80 * 1000)
+
#define PHM_FUNC_CHECK(hw) \
do { \
if ((hw) == NULL || (hw)->hwmgr_func == NULL) \
return -EINVAL; \
} while (0)
-bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
-{
- return hwmgr->block_hw_access;
-}
-
-int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block)
-{
- hwmgr->block_hw_access = block;
- return 0;
-}
-
int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->asic_setup)
- return hwmgr->hwmgr_func->asic_setup(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->asic_setup)
+ return hwmgr->hwmgr_func->asic_setup(hwmgr);
return 0;
}
@@ -63,14 +50,8 @@ int phm_power_down_asic(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->power_off_asic)
- return hwmgr->hwmgr_func->power_off_asic(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->power_down_asic),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->power_off_asic)
+ return hwmgr->hwmgr_func->power_off_asic(hwmgr);
return 0;
}
@@ -86,13 +67,8 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
states.pcurrent_state = pcurrent_state;
states.pnew_state = pnew_power_state;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->power_state_set)
- return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->power_state_set)
+ return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
return 0;
}
@@ -103,15 +79,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
bool enabled;
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
- ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
- } else {
- ret = phm_dispatch_table(hwmgr,
- &(hwmgr->enable_dynamic_state_management),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
enabled = ret == 0;
@@ -127,15 +96,8 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (hwmgr->hwmgr_func->dynamic_state_management_disable)
- ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
- } else {
- ret = phm_dispatch_table(hwmgr,
- &(hwmgr->disable_dynamic_state_management),
- NULL, NULL);
- }
+ if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
enabled = ret == 0 ? false : true;
@@ -193,35 +155,13 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powergate_uvd != NULL)
- return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- return 0;
-}
-
-int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powergate_vce != NULL)
- return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- return 0;
-}
-
int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
- return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
+ return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
+
return 0;
}
@@ -229,11 +169,9 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
- return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
- }
+ if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
+ return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
+
return 0;
}
@@ -242,12 +180,9 @@ int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->display_config_changed)
- hwmgr->hwmgr_func->display_config_changed(hwmgr);
- } else
- return phm_dispatch_table(hwmgr, &hwmgr->display_configuration_changed, NULL, NULL);
+ if (NULL != hwmgr->hwmgr_func->display_config_changed)
+ hwmgr->hwmgr_func->display_config_changed(hwmgr);
+
return 0;
}
@@ -255,9 +190,7 @@ int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface))
- if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
+ if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
return 0;
@@ -277,10 +210,10 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
{
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->register_internal_thermal_interrupt == NULL)
- return -EINVAL;
+ if (hwmgr->hwmgr_func->register_internal_thermal_interrupt != NULL)
+ return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
- return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
+ return 0;
}
/**
@@ -292,7 +225,21 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
*/
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range)
{
- return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), temperature_range, NULL);
+ struct PP_TemperatureRange range;
+
+ if (temperature_range == NULL) {
+ range.max = TEMP_RANGE_MAX;
+ range.min = TEMP_RANGE_MIN;
+ } else {
+ range.max = temperature_range->max;
+ range.min = temperature_range->min;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController)
+ && hwmgr->hwmgr_func->start_thermal_controller != NULL)
+ return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
+
+ return 0;
}
@@ -323,6 +270,9 @@ int phm_check_states_equal(struct pp_hwmgr *hwmgr,
int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
const struct amd_pp_display_configuration *display_config)
{
+ int index = 0;
+ int number_of_active_display = 0;
+
PHM_FUNC_CHECK(hwmgr);
if (display_config == NULL)
@@ -330,6 +280,17 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
hwmgr->display_config = *display_config;
+ if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
+ hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk);
+
+ for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) {
+ if (hwmgr->display_config.displays[index].controller_id != 0)
+ number_of_active_display++;
+ }
+
+ if (NULL != hwmgr->hwmgr_func->set_active_display_count)
+ hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);
+
if (hwmgr->hwmgr_func->store_cc6_data == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 9547f265a8bb..ce59e0e67cb2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -26,8 +26,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
-#include "cgs_common.h"
#include "power_state.h"
#include "hwmgr.h"
#include "pppcielanes.h"
@@ -35,21 +35,100 @@
#include "ppsmc.h"
#include "pp_acpi.h"
#include "amd_acpi.h"
+#include "pp_psm.h"
-extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern const struct pp_smumgr_func ci_smu_funcs;
+extern const struct pp_smumgr_func cz_smu_funcs;
+extern const struct pp_smumgr_func iceland_smu_funcs;
+extern const struct pp_smumgr_func tonga_smu_funcs;
+extern const struct pp_smumgr_func fiji_smu_funcs;
+extern const struct pp_smumgr_func polaris10_smu_funcs;
+extern const struct pp_smumgr_func vega10_smu_funcs;
+extern const struct pp_smumgr_func rv_smu_funcs;
+extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
uint8_t convert_to_vid(uint16_t vddc)
{
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}
+static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr,
+ struct cgs_system_info *sys_info)
+{
+ sys_info->size = sizeof(struct cgs_system_info);
+ sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN;
+
+ return cgs_query_system_info(hwmgr->device, sys_info);
+}
+
+static int phm_thermal_l2h_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU over temperature range detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static int phm_thermal_h2l_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static int phm_ctf_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static const struct cgs_irq_src_funcs thermal_irq_src[3] = {
+ { .handler = phm_thermal_l2h_irq },
+ { .handler = phm_thermal_h2l_irq },
+ { .handler = phm_ctf_irq }
+};
+
int hwmgr_early_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
@@ -62,7 +141,6 @@ int hwmgr_early_init(struct pp_instance *handle)
return -ENOMEM;
handle->hwmgr = hwmgr;
- hwmgr->smumgr = handle->smu_mgr;
hwmgr->device = handle->device;
hwmgr->chip_family = handle->chip_family;
hwmgr->chip_id = handle->chip_id;
@@ -73,24 +151,38 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
+ hwmgr->fan_ctrl_is_in_default_mode = true;
+ hwmgr->reload_fw = 1;
switch (hwmgr->chip_family) {
+ case AMDGPU_FAMILY_CI:
+ hwmgr->smumgr_funcs = &ci_smu_funcs;
+ ci_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
+ hwmgr->pp_table_version = PP_TABLE_V0;
+ smu7_init_function_pointers(hwmgr);
+ break;
case AMDGPU_FAMILY_CZ:
+ hwmgr->smumgr_funcs = &cz_smu_funcs;
cz_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
+ hwmgr->smumgr_funcs = &iceland_smu_funcs;
topaz_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
break;
case CHIP_TONGA:
+ hwmgr->smumgr_funcs = &tonga_smu_funcs;
tonga_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
break;
case CHIP_FIJI:
+ hwmgr->smumgr_funcs = &fiji_smu_funcs;
fiji_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
@@ -98,6 +190,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
+ hwmgr->smumgr_funcs = &polaris10_smu_funcs;
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
@@ -109,6 +202,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
default:
@@ -118,6 +212,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case AMDGPU_FAMILY_RV:
switch (hwmgr->chip_id) {
case CHIP_RAVEN:
+ hwmgr->smumgr_funcs = &rv_smu_funcs;
rv_init_function_pointers(hwmgr);
break;
default:
@@ -131,80 +226,6 @@ int hwmgr_early_init(struct pp_instance *handle)
return 0;
}
-static int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- unsigned int i;
- unsigned int table_entries;
- struct pp_power_state *state;
- int size;
-
- if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
- return -EINVAL;
-
- if (hwmgr->hwmgr_func->get_power_state_size == NULL)
- return -EINVAL;
-
- hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
-
- hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
- sizeof(struct pp_power_state);
-
- hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
- if (hwmgr->ps == NULL)
- return -ENOMEM;
-
- hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->request_ps == NULL) {
- kfree(hwmgr->ps);
- hwmgr->ps = NULL;
- return -ENOMEM;
- }
-
- hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->current_ps == NULL) {
- kfree(hwmgr->request_ps);
- kfree(hwmgr->ps);
- hwmgr->request_ps = NULL;
- hwmgr->ps = NULL;
- return -ENOMEM;
- }
-
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
-
- if (state->classification.flags & PP_StateClassificationFlag_Boot) {
- hwmgr->boot_ps = state;
- memcpy(hwmgr->current_ps, state, size);
- memcpy(hwmgr->request_ps, state, size);
- }
-
- state->id = i + 1; /* assigned unique num for every power state id */
-
- if (state->classification.flags & PP_StateClassificationFlag_Uvd)
- hwmgr->uvd_ps = state;
- state = (struct pp_power_state *)((unsigned long)state + size);
- }
-
- return 0;
-}
-
-static int hw_fini_power_state_table(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr == NULL)
- return -EINVAL;
-
- kfree(hwmgr->current_ps);
- kfree(hwmgr->request_ps);
- kfree(hwmgr->ps);
- hwmgr->request_ps = NULL;
- hwmgr->ps = NULL;
- hwmgr->current_ps = NULL;
- return 0;
-}
-
int hwmgr_hw_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
@@ -228,9 +249,26 @@ int hwmgr_hw_init(struct pp_instance *handle)
if (ret)
goto err1;
- ret = hw_init_power_state_table(hwmgr);
+ ret = psm_init_power_state_table(hwmgr);
+ if (ret)
+ goto err2;
+
+ ret = phm_setup_asic(hwmgr);
if (ret)
goto err2;
+
+ ret = phm_enable_dynamic_state_management(hwmgr);
+ if (ret)
+ goto err2;
+ ret = phm_start_thermal_controller(hwmgr, NULL);
+ ret |= psm_set_performance_states(hwmgr);
+ if (ret)
+ goto err2;
+
+ ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src);
+ if (ret)
+ goto err2;
+
return 0;
err2:
if (hwmgr->hwmgr_func->backend_fini)
@@ -247,19 +285,137 @@ int hwmgr_hw_fini(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
- if (handle == NULL)
+ if (handle == NULL || handle->hwmgr == NULL)
return -EINVAL;
hwmgr = handle->hwmgr;
+ phm_stop_thermal_controller(hwmgr);
+ psm_set_boot_states(hwmgr);
+ psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ phm_disable_dynamic_state_management(hwmgr);
+ phm_disable_clock_power_gatings(hwmgr);
+
if (hwmgr->hwmgr_func->backend_fini)
hwmgr->hwmgr_func->backend_fini(hwmgr);
if (hwmgr->pptable_func->pptable_fini)
hwmgr->pptable_func->pptable_fini(hwmgr);
- return hw_fini_power_state_table(hwmgr);
+ return psm_fini_power_state_table(hwmgr);
}
+int hwmgr_hw_suspend(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+ phm_disable_smc_firmware_ctf(hwmgr);
+ ret = psm_set_boot_states(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ if (ret)
+ return ret;
+ ret = phm_power_down_asic(hwmgr);
+
+ return ret;
+}
+int hwmgr_hw_resume(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+ ret = phm_setup_asic(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = phm_enable_dynamic_state_management(hwmgr);
+ if (ret)
+ return ret;
+ ret = phm_start_thermal_controller(hwmgr, NULL);
+ if (ret)
+ return ret;
+
+ ret |= psm_set_performance_states(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+
+ return ret;
+}
+
+static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
+{
+ switch (state) {
+ case POWER_STATE_TYPE_BATTERY:
+ return PP_StateUILabel_Battery;
+ case POWER_STATE_TYPE_BALANCED:
+ return PP_StateUILabel_Balanced;
+ case POWER_STATE_TYPE_PERFORMANCE:
+ return PP_StateUILabel_Performance;
+ default:
+ return PP_StateUILabel_None;
+ }
+}
+
+int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
+ void *input, void *output)
+{
+ int ret = 0;
+ struct pp_hwmgr *hwmgr;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+
+ switch (task_id) {
+ case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_set_cpu_power_state(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_set_performance_states(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ break;
+ case AMD_PP_TASK_ENABLE_USER_STATE:
+ {
+ enum amd_pm_state_type ps;
+ enum PP_StateUILabel requested_ui_label;
+ struct pp_power_state *requested_ps = NULL;
+
+ if (input == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+ ps = *(unsigned long *)input;
+
+ requested_ui_label = power_state_convert(ps);
+ ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps);
+ break;
+ }
+ case AMD_PP_TASK_COMPLETE_INIT:
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
/**
* Returns once the part of the register indicated by the mask has
* reached the given value.
@@ -294,7 +450,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
* reached the given value.The indirect space is described by giving
* the memory-mapped index of the indirect index register.
*/
-void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
@@ -302,14 +458,50 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
{
if (hwmgr == NULL || hwmgr->device == NULL) {
pr_err("Invalid Hardware Manager!");
- return;
+ return -EINVAL;
}
cgs_write_register(hwmgr->device, indirect_port, index);
- phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+}
+
+int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask)
+{
+ uint32_t i;
+ uint32_t cur_value;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ cur_value = cgs_read_register(hwmgr->device,
+ index);
+ if ((cur_value & mask) != (value & mask))
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == hwmgr->usec_timeout)
+ return -ETIME;
+ return 0;
}
+int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+ cgs_write_register(hwmgr->device, indirect_port, index);
+ return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
+ value, mask);
+}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
@@ -678,7 +870,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
for (i = 0; i < vddc_table->count; i++) {
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VddC_Request, req_volt);
return;
}
@@ -689,28 +881,8 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
@@ -735,7 +907,6 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
return;
}
@@ -784,7 +955,8 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
-
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -793,10 +965,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
-
if (hwmgr->chip_id != CHIP_POLARIS10)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
@@ -814,6 +982,8 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -822,15 +992,13 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
return 0;
}
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -844,14 +1012,25 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
+ return 0;
+}
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
+ PHM_PlatformCaps_EVV);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
return 0;
}
-int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
@@ -862,8 +1041,8 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EVV);
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
index e0766c5e3d74..8de384bf9a8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
@@ -1,1264 +1,1275 @@
-// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include "pp_overdriver.h"
#include <linux/errno.h>
-struct phm_fuses_default vega10_fuses_default[] = {
- {"0000001000010011111010101001010011011110000011100100100101100100",0x00003C96,0xFFFFE226,0x00000656,0x00002203,0xFFFFF201,0x000003FF,0x00002203,0xFFFFF201,0x000003FF},
- {"0000001000010011111010101001010011011110000010100001100010000100",0x00003CC5,0xFFFFE23A,0x0000064E,0x00002258,0xFFFFF1F7,0x000003FC,0x00002258,0xFFFFF1F7,0x000003FC},
- {"0000001000010011111010101001010011011110000011100011000110100100",0x00003CAF,0xFFFFE36E,0x00000602,0x00001E98,0xFFFFF569,0x00000357,0x00001E98,0xFFFFF569,0x00000357},
- {"0000001000010011111010101001010011011110001011000001000101000100",0x0000391A,0xFFFFE548,0x000005C9,0x00001B98,0xFFFFF707,0x00000324,0x00001B98,0xFFFFF707,0x00000324},
- {"0000001000010011111010101001010011011110001011000001100011000100",0x00003821,0xFFFFE674,0x00000597,0x00002196,0xFFFFF361,0x000003C0,0x00002196,0xFFFFF361,0x000003C0},
- {"0000001000010011111010101001010011011110001001100011100010000100",0x000044A2,0xFFFFDCB7,0x00000738,0x0000325C,0xFFFFE6A7,0x000005E6,0x0000325C,0xFFFFE6A7,0x000005E6},
- {"0000001000010011111010101001010011011110000010000010100100100100",0x00004057,0xFFFFE1CF,0x0000063C,0x00002E2E,0xFFFFEB62,0x000004FD,0x00002E2E,0xFFFFEB62,0x000004FD},
- {"0000001000010011111010101001010011011110001010000100100100100100",0x00003FD0,0xFFFFDF0F,0x000006E5,0x0000267C,0xFFFFEE2D,0x000004AB,0x0000267C,0xFFFFEE2D,0x000004AB},
- {"0000001000010011111010101001010011011110001010000000100100000100",0x00003F13,0xFFFFE010,0x000006AD,0x000020E7,0xFFFFF266,0x000003EC,0x000020E7,0xFFFFF266,0x000003EC},
- {"0000001000010011111010101001010011011110000010000010000001000100",0x00004088,0xFFFFDFAB,0x000006B6,0x0000252B,0xFFFFEFDB,0x00000458,0x0000252B,0xFFFFEFDB,0x00000458},
- {"0000001000010011111010101001010011011110001010000011100010000100",0x00003EF6,0xFFFFE017,0x000006AA,0x00001F67,0xFFFFF369,0x000003BE,0x00001F67,0xFFFFF369,0x000003BE},
- {"0000001000010011111010101001010011011110001011000010000110000100",0x00003CDD,0xFFFFE2A7,0x0000063C,0x000026C6,0xFFFFEF38,0x00000478,0x000026C6,0xFFFFEF38,0x00000478},
- {"0000001000010011111010101001010011011110000100000101000100100100",0x00003FA8,0xFFFFDF02,0x000006F0,0x000027FE,0xFFFFECF6,0x000004EA,0x000027FE,0xFFFFECF6,0x000004EA},
- {"0000001000010011111010101001010011011110001001100011100011000100",0x00004670,0xFFFFDC40,0x00000742,0x00003A7A,0xFFFFE1A7,0x000006B6,0x00003A7A,0xFFFFE1A7,0x000006B6},
- {"0000001000010011111010101001010011011110001011000011000000100100",0x00003CDC,0xFFFFE18C,0x00000683,0x00002A69,0xFFFFEBE7,0x00000515,0x00002A69,0xFFFFEBE7,0x00000515},
- {"0000001000010011111010101001010011011110000011100011100011000100",0x00003CEC,0xFFFFE38E,0x00000601,0x00002752,0xFFFFEFA7,0x00000453,0x00002752,0xFFFFEFA7,0x00000453},
- {"0000001000010011111010101001010011011110001011000001000100100100",0x000037D0,0xFFFFE634,0x000005A7,0x00001CD2,0xFFFFF644,0x00000348,0x00001CD2,0xFFFFF644,0x00000348},
- {"0000001000010011111010101001010011011110001010000011100101100100",0x00003DF5,0xFFFFE0A5,0x00000698,0x00001FD5,0xFFFFF30E,0x000003D1,0x00001FD5,0xFFFFF30E,0x000003D1},
- {"0000001000010011111010101001010011011110000010000010100011000100",0x00004201,0xFFFFE03E,0x00000688,0x00003206,0xFFFFE852,0x0000058A,0x00003206,0xFFFFE852,0x0000058A},
- {"0000001000010011111010101001010011011110001011000001100001100100",0x00003BED,0xFFFFE2F5,0x00000638,0x0000270D,0xFFFFEED0,0x0000048E,0x0000270D,0xFFFFEED0,0x0000048E},
- {"0000001000010011111010101001010011011110000010100001100100000100",0x00003E82,0xFFFFE1BE,0x00000654,0x000025FB,0xFFFFEFFA,0x00000448,0x000025FB,0xFFFFEFFA,0x00000448},
- {"0000001000010011111010101001010011011110001011000100000011000100",0x00003962,0xFFFFE4B9,0x000005EF,0x00002385,0xFFFFF156,0x00000423,0x00002385,0xFFFFF156,0x00000423},
- {"0000001000010011111010101001010011011110001011000000100101000100",0x00003D88,0xFFFFE21A,0x00000655,0x0000295A,0xFFFFED68,0x000004C4,0x0000295A,0xFFFFED68,0x000004C4},
- {"0000001000010011111010101001010011011110001011000001000100000100",0x00003AA4,0xFFFFE4A3,0x000005E0,0x000022EF,0xFFFFF250,0x000003EB,0x000022EF,0xFFFFF250,0x000003EB},
- {"0000001000010011111010101001010011011110000011100010100110100100",0x00003D97,0xFFFFE30D,0x0000060D,0x0000205D,0xFFFFF45D,0x00000380,0x0000205D,0xFFFFF45D,0x00000380},
- {"0000001000010011111010101001010011011110001011000100000010100100",0x000039B6,0xFFFFE446,0x00000605,0x00002325,0xFFFFF16C,0x0000041F,0x00002325,0xFFFFF16C,0x0000041F},
- {"0000001000010011111010101001010011011110001001100011100100000100",0x0000457E,0xFFFFDCF6,0x00000722,0x00003972,0xFFFFE27B,0x0000068E,0x00003972,0xFFFFE27B,0x0000068E},
- {"0000001000010011111010101001010011011110000010100001100100100100",0x00003FB8,0xFFFFE101,0x00000670,0x00002787,0xFFFFEEF5,0x00000471,0x00002787,0xFFFFEEF5,0x00000471},
- {"0000001000010011111010101001010011011110000011100011100010100100",0x00003BB2,0xFFFFE430,0x000005EA,0x000024A5,0xFFFFF162,0x00000409,0x000024A5,0xFFFFF162,0x00000409},
- {"0000001000010011111010101001010011011110000010000010000101000100",0x00003EC5,0xFFFFE1BD,0x0000064F,0x000022F0,0xFFFFF227,0x000003E8,0x000022F0,0xFFFFF227,0x000003E8},
- {"0000001000010011111010101001010011011110001011000011000101100100",0x000038A7,0xFFFFE59F,0x000005C1,0x000021CC,0xFFFFF2DF,0x000003D9,0x000021CC,0xFFFFF2DF,0x000003D9},
- {"0000001000010011111010101001010011011110001100100100000110000100",0x00002995,0xFFFFEF7A,0x0000044C,0x00001552,0xFFFFFB5D,0x00000292,0x00001552,0xFFFFFB5D,0x00000292},
- {"0000001000010011111010101001010011011110001011000100000001100100",0x00003B26,0xFFFFE2D3,0x00000649,0x000023B4,0xFFFFF09B,0x00000449,0x000023B4,0xFFFFF09B,0x00000449},
- {"0000001000010011111010101001010011011110000010000001000100100100",0x000040D2,0xFFFFE00A,0x00000696,0x000022DA,0xFFFFF1E9,0x000003F2,0x000022DA,0xFFFFF1E9,0x000003F2},
- {"0000001000010011111010101001010011011110001011000011100100100100",0x00003C98,0xFFFFE365,0x00000618,0x00002D5D,0xFFFFEB3A,0x0000051D,0x00002D5D,0xFFFFEB3A,0x0000051D},
- {"0000001000010011111010101001010011011110001011000001000010100100",0x00003BBD,0xFFFFE37E,0x00000617,0x0000252E,0xFFFFF06E,0x00000441,0x0000252E,0xFFFFF06E,0x00000441},
- {"0000001000010011111010101001010011011110001001100010100100100100",0x00004363,0xFFFFDF7A,0x000006A0,0x000031F5,0xFFFFE880,0x0000057B,0x000031F5,0xFFFFE880,0x0000057B},
- {"0000001000010011111010101001010011011110000011100011100001000100",0x00003CFC,0xFFFFE2AF,0x0000062E,0x0000212A,0xFFFFF335,0x000003BF,0x0000212A,0xFFFFF335,0x000003BF},
- {"0000001000010011111010101001010011011110000111000100100100100100",0x0000252D,0xFFFFF31B,0x000003C3,0x00001A1A,0xFFFFF882,0x00000325,0x00001A1A,0xFFFFF882,0x00000325},
- {"0000001000010011111010101001010011011110000010100010100110100100",0x00003FE2,0xFFFFDFEF,0x000006AC,0x000025A2,0xFFFFEF84,0x00000462,0x000025A2,0xFFFFEF84,0x00000462},
- {"0000001000010011111010101001010011011110000010000010000011100100",0x000040A5,0xFFFFE13B,0x0000065B,0x00002C13,0xFFFFEC75,0x000004D7,0x00002C13,0xFFFFEC75,0x000004D7},
- {"0000001000010011111010101001010011011110000011100100100010100100",0x00003E42,0xFFFFE1B3,0x00000657,0x0000221D,0xFFFFF273,0x000003DE,0x0000221D,0xFFFFF273,0x000003DE},
- {"0000001000010011111010101001010011011110000010100010000011100100",0x00003E7F,0xFFFFE255,0x00000638,0x00002D30,0xFFFFEB8A,0x00000503,0x00002D30,0xFFFFEB8A,0x00000503},
- {"0000001000010011111010101001010011011110001011000010100111000100",0x00003E56,0xFFFFE16D,0x00000670,0x000028DC,0xFFFFEDA0,0x000004BA,0x000028DC,0xFFFFEDA0,0x000004BA},
- {"0000001000010011111010101001010011011110001001100011000010100100",0x000044AD,0xFFFFDE24,0x000006DD,0x000031AD,0xFFFFE850,0x00000585,0x000031AD,0xFFFFE850,0x00000585},
- {"0000001000010011111010101001010011011110001011000010000011100100",0x00003AF3,0xFFFFE5B0,0x000005A6,0x00002CF6,0xFFFFEC75,0x000004DD,0x00002CF6,0xFFFFEC75,0x000004DD},
- {"0000001000010011111010101001010011011110000010100010000010000100",0x00003E66,0xFFFFE19E,0x0000065B,0x00002332,0xFFFFF1B9,0x000003FD,0x00002332,0xFFFFF1B9,0x000003FD},
- {"0000001000010011111010101001010011011110000010000010100010000100",0x00003FB4,0xFFFFE0A5,0x00000686,0x0000253E,0xFFFFF02E,0x00000444,0x0000253E,0xFFFFF02E,0x00000444},
- {"0000001000010011111010101001010011011110001010000001100010100100",0x00003E28,0xFFFFE14D,0x0000066E,0x00001FE2,0xFFFFF39A,0x000003B1,0x00001FE2,0xFFFFF39A,0x000003B1},
- {"0000001000010011111010101001010011011110001011000000100100000100",0x000039E6,0xFFFFE44B,0x000005FE,0x0000210C,0xFFFFF2F4,0x000003DA,0x0000210C,0xFFFFF2F4,0x000003DA},
- {"0000001000010011111010101001010011011110001011000101000100000100",0x00003A4D,0xFFFFE252,0x0000067A,0x000027E2,0xFFFFECED,0x000004FA,0x000027E2,0xFFFFECED,0x000004FA},
- {"0000001000010011111010101001010011011110000010100010100101100100",0x00004065,0xFFFFE02F,0x0000069B,0x0000299D,0xFFFFED38,0x000004C2,0x0000299D,0xFFFFED38,0x000004C2},
- {"0000001000010011111010101001010011011110000011100010000010100100",0x000039EE,0xFFFFE603,0x00000594,0x0000214F,0xFFFFF429,0x0000038E,0x0000214F,0xFFFFF429,0x0000038E},
- {"0000001000010011111010101001010011011110000011100100100011100100",0x00003BD2,0xFFFFE351,0x00000618,0x000020B8,0xFFFFF377,0x000003B4,0x000020B8,0xFFFFF377,0x000003B4},
- {"0000001000010011111010101001010011011110000010100011000100100100",0x00003FAA,0xFFFFE183,0x0000065E,0x000032AE,0xFFFFE7C2,0x000005A6,0x000032AE,0xFFFFE7C2,0x000005A6},
- {"0000001000010011111010101001010011011110001011000010100110000100",0x00003AFB,0xFFFFE3E4,0x00000608,0x00002293,0xFFFFF21F,0x000003FA,0x00002293,0xFFFFF21F,0x000003FA},
- {"0000001000010011111010101001010011011110001001100010000001100100",0x0000448B,0xFFFFDD5D,0x0000070D,0x00002E4E,0xFFFFE9DF,0x00000551,0x00002E4E,0xFFFFE9DF,0x00000551},
- {"0000001000010011111010101001010011011110000011100010000110000100",0x00003D46,0xFFFFE39B,0x000005F3,0x0000218E,0xFFFFF3CD,0x00000398,0x0000218E,0xFFFFF3CD,0x00000398},
- {"0000001000010011111010101001010011011110000010000100100011100100",0x00003F01,0xFFFFDFD9,0x000006BF,0x000023AF,0xFFFFF04E,0x0000044C,0x000023AF,0xFFFFF04E,0x0000044C},
- {"0000001000010011111010101001010011011110000100000010100110100100",0x0000403D,0xFFFFDF6B,0x000006C9,0x0000270D,0xFFFFEE4B,0x0000049E,0x0000270D,0xFFFFEE4B,0x0000049E},
- {"0000001000010011111010101001010011011110000011100011100101100100",0x00003C11,0xFFFFE35C,0x00000613,0x000020F9,0xFFFFF365,0x000003B9,0x000020F9,0xFFFFF365,0x000003B9},
- {"0000001000010011111010101001010011011110001011000011100010000100",0x00003B58,0xFFFFE37D,0x0000061F,0x00002698,0xFFFFEF46,0x00000478,0x00002698,0xFFFFEF46,0x00000478},
- {"0000001000010011111010101001010011011110001010000100000110100100",0x00003EBC,0xFFFFDF7A,0x000006D6,0x0000212B,0xFFFFF195,0x0000041B,0x0000212B,0xFFFFF195,0x0000041B},
- {"0000001000010011111010101001010011011110000010000100100011000100",0x00004050,0xFFFFDEB3,0x000006FE,0x00002D6C,0xFFFFE961,0x00000582,0x00002D6C,0xFFFFE961,0x00000582},
- {"0000001000010011111010101001010011011110001001100010000001000100",0x000043F0,0xFFFFDD9C,0x00000702,0x00002B31,0xFFFFEBEA,0x000004F7,0x00002B31,0xFFFFEBEA,0x000004F7},
- {"0000001000010011111010101001010011011110000100000000100100100100",0x00003EFA,0xFFFFE093,0x00000696,0x000026DB,0xFFFFEEB3,0x00000489,0x000026DB,0xFFFFEEB3,0x00000489},
- {"0000001000010011111010101001010011011110000010000010000001100100",0x0000425D,0xFFFFDE8D,0x000006E6,0x00002CA4,0xFFFFEAD2,0x00000531,0x00002CA4,0xFFFFEAD2,0x00000531},
- {"0000001000010011111010101001010011011110001001100011100110100100",0x000043B0,0xFFFFDD03,0x00000728,0x00002946,0xFFFFECA6,0x000004DE,0x00002946,0xFFFFECA6,0x000004DE},
- {"0000001000010011111010101001010011011110001010000010100001100100",0x00003F6A,0xFFFFE03A,0x0000069D,0x00002208,0xFFFFF1F8,0x000003F6,0x00002208,0xFFFFF1F8,0x000003F6},
- {"0000001000010011111010101001010011011110001011000010100101100100",0x00003A94,0xFFFFE4A7,0x000005E2,0x000024D0,0xFFFFF100,0x00000426,0x000024D0,0xFFFFF100,0x00000426},
- {"0000001000010011111010101001010011011110001010000001000011000100",0x00003F2F,0xFFFFE0A3,0x00000688,0x00002198,0xFFFFF271,0x000003E2,0x00002198,0xFFFFF271,0x000003E2},
- {"0000001000010011111010101001010011011110000100000100100011100100",0x00003EA5,0xFFFFE032,0x000006AE,0x0000227C,0xFFFFF130,0x00000426,0x0000227C,0xFFFFF130,0x00000426},
- {"0000001000010011111010101001010011011110001001100100000101000100",0x0000442F,0xFFFFDBC4,0x0000078B,0x00003CD6,0xFFFFDE6C,0x0000076C,0x00003CD6,0xFFFFDE6C,0x0000076C},
- {"0000001000010011111010101001010011011110001010000010100010000100",0x00003DDE,0xFFFFE174,0x00000668,0x00001FF4,0xFFFFF38F,0x000003B1,0x00001FF4,0xFFFFF38F,0x000003B1},
- {"0000001000010011111010101001010011011110000010100011000101000100",0x000040B0,0xFFFFE016,0x000006A0,0x00002DBB,0xFFFFEA7F,0x00000537,0x00002DBB,0xFFFFEA7F,0x00000537},
- {"0000001000010011111010101001010011011110001011000011000100000100",0x00003429,0xFFFFEA97,0x000004DD,0x000024D5,0xFFFFF26F,0x000003DF,0x000024D5,0xFFFFF26F,0x000003DF},
- {"0000001000010011111010101001010011011110000011100001100100000100",0x00003AEB,0xFFFFE590,0x000005A3,0x000022CB,0xFFFFF347,0x000003B2,0x000022CB,0xFFFFF347,0x000003B2},
- {"0000001000010011111010101001010011011110001010000011100100000100",0x00003B8E,0xFFFFE2EF,0x00000636,0x00002351,0xFFFFF143,0x0000041C,0x00002351,0xFFFFF143,0x0000041C},
- {"0000001000010011111010101001010011011110001100100100000011000100",0x00002926,0xFFFFF0B0,0x00000410,0x0000194E,0xFFFFF94E,0x000002E9,0x0000194E,0xFFFFF94E,0x000002E9},
- {"0000001000010011111010101001010011011110001010000011000110000100",0x0000402B,0xFFFFDF78,0x000006C2,0x00002273,0xFFFFF16C,0x00000414,0x00002273,0xFFFFF16C,0x00000414},
- {"0000001000010011111010101001010011011110000010100001000010100100",0x00003D6A,0xFFFFE1D3,0x00000659,0x00002006,0xFFFFF394,0x000003B1,0x00002006,0xFFFFF394,0x000003B1},
- {"0000001000010011111010101001010011011110001010000100000001100100",0x00004042,0xFFFFDFD8,0x000006A8,0x00002135,0xFFFFF29F,0x000003D9,0x00002135,0xFFFFF29F,0x000003D9},
- {"0000001000010011111010101001010011011110000010000010000010100100",0x0000405B,0xFFFFE093,0x00000682,0x0000288F,0xFFFFEE3A,0x00000491,0x0000288F,0xFFFFEE3A,0x00000491},
- {"0000001000010011111010101001010011011110001011000100100010100100",0x00003A49,0xFFFFE30C,0x00000648,0x000023F9,0xFFFFF02D,0x00000460,0x000023F9,0xFFFFF02D,0x00000460},
- {"0000001000010011111010101001010011011110001010000010100101100100",0x00003D59,0xFFFFE1CC,0x0000065B,0x00002013,0xFFFFF37D,0x000003B6,0x00002013,0xFFFFF37D,0x000003B6},
- {"0000001000010011111010101001010011011110001011000011100110000100",0x000040C1,0xFFFFDF8C,0x000006CA,0x00003271,0xFFFFE6CA,0x000005EA,0x00003271,0xFFFFE6CA,0x000005EA},
- {"0000001000010011111010101001010011011110001001100010000011100100",0x000042E9,0xFFFFDFDC,0x0000068C,0x00002ED9,0xFFFFEAAF,0x0000051B,0x00002ED9,0xFFFFEAAF,0x0000051B},
- {"0000001000010011111010101001010011011110000010000011000010000100",0x000042ED,0xFFFFDE50,0x000006F0,0x00002FCF,0xFFFFE8BB,0x0000058C,0x00002FCF,0xFFFFE8BB,0x0000058C},
- {"0000001000010011111010101001010011011110000010100100000100000100",0x00003EBD,0xFFFFE099,0x00000698,0x00002709,0xFFFFEE7B,0x00000495,0x00002709,0xFFFFEE7B,0x00000495},
- {"0000001000010011111010101001010011011110001010000100100100000100",0x00003F71,0xFFFFDF82,0x000006C9,0x0000219B,0xFFFFF1AD,0x0000040F,0x0000219B,0xFFFFF1AD,0x0000040F},
- {"0000001000010011111010101001010011011110001010000000100011100100",0x00003E73,0xFFFFE080,0x0000069B,0x000020E7,0xFFFFF273,0x000003E9,0x000020E7,0xFFFFF273,0x000003E9},
- {"0000001000010011111010101001010011011110000011100011000110000100",0x00003E14,0xFFFFE278,0x0000062C,0x00002275,0xFFFFF2B3,0x000003CE,0x00002275,0xFFFFF2B3,0x000003CE},
- {"0000001000010011111010101001010011011110001011000010000110100100",0x00003ABB,0xFFFFE3B9,0x00000615,0x00002192,0xFFFFF28F,0x000003EB,0x00002192,0xFFFFF28F,0x000003EB},
- {"0000001000010011111010101001010011011110001010000011000100100100",0x00003D53,0xFFFFE255,0x00000643,0x0000275B,0xFFFFEEED,0x00000479,0x0000275B,0xFFFFEEED,0x00000479},
- {"0000001000010011111010101001010011011110001001100010100001100100",0x000043E3,0xFFFFDDC3,0x000006FB,0x00002B6B,0xFFFFEBD6,0x000004FA,0x00002B6B,0xFFFFEBD6,0x000004FA},
- {"0000001000010011111010101001010011011110000011100010000101000100",0x00003BDE,0xFFFFE507,0x000005B4,0x000022CE,0xFFFFF358,0x000003AB,0x000022CE,0xFFFFF358,0x000003AB},
- {"0000001000010011111010101001010011011110001100100011000101100100",0x00002460,0xFFFFF3B5,0x000003A2,0x000014E7,0xFFFFFC32,0x0000027C,0x000014E7,0xFFFFFC32,0x0000027C},
- {"0000001000010011111010101001010011011110001010000010000011000100",0x00003D20,0xFFFFE298,0x0000062F,0x00002080,0xFFFFF3AF,0x000003A8,0x00002080,0xFFFFF3AF,0x000003A8},
- {"0000001000010011111010101001010011011110000010000001100100000100",0x00003E14,0xFFFFE221,0x00000641,0x000021BB,0xFFFFF2EA,0x000003CA,0x000021BB,0xFFFFF2EA,0x000003CA},
- {"0000001000010011111010101001010011011110000010100100000011000100",0x00003DE1,0xFFFFE14E,0x00000677,0x00002468,0xFFFFF068,0x00000440,0x00002468,0xFFFFF068,0x00000440},
- {"0000001000010011111010101001010011011110001001100001000010000100",0x00004372,0xFFFFDDF8,0x000006F5,0x00002B3F,0xFFFFEBE8,0x000004F8,0x00002B3F,0xFFFFEBE8,0x000004F8},
- {"0000001000010011111010101001010011011110000010100010100011000100",0x00003E4F,0xFFFFE2A3,0x0000062B,0x00002F5A,0xFFFFEA37,0x0000053B,0x00002F5A,0xFFFFEA37,0x0000053B},
- {"0000001000010011111010101001010011011110001010000101000011100100",0x00003E07,0xFFFFE02F,0x000006B6,0x0000216B,0xFFFFF1A3,0x00000416,0x0000216B,0xFFFFF1A3,0x00000416},
- {"0000001000010011111010101001010011011110001010000011100010100100",0x00003DAB,0xFFFFE128,0x0000067F,0x0000216F,0xFFFFF236,0x000003F3,0x0000216F,0xFFFFF236,0x000003F3},
- {"0000001000010011111010101001010011011110001011000010100100100100",0x0000364B,0xFFFFE8CB,0x0000052A,0x00002568,0xFFFFF1B2,0x00000400,0x00002568,0xFFFFF1B2,0x00000400},
- {"0000001000010011111010101001010011011110001001100001000001100100",0x00004219,0xFFFFDE87,0x000006E8,0x00002C59,0xFFFFEAEE,0x00000529,0x00002C59,0xFFFFEAEE,0x00000529},
- {"0000001000010011111010101001010011011110000011100001100101000100",0x000039A8,0xFFFFE602,0x00000594,0x00001D06,0xFFFFF6F0,0x00000316,0x00001D06,0xFFFFF6F0,0x00000316},
- {"0000001000010011111010101001010011011110001001100001000011100100",0x00004052,0xFFFFE01C,0x00000698,0x00002310,0xFFFFF1A1,0x000003FE,0x00002310,0xFFFFF1A1,0x000003FE},
- {"0000001000010011111010101001010011011110000011100010100000100100",0x00003C1C,0xFFFFE3EB,0x000005F1,0x00002289,0xFFFFF2CF,0x000003C9,0x00002289,0xFFFFF2CF,0x000003C9},
- {"0000001000010011111010101001010011011110000011100101000100100100",0x00003F19,0xFFFFE085,0x0000069E,0x00002B94,0xFFFFEB72,0x0000051D,0x00002B94,0xFFFFEB72,0x0000051D},
- {"0000001000010011111010101001010011011110000011100100000110100100",0x00003C51,0xFFFFE2AD,0x00000638,0x0000206B,0xFFFFF361,0x000003BE,0x0000206B,0xFFFFF361,0x000003BE},
- {"0000001000010011111010101001010011011110001001100001000011000100",0x000040B9,0xFFFFDFBB,0x000006AB,0x0000241F,0xFFFFF0CC,0x00000425,0x0000241F,0xFFFFF0CC,0x00000425},
- {"0000001000010011111010101001010011011110000010100010000001100100",0x00003E62,0xFFFFE12C,0x00000678,0x00002445,0xFFFFF09E,0x00000435,0x00002445,0xFFFFF09E,0x00000435},
- {"0000001000010011111010101001010011011110000011100001100110000100",0x00003C97,0xFFFFE399,0x000005FB,0x0000209D,0xFFFFF41D,0x0000038F,0x0000209D,0xFFFFF41D,0x0000038F},
- {"0000001000010011111010101001010011011110000011100011000101000100",0x00003FF9,0xFFFFE1E9,0x0000063E,0x00002E96,0xFFFFEAF5,0x00000516,0x00002E96,0xFFFFEAF5,0x00000516},
- {"0000001000010011111010101001010011011110000010100011000010000100",0x00003F04,0xFFFFE109,0x0000067A,0x000026E1,0xFFFFEF0B,0x00000476,0x000026E1,0xFFFFEF0B,0x00000476},
- {"0000001000010011111010101001010011011110000100000001000100100100",0x00003E3E,0xFFFFE187,0x00000660,0x00002049,0xFFFFF38D,0x000003B0,0x00002049,0xFFFFF38D,0x000003B0},
- {"0000001000010011111010101001010011011110001010000010100101000100",0x00003D58,0xFFFFE253,0x0000063D,0x00002158,0xFFFFF308,0x000003C3,0x00002158,0xFFFFF308,0x000003C3},
- {"0000001000010011111010101001010011011110000010000100000011000100",0x00004074,0xFFFFDF8D,0x000006C0,0x00002799,0xFFFFEE19,0x000004A5,0x00002799,0xFFFFEE19,0x000004A5},
- {"0000001000010011111010101001010011011110001010000001100100100100",0x00003DAF,0xFFFFE1C9,0x00000659,0x000020E5,0xFFFFF313,0x000003C6,0x000020E5,0xFFFFF313,0x000003C6},
- {"0000001000010011111010101001010011011110000010100011100101100100",0x000041DD,0xFFFFDDFA,0x0000071B,0x0000348D,0xFFFFE4B4,0x0000064C,0x0000348D,0xFFFFE4B4,0x0000064C},
- {"0000001000010011111010101001010011011110001011000010100010000100",0x00003947,0xFFFFE5AE,0x000005B8,0x000024A6,0xFFFFF140,0x0000041D,0x000024A6,0xFFFFF140,0x0000041D},
- {"0000001000010011111010101001010011011110000100000001100001000100",0x00003D35,0xFFFFE197,0x0000066E,0x00002248,0xFFFFF1BC,0x00000408,0x00002248,0xFFFFF1BC,0x00000408},
- {"0000001000010011111010101001010011011110000010100001100011100100",0x00003F4F,0xFFFFE13E,0x0000066D,0x00002AF0,0xFFFFEC99,0x000004DB,0x00002AF0,0xFFFFEC99,0x000004DB},
- {"0000001000010011111010101001010011011110001001100011100101000100",0x0000430F,0xFFFFDDFB,0x000006FC,0x00002D4D,0xFFFFEA55,0x00000540,0x00002D4D,0xFFFFEA55,0x00000540},
- {"0000001000010011111010101001010011011110000011100010100101000100",0x00003B22,0xFFFFE543,0x000005B1,0x000022E1,0xFFFFF31B,0x000003B9,0x000022E1,0xFFFFF31B,0x000003B9},
- {"0000001000010011111010101001010011011110000011100010000010000100",0x00003978,0xFFFFE611,0x00000592,0x00001C36,0xFFFFF771,0x00000302,0x00001C36,0xFFFFF771,0x00000302},
- {"0000001000010011111010101001010011011110001001100010000101100100",0x000044DF,0xFFFFDDAB,0x000006F2,0x00002CEA,0xFFFFEB47,0x00000507,0x00002CEA,0xFFFFEB47,0x00000507},
- {"0000001000010011111010101001010011011110000010100011100011000100",0x00003E9B,0xFFFFE12C,0x0000067C,0x00002B79,0xFFFFEBD9,0x00000503,0x00002B79,0xFFFFEBD9,0x00000503},
- {"0000001000010011111010101001010011011110001001100011000001000100",0x00004464,0xFFFFDCD3,0x00000731,0x00002D14,0xFFFFEA2D,0x0000054E,0x00002D14,0xFFFFEA2D,0x0000054E},
- {"0000001000010011111010101001010011011110001010000001000100100100",0x00003FB3,0xFFFFE052,0x00000693,0x000020AC,0xFFFFF311,0x000003C6,0x000020AC,0xFFFFF311,0x000003C6},
- {"0000001000010011111010101001010011011110001011000001000010000100",0x00003BDA,0xFFFFE2FB,0x00000636,0x0000261E,0xFFFFEF72,0x00000471,0x0000261E,0xFFFFEF72,0x00000471},
- {"0000001000010011111010101001010011011110001011000001100101100100",0x00003D72,0xFFFFE28A,0x0000063E,0x000029D8,0xFFFFED54,0x000004C7,0x000029D8,0xFFFFED54,0x000004C7},
- {"0000001000010011111010101001010011011110001011000010100000100100",0x00003E26,0xFFFFE102,0x00000694,0x00002DD1,0xFFFFE9CA,0x0000056D,0x00002DD1,0xFFFFE9CA,0x0000056D},
- {"0000001000010011111010101001010011011110000100000100000100100100",0x000041CD,0xFFFFDE97,0x000006ED,0x00002DE5,0xFFFFE9B9,0x00000565,0x00002DE5,0xFFFFE9B9,0x00000565},
- {"0000001000010011111010101001010011011110000010100010100110000100",0x00003F30,0xFFFFE06E,0x00000698,0x000024FF,0xFFFFEFFC,0x0000044F,0x000024FF,0xFFFFEFFC,0x0000044F},
- {"0000001000010011111010101001010011011110001011000011100011000100",0x0000378B,0xFFFFE6B4,0x00000594,0x000023A7,0xFFFFF1DC,0x00000407,0x000023A7,0xFFFFF1DC,0x00000407},
- {"0000001000010011111010101001010011011110000011100100000101100100",0x00003CD7,0xFFFFE28D,0x00000636,0x00002036,0xFFFFF3B5,0x000003AA,0x00002036,0xFFFFF3B5,0x000003AA},
- {"0000001000010011111010101001010011011110000010100011100010000100",0x00003EF9,0xFFFFE0AA,0x0000068D,0x000024D3,0xFFFFF02F,0x00000445,0x000024D3,0xFFFFF02F,0x00000445},
- {"0000001000010011111010101001010011011110001010000011100101000100",0x00003D08,0xFFFFE1BB,0x00000665,0x00002159,0xFFFFF26F,0x000003E6,0x00002159,0xFFFFF26F,0x000003E6},
- {"0000001000010011111010101001010011011110001011000010000011000100",0x000038A9,0xFFFFE6CA,0x00000580,0x000025D3,0xFFFFF101,0x00000421,0x000025D3,0xFFFFF101,0x00000421},
- {"0000001000010011111010101001010011011110000010100010000010100100",0x00003E45,0xFFFFE1F8,0x0000064D,0x000027E3,0xFFFFEEBB,0x0000047F,0x000027E3,0xFFFFEEBB,0x0000047F},
- {"0000001000010011111010101001010011011110000011100011100001100100",0x00003F76,0xFFFFE128,0x0000066E,0x0000286B,0xFFFFEE4C,0x00000493,0x0000286B,0xFFFFEE4C,0x00000493},
- {"0000001000010011111010101001010011011110001001100100000100000100",0x0000440D,0xFFFFDCA2,0x0000074F,0x00003817,0xFFFFE256,0x000006AF,0x00003817,0xFFFFE256,0x000006AF},
- {"0000001000010011111010101001010011011110000100000101000100000100",0x00003EE1,0xFFFFDFA7,0x000006D4,0x000027EA,0xFFFFED2B,0x000004DE,0x000027EA,0xFFFFED2B,0x000004DE},
- {"0000001000010011111010101001010011011110001011000011100001100100",0x00003C62,0xFFFFE285,0x0000064A,0x00002520,0xFFFFF001,0x0000045C,0x00002520,0xFFFFF001,0x0000045C},
- {"0000001000010011111010101001010011011110001100100011100101100100",0x0000272E,0xFFFFF17A,0x000003FA,0x0000150B,0xFFFFFBD5,0x00000284,0x0000150B,0xFFFFFBD5,0x00000284},
- {"0000001000010011111010101001010011011110001001100001100100100100",0x00004275,0xFFFFDF69,0x000006A5,0x000025AA,0xFFFFF05C,0x0000042B,0x000025AA,0xFFFFF05C,0x0000042B},
- {"0000001000010011111010101001010011011110000011100100000011100100",0x00003CAA,0xFFFFE392,0x000005FF,0x000023A8,0xFFFFF20E,0x000003E9,0x000023A8,0xFFFFF20E,0x000003E9},
- {"0000001000010011111010101001010011011110001011000101000011000100",0x00003CF8,0xFFFFE0FB,0x000006A6,0x00002CA7,0xFFFFE9FF,0x0000056E,0x00002CA7,0xFFFFE9FF,0x0000056E},
- {"0000001000010011111010101001010011011110001010000010000100100100",0x00003D00,0xFFFFE296,0x00000633,0x000021C1,0xFFFFF2C8,0x000003CF,0x000021C1,0xFFFFF2C8,0x000003CF},
- {"0000001000010011111010101001010011011110001010000011100011100100",0x00003B46,0xFFFFE301,0x00000632,0x0000204C,0xFFFFF33B,0x000003C8,0x0000204C,0xFFFFF33B,0x000003C8},
- {"0000001000010011111010101001010011011110001000000100000101100100",0x00002026,0xFFFFF5CE,0x00000368,0x00001598,0xFFFFFB29,0x000002C3,0x00001598,0xFFFFFB29,0x000002C3},
- {"0000001000010011111010101001010011011110001010000011000101100100",0x00003DCA,0xFFFFE178,0x00000668,0x00001FDB,0xFFFFF39D,0x000003AF,0x00001FDB,0xFFFFF39D,0x000003AF},
- {"0000001000010011111010101001010011011110001011000100100011000100",0x00003A59,0xFFFFE327,0x00000642,0x000024B9,0xFFFFEFC4,0x00000471,0x000024B9,0xFFFFEFC4,0x00000471},
- {"0000001000010011111010101001010011011110001011000010100101000100",0x00003C26,0xFFFFE440,0x000005EB,0x00002C0F,0xFFFFEC88,0x000004E0,0x00002C0F,0xFFFFEC88,0x000004E0},
- {"0000001000010011111010101001010011011110000010000011100010000100",0x00004149,0xFFFFDEB8,0x000006E7,0x0000280A,0xFFFFED89,0x000004C2,0x0000280A,0xFFFFED89,0x000004C2},
- {"0000001000010011111010101001010011011110000011100100000100100100",0x00003EB4,0xFFFFE1E5,0x0000064D,0x0000299F,0xFFFFEDB3,0x000004A9,0x0000299F,0xFFFFEDB3,0x000004A9},
- {"0000001000010011111010101001010011011110001011000011100110100100",0x00003BBF,0xFFFFE268,0x0000065A,0x00002504,0xFFFFEFB0,0x00000470,0x00002504,0xFFFFEFB0,0x00000470},
- {"0000001000010011111010101001010011011110000010000100100100000100",0x00004203,0xFFFFDDC6,0x00000720,0x0000303B,0xFFFFE78F,0x000005D0,0x0000303B,0xFFFFE78F,0x000005D0},
- {"0000001000010011111010101001010011011110000011100011100110000100",0x00003DA3,0xFFFFE244,0x0000063E,0x000021B4,0xFFFFF2DA,0x000003CD,0x000021B4,0xFFFFF2DA,0x000003CD},
- {"0000001000010011111010101001010011011110000010100011100011100100",0x00004035,0xFFFFE065,0x0000069B,0x00003323,0xFFFFE6D6,0x000005D8,0x00003323,0xFFFFE6D6,0x000005D8},
- {"0000001000010011111010101001010011011110001011000001000101100100",0x00003944,0xFFFFE4E5,0x000005E2,0x00001F3C,0xFFFFF456,0x0000039D,0x00001F3C,0xFFFFF456,0x0000039D},
- {"0000001000010011111010101001010011011110000001100001100100000100",0x000032D8,0xFFFFEAE8,0x000004E6,0x00001812,0xFFFFFA1C,0x000002BC,0x00001812,0xFFFFFA1C,0x000002BC},
- {"0000001000010011111100001111110101000010110100100010100101000100",0x000041F6,0xFFFFE025,0x0000069A,0x0000241E,0xFFFFF1B4,0x00000402,0x0000241E,0xFFFFF1B4,0x00000402},
- {"0000001000010011111100001111111010011001000011000011000010100100",0x00003300,0xFFFFEB60,0x000004C1,0x00001E15,0xFFFFF6A6,0x0000033B,0x00001E15,0xFFFFF6A6,0x0000033B},
- {"0000001000010011111010101001010011011110000001000000100010100100",0x000037F0,0xFFFFE68F,0x0000059B,0x00001F8A,0xFFFFF467,0x000003A3,0x00001F8A,0xFFFFF467,0x000003A3},
- {"0000001000010011111100001111111010011001000110000010100110000100",0x000025D8,0xFFFFF2AA,0x000003C3,0x000018A8,0xFFFFF9BE,0x000002D2,0x000018A8,0xFFFFF9BE,0x000002D2},
- {"0000001000010011111100001111111010011001000001100010000011000100",0x0000364F,0xFFFFE988,0x000004FC,0x00001E51,0xFFFFF633,0x0000034F,0x00001E51,0xFFFFF633,0x0000034F},
- {"0000001000010011111010101001010011011110000001100001000101000100",0x00002288,0xFFFFF483,0x0000036C,0x0000280F,0xFFFFEF39,0x0000047B,0x0000280F,0xFFFFEF39,0x0000047B},
- {"0000001000010011111100001111111010011001000010000010000010000100",0x00003322,0xFFFFEA7E,0x000004ED,0x00001DAD,0xFFFFF62B,0x00000355,0x00001DAD,0xFFFFF62B,0x00000355},
- {"0000001000010011111010101001010011011110000000100101000011100100",0x00002B7B,0xFFFFEE4F,0x0000045B,0x00001AA2,0xFFFFF710,0x0000033E,0x00001AA2,0xFFFFF710,0x0000033E},
- {"0000001000010011111100001111111010011001000001000010000011000100",0x000034CC,0xFFFFEA79,0x000004E4,0x00001B05,0xFFFFF8B3,0x000002EC,0x00001B05,0xFFFFF8B3,0x000002EC},
- {"0000001000010011111100001111110101000010110111000010100001100100",0x00003837,0xFFFFE5ED,0x000005C3,0x00001ACB,0xFFFFF7B2,0x00000314,0x00001ACB,0xFFFFF7B2,0x00000314},
- {"0000001000010011111100001111111010011001000001000100000101100100",0x0000352D,0xFFFFE88F,0x00000548,0x000021E6,0xFFFFF3B5,0x000003AA,0x000021E6,0xFFFFF3B5,0x000003AA},
- {"0000001000010011111100001111111010011001000010100100100010000100",0x00003300,0xFFFFE835,0x0000057B,0x00001A85,0xFFFFF715,0x00000336,0x00001A85,0xFFFFF715,0x00000336},
- {"0000001000010011111010101001010011011110000001000100100010100100",0x000033FA,0xFFFFE851,0x00000565,0x00001A8E,0xFFFFF727,0x0000033B,0x00001A8E,0xFFFFF727,0x0000033B},
- {"0000001000010011111100001111110101000010110110100011100100100100",0x000039D3,0xFFFFE5D3,0x000005B0,0x00001888,0xFFFFF978,0x000002C8,0x00001888,0xFFFFF978,0x000002C8},
- {"0000001000010011111100001111111010011001000011100100100001100100",0x00002F6B,0xFFFFEC53,0x000004B9,0x00001C15,0xFFFFF71B,0x00000337,0x00001C15,0xFFFFF71B,0x00000337},
- {"0000001000010011111100001111111010011001000001100100000101000100",0x0000384D,0xFFFFE737,0x00000569,0x00001D2D,0xFFFFF673,0x00000343,0x00001D2D,0xFFFFF673,0x00000343},
- {"0000001000010011111100001111111010011001000001100010000010100100",0x00003A49,0xFFFFE70B,0x0000055F,0x00001A63,0xFFFFF8CD,0x000002E2,0x00001A63,0xFFFFF8CD,0x000002E2},
- {"0000001000010011111100001111111010011001000001000010100110000100",0x0000311E,0xFFFFEB97,0x000004C6,0x00001EAE,0xFFFFF5A9,0x00000367,0x00001EAE,0xFFFFF5A9,0x00000367},
- {"0000001000010011111100001111111010011001000011100001000100100100",0x000027D3,0xFFFFF075,0x00000417,0x00002001,0xFFFFF44A,0x000003A2,0x00002001,0xFFFFF44A,0x000003A2},
- {"0000001000010011111100001111111010011001000001100100100100000100",0x00003B72,0xFFFFE4BD,0x000005DC,0x00001D76,0xFFFFF606,0x0000035A,0x00001D76,0xFFFFF606,0x0000035A},
- {"0000001000010011111100001111111010011001000100000001000100100100",0x00002E0F,0xFFFFECA7,0x000004AE,0x00001DC6,0xFFFFF5BF,0x0000036A,0x00001DC6,0xFFFFF5BF,0x0000036A},
- {"0000001000010011111100001111111010011001000000100011100010100100",0x000032C7,0xFFFFEA7A,0x000004F0,0x00001A7B,0xFFFFF827,0x00000301,0x00001A7B,0xFFFFF827,0x00000301},
- {"0000001000010011111010101001010011011110000001000100100010000100",0x0000312D,0xFFFFEA39,0x00000515,0x00001948,0xFFFFF800,0x00000318,0x00001948,0xFFFFF800,0x00000318},
- {"0000001000010011111010101001010011011110000001100010000010000100",0x00003611,0xFFFFE8D7,0x00000533,0x00001929,0xFFFFF965,0x000002D2,0x00001929,0xFFFFF965,0x000002D2},
- {"0000001000010011111100001111111010011001001011000011000011100100",0x00002FE2,0xFFFFED89,0x00000470,0x00001A3C,0xFFFFF955,0x000002D5,0x00001A3C,0xFFFFF955,0x000002D5},
- {"0000001000010011111010101001010011011110000000100000100010100100",0x000035FF,0xFFFFE884,0x00000548,0x0000182A,0xFFFFF9AB,0x000002CF,0x0000182A,0xFFFFF9AB,0x000002CF},
- {"0000001000010011111100001111111010011001000000100010000011100100",0x00003597,0xFFFFE904,0x00000528,0x00001A94,0xFFFFF840,0x00000300,0x00001A94,0xFFFFF840,0x00000300},
- {"0000001000010011111100001111111010011001000110000001100101000100",0x000026CB,0xFFFFF1FB,0x000003E4,0x000017CC,0xFFFFFA25,0x000002C8,0x000017CC,0xFFFFFA25,0x000002C8},
- {"0000001000010011111010101001010011011110000001100000100011000100",0x00003274,0xFFFFEA39,0x0000050C,0x00001B20,0xFFFFF7C1,0x00000314,0x00001B20,0xFFFFF7C1,0x00000314},
- {"0000001000010011111100001111110101000010110110000010100100100100",0x0000280B,0xFFFFF283,0x000003B5,0x000018D0,0xFFFFF992,0x000002EC,0x000018D0,0xFFFFF992,0x000002EC},
- {"0000001000010011111100001111111010011001000001100010000100000100",0x000033AB,0xFFFFEB1B,0x000004C4,0x00001FEE,0xFFFFF53A,0x00000378,0x00001FEE,0xFFFFF53A,0x00000378},
- {"0000001000010011111100001111111010011001000010100011100101100100",0x00002F79,0xFFFFEB0C,0x000004FA,0x00001E57,0xFFFFF4BF,0x0000039B,0x00001E57,0xFFFFF4BF,0x0000039B},
- {"0000001000010011111100001111111010011001000001000100100011100100",0x00003487,0xFFFFE8F2,0x00000539,0x0000185B,0xFFFFF9AE,0x000002BA,0x0000185B,0xFFFFF9AE,0x000002BA},
- {"0000001000010011111100001111111010011001000010100001100010100100",0x00003500,0xFFFFE793,0x0000058A,0x00001AA2,0xFFFFF792,0x0000031D,0x00001AA2,0xFFFFF792,0x0000031D},
- {"0000001000010011111100001111111010011001000010000001000101100100",0x00003943,0xFFFFE54D,0x000005D9,0x00001BC8,0xFFFFF6E0,0x00000339,0x00001BC8,0xFFFFF6E0,0x00000339},
- {"0000001000010011111010101001010011011110000001000011000010100100",0x0000306D,0xFFFFEC5E,0x000004A5,0x00001A3A,0xFFFFF85F,0x00000304,0x00001A3A,0xFFFFF85F,0x00000304},
- {"0000001000010011111100001111110101000010110110000011000010000100",0x00002BA4,0xFFFFEE8D,0x0000046A,0x0000198C,0xFFFFF88E,0x00000307,0x0000198C,0xFFFFF88E,0x00000307},
- {"0000001000010011111100001111110101000010110100100001100011100100",0x00003D30,0xFFFFE2F6,0x0000062A,0x000025DC,0xFFFFF074,0x00000435,0x000025DC,0xFFFFF074,0x00000435},
- {"0000001000010011111100001111110101000010110110000011100101100100",0x00002CD6,0xFFFFED79,0x0000049B,0x000016D0,0xFFFFFA53,0x000002BB,0x000016D0,0xFFFFFA53,0x000002BB},
- {"0000001000010011111100001111111010011001000101100011000101100100",0x00002484,0xFFFFF3BD,0x000003A0,0x000015B8,0xFFFFFB6B,0x000002A4,0x000015B8,0xFFFFFB6B,0x000002A4},
- {"0000001000010011111100001111111010011001000011100011100101000100",0x000038AE,0xFFFFE6D1,0x00000587,0x00001A2A,0xFFFFF8F1,0x000002D4,0x00001A2A,0xFFFFF8F1,0x000002D4},
- {"0000001000010011111100001111111010011001000001000100100101000100",0x000036FD,0xFFFFE76C,0x00000576,0x00001EE4,0xFFFFF58D,0x00000361,0x00001EE4,0xFFFFF58D,0x00000361},
- {"0000001000010011111100001111110101000010110110000011000010100100",0x00002BCF,0xFFFFEF28,0x00000448,0x00001B93,0xFFFFF7BA,0x00000327,0x00001B93,0xFFFFF7BA,0x00000327},
- {"0000001000010011111100001111111010011001000001100010100010000100",0x00003834,0xFFFFE818,0x0000053B,0x00001AFE,0xFFFFF85C,0x000002F3,0x00001AFE,0xFFFFF85C,0x000002F3},
- {"0000001000010011111100001111111010011001001100100011000110100100",0x00002EF7,0xFFFFEBFC,0x000004CE,0x00001897,0xFFFFF8EF,0x000002EC,0x00001897,0xFFFFF8EF,0x000002EC},
- {"0000001000010011111100001111111010011001001011000001100011000100",0x000035BD,0xFFFFE8BB,0x0000053B,0x00001F22,0xFFFFF561,0x00000373,0x00001F22,0xFFFFF561,0x00000373},
- {"0000001000010011111100001111111010011001000110000011100110000100",0x00002D42,0xFFFFEE1D,0x00000478,0x000016F0,0xFFFFFAAE,0x000002B3,0x000016F0,0xFFFFFAAE,0x000002B3},
- {"0000001000010011111010101001010011011110000001000101000100100100",0x00002F98,0xFFFFEB3C,0x000004F0,0x00001903,0xFFFFF818,0x00000319,0x00001903,0xFFFFF818,0x00000319},
- {"0000001000010011111100001111110101000010110101000010000101000100",0x00004081,0xFFFFDF13,0x000006F3,0x00002A6D,0xFFFFEC1B,0x00000509,0x00002A6D,0xFFFFEC1B,0x00000509},
- {"0000001000010011111010101001010011011110000001000000100100000100",0x00002D68,0xFFFFED21,0x00000498,0x00001FF6,0xFFFFF427,0x000003B0,0x00001FF6,0xFFFFF427,0x000003B0},
- {"0000001000010011111100001111111010011001000000100011100010000100",0x00003243,0xFFFFEA5C,0x000004FD,0x000020FB,0xFFFFF39E,0x000003C0,0x000020FB,0xFFFFF39E,0x000003C0},
- {"0000001000010011111100001111110101000010110110000100100010100100",0x00002F20,0xFFFFEC19,0x000004C6,0x00001748,0xFFFFF99F,0x000002DA,0x00001748,0xFFFFF99F,0x000002DA},
- {"0000001000010011111100001111111010011001000100000011100110000100",0x00002D68,0xFFFFED21,0x00000498,0x00001A43,0xFFFFF843,0x000002F9,0x00001A43,0xFFFFF843,0x000002F9},
- {"0000001000010011111100001111111010011001000000100010000010100100",0x0000396E,0xFFFFE616,0x000005A9,0x00001A51,0xFFFFF850,0x000002FA,0x00001A51,0xFFFFF850,0x000002FA},
- {"0000001000010011111100001111111010011001000001000011000101000100",0x0000305C,0xFFFFED4B,0x0000046C,0x00001CF9,0xFFFFF7BA,0x00000304,0x00001CF9,0xFFFFF7BA,0x00000304},
- {"0000001000010011111100001111110101000010110110100100000101100100",0x0000343C,0xFFFFE869,0x00000559,0x00001CE2,0xFFFFF614,0x00000359,0x00001CE2,0xFFFFF614,0x00000359},
- {"0000001000010011111100001111111010011001000110000011100101100100",0x00002782,0xFFFFF1FE,0x000003D9,0x000015DC,0xFFFFFB8B,0x00000290,0x000015DC,0xFFFFFB8B,0x00000290},
- {"0000001000010011111100001111111010011001000110000001100011000100",0x00002B9C,0xFFFFEF63,0x00000443,0x00001369,0xFFFFFD51,0x00000244,0x00001369,0xFFFFFD51,0x00000244},
- {"0000001000010011111100001111111010011001000010100010000010000100",0x000035F8,0xFFFFE743,0x00000592,0x000018D8,0xFFFFF8EE,0x000002E4,0x000018D8,0xFFFFF8EE,0x000002E4},
- {"0000001000010011111010101001010011011110000001100010100001000100",0x00002B72,0xFFFFEF1E,0x0000043C,0x00002647,0xFFFFF092,0x0000043E,0x00002647,0xFFFFF092,0x0000043E},
- {"0000001000010011111100001111111010011001000100000010000110000100",0x00002EC9,0xFFFFEC5F,0x000004B8,0x000018B6,0xFFFFF936,0x000002D8,0x000018B6,0xFFFFF936,0x000002D8},
- {"0000001000010011111100001111111010011001000001100100000010000100",0x000038A7,0xFFFFE6AC,0x00000589,0x00001C42,0xFFFFF70B,0x00000329,0x00001C42,0xFFFFF70B,0x00000329},
- {"0000001000010011111100001111111010011001001100000000100010100100",0x00002F6B,0xFFFFEBF6,0x000004CF,0x000018AE,0xFFFFF928,0x000002E3,0x000018AE,0xFFFFF928,0x000002E3},
- {"0000001000010011111100001111110101000010110110100101000100000100",0x000029CD,0xFFFFEEE1,0x00000459,0x00001AB5,0xFFFFF76F,0x00000324,0x00001AB5,0xFFFFF76F,0x00000324},
- {"0000001000010011111010101001010011011110000001100011100011000100",0x00003921,0xFFFFE71D,0x00000577,0x00001646,0xFFFFFB24,0x00000293,0x00001646,0xFFFFFB24,0x00000293},
- {"0000001000010011111010101001010011011110000001000100000101100100",0x00003940,0xFFFFE521,0x000005E8,0x00001947,0xFFFFF839,0x0000030D,0x00001947,0xFFFFF839,0x0000030D},
- {"0000001000010011111100001111110101000010110100100100000101100100",0x00003DCA,0xFFFFE211,0x00000659,0x0000250E,0xFFFFF072,0x00000443,0x0000250E,0xFFFFF072,0x00000443},
- {"0000001000010011111100001111111010011001000011000000100100000100",0x00002E95,0xFFFFEC20,0x000004C9,0x000015B4,0xFFFFFAD3,0x0000029D,0x000015B4,0xFFFFFAD3,0x0000029D},
- {"0000001000010011111100001111111010011001000001000001000010000100",0x00002C11,0xFFFFEE6E,0x00000468,0x00001901,0xFFFFF924,0x000002E7,0x00001901,0xFFFFF924,0x000002E7},
- {"0000001000010011111010101001010011011110000001100010000100000100",0x0000293F,0xFFFFF158,0x000003E6,0x0000183F,0xFFFFF9F6,0x000002D2,0x0000183F,0xFFFFF9F6,0x000002D2},
- {"0000001000010011111100001111111010011001000011100001000100000100",0x00002A67,0xFFFFEF34,0x0000043E,0x00001C6F,0xFFFFF6F1,0x0000032B,0x00001C6F,0xFFFFF6F1,0x0000032B},
- {"0000001000010011111010101001010011011110000001100101000100100100",0x00002F8D,0xFFFFEB77,0x000004DA,0x00001C0D,0xFFFFF627,0x00000365,0x00001C0D,0xFFFFF627,0x00000365},
- {"0000001000010011111100001111111010011001000011000011100011000100",0x00003476,0xFFFFEA5B,0x000004E7,0x00001DBF,0xFFFFF6C7,0x00000333,0x00001DBF,0xFFFFF6C7,0x00000333},
- {"0000001000010011111100001111111010011001000011100000100101000100",0x00003336,0xFFFFE92F,0x00000546,0x00001614,0xFFFFFAE0,0x00000296,0x00001614,0xFFFFFAE0,0x00000296},
- {"0000001000010011111100001111111010011001000101100010000101100100",0x00002513,0xFFFFF323,0x000003BC,0x000016DB,0xFFFFFA79,0x000002CD,0x000016DB,0xFFFFFA79,0x000002CD},
- {"0000001000010011111100001111111010011001000010100010100101000100",0x000035A7,0xFFFFE78E,0x00000584,0x00001B0D,0xFFFFF77D,0x0000031F,0x00001B0D,0xFFFFF77D,0x0000031F},
- {"0000001000010011111100001111111010011001001100100011100011100100",0x00003171,0xFFFFEB98,0x000004C6,0x00001C76,0xFFFFF71F,0x0000032F,0x00001C76,0xFFFFF71F,0x0000032F},
- {"0000001000010011111100001111110101000010110110100001000010000100",0x00002C52,0xFFFFED2E,0x000004A7,0x00002182,0xFFFFF2F4,0x000003E4,0x00002182,0xFFFFF2F4,0x000003E4},
- {"0000001000010011111100001111111010011001000100000010100100100100",0x000032E1,0xFFFFEB39,0x000004D0,0x00001B55,0xFFFFF859,0x000002FA,0x00001B55,0xFFFFF859,0x000002FA},
- {"0000001000010011111100001111111010011001000110000100100010100100",0x000029B6,0xFFFFEFF7,0x00000430,0x0000151B,0xFFFFFBC6,0x0000027F,0x0000151B,0xFFFFFBC6,0x0000027F},
- {"0000001000010011111100001111110101000010110110100001100101100100",0x00002FF7,0xFFFFEB67,0x000004DA,0x000020E9,0xFFFFF363,0x000003CE,0x000020E9,0xFFFFF363,0x000003CE},
- {"0000001000010011111100001111110101000010110110100101000100100100",0x00003CDD,0xFFFFE2B2,0x00000649,0x00001B18,0xFFFFF739,0x00000329,0x00001B18,0xFFFFF739,0x00000329},
- {"0000001000010011111100001111111010011001000001100010100010100100",0x00003C82,0xFFFFE5C6,0x0000058E,0x00001F3F,0xFFFFF5AD,0x00000361,0x00001F3F,0xFFFFF5AD,0x00000361},
- {"0000001000010011111100001111110101000010110111000100000010000100",0x0000319B,0xFFFFEA15,0x0000051B,0x00001CC9,0xFFFFF62E,0x00000358,0x00001CC9,0xFFFFF62E,0x00000358},
- {"0000001000010011111010101001010011011110000001100011100011100100",0x000032B6,0xFFFFEB2B,0x000004D6,0x000018E0,0xFFFFF966,0x000002DE,0x000018E0,0xFFFFF966,0x000002DE},
- {"0000001000010011111010101001010011011110000000100011100110000100",0x0000300A,0xFFFFEBA6,0x000004D1,0x00001CFD,0xFFFFF5F6,0x0000036D,0x00001CFD,0xFFFFF5F6,0x0000036D},
- {"0000001000010011111100001111110101000010110110000010100110000100",0x000026A9,0xFFFFF15D,0x00000400,0x00001561,0xFFFFFB1F,0x000002A0,0x00001561,0xFFFFFB1F,0x000002A0},
- {"0000001000010011111100001111111010011001000011100101000100100100",0x00003123,0xFFFFEAD2,0x000004FA,0x000018CB,0xFFFFF8F5,0x000002EC,0x000018CB,0xFFFFF8F5,0x000002EC},
- {"0000001000010011111100001111111010011001000110000100000011000100",0x00003577,0xFFFFE935,0x00000533,0x000016CD,0xFFFFFB44,0x00000289,0x000016CD,0xFFFFFB44,0x00000289},
- {"0000001000010011111100001111111010011001001010000010000110000100",0x00002875,0xFFFFF170,0x000003F3,0x00001567,0xFFFFFBD5,0x00000289,0x00001567,0xFFFFFBD5,0x00000289},
- {"0000001000010011111100001111111010011001000010000100000010000100",0x00003AE2,0xFFFFE538,0x000005C1,0x00001CB4,0xFFFFF6A3,0x0000033C,0x00001CB4,0xFFFFF6A3,0x0000033C},
- {"0000001000010011111100001111111010011001000011000011100011100100",0x000031DF,0xFFFFEC2A,0x000004A3,0x00001EF0,0xFFFFF626,0x00000352,0x00001EF0,0xFFFFF626,0x00000352},
- {"0000001000010011111100001111110101000010110100100101000101000100",0x00004A6A,0xFFFFDB15,0x00000758,0x000027F3,0xFFFFEEEE,0x00000479,0x000027F3,0xFFFFEEEE,0x00000479},
- {"0000001000010011111010101001010011011110000001100011100100000100",0x00002BB9,0xFFFFEF5D,0x00000433,0x00001589,0xFFFFFB57,0x00000295,0x00001589,0xFFFFFB57,0x00000295},
- {"0000001000010011111100001111111010011001000001000010000101100100",0x000033A0,0xFFFFE98F,0x00000528,0x00001CB4,0xFFFFF706,0x0000032D,0x00001CB4,0xFFFFF706,0x0000032D},
- {"0000001000010011111100001111111010011001000101100011000001100100",0x0000248E,0xFFFFF380,0x000003AC,0x000016EA,0xFFFFFA6C,0x000002CE,0x000016EA,0xFFFFFA6C,0x000002CE},
- {"0000001000010011111100001111111010011001000000100010000110100100",0x00002FE2,0xFFFFEB2F,0x000004E9,0x00001D4E,0xFFFFF56B,0x00000380,0x00001D4E,0xFFFFF56B,0x00000380},
- {"0000001000010011111100001111111010011001000010100010100010000100",0x00003283,0xFFFFE9E7,0x0000051D,0x00000694,0xFFFFFD32,0x000003C3,0x00000694,0xFFFFFD32,0x000003C3},
- {"0000001000010011111100001111110101000010110110000101000011000100",0x00002EE4,0xFFFFEBFD,0x000004D3,0x0000151A,0xFFFFFAF6,0x000002A4,0x0000151A,0xFFFFFAF6,0x000002A4},
- {"0000001000010011111100001111110101000010110111000001100011100100",0x0000302D,0xFFFFEB7F,0x000004DA,0x00001E6D,0xFFFFF54B,0x00000380,0x00001E6D,0xFFFFF54B,0x00000380},
- {"0000001000010011111100001111110101000010110110100101000011000100",0x000033DA,0xFFFFE7FB,0x0000057F,0x00001DED,0xFFFFF50E,0x0000038D,0x00001DED,0xFFFFF50E,0x0000038D},
- {"0000001000010011111100001111111010011001001011000100000010000100",0x000030B5,0xFFFFEBB8,0x000004C4,0x00001C3F,0xFFFFF726,0x0000032A,0x00001C3F,0xFFFFF726,0x0000032A},
- {"0000001000010011111100001111111010011001000010000011000111000100",0x00003BBD,0xFFFFE55C,0x000005B8,0x000019DB,0xFFFFF8BB,0x000002EF,0x000019DB,0xFFFFF8BB,0x000002EF},
- {"0000001000010011111100001111111010011001000011100011100010000100",0x00002964,0xFFFFF051,0x0000040E,0x000025CD,0xFFFFF11B,0x0000041F,0x000025CD,0xFFFFF11B,0x0000041F},
- {"0000001000010011111100001111110101000010110111000100100010000100",0x000033F5,0xFFFFE863,0x00000560,0x00001BCE,0xFFFFF689,0x0000034B,0x00001BCE,0xFFFFF689,0x0000034B},
- {"0000001000010011111100001111111010011001000010100010100001100100",0x00003294,0xFFFFE924,0x00000548,0x00001D41,0xFFFFF580,0x0000037D,0x00001D41,0xFFFFF580,0x0000037D},
- {"0000001000010011111100001111110101000010110111000011100110100100",0x000034FB,0xFFFFE7FE,0x0000056D,0x00001CB1,0xFFFFF635,0x00000357,0x00001CB1,0xFFFFF635,0x00000357},
- {"0000001000010011111100001111111010011001000010100001000010100100",0x00002E28,0xFFFFEBB9,0x000004E0,0x00001B20,0xFFFFF6E3,0x0000033C,0x00001B20,0xFFFFF6E3,0x0000033C},
- {"0000001000010011111100001111110101000010110110100001100100000100",0x00002799,0xFFFFF0F4,0x000003FC,0x00001C9D,0xFFFFF6A1,0x00000345,0x00001C9D,0xFFFFF6A1,0x00000345},
- {"0000001000010011111100001111111010011001000001100100000100000100",0x00003AEA,0xFFFFE5DB,0x0000059D,0x00001B61,0xFFFFF7F0,0x00000301,0x00001B61,0xFFFFF7F0,0x00000301},
- {"0000001000010011111010101001010011011110000001000001100110000100",0x000031F6,0xFFFFEAB8,0x000004F3,0x00001D90,0xFFFFF622,0x00000359,0x00001D90,0xFFFFF622,0x00000359},
- {"0000001000010011111100001111111010011001000011000100000001100100",0x000031B8,0xFFFFEA61,0x0000050F,0x0000199D,0xFFFFF87C,0x000002FD,0x0000199D,0xFFFFF87C,0x000002FD},
- {"0000001000010011111100001111110101000010110100100011000101000100",0x00004514,0xFFFFDDFF,0x000006F6,0x000022CD,0xFFFFF29F,0x000003D9,0x000022CD,0xFFFFF29F,0x000003D9},
- {"0000001000010011111010101001010011011110000001000011000101100100",0x00002F30,0xFFFFECB8,0x000004A0,0x00001B07,0xFFFFF7E2,0x00000313,0x00001B07,0xFFFFF7E2,0x00000313},
- {"0000001000010011111100001111110101000010110111000011000010100100",0x0000383B,0xFFFFE702,0x00000581,0x00001A08,0xFFFFF8CA,0x000002E2,0x00001A08,0xFFFFF8CA,0x000002E2},
- {"0000001000010011111100001111111010011001000000100010000101100100",0x00002CC5,0xFFFFEDF8,0x00000465,0x00001F47,0xFFFFF4B2,0x00000393,0x00001F47,0xFFFFF4B2,0x00000393},
- {"0000001000010011111100001111111010011001000101100010000111000100",0x00002304,0xFFFFF453,0x00000384,0x0000170A,0xFFFFFA3F,0x000002CE,0x0000170A,0xFFFFFA3F,0x000002CE},
- {"0000001000010011111100001111111010011001000010100101000100100100",0x0000337E,0xFFFFE850,0x0000056E,0x00001BDD,0xFFFFF668,0x00000353,0x00001BDD,0xFFFFF668,0x00000353},
- {"0000001000010011111100001111111010011001000011100100100100100100",0x00002E2F,0xFFFFEC9B,0x000004AE,0x00001C4D,0xFFFFF6D3,0x00000338,0x00001C4D,0xFFFFF6D3,0x00000338},
- {"0000001000010011111010101001010011011110000001100001000100100100",0x00002DDD,0xFFFFEDA4,0x00000477,0x00002010,0xFFFFF4BB,0x00000390,0x00002010,0xFFFFF4BB,0x00000390},
- {"0000001000010011111100001111110101000010110110100100100011100100",0x0000290C,0xFFFFEF61,0x00000445,0x00002133,0xFFFFF324,0x000003D8,0x00002133,0xFFFFF324,0x000003D8},
- {"0000001000010011111100001111111010011001000001100010100100100100",0x0000371E,0xFFFFE8D5,0x00000524,0x00001C3A,0xFFFFF7AE,0x00000314,0x00001C3A,0xFFFFF7AE,0x00000314},
- {"0000001000010011111100001111110101000010110110000011100011100100",0x00002A58,0xFFFFF007,0x00000429,0x000018A6,0xFFFFF98F,0x000002E1,0x000018A6,0xFFFFF98F,0x000002E1},
- {"0000001000010011111100001111111010011001000000100011000010000100",0x00002FED,0xFFFFEC48,0x000004AA,0x00001E9D,0xFFFFF584,0x00000370,0x00001E9D,0xFFFFF584,0x00000370},
- {"0000001000010011111100001111111010011001000110000001100010000100",0x00002829,0xFFFFF15F,0x000003F7,0x0000157E,0xFFFFFBD4,0x00000282,0x0000157E,0xFFFFFBD4,0x00000282},
- {"0000001000010011111100001111111010011001000100000001100100100100",0x000030CF,0xFFFFEB8D,0x000004CE,0x00001A4C,0xFFFFF868,0x000002F7,0x00001A4C,0xFFFFF868,0x000002F7},
- {"0000001000010011111100001111110101000010110110100010000010000100",0x00002C8F,0xFFFFEDD2,0x0000047D,0x00001CCE,0xFFFFF6A1,0x00000343,0x00001CCE,0xFFFFF6A1,0x00000343},
- {"0000001000010011111100001111111010011001000110000010000101100100",0x00002A84,0xFFFFEFBA,0x0000043E,0x000015EF,0xFFFFFB4B,0x0000029E,0x000015EF,0xFFFFFB4B,0x0000029E},
- {"0000001000010011111100001111111010011001000011000010100010100100",0x000034CA,0xFFFFEA08,0x000004FF,0x00001C19,0xFFFFF7ED,0x00000309,0x00001C19,0xFFFFF7ED,0x00000309},
- {"0000001000010011111100001111111010011001000101100011100110100100",0x00002187,0xFFFFF4B0,0x0000037E,0x0000154A,0xFFFFFB0C,0x000002AE,0x0000154A,0xFFFFFB0C,0x000002AE},
- {"0000001000010011111100001111110101000010110110100011100001000100",0x00002F4F,0xFFFFEB3C,0x000004F8,0x0000181F,0xFFFFF92D,0x000002DF,0x0000181F,0xFFFFF92D,0x000002DF},
- {"0000001000010011111100001111111010011001000001000001000011100100",0x0000290C,0xFFFFF0B1,0x000003FC,0x00001DB0,0xFFFFF636,0x00000355,0x00001DB0,0xFFFFF636,0x00000355},
- {"0000001000010011111100001111111010011001000010100001000001100100",0x000034C1,0xFFFFE888,0x0000055A,0x000019BF,0xFFFFF881,0x000002FB,0x000019BF,0xFFFFF881,0x000002FB},
- {"0000001000010011111100001111110101000010110111000001100011000100",0x00003139,0xFFFFEA98,0x00000504,0x000019F2,0xFFFFF820,0x0000030B,0x000019F2,0xFFFFF820,0x0000030B},
- {"0000001000010011111100001111110101000010110110000011000101000100",0x00002CAC,0xFFFFEEB2,0x00000458,0x0000152C,0xFFFFFBEF,0x0000027B,0x0000152C,0xFFFFFBEF,0x0000027B},
- {"0000001000010011111100001111111010011001001011000011100011100100",0x00003577,0xFFFFE99C,0x0000050D,0x00001E64,0xFFFFF679,0x0000033F,0x00001E64,0xFFFFF679,0x0000033F},
- {"0000001000010011111100001111110101000010110110100100000100000100",0x0000263A,0xFFFFF1E4,0x000003D4,0x00001F68,0xFFFFF4ED,0x00000386,0x00001F68,0xFFFFF4ED,0x00000386},
- {"0000001000010011111100001111110101000010110110000001100110000100",0x00002CE9,0xFFFFED63,0x00000497,0x00001810,0xFFFFF94D,0x000002E3,0x00001810,0xFFFFF94D,0x000002E3},
- {"0000001000010011111010101001010011011110000001000100000100000100",0x0000318A,0xFFFFEAC8,0x000004F5,0x0000195C,0xFFFFF896,0x000002FB,0x0000195C,0xFFFFF896,0x000002FB},
- {"0000001000010011111100001111110101000010110110000011100100000100",0x00002C41,0xFFFFEEC6,0x0000045D,0x000017DD,0xFFFFFA16,0x000002CB,0x000017DD,0xFFFFFA16,0x000002CB},
- {"0000001000010011111100001111111010011001000000100011000110100100",0x00002DD4,0xFFFFEC98,0x000004AD,0x00001BD7,0xFFFFF69F,0x00000347,0x00001BD7,0xFFFFF69F,0x00000347},
- {"0000001000010011111100001111110101000010110110100011100101000100",0x00003351,0xFFFFE9B2,0x0000051A,0x00001CA1,0xFFFFF6A4,0x00000341,0x00001CA1,0xFFFFF6A4,0x00000341},
- {"0000001000010011111100001111111010011001000000100001000100000100",0x0000322D,0xFFFFE9BE,0x00000527,0x00001CF9,0xFFFFF5EB,0x00000366,0x00001CF9,0xFFFFF5EB,0x00000366},
- {"0000001000010011111100001111111010011001000011000010100011000100",0x00003678,0xFFFFE9A8,0x00000503,0x00001AD4,0xFFFFF8F6,0x000002E3,0x00001AD4,0xFFFFF8F6,0x000002E3},
- {"0000001000010011111100001111111010011001000101100001100100100100",0x0000260E,0xFFFFF2C1,0x000003CA,0x00001139,0xFFFFFE48,0x00000236,0x00001139,0xFFFFFE48,0x00000236},
- {"0000001000010011111100001111111010011001000010100010000101100100",0x000033D3,0xFFFFE872,0x00000565,0x00001B72,0xFFFFF713,0x00000332,0x00001B72,0xFFFFF713,0x00000332},
- {"0000001000010011111100001111111010011001001100100011100001000100",0x0000309B,0xFFFFEB42,0x000004E4,0x00001918,0xFFFFF8C8,0x000002F2,0x00001918,0xFFFFF8C8,0x000002F2},
- {"0000001000010011111100001111111010011001000110000010100001100100",0x000028B8,0xFFFFF105,0x00000402,0x000018BB,0xFFFFF9BC,0x000002D3,0x000018BB,0xFFFFF9BC,0x000002D3},
- {"0000001000010011111100001111111010011001000010100001100010000100",0x00003123,0xFFFFE9D1,0x00000534,0x00001B19,0xFFFFF6FE,0x0000033C,0x00001B19,0xFFFFF6FE,0x0000033C},
- {"0000001000010011111100001111111010011001000000100010000101000100",0x00003216,0xFFFFEA8E,0x000004F6,0x00001F72,0xFFFFF4CE,0x0000038B,0x00001F72,0xFFFFF4CE,0x0000038B},
- {"0000001000010011111100001111111010011001000101100010100101100100",0x00002564,0xFFFFF32D,0x000003B6,0x00001685,0xFFFFFADB,0x000002BB,0x00001685,0xFFFFFADB,0x000002BB},
- {"0000001000010011111100001111110101000010110110100010100100100100",0x00002E60,0xFFFFED13,0x00000497,0x00001CA5,0xFFFFF6B9,0x00000346,0x00001CA5,0xFFFFF6B9,0x00000346},
- {"0000001000010011111100001111111010011001000011100011100110100100",0x0000336D,0xFFFFE934,0x0000053B,0x00001B3E,0xFFFFF763,0x00000327,0x00001B3E,0xFFFFF763,0x00000327},
- {"0000001000010011111100001111111010011001000100000001000010000100",0x0000274A,0xFFFFF119,0x000003FA,0x00001D75,0xFFFFF5CD,0x0000036F,0x00001D75,0xFFFFF5CD,0x0000036F},
- {"0000001000010011111100001111110101000010110110100010000101100100",0x0000366B,0xFFFFE70A,0x0000059A,0x00001ED8,0xFFFFF501,0x00000389,0x00001ED8,0xFFFFF501,0x00000389},
- {"0000001000010011111100001111111010011001001000100011100101100100",0x00003164,0xFFFFEAB4,0x000004FA,0x00001C52,0xFFFFF6E0,0x00000336,0x00001C52,0xFFFFF6E0,0x00000336},
- {"0000001000010011111100001111110101000010110100100011000001100100",0x00004224,0xFFFFDF7F,0x000006C1,0x00002A52,0xFFFFED5E,0x000004BB,0x00002A52,0xFFFFED5E,0x000004BB},
- {"0000001000010011111100001111111010011001000100000010100001100100",0x000030E3,0xFFFFEB07,0x000004ED,0x00001FD3,0xFFFFF46D,0x000003A1,0x00001FD3,0xFFFFF46D,0x000003A1},
- {"0000001000010011111100001111110101000010110110000010100010000100",0x00002AEB,0xFFFFEF1B,0x00000454,0x00001829,0xFFFFF995,0x000002DD,0x00001829,0xFFFFF995,0x000002DD},
- {"0000001000010011111100001111110101000010110111000101000011100100",0x0000346B,0xFFFFE7A2,0x0000058B,0x000020C5,0xFFFFF2E8,0x000003EC,0x000020C5,0xFFFFF2E8,0x000003EC},
- {"0000001000010011111100001111110101000010110111000100000101100100",0x000039CF,0xFFFFE5D7,0x000005A9,0x00001D66,0xFFFFF5D6,0x00000366,0x00001D66,0xFFFFF5D6,0x00000366},
- {"0000001000010011111100001111111010011001000001000001100011100100",0x000034AC,0xFFFFE9AE,0x00000515,0x00001A28,0xFFFFF904,0x000002DC,0x00001A28,0xFFFFF904,0x000002DC},
- {"0000001000010011111100001111110101000010110111000010000010000100",0x00002D68,0xFFFFED21,0x00000498,0x00001C6F,0xFFFFF686,0x0000034C,0x00001C6F,0xFFFFF686,0x0000034C},
- {"0000001000010011111100001111111010011001000010000010000011000100",0x0000328B,0xFFFFEBA1,0x000004B4,0x00001DA3,0xFFFFF683,0x00000349,0x00001DA3,0xFFFFF683,0x00000349},
- {"0000001000010011111100001111111010011001000110000010100011000100",0x000027DC,0xFFFFF295,0x000003BF,0x000019C1,0xFFFFF98E,0x000002E8,0x000019C1,0xFFFFF98E,0x000002E8},
- {"0000001000010011111100001111111010011001000110000100000010000100",0x00002756,0xFFFFF1D7,0x000003DF,0x000015D9,0xFFFFFB51,0x00000298,0x000015D9,0xFFFFFB51,0x00000298},
- {"0000001000010011111100001111111010011001000010000011100010000100",0x00003526,0xFFFFE907,0x00000526,0x000017AB,0xFFFFFA12,0x000002AB,0x000017AB,0xFFFFFA12,0x000002AB},
- {"0000001000010011111100001111110101000010110110100001100011100100",0x0000351B,0xFFFFE8B7,0x00000540,0x00001A86,0xFFFFF821,0x00000303,0x00001A86,0xFFFFF821,0x00000303},
- {"0000001000010011111100001111111010011001000101100100000101000100",0x000024B2,0xFFFFF34E,0x000003B1,0x000018E2,0xFFFFF926,0x000002FC,0x000018E2,0xFFFFF926,0x000002FC},
- {"0000001000010011111100001111110101000010110110000010100010100100",0x00002F36,0xFFFFED5D,0x00000486,0x0000157A,0xFFFFFB85,0x00000293,0x0000157A,0xFFFFFB85,0x00000293},
- {"0000001000010011111100001111110101000010110111000101000011000100",0x00003A6E,0xFFFFE456,0x000005FD,0x00001F68,0xFFFFF3D1,0x000003C3,0x00001F68,0xFFFFF3D1,0x000003C3},
- {"0000001000010011111100001111111010011001000010100011000110100100",0x00002BC3,0xFFFFED2D,0x000004A7,0x00001C3F,0xFFFFF609,0x00000364,0x00001C3F,0xFFFFF609,0x00000364},
- {"0000001000010011111100001111111010011001000011100010000010000100",0x000032E1,0xFFFFEA83,0x000004F6,0x00001B37,0xFFFFF842,0x000002F5,0x00001B37,0xFFFFF842,0x000002F5},
- {"0000001000010011111100001111110101000010110110000011000110000100",0x000028E3,0xFFFFF07F,0x00000412,0x00001676,0xFFFFFA68,0x000002BE,0x00001676,0xFFFFFA68,0x000002BE},
- {"0000001000010011111100001111110101000010110100100001000100000100",0x0000444C,0xFFFFDDAD,0x00000712,0x00002634,0xFFFFEF89,0x0000046C,0x00002634,0xFFFFEF89,0x0000046C},
- {"0000001000010011111100001111111010011001000001000001100011000100",0x00003121,0xFFFFEBBB,0x000004C6,0x00001C98,0xFFFFF72B,0x0000032D,0x00001C98,0xFFFFF72B,0x0000032D},
- {"0000001000010011111100001111110101000010110110000100000010100100",0x00002C31,0xFFFFEDC4,0x00000490,0x0000162D,0xFFFFFA8E,0x000002B4,0x0000162D,0xFFFFFA8E,0x000002B4},
- {"0000001000010011111100001111110101000010110110100001100011000100",0x00002749,0xFFFFF112,0x000003FC,0x00001C85,0xFFFFF6B8,0x00000342,0x00001C85,0xFFFFF6B8,0x00000342},
- {"0000001000010011111100001111111010011001000001000100000100000100",0x00003159,0xFFFFEB99,0x000004C2,0x00001BD0,0xFFFFF7CA,0x00000307,0x00001BD0,0xFFFFF7CA,0x00000307},
- {"0000001000010011111100001111111010011001000101100100000101100100",0x00002610,0xFFFFF1FD,0x000003EC,0x000016BE,0xFFFFFA53,0x000002CB,0x000016BE,0xFFFFFA53,0x000002CB},
- {"0000001000010011111100001111111010011001000000100011000110000100",0x000037B5,0xFFFFE63D,0x000005B5,0x00002285,0xFFFFF25D,0x000003F7,0x00002285,0xFFFFF25D,0x000003F7},
- {"0000001000010011111100001111111010011001000010100010100010100100",0x00002FEE,0xFFFFEB47,0x000004EF,0x00001CBE,0xFFFFF64E,0x00000358,0x00001CBE,0xFFFFF64E,0x00000358},
- {"0000001000010011111100001111111010011001000100000101000100000100",0x00002E90,0xFFFFEC48,0x000004C0,0x00001A47,0xFFFFF7D1,0x0000031A,0x00001A47,0xFFFFF7D1,0x0000031A},
- {"0000001000010011111100001111110101000010110110100100000010000100",0x000034AB,0xFFFFE84A,0x00000559,0x00001A72,0xFFFFF79A,0x0000031C,0x00001A72,0xFFFFF79A,0x0000031C},
- {"0000001000010011111100001111111010011001000110000011100010000100",0x00002F7B,0xFFFFECFC,0x0000049C,0x00001814,0xFFFFFA22,0x000002C2,0x00001814,0xFFFFFA22,0x000002C2},
- {"0000001000010011111100001111111010011001000000100001100101100100",0x00003618,0xFFFFE709,0x00000596,0x00001EBF,0xFFFFF482,0x000003A5,0x00001EBF,0xFFFFF482,0x000003A5},
- {"0000001000010011111010101001010011011110000000100100100100000100",0x0000341B,0xFFFFE8B2,0x0000054F,0x00001D26,0xFFFFF578,0x00000388,0x00001D26,0xFFFFF578,0x00000388},
- {"0000001000010011111100001111111010011001000100000010000101000100",0x000030F6,0xFFFFEB89,0x000004CD,0x000019C0,0xFFFFF8CC,0x000002E6,0x000019C0,0xFFFFF8CC,0x000002E6},
- {"0000001000010011111100001111111010011001001010000100000110100100",0x00002B76,0xFFFFEF6C,0x00000444,0x00001563,0xFFFFFBBE,0x0000028D,0x00001563,0xFFFFFBBE,0x0000028D},
- {"0000001000010011111100001111110101000010110110000001100001100100",0x00002BA2,0xFFFFEE31,0x0000047F,0x00001A3D,0xFFFFF7F3,0x00000320,0x00001A3D,0xFFFFF7F3,0x00000320},
- {"0000001000010011111100001111111010011001001011000100100011100100",0x00003545,0xFFFFE87A,0x0000054A,0x00001B5A,0xFFFFF7B0,0x0000030C,0x00001B5A,0xFFFFF7B0,0x0000030C},
- {"0000001000010011111010101001010011011110000001000010100101000100",0x00003879,0xFFFFE73F,0x00000578,0x00001649,0xFFFFFB57,0x00000283,0x00001649,0xFFFFFB57,0x00000283},
- {"0000001000010011111100001111110101000010110110000100000011000100",0x00002772,0xFFFFF0F1,0x00000410,0x0000142F,0xFFFFFBCF,0x00000287,0x0000142F,0xFFFFFBCF,0x00000287},
- {"0000001000010011111100001111110101000010110110100011000110000100",0x00003228,0xFFFFE98E,0x00000535,0x00001F48,0xFFFFF495,0x00000399,0x00001F48,0xFFFFF495,0x00000399},
- {"0000001000010011111100001111111010011001000011100100000011100100",0x00002887,0xFFFFF119,0x000003E8,0x000021AA,0xFFFFF3F5,0x000003A5,0x000021AA,0xFFFFF3F5,0x000003A5},
- {"0000001000010011111100001111110101000010110110100010100010100100",0x0000301F,0xFFFFEBB2,0x000004D2,0x00001C02,0xFFFFF736,0x0000032B,0x00001C02,0xFFFFF736,0x0000032B},
- {"0000001000010011111100001111111010011001000110000010000010100100",0x00002E13,0xFFFFEE3F,0x00000468,0x000016AC,0xFFFFFB32,0x0000029E,0x000016AC,0xFFFFFB32,0x0000029E},
- {"0000001000010011111100001111111010011001000001000100100100100100",0x00003478,0xFFFFE8F9,0x00000538,0x00001DAB,0xFFFFF645,0x00000345,0x00001DAB,0xFFFFF645,0x00000345},
- {"0000001000010011111100001111111010011001000001100000100011000100",0x000030C6,0xFFFFEB6C,0x000004D4,0x0000184A,0xFFFFF934,0x000002E1,0x0000184A,0xFFFFF934,0x000002E1},
- {"0000001000010011111100001111111010011001000010100010000001000100",0x00002F1B,0xFFFFEBD3,0x000004D3,0x000019E7,0xFFFFF813,0x0000030D,0x000019E7,0xFFFFF813,0x0000030D},
- {"0000001000010011111100001111111010011001000000100011100100000100",0x00003214,0xFFFFEAE9,0x000004E0,0x0000178F,0xFFFFFA1C,0x000002B1,0x0000178F,0xFFFFFA1C,0x000002B1},
- {"0000001000010011111100001111110101000010110111000011000101000100",0x0000399C,0xFFFFE738,0x0000055E,0x00001EA1,0xFFFFF5E7,0x0000035A,0x00001EA1,0xFFFFF5E7,0x0000035A},
- {"0000001000010011111100001111111010011001000001100101000011000100",0x00003A01,0xFFFFE5B2,0x000005B6,0x00001D95,0xFFFFF5D2,0x0000036A,0x00001D95,0xFFFFF5D2,0x0000036A},
- {"0000001000010011111100001111111010011001000001000011100010000100",0x0000310D,0xFFFFEB78,0x000004D0,0x00001C06,0xFFFFF76E,0x0000031A,0x00001C06,0xFFFFF76E,0x0000031A},
- {"0000001000010011111100001111111010011001000001100011100001100100",0x00003CD1,0xFFFFE42F,0x000005EB,0x00001933,0xFFFFF91F,0x000002D4,0x00001933,0xFFFFF91F,0x000002D4},
- {"0000001000010011111100001111110101000010110110100011000101100100",0x00003119,0xFFFFEB1B,0x000004E1,0x00001FC7,0xFFFFF46A,0x000003A2,0x00001FC7,0xFFFFF46A,0x000003A2},
- {"0000001000010011111010101001010011011110000001100100100010100100",0x0000390D,0xFFFFE566,0x000005D8,0x00001EC6,0xFFFFF4DC,0x00000391,0x00001EC6,0xFFFFF4DC,0x00000391},
- {"0000001000010011111100001111110101000010110110100001000011000100",0x00003446,0xFFFFE858,0x00000561,0x00001FDB,0xFFFFF3FF,0x000003B9,0x00001FDB,0xFFFFF3FF,0x000003B9},
- {"0000001000010011111100001111111010011001000001000100100100000100",0x000032BA,0xFFFFEA07,0x00000511,0x00001B25,0xFFFFF7C9,0x0000030D,0x00001B25,0xFFFFF7C9,0x0000030D},
- {"0000001000010011111100001111111010011001000011100001100001100100",0x00002CCF,0xFFFFEDE5,0x00000478,0x00001BC8,0xFFFFF761,0x00000326,0x00001BC8,0xFFFFF761,0x00000326},
- {"0000001000010011111100001111111010011001000001100010100110000100",0x0000400E,0xFFFFE1CB,0x00000652,0x00001AF8,0xFFFFF7B9,0x00000312,0x00001AF8,0xFFFFF7B9,0x00000312},
- {"0000001000010011111100001111111010011001000001000000100011100100",0x00002F24,0xFFFFEC2A,0x000004C7,0x00001B94,0xFFFFF748,0x00000333,0x00001B94,0xFFFFF748,0x00000333},
- {"0000001000010011111100001111110101000010110100100001100100100100",0x00003FDA,0xFFFFE1C1,0x0000064B,0x00002427,0xFFFFF180,0x0000040C,0x00002427,0xFFFFF180,0x0000040C},
- {"0000001000010011111100001111111010011001000010100001100011000100",0x00002F6B,0xFFFFEBA7,0x000004DD,0x00001C25,0xFFFFF6C1,0x00000344,0x00001C25,0xFFFFF6C1,0x00000344},
- {"0000001000010011111100001111111010011001000110000010000100000100",0x00002A53,0xFFFFF0EE,0x00000402,0x000017C6,0xFFFFFAA0,0x000002BF,0x000017C6,0xFFFFFAA0,0x000002BF},
- {"0000001000010011111100001111111010011001000100000101000101000100",0x000031F4,0xFFFFEA34,0x00000517,0x000016FF,0xFFFFFA4E,0x000002AC,0x000016FF,0xFFFFFA4E,0x000002AC},
- {"0000001000010011111100001111111010011001001100100010000101000100",0x00002E24,0xFFFFED46,0x00000489,0x00001712,0xFFFFFA5D,0x000002AC,0x00001712,0xFFFFFA5D,0x000002AC},
- {"0000001000010011111100001111111010011001000110000010100000100100",0x000028CD,0xFFFFF0E3,0x0000040E,0x00001606,0xFFFFFB37,0x000002A4,0x00001606,0xFFFFFB37,0x000002A4},
- {"0000001000010011111100001111111010011001000000100010000011000100",0x00003184,0xFFFFEB88,0x000004C3,0x000018DA,0xFFFFF939,0x000002DB,0x000018DA,0xFFFFF939,0x000002DB},
- {"0000001000010011111100001111111010011001000101100010000100100100",0x0000239B,0xFFFFF470,0x00000386,0x00001714,0xFFFFFA9F,0x000002C8,0x00001714,0xFFFFFA9F,0x000002C8},
- {"0000001000010011111100001111110101000010110111000011100011100100",0x00003641,0xFFFFE92B,0x00000515,0x00001BE2,0xFFFFF795,0x0000031B,0x00001BE2,0xFFFFF795,0x0000031B},
- {"0000001000010011111100001111111010011001001011000001000101000100",0x00003278,0xFFFFEA17,0x00000510,0x00001B71,0xFFFFF778,0x0000031D,0x00001B71,0xFFFFF778,0x0000031D},
- {"0000001000010011111100001111111010011001000001100010100001000100",0x000035B9,0xFFFFE8DA,0x0000052D,0x00001A6A,0xFFFFF83B,0x000002FF,0x00001A6A,0xFFFFF83B,0x000002FF},
- {"0000001000010011111100001111111010011001000011100001100011000100",0x00002E5E,0xFFFFED32,0x0000048B,0x00001E7D,0xFFFFF60E,0x0000034E,0x00001E7D,0xFFFFF60E,0x0000034E},
- {"0000001000010011111100001111111010011001000100000001100110100100",0x00003178,0xFFFFEA52,0x00000513,0x00001AD0,0xFFFFF793,0x0000031F,0x00001AD0,0xFFFFF793,0x0000031F},
- {"0000001000010011111100001111110101000010110101000100000100000100",0x00003A2C,0xFFFFE346,0x00000641,0x000023D0,0xFFFFF0CE,0x00000433,0x000023D0,0xFFFFF0CE,0x00000433},
- {"0000001000010011111100001111110101000010110110000001100011000100",0x000028FD,0xFFFFF02A,0x0000042B,0x0000152B,0xFFFFFB90,0x00000289,0x0000152B,0xFFFFFB90,0x00000289},
- {"0000001000010011111100001111111010011001000011100011000010000100",0x000030DE,0xFFFFEBDF,0x000004BE,0x00001CDC,0xFFFFF747,0x0000031C,0x00001CDC,0xFFFFF747,0x0000031C},
- {"0000001000010011111100001111111010011001000000100001100101000100",0x000036CB,0xFFFFE6EE,0x00000596,0x00002096,0xFFFFF3C2,0x000003BB,0x00002096,0xFFFFF3C2,0x000003BB},
- {"0000001000010011111100001111111010011001000011000100100011000100",0x00003172,0xFFFFEAC1,0x000004F4,0x00001C87,0xFFFFF6CD,0x00000337,0x00001C87,0xFFFFF6CD,0x00000337},
- {"0000001000010011111100001111110101000010110100100100100001100100",0x00004A18,0xFFFFDB34,0x00000758,0x0000213C,0xFFFFF3A2,0x000003AC,0x0000213C,0xFFFFF3A2,0x000003AC},
- {"0000001000010011111100001111111010011001000000100010000100000100",0x000031F3,0xFFFFEB73,0x000004C6,0x00001B23,0xFFFFF7CB,0x0000031A,0x00001B23,0xFFFFF7CB,0x0000031A},
- {"0000001000010011111100001111111010011001000010100010100100100100",0x000031C0,0xFFFFEABA,0x000004F7,0x00001A5A,0xFFFFF845,0x000002FF,0x00001A5A,0xFFFFF845,0x000002FF},
- {"0000001000010011111100001111111010011001000100000100100101000100",0x00003B77,0xFFFFE3B3,0x00000623,0x00001BCA,0xFFFFF6F8,0x00000333,0x00001BCA,0xFFFFF6F8,0x00000333},
- {"0000001000010011111100001111111010011001000010100011100101000100",0x000035AF,0xFFFFE76D,0x00000588,0x00001C16,0xFFFFF6AB,0x00000341,0x00001C16,0xFFFFF6AB,0x00000341},
- {"0000001000010011111010101001010011011110000001000011100011000100",0x000032AD,0xFFFFEA8E,0x000004F8,0x00001A3A,0xFFFFF832,0x0000030E,0x00001A3A,0xFFFFF832,0x0000030E},
- {"0000001000010011111100001111111010011001000100000100100010000100",0x00002E92,0xFFFFEBD2,0x000004DA,0x00001E04,0xFFFFF51E,0x0000038A,0x00001E04,0xFFFFF51E,0x0000038A},
- {"0000001000010011111100001111110101000010110101000100000010100100",0x00003E57,0xFFFFE0F7,0x0000068F,0x000021F1,0xFFFFF1C6,0x00000411,0x000021F1,0xFFFFF1C6,0x00000411},
- {"0000001000010011111100001111111010011001000010000010000110100100",0x00003598,0xFFFFE8BB,0x00000535,0x00001B62,0xFFFFF764,0x00000326,0x00001B62,0xFFFFF764,0x00000326},
- {"0000001000010011111100001111111010011001000010100011100010000100",0x00002B15,0xFFFFEDEC,0x00000487,0x00001E8B,0xFFFFF4AB,0x0000039F,0x00001E8B,0xFFFFF4AB,0x0000039F},
- {"0000001000010011111010101001010011011110000001100000100100000100",0x0000267E,0xFFFFF1A7,0x000003E1,0x000021C1,0xFFFFF2E9,0x000003EA,0x000021C1,0xFFFFF2E9,0x000003EA},
- {"0000001000010011111010101001010011011110000000100011100110100100",0x00002ED7,0xFFFFEC88,0x000004A6,0x00001DEC,0xFFFFF57C,0x00000378,0x00001DEC,0xFFFFF57C,0x00000378},
- {"0000001000010011111010101001010011011110000001000100000110100100",0x00003365,0xFFFFE946,0x00000536,0x000019E9,0xFFFFF7E0,0x0000031D,0x000019E9,0xFFFFF7E0,0x0000031D},
- {"0000001000010011111100001111111010011001000110000001100011100100",0x000029A4,0xFFFFF0FD,0x000003FE,0x0000163F,0xFFFFFB68,0x00000299,0x0000163F,0xFFFFFB68,0x00000299},
- {"0000001000010011111010101001010011011110000000100001100100000100",0x0000348D,0xFFFFE9F7,0x00000509,0x000017A0,0xFFFFFA59,0x000002B6,0x000017A0,0xFFFFFA59,0x000002B6},
- {"0000001000010011111100001111111010011001000001100001000011000100",0x00003144,0xFFFFEB23,0x000004D9,0x00001C9B,0xFFFFF664,0x00000351,0x00001C9B,0xFFFFF664,0x00000351},
- {"0000001000010011111010101001010011011110000001100010000011100100",0x00002E95,0xFFFFEE1A,0x00000463,0x00001707,0xFFFFFAB7,0x000002B3,0x00001707,0xFFFFFAB7,0x000002B3},
- {"0000001000010011111100001111110101000010110101000001100001100100",0x0000489C,0xFFFFDA43,0x000007AC,0x00002866,0xFFFFED6B,0x000004D0,0x00002866,0xFFFFED6B,0x000004D0},
- {"0000001000010011111100001111111010011001000101100001100001000100",0x00002895,0xFFFFF10A,0x0000040A,0x000013E9,0xFFFFFC9F,0x0000026E,0x000013E9,0xFFFFFC9F,0x0000026E},
- {"0000001000010011111100001111111010011001000001100001100101100100",0x000033A0,0xFFFFE9B1,0x00000510,0x00001D96,0xFFFFF5AE,0x0000036F,0x00001D96,0xFFFFF5AE,0x0000036F},
- {"0000001000010011111100001111111010011001000010000011100110000100",0x0000327C,0xFFFFEAEA,0x000004DD,0x00001D45,0xFFFFF649,0x00000356,0x00001D45,0xFFFFF649,0x00000356},
- {"0000001000010011111010101001010011011110000000100100100010100100",0x000031DF,0xFFFFE9AB,0x0000052F,0x000019C8,0xFFFFF7B7,0x00000321,0x000019C8,0xFFFFF7B7,0x00000321},
- {"0000001000010011111100001111111010011001000101100100000010100100",0x00002BCC,0xFFFFEEF4,0x0000045C,0x000015CD,0xFFFFFB58,0x0000029E,0x000015CD,0xFFFFFB58,0x0000029E},
- {"0000001000010011111100001111111010011001000001100011100011100100",0x00003534,0xFFFFEA10,0x000004EB,0x00001BB6,0xFFFFF7B9,0x00000314,0x00001BB6,0xFFFFF7B9,0x00000314},
- {"0000001000010011111100001111111010011001000001000001100110000100",0x00002F4F,0xFFFFEC35,0x000004B9,0x0000205D,0xFFFFF47F,0x00000392,0x0000205D,0xFFFFF47F,0x00000392},
- {"0000001000010011111100001111111010011001000011000010000010100100",0x00003295,0xFFFFEB1C,0x000004D6,0x000019C1,0xFFFFF931,0x000002D5,0x000019C1,0xFFFFF931,0x000002D5},
- {"0000001000010011111100001111111010011001000000100100000101000100",0x00003557,0xFFFFE7F7,0x00000568,0x00002342,0xFFFFF1F9,0x00000405,0x00002342,0xFFFFF1F9,0x00000405},
- {"0000001000010011111100001111111010011001000001000101000011000100",0x00003487,0xFFFFE872,0x0000055D,0x000019D7,0xFFFFF823,0x0000030C,0x000019D7,0xFFFFF823,0x0000030C},
- {"0000001000010011111100001111111010011001001011000011100101000100",0x0000378F,0xFFFFE7A6,0x00000566,0x00001875,0xFFFFFA04,0x000002AF,0x00001875,0xFFFFFA04,0x000002AF},
- {"0000001000010011111010101001010011011110000000100011000011100100",0x00002A67,0xFFFFF157,0x000003DD,0x000017BD,0xFFFFFA53,0x000002D1,0x000017BD,0xFFFFFA53,0x000002D1},
- {"0000001000010011111100001111110101000010110100100010000011100100",0x000030B5,0xFFFFEB32,0x000004D9,0x00002129,0xFFFFF38A,0x000003BB,0x00002129,0xFFFFF38A,0x000003BB},
- {"0000001000010011111100001111111010011001000001100001000010100100",0x00003786,0xFFFFE703,0x00000584,0x00001D63,0xFFFFF5DC,0x00000367,0x00001D63,0xFFFFF5DC,0x00000367},
- {"0000001000010011111100001111110101000010110110100010000011000100",0x0000346A,0xFFFFE93E,0x0000052C,0x00001B27,0xFFFFF79D,0x0000031F,0x00001B27,0xFFFFF79D,0x0000031F},
- {"0000001000010011111100001111111010011001000011100011000000100100",0x0000294E,0xFFFFF0A5,0x00000409,0x00001928,0xFFFFF93B,0x000002E6,0x00001928,0xFFFFF93B,0x000002E6},
- {"0000001000010011111100001111110101000010110101000001000011000100",0x00003E09,0xFFFFE0FF,0x00000694,0x000025A0,0xFFFFEF0F,0x0000048F,0x000025A0,0xFFFFEF0F,0x0000048F},
- {"0000001000010011111100001111111010011001000010100010100101100100",0x00003197,0xFFFFEA06,0x00000520,0x00001B42,0xFFFFF73B,0x0000032A,0x00001B42,0xFFFFF73B,0x0000032A},
- {"0000001000010011111100001111111010011001000101100001100001100100",0x000022CB,0xFFFFF3FC,0x000003A3,0x00001449,0xFFFFFBD0,0x00000297,0x00001449,0xFFFFFBD0,0x00000297},
- {"0000001000010011111100001111110101000010110110000010100101000100",0x00002A79,0xFFFFEFD2,0x00000433,0x00001585,0xFFFFFB92,0x0000028E,0x00001585,0xFFFFFB92,0x0000028E},
- {"0000001000010011111100001111111010011001000011000100000110000100",0x00003249,0xFFFFEA92,0x000004F4,0x000019CB,0xFFFFF8CF,0x000002E1,0x000019CB,0xFFFFF8CF,0x000002E1},
- {"0000001000010011111010101001010011011110000000100001100010100100",0x00002CEA,0xFFFFEE46,0x00000463,0x00001A5E,0xFFFFF83C,0x0000030D,0x00001A5E,0xFFFFF83C,0x0000030D},
- {"0000001000010011111100001111110101000010110111000101000101000100",0x00003AE2,0xFFFFE422,0x00000600,0x00001C65,0xFFFFF62F,0x0000034B,0x00001C65,0xFFFFF62F,0x0000034B},
- {"0000001000010011111100001111111010011001000110000001000110000100",0x000026A0,0xFFFFF1C2,0x000003F8,0x000010E5,0xFFFFFE56,0x0000022A,0x000010E5,0xFFFFFE56,0x0000022A},
- {"0000001000010011111100001111111010011001001010000010100110100100",0x00002A7B,0xFFFFF063,0x00000417,0x000016FC,0xFFFFFAD7,0x000002B1,0x000016FC,0xFFFFFAD7,0x000002B1},
- {"0000001000010011111100001111111010011001001100100001000011000100",0x00003092,0xFFFFEAB9,0x00000507,0x00001AE3,0xFFFFF783,0x00000323,0x00001AE3,0xFFFFF783,0x00000323},
- {"0000001000010011111100001111111010011001000001000011100011100100",0x00003265,0xFFFFEBE8,0x000004AA,0x00001D65,0xFFFFF73F,0x00000321,0x00001D65,0xFFFFF73F,0x00000321},
- {"0000001000010011111010101001010011011110000000100011000010000100",0x00002F14,0xFFFFECC2,0x000004A4,0x00001A8D,0xFFFFF7F3,0x0000031D,0x00001A8D,0xFFFFF7F3,0x0000031D},
- {"0000001000010011111100001111110101000010110111000001000011100100",0x000035FB,0xFFFFE6D3,0x000005AC,0x00001B19,0xFFFFF712,0x00000338,0x00001B19,0xFFFFF712,0x00000338},
- {"0000001000010011111100001111110101000010110110100010000100100100",0x00003519,0xFFFFE8CC,0x0000053A,0x00001A0F,0xFFFFF86E,0x000002F5,0x00001A0F,0xFFFFF86E,0x000002F5},
- {"0000001000010011111100001111111010011001001011000010000101000100",0x0000364C,0xFFFFE879,0x00000541,0x00001A42,0xFFFFF8BA,0x000002E2,0x00001A42,0xFFFFF8BA,0x000002E2},
- {"0000001000010011111010101001010011011110000000100001100011000100",0x000029BA,0xFFFFF09A,0x00000408,0x00001986,0xFFFFF8D9,0x000002FE,0x00001986,0xFFFFF8D9,0x000002FE},
- {"0000001000010011111100001111110101000010110110100011100011100100",0x00003507,0xFFFFE961,0x00000518,0x00001B79,0xFFFFF775,0x00000325,0x00001B79,0xFFFFF775,0x00000325},
- {"0000001000010011111100001111110101000010110111000011000110000100",0x00003AD5,0xFFFFE415,0x00000613,0x00001CB4,0xFFFFF66D,0x00000348,0x00001CB4,0xFFFFF66D,0x00000348},
- {"0000001000010011111100001111111010011001000101100100000011100100",0x000023D1,0xFFFFF42B,0x0000038F,0x00001546,0xFFFFFBA0,0x0000029F,0x00001546,0xFFFFFBA0,0x0000029F},
- {"0000001000010011111100001111111010011001000010100001100100100100",0x0000399E,0xFFFFE518,0x000005E7,0x00001990,0xFFFFF871,0x000002FB,0x00001990,0xFFFFF871,0x000002FB},
- {"0000001000010011111100001111110101000010110110000010100101100100",0x00002EDE,0xFFFFEC93,0x000004B8,0x0000152C,0xFFFFFBB3,0x0000027E,0x0000152C,0xFFFFFBB3,0x0000027E},
- {"0000001000010011111010101001010011011110000001000010100101100100",0x00003140,0xFFFFEBC9,0x000004BB,0x000016BE,0xFFFFFB0A,0x00000288,0x000016BE,0xFFFFFB0A,0x00000288},
- {"0000001000010011111100001111111010011001000001100100000001100100",0x000030F6,0xFFFFEB89,0x000004CD,0x0000185D,0xFFFFF95A,0x000002D9,0x0000185D,0xFFFFF95A,0x000002D9},
- {"0000001000010011111100001111111010011001000000100011100001000100",0x0000389C,0xFFFFE65A,0x000005A2,0x0000195D,0xFFFFF8C8,0x000002E8,0x0000195D,0xFFFFF8C8,0x000002E8},
- {"0000001000010011111100001111111010011001000001000010000100000100",0x0000362B,0xFFFFE9EC,0x000004F6,0x00001605,0xFFFFFC1C,0x00000263,0x00001605,0xFFFFFC1C,0x00000263},
- {"0000001000010011111100001111111010011001001010100001100101100100",0x00002946,0xFFFFF04F,0x00000426,0x000015BA,0xFFFFFB2F,0x000002A3,0x000015BA,0xFFFFFB2F,0x000002A3},
- {"0000001000010011111100001111111010011001000010000010000110000100",0x0000368E,0xFFFFE837,0x0000054A,0x000017D7,0xFFFFF9EB,0x000002BA,0x000017D7,0xFFFFF9EB,0x000002BA},
- {"0000001000010011111100001111110101000010110110100010100001000100",0x00002E74,0xFFFFEBE8,0x000004DA,0x00001DD6,0xFFFFF57E,0x00000379,0x00001DD6,0xFFFFF57E,0x00000379},
- {"0000001000010011111100001111111010011001000001000001100101000100",0x0000322D,0xFFFFEAA8,0x000004F5,0x00001B55,0xFFFFF7DD,0x0000030B,0x00001B55,0xFFFFF7DD,0x0000030B},
- {"0000001000010011111100001111111010011001000110000001100100000100",0x00002A29,0xFFFFF07B,0x00000416,0x00001671,0xFFFFFB3E,0x0000029F,0x00001671,0xFFFFFB3E,0x0000029F},
- {"0000001000010011111100001111110101000010110110100010000100000100",0x000030F6,0xFFFFEB89,0x000004CD,0x00001815,0xFFFFF9AE,0x000002C9,0x00001815,0xFFFFF9AE,0x000002C9},
- {"0000001000010011111100001111111010011001000011100001000011100100",0x0000265F,0xFFFFF1CB,0x000003D5,0x00001ED2,0xFFFFF539,0x0000037A,0x00001ED2,0xFFFFF539,0x0000037A},
- {"0000001000010011111100001111111010011001000101100010000110000100",0x000027A8,0xFFFFF10D,0x00000413,0x000014B5,0xFFFFFBA1,0x00000299,0x000014B5,0xFFFFFBA1,0x00000299},
- {"0000001000010011111100001111111010011001000001000011000001100100",0x00002CEE,0xFFFFEDF6,0x00000476,0x00001A99,0xFFFFF83E,0x00000305,0x00001A99,0xFFFFF83E,0x00000305},
- {"0000001000010011111100001111111010011001000001100100000011000100",0x0000346C,0xFFFFEA17,0x000004EF,0x00001D38,0xFFFFF69F,0x0000033D,0x00001D38,0xFFFFF69F,0x0000033D},
- {"0000001000010011111100001111110101000010110110100010100101000100",0x00002DBB,0xFFFFED35,0x00000490,0x000018C1,0xFFFFF930,0x000002DA,0x000018C1,0xFFFFF930,0x000002DA},
- {"0000001000010011111100001111111010011001000001000010100100100100",0x000038DF,0xFFFFE8A7,0x0000051E,0x00001B59,0xFFFFF915,0x000002D3,0x00001B59,0xFFFFF915,0x000002D3},
- {"0000001000010011111100001111111010011001000010000000100101000100",0x00003384,0xFFFFE979,0x00000524,0x00001AF3,0xFFFFF74C,0x0000032F,0x00001AF3,0xFFFFF74C,0x0000032F},
- {"0000001000010011111100001111111010011001000110000001100001100100",0x0000258B,0xFFFFF2AE,0x000003CB,0x0000190C,0xFFFFF93E,0x000002EF,0x0000190C,0xFFFFF93E,0x000002EF},
- {"0000001000010011111100001111111010011001000100000011100010000100",0x000034F1,0xFFFFE84B,0x0000055E,0x00001CB8,0xFFFFF670,0x0000034A,0x00001CB8,0xFFFFF670,0x0000034A},
- {"0000001000010011111100001111111010011001000011000010000100000100",0x000030FB,0xFFFFECD2,0x00000488,0x00001BF4,0xFFFFF821,0x00000302,0x00001BF4,0xFFFFF821,0x00000302},
- {"0000001000010011111100001111111010011001000001100011000001000100",0x000036A6,0xFFFFE815,0x00000556,0x000018FD,0xFFFFF925,0x000002DF,0x000018FD,0xFFFFF925,0x000002DF},
- {"0000001000010011111010101001010011011110000000100011000001000100",0x0000302A,0xFFFFEB79,0x000004E0,0x00001C11,0xFFFFF694,0x00000358,0x00001C11,0xFFFFF694,0x00000358},
- {"0000001000010011111100001111111010011001000110000001000100100100",0x00002555,0xFFFFF2C4,0x000003CB,0x000017E3,0xFFFFFA1F,0x000002CB,0x000017E3,0xFFFFFA1F,0x000002CB},
- {"0000001000010011111100001111111010011001000010100011000101100100",0x000032A3,0xFFFFE933,0x00000544,0x000019D3,0xFFFFF81A,0x00000306,0x000019D3,0xFFFFF81A,0x00000306},
- {"0000001000010011111100001111110101000010110110000101000100000100",0x00002B91,0xFFFFED81,0x000004A9,0x0000158B,0xFFFFFAB9,0x000002AC,0x0000158B,0xFFFFFAB9,0x000002AC},
- {"0000001000010011111100001111111010011001000011100010000011000100",0x00003537,0xFFFFE912,0x0000052C,0x00001C8A,0xFFFFF754,0x0000031B,0x00001C8A,0xFFFFF754,0x0000031B},
- {"0000001000010011111010101001010011011110000001100011000110000100",0x000032E1,0xFFFFEA5A,0x000004F9,0x000017B4,0xFFFFF9D9,0x000002C2,0x000017B4,0xFFFFF9D9,0x000002C2},
- {"0000001000010011111100001111110101000010110100100001000011000100",0x00003B76,0xFFFFE330,0x00000636,0x000026FB,0xFFFFEF06,0x00000481,0x000026FB,0xFFFFEF06,0x00000481},
- {"0000001000010011111100001111111010011001000001000010000101000100",0x0000320C,0xFFFFEB84,0x000004C3,0x00001A3A,0xFFFFF8E9,0x000002DF,0x00001A3A,0xFFFFF8E9,0x000002DF},
- {"0000001000010011111100001111111010011001000000100011100110000100",0x0000317D,0xFFFFEA1F,0x00000515,0x00002100,0xFFFFF31B,0x000003DD,0x00002100,0xFFFFF31B,0x000003DD},
- {"0000001000010011111100001111110101000010110101000011000101100100",0x00003DCB,0xFFFFE0B4,0x000006B4,0x00002160,0xFFFFF269,0x000003F0,0x00002160,0xFFFFF269,0x000003F0},
- {"0000001000010011111100001111111010011001000101100001100011000100",0x00002737,0xFFFFF218,0x000003E1,0x000015B5,0xFFFFFB8F,0x0000029C,0x000015B5,0xFFFFFB8F,0x0000029C},
- {"0000001000010011111010101001010011011110000000100011000110000100",0x0000318F,0xFFFFEB3F,0x000004D8,0x00001938,0xFFFFF8E9,0x000002EB,0x00001938,0xFFFFF8E9,0x000002EB},
- {"0000001000010011111100001111111010011001000100000100100011000100",0x000031BD,0xFFFFE9DE,0x00000527,0x000018A7,0xFFFFF8CA,0x000002ED,0x000018A7,0xFFFFF8CA,0x000002ED},
- {"0000001000010011111100001111110101000010110110100011100010000100",0x00002F77,0xFFFFEC2F,0x000004B4,0x00001D25,0xFFFFF61B,0x0000035D,0x00001D25,0xFFFFF61B,0x0000035D},
- {"0000001000010011111100001111111010011001000011100100100100000100",0x00002CCA,0xFFFFEDB3,0x0000047C,0x00001FBD,0xFFFFF4A7,0x00000391,0x00001FBD,0xFFFFF4A7,0x00000391},
- {"0000001000010011111100001111110101000010110101000011100010100100",0x00003FF6,0xFFFFE058,0x000006A2,0x000024CD,0xFFFFF026,0x00000452,0x000024CD,0xFFFFF026,0x00000452},
- {"0000001000010011111100001111111010011001000010100011100011100100",0x00003161,0xFFFFEAC8,0x000004F3,0x00001BB6,0xFFFFF72A,0x0000032B,0x00001BB6,0xFFFFF72A,0x0000032B},
- {"0000001000010011111100001111110101000010110110000011100010100100",0x00002EA0,0xFFFFECA6,0x000004B7,0x000018C2,0xFFFFF94E,0x000002E1,0x000018C2,0xFFFFF94E,0x000002E1},
- {"0000001000010011111100001111111010011001000110000010000110000100",0x00002F62,0xFFFFEC9E,0x000004B8,0x00001531,0xFFFFFBCD,0x00000285,0x00001531,0xFFFFFBCD,0x00000285},
- {"0000001000010011111100001111111010011001000001000100000010100100",0x00003013,0xFFFFEBD6,0x000004C2,0x00001B01,0xFFFFF802,0x000002FF,0x00001B01,0xFFFFF802,0x000002FF},
- {"0000001000010011111100001111111010011001000110000011000001100100",0x00002972,0xFFFFF08D,0x00000417,0x00001A32,0xFFFFF8A4,0x00000305,0x00001A32,0xFFFFF8A4,0x00000305},
- {"0000001000010011111100001111110101000010110110000010000011100100",0x00002E95,0xFFFFED94,0x00000487,0x00001529,0xFFFFFC26,0x00000271,0x00001529,0xFFFFFC26,0x00000271},
- {"0000001000010011111100001111111010011001000010100001000010000100",0x00002D6A,0xFFFFEC79,0x000004C1,0x00001AE2,0xFFFFF725,0x00000337,0x00001AE2,0xFFFFF725,0x00000337},
- {"0000001000010011111100001111111010011001000000100001100010000100",0x000036B4,0xFFFFE704,0x00000591,0x00001E7E,0xFFFFF51C,0x00000383,0x00001E7E,0xFFFFF51C,0x00000383},
- {"0000001000010011111100001111111010011001000001000001100001000100",0x00002A6F,0xFFFFEF70,0x00000443,0x00001BAA,0xFFFFF752,0x00000336,0x00001BAA,0xFFFFF752,0x00000336},
- {"0000001000010011111100001111111010011001000110000011100101000100",0x00002C66,0xFFFFEF5F,0x0000043A,0x000019F7,0xFFFFF931,0x000002EC,0x000019F7,0xFFFFF931,0x000002EC},
- {"0000001000010011111010101001010011011110000001100011000111000100",0x00003852,0xFFFFE6AB,0x00000590,0x000019C1,0xFFFFF8B1,0x000002E5,0x000019C1,0xFFFFF8B1,0x000002E5},
- {"0000001000010011111100001111110101000010110110100011000100100100",0x00003521,0xFFFFE932,0x00000523,0x000018A9,0xFFFFF96B,0x000002D0,0x000018A9,0xFFFFF96B,0x000002D0},
- {"0000001000010011111100001111111010011001000001100010000101100100",0x000031B9,0xFFFFEB36,0x000004D0,0x00001D65,0xFFFFF612,0x0000035D,0x00001D65,0xFFFFF612,0x0000035D},
- {"0000001000010011111100001111110101000010110101000001000001100100",0x00003ED0,0xFFFFE135,0x00000679,0x00002351,0xFFFFF0FE,0x00000433,0x00002351,0xFFFFF0FE,0x00000433},
- {"0000001000010011111100001111111010011001000010100010000011100100",0x000033ED,0xFFFFE91A,0x00000541,0x00001C93,0xFFFFF6A0,0x0000034A,0x00001C93,0xFFFFF6A0,0x0000034A},
- {"0000001000010011111010101001010011011110000000100001100001000100",0x0000356F,0xFFFFE8F7,0x00000530,0x000016BF,0xFFFFFA85,0x000002AB,0x000016BF,0xFFFFFA85,0x000002AB},
- {"0000001000010011111100001111111010011001000110000100000011100100",0x00002304,0xFFFFF4F3,0x00000364,0x000017CC,0xFFFFFA41,0x000002CA,0x000017CC,0xFFFFFA41,0x000002CA},
- {"0000001000010011111100001111111010011001000101100001000101100100",0x00002887,0xFFFFEFD7,0x00000450,0x00001474,0xFFFFFB94,0x00000299,0x00001474,0xFFFFFB94,0x00000299},
- {"0000001000010011111100001111111010011001000001100011000001100100",0x00003D0B,0xFFFFE416,0x000005EF,0x00001C7E,0xFFFFF71D,0x00000325,0x00001C7E,0xFFFFF71D,0x00000325},
- {"0000001000010011111100001111111010011001000010000001000011100100",0x00003185,0xFFFFEAFA,0x000004E4,0x00001A12,0xFFFFF83C,0x00000303,0x00001A12,0xFFFFF83C,0x00000303},
- {"0000001000010011111100001111111010011001000010100001100101000100",0x00003032,0xFFFFEAE6,0x000004FC,0x00001B2A,0xFFFFF73F,0x0000032B,0x00001B2A,0xFFFFF73F,0x0000032B},
- {"0000001000010011111100001111110101000010110110000011100011000100",0x00002691,0xFFFFF22D,0x000003D6,0x00001700,0xFFFFFA6E,0x000002C0,0x00001700,0xFFFFFA6E,0x000002C0},
- {"0000001000010011111100001111111010011001000000100001100010100100",0x00002B2F,0xFFFFEEC4,0x0000044B,0x0000215F,0xFFFFF33F,0x000003D2,0x0000215F,0xFFFFF33F,0x000003D2},
- {"0000001000010011111100001111111010011001000010100100000110000100",0x000034AA,0xFFFFE706,0x000005B1,0x00001B28,0xFFFFF6B5,0x00000349,0x00001B28,0xFFFFF6B5,0x00000349},
- {"0000001000010011111100001111110101000010110110100010100101100100",0x0000307E,0xFFFFEB38,0x000004E6,0x00001A22,0xFFFFF83F,0x00000300,0x00001A22,0xFFFFF83F,0x00000300},
- {"0000001000010011111100001111111010011001000001100001100010100100",0x000038D6,0xFFFFE6D8,0x0000057C,0x00001B24,0xFFFFF7E4,0x00000307,0x00001B24,0xFFFFF7E4,0x00000307},
- {"0000001000010011111100001111111010011001000110000011000001000100",0x00002757,0xFFFFF1E8,0x000003DD,0x000017F5,0xFFFFFA15,0x000002C8,0x000017F5,0xFFFFFA15,0x000002C8},
- {"0000001000010011111100001111111010011001000010000011000110000100",0x000031FC,0xFFFFEB3E,0x000004CE,0x00001B4C,0xFFFFF7AD,0x00000319,0x00001B4C,0xFFFFF7AD,0x00000319},
- {"0000001000010011111100001111111010011001001100000001100001100100",0x00002933,0xFFFFF073,0x0000040E,0x00001C3C,0xFFFFF701,0x0000033C,0x00001C3C,0xFFFFF701,0x0000033C},
- {"0000001000010011111100001111110101000010110100100001100010100100",0x000040BB,0xFFFFE066,0x0000069A,0x0000257F,0xFFFFF08A,0x00000435,0x0000257F,0xFFFFF08A,0x00000435},
- {"0000001000010011111100001111111010011001000100000001000010100100",0x0000305B,0xFFFFEB9B,0x000004CB,0x00001996,0xFFFFF846,0x00000308,0x00001996,0xFFFFF846,0x00000308},
- {"0000001000010011111100001111111010011001000001100100100010000100",0x000039C0,0xFFFFE5D3,0x000005B0,0x00001A8D,0xFFFFF7DA,0x00000313,0x00001A8D,0xFFFFF7DA,0x00000313},
- {"0000001000010011111010101001010011011110000000100001000010100100",0x00002E23,0xFFFFED3F,0x0000048F,0x0000189D,0xFFFFF94C,0x000002DE,0x0000189D,0xFFFFF94C,0x000002DE},
- {"0000001000010011111010101001010011011110000000100001100110000100",0x0000332B,0xFFFFE9F1,0x00000516,0x000018E6,0xFFFFF8FE,0x000002EC,0x000018E6,0xFFFFF8FE,0x000002EC},
- {"0000001000010011111100001111111010011001000010000011100011000100",0x000034A0,0xFFFFEA44,0x000004E4,0x00001ECD,0xFFFFF5B4,0x00000364,0x00001ECD,0xFFFFF5B4,0x00000364},
- {"0000001000010011111100001111110101000010110100100100000100000100",0x0000448C,0xFFFFDF34,0x000006A8,0x0000231C,0xFFFFF286,0x000003D9,0x0000231C,0xFFFFF286,0x000003D9},
- {"0000001000010011111010101001010011011110000001100010000101000100",0x00002D8C,0xFFFFEE65,0x00000456,0x000018B1,0xFFFFF9C8,0x000002C8,0x000018B1,0xFFFFF9C8,0x000002C8},
- {"0000001000010011111100001111111010011001000001100001100100000100",0x00003527,0xFFFFE9BF,0x000004FD,0x00001D23,0xFFFFF69F,0x00000342,0x00001D23,0xFFFFF69F,0x00000342},
- {"0000001000010011111100001111110101000010110111000011100010100100",0x00002C51,0xFFFFEDC3,0x00000483,0x00001BE0,0xFFFFF720,0x0000032D,0x00001BE0,0xFFFFF720,0x0000032D},
- {"0000001000010011111100001111111010011001000010100011000001000100",0x00002C6C,0xFFFFECEB,0x000004B7,0x00001C86,0xFFFFF5E7,0x00000371,0x00001C86,0xFFFFF5E7,0x00000371},
- {"0000001000010011111100001111111010011001000001000101000101000100",0x000037CF,0xFFFFE6BE,0x00000599,0x000018CD,0xFFFFF967,0x000002C7,0x000018CD,0xFFFFF967,0x000002C7},
- {"0000001000010011111100001111111010011001000100000011000101100100",0x00002E6F,0xFFFFED1D,0x0000048E,0x00001ADC,0xFFFFF7F4,0x0000030E,0x00001ADC,0xFFFFF7F4,0x0000030E},
- {"0000001000010011111100001111110101000010110101000010100110000100",0x00003FF3,0xFFFFDF13,0x000006F9,0x000025BF,0xFFFFEEEE,0x00000497,0x000025BF,0xFFFFEEEE,0x00000497},
- {"0000001000010011111100001111110101000010110111000101000100000100",0x00004135,0xFFFFDF97,0x000006CC,0x00001D52,0xFFFFF541,0x00000383,0x00001D52,0xFFFFF541,0x00000383},
- {"0000001000010011111100001111110101000010110111000010000011100100",0x00002EA9,0xFFFFEDDB,0x0000045F,0x0000197C,0xFFFFF8E1,0x000002F0,0x0000197C,0xFFFFF8E1,0x000002F0},
- {"0000001000010011111010101001010011011110000001000011000010000100",0x0000345C,0xFFFFE922,0x00000532,0x00001922,0xFFFFF8C7,0x000002F1,0x00001922,0xFFFFF8C7,0x000002F1},
- {"0000001000010011111100001111111010011001000001100100000100100100",0x000035C4,0xFFFFE8FE,0x00000521,0x00001C87,0xFFFFF6F3,0x00000330,0x00001C87,0xFFFFF6F3,0x00000330},
- {"0000001000010011111100001111110101000010110110000011000101100100",0x00002888,0xFFFFF08A,0x0000041E,0x0000150F,0xFFFFFB87,0x00000291,0x0000150F,0xFFFFFB87,0x00000291},
- {"0000001000010011111100001111111010011001000010100001000100100100",0x000035E9,0xFFFFE657,0x000005CC,0x00001BD6,0xFFFFF664,0x00000355,0x00001BD6,0xFFFFF664,0x00000355},
- {"0000001000010011111100001111111010011001000101100100100011100100",0x00002F94,0xFFFFEBD0,0x000004E5,0x00001333,0xFFFFFCA7,0x00000266,0x00001333,0xFFFFFCA7,0x00000266},
- {"0000001000010011111100001111111010011001000110000001100101100100",0x000029E7,0xFFFFF009,0x00000433,0x0000144A,0xFFFFFC37,0x0000027D,0x0000144A,0xFFFFFC37,0x0000027D},
- {"0000001000010011111100001111111010011001001011000001100101000100",0x00003418,0xFFFFE979,0x00000521,0x00001D33,0xFFFFF66B,0x0000034A,0x00001D33,0xFFFFF66B,0x0000034A},
- {"0000001000010011111010101001010011011110000001000100000011100100",0x00003656,0xFFFFE79D,0x0000057A,0x000017C2,0xFFFFF992,0x000002D4,0x000017C2,0xFFFFF992,0x000002D4},
- {"0000001000010011111100001111111010011001000011000100000011000100",0x00002EB2,0xFFFFECFE,0x00000493,0x00001F2A,0xFFFFF543,0x0000037B,0x00001F2A,0xFFFFF543,0x0000037B},
- {"0000001000010011111100001111111010011001000000100001000100100100",0x00002FC1,0xFFFFEB3F,0x000004E8,0x00001CD0,0xFFFFF5F7,0x00000364,0x00001CD0,0xFFFFF5F7,0x00000364},
- {"0000001000010011111100001111111010011001000011000001000100100100",0x0000307B,0xFFFFEB66,0x000004DE,0x00001953,0xFFFFF8ED,0x000002E4,0x00001953,0xFFFFF8ED,0x000002E4},
- {"0000001000010011111100001111110101000010110110100001100010000100",0x00002CAA,0xFFFFED07,0x000004AC,0x0000251C,0xFFFFF086,0x0000044D,0x0000251C,0xFFFFF086,0x0000044D},
- {"0000001000010011111010101001010011011110000001000011100101000100",0x00002C94,0xFFFFEE5F,0x0000045B,0x000018D7,0xFFFFF900,0x000002EB,0x000018D7,0xFFFFF900,0x000002EB},
- {"0000001000010011111100001111111010011001000000100001100001100100",0x000031F1,0xFFFFE9BE,0x0000052E,0x00001DDF,0xFFFFF558,0x00000380,0x00001DDF,0xFFFFF558,0x00000380},
- {"0000001000010011111100001111111010011001000011100101000011000100",0x00002603,0xFFFFF1E9,0x000003DA,0x00001B37,0xFFFFF75A,0x0000032F,0x00001B37,0xFFFFF75A,0x0000032F},
- {"0000001000010011111100001111110101000010110110100011000001000100",0x00003992,0xFFFFE4F9,0x000005EB,0x00001775,0xFFFFF9B8,0x000002C2,0x00001775,0xFFFFF9B8,0x000002C2},
- {"0000001000010011111100001111111010011001000110000100100101100100",0x000029DA,0xFFFFF052,0x0000041F,0x000016E2,0xFFFFFA99,0x000002BB,0x000016E2,0xFFFFFA99,0x000002BB},
- {"0000001000010011111100001111111010011001000100000001000001100100",0x00002FF2,0xFFFFEB8F,0x000004DF,0x00001AF6,0xFFFFF7A1,0x00000321,0x00001AF6,0xFFFFF7A1,0x00000321},
- {"0000001000010011111100001111111010011001000101100000100011100100",0x00002590,0xFFFFF222,0x000003EE,0x0000130B,0xFFFFFCC9,0x00000268,0x0000130B,0xFFFFFCC9,0x00000268},
- {"0000001000010011111100001111111010011001000000100100000001100100",0x000038A2,0xFFFFE65F,0x000005A2,0x000018B1,0xFFFFF917,0x000002E1,0x000018B1,0xFFFFF917,0x000002E1},
- {"0000001000010011111100001111110101000010110111000100100011100100",0x000035FD,0xFFFFE73C,0x0000058D,0x00001BB3,0xFFFFF6E1,0x00000337,0x00001BB3,0xFFFFF6E1,0x00000337},
- {"0000001000010011111100001111111010011001000100000011100011000100",0x00002AB7,0xFFFFEF98,0x00000429,0x00001F35,0xFFFFF539,0x0000037C,0x00001F35,0xFFFFF539,0x0000037C},
- {"0000001000010011111100001111111010011001000010100000100101000100",0x000034BA,0xFFFFE73D,0x000005A6,0x000018A6,0xFFFFF888,0x000002FB,0x000018A6,0xFFFFF888,0x000002FB},
- {"0000001000010011111100001111111010011001000001100011100001000100",0x000032EA,0xFFFFEA78,0x000004F4,0x00001AB6,0xFFFFF812,0x00000308,0x00001AB6,0xFFFFF812,0x00000308},
- {"0000001000010011111100001111111010011001000011000011000001000100",0x00002BE9,0xFFFFEE9A,0x00000457,0x00001942,0xFFFFF8D2,0x000002F2,0x00001942,0xFFFFF8D2,0x000002F2},
- {"0000001000010011111100001111111010011001000100000101000100100100",0x00002FAB,0xFFFFEB76,0x000004E1,0x00001DCA,0xFFFFF57D,0x00000378,0x00001DCA,0xFFFFF57D,0x00000378},
- {"0000001000010011111100001111111010011001001011100010100001000100",0x0000330A,0xFFFFE9E1,0x0000051B,0x00001CC4,0xFFFFF6DF,0x00000335,0x00001CC4,0xFFFFF6DF,0x00000335},
- {"0000001000010011111100001111111010011001000110000010100010100100",0x000027D8,0xFFFFF276,0x000003BF,0x0000178A,0xFFFFFABF,0x000002B5,0x0000178A,0xFFFFFABF,0x000002B5},
- {"0000001000010011111100001111110101000010110111000011100001100100",0x0000340A,0xFFFFE86D,0x00000562,0x00001B85,0xFFFFF719,0x0000032F,0x00001B85,0xFFFFF719,0x0000032F},
- {"0000001000010011111010101001010011011110000001100011000010000100",0x00003879,0xFFFFE73F,0x00000578,0x0000161C,0xFFFFFB6B,0x00000281,0x0000161C,0xFFFFFB6B,0x00000281},
- {"0000001000010011111100001111111010011001000110000100000001100100",0x00002879,0xFFFFF0F8,0x0000040A,0x00001749,0xFFFFFA37,0x000002CC,0x00001749,0xFFFFFA37,0x000002CC},
- {"0000001000010011111100001111111010011001000001000011100101100100",0x00002C3A,0xFFFFEEA0,0x0000044F,0x00001D57,0xFFFFF6C2,0x00000332,0x00001D57,0xFFFFF6C2,0x00000332},
- {"0000001000010011111010101001010011011110000000100001100101100100",0x000035BB,0xFFFFE90D,0x0000052A,0x000017D9,0xFFFFF9F5,0x000002C3,0x000017D9,0xFFFFF9F5,0x000002C3},
- {"0000001000010011111010101001010011011110000001000001000100100100",0x000031F1,0xFFFFEAD4,0x000004ED,0x00001F10,0xFFFFF539,0x0000037D,0x00001F10,0xFFFFF539,0x0000037D},
- {"0000001000010011111100001111111010011001000100000010100000100100",0x00002A1A,0xFFFFEFAD,0x00000430,0x00001D47,0xFFFFF62F,0x0000035E,0x00001D47,0xFFFFF62F,0x0000035E},
- {"0000001000010011111100001111111010011001000101100100100100100100",0x00002AF0,0xFFFFEEDC,0x00000465,0x0000145F,0xFFFFFBEB,0x00000281,0x0000145F,0xFFFFFBEB,0x00000281},
- {"0000001000010011111100001111111010011001000110000011000101100100",0x00002657,0xFFFFF2E0,0x000003B6,0x00001664,0xFFFFFB37,0x000002A2,0x00001664,0xFFFFFB37,0x000002A2},
- {"0000001000010011111100001111110101000010110100000011100001100100",0x00003183,0xFFFFE9F1,0x0000052B,0x00002020,0xFFFFF3CE,0x000003C1,0x00002020,0xFFFFF3CE,0x000003C1},
- {"0000001000010011111100001111110101000010110001100010100011100100",0x00003240,0xFFFFEB65,0x000004C7,0x00002425,0xFFFFF245,0x000003F3,0x00002425,0xFFFFF245,0x000003F3},
- {"0000001000010011111010101001010011011110001100100001000100000100",0x000023D0,0xFFFFF400,0x00000397,0x00001345,0xFFFFFD6B,0x00000241,0x00001345,0xFFFFFD6B,0x00000241},
- {"0000001000010011111100001111110101000010110011100011100010100100",0x00003440,0xFFFFE872,0x0000055B,0x00002247,0xFFFFF296,0x000003E8,0x00002247,0xFFFFF296,0x000003E8},
- {"0000001000010011111100001111110101000010110100000100100100000100",0x00003275,0xFFFFE970,0x00000538,0x00001F94,0xFFFFF429,0x000003AD,0x00001F94,0xFFFFF429,0x000003AD},
- {"0000001000010011111100001111110101000010110001100100000010100100",0x00003918,0xFFFFE5DA,0x000005B6,0x000024FC,0xFFFFF106,0x00000426,0x000024FC,0xFFFFF106,0x00000426},
- {"0000001000010011111010101001010011011110000001100010000001000100",0x0000334B,0xFFFFEA39,0x000004FD,0x00001983,0xFFFFF8F6,0x000002E2,0x00001983,0xFFFFF8F6,0x000002E2},
- {"0000001000010011111100001111110101000010110001100100100110000100",0x00003B59,0xFFFFE4D0,0x000005DA,0x00002605,0xFFFFF090,0x00000439,0x00002605,0xFFFFF090,0x00000439},
- {"0000001000010011111100001111110101000010110100000011000100100100",0x00003251,0xFFFFEA46,0x00000511,0x00002781,0xFFFFEF84,0x00000470,0x00002781,0xFFFFEF84,0x00000470},
- {"0000001000010011111100001111110101000010110010100011000101100100",0x00003304,0xFFFFE926,0x00000542,0x00001EE9,0xFFFFF4E4,0x0000038B,0x00001EE9,0xFFFFF4E4,0x0000038B},
- {"0000001000010011111100001111110101000010110011000011100011000100",0x00002F4C,0xFFFFEC0C,0x000004C4,0x00001E49,0xFFFFF578,0x00000374,0x00001E49,0xFFFFF578,0x00000374},
- {"0000001000010011111010101001010011011110000111000010000101100100",0x00002034,0xFFFFF692,0x0000034C,0x000014B8,0xFFFFFC5B,0x00000294,0x000014B8,0xFFFFFC5B,0x00000294},
- {"0000001000010011111100001111110101000010110011100100100100100100",0x0000385F,0xFFFFE513,0x000005F3,0x000024E7,0xFFFFF053,0x00000450,0x000024E7,0xFFFFF053,0x00000450},
- {"0000001000010011111010101001010011011110000111000100000011100100",0x00001D70,0xFFFFF821,0x0000030F,0x00001541,0xFFFFFBB4,0x000002B0,0x00001541,0xFFFFFBB4,0x000002B0},
- {"0000001000010011111100001111110101000010110100000010000010000100",0x000034EB,0xFFFFE7FF,0x00000575,0x000019B4,0xFFFFF836,0x00000308,0x000019B4,0xFFFFF836,0x00000308},
- {"0000001000010011111100001111110101000010110100000101000011100100",0x000037C9,0xFFFFE5D4,0x000005CD,0x000026A1,0xFFFFEF0C,0x00000491,0x000026A1,0xFFFFEF0C,0x00000491},
- {"0000001000010011111010101001010011011110000100100001100101000100",0x00002918,0xFFFFF148,0x000003E9,0x00001A49,0xFFFFF94C,0x000002CF,0x00001A49,0xFFFFF94C,0x000002CF},
- {"0000001000010011111100001111110101000010110010100100000001100100",0x00002F90,0xFFFFEAB5,0x00000514,0x00001707,0xFFFFF9C7,0x000002C4,0x00001707,0xFFFFF9C7,0x000002C4},
- {"0000001000010011111010101001010011011110000001100010000001100100",0x0000327E,0xFFFFEA99,0x000004F4,0x0000194F,0xFFFFF929,0x000002DC,0x0000194F,0xFFFFF929,0x000002DC},
- {"0000001000010011111100001111110101000010110001100100000010000100",0x0000326F,0xFFFFE9CF,0x00000519,0x00002240,0xFFFFF299,0x000003E7,0x00002240,0xFFFFF299,0x000003E7},
- {"0000001000010011111010101001010011011110001100100001000100100100",0x000022FB,0xFFFFF4C6,0x00000371,0x00001506,0xFFFFFC73,0x00000265,0x00001506,0xFFFFFC73,0x00000265},
- {"0000001000010011111100001111110101000010110010100011100100100100",0x00003AD6,0xFFFFE470,0x000005FE,0x00001F03,0xFFFFF4F3,0x00000387,0x00001F03,0xFFFFF4F3,0x00000387},
- {"0000001000010011111010101001010011011110001000000001000100100100",0x00001F11,0xFFFFF756,0x00000332,0x00001666,0xFFFFFB8A,0x000002B2,0x00001666,0xFFFFFB8A,0x000002B2},
- {"0000001000010011111010101001010011011110000000100011100010100100",0x00002A5F,0xFFFFEFA7,0x00000430,0x00001943,0xFFFFF8C6,0x000002F7,0x00001943,0xFFFFF8C6,0x000002F7},
- {"0000001000010011111010101001010011011110000101100101000011100100",0x0000235E,0xFFFFF3B4,0x000003B3,0x00001489,0xFFFFFBCF,0x0000029B,0x00001489,0xFFFFFBCF,0x0000029B},
- {"0000001000010011111100001111110101000010110011000011100010100100",0x00003570,0xFFFFE780,0x0000058D,0x00001B1D,0xFFFFF767,0x00000325,0x00001B1D,0xFFFFF767,0x00000325},
- {"0000001000010011111010101001010011011110000001000010000001100100",0x00003678,0xFFFFE7C3,0x00000569,0x00001831,0xFFFFF98E,0x000002C8,0x00001831,0xFFFFF98E,0x000002C8},
- {"0000001000010011111010101001010011011110001000000001100001100100",0x000020B9,0xFFFFF625,0x0000035A,0x000015C5,0xFFFFFB8A,0x000002B5,0x000015C5,0xFFFFFB8A,0x000002B5},
- {"0000001000010011111100001111110101000010110001100011000110000100",0x00003985,0xFFFFE529,0x000005DD,0x00002165,0xFFFFF351,0x000003C5,0x00002165,0xFFFFF351,0x000003C5},
- {"0000001000010011111100001111110101000010110100000010000001100100",0x0000322A,0xFFFFE99D,0x00000535,0x000019A1,0xFFFFF844,0x00000305,0x000019A1,0xFFFFF844,0x00000305},
- {"0000001000010011111100001111110101000010110100000101000100000100",0x000033ED,0xFFFFE834,0x00000571,0x00002094,0xFFFFF33A,0x000003DB,0x00002094,0xFFFFF33A,0x000003DB},
- {"0000001000010011111010101001010011011110001000000100000011000100",0x00001D10,0xFFFFF84D,0x0000030B,0x00001659,0xFFFFFB0A,0x000002CB,0x00001659,0xFFFFFB0A,0x000002CB},
- {"0000001000010011111010101001010011011110000111000001000100100100",0x0000210F,0xFFFFF644,0x00000355,0x00001A4A,0xFFFFF90F,0x00000310,0x00001A4A,0xFFFFF90F,0x00000310},
- {"0000001000010011111010101001010011011110000101100100000101100100",0x00001CA8,0xFFFFF813,0x00000316,0x00001440,0xFFFFFC1C,0x0000029D,0x00001440,0xFFFFFC1C,0x0000029D},
- {"0000001000010011111010101001010011011110001100100001000011000100",0x00002864,0xFFFFF15A,0x000003FA,0x0000137F,0xFFFFFD43,0x00000248,0x0000137F,0xFFFFFD43,0x00000248},
- {"0000001000010011111100001111110101000010110100000100000110000100",0x00002CDB,0xFFFFECFD,0x000004A7,0x00002472,0xFFFFF0E1,0x00000437,0x00002472,0xFFFFF0E1,0x00000437},
- {"0000001000010011111100001111110101000010110011000101000100000100",0x00003348,0xFFFFE8CA,0x00000554,0x00001E91,0xFFFFF4D4,0x00000392,0x00001E91,0xFFFFF4D4,0x00000392},
- {"0000001000010011111100001111110101000010110001100100100101000100",0x00003989,0xFFFFE4BB,0x000005F8,0x00001ACB,0xFFFFF780,0x00000319,0x00001ACB,0xFFFFF780,0x00000319},
- {"0000001000010011111100001111110101000010110010100010000100000100",0x00003238,0xFFFFEA09,0x0000051E,0x00001F08,0xFFFFF4F4,0x0000038C,0x00001F08,0xFFFFF4F4,0x0000038C},
- {"0000001000010011111010101001010011011110000100100000100100000100",0x00002453,0xFFFFF3B0,0x0000038D,0x00001AED,0xFFFFF8A2,0x000002EA,0x00001AED,0xFFFFF8A2,0x000002EA},
- {"0000001000010011111010101001010011011110000111000011000000100100",0x00002459,0xFFFFF409,0x000003A8,0x000017B5,0xFFFFFA53,0x000002E1,0x000017B5,0xFFFFFA53,0x000002E1},
- {"0000001000010011111010101001010011011110000000100001000110000100",0x0000310D,0xFFFFEB78,0x000004D0,0x00001DC9,0xFFFFF5D5,0x00000368,0x00001DC9,0xFFFFF5D5,0x00000368},
- {"0000001000010011111010101001010011011110000000100011000100000100",0x000031BF,0xFFFFECA3,0x00000498,0x00001DC9,0xFFFFF717,0x00000336,0x00001DC9,0xFFFFF717,0x00000336},
- {"0000001000010011111100001111110101000010110011100010000100000100",0x00003896,0xFFFFE5DD,0x000005C5,0x000023E2,0xFFFFF1A1,0x00000416,0x000023E2,0xFFFFF1A1,0x00000416},
- {"0000001000010011111010101001010011011110001100100011100100000100",0x000023CB,0xFFFFF4C8,0x00000372,0x00001C33,0xFFFFF7D5,0x0000032A,0x00001C33,0xFFFFF7D5,0x0000032A},
- {"0000001000010011111100001111110101000010110100000010000011000100",0x00002F6B,0xFFFFEBF0,0x000004CE,0x00001C89,0xFFFFF689,0x0000034D,0x00001C89,0xFFFFF689,0x0000034D},
- {"0000001000010011111100001111110101000010110011100011100100000100",0x00003E72,0xFFFFE211,0x0000065D,0x0000218D,0xFFFFF309,0x000003DC,0x0000218D,0xFFFFF309,0x000003DC},
- {"0000001000010011111010101001010011011110000000100010000010000100",0x00002612,0xFFFFF2C3,0x000003AD,0x000019F7,0xFFFFF891,0x000002FE,0x000019F7,0xFFFFF891,0x000002FE},
- {"0000001000010011111010101001010011011110000101100100000110000100",0x0000205D,0xFFFFF59F,0x00000372,0x000012E6,0xFFFFFD0A,0x00000270,0x000012E6,0xFFFFFD0A,0x00000270},
- {"0000001000010011111100001111110101000010110010100010000100100100",0x00002ECB,0xFFFFEC47,0x000004BD,0x00001936,0xFFFFF8D9,0x000002E4,0x00001936,0xFFFFF8D9,0x000002E4},
- {"0000001000010011111010101001010011011110000001100100100100000100",0x00002BDB,0xFFFFEE6D,0x00000458,0x00001852,0xFFFFF943,0x000002D9,0x00001852,0xFFFFF943,0x000002D9},
- {"0000001000010011111010101001010011011110000100100100100100000100",0x00003387,0xFFFFE958,0x00000534,0x00001932,0xFFFFF8FA,0x000002E4,0x00001932,0xFFFFF8FA,0x000002E4},
- {"0000001000010011111010101001010011011110000000100000100011000100",0x00002E3C,0xFFFFED26,0x00000495,0x00001858,0xFFFFF990,0x000002D1,0x00001858,0xFFFFF990,0x000002D1},
- {"0000001000010011111010101001010011011110000000100010100101100100",0x000033B8,0xFFFFEA5C,0x000004F9,0x00001BD1,0xFFFFF76A,0x0000032E,0x00001BD1,0xFFFFF76A,0x0000032E},
- {"0000001000010011111010101001010011011110000001100010100110000100",0x00002BCE,0xFFFFEEE9,0x00000443,0x00001982,0xFFFFF90D,0x000002DF,0x00001982,0xFFFFF90D,0x000002DF},
- {"0000001000010011111100001111110101000010110100000100100011100100",0x00003495,0xFFFFE7D9,0x0000057B,0x00001D2A,0xFFFFF5A5,0x00000372,0x00001D2A,0xFFFFF5A5,0x00000372},
- {"0000001000010011111100001111110101000010110010100011100011100100",0x000034B1,0xFFFFE88D,0x00000556,0x00002014,0xFFFFF43A,0x000003AA,0x00002014,0xFFFFF43A,0x000003AA},
- {"0000001000010011111100001111110101000010110011000011000100100100",0x00002F96,0xFFFFEC84,0x000004AD,0x000024A2,0xFFFFF1CE,0x0000040A,0x000024A2,0xFFFFF1CE,0x0000040A},
- {"0000001000010011111010101001010011011110000101100001000001100100",0x0000203B,0xFFFFF640,0x00000359,0x000014EC,0xFFFFFC14,0x0000029C,0x000014EC,0xFFFFFC14,0x0000029C},
- {"0000001000010011111100001111110101000010110100000010100110000100",0x000034E2,0xFFFFE7B8,0x00000582,0x00001938,0xFFFFF872,0x000002FA,0x00001938,0xFFFFF872,0x000002FA},
- {"0000001000010011111010101001010011011110000001100011000100100100",0x00002AC7,0xFFFFF0C1,0x000003F5,0x00002268,0xFFFFF39C,0x000003C9,0x00002268,0xFFFFF39C,0x000003C9},
- {"0000001000010011111100001111110101000010110001100011000101000100",0x000036F6,0xFFFFE77F,0x00000571,0x000027D9,0xFFFFEF6F,0x00000461,0x000027D9,0xFFFFEF6F,0x00000461},
- {"0000001000010011111010101001010011011110000100100011000100100100",0x00002BAB,0xFFFFF018,0x00000419,0x00002126,0xFFFFF4E2,0x0000038F,0x00002126,0xFFFFF4E2,0x0000038F},
- {"0000001000010011111010101001010011011110001100100011100100100100",0x000028C4,0xFFFFF161,0x000003F8,0x0000180C,0xFFFFFA4B,0x000002C8,0x0000180C,0xFFFFFA4B,0x000002C8},
- {"0000001000010011111100001111110101000010110010100010100001100100",0x00002F48,0xFFFFEB62,0x000004EE,0x00001912,0xFFFFF8C8,0x000002EA,0x00001912,0xFFFFF8C8,0x000002EA},
- {"0000001000010011111100001111110101000010110011100010100001100100",0x000032DF,0xFFFFE911,0x00000545,0x00001F06,0xFFFFF485,0x0000039C,0x00001F06,0xFFFFF485,0x0000039C},
- {"0000001000010011111100001111110101000010110100000100000101000100",0x000035B8,0xFFFFE74F,0x00000590,0x00001FD7,0xFFFFF410,0x000003AF,0x00001FD7,0xFFFFF410,0x000003AF},
- {"0000001000010011111100001111110101000010110100000101000011000100",0x00003608,0xFFFFE6D7,0x000005A9,0x000024A6,0xFFFFF075,0x00000450,0x000024A6,0xFFFFF075,0x00000450},
- {"0000001000010011111100001111110101000010110010100011100010000100",0x000030AB,0xFFFFEAED,0x000004F5,0x000019EE,0xFFFFF84E,0x000002FC,0x000019EE,0xFFFFF84E,0x000002FC},
- {"0000001000010011111010101001010011011110000001100010000011000100",0x000030C6,0xFFFFEC92,0x0000049E,0x000019BB,0xFFFFF8F1,0x000002F3,0x000019BB,0xFFFFF8F1,0x000002F3},
- {"0000001000010011111100001111110101000010110001100011000010100100",0x00003B27,0xFFFFE544,0x000005C1,0x00002697,0xFFFFF072,0x00000438,0x00002697,0xFFFFF072,0x00000438},
- {"0000001000010011111010101001010011011110000100100100100011100100",0x00002F23,0xFFFFEC48,0x000004B9,0x0000199A,0xFFFFF8CF,0x000002E9,0x0000199A,0xFFFFF8CF,0x000002E9},
- {"0000001000010011111010101001010011011110000001100010100110100100",0x00002BD7,0xFFFFEEAC,0x00000450,0x00001991,0xFFFFF8F4,0x000002E2,0x00001991,0xFFFFF8F4,0x000002E2},
- {"0000001000010011111010101001010011011110000000100010000000100100",0x00003210,0xFFFFEB24,0x000004DE,0x00001BDF,0xFFFFF744,0x00000333,0x00001BDF,0xFFFFF744,0x00000333},
- {"0000001000010011111010101001010011011110001001000100000101000100",0x00002DDC,0xFFFFED0D,0x000004AC,0x000019D0,0xFFFFF869,0x0000030F,0x000019D0,0xFFFFF869,0x0000030F},
- {"0000001000010011111010101001010011011110001000000011100101100100",0x000023E6,0xFFFFF40C,0x000003A9,0x000014EB,0xFFFFFBC4,0x000002AF,0x000014EB,0xFFFFFBC4,0x000002AF},
- {"0000001000010011111100001111110101000010110010100010100110100100",0x000030CE,0xFFFFE9A5,0x0000053C,0x00001C45,0xFFFFF60E,0x0000035D,0x00001C45,0xFFFFF60E,0x0000035D},
- {"0000001000010011111010101001010011011110000101100001000010000100",0x00001E89,0xFFFFF73A,0x00000337,0x0000157C,0xFFFFFBC0,0x000002AA,0x0000157C,0xFFFFFBC0,0x000002AA},
- {"0000001000010011111100001111110101000010110100000100000100100100",0x000036C6,0xFFFFE6CF,0x000005A1,0x00002457,0xFFFFF11D,0x0000042D,0x00002457,0xFFFFF11D,0x0000042D},
- {"0000001000010011111010101001010011011110001100100001100101000100",0x00002815,0xFFFFF19A,0x000003F2,0x000016D2,0xFFFFFB40,0x00000299,0x000016D2,0xFFFFFB40,0x00000299},
- {"0000001000010011111010101001010011011110000111000001100110100100",0x00001FE2,0xFFFFF660,0x00000354,0x000015A7,0xFFFFFB47,0x000002C1,0x000015A7,0xFFFFFB47,0x000002C1},
- {"0000001000010011111010101001010011011110000101100001100101100100",0x00002114,0xFFFFF634,0x00000356,0x000016C1,0xFFFFFB43,0x000002B8,0x000016C1,0xFFFFFB43,0x000002B8},
- {"0000001000010011111100001111110101000010110011000010100011000100",0x000028E3,0xFFFFF075,0x00000414,0x0000203C,0xFFFFF438,0x000003B3,0x0000203C,0xFFFFF438,0x000003B3},
- {"0000001000010011111010101001010011011110000111000011100100100100",0x00001EEB,0xFFFFF7BB,0x0000031A,0x00001580,0xFFFFFBD7,0x000002AD,0x00001580,0xFFFFFBD7,0x000002AD},
- {"0000001000010011111010101001010011011110001001000000100011000100",0x00002BB2,0xFFFFEE72,0x00000470,0x0000192C,0xFFFFF91E,0x000002E7,0x0000192C,0xFFFFF91E,0x000002E7},
- {"0000001000010011111010101001010011011110000001100101000011100100",0x00003A3D,0xFFFFE49D,0x000005F5,0x00001A3B,0xFFFFF7B1,0x00000320,0x00001A3B,0xFFFFF7B1,0x00000320},
- {"0000001000010011111100001111110101000010110011100011000101100100",0x00002E93,0xFFFFEC5A,0x000004B4,0x000025EB,0xFFFFF03C,0x0000044A,0x000025EB,0xFFFFF03C,0x0000044A},
- {"0000001000010011111100001111110101000010110010100010000011000100",0x0000331F,0xFFFFE97A,0x00000531,0x00001A06,0xFFFFF850,0x000002FD,0x00001A06,0xFFFFF850,0x000002FD},
- {"0000001000010011111100001111110101000010110001100011100101100100",0x00003937,0xFFFFE5A0,0x000005C7,0x0000235E,0xFFFFF234,0x000003F2,0x0000235E,0xFFFFF234,0x000003F2},
- {"0000001000010011111010101001010011011110000111100011100100100100",0x00001DD0,0xFFFFF80E,0x00000319,0x000015C7,0xFFFFFB91,0x000002BC,0x000015C7,0xFFFFFB91,0x000002BC},
- {"0000001000010011111100001111110101000010110100000011100101100100",0x00003328,0xFFFFE905,0x0000054A,0x00002054,0xFFFFF3BF,0x000003C0,0x00002054,0xFFFFF3BF,0x000003C0},
- {"0000001000010011111100001111110101000010110011000001000100000100",0x00002FE5,0xFFFFEA65,0x00000520,0x0000188B,0xFFFFF8A7,0x000002F5,0x0000188B,0xFFFFF8A7,0x000002F5},
- {"0000001000010011111100001111110101000010110010100011100010100100",0x00002ED3,0xFFFFEC51,0x000004B9,0x00001888,0xFFFFF96A,0x000002CA,0x00001888,0xFFFFF96A,0x000002CA},
- {"0000001000010011111100001111110101000010110100000011000010000100",0x00002FCC,0xFFFFEB60,0x000004EA,0x00001F8D,0xFFFFF436,0x000003B4,0x00001F8D,0xFFFFF436,0x000003B4},
- {"0000001000010011111100001111110101000010110011100100000010000100",0x0000329F,0xFFFFE8F7,0x0000054F,0x000023DB,0xFFFFF0EE,0x0000043A,0x000023DB,0xFFFFF0EE,0x0000043A},
- {"0000001000010011111010101001010011011110000001000011100010100100",0x000030B5,0xFFFFEBB8,0x000004C4,0x00001AFD,0xFFFFF781,0x00000329,0x00001AFD,0xFFFFF781,0x00000329},
- {"0000001000010011111010101001010011011110000111100001100110100100",0x00001BBF,0xFFFFF8E2,0x000002F7,0x00001722,0xFFFFFA85,0x000002DB,0x00001722,0xFFFFFA85,0x000002DB},
- {"0000001000010011111010101001010011011110000000100010000001000100",0x000030E4,0xFFFFEBE6,0x000004BB,0x00001C80,0xFFFFF6E1,0x0000033E,0x00001C80,0xFFFFF6E1,0x0000033E},
- {"0000001000010011111010101001010011011110000100100010100101000100",0x000030E2,0xFFFFECD0,0x00000492,0x00001CE0,0xFFFFF753,0x0000032F,0x00001CE0,0xFFFFF753,0x0000032F},
- {"0000001000010011111010101001010011011110001100100010100001100100",0x00002513,0xFFFFF323,0x000003BC,0x00001965,0xFFFFF93C,0x000002F0,0x00001965,0xFFFFF93C,0x000002F0},
- {"0000001000010011111010101001010011011110000101100001000010100100",0x00002147,0xFFFFF585,0x0000037A,0x000014CC,0xFFFFFC3B,0x00000296,0x000014CC,0xFFFFFC3B,0x00000296},
- {"0000001000010011111010101001010011011110001100100010000100100100",0x00002507,0xFFFFF432,0x0000038A,0x00001890,0xFFFFFA61,0x000002C6,0x00001890,0xFFFFFA61,0x000002C6},
- {"0000001000010011111010101001010011011110000001100011100010100100",0x0000339B,0xFFFFEA7D,0x000004F0,0x0000191E,0xFFFFF944,0x000002DF,0x0000191E,0xFFFFF944,0x000002DF},
- {"0000001000010011111100001111110101000010110011000010100010100100",0x00002842,0xFFFFF043,0x00000427,0x00001988,0xFFFFF892,0x000002F7,0x00001988,0xFFFFF892,0x000002F7},
- {"0000001000010011111100001111110101000010110001100001100010100100",0x0000389D,0xFFFFE5D8,0x000005BF,0x00001EE1,0xFFFFF4EF,0x00000387,0x00001EE1,0xFFFFF4EF,0x00000387},
- {"0000001000010011111100001111110101000010110011100011000110000100",0x0000396D,0xFFFFE4D7,0x000005F2,0x000020DA,0xFFFFF34E,0x000003CD,0x000020DA,0xFFFFF34E,0x000003CD},
- {"0000001000010011111100001111110101000010110010100011000100000100",0x0000355F,0xFFFFE85A,0x0000055F,0x0000281F,0xFFFFEF28,0x0000047D,0x0000281F,0xFFFFEF28,0x0000047D},
- {"0000001000010011111010101001010011011110000111000101000011100100",0x00002284,0xFFFFF46E,0x00000399,0x00001498,0xFFFFFBE3,0x0000029C,0x00001498,0xFFFFFBE3,0x0000029C},
- {"0000001000010011111010101001010011011110000000100011100101000100",0x000031B6,0xFFFFEB42,0x000004D9,0x00001F54,0xFFFFF4D2,0x00000399,0x00001F54,0xFFFFF4D2,0x00000399},
- {"0000001000010011111100001111110101000010110001100011000001100100",0x000035CE,0xFFFFE79D,0x00000578,0x00001C78,0xFFFFF68C,0x00000344,0x00001C78,0xFFFFF68C,0x00000344},
- {"0000001000010011111010101001010011011110000111100100100101100100",0x00001C0A,0xFFFFF81B,0x00000318,0x00001492,0xFFFFFBCC,0x000002A5,0x00001492,0xFFFFFBCC,0x000002A5},
- {"0000001000010011111010101001010011011110000000100010000110000100",0x00003492,0xFFFFE95C,0x00000526,0x00001A97,0xFFFFF81B,0x0000030B,0x00001A97,0xFFFFF81B,0x0000030B},
- {"0000001000010011111010101001010011011110000101100011000101100100",0x00001E89,0xFFFFF7D0,0x0000031A,0x000017A5,0xFFFFFA99,0x000002D9,0x000017A5,0xFFFFFA99,0x000002D9},
- {"0000001000010011111100001111110101000010110010100100100011000100",0x00002DCC,0xFFFFEBE0,0x000004DE,0x000019BA,0xFFFFF7F5,0x0000030D,0x000019BA,0xFFFFF7F5,0x0000030D},
- {"0000001000010011111010101001010011011110000001000010100110000100",0x000030EF,0xFFFFEBC1,0x000004C0,0x00001AA9,0xFFFFF814,0x0000030A,0x00001AA9,0xFFFFF814,0x0000030A},
- {"0000001000010011111010101001010011011110001001000101000100100100",0x00002EA3,0xFFFFEBF6,0x000004D8,0x00001DCF,0xFFFFF521,0x00000399,0x00001DCF,0xFFFFF521,0x00000399},
- {"0000001000010011111010101001010011011110001100100100000101100100",0x00002B5F,0xFFFFEEA1,0x0000046C,0x000017EB,0xFFFFF9C9,0x000002D4,0x000017EB,0xFFFFF9C9,0x000002D4},
- {"0000001000010011111010101001010011011110000000100100000100000100",0x00002C63,0xFFFFEE82,0x00000455,0x00002268,0xFFFFF29D,0x000003F6,0x00002268,0xFFFFF29D,0x000003F6},
- {"0000001000010011111010101001010011011110000100100001100100000100",0x00002B1A,0xFFFFF016,0x0000041C,0x000019AA,0xFFFFF988,0x000002D2,0x000019AA,0xFFFFF988,0x000002D2},
- {"0000001000010011111100001111110101000010110010100010100101100100",0x0000332F,0xFFFFE934,0x0000053B,0x00001E47,0xFFFFF566,0x00000374,0x00001E47,0xFFFFF566,0x00000374},
- {"0000001000010011111100001111110101000010110010100100100011100100",0x00002995,0xFFFFEEC1,0x00000465,0x0000178F,0xFFFFF995,0x000002C5,0x0000178F,0xFFFFF995,0x000002C5},
- {"0000001000010011111010101001010011011110001000000001100010000100",0x00001C2E,0xFFFFF932,0x000002E9,0x000015C2,0xFFFFFBC5,0x000002AD,0x000015C2,0xFFFFFBC5,0x000002AD},
- {"0000001000010011111100001111110101000010110001100100000011100100",0x00003B08,0xFFFFE4E8,0x000005D8,0x0000209D,0xFFFFF444,0x00000398,0x0000209D,0xFFFFF444,0x00000398},
- {"0000001000010011111010101001010011011110000001000101000011100100",0x00002F1F,0xFFFFEB74,0x000004EB,0x00001F4C,0xFFFFF3D4,0x000003CE,0x00001F4C,0xFFFFF3D4,0x000003CE},
- {"0000001000010011111010101001010011011110000001000011100010000100",0x00003415,0xFFFFE89F,0x00000553,0x0000186B,0xFFFFF8E1,0x000002EF,0x0000186B,0xFFFFF8E1,0x000002EF},
- {"0000001000010011111100001111110101000010110011000001000011000100",0x00003441,0xFFFFE779,0x0000059D,0x000019EA,0xFFFFF7B2,0x0000031F,0x000019EA,0xFFFFF7B2,0x0000031F},
- {"0000001000010011111010101001010011011110000101100100000001100100",0x00002174,0xFFFFF546,0x00000378,0x00001456,0xFFFFFC5F,0x00000284,0x00001456,0xFFFFFC5F,0x00000284},
- {"0000001000010011111100001111110101000010110011100100000011000100",0x00003788,0xFFFFE61E,0x000005BF,0x00001DF4,0xFFFFF562,0x00000374,0x00001DF4,0xFFFFF562,0x00000374},
- {"0000001000010011111010101001010011011110000111100001100001000100",0x00001C41,0xFFFFF8C1,0x000002FC,0x0000171E,0xFFFFFA93,0x000002DE,0x0000171E,0xFFFFFA93,0x000002DE},
- {"0000001000010011111100001111110101000010110010100011100001100100",0x00002B15,0xFFFFEDEC,0x00000487,0x000017E4,0xFFFFF934,0x000002DF,0x000017E4,0xFFFFF934,0x000002DF},
- {"0000001000010011111100001111110101000010110011000011000101000100",0x0000327A,0xFFFFEA71,0x000004FF,0x00001D96,0xFFFFF63B,0x00000351,0x00001D96,0xFFFFF63B,0x00000351},
- {"0000001000010011111010101001010011011110000111100100000001100100",0x000023C6,0xFFFFF3E5,0x000003B6,0x000014DE,0xFFFFFC29,0x00000294,0x000014DE,0xFFFFFC29,0x00000294},
- {"0000001000010011111010101001010011011110000101100100100101000100",0x00001F96,0xFFFFF5FA,0x00000364,0x00001397,0xFFFFFC9D,0x0000027D,0x00001397,0xFFFFFC9D,0x0000027D},
- {"0000001000010011111010101001010011011110000001100011000101000100",0x00002B51,0xFFFFEFB5,0x00000420,0x00001ACA,0xFFFFF824,0x0000030D,0x00001ACA,0xFFFFF824,0x0000030D},
- {"0000001000010011111010101001010011011110000111100100100101000100",0x000020DB,0xFFFFF55B,0x0000037C,0x0000153D,0xFFFFFB5F,0x000002BA,0x0000153D,0xFFFFFB5F,0x000002BA},
- {"0000001000010011111010101001010011011110000000100010000110100100",0x000030BB,0xFFFFEBDA,0x000004BC,0x00001B0E,0xFFFFF7A8,0x0000031E,0x00001B0E,0xFFFFF7A8,0x0000031E},
- {"0000001000010011111100001111110101000010110001100010100100000100",0x000033C4,0xFFFFEA41,0x000004FA,0x000022C6,0xFFFFF363,0x000003BC,0x000022C6,0xFFFFF363,0x000003BC},
- {"0000001000010011111010101001010011011110001001000000100100100100",0x00002D47,0xFFFFEE01,0x00000477,0x000021CD,0xFFFFF36E,0x000003D6,0x000021CD,0xFFFFF36E,0x000003D6},
- {"0000001000010011111010101001010011011110000111100011000110100100",0x00001E7B,0xFFFFF733,0x00000339,0x00001668,0xFFFFFB29,0x000002BF,0x00001668,0xFFFFFB29,0x000002BF},
- {"0000001000010011111100001111110101000010110010100010100110000100",0x00002F7E,0xFFFFEAFF,0x000004FC,0x000018D4,0xFFFFF8BE,0x000002E8,0x000018D4,0xFFFFF8BE,0x000002E8},
- {"0000001000010011111010101001010011011110001100100011100010100100",0x00002635,0xFFFFF2E1,0x000003BC,0x000017A4,0xFFFFFA67,0x000002C3,0x000017A4,0xFFFFFA67,0x000002C3},
- {"0000001000010011111010101001010011011110000100100011000010100100",0x000026CA,0xFFFFF2C1,0x000003B2,0x00001C3E,0xFFFFF7AE,0x0000031F,0x00001C3E,0xFFFFF7AE,0x0000031F},
- {"0000001000010011111010101001010011011110000111000001000001100100",0x00002550,0xFFFFF380,0x000003B5,0x000019F5,0xFFFFF8E7,0x00000313,0x000019F5,0xFFFFF8E7,0x00000313},
- {"0000001000010011111100001111110101000010110010100100100100000100",0x00002FBC,0xFFFFEAF8,0x000004FA,0x000018CC,0xFFFFF8C6,0x000002E8,0x000018CC,0xFFFFF8C6,0x000002E8},
- {"0000001000010011111100001111110101000010110100000001100011100100",0x00002FCC,0xFFFFEB60,0x000004EA,0x00001EFF,0xFFFFF4DA,0x0000038F,0x00001EFF,0xFFFFF4DA,0x0000038F},
- {"0000001000010011111010101001010011011110000101100100000010000100",0x000023E6,0xFFFFF413,0x000003A1,0x00001544,0xFFFFFC16,0x0000028B,0x00001544,0xFFFFFC16,0x0000028B},
- {"0000001000010011111100001111110101000010110011100011000000100100",0x00003251,0xFFFFEAA2,0x000004F5,0x000025B0,0xFFFFF0DF,0x00000431,0x000025B0,0xFFFFF0DF,0x00000431},
- {"0000001000010011111100001111110101000010110100000011100110000100",0x00002F6F,0xFFFFEB67,0x000004E6,0x00002275,0xFFFFF249,0x000003FB,0x00002275,0xFFFFF249,0x000003FB},
- {"0000001000010011111010101001010011011110001100100010100101100100",0x00002597,0xFFFFF34A,0x000003B1,0x00001BCC,0xFFFFF822,0x0000031A,0x00001BCC,0xFFFFF822,0x0000031A},
- {"0000001000010011111100001111110101000010110001100011100001100100",0x00003B1D,0xFFFFE40E,0x0000060D,0x00001F61,0xFFFFF470,0x0000039F,0x00001F61,0xFFFFF470,0x0000039F},
- {"0000001000010011111100001111110101000010110001100100000101000100",0x0000379F,0xFFFFE6DB,0x0000058C,0x00002460,0xFFFFF170,0x00000415,0x00002460,0xFFFFF170,0x00000415},
- {"0000001000010011111010101001010011011110000101100101000101000100",0x00002442,0xFFFFF2FB,0x000003D9,0x00001414,0xFFFFFBDC,0x000002A2,0x00001414,0xFFFFFBDC,0x000002A2},
- {"0000001000010011111010101001010011011110000000100100000011000100",0x00003270,0xFFFFEA0D,0x0000051C,0x00001AFD,0xFFFFF783,0x00000328,0x00001AFD,0xFFFFF783,0x00000328},
- {"0000001000010011111010101001010011011110000101100001000100000100",0x00001B23,0xFFFFF94B,0x000002EB,0x000015F1,0xFFFFFB82,0x000002B4,0x000015F1,0xFFFFFB82,0x000002B4},
- {"0000001000010011111010101001010011011110001100100011100001000100",0x000026AE,0xFFFFF21A,0x000003DB,0x00001827,0xFFFFFA10,0x000002C8,0x00001827,0xFFFFFA10,0x000002C8},
- {"0000001000010011111100001111110101000010110010100100100010000100",0x00002DCF,0xFFFFEBD8,0x000004DB,0x00001A75,0xFFFFF719,0x0000033A,0x00001A75,0xFFFFF719,0x0000033A},
- {"0000001000010011111100001111110101000010110011100100000011100100",0x00003983,0xFFFFE500,0x000005EA,0x000022A6,0xFFFFF25F,0x000003F1,0x000022A6,0xFFFFF25F,0x000003F1},
- {"0000001000010011111010101001010011011110000100100001100011000100",0x00002AD5,0xFFFFF07A,0x00000406,0x000019FB,0xFFFFF961,0x000002D8,0x000019FB,0xFFFFF961,0x000002D8},
- {"0000001000010011111100001111110101000010110010100011100110100100",0x00002A43,0xFFFFEE43,0x00000474,0x00001D65,0xFFFFF538,0x00000387,0x00001D65,0xFFFFF538,0x00000387},
- {"0000001000010011111100001111110101000010110001100010000010000100",0x0000311E,0xFFFFEAF8,0x000004E8,0x00001959,0xFFFFF8E4,0x000002DC,0x00001959,0xFFFFF8E4,0x000002DC},
- {"0000001000010011111100001111110101000010110100000011000110100100",0x0000339A,0xFFFFE8A7,0x00000559,0x00001A04,0xFFFFF7E5,0x00000311,0x00001A04,0xFFFFF7E5,0x00000311},
- {"0000001000010011111010101001010011011110001000000100000101000100",0x000021B3,0xFFFFF50F,0x00000389,0x00001470,0xFFFFFBF7,0x000002A5,0x00001470,0xFFFFFBF7,0x000002A5},
- {"0000001000010011111010101001010011011110000000100001100010000100",0x00003417,0xFFFFE9A6,0x0000051D,0x000018A4,0xFFFFF984,0x000002CF,0x000018A4,0xFFFFF984,0x000002CF},
- {"0000001000010011111010101001010011011110001000000010100110000100",0x00001FED,0xFFFFF6A2,0x00000347,0x00001639,0xFFFFFB59,0x000002BB,0x00001639,0xFFFFFB59,0x000002BB},
- {"0000001000010011111010101001010011011110000100100001100010100100",0x000032D2,0xFFFFEB18,0x000004DC,0x00001A01,0xFFFFF95E,0x000002CF,0x00001A01,0xFFFFF95E,0x000002CF},
- {"0000001000010011111100001111110101000010110100000100000010000100",0x00003147,0xFFFFEA3B,0x00000518,0x0000241D,0xFFFFF11C,0x00000431,0x0000241D,0xFFFFF11C,0x00000431},
- {"0000001000010011111010101001010011011110000111000000100100000100",0x00001D44,0xFFFFF7E7,0x0000031A,0x0000153F,0xFFFFFBBC,0x000002A9,0x0000153F,0xFFFFFBBC,0x000002A9},
- {"0000001000010011111100001111110101000010110011000100000100000100",0x00003690,0xFFFFE6E3,0x000005A4,0x000018DE,0xFFFFF908,0x000002DD,0x000018DE,0xFFFFF908,0x000002DD},
- {"0000001000010011111100001111110101000010110011000010000110000100",0x00003561,0xFFFFE6F8,0x000005AB,0x000018B5,0xFFFFF8A0,0x000002F3,0x000018B5,0xFFFFF8A0,0x000002F3},
- {"0000001000010011111010101001010011011110001100100011000100100100",0x000028F4,0xFFFFF23A,0x000003CE,0x00001BC6,0xFFFFF881,0x00000311,0x00001BC6,0xFFFFF881,0x00000311},
- {"0000001000010011111100001111110101000010110100000011000110000100",0x000035D7,0xFFFFE71C,0x0000059B,0x00001D49,0xFFFFF5C8,0x00000368,0x00001D49,0xFFFFF5C8,0x00000368},
- {"0000001000010011111100001111110101000010110011100001100010100100",0x0000397E,0xFFFFE4CB,0x000005F4,0x00001989,0xFFFFF844,0x000002FD,0x00001989,0xFFFFF844,0x000002FD},
- {"0000001000010011111100001111110101000010110001100010000001100100",0x00003BAB,0xFFFFE332,0x0000063F,0x00001A69,0xFFFFF7B9,0x00000312,0x00001A69,0xFFFFF7B9,0x00000312},
- {"0000001000010011111100001111110101000010110100000011000001100100",0x00002F26,0xFFFFEB82,0x000004E8,0x00001D7D,0xFFFFF590,0x00000379,0x00001D7D,0xFFFFF590,0x00000379},
- {"0000001000010011111010101001010011011110000001100011000110100100",0x00002FDC,0xFFFFEBE0,0x000004C3,0x00001940,0xFFFFF8CC,0x000002EE,0x00001940,0xFFFFF8CC,0x000002EE},
- {"0000001000010011111010101001010011011110000111000000100011100100",0x000021B2,0xFFFFF558,0x00000379,0x00001643,0xFFFFFB1C,0x000002C3,0x00001643,0xFFFFFB1C,0x000002C3},
- {"0000001000010011111010101001010011011110001100100001100100000100",0x00002897,0xFFFFF181,0x000003F7,0x00001990,0xFFFFF994,0x000002E2,0x00001990,0xFFFFF994,0x000002E2},
- {"0000001000010011111010101001010011011110000111100000100100100100",0x00001D19,0xFFFFF829,0x0000031A,0x00001558,0xFFFFFBCA,0x000002AF,0x00001558,0xFFFFFBCA,0x000002AF},
- {"0000001000010011111010101001010011011110000001000011000101000100",0x00003311,0xFFFFEAD9,0x000004E1,0x00001BDC,0xFFFFF79E,0x0000031D,0x00001BDC,0xFFFFF79E,0x0000031D},
- {"0000001000010011111010101001010011011110000111100010100111000100",0x00001E54,0xFFFFF740,0x00000333,0x000016A1,0xFFFFFAF0,0x000002C4,0x000016A1,0xFFFFFAF0,0x000002C4},
- {"0000001000010011111100001111110101000010110011100011100101100100",0x00003266,0xFFFFE9A8,0x00000527,0x00002307,0xFFFFF219,0x000003FC,0x00002307,0xFFFFF219,0x000003FC},
- {"0000001000010011111010101001010011011110001100100001000101000100",0x00001D1F,0xFFFFF82B,0x000002F0,0x000013F0,0xFFFFFD0B,0x0000024E,0x000013F0,0xFFFFFD0B,0x0000024E},
- {"0000001000010011111100001111110101000010110001100100100010100100",0x0000312E,0xFFFFEA67,0x00000502,0x0000222A,0xFFFFF253,0x000003F9,0x0000222A,0xFFFFF253,0x000003F9},
- {"0000001000010011111100001111110101000010110010100100000100100100",0x000032B2,0xFFFFE9AD,0x00000523,0x00001E97,0xFFFFF527,0x0000037F,0x00001E97,0xFFFFF527,0x0000037F},
- {"0000001000010011111010101001010011011110000101100100000011100100",0x00001F6A,0xFFFFF6FC,0x00000338,0x0000164B,0xFFFFFB2C,0x000002C2,0x0000164B,0xFFFFFB2C,0x000002C2},
- {"0000001000010011111010101001010011011110000000100010100011000100",0x00002603,0xFFFFF386,0x00000392,0x00001EE0,0xFFFFF601,0x00000369,0x00001EE0,0xFFFFF601,0x00000369},
- {"0000001000010011111010101001010011011110001000000001000101100100",0x00001D0C,0xFFFFF803,0x00000317,0x00001345,0xFFFFFD52,0x00000260,0x00001345,0xFFFFFD52,0x00000260},
- {"0000001000010011111100001111110101000010110011000001100010000100",0x0000327A,0xFFFFE8E5,0x0000055C,0x00001680,0xFFFFFA2D,0x000002B2,0x00001680,0xFFFFFA2D,0x000002B2},
- {"0000001000010011111100001111110101000010110010100011100101100100",0x000032B8,0xFFFFE91A,0x0000054A,0x00001BAB,0xFFFFF6EC,0x00000338,0x00001BAB,0xFFFFF6EC,0x00000338},
- {"0000001000010011111100001111110101000010110011000011000001000100",0x00002F79,0xFFFFEB63,0x000004EF,0x000017BB,0xFFFFF9B1,0x000002CA,0x000017BB,0xFFFFF9B1,0x000002CA},
- {"0000001000010011111010101001010011011110000001000011100011100100",0x00002AE5,0xFFFFEFCB,0x0000041D,0x0000214A,0xFFFFF3A7,0x000003C7,0x0000214A,0xFFFFF3A7,0x000003C7},
- {"0000001000010011111010101001010011011110001100100010000001100100",0x0000212C,0xFFFFF5BC,0x0000034F,0x000017ED,0xFFFFFA4C,0x000002C1,0x000017ED,0xFFFFFA4C,0x000002C1},
- {"0000001000010011111010101001010011011110000100100001000100100100",0x00002BE7,0xFFFFEF40,0x0000043C,0x00001AE2,0xFFFFF8CF,0x000002E3,0x00001AE2,0xFFFFF8CF,0x000002E3},
- {"0000001000010011111100001111110101000010110100000101000101000100",0x000032DC,0xFFFFE90F,0x00000549,0x00002A2D,0xFFFFECC9,0x000004ED,0x00002A2D,0xFFFFECC9,0x000004ED},
- {"0000001000010011111010101001010011011110000101100001100010100100",0x00001DE3,0xFFFFF80D,0x00000319,0x000016FA,0xFFFFFB42,0x000002BC,0x000016FA,0xFFFFFB42,0x000002BC},
- {"0000001000010011111010101001010011011110000111100010100001000100",0x00001F1B,0xFFFFF6DE,0x00000346,0x00001502,0xFFFFFC23,0x00000298,0x00001502,0xFFFFFC23,0x00000298},
- {"0000001000010011111010101001010011011110000001100001100001100100",0x00003203,0xFFFFEA87,0x000004FE,0x0000194E,0xFFFFF8E3,0x000002EC,0x0000194E,0xFFFFF8E3,0x000002EC},
- {"0000001000010011111100001111110101000010110100000010000101000100",0x0000337A,0xFFFFE8DD,0x00000551,0x00001E3C,0xFFFFF534,0x00000385,0x00001E3C,0xFFFFF534,0x00000385},
- {"0000001000010011111100001111110101000010110010100100100001100100",0x000036F6,0xFFFFE62A,0x000005C5,0x000023C0,0xFFFFF117,0x00000435,0x000023C0,0xFFFFF117,0x00000435},
- {"0000001000010011111100001111110101000010110011000010000101000100",0x00003125,0xFFFFEA4E,0x0000051A,0x00001E6C,0xFFFFF503,0x0000038E,0x00001E6C,0xFFFFF503,0x0000038E},
- {"0000001000010011111010101001010011011110000111000000100010100100",0x00001CD4,0xFFFFF82D,0x0000030E,0x0000156D,0xFFFFFB64,0x000002B8,0x0000156D,0xFFFFFB64,0x000002B8},
- {"0000001000010011111010101001010011011110000000100100000010100100",0x00002F14,0xFFFFEC46,0x000004B8,0x000017F1,0xFFFFF977,0x000002D2,0x000017F1,0xFFFFF977,0x000002D2},
- {"0000001000010011111010101001010011011110000001100100000010100100",0x000031F1,0xFFFFEAD4,0x000004ED,0x0000184C,0xFFFFF983,0x000002D4,0x0000184C,0xFFFFF983,0x000002D4},
- {"0000001000010011111100001111110101000010110100000100100110000100",0x00002EA9,0xFFFFEBD7,0x000004D5,0x0000288D,0xFFFFEDDB,0x000004C0,0x0000288D,0xFFFFEDDB,0x000004C0},
- {"0000001000010011111100001111110101000010110010100011100110000100",0x0000335F,0xFFFFE82C,0x00000579,0x00001DBF,0xFFFFF512,0x0000038C,0x00001DBF,0xFFFFF512,0x0000038C},
- {"0000001000010011111010101001010011011110001000000001000110000100",0x0000224F,0xFFFFF4B5,0x00000391,0x0000138C,0xFFFFFCC3,0x0000027A,0x0000138C,0xFFFFFCC3,0x0000027A},
- {"0000001000010011111010101001010011011110000100100100000010100100",0x0000320D,0xFFFFEACD,0x000004F5,0x00001976,0xFFFFF913,0x000002E2,0x00001976,0xFFFFF913,0x000002E2},
- {"0000001000010011111010101001010011011110001000000010000100000100",0x00001BEB,0xFFFFF99C,0x000002E4,0x000016A4,0xFFFFFB77,0x000002C3,0x000016A4,0xFFFFFB77,0x000002C3},
- {"0000001000010011111010101001010011011110000001100011000001000100",0x0000396E,0xFFFFE616,0x000005A9,0x000018F4,0xFFFFF91A,0x000002E3,0x000018F4,0xFFFFF91A,0x000002E3},
- {"0000001000010011111010101001010011011110000000100010100001100100",0x00003251,0xFFFFEA8E,0x000004FA,0x000018EF,0xFFFFF910,0x000002E4,0x000018EF,0xFFFFF910,0x000002E4},
- {"0000001000010011111010101001010011011110000111000001100100100100",0x00001DAF,0xFFFFF857,0x0000030D,0x00001915,0xFFFFF9D8,0x000002F7,0x00001915,0xFFFFF9D8,0x000002F7},
- {"0000001000010011111010101001010011011110001000000100000110100100",0x000025B6,0xFFFFF26B,0x000003E5,0x00001531,0xFFFFFB68,0x000002AF,0x00001531,0xFFFFFB68,0x000002AF},
- {"0000001000010011111010101001010011011110000001100001100010000100",0x00002B2E,0xFFFFEF2E,0x00000440,0x00001968,0xFFFFF91A,0x000002DF,0x00001968,0xFFFFF91A,0x000002DF},
- {"0000001000010011111010101001010011011110000111000010000001100100",0x00002305,0xFFFFF528,0x00000377,0x000018A4,0xFFFFF9EB,0x000002F0,0x000018A4,0xFFFFF9EB,0x000002F0},
- {"0000001000010011111100001111110101000010110010100100000011000100",0x000032A1,0xFFFFE992,0x0000052E,0x00001A55,0xFFFFF826,0x000002FE,0x00001A55,0xFFFFF826,0x000002FE},
- {"0000001000010011111010101001010011011110000001000010000110000100",0x00002CCD,0xFFFFEE35,0x00000462,0x00001B09,0xFFFFF7E6,0x0000030F,0x00001B09,0xFFFFF7E6,0x0000030F},
- {"0000001000010011111010101001010011011110001100100011000010000100",0x00002602,0xFFFFF2CF,0x000003C5,0x000016EE,0xFFFFFAD4,0x000002B4,0x000016EE,0xFFFFFAD4,0x000002B4},
- {"0000001000010011111100001111110101000010110100000001100101100100",0x00003370,0xFFFFE891,0x00000560,0x000017F0,0xFFFFF930,0x000002DF,0x000017F0,0xFFFFF930,0x000002DF},
- {"0000001000010011111100001111110101000010110010100001100010000100",0x00002EDC,0xFFFFEB6D,0x000004EC,0x000016E6,0xFFFFF9ED,0x000002BC,0x000016E6,0xFFFFF9ED,0x000002BC},
- {"0000001000010011111010101001010011011110000100100010100011000100",0x00002A05,0xFFFFF13D,0x000003F0,0x00002065,0xFFFFF57B,0x00000378,0x00002065,0xFFFFF57B,0x00000378},
- {"0000001000010011111100001111110101000010110011100010000001000100",0x00002F8A,0xFFFFEB6E,0x000004E4,0x00001E3E,0xFFFFF50E,0x0000038D,0x00001E3E,0xFFFFF50E,0x0000038D},
- {"0000001000010011111100001111110101000010110010100011000001000100",0x00002BB5,0xFFFFED6A,0x000004A1,0x000017BF,0xFFFFF937,0x000002E5,0x000017BF,0xFFFFF937,0x000002E5},
- {"0000001000010011111010101001010011011110001000000001100101100100",0x0000202C,0xFFFFF6CE,0x0000033F,0x000015EE,0xFFFFFB83,0x000002B9,0x000015EE,0xFFFFFB83,0x000002B9},
- {"0000001000010011111010101001010011011110000000100010100010000100",0x00002C0C,0xFFFFEF10,0x0000043F,0x00001A73,0xFFFFF83E,0x0000030C,0x00001A73,0xFFFFF83E,0x0000030C},
- {"0000001000010011111010101001010011011110001100100100000100000100",0x0000234F,0xFFFFF460,0x00000385,0x000018C3,0xFFFFF9A5,0x000002DD,0x000018C3,0xFFFFF9A5,0x000002DD},
- {"0000001000010011111100001111110101000010110011100001100100000100",0x00003679,0xFFFFE704,0x00000595,0x00002177,0xFFFFF31A,0x000003D7,0x00002177,0xFFFFF31A,0x000003D7},
- {"0000001000010011111100001111110101000010110010100010100100100100",0x00003008,0xFFFFEBB8,0x000004D5,0x000024FF,0xFFFFF112,0x00000430,0x000024FF,0xFFFFF112,0x00000430},
- {"0000001000010011111100001111110101000010110001100100000110100100",0x00003848,0xFFFFE6A3,0x00000594,0x00002958,0xFFFFEE37,0x000004A0,0x00002958,0xFFFFEE37,0x000004A0},
- {"0000001000010011111100001111110101000010110011000001100100100100",0x00002FDF,0xFFFFEB08,0x000004FD,0x00001D77,0xFFFFF58B,0x0000037A,0x00001D77,0xFFFFF58B,0x0000037A},
- {"0000001000010011111010101001010011011110000001100011000001100100",0x00002EC8,0xFFFFED41,0x00000481,0x00001949,0xFFFFF91C,0x000002DF,0x00001949,0xFFFFF91C,0x000002DF},
- {"0000001000010011111100001111110101000010110100000100000110100100",0x000037C1,0xFFFFE5BA,0x000005D7,0x0000252C,0xFFFFF023,0x00000460,0x0000252C,0xFFFFF023,0x00000460},
- {"0000001000010011111100001111110101000010110011100010100101000100",0x00003716,0xFFFFE70C,0x0000058A,0x000028CC,0xFFFFEE57,0x0000049D,0x000028CC,0xFFFFEE57,0x0000049D},
- {"0000001000010011111100001111110101000010110010100100000011100100",0x000033D1,0xFFFFE8E8,0x00000547,0x00001AB1,0xFFFFF7E5,0x00000309,0x00001AB1,0xFFFFF7E5,0x00000309},
- {"0000001000010011111100001111110101000010110011000010100101000100",0x00002D72,0xFFFFED65,0x0000048E,0x00001E0D,0xFFFFF5A7,0x00000370,0x00001E0D,0xFFFFF5A7,0x00000370},
- {"0000001000010011111010101001010011011110000111000011100110100100",0x00002292,0xFFFFF49F,0x00000393,0x000017F4,0xFFFFF9CD,0x000002F5,0x000017F4,0xFFFFF9CD,0x000002F5},
- {"0000001000010011111010101001010011011110001001000011000001000100",0x000026EE,0xFFFFF18C,0x000003F7,0x000018A7,0xFFFFF95A,0x000002E5,0x000018A7,0xFFFFF95A,0x000002E5},
- {"0000001000010011111010101001010011011110000001000010000101100100",0x00002F62,0xFFFFEC9B,0x000004A4,0x0000194E,0xFFFFF932,0x000002D9,0x0000194E,0xFFFFF932,0x000002D9},
- {"0000001000010011111010101001010011011110000111100011100110000100",0x00001CE8,0xFFFFF7FA,0x0000031C,0x000014CE,0xFFFFFBD4,0x000002AB,0x000014CE,0xFFFFFBD4,0x000002AB},
- {"0000001000010011111010101001010011011110000100100001000011100100",0x00002E5A,0xFFFFEDAB,0x0000047C,0x00001A82,0xFFFFF8F7,0x000002DE,0x00001A82,0xFFFFF8F7,0x000002DE},
- {"0000001000010011111100001111110101000010110011000011000011100100",0x00003057,0xFFFFEC34,0x000004B9,0x00002296,0xFFFFF342,0x000003D0,0x00002296,0xFFFFF342,0x000003D0},
- {"0000001000010011111010101001010011011110000001000001100010100100",0x00002B0F,0xFFFFEF58,0x00000434,0x00001BFD,0xFFFFF721,0x00000330,0x00001BFD,0xFFFFF721,0x00000330},
- {"0000001000010011111010101001010011011110001000000001000010100100",0x00001F01,0xFFFFF751,0x0000032F,0x00001502,0xFFFFFC3E,0x00000296,0x00001502,0xFFFFFC3E,0x00000296},
- {"0000001000010011111100001111110101000010110010100011000001100100",0x00002FF4,0xFFFFEAE2,0x00000503,0x00001B36,0xFFFFF736,0x00000330,0x00001B36,0xFFFFF736,0x00000330},
- {"0000001000010011111100001111110101000010110011100010000001100100",0x00003762,0xFFFFE5AB,0x000005DE,0x000018CB,0xFFFFF896,0x000002F4,0x000018CB,0xFFFFF896,0x000002F4},
- {"0000001000010011111100001111110101000010110011000010000001100100",0x00002890,0xFFFFEF92,0x00000445,0x0000191D,0xFFFFF86F,0x00000302,0x0000191D,0xFFFFF86F,0x00000302},
- {"0000001000010011111010101001010011011110000001000011000001100100",0x00002F76,0xFFFFEC0E,0x000004BF,0x00001F7D,0xFFFFF41A,0x000003C0,0x00001F7D,0xFFFFF41A,0x000003C0},
- {"0000001000010011111010101001010011011110000111100000100010100100",0x00001D55,0xFFFFF7F8,0x0000031E,0x000015DF,0xFFFFFB79,0x000002B7,0x000015DF,0xFFFFFB79,0x000002B7},
- {"0000001000010011111010101001010011011110001000000100100100100100",0x00001FE9,0xFFFFF64A,0x00000353,0x000019E8,0xFFFFF882,0x0000032A,0x000019E8,0xFFFFF882,0x0000032A},
- {"0000001000010011111010101001010011011110000001100011100101100100",0x000030B5,0xFFFFEBB8,0x000004C4,0x00001857,0xFFFFF968,0x000002D8,0x00001857,0xFFFFF968,0x000002D8},
- {"0000001000010011111100001111110101000010110010100010100011000100",0x00003398,0xFFFFE9A3,0x00000524,0x00001FF9,0xFFFFF458,0x000003AD,0x00001FF9,0xFFFFF458,0x000003AD},
- {"0000001000010011111100001111110101000010110011100010100101100100",0x00003897,0xFFFFE5BD,0x000005C8,0x00002519,0xFFFFF0BA,0x00000438,0x00002519,0xFFFFF0BA,0x00000438},
- {"0000001000010011111100001111110101000010110100000100000001100100",0x00003234,0xFFFFE9B1,0x00000530,0x000022CC,0xFFFFF20E,0x00000409,0x000022CC,0xFFFFF20E,0x00000409},
- {"0000001000010011111010101001010011011110001000000101000100000100",0x00001FD2,0xFFFFF641,0x00000354,0x000017C9,0xFFFFF9C0,0x000002FB,0x000017C9,0xFFFFF9C0,0x000002FB},
- {"0000001000010011111100001111110101000010110011100100100011100100",0x00003234,0xFFFFE946,0x0000053D,0x00002267,0xFFFFF1F5,0x0000040D,0x00002267,0xFFFFF1F5,0x0000040D},
- {"0000001000010011111010101001010011011110001000000010100110100100",0x00002330,0xFFFFF474,0x00000399,0x00001490,0xFFFFFC67,0x00000288,0x00001490,0xFFFFFC67,0x00000288},
- {"0000001000010011111100001111110101000010110100000011100100100100",0x000032A3,0xFFFFE9EB,0x0000051B,0x0000234D,0xFFFFF23C,0x000003F7,0x0000234D,0xFFFFF23C,0x000003F7},
- {"0000001000010011111010101001010011011110001000000000100100000100",0x0000217E,0xFFFFF53A,0x00000384,0x00001511,0xFFFFFBF5,0x0000029E,0x00001511,0xFFFFFBF5,0x0000029E},
- {"0000001000010011111100001111110101000010110011100101000011100100",0x0000384F,0xFFFFE562,0x000005E2,0x0000295A,0xFFFFED53,0x000004D3,0x0000295A,0xFFFFED53,0x000004D3},
- {"0000001000010011111100001111110101000010110100000101000100100100",0x00003315,0xFFFFE8D1,0x00000552,0x000025D1,0xFFFFEFAF,0x00000471,0x000025D1,0xFFFFEFAF,0x00000471},
- {"0000001000010011111100001111110101000010110001100100100100100100",0x00004183,0xFFFFDF61,0x000006DA,0x0000193C,0xFFFFF88F,0x000002EC,0x0000193C,0xFFFFF88F,0x000002EC},
- {"0000001000010011111010101001010011011110001001000010000101100100",0x00002DFC,0xFFFFEDF2,0x0000047A,0x00001755,0xFFFFFAC2,0x000002AC,0x00001755,0xFFFFFAC2,0x000002AC},
- {"0000001000010011111100001111110101000010110010100011000110100100",0x000033FE,0xFFFFE774,0x0000059F,0x00001E70,0xFFFFF492,0x000003A0,0x00001E70,0xFFFFF492,0x000003A0},
- {"0000001000010011111100001111110101000010110001100010100110100100",0x000040D7,0xFFFFDFB8,0x000006CE,0x00001AC8,0xFFFFF773,0x0000031D,0x00001AC8,0xFFFFF773,0x0000031D},
- {"0000001000010011111010101001010011011110000111100001000101100100",0x00001D02,0xFFFFF803,0x00000322,0x000015FE,0xFFFFFB71,0x000002BB,0x000015FE,0xFFFFFB71,0x000002BB},
- {"0000001000010011111100001111110101000010110100000010100010000100",0x00002EB0,0xFFFFEC31,0x000004C4,0x00001B3C,0xFFFFF73B,0x00000330,0x00001B3C,0xFFFFF73B,0x00000330},
- {"0000001000010011111100001111110101000010110010100100100110000100",0x00002D9F,0xFFFFECBF,0x000004A8,0x000022B0,0xFFFFF23C,0x000003F9,0x000022B0,0xFFFFF23C,0x000003F9},
- {"0000001000010011111100001111110101000010110011000001100011100100",0x00002C6A,0xFFFFEDAC,0x00000488,0x00002419,0xFFFFF159,0x00000427,0x00002419,0xFFFFF159,0x00000427},
- {"0000001000010011111010101001010011011110000100100001000010100100",0x00002991,0xFFFFF06C,0x0000040E,0x00001AA9,0xFFFFF8D0,0x000002E1,0x00001AA9,0xFFFFF8D0,0x000002E1},
- {"0000001000010011111010101001010011011110000100100011100100000100",0x00002F8E,0xFFFFED1B,0x00000493,0x00001DE4,0xFFFFF69C,0x00000347,0x00001DE4,0xFFFFF69C,0x00000347},
- {"0000001000010011111010101001010011011110001000000100000110000100",0x00002136,0xFFFFF540,0x0000037C,0x000014FF,0xFFFFFB83,0x000002B2,0x000014FF,0xFFFFFB83,0x000002B2},
- {"0000001000010011111010101001010011011110000001100001100011100100",0x0000354C,0xFFFFE97D,0x0000051A,0x00001906,0xFFFFF965,0x000002DD,0x00001906,0xFFFFF965,0x000002DD},
- {"0000001000010011111100001111110101000010110001100010000011000100",0x0000348B,0xFFFFE94D,0x0000051F,0x0000285B,0xFFFFEF1A,0x00000473,0x0000285B,0xFFFFEF1A,0x00000473},
- {"0000001000010011111010101001010011011110001100100001100010100100",0x000026E6,0xFFFFF24E,0x000003D6,0x0000141F,0xFFFFFCCE,0x00000260,0x0000141F,0xFFFFFCCE,0x00000260},
- {"0000001000010011111100001111110101000010110001100100000101100100",0x00003CED,0xFFFFE2A5,0x0000064E,0x00002060,0xFFFFF3E0,0x000003B0,0x00002060,0xFFFFF3E0,0x000003B0},
- {"0000001000010011111010101001010011011110000000100001000010000100",0x000029D4,0xFFFFEFF7,0x00000426,0x00001976,0xFFFFF8E1,0x000002EE,0x00001976,0xFFFFF8E1,0x000002EE},
- {"0000001000010011111100001111110101000010110010100100000010100100",0x00003767,0xFFFFE601,0x000005CC,0x00001D22,0xFFFFF5F4,0x00000361,0x00001D22,0xFFFFF5F4,0x00000361},
- {"0000001000010011111100001111110101000010110001100101000011000100",0x00003CE8,0xFFFFE2E8,0x00000637,0x0000232C,0xFFFFF1E7,0x00000405,0x0000232C,0xFFFFF1E7,0x00000405},
- {"0000001000010011111010101001010011011110001000000001000001100100",0x000023A8,0xFFFFF4CD,0x00000386,0x00001944,0xFFFFF983,0x00000300,0x00001944,0xFFFFF983,0x00000300},
- {"0000001000010011111100001111110101000010110011000011000010100100",0x00003451,0xFFFFE8B9,0x00000551,0x00001AD7,0xFFFFF7BF,0x00000318,0x00001AD7,0xFFFFF7BF,0x00000318},
- {"0000001000010011111100001111110101000010110011100010100110000100",0x0000381B,0xFFFFE5A0,0x000005D0,0x00001E0F,0xFFFFF521,0x00000382,0x00001E0F,0xFFFFF521,0x00000382},
- {"0000001000010011111010101001010011011110001000000011100011000100",0x000023A4,0xFFFFF4A6,0x00000394,0x0000171F,0xFFFFFABB,0x000002D9,0x0000171F,0xFFFFFABB,0x000002D9},
- {"0000001000010011111100001111110101000010110001100010000010100100",0x00003C2B,0xFFFFE447,0x000005F0,0x0000207F,0xFFFFF44E,0x0000039A,0x0000207F,0xFFFFF44E,0x0000039A},
- {"0000001000010011111100001111110101000010110011000011100110000100",0x00002F07,0xFFFFEB70,0x000004E9,0x00001765,0xFFFFF9A5,0x000002C6,0x00001765,0xFFFFF9A5,0x000002C6},
- {"0000001000010011111100001111110101000010110001100010100110000100",0x00003A01,0xFFFFE4E0,0x000005E7,0x0000227A,0xFFFFF292,0x000003E5,0x0000227A,0xFFFFF292,0x000003E5},
- {"0000001000010011111100001111110101000010110011100010000010100100",0x0000376E,0xFFFFE686,0x000005A6,0x00001FCF,0xFFFFF43B,0x000003A8,0x00001FCF,0xFFFFF43B,0x000003A8},
- {"0000001000010011111100001111111111101111010110100100100110000100",0x0000485F,0xFFFFDCC1,0x00000713,0x00002CF8,0xFFFFEC45,0x000004DA,0x00002CF8,0xFFFFEC45,0x000004DA},
- {"0000001000010011111100001111111111101111010111000011000110000100",0x0000331C,0xFFFFE8FF,0x00000541,0x00002366,0xFFFFF19D,0x00000411,0x00002366,0xFFFFF19D,0x00000411},
- {"0000001000010011111100001111111111101111011001000011100001100100",0x00003CF3,0xFFFFE15A,0x00000694,0x00002FB3,0xFFFFE827,0x000005B9,0x00002FB3,0xFFFFE827,0x000005B9},
- {"0000001000010011111010101001010011011110001100100001000100000100",0x000023F3,0xFFFFF3EA,0x0000039A,0x00001345,0xFFFFFD6B,0x00000241,0x00001345,0xFFFFFD6B,0x00000241},
- {"0000001000010011111100001111111111101111010111000010100010100100",0x000038C0,0xFFFFE58A,0x000005CC,0x000023CA,0xFFFFF1AA,0x00000408,0x000023CA,0xFFFFF1AA,0x00000408},
- {"0000001000010011111100001111111111101111011001100010100101000100",0x00004976,0xFFFFDD6A,0x000006D7,0x000033C6,0xFFFFE8EB,0x0000054D,0x000033C6,0xFFFFE8EB,0x0000054D},
- {"0000001000010011111100001111111111101111011001000100100100000100",0x00004049,0xFFFFDF6D,0x000006D8,0x00003129,0xFFFFE716,0x000005E9,0x00003129,0xFFFFE716,0x000005E9},
- {"0000001000010011111100001111111111101111011001100001000101100100",0x000046C2,0xFFFFDCEB,0x0000071C,0x00002E6D,0xFFFFEA8F,0x0000052E,0x00002E6D,0xFFFFEA8F,0x0000052E},
- {"0000001000010011111100001111111111101111011000100011100010100100",0x00004080,0xFFFFE1E1,0x0000063A,0x0000396D,0xFFFFE40A,0x0000062C,0x0000396D,0xFFFFE40A,0x0000062C},
- {"0000001000010011111100001111111111101111010111100010000100100100",0x00003DE0,0xFFFFE358,0x0000060C,0x00002AA2,0xFFFFEDBF,0x000004A0,0x00002AA2,0xFFFFEDBF,0x000004A0},
- {"0000001000010011111100001111111111101111010111100011000101000100",0x00003FC0,0xFFFFE2A1,0x0000061A,0x000027D8,0xFFFFEFEC,0x0000043A,0x000027D8,0xFFFFEFEC,0x0000043A},
- {"0000001000010011111100001111111111101111011001100001100100100100",0x00003FBF,0xFFFFE2F5,0x00000603,0x000032D7,0xFFFFE900,0x00000552,0x000032D7,0xFFFFE900,0x00000552},
- {"0000001000010011111100001111111111101111010111000001000011100100",0x000035EE,0xFFFFE6CA,0x000005A2,0x0000247C,0xFFFFF088,0x00000446,0x0000247C,0xFFFFF088,0x00000446},
- {"0000001000010011111100001111111111101111011001000011100010000100",0x000039C8,0xFFFFE3AE,0x0000062A,0x000028AF,0xFFFFED24,0x000004DF,0x000028AF,0xFFFFED24,0x000004DF},
- {"0000001000010011111100001111111111101111010111000010100010000100",0x00003BDE,0xFFFFE33B,0x00000632,0x00001B6C,0xFFFFF720,0x00000326,0x00001B6C,0xFFFFF720,0x00000326},
- {"0000001000010011111100001111111111101111011100100001000010100100",0x00003818,0xFFFFE57D,0x000005D4,0x000020EF,0xFFFFF327,0x000003CE,0x000020EF,0xFFFFF327,0x000003CE},
- {"0000001000010011111100001111111111101111010111100001100110100100",0x000038DA,0xFFFFE561,0x000005D3,0x0000297D,0xFFFFED6D,0x000004C5,0x0000297D,0xFFFFED6D,0x000004C5},
- {"0000001000010011111100001111111111101111011010000100100010000100",0x000027AC,0xFFFFF0CE,0x00000417,0x00001F5F,0xFFFFF484,0x000003B2,0x00001F5F,0xFFFFF484,0x000003B2},
- {"0000001000010011111100001111111111101111011001100100100010100100",0x00003F02,0xFFFFE222,0x00000643,0x000026D4,0xFFFFF000,0x00000443,0x000026D4,0xFFFFF000,0x00000443},
- {"0000001000010011111100001111111111101111011000100100000101100100",0x00004303,0xFFFFDFE3,0x00000690,0x0000312C,0xFFFFE912,0x00000561,0x0000312C,0xFFFFE912,0x00000561},
- {"0000001000010011111100001111111111101111011000000000100100000100",0x000039E5,0xFFFFE31F,0x00000657,0x00001D23,0xFFFFF51F,0x00000386,0x00001D23,0xFFFFF51F,0x00000386},
- {"0000001000010011111100001111111111101111011001100001000101000100",0x000041FA,0xFFFFE01B,0x00000697,0x00002767,0xFFFFEF90,0x00000455,0x00002767,0xFFFFEF90,0x00000455},
- {"0000001000010011111100001111111111101111011010000011000010100100",0x00002888,0xFFFFF11C,0x00000403,0x00001864,0xFFFFF9D8,0x000002D3,0x00001864,0xFFFFF9D8,0x000002D3},
- {"0000001000010011111010101001010011011110001000000001100001100100",0x0000215C,0xFFFFF5B6,0x0000036D,0x000015C5,0xFFFFFB8A,0x000002B5,0x000015C5,0xFFFFFB8A,0x000002B5},
- {"0000001000010011111100001111111111101111011010000011100110000100",0x00002FAF,0xFFFFEC27,0x000004CA,0x00002184,0xFFFFF39C,0x000003CD,0x00002184,0xFFFFF39C,0x000003CD},
- {"0000001000010011111100001111111111101111010111100001000011000100",0x00004ACE,0xFFFFD9A3,0x000007BC,0x00001A5D,0xFFFFF7F6,0x000002FC,0x00001A5D,0xFFFFF7F6,0x000002FC},
- {"0000001000010011111100001111111111101111010110100011000001000100",0x00003763,0xFFFFE797,0x0000055F,0x000029B5,0xFFFFEEA1,0x00000474,0x000029B5,0xFFFFEEA1,0x00000474},
- {"0000001000010011111100001111111111101111010111100011000101100100",0x00003832,0xFFFFE6F9,0x00000575,0x00002C99,0xFFFFEC42,0x000004E3,0x00002C99,0xFFFFEC42,0x000004E3},
- {"0000001000010011111100001111111111101111011000000100000101100100",0x000041C9,0xFFFFDE33,0x0000071E,0x0000199D,0xFFFFF808,0x000002F9,0x0000199D,0xFFFFF808,0x000002F9},
- {"0000001000010011111100001111111111101111011001000001000101100100",0x0000474A,0xFFFFD96E,0x00000802,0x00002A30,0xFFFFEB57,0x0000053F,0x00002A30,0xFFFFEB57,0x0000053F},
- {"0000001000010011111100001111111111101111010111000011000111000100",0x0000312F,0xFFFFEA6A,0x00000508,0x000029D3,0xFFFFED38,0x000004D3,0x000029D3,0xFFFFED38,0x000004D3},
- {"0000001000010011111100001111111111101111011100100001000011000100",0x00003BD6,0xFFFFE2E7,0x00000644,0x00002093,0xFFFFF37B,0x000003BD,0x00002093,0xFFFFF37B,0x000003BD},
- {"0000001000010011111100001111111111101111011010000100000011100100",0x00002F94,0xFFFFECD4,0x000004A3,0x00002196,0xFFFFF40B,0x000003B5,0x00002196,0xFFFFF40B,0x000003B5},
- {"0000001000010011111100001111111111101111010111100001100101000100",0x0000369B,0xFFFFE762,0x00000571,0x00002726,0xFFFFEF99,0x00000459,0x00002726,0xFFFFEF99,0x00000459},
- {"0000001000010011111100001111111111101111011001000010000001100100",0x00003F57,0xFFFFDF47,0x000006F4,0x00002E5F,0xFFFFE8AE,0x000005AB,0x00002E5F,0xFFFFE8AE,0x000005AB},
- {"0000001000010011111010101001010011011110000010100100000011000100",0x00004313,0xFFFFDD81,0x0000072D,0x00002468,0xFFFFF068,0x00000440,0x00002468,0xFFFFF068,0x00000440},
- {"0000001000010011111100001111111111101111011010000011000001000100",0x00002A35,0xFFFFEFA8,0x00000441,0x00001F3F,0xFFFFF4F3,0x000003A0,0x00001F3F,0xFFFFF4F3,0x000003A0},
- {"0000001000010011111100001111111111101111011001100011000010100100",0x00003E33,0xFFFFE4B0,0x000005AF,0x00002802,0xFFFFF092,0x00000412,0x00002802,0xFFFFF092,0x00000412},
- {"0000001000010011111010101001010011011110001100100011100100000100",0x00002815,0xFFFFF20E,0x000003DD,0x00001C33,0xFFFFF7D5,0x0000032A,0x00001C33,0xFFFFF7D5,0x0000032A},
- {"0000001000010011111100001111111111101111010110100010000110000100",0x00003CC2,0xFFFFE43E,0x000005DE,0x00002C16,0xFFFFECED,0x000004BA,0x00002C16,0xFFFFECED,0x000004BA},
- {"0000001000010011111100001111111111101111010111000100000010000100",0x00003CFA,0xFFFFE1EE,0x00000673,0x00001F7D,0xFFFFF402,0x000003AE,0x00001F7D,0xFFFFF402,0x000003AE},
- {"0000001000010011111100001111111111101111011000100010000100000100",0x0000486E,0xFFFFDD43,0x000006EE,0x000036F0,0xFFFFE609,0x000005D5,0x000036F0,0xFFFFE609,0x000005D5},
- {"0000001000010011111100001111111111101111010111000100100101100100",0x000039FE,0xFFFFE41F,0x00000613,0x0000266C,0xFFFFEF35,0x0000047D,0x0000266C,0xFFFFEF35,0x0000047D},
- {"0000001000010011111010101001010011011110000100100011000100100100",0x00002EA4,0xFFFFEE3B,0x00000462,0x00002126,0xFFFFF4E2,0x0000038F,0x00002126,0xFFFFF4E2,0x0000038F},
- {"0000001000010011111100001111111111101111011010000011100101000100",0x00002D2E,0xFFFFEE7B,0x00000462,0x0000229D,0xFFFFF363,0x000003D4,0x0000229D,0xFFFFF363,0x000003D4},
- {"0000001000010011111100001111111111101111010111100010100001000100",0x0000375C,0xFFFFE695,0x0000059D,0x00002319,0xFFFFF237,0x000003EE,0x00002319,0xFFFFF237,0x000003EE},
- {"0000001000010011111100001111111111101111011100100101000011000100",0x00004522,0xFFFFDC71,0x0000075E,0x0000247E,0xFFFFF0A0,0x0000043C,0x0000247E,0xFFFFF0A0,0x0000043C},
- {"0000001000010011111010101001010011011110000100100100100011100100",0x00002E58,0xFFFFECB9,0x000004A9,0x0000199A,0xFFFFF8CF,0x000002E9,0x0000199A,0xFFFFF8CF,0x000002E9},
- {"0000001000010011111100001111111111101111011001000011100011100100",0x00003791,0xFFFFE5FE,0x000005B6,0x000029F5,0xFFFFED0D,0x000004CD,0x000029F5,0xFFFFED0D,0x000004CD},
- {"0000001000010011111010101001010011011110001001000100000101000100",0x00002E9E,0xFFFFEC8D,0x000004C1,0x000019D0,0xFFFFF869,0x0000030F,0x000019D0,0xFFFFF869,0x0000030F},
- {"0000001000010011111010101001010011011110001000000011100101100100",0x0000237C,0xFFFFF435,0x000003A6,0x000014EB,0xFFFFFBC4,0x000002AF,0x000014EB,0xFFFFFBC4,0x000002AF},
- {"0000001000010011111100001111111111101111011001100010100100100100",0x00003FE5,0xFFFFE4A2,0x000005A0,0x00003416,0xFFFFE995,0x00000523,0x00003416,0xFFFFE995,0x00000523},
- {"0000001000010011111100001111111111101111010111000000100100100100",0x00002B27,0xFFFFED51,0x000004A5,0x000025D1,0xFFFFEF18,0x00000492,0x000025D1,0xFFFFEF18,0x00000492},
- {"0000001000010011111100001111111111101111011010000100100100000100",0x00002D77,0xFFFFED79,0x00000494,0x00002196,0xFFFFF352,0x000003DE,0x00002196,0xFFFFF352,0x000003DE},
- {"0000001000010011111100001111111111101111010111000010000011000100",0x00003750,0xFFFFE6AC,0x00000596,0x00002524,0xFFFFF0B5,0x00000431,0x00002524,0xFFFFF0B5,0x00000431},
- {"0000001000010011111010101001010011011110000100100010100101000100",0x00002896,0xFFFFF1BB,0x000003D9,0x00001CE0,0xFFFFF753,0x0000032F,0x00001CE0,0xFFFFF753,0x0000032F},
- {"0000001000010011111100001111111111101111011001000001100110000100",0x00003CA7,0xFFFFE0F7,0x000006B1,0x00002CB8,0xFFFFE9AB,0x00000587,0x00002CB8,0xFFFFE9AB,0x00000587},
- {"0000001000010011111010101001010011011110001100100010100001100100",0x00002513,0xFFFFF323,0x000003BC,0x00001965,0xFFFFF93C,0x000002F0,0x00001965,0xFFFFF93C,0x000002F0},
- {"0000001000010011111100001111111111101111011001100010000101100100",0x00003914,0xFFFFE683,0x00000586,0x00003120,0xFFFFE9A6,0x00000543,0x00003120,0xFFFFE9A6,0x00000543},
- {"0000001000010011111100001111111111101111011001000011100100000100",0x000040D0,0xFFFFE007,0x000006AC,0x00002B9E,0xFFFFEBF5,0x000004FB,0x00002B9E,0xFFFFEBF5,0x000004FB},
- {"0000001000010011111100001111111111101111010110100100100010000100",0x00004412,0xFFFFDF5F,0x000006A9,0x00002A9E,0xFFFFEDCE,0x00000498,0x00002A9E,0xFFFFEDCE,0x00000498},
- {"0000001000010011111100001111111111101111011000100100100010000100",0x000042A6,0xFFFFDFEF,0x00000696,0x00002E65,0xFFFFEAAE,0x00000529,0x00002E65,0xFFFFEAAE,0x00000529},
- {"0000001000010011111010101001010011011110001100100010000100100100",0x000022E8,0xFFFFF565,0x0000035F,0x00001890,0xFFFFFA61,0x000002C6,0x00001890,0xFFFFFA61,0x000002C6},
- {"0000001000010011111100001111111111101111011000100011100110100100",0x00004637,0xFFFFDDD8,0x000006E9,0x0000349D,0xFFFFE6C8,0x000005C7,0x0000349D,0xFFFFE6C8,0x000005C7},
- {"0000001000010011111010101001010011011110001001100011100100000100",0x00004686,0xFFFFDC58,0x0000073D,0x00003972,0xFFFFE27B,0x0000068E,0x00003972,0xFFFFE27B,0x0000068E},
- {"0000001000010011111100001111111111101111011010000000100011100100",0x00002B35,0xFFFFEE9C,0x0000046C,0x00001F5B,0xFFFFF4A3,0x000003A9,0x00001F5B,0xFFFFF4A3,0x000003A9},
- {"0000001000010011111100001111111111101111011100100100000101000100",0x00003AC9,0xFFFFE3B2,0x0000061B,0x000023A1,0xFFFFF170,0x0000040F,0x000023A1,0xFFFFF170,0x0000040F},
- {"0000001000010011111100001111111111101111010111100001100010000100",0x00003C50,0xFFFFE37E,0x00000617,0x0000218F,0xFFFFF339,0x000003C4,0x0000218F,0xFFFFF339,0x000003C4},
- {"0000001000010011111100001111111111101111011001100011000001000100",0x00003793,0xFFFFE761,0x0000055D,0x000029C7,0xFFFFEE03,0x00000496,0x000029C7,0xFFFFEE03,0x00000496},
- {"0000001000010011111100001111111111101111011001000011100010100100",0x000040B5,0xFFFFDF78,0x000006DA,0x00002DED,0xFFFFEA20,0x00000551,0x00002DED,0xFFFFEA20,0x00000551},
- {"0000001000010011111100001111111111101111011000000001000101000100",0x000039D6,0xFFFFE37D,0x0000063C,0x00001AED,0xFFFFF6E2,0x00000331,0x00001AED,0xFFFFF6E2,0x00000331},
- {"0000001000010011111100001111111111101111011001100010000101000100",0x0000431F,0xFFFFE09B,0x0000066A,0x00002BDF,0xFFFFED93,0x00000496,0x00002BDF,0xFFFFED93,0x00000496},
- {"0000001000010011111100001111111111101111011000100011100001100100",0x00004887,0xFFFFDC65,0x00000721,0x00003669,0xFFFFE5C4,0x000005E9,0x00003669,0xFFFFE5C4,0x000005E9},
- {"0000001000010011111100001111111111101111011001000000100100100100",0x00004120,0xFFFFDDAE,0x00000748,0x0000303B,0xFFFFE70D,0x000005FC,0x0000303B,0xFFFFE70D,0x000005FC},
- {"0000001000010011111100001111111111101111010111100010100010100100",0x0000415D,0xFFFFE0BE,0x0000067B,0x00002FA7,0xFFFFEA28,0x00000538,0x00002FA7,0xFFFFEA28,0x00000538},
- {"0000001000010011111100001111111111101111011010000001100100000100",0x00002B12,0xFFFFEFF9,0x00000428,0x00001DDA,0xFFFFF693,0x00000356,0x00001DDA,0xFFFFF693,0x00000356},
- {"0000001000010011111100001111111111101111010111100011000110000100",0x00003ED3,0xFFFFE28D,0x0000062D,0x00002B00,0xFFFFED4E,0x000004B3,0x00002B00,0xFFFFED4E,0x000004B3},
- {"0000001000010011111100001111111111101111011000100101000010100100",0x00004218,0xFFFFE039,0x0000068F,0x00002F84,0xFFFFEA0C,0x00000541,0x00002F84,0xFFFFEA0C,0x00000541},
- {"0000001000010011111100001111111111101111010110100011100001000100",0x00003FF5,0xFFFFE2A3,0x00000617,0x00003017,0xFFFFEA7A,0x00000520,0x00003017,0xFFFFEA7A,0x00000520},
- {"0000001000010011111100001111111111101111010110100000100010100100",0x00004304,0xFFFFDFCC,0x0000069E,0x00002E0C,0xFFFFEB51,0x00000505,0x00002E0C,0xFFFFEB51,0x00000505},
- {"0000001000010011111100001111111111101111011001000001100101000100",0x00003D3A,0xFFFFE17F,0x00000687,0x0000284C,0xFFFFED83,0x000004CD,0x0000284C,0xFFFFED83,0x000004CD},
- {"0000001000010011111100001111111111101111010111100100000010100100",0x000042F5,0xFFFFDF76,0x000006B2,0x000027B6,0xFFFFEF72,0x00000455,0x000027B6,0xFFFFEF72,0x00000455},
- {"0000001000010011111100001111111111101111010111000011100011000100",0x00004267,0xFFFFDF29,0x000006D5,0x0000298F,0xFFFFEDBD,0x000004AC,0x0000298F,0xFFFFEDBD,0x000004AC},
- {"0000001000010011111010101001010011011110001001000000100100100100",0x0000303E,0xFFFFEC00,0x000004CB,0x000021CD,0xFFFFF36E,0x000003D6,0x000021CD,0xFFFFF36E,0x000003D6},
- {"0000001000010011111100001111111111101111010111100010100011000100",0x00003127,0xFFFFEBDB,0x000004A6,0x00002E95,0xFFFFEB78,0x000004F3,0x00002E95,0xFFFFEB78,0x000004F3},
- {"0000001000010011111010101001010011011110000111000001000001100100",0x00002655,0xFFFFF2D9,0x000003CF,0x000019F5,0xFFFFF8E7,0x00000313,0x000019F5,0xFFFFF8E7,0x00000313},
- {"0000001000010011111010101001010011011110000101100100000010000100",0x00002372,0xFFFFF449,0x0000039B,0x00001544,0xFFFFFC16,0x0000028B,0x00001544,0xFFFFFC16,0x0000028B},
- {"0000001000010011111100001111111111101111011001100010100011000100",0x0000348E,0xFFFFEB20,0x000004B2,0x00002BE8,0xFFFFEE80,0x00000467,0x00002BE8,0xFFFFEE80,0x00000467},
- {"0000001000010011111100001111111111101111010111100001000100000100",0x00004092,0xFFFFE073,0x0000069B,0x00002061,0xFFFFF403,0x000003A0,0x00002061,0xFFFFF403,0x000003A0},
- {"0000001000010011111100001111111111101111011100100010000011100100",0x000039D1,0xFFFFE55D,0x000005CC,0x000025CB,0xFFFFF0C0,0x00000428,0x000025CB,0xFFFFF0C0,0x00000428},
- {"0000001000010011111100001111111111101111010111100100100010000100",0x000042AA,0xFFFFDF68,0x000006C2,0x0000290B,0xFFFFEE78,0x00000485,0x0000290B,0xFFFFEE78,0x00000485},
- {"0000001000010011111100001111111111101111011100100001100011000100",0x0000356F,0xFFFFE7AC,0x0000056E,0x00001BE8,0xFFFFF6E3,0x0000032A,0x00001BE8,0xFFFFF6E3,0x0000032A},
- {"0000001000010011111100001111111111101111010111100001000101000100",0x00003525,0xFFFFE7FF,0x0000055D,0x0000242C,0xFFFFF12E,0x0000041D,0x0000242C,0xFFFFF12E,0x0000041D},
- {"0000001000010011111100001111111111101111010111000100100011000100",0x00003360,0xFFFFE895,0x00000550,0x00002175,0xFFFFF29E,0x000003E9,0x00002175,0xFFFFF29E,0x000003E9},
- {"0000001000010011111100001111111111101111011001000100000010100100",0x00003C94,0xFFFFE1C4,0x0000067E,0x00002E28,0xFFFFE964,0x0000057F,0x00002E28,0xFFFFE964,0x0000057F},
- {"0000001000010011111100001111111111101111011100100100000100100100",0x0000431C,0xFFFFDE4B,0x000006FF,0x00002270,0xFFFFF268,0x000003E5,0x00002270,0xFFFFF268,0x000003E5},
- {"0000001000010011111010101001010011011110000100100001100011000100",0x00002B67,0xFFFFF01D,0x00000414,0x000019FB,0xFFFFF961,0x000002D8,0x000019FB,0xFFFFF961,0x000002D8},
- {"0000001000010011111100001111111111101111010111100011100110000100",0x0000400B,0xFFFFE13D,0x0000066F,0x000024F3,0xFFFFF125,0x00000417,0x000024F3,0xFFFFF125,0x00000417},
- {"0000001000010011111100001111111111101111010110100010000010100100",0x00004460,0xFFFFE00E,0x0000067B,0x000023DF,0xFFFFF2E6,0x000003BB,0x000023DF,0xFFFFF2E6,0x000003BB},
- {"0000001000010011111100001111111111101111011001000001100001100100",0x00003AFB,0xFFFFE2C5,0x00000650,0x00002D46,0xFFFFE9C4,0x00000571,0x00002D46,0xFFFFE9C4,0x00000571},
- {"0000001000010011111100001111111111101111011000100010100100100100",0x00005482,0xFFFFD5BC,0x0000081A,0x00003250,0xFFFFE961,0x00000541,0x00003250,0xFFFFE961,0x00000541},
- {"0000001000010011111100001111111111101111010111000010100101000100",0x00003D27,0xFFFFE2FA,0x00000632,0x00002A4D,0xFFFFED6A,0x000004BB,0x00002A4D,0xFFFFED6A,0x000004BB},
- {"0000001000010011111100001111111111101111011000000001100010100100",0x00003E03,0xFFFFE142,0x00000690,0x00001E08,0xFFFFF555,0x0000036C,0x00001E08,0xFFFFF555,0x0000036C},
- {"0000001000010011111100001111111111101111010111000010000001100100",0x000031B5,0xFFFFE97D,0x00000535,0x0000232E,0xFFFFF166,0x00000422,0x0000232E,0xFFFFF166,0x00000422},
- {"0000001000010011111100001111111111101111010111100001100011100100",0x00003753,0xFFFFE724,0x00000575,0x0000281A,0xFFFFEF1A,0x0000046B,0x0000281A,0xFFFFEF1A,0x0000046B},
- {"0000001000010011111010101001010011011110001000000100000101000100",0x00002071,0xFFFFF5C9,0x0000036F,0x00001470,0xFFFFFBF7,0x000002A5,0x00001470,0xFFFFFBF7,0x000002A5},
- {"0000001000010011111100001111111111101111011010000011000101000100",0x00002799,0xFFFFF223,0x000003CF,0x00001CD3,0xFFFFF74A,0x00000333,0x00001CD3,0xFFFFF74A,0x00000333},
- {"0000001000010011111100001111111111101111011001100001000011000100",0x000040DF,0xFFFFE11C,0x00000664,0x000031D4,0xFFFFE8BC,0x0000056F,0x000031D4,0xFFFFE8BC,0x0000056F},
- {"0000001000010011111100001111111111101111011001000100000011000100",0x00003A4D,0xFFFFE3A6,0x00000627,0x00002871,0xFFFFEDA0,0x000004C0,0x00002871,0xFFFFEDA0,0x000004C0},
- {"0000001000010011111100001111111111101111011010000001100110000100",0x00002AF9,0xFFFFEED7,0x00000464,0x0000219B,0xFFFFF368,0x000003D6,0x0000219B,0xFFFFF368,0x000003D6},
- {"0000001000010011111010101001010011011110001100100011000100100100",0x000026D5,0xFFFFF36C,0x000003A3,0x00001BC6,0xFFFFF881,0x00000311,0x00001BC6,0xFFFFF881,0x00000311},
- {"0000001000010011111100001111111111101111010111100010000001000100",0x0000325D,0xFFFFEA07,0x0000050B,0x000026D1,0xFFFFEFB3,0x0000045A,0x000026D1,0xFFFFEFB3,0x0000045A},
- {"0000001000010011111100001111111111101111011010000010100001100100",0x00002F75,0xFFFFEC64,0x000004BE,0x00001EEB,0xFFFFF559,0x00000386,0x00001EEB,0xFFFFF559,0x00000386},
- {"0000001000010011111100001111111111101111010110100011100010100100",0x00003C2F,0xFFFFE541,0x000005A3,0x000025B6,0xFFFFF16F,0x000003FA,0x000025B6,0xFFFFF16F,0x000003FA},
- {"0000001000010011111100001111111111101111011010000100100100100100",0x00002BC2,0xFFFFEE89,0x0000046A,0x00001D04,0xFFFFF651,0x00000361,0x00001D04,0xFFFFF651,0x00000361},
- {"0000001000010011111100001111111111101111011010000010100110100100",0x00002DD0,0xFFFFED40,0x0000049F,0x00001C8C,0xFFFFF6B3,0x00000353,0x00001C8C,0xFFFFF6B3,0x00000353},
- {"0000001000010011111010101001010011011110000111000000100011100100",0x000021ED,0xFFFFF530,0x00000380,0x00001643,0xFFFFFB1C,0x000002C3,0x00001643,0xFFFFFB1C,0x000002C3},
- {"0000001000010011111010101001010011011110001100100001100100000100",0x000028C7,0xFFFFF160,0x000003FD,0x00001990,0xFFFFF994,0x000002E2,0x00001990,0xFFFFF994,0x000002E2},
- {"0000001000010011111100001111111111101111011001100001000010100100",0x0000431C,0xFFFFDF9D,0x000006A3,0x000034A6,0xFFFFE6B0,0x000005C9,0x000034A6,0xFFFFE6B0,0x000005C9},
- {"0000001000010011111010101001010011011110001001100011000010100100",0x00004115,0xFFFFE0D6,0x00000667,0x000031AD,0xFFFFE850,0x00000585,0x000031AD,0xFFFFE850,0x00000585},
- {"0000001000010011111100001111111111101111011001000011100100100100",0x0000424A,0xFFFFDEEC,0x000006E1,0x0000346A,0xFFFFE5EA,0x00000602,0x0000346A,0xFFFFE5EA,0x00000602},
- {"0000001000010011111100001111111111101111011001100001100110000100",0x00004990,0xFFFFDAFA,0x00000771,0x00002A9C,0xFFFFED37,0x000004BC,0x00002A9C,0xFFFFED37,0x000004BC},
- {"0000001000010011111100001111111111101111011001000010100010100100",0x00003858,0xFFFFE568,0x000005D2,0x00003030,0xFFFFE8B0,0x0000058E,0x00003030,0xFFFFE8B0,0x0000058E},
- {"0000001000010011111100001111111111101111011010000100000101100100",0x00001EDC,0xFFFFF6CD,0x00000322,0x00001FCA,0xFFFFF4BD,0x0000039E,0x00001FCA,0xFFFFF4BD,0x0000039E},
- {"0000001000010011111100001111111111101111011001100010000100100100",0x00004C88,0xFFFFDBA3,0x0000071B,0x000030C4,0xFFFFEAFD,0x000004F7,0x000030C4,0xFFFFEAFD,0x000004F7},
- {"0000001000010011111100001111111111101111011010000000100100000100",0x00002B9A,0xFFFFEE41,0x0000047D,0x00002131,0xFFFFF344,0x000003E5,0x00002131,0xFFFFF344,0x000003E5},
- {"0000001000010011111100001111111111101111011000100011100110000100",0x00003E4B,0xFFFFE33C,0x000005FA,0x00003877,0xFFFFE437,0x0000062E,0x00003877,0xFFFFE437,0x0000062E},
- {"0000001000010011111010101001010011011110001100100010000001100100",0x00002376,0xFFFFF444,0x0000038A,0x000017ED,0xFFFFFA4C,0x000002C1,0x000017ED,0xFFFFFA4C,0x000002C1},
- {"0000001000010011111100001111111111101111011001100001000010000100",0x00004517,0xFFFFDDF4,0x000006F2,0x000030DC,0xFFFFE8EF,0x00000571,0x000030DC,0xFFFFE8EF,0x00000571},
- {"0000001000010011111100001111111111101111011010000001100101000100",0x0000270C,0xFFFFF1F3,0x000003DF,0x0000207B,0xFFFFF474,0x000003AD,0x0000207B,0xFFFFF474,0x000003AD},
- {"0000001000010011111100001111111111101111011001000101000101000100",0x00004086,0xFFFFDF39,0x000006E3,0x00002A24,0xFFFFEC2B,0x000004FF,0x00002A24,0xFFFFEC2B,0x000004FF},
- {"0000001000010011111100001111111111101111010111000011000100100100",0x00003BDE,0xFFFFE45E,0x000005EB,0x00002CD5,0xFFFFEC45,0x000004DD,0x00002CD5,0xFFFFEC45,0x000004DD},
- {"0000001000010011111100001111111111101111011100100011000011100100",0x00003803,0xFFFFE714,0x00000579,0x0000288A,0xFFFFEF21,0x0000046B,0x0000288A,0xFFFFEF21,0x0000046B},
- {"0000001000010011111100001111111111101111011000000001000100000100",0x00003F50,0xFFFFE002,0x000006CD,0x00001AD4,0xFFFFF72E,0x0000031F,0x00001AD4,0xFFFFF72E,0x0000031F},
- {"0000001000010011111100001111111111101111011010000010000011100100",0x00002968,0xFFFFF100,0x00000402,0x00001FB5,0xFFFFF57C,0x0000037F,0x00001FB5,0xFFFFF57C,0x0000037F},
- {"0000001000010011111100001111111111101111011001100010000100000100",0x00004283,0xFFFFE2A7,0x000005F5,0x00003165,0xFFFFEB0C,0x000004EC,0x00003165,0xFFFFEB0C,0x000004EC},
- {"0000001000010011111100001111111111101111011001000011000110100100",0x00004253,0xFFFFDDA8,0x00000732,0x00002E5C,0xFFFFE90A,0x00000593,0x00002E5C,0xFFFFE90A,0x00000593},
- {"0000001000010011111100001111111111101111010111000101000010100100",0x00003551,0xFFFFE756,0x0000058D,0x000029A7,0xFFFFED0C,0x000004DE,0x000029A7,0xFFFFED0C,0x000004DE},
- {"0000001000010011111100001111111111101111011001000010100011000100",0x00003728,0xFFFFE604,0x000005C4,0x00002832,0xFFFFEE64,0x00000493,0x00002832,0xFFFFEE64,0x00000493},
- {"0000001000010011111100001111111111101111011000100011100101100100",0x00004796,0xFFFFDCC8,0x00000715,0x000032AB,0xFFFFE848,0x0000057C,0x000032AB,0xFFFFE848,0x0000057C},
- {"0000001000010011111100001111111111101111011000100001000011000100",0x000049DF,0xFFFFDB24,0x0000075F,0x00003076,0xFFFFE967,0x0000055C,0x00003076,0xFFFFE967,0x0000055C},
- {"0000001000010011111100001111111111101111011100100001000100000100",0x00003F13,0xFFFFE099,0x000006A8,0x00002279,0xFFFFF226,0x000003F3,0x00002279,0xFFFFF226,0x000003F3},
- {"0000001000010011111100001111111111101111011001000011000010100100",0x00003E03,0xFFFFE19F,0x00000674,0x00002D66,0xFFFFEAA7,0x00000537,0x00002D66,0xFFFFEAA7,0x00000537},
- {"0000001000010011111100001111111111101111010111000100000100000100",0x000037DA,0xFFFFE63F,0x000005A7,0x00002543,0xFFFFF0A0,0x00000431,0x00002543,0xFFFFF0A0,0x00000431},
- {"0000001000010011111100001111111111101111011000100100100101000100",0x00003D82,0xFFFFE3F5,0x000005D9,0x0000332F,0xFFFFE834,0x00000577,0x0000332F,0xFFFFE834,0x00000577},
- {"0000001000010011111010101001010011011110000100100010100011000100",0x00002915,0xFFFFF1E0,0x000003D4,0x00002065,0xFFFFF57B,0x00000378,0x00002065,0xFFFFF57B,0x00000378},
- {"0000001000010011111100001111111111101111010111100100100100000100",0x000036FC,0xFFFFE72D,0x00000577,0x00002811,0xFFFFEF30,0x00000464,0x00002811,0xFFFFEF30,0x00000464},
- {"0000001000010011111100001111111111101111011000100011000110000100",0x00004767,0xFFFFDD30,0x000006FD,0x00003703,0xFFFFE564,0x000005F8,0x00003703,0xFFFFE564,0x000005F8},
- {"0000001000010011111100001111111111101111011000000011000110000100",0x00003094,0xFFFFEAA9,0x000004F5,0x000022E7,0xFFFFF200,0x000003FB,0x000022E7,0xFFFFF200,0x000003FB},
- {"0000001000010011111100001111111111101111011001000001000101000100",0x00003EF0,0xFFFFDF83,0x000006ED,0x00002A27,0xFFFFEB7C,0x00000537,0x00002A27,0xFFFFEB7C,0x00000537},
- {"0000001000010011111100001111111111101111011010000001000100100100",0x0000243C,0xFFFFF358,0x000003AC,0x00001DC4,0xFFFFF5E9,0x00000372,0x00001DC4,0xFFFFF5E9,0x00000372},
- {"0000001000010011111100001111111111101111011100100010000101000100",0x0000284B,0xFFFFF036,0x0000040F,0x00001FCD,0xFFFFF445,0x00000395,0x00001FCD,0xFFFFF445,0x00000395},
- {"0000001000010011111100001111111111101111011010000100000011000100",0x00002611,0xFFFFF285,0x000003C7,0x00001CFE,0xFFFFF6A0,0x00000355,0x00001CFE,0xFFFFF6A0,0x00000355},
- {"0000001000010011111010101001010011011110000111000011100110100100",0x00002292,0xFFFFF49F,0x00000393,0x000017F4,0xFFFFF9CD,0x000002F5,0x000017F4,0xFFFFF9CD,0x000002F5},
- {"0000001000010011111100001111111111101111010111100011100010100100",0x000037F3,0xFFFFE68D,0x00000590,0x00002443,0xFFFFF1AD,0x000003FA,0x00002443,0xFFFFF1AD,0x000003FA},
- {"0000001000010011111100001111111111101111011010000010000101000100",0x00002C01,0xFFFFEF3F,0x00000444,0x0000210A,0xFFFFF475,0x000003A7,0x0000210A,0xFFFFF475,0x000003A7},
- {"0000001000010011111010101001010011011110000100100001000011100100",0x00002C0E,0xFFFFEF0F,0x00000446,0x00001A82,0xFFFFF8F7,0x000002DE,0x00001A82,0xFFFFF8F7,0x000002DE},
- {"0000001000010011111100001111111111101111010111100010000011000100",0x00003FA6,0xFFFFE20A,0x0000063F,0x00002E29,0xFFFFEB21,0x00000510,0x00002E29,0xFFFFEB21,0x00000510},
- {"0000001000010011111100001111111111101111010111000010000101100100",0x00003BCD,0xFFFFE31B,0x0000063C,0x000019AF,0xFFFFF83D,0x000002F8,0x000019AF,0xFFFFF83D,0x000002F8},
- {"0000001000010011111100001111111111101111011001100100000101100100",0x000044C8,0xFFFFDF08,0x000006B0,0x00002E2E,0xFFFFEB62,0x000004FD,0x00002E2E,0xFFFFEB62,0x000004FD},
- {"0000001000010011111100001111111111101111010111000001100010000100",0x00003790,0xFFFFE571,0x000005E3,0x00002042,0xFFFFF35D,0x000003CF,0x00002042,0xFFFFF35D,0x000003CF},
- {"0000001000010011111100001111111111101111011000000101000011100100",0x000038AC,0xFFFFE46C,0x00000609,0x0000215E,0xFFFFF22D,0x00000403,0x0000215E,0xFFFFF22D,0x00000403},
- {"0000001000010011111100001111111111101111010111100010100110100100",0x00003A1E,0xFFFFE536,0x000005C9,0x000024F3,0xFFFFF11A,0x0000041B,0x000024F3,0xFFFFF11A,0x0000041B},
- {"0000001000010011111100001111111111101111011001100101000011100100",0x0000431A,0xFFFFDF1B,0x000006C5,0x00002F34,0xFFFFEA02,0x00000545,0x00002F34,0xFFFFEA02,0x00000545},
- {"0000001000010011111100001111111111101111011001000001100100000100",0x000042DC,0xFFFFDE28,0x0000070C,0x00003B53,0xFFFFE0EA,0x000006E2,0x00003B53,0xFFFFE0EA,0x000006E2},
- {"0000001000010011111100001111111111101111011010000011000101100100",0x0000264B,0xFFFFF29A,0x000003C4,0x000021D0,0xFFFFF3CE,0x000003C4,0x000021D0,0xFFFFF3CE,0x000003C4},
- {"0000001000010011111100001111111111101111010110100100000001100100",0x00004225,0xFFFFE0E8,0x00000665,0x00002B53,0xFFFFED89,0x0000049F,0x00002B53,0xFFFFED89,0x0000049F},
- {"0000001000010011111010101001010011011110001000000100100100100100",0x00001FCC,0xFFFFF63F,0x00000358,0x000019E8,0xFFFFF882,0x0000032A,0x000019E8,0xFFFFF882,0x0000032A},
- {"0000001000010011111100001111111111101111011000100100000010100100",0x000045E0,0xFFFFDDD0,0x000006ED,0x00003193,0xFFFFE8BD,0x00000572,0x00003193,0xFFFFE8BD,0x00000572},
- {"0000001000010011111100001111111111101111011010000011100100100100",0x000024FC,0xFFFFF366,0x000003A6,0x00001FE8,0xFFFFF509,0x00000394,0x00001FE8,0xFFFFF509,0x00000394},
- {"0000001000010011111100001111111111101111010111000100100010000100",0x0000378F,0xFFFFE54B,0x000005F1,0x00001C9B,0xFFFFF5C7,0x00000368,0x00001C9B,0xFFFFF5C7,0x00000368},
- {"0000001000010011111100001111111111101111011001000001100010100100",0x00003CF3,0xFFFFE15A,0x00000694,0x00002CDD,0xFFFFEA44,0x00000557,0x00002CDD,0xFFFFEA44,0x00000557},
- {"0000001000010011111010101001010011011110001000000000100100000100",0x000021EC,0xFFFFF4F4,0x0000038F,0x00001511,0xFFFFFBF5,0x0000029E,0x00001511,0xFFFFFBF5,0x0000029E},
- {"0000001000010011111100001111111111101111011000000001000010100100",0x00003C8A,0xFFFFE1C1,0x00000685,0x000019C7,0xFFFFF7E2,0x00000301,0x000019C7,0xFFFFF7E2,0x00000301},
- {"0000001000010011111100001111111111101111010111100010000001100100",0x00003908,0xFFFFE5C7,0x000005B3,0x00002793,0xFFFFEF46,0x00000465,0x00002793,0xFFFFEF46,0x00000465},
- {"0000001000010011111100001111111111101111011000000101000100000100",0x000040A3,0xFFFFDE61,0x00000725,0x00002077,0xFFFFF2CE,0x000003E8,0x00002077,0xFFFFF2CE,0x000003E8},
- {"0000001000010011111100001111111111101111011001100100000101000100",0x00003DCA,0xFFFFE34D,0x00000608,0x00002D66,0xFFFFEBDF,0x000004E8,0x00002D66,0xFFFFEBDF,0x000004E8},
- {"0000001000010011111100001111111111101111010111100101000011000100",0x00003085,0xFFFFEB70,0x000004C8,0x000029B1,0xFFFFEDD9,0x000004A5,0x000029B1,0xFFFFEDD9,0x000004A5},
- {"0000001000010011111010101001010011011110000010000011100010000100",0x00004C73,0xFFFFD676,0x0000086C,0x0000280A,0xFFFFED89,0x000004C2,0x0000280A,0xFFFFED89,0x000004C2},
- {"0000001000010011111010101001010011011110001001000010000101100100",0x00002CE5,0xFFFFEE8C,0x00000466,0x00001755,0xFFFFFAC2,0x000002AC,0x00001755,0xFFFFFAC2,0x000002AC},
- {"0000001000010011111100001111111111101111011000100001000100100100",0x0000489F,0xFFFFDBF1,0x0000073E,0x0000332D,0xFFFFE786,0x000005AD,0x0000332D,0xFFFFE786,0x000005AD},
- {"0000001000010011111100001111111111101111011000000010100001100100",0x00003D09,0xFFFFE193,0x00000689,0x00001E82,0xFFFFF4C0,0x00000386,0x00001E82,0xFFFFF4C0,0x00000386},
- {"0000001000010011111100001111111111101111011001000100000100000100",0x00003E4C,0xFFFFE131,0x00000689,0x00002F4E,0xFFFFE925,0x0000057B,0x00002F4E,0xFFFFE925,0x0000057B},
- {"0000001000010011111100001111111111101111010110100100000010000100",0x00003B31,0xFFFFE53F,0x000005B3,0x0000248A,0xFFFFF211,0x000003DF,0x0000248A,0xFFFFF211,0x000003DF},
- {"0000001000010011111100001111111111101111011001000100000100100100",0x000038DD,0xFFFFE54A,0x000005C9,0x00002B6D,0xFFFFEBDF,0x00000502,0x00002B6D,0xFFFFEBDF,0x00000502},
- {"0000001000010011111100001111111111101111011010000100000001100100",0x00002698,0xFFFFF1A8,0x000003F2,0x00002163,0xFFFFF34B,0x000003E3,0x00002163,0xFFFFF34B,0x000003E3},
- {"0000001000010011111010101001010011011110001000000001000001100100",0x000023A8,0xFFFFF4CD,0x00000386,0x00001944,0xFFFFF983,0x00000300,0x00001944,0xFFFFF983,0x00000300},
- {"0000001000010011111100001111111111101111011001000001100011000100",0x00003EAF,0xFFFFE0C3,0x000006A0,0x000030AB,0xFFFFE829,0x000005A6,0x000030AB,0xFFFFE829,0x000005A6},
- {"0000001000010011111100001111111111101111011010000100100101000100",0x00002E89,0xFFFFECA6,0x000004B6,0x00001FA0,0xFFFFF4A8,0x000003A3,0x00001FA0,0xFFFFF4A8,0x000003A3},
- {"0000001000010011111100001111111111101111011010000010100010100100",0x000028A4,0xFFFFF112,0x00000402,0x00001F7C,0xFFFFF545,0x0000038A,0x00001F7C,0xFFFFF545,0x0000038A},
- {"0000001000010011111100001111111111101111011001100101000010100100",0x00004135,0xFFFFDFA2,0x000006C5,0x0000324C,0xFFFFE7AA,0x000005AF,0x0000324C,0xFFFFE7AA,0x000005AF},
- {"0000001000010011111010101001010011011110001000000011100011000100",0x00002012,0xFFFFF693,0x00000352,0x0000171F,0xFFFFFABB,0x000002D9,0x0000171F,0xFFFFFABB,0x000002D9},
- {"0000001000010011111100001111111111101111011001000011000010000100",0x00003D7C,0xFFFFE1BC,0x00000671,0x00002A45,0xFFFFEC84,0x000004EC,0x00002A45,0xFFFFEC84,0x000004EC},
- {"0000001000010011111100001111111111101111011100100011000001100100",0x00004172,0xFFFFDF58,0x000006DA,0x00002504,0xFFFFF0A6,0x00000431,0x00002504,0xFFFFF0A6,0x00000431},
- {"0000001000010011111100001111111010011001001010000001100101000100",0x000029C7,0xFFFFF087,0x00000414,0x00001DCB,0xFFFFF675,0x0000035F,0x00001DCB,0xFFFFF675,0x0000035F},
- {"0000001000010011111100001111111010011001001010100010100110100100",0x000027F0,0xFFFFF05A,0x00000432,0x00001707,0xFFFFFA0E,0x000002D1,0x00001707,0xFFFFFA0E,0x000002D1},
- {"0000001000010011111100001111111010011001001000100010000101000100",0x00003279,0xFFFFE9F7,0x00000511,0x00001B5E,0xFFFFF787,0x00000317,0x00001B5E,0xFFFFF787,0x00000317},
- {"0000001000010011111100001111111010011001001100100010000110000100",0x000030A5,0xFFFFEABC,0x000004FF,0x000019D1,0xFFFFF83C,0x00000304,0x000019D1,0xFFFFF83C,0x00000304},
- {"0000001000010011111100001111111010011001001010000010100001000100",0x0000283B,0xFFFFF122,0x00000402,0x000019C2,0xFFFFF8E9,0x000002FB,0x000019C2,0xFFFFF8E9,0x000002FB},
- {"0000001000010011111100001111111010011001001011000010000010000100",0x00003376,0xFFFFE9E1,0x00000510,0x000021A7,0xFFFFF39F,0x000003BF,0x000021A7,0xFFFFF39F,0x000003BF},
- {"0000001000010011111100001111111010011001001100100001100011000100",0x000031D2,0xFFFFEA9C,0x000004FC,0x00001F66,0xFFFFF4E4,0x00000390,0x00001F66,0xFFFFF4E4,0x00000390},
- {"0000001000010011111100001111111010011001000110100011100001100100",0x00003006,0xFFFFEB18,0x000004F2,0x000019B3,0xFFFFF84E,0x00000301,0x000019B3,0xFFFFF84E,0x00000301},
- {"0000001000010011111100001111111010011001001100000011100110100100",0x0000364F,0xFFFFE81F,0x00000556,0x00002AC9,0xFFFFED87,0x000004BD,0x00002AC9,0xFFFFED87,0x000004BD},
- {"0000001000010011111100001111111010011001001011100011100001000100",0x00003043,0xFFFFEBAE,0x000004CD,0x00001B0C,0xFFFFF7ED,0x0000030C,0x00001B0C,0xFFFFF7ED,0x0000030C},
- {"0000001000010011111100001111111010011001001100000100100010100100",0x000037CE,0xFFFFE69E,0x00000596,0x0000276B,0xFFFFEF65,0x0000046E,0x0000276B,0xFFFFEF65,0x0000046E},
- {"0000001000010011111100001111111010011001001011000011000100000100",0x00003063,0xFFFFED5E,0x0000046F,0x000024AE,0xFFFFF2C4,0x000003D8,0x000024AE,0xFFFFF2C4,0x000003D8},
- {"0000001000010011111100001111111010011001001011100000100010100100",0x00002F5D,0xFFFFEBDC,0x000004D3,0x00001EDB,0xFFFFF50F,0x0000038E,0x00001EDB,0xFFFFF50F,0x0000038E},
- {"0000001000010011111100001111111010011001001011100100100010100100",0x00003148,0xFFFFEA9A,0x000004FB,0x0000192D,0xFFFFF8E9,0x000002DF,0x0000192D,0xFFFFF8E9,0x000002DF},
- {"0000001000010011111100001111111010011001001011000010000001100100",0x00003682,0xFFFFE7E4,0x0000055C,0x0000250E,0xFFFFF150,0x0000041A,0x0000250E,0xFFFFF150,0x0000041A},
- {"0000001000010011111100001111111010011001001010100010000010000100",0x0000284E,0xFFFFF15A,0x000003F8,0x00001CE2,0xFFFFF6F9,0x0000034F,0x00001CE2,0xFFFFF6F9,0x0000034F},
- {"0000001000010011111100001111111010011001001100000001100010100100",0x00003171,0xFFFFEAE9,0x000004ED,0x00001F40,0xFFFFF513,0x00000384,0x00001F40,0xFFFFF513,0x00000384},
- {"0000001000010011111100001111111010011001001100100011000001000100",0x000031BD,0xFFFFEA64,0x0000050A,0x00001EFD,0xFFFFF4F7,0x00000390,0x00001EFD,0xFFFFF4F7,0x00000390},
- {"0000001000010011111100001111111010011001001011100101000011100100",0x00003050,0xFFFFEB29,0x000004EA,0x000019B3,0xFFFFF878,0x000002F9,0x000019B3,0xFFFFF878,0x000002F9},
- {"0000001000010011111100001111111010011001001011000001100100000100",0x00003400,0xFFFFE9A0,0x0000051A,0x00002460,0xFFFFF1DA,0x00000409,0x00002460,0xFFFFF1DA,0x00000409},
- {"0000001000010011111100001111111010011001001011000100100010000100",0x000034A1,0xFFFFE86F,0x00000558,0x0000255D,0xFFFFF09E,0x00000443,0x0000255D,0xFFFFF09E,0x00000443},
- {"0000001000010011111100001111111010011001001011100100100011100100",0x00003103,0xFFFFEAD7,0x000004F0,0x00001896,0xFFFFF95A,0x000002CC,0x00001896,0xFFFFF95A,0x000002CC},
- {"0000001000010011111100001111111010011001001100000001100011100100",0x00003120,0xFFFFEB9E,0x000004CB,0x000021E8,0xFFFFF3A2,0x000003C1,0x000021E8,0xFFFFF3A2,0x000003C1},
- {"0000001000010011111100001111111010011001000111000101000011100100",0x00003558,0xFFFFE812,0x00000565,0x0000256E,0xFFFFF097,0x00000447,0x0000256E,0xFFFFF097,0x00000447},
- {"0000001000010011111100001111111010011001000110100010100001000100",0x00002DA8,0xFFFFECA8,0x000004B7,0x0000180B,0xFFFFF96D,0x000002D8,0x0000180B,0xFFFFF96D,0x000002D8},
- {"0000001000010011111100001111111010011001001011100011000001100100",0x00003232,0xFFFFEA66,0x000004FF,0x00001DDE,0xFFFFF5FE,0x0000035A,0x00001DDE,0xFFFFF5FE,0x0000035A},
- {"0000001000010011111100001111111010011001001100000101000011100100",0x000034D2,0xFFFFE89F,0x00000548,0x0000246C,0xFFFFF17F,0x00000418,0x0000246C,0xFFFFF17F,0x00000418},
- {"0000001000010011111100001111111010011001001100000100100100000100",0x000033EC,0xFFFFE954,0x0000052A,0x00002323,0xFFFFF279,0x000003EE,0x00002323,0xFFFFF279,0x000003EE},
- {"0000001000010011111100001111111010011001001100000011100010000100",0x000033AA,0xFFFFE955,0x0000052D,0x0000229F,0xFFFFF2B2,0x000003E7,0x0000229F,0xFFFFF2B2,0x000003E7},
- {"0000001000010011111100001111111010011001001100100100100101100100",0x00003258,0xFFFFE9AA,0x0000052A,0x00001D5F,0xFFFFF5D1,0x0000036B,0x00001D5F,0xFFFFF5D1,0x0000036B},
- {"0000001000010011111100001111111010011001001100000010100110100100",0x0000323A,0xFFFFEA5F,0x00000504,0x00002108,0xFFFFF3D5,0x000003BA,0x00002108,0xFFFFF3D5,0x000003BA},
- {"0000001000010011111100001111111010011001001011000010000110000100",0x00003216,0xFFFFEA6B,0x000004FF,0x00001D6E,0xFFFFF640,0x00000350,0x00001D6E,0xFFFFF640,0x00000350},
- {"0000001000010011111100001111111010011001001100100001000011100100",0x000030C5,0xFFFFEAC4,0x000004FC,0x00001924,0xFFFFF8C2,0x000002EE,0x00001924,0xFFFFF8C2,0x000002EE},
- {"0000001000010011111100001111111010011001001100000101000100000100",0x000032BB,0xFFFFE9F1,0x00000515,0x00002211,0xFFFFF31B,0x000003D5,0x00002211,0xFFFFF31B,0x000003D5},
- {"0000001000010011111100001111111010011001001100000100100011000100",0x0000352C,0xFFFFE85B,0x00000553,0x00002410,0xFFFFF1B4,0x0000040F,0x00002410,0xFFFFF1B4,0x0000040F},
- {"0000001000010011111100001111111010011001001000100011100011000100",0x000036A0,0xFFFFE7E8,0x0000055D,0x00002901,0xFFFFEEB8,0x00000489,0x00002901,0xFFFFEEB8,0x00000489},
- {"0000001000010011111100001111111010011001001011000011000001000100",0x00003340,0xFFFFE9D9,0x00000516,0x00002332,0xFFFFF27A,0x000003F0,0x00002332,0xFFFFF27A,0x000003F0},
- {"0000001000010011111100001111111010011001000110100011100010100100",0x00003564,0xFFFFE86D,0x0000054E,0x00002613,0xFFFFF07C,0x00000444,0x00002613,0xFFFFF07C,0x00000444},
- {"0000001000010011111100001111111010011001001010000000100100000100",0x00002AD1,0xFFFFEF0B,0x0000045C,0x00001DEA,0xFFFFF5C8,0x00000381,0x00001DEA,0xFFFFF5C8,0x00000381},
- {"0000001000010011111100001111111010011001001000100010000011100100",0x000035B0,0xFFFFE846,0x00000555,0x000027BE,0xFFFFEF5D,0x00000474,0x000027BE,0xFFFFEF5D,0x00000474},
- {"0000001000010011111100001111111010011001001000100011100010100100",0x000032C4,0xFFFFEA48,0x00000502,0x000022C6,0xFFFFF2DF,0x000003DE,0x000022C6,0xFFFFF2DF,0x000003DE},
- {"0000001000010011111100001111111010011001001100000000100011000100",0x00003036,0xFFFFEB0D,0x000004F9,0x00001FE8,0xFFFFF41A,0x000003BC,0x00001FE8,0xFFFFF41A,0x000003BC},
- {"0000001000010011111100001111111010011001000110100000100100000100",0x000030F8,0xFFFFEA13,0x00000524,0x00001B6A,0xFFFFF6C9,0x0000034A,0x00001B6A,0xFFFFF6C9,0x0000034A},
- {"0000001000010011111100001111111010011001001100000001000010100100",0x00002EE2,0xFFFFEC0C,0x000004CB,0x00001A39,0xFFFFF814,0x0000030F,0x00001A39,0xFFFFF814,0x0000030F},
- {"0000001000010011111100001111111010011001000111000011000110000100",0x00003457,0xFFFFE924,0x0000052A,0x00001E9D,0xFFFFF59C,0x00000363,0x00001E9D,0xFFFFF59C,0x00000363},
- {"0000001000010011111100001111111010011001001100100010100001000100",0x000030BF,0xFFFFEB18,0x000004ED,0x00001D37,0xFFFFF636,0x0000035C,0x00001D37,0xFFFFF636,0x0000035C},
- {"0000001000010011111100001111111010011001001011100100000010000100",0x000031AF,0xFFFFEA75,0x000004FE,0x000019F2,0xFFFFF87A,0x000002F0,0x000019F2,0xFFFFF87A,0x000002F0},
- {"0000001000010011111100001111111010011001001100000010100010000100",0x00003642,0xFFFFE85B,0x00000547,0x00002975,0xFFFFEE98,0x0000048B,0x00002975,0xFFFFEE98,0x0000048B},
- {"0000001000010011111100001111111010011001001011100010100010000100",0x00002E8B,0xFFFFED1E,0x0000048E,0x000019C1,0xFFFFF917,0x000002D6,0x000019C1,0xFFFFF917,0x000002D6},
- {"0000001000010011111100001111111010011001001100100100000110100100",0x000033D9,0xFFFFE8E1,0x00000548,0x0000224B,0xFFFFF298,0x000003F4,0x0000224B,0xFFFFF298,0x000003F4},
- {"0000001000010011111100001111111010011001001011100010100011000100",0x000032BC,0xFFFFEB0F,0x000004D6,0x00002488,0xFFFFF240,0x000003F2,0x00002488,0xFFFFF240,0x000003F2},
- {"0000001000010011111100001111111010011001001100000100100101000100",0x000035FD,0xFFFFE838,0x00000553,0x00002762,0xFFFFEFBC,0x00000460,0x00002762,0xFFFFEFBC,0x00000460},
- {"0000001000010011111100001111111010011001001010000001100010100100",0x0000268B,0xFFFFF263,0x000003D1,0x00001914,0xFFFFF977,0x000002E8,0x00001914,0xFFFFF977,0x000002E8},
- {"0000001000010011111100001111111010011001001011000011000110000100",0x0000330B,0xFFFFEA1E,0x00000505,0x000020B1,0xFFFFF44D,0x0000039E,0x000020B1,0xFFFFF44D,0x0000039E},
- {"0000001000010011111100001111111010011001001000100010000010000100",0x0000326E,0xFFFFEA26,0x00000508,0x00001C17,0xFFFFF722,0x00000328,0x00001C17,0xFFFFF722,0x00000328},
- {"0000001000010011111100001111111010011001001010100011000110100100",0x00002A3F,0xFFFFEEE8,0x0000046D,0x00001B2B,0xFFFFF737,0x0000034D,0x00001B2B,0xFFFFF737,0x0000034D},
- {"0000001000010011111100001111111010011001001011000100000001100100",0x00003732,0xFFFFE765,0x00000574,0x00002A6D,0xFFFFEDA8,0x000004B7,0x00002A6D,0xFFFFEDA8,0x000004B7},
- {"0000001000010011111100001111111010011001001100000000100100100100",0x000034D3,0xFFFFE827,0x00000569,0x000027AA,0xFFFFEEE7,0x00000492,0x000027AA,0xFFFFEEE7,0x00000492},
- {"0000001000010011111100001111111010011001001011100100000011000100",0x00003306,0xFFFFEA39,0x000004FC,0x00001DCC,0xFFFFF655,0x00000344,0x00001DCC,0xFFFFF655,0x00000344},
- {"0000001000010011111100001111111010011001001010000010000001000100",0x00002A48,0xFFFFEFCA,0x00000439,0x00001DED,0xFFFFF60D,0x00000375,0x00001DED,0xFFFFF60D,0x00000375},
- {"0000001000010011111100001111111010011001001100000011100011000100",0x000033A3,0xFFFFEA36,0x000004F9,0x0000247C,0xFFFFF21F,0x000003F4,0x0000247C,0xFFFFF21F,0x000003F4},
- {"0000001000010011111100001111111010011001001011000011000101100100",0x0000311B,0xFFFFEB76,0x000004D1,0x00001EB1,0xFFFFF5B6,0x00000366,0x00001EB1,0xFFFFF5B6,0x00000366},
- {"0000001000010011111100001111111010011001001100100100000101100100",0x00003307,0xFFFFE97F,0x0000052A,0x00001E76,0xFFFFF54D,0x0000037C,0x00001E76,0xFFFFF54D,0x0000037C},
- {"0000001000010011111100001111111010011001000111000010000101000100",0x0000344B,0xFFFFE9C5,0x00000509,0x000020D6,0xFFFFF486,0x0000038F,0x000020D6,0xFFFFF486,0x0000038F},
- {"0000001000010011111100001111111010011001001011000011000101000100",0x000034B9,0xFFFFEA0B,0x000004F7,0x000027B3,0xFFFFF057,0x0000043A,0x000027B3,0xFFFFF057,0x0000043A},
- {"0000001000010011111100001111111010011001001100000001100101100100",0x00003360,0xFFFFE984,0x00000527,0x00002238,0xFFFFF2EE,0x000003E0,0x00002238,0xFFFFF2EE,0x000003E0},
- {"0000001000010011111100001111111010011001001100000010000100100100",0x0000315C,0xFFFFEC05,0x000004B1,0x000023C8,0xFFFFF2CC,0x000003DE,0x000023C8,0xFFFFF2CC,0x000003DE},
- {"0000001000010011111100001111111010011001001011000010100001100100",0x0000389B,0xFFFFE6D5,0x00000582,0x00002C6C,0xFFFFEC92,0x000004DE,0x00002C6C,0xFFFFEC92,0x000004DE},
- {"0000001000010011111100001111111010011001001011100001000100100100",0x00003058,0xFFFFEB30,0x000004E6,0x000019B5,0xFFFFF88B,0x000002F1,0x000019B5,0xFFFFF88B,0x000002F1},
- {"0000001000010011111100001111111010011001001011100000100100000100",0x00002F69,0xFFFFEB4A,0x000004F1,0x00001B82,0xFFFFF6EC,0x00000341,0x00001B82,0xFFFFF6EC,0x00000341},
- {"0000001000010011111100001111111010011001000110100001100011100100",0x000031EB,0xFFFFEA64,0x00000508,0x00002059,0xFFFFF427,0x000003B0,0x00002059,0xFFFFF427,0x000003B0},
- {"0000001000010011111100001111111010011001001000100100000100100100",0x000033E2,0xFFFFE94D,0x0000052A,0x000020BF,0xFFFFF40B,0x000003AB,0x000020BF,0xFFFFF40B,0x000003AB},
- {"0000001000010011111100001111111010011001001010000011000110000100",0x00002AF9,0xFFFFEFE9,0x00000427,0x00001F72,0xFFFFF57A,0x00000383,0x00001F72,0xFFFFF57A,0x00000383},
- {"0000001000010011111100001111111010011001001011000010100000100100",0x00003282,0xFFFFEA88,0x000004FA,0x00002561,0xFFFFF126,0x0000042B,0x00002561,0xFFFFF126,0x0000042B},
- {"0000001000010011111100001111111010011001001100000001000011100100",0x0000308A,0xFFFFEB5D,0x000004E0,0x00001E83,0xFFFFF577,0x00000378,0x00001E83,0xFFFFF577,0x00000378},
- {"0000001000010011111100001111111010011001001100100100100010000100",0x0000336E,0xFFFFE8C8,0x00000553,0x0000217C,0xFFFFF2E1,0x000003EB,0x0000217C,0xFFFFF2E1,0x000003EB},
- {"0000001000010011111100001111111010011001000110100010000101100100",0x000034A9,0xFFFFE838,0x00000561,0x000020CE,0xFFFFF38A,0x000003C7,0x000020CE,0xFFFFF38A,0x000003C7},
- {"0000001000010011111100001111111010011001001000100010000110000100",0x00003152,0xFFFFE9EB,0x00000522,0x00001755,0xFFFFF9A9,0x000002C6,0x00001755,0xFFFFF9A9,0x000002C6},
- {"0000001000010011111100001111111010011001001010000001100010000100",0x0000286E,0xFFFFF136,0x000003FD,0x00001BAB,0xFFFFF7C3,0x0000032C,0x00001BAB,0xFFFFF7C3,0x0000032C},
- {"0000001000010011111100001111111010011001001100000000100101000100",0x0000316B,0xFFFFEA02,0x00000528,0x00002247,0xFFFFF24E,0x00000408,0x00002247,0xFFFFF24E,0x00000408},
- {"0000001000010011111100001111111010011001001011000000100011100100",0x000034CF,0xFFFFE83D,0x00000562,0x00002458,0xFFFFF130,0x00000430,0x00002458,0xFFFFF130,0x00000430},
- {"0000001000010011111100001111111010011001001011000010100110000100",0x00003352,0xFFFFE9D1,0x00000515,0x0000212A,0xFFFFF3DC,0x000003B4,0x0000212A,0xFFFFF3DC,0x000003B4},
- {"0000001000010011111100001111111010011001001010000100000010100100",0x00002946,0xFFFFF09B,0x00000415,0x00001DC9,0xFFFFF650,0x00000366,0x00001DC9,0xFFFFF650,0x00000366},
- {"0000001000010011111100001111111010011001001100000001000100100100",0x00003080,0xFFFFEB47,0x000004E1,0x00001BD5,0xFFFFF73B,0x00000329,0x00001BD5,0xFFFFF73B,0x00000329},
- {"0000001000010011111100001111111010011001000110100001100010000100",0x00002FBD,0xFFFFEB7B,0x000004DD,0x000017FC,0xFFFFF99E,0x000002C7,0x000017FC,0xFFFFF99E,0x000002C7},
- {"0000001000010011111100001111111010011001001010000001000100100100",0x00002A28,0xFFFFF032,0x0000041F,0x00001B19,0xFFFFF83A,0x00000312,0x00001B19,0xFFFFF83A,0x00000312},
- {"0000001000010011111100001111111010011001001000100100000011000100",0x00003420,0xFFFFE936,0x00000530,0x000023C2,0xFFFFF203,0x00000406,0x000023C2,0xFFFFF203,0x00000406},
- {"0000001000010011111100001111111010011001001100000001000101000100",0x00002F7C,0xFFFFEBBA,0x000004D1,0x0000185D,0xFFFFF975,0x000002CA,0x0000185D,0xFFFFF975,0x000002CA},
- {"0000001000010011111100001111111010011001001011100010000001000100",0x00002C51,0xFFFFEE3B,0x0000046F,0x000019AA,0xFFFFF8DD,0x000002ED,0x000019AA,0xFFFFF8DD,0x000002ED},
- {"0000001000010011111100001111111010011001000110100100000101000100",0x000033D6,0xFFFFE8F2,0x0000053D,0x00001D73,0xFFFFF5FB,0x0000035B,0x00001D73,0xFFFFF5FB,0x0000035B},
- {"0000001000010011111100001111111010011001001100100011000010000100",0x000031D9,0xFFFFEAF7,0x000004E4,0x00001EBD,0xFFFFF5A6,0x00000368,0x00001EBD,0xFFFFF5A6,0x00000368},
- {"0000001000010011111100001111111010011001000110100010000010100100",0x00003386,0xFFFFE9CE,0x00000515,0x00002422,0xFFFFF1F3,0x00000405,0x00002422,0xFFFFF1F3,0x00000405},
- {"0000001000010011111100001111111010011001001011000101000011100100",0x000032FB,0xFFFFE9BC,0x00000520,0x00002301,0xFFFFF267,0x000003F7,0x00002301,0xFFFFF267,0x000003F7},
- {"0000001000010011111100001111111010011001001100100010100100100100",0x000032C2,0xFFFFEAC0,0x000004EA,0x0000250F,0xFFFFF1A2,0x00000413,0x0000250F,0xFFFFF1A2,0x00000413},
- {"0000001000010011111100001111111010011001000111000010100101000100",0x00003722,0xFFFFE8A6,0x00000527,0x000026E4,0xFFFFF0F5,0x0000041C,0x000026E4,0xFFFFF0F5,0x0000041C},
- {"0000001000010011111100001111111010011001001011000100100011000100",0x000035A4,0xFFFFE822,0x00000558,0x000022F2,0xFFFFF288,0x000003E8,0x000022F2,0xFFFFF288,0x000003E8},
- {"0000001000010011111100001111111010011001001010000000100100100100",0x00002CD1,0xFFFFEDC6,0x0000048C,0x00001EAF,0xFFFFF53D,0x00000396,0x00001EAF,0xFFFFF53D,0x00000396},
- {"0000001000010011111100001111111010011001001100000001000101100100",0x00003156,0xFFFFEA60,0x0000050B,0x00001BBC,0xFFFFF704,0x00000335,0x00001BBC,0xFFFFF704,0x00000335},
- {"0000001000010011111100001111111010011001001011000101000100000100",0x000034A1,0xFFFFE8C0,0x00000544,0x00002528,0xFFFFF105,0x0000042C,0x00002528,0xFFFFF105,0x0000042C},
- {"0000001000010011111100001111111010011001001100100011000001100100",0x000032CE,0xFFFFE9D3,0x00000520,0x000021FF,0xFFFFF2FD,0x000003E4,0x000021FF,0xFFFFF2FD,0x000003E4},
- {"0000001000010011111100001111111010011001000110100101000010100100",0x000034A0,0xFFFFE823,0x0000056D,0x0000256F,0xFFFFF047,0x0000045A,0x0000256F,0xFFFFF047,0x0000045A},
- {"0000001000010011111100001111111010011001001100000011100101000100",0x00003109,0xFFFFEBD6,0x000004BF,0x000022D4,0xFFFFF32D,0x000003D0,0x000022D4,0xFFFFF32D,0x000003D0},
- {"0000001000010011111100001111111010011001001011000001000101100100",0x000030B7,0xFFFFEAF0,0x000004F3,0x00001AEC,0xFFFFF7A9,0x0000031B,0x00001AEC,0xFFFFF7A9,0x0000031B},
- {"0000001000010011111100001111111010011001001011000011100110100100",0x00003078,0xFFFFEBA4,0x000004CF,0x00001E7A,0xFFFFF5AF,0x0000036B,0x00001E7A,0xFFFFF5AF,0x0000036B},
- {"0000001000010011111100001111111010011001001100000100000100100100",0x00003442,0xFFFFE998,0x00000518,0x000025EA,0xFFFFF0F3,0x0000042B,0x000025EA,0xFFFFF0F3,0x0000042B},
- {"0000001000010011111100001111111010011001001100000010000110100100",0x000031CB,0xFFFFEA80,0x00000501,0x000020A3,0xFFFFF403,0x000003B2,0x000020A3,0xFFFFF403,0x000003B2},
- {"0000001000010011111100001111111010011001001010100010100110000100",0x00002947,0xFFFFF018,0x00000433,0x00001BA5,0xFFFFF75C,0x00000340,0x00001BA5,0xFFFFF75C,0x00000340},
- {"0000001000010011111100001111111010011001001011000011100110000100",0x000033F9,0xFFFFE99D,0x00000518,0x00002231,0xFFFFF358,0x000003C5,0x00002231,0xFFFFF358,0x000003C5},
- {"0000001000010011111100001111111010011001001100100001000100100100",0x00003131,0xFFFFEA45,0x00000513,0x00001973,0xFFFFF85E,0x00000301,0x00001973,0xFFFFF85E,0x00000301},
- {"0000001000010011111100001111111010011001000111000010100110100100",0x00003571,0xFFFFE8AC,0x00000539,0x00002049,0xFFFFF49C,0x0000038D,0x00002049,0xFFFFF49C,0x0000038D},
- {"0000001000010011111100001111111010011001001011100011100001100100",0x0000309E,0xFFFFEB1D,0x000004E8,0x000019ED,0xFFFFF86E,0x000002F8,0x000019ED,0xFFFFF86E,0x000002F8},
- {"0000001000010011111100001111111010011001001100000010100110000100",0x00003091,0xFFFFEB9B,0x000004CC,0x00001D2C,0xFFFFF6A2,0x0000033D,0x00001D2C,0xFFFFF6A2,0x0000033D},
- {"0000001000010011111100001111111010011001001100000000100011100100",0x00003069,0xFFFFEAFD,0x000004F8,0x00001E82,0xFFFFF51C,0x0000038D,0x00001E82,0xFFFFF51C,0x0000038D},
- {"0000001000010011111100001111111010011001001000100001000010100100",0x00003459,0xFFFFE7F2,0x00000572,0x00001DA7,0xFFFFF552,0x0000037F,0x00001DA7,0xFFFFF552,0x0000037F},
- {"0000001000010011111100001111111010011001001100100001000100000100",0x0000304B,0xFFFFEAFB,0x000004F4,0x0000191E,0xFFFFF8BD,0x000002EE,0x0000191E,0xFFFFF8BD,0x000002EE},
- {"0000001000010011111100001111111010011001001100000010000011000100",0x0000346E,0xFFFFEA07,0x000004FD,0x00002767,0xFFFFF058,0x00000440,0x00002767,0xFFFFF058,0x00000440},
- {"0000001000010011111100001111111010011001001011100011000010000100",0x000030B5,0xFFFFEBC1,0x000004C1,0x00001B3C,0xFFFFF818,0x000002FD,0x00001B3C,0xFFFFF818,0x000002FD},
- {"0000001000010011111100001111111010011001001100000000100100000100",0x0000321F,0xFFFFE9EA,0x00000524,0x00002380,0xFFFFF1C2,0x0000041A,0x00002380,0xFFFFF1C2,0x0000041A},
- {"0000001000010011111100001111111010011001001011100011000001000100",0x000030DF,0xFFFFEB37,0x000004E2,0x00001E3C,0xFFFFF5BB,0x00000369,0x00001E3C,0xFFFFF5BB,0x00000369},
- {"0000001000010011111100001111111010011001001010000100100010100100",0x000027E0,0xFFFFF0E2,0x00000416,0x00001A6A,0xFFFFF820,0x00000321,0x00001A6A,0xFFFFF820,0x00000321},
- {"0000001000010011111100001111111010011001000110100001000010000100",0x00002FA1,0xFFFFEB63,0x000004E7,0x0000196B,0xFFFFF880,0x000002FB,0x0000196B,0xFFFFF880,0x000002FB},
- {"0000001000010011111100001111111010011001000111000001000010000100",0x0000310C,0xFFFFEAAF,0x000004FC,0x000019EF,0xFFFFF850,0x000002FD,0x000019EF,0xFFFFF850,0x000002FD},
- {"0000001000010011111100001111111010011001001100100011100100000100",0x0000334A,0xFFFFEA07,0x0000050B,0x00002380,0xFFFFF26F,0x000003F0,0x00002380,0xFFFFF26F,0x000003F0},
- {"0000001000010011111100001111111010011001001100000010100101000100",0x00002FF9,0xFFFFECDC,0x00000492,0x00002297,0xFFFFF394,0x000003BF,0x00002297,0xFFFFF394,0x000003BF},
- {"0000001000010011111100001111111010011001001011000010000101100100",0x0000354B,0xFFFFE894,0x00000546,0x000024C4,0xFFFFF16C,0x0000041B,0x000024C4,0xFFFFF16C,0x0000041B},
- {"0000001000010011111100001111111010011001001000100000100100100100",0x00003245,0xFFFFE92F,0x00000544,0x00001829,0xFFFFF8F1,0x000002EA,0x00001829,0xFFFFF8F1,0x000002EA},
- {"0000001000010011111100001111111010011001001011100100100010000100",0x0000302F,0xFFFFEB51,0x000004E3,0x0000199F,0xFFFFF894,0x000002F4,0x0000199F,0xFFFFF894,0x000002F4},
- {"0000001000010011111100001111111010011001001011100001100011000100",0x00002F54,0xFFFFEC86,0x000004A6,0x00001A6F,0xFFFFF891,0x000002EC,0x00001A6F,0xFFFFF891,0x000002EC},
- {"0000001000010011111100001111111010011001001010000100000101100100",0x00002908,0xFFFFF0D8,0x0000040A,0x00001C9B,0xFFFFF729,0x00000342,0x00001C9B,0xFFFFF729,0x00000342},
- {"0000001000010011111100001111111010011001001100000010100101100100",0x000031D9,0xFFFFEB40,0x000004D7,0x000023F5,0xFFFFF259,0x000003F4,0x000023F5,0xFFFFF259,0x000003F4},
- {"0000001000010011111100001111111010011001001100000100100011100100",0x000034C8,0xFFFFE8C6,0x0000053F,0x00002313,0xFFFFF280,0x000003EC,0x00002313,0xFFFFF280,0x000003EC},
- {"0000001000010011111100001111111010011001001100000101000011000100",0x000037D1,0xFFFFE6A1,0x0000059C,0x00002C6A,0xFFFFEBFF,0x00000504,0x00002C6A,0xFFFFEBFF,0x00000504},
- {"0000001000010011111100001111111010011001001100100001100101100100",0x000030E9,0xFFFFEA6B,0x0000050F,0x00001A2D,0xFFFFF7DF,0x00000316,0x00001A2D,0xFFFFF7DF,0x00000316},
- {"0000001000010011111100001111111010011001001100000010000010000100",0x0000323D,0xFFFFEA95,0x000004F4,0x00001ED2,0xFFFFF584,0x0000036C,0x00001ED2,0xFFFFF584,0x0000036C},
- {"0000001000010011111100001111111010011001001011000011000000100100",0x000033D6,0xFFFFE9DB,0x00000510,0x000027A7,0xFFFFEFC7,0x0000045E,0x000027A7,0xFFFFEFC7,0x0000045E},
- {"0000001000010011111100001111111010011001000111000011000101100100",0x00003444,0xFFFFE98A,0x00000517,0x000020FD,0xFFFFF43F,0x0000039D,0x000020FD,0xFFFFF43F,0x0000039D},
- {"0000001000010011111100001111111010011001001010000000100011100100",0x00002987,0xFFFFEFA1,0x0000044B,0x00001B06,0xFFFFF788,0x0000033C,0x00001B06,0xFFFFF788,0x0000033C},
- {"0000001000010011111100001111111010011001001011000010100011100100",0x0000311D,0xFFFFED20,0x00000474,0x000025DA,0xFFFFF223,0x000003F0,0x000025DA,0xFFFFF223,0x000003F0},
- {"0000001000010011111100001111111010011001001011000001000100100100",0x000032A2,0xFFFFEA0A,0x0000050D,0x00001D48,0xFFFFF659,0x0000034A,0x00001D48,0xFFFFF659,0x0000034A},
- {"0000001000010011111100001111111010011001001000100000100011100100",0x00003110,0xFFFFE9EA,0x00000529,0x00001786,0xFFFFF958,0x000002DB,0x00001786,0xFFFFF958,0x000002DB},
- {"0000001000010011111100001111111010011001001010000010000110100100",0x000027F2,0xFFFFF174,0x000003F7,0x00001C7A,0xFFFFF72A,0x00000348,0x00001C7A,0xFFFFF72A,0x00000348},
- {"0000001000010011111100001111111010011001000111000001000011100100",0x000031DB,0xFFFFEA7D,0x000004FB,0x000019C4,0xFFFFF8B1,0x000002E6,0x000019C4,0xFFFFF8B1,0x000002E6},
- {"0000001000010011111100001111111010011001001011000001000100000100",0x00003158,0xFFFFEAAC,0x000004FA,0x00001BC1,0xFFFFF737,0x0000032B,0x00001BC1,0xFFFFF737,0x0000032B},
- {"0000001000010011111100001111111010011001001100000001000011000100",0x00002F36,0xFFFFEBF9,0x000004CA,0x00001A2A,0xFFFFF83F,0x00000303,0x00001A2A,0xFFFFF83F,0x00000303},
- {"0000001000010011111100001111111010011001001100100011100010100100",0x000032B4,0xFFFFEA72,0x000004FA,0x000021FF,0xFFFFF378,0x000003C5,0x000021FF,0xFFFFF378,0x000003C5},
- {"0000001000010011111100001111111010011001001100000011000101100100",0x00003262,0xFFFFEAFA,0x000004DF,0x00002441,0xFFFFF237,0x000003F6,0x00002441,0xFFFFF237,0x000003F6},
- {"0000001000010011111100001111111010011001001100000011100100100100",0x0000336A,0xFFFFEAFB,0x000004D1,0x00002746,0xFFFFF0B8,0x0000042B,0x00002746,0xFFFFF0B8,0x0000042B},
- {"0000001000010011111100001111111010011001000110100100000010000100",0x000032E5,0xFFFFE923,0x00000541,0x00001DF0,0xFFFFF552,0x00000380,0x00001DF0,0xFFFFF552,0x00000380},
- {"0000001000010011111100001111111010011001001100000100000001100100",0x000035D1,0xFFFFE80B,0x0000055F,0x00002780,0xFFFFEF74,0x0000046F,0x00002780,0xFFFFEF74,0x0000046F},
- {"0000001000010011111100001111111010011001001100000010100010100100",0x000033EC,0xFFFFEA48,0x000004F4,0x0000269F,0xFFFFF0D8,0x0000042A,0x0000269F,0xFFFFF0D8,0x0000042A},
- {"0000001000010011111100001111111010011001001100100011100010000100",0x000030C4,0xFFFFEB39,0x000004E2,0x00001B44,0xFFFFF7AA,0x00000318,0x00001B44,0xFFFFF7AA,0x00000318},
- {"0000001000010011111100001111111010011001001010000001000101000100",0x00002926,0xFFFFF0AF,0x0000040E,0x0000194E,0xFFFFF959,0x000002E2,0x0000194E,0xFFFFF959,0x000002E2},
- {"0000001000010011111100001111111010011001001011000001000011000100",0x00003141,0xFFFFEAAF,0x000004F6,0x00001864,0xFFFFF97C,0x000002C6,0x00001864,0xFFFFF97C,0x000002C6},
- {"0000001000010011111100001111111010011001001100000001000001100100",0x000030B2,0xFFFFEB7C,0x000004DB,0x000022CE,0xFFFFF2B5,0x000003F0,0x000022CE,0xFFFFF2B5,0x000003F0},
- {"0000001000010011111100001111111010011001001100000001100101000100",0x0000318C,0xFFFFEAC7,0x000004F6,0x00002113,0xFFFFF3CA,0x000003BD,0x00002113,0xFFFFF3CA,0x000003BD},
- {"0000001000010011111100001111111010011001001011100001000100000100",0x00002FD2,0xFFFFEB8F,0x000004D9,0x00001996,0xFFFFF89F,0x000002F1,0x00001996,0xFFFFF89F,0x000002F1},
- {"0000001000010011111100001111111010011001000110100010100010100100",0x0000310D,0xFFFFEB25,0x000004E7,0x00001F67,0xFFFFF4EF,0x0000038E,0x00001F67,0xFFFFF4EF,0x0000038E},
- {"0000001000010011111100001111111010011001001010100100100101100100",0x00002BBC,0xFFFFEE68,0x00000477,0x00002050,0xFFFFF41D,0x000003C8,0x00002050,0xFFFFF41D,0x000003C8},
- {"0000001000010011111100001111111010011001001100000010000100000100",0x00003096,0xFFFFECED,0x00000486,0x000024C9,0xFFFFF278,0x000003E7,0x000024C9,0xFFFFF278,0x000003E7},
- {"0000001000010011111100001111111010011001001011000001000010100100",0x00003401,0xFFFFE8F1,0x0000053C,0x00001E75,0xFFFFF55C,0x00000376,0x00001E75,0xFFFFF55C,0x00000376},
- {"0000001000010011111100001111111010011001001100000010100001000100",0x0000319E,0xFFFFEAB1,0x000004F8,0x00001EA3,0xFFFFF567,0x00000378,0x00001EA3,0xFFFFF567,0x00000378},
- {"0000001000010011111100001111111010011001001100100010100101100100",0x000030FD,0xFFFFEB4C,0x000004DB,0x00001CA6,0xFFFFF6E8,0x00000335,0x00001CA6,0xFFFFF6E8,0x00000335},
- {"0000001000010011111100001111111010011001001011100100000010100100",0x000030D6,0xFFFFEB1A,0x000004E4,0x00001A0D,0xFFFFF87D,0x000002EF,0x00001A0D,0xFFFFF87D,0x000002EF},
- {"0000001000010011111100001111111010011001001011000010000100100100",0x0000324B,0xFFFFEB17,0x000004D9,0x00002225,0xFFFFF3A8,0x000003BA,0x00002225,0xFFFFF3A8,0x000003BA},
- {"0000001000010011111100001111111010011001001010000100000010000100",0x00002A00,0xFFFFF02E,0x00000424,0x00001E21,0xFFFFF61D,0x0000036C,0x00001E21,0xFFFFF61D,0x0000036C},
- {"0000001000010011111100001111111010011001001010100100100010100100",0x000029CF,0xFFFFEF53,0x00000457,0x00001B11,0xFFFFF772,0x0000033D,0x00001B11,0xFFFFF772,0x0000033D},
- {"0000001000010011111100001111111010011001000110100011000010100100",0x000032A1,0xFFFFEA63,0x000004FB,0x00001F83,0xFFFFF516,0x0000037E,0x00001F83,0xFFFFF516,0x0000037E},
- {"0000001000010011111100001111111010011001001011100010000011000100",0x0000305C,0xFFFFEC14,0x000004B5,0x00001D0B,0xFFFFF6ED,0x00000332,0x00001D0B,0xFFFFF6ED,0x00000332},
- {"0000001000010011111100001111111010011001001011000001000001100100",0x00003467,0xFFFFE8D5,0x00000543,0x0000243F,0xFFFFF190,0x00000418,0x0000243F,0xFFFFF190,0x00000418},
- {"0000001000010011111100001111111010011001001010100010000001100100",0x00002796,0xFFFFF133,0x00000409,0x00001903,0xFFFFF91C,0x000002FC,0x00001903,0xFFFFF91C,0x000002FC},
- {"0000001000010011111100001111111010011001001100000010000101100100",0x000031F6,0xFFFFEAB7,0x000004F5,0x000022B9,0xFFFFF2D0,0x000003E6,0x000022B9,0xFFFFF2D0,0x000003E6},
- {"0000001000010011111100001111111010011001001011100101000100000100",0x00003196,0xFFFFEA76,0x00000503,0x00001CC5,0xFFFFF67D,0x0000034A,0x00001CC5,0xFFFFF67D,0x0000034A},
- {"0000001000010011111100001111111010011001001100100001000101000100",0x00002F9E,0xFFFFEAD9,0x00000505,0x000017C1,0xFFFFF93D,0x000002DF,0x000017C1,0xFFFFF93D,0x000002DF},
- {"0000001000010011111100001111111010011001001011100010000100100100",0x00002FBC,0xFFFFEC75,0x000004A8,0x00001D6D,0xFFFFF6AC,0x0000033D,0x00001D6D,0xFFFFF6AC,0x0000033D},
- {"0000001000010011111100001111111010011001001011000011100010100100",0x00003541,0xFFFFE921,0x00000524,0x00002662,0xFFFFF0CB,0x0000042B,0x00002662,0xFFFFF0CB,0x0000042B},
- {"0000001000010011111100001111111010011001001010100010000110100100",0x00002953,0xFFFFEF76,0x00000459,0x00001C05,0xFFFFF6A0,0x00000368,0x00001C05,0xFFFFF6A0,0x00000368},
- {"0000001000010011111100001111111010011001001011000100100100100100",0x000034BC,0xFFFFE8DD,0x00000536,0x0000210E,0xFFFFF3F4,0x000003A8,0x0000210E,0xFFFFF3F4,0x000003A8},
- {"0000001000010011111100001111111010011001001011000010100110100100",0x000034BE,0xFFFFE916,0x0000052F,0x000024A1,0xFFFFF1A6,0x00000410,0x000024A1,0xFFFFF1A6,0x00000410},
- {"0000001000010011111100001111111010011001001100000100100101100100",0x000037B5,0xFFFFE7A9,0x0000055B,0x000028A1,0xFFFFEF51,0x00000467,0x000028A1,0xFFFFEF51,0x00000467},
- {"0000001000010011111100001111111010011001001100000001000100000100",0x00002FC5,0xFFFFEBBE,0x000004D1,0x00001BA5,0xFFFFF757,0x00000328,0x00001BA5,0xFFFFF757,0x00000328},
- {"0000001000010011111100001111111010011001001100000100000010100100",0x000033CB,0xFFFFE944,0x0000052B,0x00001FBE,0xFFFFF4B1,0x0000038C,0x00001FBE,0xFFFFF4B1,0x0000038C},
- {"0000001000010011111100001111111010011001001100000001100001000100",0x000030AE,0xFFFFEBA0,0x000004D3,0x00002268,0xFFFFF316,0x000003DD,0x00002268,0xFFFFF316,0x000003DD},
- {"0000001000010011111100001111111010011001001011000010000010100100",0x00002F90,0xFFFFEC5A,0x000004B0,0x00001C3A,0xFFFFF752,0x00000323,0x00001C3A,0xFFFFF752,0x00000323},
- {"0000001000010011111100001111111010011001001011100011100011100100",0x00003113,0xFFFFEB91,0x000004C8,0x00001E3C,0xFFFFF623,0x0000034E,0x00001E3C,0xFFFFF623,0x0000034E},
- {"0000001000010011111100001111111010011001001100100011100110000100",0x0000330B,0xFFFFE94B,0x00000539,0x000020E7,0xFFFFF37E,0x000003CD,0x000020E7,0xFFFFF37E,0x000003CD},
- {"0000001000010011111100001111111010011001001011100010100001100100",0x000031D1,0xFFFFEACB,0x000004ED,0x00001E82,0xFFFFF5B2,0x00000365,0x00001E82,0xFFFFF5B2,0x00000365},
- {"0000001000010011111100001111111010011001001010100011100110000100",0x00002CD5,0xFFFFEDC1,0x0000048D,0x000020F8,0xFFFFF3C1,0x000003D1,0x000020F8,0xFFFFF3C1,0x000003D1},
- { NULL ,0x000,0x000,0x000,0x000,0x000,0x000,0x000,0x000,0x000}
+static const struct phm_fuses_default vega10_fuses_default[] = {
+ { 0x0213EA94DE0E4964, 0x00003C96, 0xFFFFE226, 0x00000656, 0x00002203, 0xFFFFF201, 0x000003FF, 0x00002203, 0xFFFFF201, 0x000003FF },
+ { 0x0213EA94DE0A1884, 0x00003CC5, 0xFFFFE23A, 0x0000064E, 0x00002258, 0xFFFFF1F7, 0x000003FC, 0x00002258, 0xFFFFF1F7, 0x000003FC },
+ { 0x0213EA94DE0E31A4, 0x00003CAF, 0xFFFFE36E, 0x00000602, 0x00001E98, 0xFFFFF569, 0x00000357, 0x00001E98, 0xFFFFF569, 0x00000357 },
+ { 0x0213EA94DE2C1144, 0x0000391A, 0xFFFFE548, 0x000005C9, 0x00001B98, 0xFFFFF707, 0x00000324, 0x00001B98, 0xFFFFF707, 0x00000324 },
+ { 0x0213EA94DE2C18C4, 0x00003821, 0xFFFFE674, 0x00000597, 0x00002196, 0xFFFFF361, 0x000003C0, 0x00002196, 0xFFFFF361, 0x000003C0 },
+ { 0x0213EA94DE263884, 0x000044A2, 0xFFFFDCB7, 0x00000738, 0x0000325C, 0xFFFFE6A7, 0x000005E6, 0x0000325C, 0xFFFFE6A7, 0x000005E6 },
+ { 0x0213EA94DE082924, 0x00004057, 0xFFFFE1CF, 0x0000063C, 0x00002E2E, 0xFFFFEB62, 0x000004FD, 0x00002E2E, 0xFFFFEB62, 0x000004FD },
+ { 0x0213EA94DE284924, 0x00003FD0, 0xFFFFDF0F, 0x000006E5, 0x0000267C, 0xFFFFEE2D, 0x000004AB, 0x0000267C, 0xFFFFEE2D, 0x000004AB },
+ { 0x0213EA94DE280904, 0x00003F13, 0xFFFFE010, 0x000006AD, 0x000020E7, 0xFFFFF266, 0x000003EC, 0x000020E7, 0xFFFFF266, 0x000003EC },
+ { 0x0213EA94DE082044, 0x00004088, 0xFFFFDFAB, 0x000006B6, 0x0000252B, 0xFFFFEFDB, 0x00000458, 0x0000252B, 0xFFFFEFDB, 0x00000458 },
+ { 0x0213EA94DE283884, 0x00003EF6, 0xFFFFE017, 0x000006AA, 0x00001F67, 0xFFFFF369, 0x000003BE, 0x00001F67, 0xFFFFF369, 0x000003BE },
+ { 0x0213EA94DE2C2184, 0x00003CDD, 0xFFFFE2A7, 0x0000063C, 0x000026C6, 0xFFFFEF38, 0x00000478, 0x000026C6, 0xFFFFEF38, 0x00000478 },
+ { 0x0213EA94DE105124, 0x00003FA8, 0xFFFFDF02, 0x000006F0, 0x000027FE, 0xFFFFECF6, 0x000004EA, 0x000027FE, 0xFFFFECF6, 0x000004EA },
+ { 0x0213EA94DE2638C4, 0x00004670, 0xFFFFDC40, 0x00000742, 0x00003A7A, 0xFFFFE1A7, 0x000006B6, 0x00003A7A, 0xFFFFE1A7, 0x000006B6 },
+ { 0x0213EA94DE2C3024, 0x00003CDC, 0xFFFFE18C, 0x00000683, 0x00002A69, 0xFFFFEBE7, 0x00000515, 0x00002A69, 0xFFFFEBE7, 0x00000515 },
+ { 0x0213EA94DE0E38C4, 0x00003CEC, 0xFFFFE38E, 0x00000601, 0x00002752, 0xFFFFEFA7, 0x00000453, 0x00002752, 0xFFFFEFA7, 0x00000453 },
+ { 0x0213EA94DE2C1124, 0x000037D0, 0xFFFFE634, 0x000005A7, 0x00001CD2, 0xFFFFF644, 0x00000348, 0x00001CD2, 0xFFFFF644, 0x00000348 },
+ { 0x0213EA94DE283964, 0x00003DF5, 0xFFFFE0A5, 0x00000698, 0x00001FD5, 0xFFFFF30E, 0x000003D1, 0x00001FD5, 0xFFFFF30E, 0x000003D1 },
+ { 0x0213EA94DE0828C4, 0x00004201, 0xFFFFE03E, 0x00000688, 0x00003206, 0xFFFFE852, 0x0000058A, 0x00003206, 0xFFFFE852, 0x0000058A },
+ { 0x0213EA94DE2C1864, 0x00003BED, 0xFFFFE2F5, 0x00000638, 0x0000270D, 0xFFFFEED0, 0x0000048E, 0x0000270D, 0xFFFFEED0, 0x0000048E },
+ { 0x0213EA94DE0A1904, 0x00003E82, 0xFFFFE1BE, 0x00000654, 0x000025FB, 0xFFFFEFFA, 0x00000448, 0x000025FB, 0xFFFFEFFA, 0x00000448 },
+ { 0x0213EA94DE2C40C4, 0x00003962, 0xFFFFE4B9, 0x000005EF, 0x00002385, 0xFFFFF156, 0x00000423, 0x00002385, 0xFFFFF156, 0x00000423 },
+ { 0x0213EA94DE2C0944, 0x00003D88, 0xFFFFE21A, 0x00000655, 0x0000295A, 0xFFFFED68, 0x000004C4, 0x0000295A, 0xFFFFED68, 0x000004C4 },
+ { 0x0213EA94DE2C1104, 0x00003AA4, 0xFFFFE4A3, 0x000005E0, 0x000022EF, 0xFFFFF250, 0x000003EB, 0x000022EF, 0xFFFFF250, 0x000003EB },
+ { 0x0213EA94DE0E29A4, 0x00003D97, 0xFFFFE30D, 0x0000060D, 0x0000205D, 0xFFFFF45D, 0x00000380, 0x0000205D, 0xFFFFF45D, 0x00000380 },
+ { 0x0213EA94DE2C40A4, 0x000039B6, 0xFFFFE446, 0x00000605, 0x00002325, 0xFFFFF16C, 0x0000041F, 0x00002325, 0xFFFFF16C, 0x0000041F },
+ { 0x0213EA94DE263904, 0x0000457E, 0xFFFFDCF6, 0x00000722, 0x00003972, 0xFFFFE27B, 0x0000068E, 0x00003972, 0xFFFFE27B, 0x0000068E },
+ { 0x0213EA94DE0A1924, 0x00003FB8, 0xFFFFE101, 0x00000670, 0x00002787, 0xFFFFEEF5, 0x00000471, 0x00002787, 0xFFFFEEF5, 0x00000471 },
+ { 0x0213EA94DE0E38A4, 0x00003BB2, 0xFFFFE430, 0x000005EA, 0x000024A5, 0xFFFFF162, 0x00000409, 0x000024A5, 0xFFFFF162, 0x00000409 },
+ { 0x0213EA94DE082144, 0x00003EC5, 0xFFFFE1BD, 0x0000064F, 0x000022F0, 0xFFFFF227, 0x000003E8, 0x000022F0, 0xFFFFF227, 0x000003E8 },
+ { 0x0213EA94DE2C3164, 0x000038A7, 0xFFFFE59F, 0x000005C1, 0x000021CC, 0xFFFFF2DF, 0x000003D9, 0x000021CC, 0xFFFFF2DF, 0x000003D9 },
+ { 0x0213EA94DE324184, 0x00002995, 0xFFFFEF7A, 0x0000044C, 0x00001552, 0xFFFFFB5D, 0x00000292, 0x00001552, 0xFFFFFB5D, 0x00000292 },
+ { 0x0213EA94DE2C4064, 0x00003B26, 0xFFFFE2D3, 0x00000649, 0x000023B4, 0xFFFFF09B, 0x00000449, 0x000023B4, 0xFFFFF09B, 0x00000449 },
+ { 0x0213EA94DE081124, 0x000040D2, 0xFFFFE00A, 0x00000696, 0x000022DA, 0xFFFFF1E9, 0x000003F2, 0x000022DA, 0xFFFFF1E9, 0x000003F2 },
+ { 0x0213EA94DE2C3924, 0x00003C98, 0xFFFFE365, 0x00000618, 0x00002D5D, 0xFFFFEB3A, 0x0000051D, 0x00002D5D, 0xFFFFEB3A, 0x0000051D },
+ { 0x0213EA94DE2C10A4, 0x00003BBD, 0xFFFFE37E, 0x00000617, 0x0000252E, 0xFFFFF06E, 0x00000441, 0x0000252E, 0xFFFFF06E, 0x00000441 },
+ { 0x0213EA94DE262924, 0x00004363, 0xFFFFDF7A, 0x000006A0, 0x000031F5, 0xFFFFE880, 0x0000057B, 0x000031F5, 0xFFFFE880, 0x0000057B },
+ { 0x0213EA94DE0E3844, 0x00003CFC, 0xFFFFE2AF, 0x0000062E, 0x0000212A, 0xFFFFF335, 0x000003BF, 0x0000212A, 0xFFFFF335, 0x000003BF },
+ { 0x0213EA94DE1C4924, 0x0000252D, 0xFFFFF31B, 0x000003C3, 0x00001A1A, 0xFFFFF882, 0x00000325, 0x00001A1A, 0xFFFFF882, 0x00000325 },
+ { 0x0213EA94DE0A29A4, 0x00003FE2, 0xFFFFDFEF, 0x000006AC, 0x000025A2, 0xFFFFEF84, 0x00000462, 0x000025A2, 0xFFFFEF84, 0x00000462 },
+ { 0x0213EA94DE0820E4, 0x000040A5, 0xFFFFE13B, 0x0000065B, 0x00002C13, 0xFFFFEC75, 0x000004D7, 0x00002C13, 0xFFFFEC75, 0x000004D7 },
+ { 0x0213EA94DE0E48A4, 0x00003E42, 0xFFFFE1B3, 0x00000657, 0x0000221D, 0xFFFFF273, 0x000003DE, 0x0000221D, 0xFFFFF273, 0x000003DE },
+ { 0x0213EA94DE0A20E4, 0x00003E7F, 0xFFFFE255, 0x00000638, 0x00002D30, 0xFFFFEB8A, 0x00000503, 0x00002D30, 0xFFFFEB8A, 0x00000503 },
+ { 0x0213EA94DE2C29C4, 0x00003E56, 0xFFFFE16D, 0x00000670, 0x000028DC, 0xFFFFEDA0, 0x000004BA, 0x000028DC, 0xFFFFEDA0, 0x000004BA },
+ { 0x0213EA94DE2630A4, 0x000044AD, 0xFFFFDE24, 0x000006DD, 0x000031AD, 0xFFFFE850, 0x00000585, 0x000031AD, 0xFFFFE850, 0x00000585 },
+ { 0x0213EA94DE2C20E4, 0x00003AF3, 0xFFFFE5B0, 0x000005A6, 0x00002CF6, 0xFFFFEC75, 0x000004DD, 0x00002CF6, 0xFFFFEC75, 0x000004DD },
+ { 0x0213EA94DE0A2084, 0x00003E66, 0xFFFFE19E, 0x0000065B, 0x00002332, 0xFFFFF1B9, 0x000003FD, 0x00002332, 0xFFFFF1B9, 0x000003FD },
+ { 0x0213EA94DE082884, 0x00003FB4, 0xFFFFE0A5, 0x00000686, 0x0000253E, 0xFFFFF02E, 0x00000444, 0x0000253E, 0xFFFFF02E, 0x00000444 },
+ { 0x0213EA94DE2818A4, 0x00003E28, 0xFFFFE14D, 0x0000066E, 0x00001FE2, 0xFFFFF39A, 0x000003B1, 0x00001FE2, 0xFFFFF39A, 0x000003B1 },
+ { 0x0213EA94DE2C0904, 0x000039E6, 0xFFFFE44B, 0x000005FE, 0x0000210C, 0xFFFFF2F4, 0x000003DA, 0x0000210C, 0xFFFFF2F4, 0x000003DA },
+ { 0x0213EA94DE2C5104, 0x00003A4D, 0xFFFFE252, 0x0000067A, 0x000027E2, 0xFFFFECED, 0x000004FA, 0x000027E2, 0xFFFFECED, 0x000004FA },
+ { 0x0213EA94DE0A2964, 0x00004065, 0xFFFFE02F, 0x0000069B, 0x0000299D, 0xFFFFED38, 0x000004C2, 0x0000299D, 0xFFFFED38, 0x000004C2 },
+ { 0x0213EA94DE0E20A4, 0x000039EE, 0xFFFFE603, 0x00000594, 0x0000214F, 0xFFFFF429, 0x0000038E, 0x0000214F, 0xFFFFF429, 0x0000038E },
+ { 0x0213EA94DE0E48E4, 0x00003BD2, 0xFFFFE351, 0x00000618, 0x000020B8, 0xFFFFF377, 0x000003B4, 0x000020B8, 0xFFFFF377, 0x000003B4 },
+ { 0x0213EA94DE0A3124, 0x00003FAA, 0xFFFFE183, 0x0000065E, 0x000032AE, 0xFFFFE7C2, 0x000005A6, 0x000032AE, 0xFFFFE7C2, 0x000005A6 },
+ { 0x0213EA94DE2C2984, 0x00003AFB, 0xFFFFE3E4, 0x00000608, 0x00002293, 0xFFFFF21F, 0x000003FA, 0x00002293, 0xFFFFF21F, 0x000003FA },
+ { 0x0213EA94DE262064, 0x0000448B, 0xFFFFDD5D, 0x0000070D, 0x00002E4E, 0xFFFFE9DF, 0x00000551, 0x00002E4E, 0xFFFFE9DF, 0x00000551 },
+ { 0x0213EA94DE0E2184, 0x00003D46, 0xFFFFE39B, 0x000005F3, 0x0000218E, 0xFFFFF3CD, 0x00000398, 0x0000218E, 0xFFFFF3CD, 0x00000398 },
+ { 0x0213EA94DE0848E4, 0x00003F01, 0xFFFFDFD9, 0x000006BF, 0x000023AF, 0xFFFFF04E, 0x0000044C, 0x000023AF, 0xFFFFF04E, 0x0000044C },
+ { 0x0213EA94DE1029A4, 0x0000403D, 0xFFFFDF6B, 0x000006C9, 0x0000270D, 0xFFFFEE4B, 0x0000049E, 0x0000270D, 0xFFFFEE4B, 0x0000049E },
+ { 0x0213EA94DE0E3964, 0x00003C11, 0xFFFFE35C, 0x00000613, 0x000020F9, 0xFFFFF365, 0x000003B9, 0x000020F9, 0xFFFFF365, 0x000003B9 },
+ { 0x0213EA94DE2C3884, 0x00003B58, 0xFFFFE37D, 0x0000061F, 0x00002698, 0xFFFFEF46, 0x00000478, 0x00002698, 0xFFFFEF46, 0x00000478 },
+ { 0x0213EA94DE2841A4, 0x00003EBC, 0xFFFFDF7A, 0x000006D6, 0x0000212B, 0xFFFFF195, 0x0000041B, 0x0000212B, 0xFFFFF195, 0x0000041B },
+ { 0x0213EA94DE0848C4, 0x00004050, 0xFFFFDEB3, 0x000006FE, 0x00002D6C, 0xFFFFE961, 0x00000582, 0x00002D6C, 0xFFFFE961, 0x00000582 },
+ { 0x0213EA94DE262044, 0x000043F0, 0xFFFFDD9C, 0x00000702, 0x00002B31, 0xFFFFEBEA, 0x000004F7, 0x00002B31, 0xFFFFEBEA, 0x000004F7 },
+ { 0x0213EA94DE100924, 0x00003EFA, 0xFFFFE093, 0x00000696, 0x000026DB, 0xFFFFEEB3, 0x00000489, 0x000026DB, 0xFFFFEEB3, 0x00000489 },
+ { 0x0213EA94DE082064, 0x0000425D, 0xFFFFDE8D, 0x000006E6, 0x00002CA4, 0xFFFFEAD2, 0x00000531, 0x00002CA4, 0xFFFFEAD2, 0x00000531 },
+ { 0x0213EA94DE2639A4, 0x000043B0, 0xFFFFDD03, 0x00000728, 0x00002946, 0xFFFFECA6, 0x000004DE, 0x00002946, 0xFFFFECA6, 0x000004DE },
+ { 0x0213EA94DE282864, 0x00003F6A, 0xFFFFE03A, 0x0000069D, 0x00002208, 0xFFFFF1F8, 0x000003F6, 0x00002208, 0xFFFFF1F8, 0x000003F6 },
+ { 0x0213EA94DE2C2964, 0x00003A94, 0xFFFFE4A7, 0x000005E2, 0x000024D0, 0xFFFFF100, 0x00000426, 0x000024D0, 0xFFFFF100, 0x00000426 },
+ { 0x0213EA94DE2810C4, 0x00003F2F, 0xFFFFE0A3, 0x00000688, 0x00002198, 0xFFFFF271, 0x000003E2, 0x00002198, 0xFFFFF271, 0x000003E2 },
+ { 0x0213EA94DE1048E4, 0x00003EA5, 0xFFFFE032, 0x000006AE, 0x0000227C, 0xFFFFF130, 0x00000426, 0x0000227C, 0xFFFFF130, 0x00000426 },
+ { 0x0213EA94DE264144, 0x0000442F, 0xFFFFDBC4, 0x0000078B, 0x00003CD6, 0xFFFFDE6C, 0x0000076C, 0x00003CD6, 0xFFFFDE6C, 0x0000076C },
+ { 0x0213EA94DE282884, 0x00003DDE, 0xFFFFE174, 0x00000668, 0x00001FF4, 0xFFFFF38F, 0x000003B1, 0x00001FF4, 0xFFFFF38F, 0x000003B1 },
+ { 0x0213EA94DE0A3144, 0x000040B0, 0xFFFFE016, 0x000006A0, 0x00002DBB, 0xFFFFEA7F, 0x00000537, 0x00002DBB, 0xFFFFEA7F, 0x00000537 },
+ { 0x0213EA94DE2C3104, 0x00003429, 0xFFFFEA97, 0x000004DD, 0x000024D5, 0xFFFFF26F, 0x000003DF, 0x000024D5, 0xFFFFF26F, 0x000003DF },
+ { 0x0213EA94DE0E1904, 0x00003AEB, 0xFFFFE590, 0x000005A3, 0x000022CB, 0xFFFFF347, 0x000003B2, 0x000022CB, 0xFFFFF347, 0x000003B2 },
+ { 0x0213EA94DE283904, 0x00003B8E, 0xFFFFE2EF, 0x00000636, 0x00002351, 0xFFFFF143, 0x0000041C, 0x00002351, 0xFFFFF143, 0x0000041C },
+ { 0x0213EA94DE3240C4, 0x00002926, 0xFFFFF0B0, 0x00000410, 0x0000194E, 0xFFFFF94E, 0x000002E9, 0x0000194E, 0xFFFFF94E, 0x000002E9 },
+ { 0x0213EA94DE283184, 0x0000402B, 0xFFFFDF78, 0x000006C2, 0x00002273, 0xFFFFF16C, 0x00000414, 0x00002273, 0xFFFFF16C, 0x00000414 },
+ { 0x0213EA94DE0A10A4, 0x00003D6A, 0xFFFFE1D3, 0x00000659, 0x00002006, 0xFFFFF394, 0x000003B1, 0x00002006, 0xFFFFF394, 0x000003B1 },
+ { 0x0213EA94DE284064, 0x00004042, 0xFFFFDFD8, 0x000006A8, 0x00002135, 0xFFFFF29F, 0x000003D9, 0x00002135, 0xFFFFF29F, 0x000003D9 },
+ { 0x0213EA94DE0820A4, 0x0000405B, 0xFFFFE093, 0x00000682, 0x0000288F, 0xFFFFEE3A, 0x00000491, 0x0000288F, 0xFFFFEE3A, 0x00000491 },
+ { 0x0213EA94DE2C48A4, 0x00003A49, 0xFFFFE30C, 0x00000648, 0x000023F9, 0xFFFFF02D, 0x00000460, 0x000023F9, 0xFFFFF02D, 0x00000460 },
+ { 0x0213EA94DE282964, 0x00003D59, 0xFFFFE1CC, 0x0000065B, 0x00002013, 0xFFFFF37D, 0x000003B6, 0x00002013, 0xFFFFF37D, 0x000003B6 },
+ { 0x0213EA94DE2C3984, 0x000040C1, 0xFFFFDF8C, 0x000006CA, 0x00003271, 0xFFFFE6CA, 0x000005EA, 0x00003271, 0xFFFFE6CA, 0x000005EA },
+ { 0x0213EA94DE2620E4, 0x000042E9, 0xFFFFDFDC, 0x0000068C, 0x00002ED9, 0xFFFFEAAF, 0x0000051B, 0x00002ED9, 0xFFFFEAAF, 0x0000051B },
+ { 0x0213EA94DE083084, 0x000042ED, 0xFFFFDE50, 0x000006F0, 0x00002FCF, 0xFFFFE8BB, 0x0000058C, 0x00002FCF, 0xFFFFE8BB, 0x0000058C },
+ { 0x0213EA94DE0A4104, 0x00003EBD, 0xFFFFE099, 0x00000698, 0x00002709, 0xFFFFEE7B, 0x00000495, 0x00002709, 0xFFFFEE7B, 0x00000495 },
+ { 0x0213EA94DE284904, 0x00003F71, 0xFFFFDF82, 0x000006C9, 0x0000219B, 0xFFFFF1AD, 0x0000040F, 0x0000219B, 0xFFFFF1AD, 0x0000040F },
+ { 0x0213EA94DE2808E4, 0x00003E73, 0xFFFFE080, 0x0000069B, 0x000020E7, 0xFFFFF273, 0x000003E9, 0x000020E7, 0xFFFFF273, 0x000003E9 },
+ { 0x0213EA94DE0E3184, 0x00003E14, 0xFFFFE278, 0x0000062C, 0x00002275, 0xFFFFF2B3, 0x000003CE, 0x00002275, 0xFFFFF2B3, 0x000003CE },
+ { 0x0213EA94DE2C21A4, 0x00003ABB, 0xFFFFE3B9, 0x00000615, 0x00002192, 0xFFFFF28F, 0x000003EB, 0x00002192, 0xFFFFF28F, 0x000003EB },
+ { 0x0213EA94DE283124, 0x00003D53, 0xFFFFE255, 0x00000643, 0x0000275B, 0xFFFFEEED, 0x00000479, 0x0000275B, 0xFFFFEEED, 0x00000479 },
+ { 0x0213EA94DE262864, 0x000043E3, 0xFFFFDDC3, 0x000006FB, 0x00002B6B, 0xFFFFEBD6, 0x000004FA, 0x00002B6B, 0xFFFFEBD6, 0x000004FA },
+ { 0x0213EA94DE0E2144, 0x00003BDE, 0xFFFFE507, 0x000005B4, 0x000022CE, 0xFFFFF358, 0x000003AB, 0x000022CE, 0xFFFFF358, 0x000003AB },
+ { 0x0213EA94DE323164, 0x00002460, 0xFFFFF3B5, 0x000003A2, 0x000014E7, 0xFFFFFC32, 0x0000027C, 0x000014E7, 0xFFFFFC32, 0x0000027C },
+ { 0x0213EA94DE2820C4, 0x00003D20, 0xFFFFE298, 0x0000062F, 0x00002080, 0xFFFFF3AF, 0x000003A8, 0x00002080, 0xFFFFF3AF, 0x000003A8 },
+ { 0x0213EA94DE081904, 0x00003E14, 0xFFFFE221, 0x00000641, 0x000021BB, 0xFFFFF2EA, 0x000003CA, 0x000021BB, 0xFFFFF2EA, 0x000003CA },
+ { 0x0213EA94DE0A40C4, 0x00003DE1, 0xFFFFE14E, 0x00000677, 0x00002468, 0xFFFFF068, 0x00000440, 0x00002468, 0xFFFFF068, 0x00000440 },
+ { 0x0213EA94DE261084, 0x00004372, 0xFFFFDDF8, 0x000006F5, 0x00002B3F, 0xFFFFEBE8, 0x000004F8, 0x00002B3F, 0xFFFFEBE8, 0x000004F8 },
+ { 0x0213EA94DE0A28C4, 0x00003E4F, 0xFFFFE2A3, 0x0000062B, 0x00002F5A, 0xFFFFEA37, 0x0000053B, 0x00002F5A, 0xFFFFEA37, 0x0000053B },
+ { 0x0213EA94DE2850E4, 0x00003E07, 0xFFFFE02F, 0x000006B6, 0x0000216B, 0xFFFFF1A3, 0x00000416, 0x0000216B, 0xFFFFF1A3, 0x00000416 },
+ { 0x0213EA94DE2838A4, 0x00003DAB, 0xFFFFE128, 0x0000067F, 0x0000216F, 0xFFFFF236, 0x000003F3, 0x0000216F, 0xFFFFF236, 0x000003F3 },
+ { 0x0213EA94DE2C2924, 0x0000364B, 0xFFFFE8CB, 0x0000052A, 0x00002568, 0xFFFFF1B2, 0x00000400, 0x00002568, 0xFFFFF1B2, 0x00000400 },
+ { 0x0213EA94DE261064, 0x00004219, 0xFFFFDE87, 0x000006E8, 0x00002C59, 0xFFFFEAEE, 0x00000529, 0x00002C59, 0xFFFFEAEE, 0x00000529 },
+ { 0x0213EA94DE0E1944, 0x000039A8, 0xFFFFE602, 0x00000594, 0x00001D06, 0xFFFFF6F0, 0x00000316, 0x00001D06, 0xFFFFF6F0, 0x00000316 },
+ { 0x0213EA94DE2610E4, 0x00004052, 0xFFFFE01C, 0x00000698, 0x00002310, 0xFFFFF1A1, 0x000003FE, 0x00002310, 0xFFFFF1A1, 0x000003FE },
+ { 0x0213EA94DE0E2824, 0x00003C1C, 0xFFFFE3EB, 0x000005F1, 0x00002289, 0xFFFFF2CF, 0x000003C9, 0x00002289, 0xFFFFF2CF, 0x000003C9 },
+ { 0x0213EA94DE0E5124, 0x00003F19, 0xFFFFE085, 0x0000069E, 0x00002B94, 0xFFFFEB72, 0x0000051D, 0x00002B94, 0xFFFFEB72, 0x0000051D },
+ { 0x0213EA94DE0E41A4, 0x00003C51, 0xFFFFE2AD, 0x00000638, 0x0000206B, 0xFFFFF361, 0x000003BE, 0x0000206B, 0xFFFFF361, 0x000003BE },
+ { 0x0213EA94DE2610C4, 0x000040B9, 0xFFFFDFBB, 0x000006AB, 0x0000241F, 0xFFFFF0CC, 0x00000425, 0x0000241F, 0xFFFFF0CC, 0x00000425 },
+ { 0x0213EA94DE0A2064, 0x00003E62, 0xFFFFE12C, 0x00000678, 0x00002445, 0xFFFFF09E, 0x00000435, 0x00002445, 0xFFFFF09E, 0x00000435 },
+ { 0x0213EA94DE0E1984, 0x00003C97, 0xFFFFE399, 0x000005FB, 0x0000209D, 0xFFFFF41D, 0x0000038F, 0x0000209D, 0xFFFFF41D, 0x0000038F },
+ { 0x0213EA94DE0E3144, 0x00003FF9, 0xFFFFE1E9, 0x0000063E, 0x00002E96, 0xFFFFEAF5, 0x00000516, 0x00002E96, 0xFFFFEAF5, 0x00000516 },
+ { 0x0213EA94DE0A3084, 0x00003F04, 0xFFFFE109, 0x0000067A, 0x000026E1, 0xFFFFEF0B, 0x00000476, 0x000026E1, 0xFFFFEF0B, 0x00000476 },
+ { 0x0213EA94DE101124, 0x00003E3E, 0xFFFFE187, 0x00000660, 0x00002049, 0xFFFFF38D, 0x000003B0, 0x00002049, 0xFFFFF38D, 0x000003B0 },
+ { 0x0213EA94DE282944, 0x00003D58, 0xFFFFE253, 0x0000063D, 0x00002158, 0xFFFFF308, 0x000003C3, 0x00002158, 0xFFFFF308, 0x000003C3 },
+ { 0x0213EA94DE0840C4, 0x00004074, 0xFFFFDF8D, 0x000006C0, 0x00002799, 0xFFFFEE19, 0x000004A5, 0x00002799, 0xFFFFEE19, 0x000004A5 },
+ { 0x0213EA94DE281924, 0x00003DAF, 0xFFFFE1C9, 0x00000659, 0x000020E5, 0xFFFFF313, 0x000003C6, 0x000020E5, 0xFFFFF313, 0x000003C6 },
+ { 0x0213EA94DE0A3964, 0x000041DD, 0xFFFFDDFA, 0x0000071B, 0x0000348D, 0xFFFFE4B4, 0x0000064C, 0x0000348D, 0xFFFFE4B4, 0x0000064C },
+ { 0x0213EA94DE2C2884, 0x00003947, 0xFFFFE5AE, 0x000005B8, 0x000024A6, 0xFFFFF140, 0x0000041D, 0x000024A6, 0xFFFFF140, 0x0000041D },
+ { 0x0213EA94DE101844, 0x00003D35, 0xFFFFE197, 0x0000066E, 0x00002248, 0xFFFFF1BC, 0x00000408, 0x00002248, 0xFFFFF1BC, 0x00000408 },
+ { 0x0213EA94DE0A18E4, 0x00003F4F, 0xFFFFE13E, 0x0000066D, 0x00002AF0, 0xFFFFEC99, 0x000004DB, 0x00002AF0, 0xFFFFEC99, 0x000004DB },
+ { 0x0213EA94DE263944, 0x0000430F, 0xFFFFDDFB, 0x000006FC, 0x00002D4D, 0xFFFFEA55, 0x00000540, 0x00002D4D, 0xFFFFEA55, 0x00000540 },
+ { 0x0213EA94DE0E2944, 0x00003B22, 0xFFFFE543, 0x000005B1, 0x000022E1, 0xFFFFF31B, 0x000003B9, 0x000022E1, 0xFFFFF31B, 0x000003B9 },
+ { 0x0213EA94DE0E2084, 0x00003978, 0xFFFFE611, 0x00000592, 0x00001C36, 0xFFFFF771, 0x00000302, 0x00001C36, 0xFFFFF771, 0x00000302 },
+ { 0x0213EA94DE262164, 0x000044DF, 0xFFFFDDAB, 0x000006F2, 0x00002CEA, 0xFFFFEB47, 0x00000507, 0x00002CEA, 0xFFFFEB47, 0x00000507 },
+ { 0x0213EA94DE0A38C4, 0x00003E9B, 0xFFFFE12C, 0x0000067C, 0x00002B79, 0xFFFFEBD9, 0x00000503, 0x00002B79, 0xFFFFEBD9, 0x00000503 },
+ { 0x0213EA94DE263044, 0x00004464, 0xFFFFDCD3, 0x00000731, 0x00002D14, 0xFFFFEA2D, 0x0000054E, 0x00002D14, 0xFFFFEA2D, 0x0000054E },
+ { 0x0213EA94DE281124, 0x00003FB3, 0xFFFFE052, 0x00000693, 0x000020AC, 0xFFFFF311, 0x000003C6, 0x000020AC, 0xFFFFF311, 0x000003C6 },
+ { 0x0213EA94DE2C1084, 0x00003BDA, 0xFFFFE2FB, 0x00000636, 0x0000261E, 0xFFFFEF72, 0x00000471, 0x0000261E, 0xFFFFEF72, 0x00000471 },
+ { 0x0213EA94DE2C1964, 0x00003D72, 0xFFFFE28A, 0x0000063E, 0x000029D8, 0xFFFFED54, 0x000004C7, 0x000029D8, 0xFFFFED54, 0x000004C7 },
+ { 0x0213EA94DE2C2824, 0x00003E26, 0xFFFFE102, 0x00000694, 0x00002DD1, 0xFFFFE9CA, 0x0000056D, 0x00002DD1, 0xFFFFE9CA, 0x0000056D },
+ { 0x0213EA94DE104124, 0x000041CD, 0xFFFFDE97, 0x000006ED, 0x00002DE5, 0xFFFFE9B9, 0x00000565, 0x00002DE5, 0xFFFFE9B9, 0x00000565 },
+ { 0x0213EA94DE0A2984, 0x00003F30, 0xFFFFE06E, 0x00000698, 0x000024FF, 0xFFFFEFFC, 0x0000044F, 0x000024FF, 0xFFFFEFFC, 0x0000044F },
+ { 0x0213EA94DE2C38C4, 0x0000378B, 0xFFFFE6B4, 0x00000594, 0x000023A7, 0xFFFFF1DC, 0x00000407, 0x000023A7, 0xFFFFF1DC, 0x00000407 },
+ { 0x0213EA94DE0E4164, 0x00003CD7, 0xFFFFE28D, 0x00000636, 0x00002036, 0xFFFFF3B5, 0x000003AA, 0x00002036, 0xFFFFF3B5, 0x000003AA },
+ { 0x0213EA94DE0A3884, 0x00003EF9, 0xFFFFE0AA, 0x0000068D, 0x000024D3, 0xFFFFF02F, 0x00000445, 0x000024D3, 0xFFFFF02F, 0x00000445 },
+ { 0x0213EA94DE283944, 0x00003D08, 0xFFFFE1BB, 0x00000665, 0x00002159, 0xFFFFF26F, 0x000003E6, 0x00002159, 0xFFFFF26F, 0x000003E6 },
+ { 0x0213EA94DE2C20C4, 0x000038A9, 0xFFFFE6CA, 0x00000580, 0x000025D3, 0xFFFFF101, 0x00000421, 0x000025D3, 0xFFFFF101, 0x00000421 },
+ { 0x0213EA94DE0A20A4, 0x00003E45, 0xFFFFE1F8, 0x0000064D, 0x000027E3, 0xFFFFEEBB, 0x0000047F, 0x000027E3, 0xFFFFEEBB, 0x0000047F },
+ { 0x0213EA94DE0E3864, 0x00003F76, 0xFFFFE128, 0x0000066E, 0x0000286B, 0xFFFFEE4C, 0x00000493, 0x0000286B, 0xFFFFEE4C, 0x00000493 },
+ { 0x0213EA94DE264104, 0x0000440D, 0xFFFFDCA2, 0x0000074F, 0x00003817, 0xFFFFE256, 0x000006AF, 0x00003817, 0xFFFFE256, 0x000006AF },
+ { 0x0213EA94DE105104, 0x00003EE1, 0xFFFFDFA7, 0x000006D4, 0x000027EA, 0xFFFFED2B, 0x000004DE, 0x000027EA, 0xFFFFED2B, 0x000004DE },
+ { 0x0213EA94DE2C3864, 0x00003C62, 0xFFFFE285, 0x0000064A, 0x00002520, 0xFFFFF001, 0x0000045C, 0x00002520, 0xFFFFF001, 0x0000045C },
+ { 0x0213EA94DE323964, 0x0000272E, 0xFFFFF17A, 0x000003FA, 0x0000150B, 0xFFFFFBD5, 0x00000284, 0x0000150B, 0xFFFFFBD5, 0x00000284 },
+ { 0x0213EA94DE261924, 0x00004275, 0xFFFFDF69, 0x000006A5, 0x000025AA, 0xFFFFF05C, 0x0000042B, 0x000025AA, 0xFFFFF05C, 0x0000042B },
+ { 0x0213EA94DE0E40E4, 0x00003CAA, 0xFFFFE392, 0x000005FF, 0x000023A8, 0xFFFFF20E, 0x000003E9, 0x000023A8, 0xFFFFF20E, 0x000003E9 },
+ { 0x0213EA94DE2C50C4, 0x00003CF8, 0xFFFFE0FB, 0x000006A6, 0x00002CA7, 0xFFFFE9FF, 0x0000056E, 0x00002CA7, 0xFFFFE9FF, 0x0000056E },
+ { 0x0213EA94DE282124, 0x00003D00, 0xFFFFE296, 0x00000633, 0x000021C1, 0xFFFFF2C8, 0x000003CF, 0x000021C1, 0xFFFFF2C8, 0x000003CF },
+ { 0x0213EA94DE2838E4, 0x00003B46, 0xFFFFE301, 0x00000632, 0x0000204C, 0xFFFFF33B, 0x000003C8, 0x0000204C, 0xFFFFF33B, 0x000003C8 },
+ { 0x0213EA94DE204164, 0x00002026, 0xFFFFF5CE, 0x00000368, 0x00001598, 0xFFFFFB29, 0x000002C3, 0x00001598, 0xFFFFFB29, 0x000002C3 },
+ { 0x0213EA94DE283164, 0x00003DCA, 0xFFFFE178, 0x00000668, 0x00001FDB, 0xFFFFF39D, 0x000003AF, 0x00001FDB, 0xFFFFF39D, 0x000003AF },
+ { 0x0213EA94DE2C48C4, 0x00003A59, 0xFFFFE327, 0x00000642, 0x000024B9, 0xFFFFEFC4, 0x00000471, 0x000024B9, 0xFFFFEFC4, 0x00000471 },
+ { 0x0213EA94DE2C2944, 0x00003C26, 0xFFFFE440, 0x000005EB, 0x00002C0F, 0xFFFFEC88, 0x000004E0, 0x00002C0F, 0xFFFFEC88, 0x000004E0 },
+ { 0x0213EA94DE083884, 0x00004149, 0xFFFFDEB8, 0x000006E7, 0x0000280A, 0xFFFFED89, 0x000004C2, 0x0000280A, 0xFFFFED89, 0x000004C2 },
+ { 0x0213EA94DE0E4124, 0x00003EB4, 0xFFFFE1E5, 0x0000064D, 0x0000299F, 0xFFFFEDB3, 0x000004A9, 0x0000299F, 0xFFFFEDB3, 0x000004A9 },
+ { 0x0213EA94DE2C39A4, 0x00003BBF, 0xFFFFE268, 0x0000065A, 0x00002504, 0xFFFFEFB0, 0x00000470, 0x00002504, 0xFFFFEFB0, 0x00000470 },
+ { 0x0213EA94DE084904, 0x00004203, 0xFFFFDDC6, 0x00000720, 0x0000303B, 0xFFFFE78F, 0x000005D0, 0x0000303B, 0xFFFFE78F, 0x000005D0 },
+ { 0x0213EA94DE0E3984, 0x00003DA3, 0xFFFFE244, 0x0000063E, 0x000021B4, 0xFFFFF2DA, 0x000003CD, 0x000021B4, 0xFFFFF2DA, 0x000003CD },
+ { 0x0213EA94DE0A38E4, 0x00004035, 0xFFFFE065, 0x0000069B, 0x00003323, 0xFFFFE6D6, 0x000005D8, 0x00003323, 0xFFFFE6D6, 0x000005D8 },
+ { 0x0213EA94DE2C1164, 0x00003944, 0xFFFFE4E5, 0x000005E2, 0x00001F3C, 0xFFFFF456, 0x0000039D, 0x00001F3C, 0xFFFFF456, 0x0000039D },
+ { 0x0213EA94DE061904, 0x000032D8, 0xFFFFEAE8, 0x000004E6, 0x00001812, 0xFFFFFA1C, 0x000002BC, 0x00001812, 0xFFFFFA1C, 0x000002BC },
+ { 0x0213F0FD42D22944, 0x000041F6, 0xFFFFE025, 0x0000069A, 0x0000241E, 0xFFFFF1B4, 0x00000402, 0x0000241E, 0xFFFFF1B4, 0x00000402 },
+ { 0x0213F0FE990C30A4, 0x00003300, 0xFFFFEB60, 0x000004C1, 0x00001E15, 0xFFFFF6A6, 0x0000033B, 0x00001E15, 0xFFFFF6A6, 0x0000033B },
+ { 0x0213EA94DE0408A4, 0x000037F0, 0xFFFFE68F, 0x0000059B, 0x00001F8A, 0xFFFFF467, 0x000003A3, 0x00001F8A, 0xFFFFF467, 0x000003A3 },
+ { 0x0213F0FE99182984, 0x000025D8, 0xFFFFF2AA, 0x000003C3, 0x000018A8, 0xFFFFF9BE, 0x000002D2, 0x000018A8, 0xFFFFF9BE, 0x000002D2 },
+ { 0x0213F0FE990620C4, 0x0000364F, 0xFFFFE988, 0x000004FC, 0x00001E51, 0xFFFFF633, 0x0000034F, 0x00001E51, 0xFFFFF633, 0x0000034F },
+ { 0x0213EA94DE061144, 0x00002288, 0xFFFFF483, 0x0000036C, 0x0000280F, 0xFFFFEF39, 0x0000047B, 0x0000280F, 0xFFFFEF39, 0x0000047B },
+ { 0x0213F0FE99082084, 0x00003322, 0xFFFFEA7E, 0x000004ED, 0x00001DAD, 0xFFFFF62B, 0x00000355, 0x00001DAD, 0xFFFFF62B, 0x00000355 },
+ { 0x0213EA94DE0250E4, 0x00002B7B, 0xFFFFEE4F, 0x0000045B, 0x00001AA2, 0xFFFFF710, 0x0000033E, 0x00001AA2, 0xFFFFF710, 0x0000033E },
+ { 0x0213F0FE990420C4, 0x000034CC, 0xFFFFEA79, 0x000004E4, 0x00001B05, 0xFFFFF8B3, 0x000002EC, 0x00001B05, 0xFFFFF8B3, 0x000002EC },
+ { 0x0213F0FD42DC2864, 0x00003837, 0xFFFFE5ED, 0x000005C3, 0x00001ACB, 0xFFFFF7B2, 0x00000314, 0x00001ACB, 0xFFFFF7B2, 0x00000314 },
+ { 0x0213F0FE99044164, 0x0000352D, 0xFFFFE88F, 0x00000548, 0x000021E6, 0xFFFFF3B5, 0x000003AA, 0x000021E6, 0xFFFFF3B5, 0x000003AA },
+ { 0x0213F0FE990A4884, 0x00003300, 0xFFFFE835, 0x0000057B, 0x00001A85, 0xFFFFF715, 0x00000336, 0x00001A85, 0xFFFFF715, 0x00000336 },
+ { 0x0213EA94DE0448A4, 0x000033FA, 0xFFFFE851, 0x00000565, 0x00001A8E, 0xFFFFF727, 0x0000033B, 0x00001A8E, 0xFFFFF727, 0x0000033B },
+ { 0x0213F0FD42DA3924, 0x000039D3, 0xFFFFE5D3, 0x000005B0, 0x00001888, 0xFFFFF978, 0x000002C8, 0x00001888, 0xFFFFF978, 0x000002C8 },
+ { 0x0213F0FE990E4864, 0x00002F6B, 0xFFFFEC53, 0x000004B9, 0x00001C15, 0xFFFFF71B, 0x00000337, 0x00001C15, 0xFFFFF71B, 0x00000337 },
+ { 0x0213F0FE99064144, 0x0000384D, 0xFFFFE737, 0x00000569, 0x00001D2D, 0xFFFFF673, 0x00000343, 0x00001D2D, 0xFFFFF673, 0x00000343 },
+ { 0x0213F0FE990620A4, 0x00003A49, 0xFFFFE70B, 0x0000055F, 0x00001A63, 0xFFFFF8CD, 0x000002E2, 0x00001A63, 0xFFFFF8CD, 0x000002E2 },
+ { 0x0213F0FE99042984, 0x0000311E, 0xFFFFEB97, 0x000004C6, 0x00001EAE, 0xFFFFF5A9, 0x00000367, 0x00001EAE, 0xFFFFF5A9, 0x00000367 },
+ { 0x0213F0FE990E1124, 0x000027D3, 0xFFFFF075, 0x00000417, 0x00002001, 0xFFFFF44A, 0x000003A2, 0x00002001, 0xFFFFF44A, 0x000003A2 },
+ { 0x0213F0FE99064904, 0x00003B72, 0xFFFFE4BD, 0x000005DC, 0x00001D76, 0xFFFFF606, 0x0000035A, 0x00001D76, 0xFFFFF606, 0x0000035A },
+ { 0x0213F0FE99101124, 0x00002E0F, 0xFFFFECA7, 0x000004AE, 0x00001DC6, 0xFFFFF5BF, 0x0000036A, 0x00001DC6, 0xFFFFF5BF, 0x0000036A },
+ { 0x0213F0FE990238A4, 0x000032C7, 0xFFFFEA7A, 0x000004F0, 0x00001A7B, 0xFFFFF827, 0x00000301, 0x00001A7B, 0xFFFFF827, 0x00000301 },
+ { 0x0213EA94DE044884, 0x0000312D, 0xFFFFEA39, 0x00000515, 0x00001948, 0xFFFFF800, 0x00000318, 0x00001948, 0xFFFFF800, 0x00000318 },
+ { 0x0213EA94DE062084, 0x00003611, 0xFFFFE8D7, 0x00000533, 0x00001929, 0xFFFFF965, 0x000002D2, 0x00001929, 0xFFFFF965, 0x000002D2 },
+ { 0x0213F0FE992C30E4, 0x00002FE2, 0xFFFFED89, 0x00000470, 0x00001A3C, 0xFFFFF955, 0x000002D5, 0x00001A3C, 0xFFFFF955, 0x000002D5 },
+ { 0x0213EA94DE0208A4, 0x000035FF, 0xFFFFE884, 0x00000548, 0x0000182A, 0xFFFFF9AB, 0x000002CF, 0x0000182A, 0xFFFFF9AB, 0x000002CF },
+ { 0x0213F0FE990220E4, 0x00003597, 0xFFFFE904, 0x00000528, 0x00001A94, 0xFFFFF840, 0x00000300, 0x00001A94, 0xFFFFF840, 0x00000300 },
+ { 0x0213F0FE99181944, 0x000026CB, 0xFFFFF1FB, 0x000003E4, 0x000017CC, 0xFFFFFA25, 0x000002C8, 0x000017CC, 0xFFFFFA25, 0x000002C8 },
+ { 0x0213EA94DE0608C4, 0x00003274, 0xFFFFEA39, 0x0000050C, 0x00001B20, 0xFFFFF7C1, 0x00000314, 0x00001B20, 0xFFFFF7C1, 0x00000314 },
+ { 0x0213F0FD42D82924, 0x0000280B, 0xFFFFF283, 0x000003B5, 0x000018D0, 0xFFFFF992, 0x000002EC, 0x000018D0, 0xFFFFF992, 0x000002EC },
+ { 0x0213F0FE99062104, 0x000033AB, 0xFFFFEB1B, 0x000004C4, 0x00001FEE, 0xFFFFF53A, 0x00000378, 0x00001FEE, 0xFFFFF53A, 0x00000378 },
+ { 0x0213F0FE990A3964, 0x00002F79, 0xFFFFEB0C, 0x000004FA, 0x00001E57, 0xFFFFF4BF, 0x0000039B, 0x00001E57, 0xFFFFF4BF, 0x0000039B },
+ { 0x0213F0FE990448E4, 0x00003487, 0xFFFFE8F2, 0x00000539, 0x0000185B, 0xFFFFF9AE, 0x000002BA, 0x0000185B, 0xFFFFF9AE, 0x000002BA },
+ { 0x0213F0FE990A18A4, 0x00003500, 0xFFFFE793, 0x0000058A, 0x00001AA2, 0xFFFFF792, 0x0000031D, 0x00001AA2, 0xFFFFF792, 0x0000031D },
+ { 0x0213F0FE99081164, 0x00003943, 0xFFFFE54D, 0x000005D9, 0x00001BC8, 0xFFFFF6E0, 0x00000339, 0x00001BC8, 0xFFFFF6E0, 0x00000339 },
+ { 0x0213EA94DE0430A4, 0x0000306D, 0xFFFFEC5E, 0x000004A5, 0x00001A3A, 0xFFFFF85F, 0x00000304, 0x00001A3A, 0xFFFFF85F, 0x00000304 },
+ { 0x0213F0FD42D83084, 0x00002BA4, 0xFFFFEE8D, 0x0000046A, 0x0000198C, 0xFFFFF88E, 0x00000307, 0x0000198C, 0xFFFFF88E, 0x00000307 },
+ { 0x0213F0FD42D218E4, 0x00003D30, 0xFFFFE2F6, 0x0000062A, 0x000025DC, 0xFFFFF074, 0x00000435, 0x000025DC, 0xFFFFF074, 0x00000435 },
+ { 0x0213F0FD42D83964, 0x00002CD6, 0xFFFFED79, 0x0000049B, 0x000016D0, 0xFFFFFA53, 0x000002BB, 0x000016D0, 0xFFFFFA53, 0x000002BB },
+ { 0x0213F0FE99163164, 0x00002484, 0xFFFFF3BD, 0x000003A0, 0x000015B8, 0xFFFFFB6B, 0x000002A4, 0x000015B8, 0xFFFFFB6B, 0x000002A4 },
+ { 0x0213F0FE990E3944, 0x000038AE, 0xFFFFE6D1, 0x00000587, 0x00001A2A, 0xFFFFF8F1, 0x000002D4, 0x00001A2A, 0xFFFFF8F1, 0x000002D4 },
+ { 0x0213F0FE99044944, 0x000036FD, 0xFFFFE76C, 0x00000576, 0x00001EE4, 0xFFFFF58D, 0x00000361, 0x00001EE4, 0xFFFFF58D, 0x00000361 },
+ { 0x0213F0FD42D830A4, 0x00002BCF, 0xFFFFEF28, 0x00000448, 0x00001B93, 0xFFFFF7BA, 0x00000327, 0x00001B93, 0xFFFFF7BA, 0x00000327 },
+ { 0x0213F0FE99062884, 0x00003834, 0xFFFFE818, 0x0000053B, 0x00001AFE, 0xFFFFF85C, 0x000002F3, 0x00001AFE, 0xFFFFF85C, 0x000002F3 },
+ { 0x0213F0FE993231A4, 0x00002EF7, 0xFFFFEBFC, 0x000004CE, 0x00001897, 0xFFFFF8EF, 0x000002EC, 0x00001897, 0xFFFFF8EF, 0x000002EC },
+ { 0x0213F0FE992C18C4, 0x000035BD, 0xFFFFE8BB, 0x0000053B, 0x00001F22, 0xFFFFF561, 0x00000373, 0x00001F22, 0xFFFFF561, 0x00000373 },
+ { 0x0213F0FE99183984, 0x00002D42, 0xFFFFEE1D, 0x00000478, 0x000016F0, 0xFFFFFAAE, 0x000002B3, 0x000016F0, 0xFFFFFAAE, 0x000002B3 },
+ { 0x0213EA94DE045124, 0x00002F98, 0xFFFFEB3C, 0x000004F0, 0x00001903, 0xFFFFF818, 0x00000319, 0x00001903, 0xFFFFF818, 0x00000319 },
+ { 0x0213F0FD42D42144, 0x00004081, 0xFFFFDF13, 0x000006F3, 0x00002A6D, 0xFFFFEC1B, 0x00000509, 0x00002A6D, 0xFFFFEC1B, 0x00000509 },
+ { 0x0213EA94DE040904, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001FF6, 0xFFFFF427, 0x000003B0, 0x00001FF6, 0xFFFFF427, 0x000003B0 },
+ { 0x0213F0FE99023884, 0x00003243, 0xFFFFEA5C, 0x000004FD, 0x000020FB, 0xFFFFF39E, 0x000003C0, 0x000020FB, 0xFFFFF39E, 0x000003C0 },
+ { 0x0213F0FD42D848A4, 0x00002F20, 0xFFFFEC19, 0x000004C6, 0x00001748, 0xFFFFF99F, 0x000002DA, 0x00001748, 0xFFFFF99F, 0x000002DA },
+ { 0x0213F0FE99103984, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001A43, 0xFFFFF843, 0x000002F9, 0x00001A43, 0xFFFFF843, 0x000002F9 },
+ { 0x0213F0FE990220A4, 0x0000396E, 0xFFFFE616, 0x000005A9, 0x00001A51, 0xFFFFF850, 0x000002FA, 0x00001A51, 0xFFFFF850, 0x000002FA },
+ { 0x0213F0FE99043144, 0x0000305C, 0xFFFFED4B, 0x0000046C, 0x00001CF9, 0xFFFFF7BA, 0x00000304, 0x00001CF9, 0xFFFFF7BA, 0x00000304 },
+ { 0x0213F0FD42DA4164, 0x0000343C, 0xFFFFE869, 0x00000559, 0x00001CE2, 0xFFFFF614, 0x00000359, 0x00001CE2, 0xFFFFF614, 0x00000359 },
+ { 0x0213F0FE99183964, 0x00002782, 0xFFFFF1FE, 0x000003D9, 0x000015DC, 0xFFFFFB8B, 0x00000290, 0x000015DC, 0xFFFFFB8B, 0x00000290 },
+ { 0x0213F0FE991818C4, 0x00002B9C, 0xFFFFEF63, 0x00000443, 0x00001369, 0xFFFFFD51, 0x00000244, 0x00001369, 0xFFFFFD51, 0x00000244 },
+ { 0x0213F0FE990A2084, 0x000035F8, 0xFFFFE743, 0x00000592, 0x000018D8, 0xFFFFF8EE, 0x000002E4, 0x000018D8, 0xFFFFF8EE, 0x000002E4 },
+ { 0x0213EA94DE062844, 0x00002B72, 0xFFFFEF1E, 0x0000043C, 0x00002647, 0xFFFFF092, 0x0000043E, 0x00002647, 0xFFFFF092, 0x0000043E },
+ { 0x0213F0FE99102184, 0x00002EC9, 0xFFFFEC5F, 0x000004B8, 0x000018B6, 0xFFFFF936, 0x000002D8, 0x000018B6, 0xFFFFF936, 0x000002D8 },
+ { 0x0213F0FE99064084, 0x000038A7, 0xFFFFE6AC, 0x00000589, 0x00001C42, 0xFFFFF70B, 0x00000329, 0x00001C42, 0xFFFFF70B, 0x00000329 },
+ { 0x0213F0FE993008A4, 0x00002F6B, 0xFFFFEBF6, 0x000004CF, 0x000018AE, 0xFFFFF928, 0x000002E3, 0x000018AE, 0xFFFFF928, 0x000002E3 },
+ { 0x0213F0FD42DA5104, 0x000029CD, 0xFFFFEEE1, 0x00000459, 0x00001AB5, 0xFFFFF76F, 0x00000324, 0x00001AB5, 0xFFFFF76F, 0x00000324 },
+ { 0x0213EA94DE0638C4, 0x00003921, 0xFFFFE71D, 0x00000577, 0x00001646, 0xFFFFFB24, 0x00000293, 0x00001646, 0xFFFFFB24, 0x00000293 },
+ { 0x0213EA94DE044164, 0x00003940, 0xFFFFE521, 0x000005E8, 0x00001947, 0xFFFFF839, 0x0000030D, 0x00001947, 0xFFFFF839, 0x0000030D },
+ { 0x0213F0FD42D24164, 0x00003DCA, 0xFFFFE211, 0x00000659, 0x0000250E, 0xFFFFF072, 0x00000443, 0x0000250E, 0xFFFFF072, 0x00000443 },
+ { 0x0213F0FE990C0904, 0x00002E95, 0xFFFFEC20, 0x000004C9, 0x000015B4, 0xFFFFFAD3, 0x0000029D, 0x000015B4, 0xFFFFFAD3, 0x0000029D },
+ { 0x0213F0FE99041084, 0x00002C11, 0xFFFFEE6E, 0x00000468, 0x00001901, 0xFFFFF924, 0x000002E7, 0x00001901, 0xFFFFF924, 0x000002E7 },
+ { 0x0213EA94DE062104, 0x0000293F, 0xFFFFF158, 0x000003E6, 0x0000183F, 0xFFFFF9F6, 0x000002D2, 0x0000183F, 0xFFFFF9F6, 0x000002D2 },
+ { 0x0213F0FE990E1104, 0x00002A67, 0xFFFFEF34, 0x0000043E, 0x00001C6F, 0xFFFFF6F1, 0x0000032B, 0x00001C6F, 0xFFFFF6F1, 0x0000032B },
+ { 0x0213EA94DE065124, 0x00002F8D, 0xFFFFEB77, 0x000004DA, 0x00001C0D, 0xFFFFF627, 0x00000365, 0x00001C0D, 0xFFFFF627, 0x00000365 },
+ { 0x0213F0FE990C38C4, 0x00003476, 0xFFFFEA5B, 0x000004E7, 0x00001DBF, 0xFFFFF6C7, 0x00000333, 0x00001DBF, 0xFFFFF6C7, 0x00000333 },
+ { 0x0213F0FE990E0944, 0x00003336, 0xFFFFE92F, 0x00000546, 0x00001614, 0xFFFFFAE0, 0x00000296, 0x00001614, 0xFFFFFAE0, 0x00000296 },
+ { 0x0213F0FE99162164, 0x00002513, 0xFFFFF323, 0x000003BC, 0x000016DB, 0xFFFFFA79, 0x000002CD, 0x000016DB, 0xFFFFFA79, 0x000002CD },
+ { 0x0213F0FE990A2944, 0x000035A7, 0xFFFFE78E, 0x00000584, 0x00001B0D, 0xFFFFF77D, 0x0000031F, 0x00001B0D, 0xFFFFF77D, 0x0000031F },
+ { 0x0213F0FE993238E4, 0x00003171, 0xFFFFEB98, 0x000004C6, 0x00001C76, 0xFFFFF71F, 0x0000032F, 0x00001C76, 0xFFFFF71F, 0x0000032F },
+ { 0x0213F0FD42DA1084, 0x00002C52, 0xFFFFED2E, 0x000004A7, 0x00002182, 0xFFFFF2F4, 0x000003E4, 0x00002182, 0xFFFFF2F4, 0x000003E4 },
+ { 0x0213F0FE99102924, 0x000032E1, 0xFFFFEB39, 0x000004D0, 0x00001B55, 0xFFFFF859, 0x000002FA, 0x00001B55, 0xFFFFF859, 0x000002FA },
+ { 0x0213F0FE991848A4, 0x000029B6, 0xFFFFEFF7, 0x00000430, 0x0000151B, 0xFFFFFBC6, 0x0000027F, 0x0000151B, 0xFFFFFBC6, 0x0000027F },
+ { 0x0213F0FD42DA1964, 0x00002FF7, 0xFFFFEB67, 0x000004DA, 0x000020E9, 0xFFFFF363, 0x000003CE, 0x000020E9, 0xFFFFF363, 0x000003CE },
+ { 0x0213F0FD42DA5124, 0x00003CDD, 0xFFFFE2B2, 0x00000649, 0x00001B18, 0xFFFFF739, 0x00000329, 0x00001B18, 0xFFFFF739, 0x00000329 },
+ { 0x0213F0FE990628A4, 0x00003C82, 0xFFFFE5C6, 0x0000058E, 0x00001F3F, 0xFFFFF5AD, 0x00000361, 0x00001F3F, 0xFFFFF5AD, 0x00000361 },
+ { 0x0213F0FD42DC4084, 0x0000319B, 0xFFFFEA15, 0x0000051B, 0x00001CC9, 0xFFFFF62E, 0x00000358, 0x00001CC9, 0xFFFFF62E, 0x00000358 },
+ { 0x0213EA94DE0638E4, 0x000032B6, 0xFFFFEB2B, 0x000004D6, 0x000018E0, 0xFFFFF966, 0x000002DE, 0x000018E0, 0xFFFFF966, 0x000002DE },
+ { 0x0213EA94DE023984, 0x0000300A, 0xFFFFEBA6, 0x000004D1, 0x00001CFD, 0xFFFFF5F6, 0x0000036D, 0x00001CFD, 0xFFFFF5F6, 0x0000036D },
+ { 0x0213F0FD42D82984, 0x000026A9, 0xFFFFF15D, 0x00000400, 0x00001561, 0xFFFFFB1F, 0x000002A0, 0x00001561, 0xFFFFFB1F, 0x000002A0 },
+ { 0x0213F0FE990E5124, 0x00003123, 0xFFFFEAD2, 0x000004FA, 0x000018CB, 0xFFFFF8F5, 0x000002EC, 0x000018CB, 0xFFFFF8F5, 0x000002EC },
+ { 0x0213F0FE991840C4, 0x00003577, 0xFFFFE935, 0x00000533, 0x000016CD, 0xFFFFFB44, 0x00000289, 0x000016CD, 0xFFFFFB44, 0x00000289 },
+ { 0x0213F0FE99282184, 0x00002875, 0xFFFFF170, 0x000003F3, 0x00001567, 0xFFFFFBD5, 0x00000289, 0x00001567, 0xFFFFFBD5, 0x00000289 },
+ { 0x0213F0FE99084084, 0x00003AE2, 0xFFFFE538, 0x000005C1, 0x00001CB4, 0xFFFFF6A3, 0x0000033C, 0x00001CB4, 0xFFFFF6A3, 0x0000033C },
+ { 0x0213F0FE990C38E4, 0x000031DF, 0xFFFFEC2A, 0x000004A3, 0x00001EF0, 0xFFFFF626, 0x00000352, 0x00001EF0, 0xFFFFF626, 0x00000352 },
+ { 0x0213F0FD42D25144, 0x00004A6A, 0xFFFFDB15, 0x00000758, 0x000027F3, 0xFFFFEEEE, 0x00000479, 0x000027F3, 0xFFFFEEEE, 0x00000479 },
+ { 0x0213EA94DE063904, 0x00002BB9, 0xFFFFEF5D, 0x00000433, 0x00001589, 0xFFFFFB57, 0x00000295, 0x00001589, 0xFFFFFB57, 0x00000295 },
+ { 0x0213F0FE99042164, 0x000033A0, 0xFFFFE98F, 0x00000528, 0x00001CB4, 0xFFFFF706, 0x0000032D, 0x00001CB4, 0xFFFFF706, 0x0000032D },
+ { 0x0213F0FE99163064, 0x0000248E, 0xFFFFF380, 0x000003AC, 0x000016EA, 0xFFFFFA6C, 0x000002CE, 0x000016EA, 0xFFFFFA6C, 0x000002CE },
+ { 0x0213F0FE990221A4, 0x00002FE2, 0xFFFFEB2F, 0x000004E9, 0x00001D4E, 0xFFFFF56B, 0x00000380, 0x00001D4E, 0xFFFFF56B, 0x00000380 },
+ { 0x0213F0FE990A2884, 0x00003283, 0xFFFFE9E7, 0x0000051D, 0x00000694, 0xFFFFFD32, 0x000003C3, 0x00000694, 0xFFFFFD32, 0x000003C3 },
+ { 0x0213F0FD42D850C4, 0x00002EE4, 0xFFFFEBFD, 0x000004D3, 0x0000151A, 0xFFFFFAF6, 0x000002A4, 0x0000151A, 0xFFFFFAF6, 0x000002A4 },
+ { 0x0213F0FD42DC18E4, 0x0000302D, 0xFFFFEB7F, 0x000004DA, 0x00001E6D, 0xFFFFF54B, 0x00000380, 0x00001E6D, 0xFFFFF54B, 0x00000380 },
+ { 0x0213F0FD42DA50C4, 0x000033DA, 0xFFFFE7FB, 0x0000057F, 0x00001DED, 0xFFFFF50E, 0x0000038D, 0x00001DED, 0xFFFFF50E, 0x0000038D },
+ { 0x0213F0FE992C4084, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001C3F, 0xFFFFF726, 0x0000032A, 0x00001C3F, 0xFFFFF726, 0x0000032A },
+ { 0x0213F0FE990831C4, 0x00003BBD, 0xFFFFE55C, 0x000005B8, 0x000019DB, 0xFFFFF8BB, 0x000002EF, 0x000019DB, 0xFFFFF8BB, 0x000002EF },
+ { 0x0213F0FE990E3884, 0x00002964, 0xFFFFF051, 0x0000040E, 0x000025CD, 0xFFFFF11B, 0x0000041F, 0x000025CD, 0xFFFFF11B, 0x0000041F },
+ { 0x0213F0FD42DC4884, 0x000033F5, 0xFFFFE863, 0x00000560, 0x00001BCE, 0xFFFFF689, 0x0000034B, 0x00001BCE, 0xFFFFF689, 0x0000034B },
+ { 0x0213F0FE990A2864, 0x00003294, 0xFFFFE924, 0x00000548, 0x00001D41, 0xFFFFF580, 0x0000037D, 0x00001D41, 0xFFFFF580, 0x0000037D },
+ { 0x0213F0FD42DC39A4, 0x000034FB, 0xFFFFE7FE, 0x0000056D, 0x00001CB1, 0xFFFFF635, 0x00000357, 0x00001CB1, 0xFFFFF635, 0x00000357 },
+ { 0x0213F0FE990A10A4, 0x00002E28, 0xFFFFEBB9, 0x000004E0, 0x00001B20, 0xFFFFF6E3, 0x0000033C, 0x00001B20, 0xFFFFF6E3, 0x0000033C },
+ { 0x0213F0FD42DA1904, 0x00002799, 0xFFFFF0F4, 0x000003FC, 0x00001C9D, 0xFFFFF6A1, 0x00000345, 0x00001C9D, 0xFFFFF6A1, 0x00000345 },
+ { 0x0213F0FE99064104, 0x00003AEA, 0xFFFFE5DB, 0x0000059D, 0x00001B61, 0xFFFFF7F0, 0x00000301, 0x00001B61, 0xFFFFF7F0, 0x00000301 },
+ { 0x0213EA94DE041984, 0x000031F6, 0xFFFFEAB8, 0x000004F3, 0x00001D90, 0xFFFFF622, 0x00000359, 0x00001D90, 0xFFFFF622, 0x00000359 },
+ { 0x0213F0FE990C4064, 0x000031B8, 0xFFFFEA61, 0x0000050F, 0x0000199D, 0xFFFFF87C, 0x000002FD, 0x0000199D, 0xFFFFF87C, 0x000002FD },
+ { 0x0213F0FD42D23144, 0x00004514, 0xFFFFDDFF, 0x000006F6, 0x000022CD, 0xFFFFF29F, 0x000003D9, 0x000022CD, 0xFFFFF29F, 0x000003D9 },
+ { 0x0213EA94DE043164, 0x00002F30, 0xFFFFECB8, 0x000004A0, 0x00001B07, 0xFFFFF7E2, 0x00000313, 0x00001B07, 0xFFFFF7E2, 0x00000313 },
+ { 0x0213F0FD42DC30A4, 0x0000383B, 0xFFFFE702, 0x00000581, 0x00001A08, 0xFFFFF8CA, 0x000002E2, 0x00001A08, 0xFFFFF8CA, 0x000002E2 },
+ { 0x0213F0FE99022164, 0x00002CC5, 0xFFFFEDF8, 0x00000465, 0x00001F47, 0xFFFFF4B2, 0x00000393, 0x00001F47, 0xFFFFF4B2, 0x00000393 },
+ { 0x0213F0FE991621C4, 0x00002304, 0xFFFFF453, 0x00000384, 0x0000170A, 0xFFFFFA3F, 0x000002CE, 0x0000170A, 0xFFFFFA3F, 0x000002CE },
+ { 0x0213F0FE990A5124, 0x0000337E, 0xFFFFE850, 0x0000056E, 0x00001BDD, 0xFFFFF668, 0x00000353, 0x00001BDD, 0xFFFFF668, 0x00000353 },
+ { 0x0213F0FE990E4924, 0x00002E2F, 0xFFFFEC9B, 0x000004AE, 0x00001C4D, 0xFFFFF6D3, 0x00000338, 0x00001C4D, 0xFFFFF6D3, 0x00000338 },
+ { 0x0213EA94DE061124, 0x00002DDD, 0xFFFFEDA4, 0x00000477, 0x00002010, 0xFFFFF4BB, 0x00000390, 0x00002010, 0xFFFFF4BB, 0x00000390 },
+ { 0x0213F0FD42DA48E4, 0x0000290C, 0xFFFFEF61, 0x00000445, 0x00002133, 0xFFFFF324, 0x000003D8, 0x00002133, 0xFFFFF324, 0x000003D8 },
+ { 0x0213F0FE99062924, 0x0000371E, 0xFFFFE8D5, 0x00000524, 0x00001C3A, 0xFFFFF7AE, 0x00000314, 0x00001C3A, 0xFFFFF7AE, 0x00000314 },
+ { 0x0213F0FD42D838E4, 0x00002A58, 0xFFFFF007, 0x00000429, 0x000018A6, 0xFFFFF98F, 0x000002E1, 0x000018A6, 0xFFFFF98F, 0x000002E1 },
+ { 0x0213F0FE99023084, 0x00002FED, 0xFFFFEC48, 0x000004AA, 0x00001E9D, 0xFFFFF584, 0x00000370, 0x00001E9D, 0xFFFFF584, 0x00000370 },
+ { 0x0213F0FE99181884, 0x00002829, 0xFFFFF15F, 0x000003F7, 0x0000157E, 0xFFFFFBD4, 0x00000282, 0x0000157E, 0xFFFFFBD4, 0x00000282 },
+ { 0x0213F0FE99101924, 0x000030CF, 0xFFFFEB8D, 0x000004CE, 0x00001A4C, 0xFFFFF868, 0x000002F7, 0x00001A4C, 0xFFFFF868, 0x000002F7 },
+ { 0x0213F0FD42DA2084, 0x00002C8F, 0xFFFFEDD2, 0x0000047D, 0x00001CCE, 0xFFFFF6A1, 0x00000343, 0x00001CCE, 0xFFFFF6A1, 0x00000343 },
+ { 0x0213F0FE99182164, 0x00002A84, 0xFFFFEFBA, 0x0000043E, 0x000015EF, 0xFFFFFB4B, 0x0000029E, 0x000015EF, 0xFFFFFB4B, 0x0000029E },
+ { 0x0213F0FE990C28A4, 0x000034CA, 0xFFFFEA08, 0x000004FF, 0x00001C19, 0xFFFFF7ED, 0x00000309, 0x00001C19, 0xFFFFF7ED, 0x00000309 },
+ { 0x0213F0FE991639A4, 0x00002187, 0xFFFFF4B0, 0x0000037E, 0x0000154A, 0xFFFFFB0C, 0x000002AE, 0x0000154A, 0xFFFFFB0C, 0x000002AE },
+ { 0x0213F0FD42DA3844, 0x00002F4F, 0xFFFFEB3C, 0x000004F8, 0x0000181F, 0xFFFFF92D, 0x000002DF, 0x0000181F, 0xFFFFF92D, 0x000002DF },
+ { 0x0213F0FE990410E4, 0x0000290C, 0xFFFFF0B1, 0x000003FC, 0x00001DB0, 0xFFFFF636, 0x00000355, 0x00001DB0, 0xFFFFF636, 0x00000355 },
+ { 0x0213F0FE990A1064, 0x000034C1, 0xFFFFE888, 0x0000055A, 0x000019BF, 0xFFFFF881, 0x000002FB, 0x000019BF, 0xFFFFF881, 0x000002FB },
+ { 0x0213F0FD42DC18C4, 0x00003139, 0xFFFFEA98, 0x00000504, 0x000019F2, 0xFFFFF820, 0x0000030B, 0x000019F2, 0xFFFFF820, 0x0000030B },
+ { 0x0213F0FD42D83144, 0x00002CAC, 0xFFFFEEB2, 0x00000458, 0x0000152C, 0xFFFFFBEF, 0x0000027B, 0x0000152C, 0xFFFFFBEF, 0x0000027B },
+ { 0x0213F0FE992C38E4, 0x00003577, 0xFFFFE99C, 0x0000050D, 0x00001E64, 0xFFFFF679, 0x0000033F, 0x00001E64, 0xFFFFF679, 0x0000033F },
+ { 0x0213F0FD42DA4104, 0x0000263A, 0xFFFFF1E4, 0x000003D4, 0x00001F68, 0xFFFFF4ED, 0x00000386, 0x00001F68, 0xFFFFF4ED, 0x00000386 },
+ { 0x0213F0FD42D81984, 0x00002CE9, 0xFFFFED63, 0x00000497, 0x00001810, 0xFFFFF94D, 0x000002E3, 0x00001810, 0xFFFFF94D, 0x000002E3 },
+ { 0x0213EA94DE044104, 0x0000318A, 0xFFFFEAC8, 0x000004F5, 0x0000195C, 0xFFFFF896, 0x000002FB, 0x0000195C, 0xFFFFF896, 0x000002FB },
+ { 0x0213F0FD42D83904, 0x00002C41, 0xFFFFEEC6, 0x0000045D, 0x000017DD, 0xFFFFFA16, 0x000002CB, 0x000017DD, 0xFFFFFA16, 0x000002CB },
+ { 0x0213F0FE990231A4, 0x00002DD4, 0xFFFFEC98, 0x000004AD, 0x00001BD7, 0xFFFFF69F, 0x00000347, 0x00001BD7, 0xFFFFF69F, 0x00000347 },
+ { 0x0213F0FD42DA3944, 0x00003351, 0xFFFFE9B2, 0x0000051A, 0x00001CA1, 0xFFFFF6A4, 0x00000341, 0x00001CA1, 0xFFFFF6A4, 0x00000341 },
+ { 0x0213F0FE99021104, 0x0000322D, 0xFFFFE9BE, 0x00000527, 0x00001CF9, 0xFFFFF5EB, 0x00000366, 0x00001CF9, 0xFFFFF5EB, 0x00000366 },
+ { 0x0213F0FE990C28C4, 0x00003678, 0xFFFFE9A8, 0x00000503, 0x00001AD4, 0xFFFFF8F6, 0x000002E3, 0x00001AD4, 0xFFFFF8F6, 0x000002E3 },
+ { 0x0213F0FE99161924, 0x0000260E, 0xFFFFF2C1, 0x000003CA, 0x00001139, 0xFFFFFE48, 0x00000236, 0x00001139, 0xFFFFFE48, 0x00000236 },
+ { 0x0213F0FE990A2164, 0x000033D3, 0xFFFFE872, 0x00000565, 0x00001B72, 0xFFFFF713, 0x00000332, 0x00001B72, 0xFFFFF713, 0x00000332 },
+ { 0x0213F0FE99323844, 0x0000309B, 0xFFFFEB42, 0x000004E4, 0x00001918, 0xFFFFF8C8, 0x000002F2, 0x00001918, 0xFFFFF8C8, 0x000002F2 },
+ { 0x0213F0FE99182864, 0x000028B8, 0xFFFFF105, 0x00000402, 0x000018BB, 0xFFFFF9BC, 0x000002D3, 0x000018BB, 0xFFFFF9BC, 0x000002D3 },
+ { 0x0213F0FE990A1884, 0x00003123, 0xFFFFE9D1, 0x00000534, 0x00001B19, 0xFFFFF6FE, 0x0000033C, 0x00001B19, 0xFFFFF6FE, 0x0000033C },
+ { 0x0213F0FE99022144, 0x00003216, 0xFFFFEA8E, 0x000004F6, 0x00001F72, 0xFFFFF4CE, 0x0000038B, 0x00001F72, 0xFFFFF4CE, 0x0000038B },
+ { 0x0213F0FE99162964, 0x00002564, 0xFFFFF32D, 0x000003B6, 0x00001685, 0xFFFFFADB, 0x000002BB, 0x00001685, 0xFFFFFADB, 0x000002BB },
+ { 0x0213F0FD42DA2924, 0x00002E60, 0xFFFFED13, 0x00000497, 0x00001CA5, 0xFFFFF6B9, 0x00000346, 0x00001CA5, 0xFFFFF6B9, 0x00000346 },
+ { 0x0213F0FE990E39A4, 0x0000336D, 0xFFFFE934, 0x0000053B, 0x00001B3E, 0xFFFFF763, 0x00000327, 0x00001B3E, 0xFFFFF763, 0x00000327 },
+ { 0x0213F0FE99101084, 0x0000274A, 0xFFFFF119, 0x000003FA, 0x00001D75, 0xFFFFF5CD, 0x0000036F, 0x00001D75, 0xFFFFF5CD, 0x0000036F },
+ { 0x0213F0FD42DA2164, 0x0000366B, 0xFFFFE70A, 0x0000059A, 0x00001ED8, 0xFFFFF501, 0x00000389, 0x00001ED8, 0xFFFFF501, 0x00000389 },
+ { 0x0213F0FE99223964, 0x00003164, 0xFFFFEAB4, 0x000004FA, 0x00001C52, 0xFFFFF6E0, 0x00000336, 0x00001C52, 0xFFFFF6E0, 0x00000336 },
+ { 0x0213F0FD42D23064, 0x00004224, 0xFFFFDF7F, 0x000006C1, 0x00002A52, 0xFFFFED5E, 0x000004BB, 0x00002A52, 0xFFFFED5E, 0x000004BB },
+ { 0x0213F0FE99102864, 0x000030E3, 0xFFFFEB07, 0x000004ED, 0x00001FD3, 0xFFFFF46D, 0x000003A1, 0x00001FD3, 0xFFFFF46D, 0x000003A1 },
+ { 0x0213F0FD42D82884, 0x00002AEB, 0xFFFFEF1B, 0x00000454, 0x00001829, 0xFFFFF995, 0x000002DD, 0x00001829, 0xFFFFF995, 0x000002DD },
+ { 0x0213F0FD42DC50E4, 0x0000346B, 0xFFFFE7A2, 0x0000058B, 0x000020C5, 0xFFFFF2E8, 0x000003EC, 0x000020C5, 0xFFFFF2E8, 0x000003EC },
+ { 0x0213F0FD42DC4164, 0x000039CF, 0xFFFFE5D7, 0x000005A9, 0x00001D66, 0xFFFFF5D6, 0x00000366, 0x00001D66, 0xFFFFF5D6, 0x00000366 },
+ { 0x0213F0FE990418E4, 0x000034AC, 0xFFFFE9AE, 0x00000515, 0x00001A28, 0xFFFFF904, 0x000002DC, 0x00001A28, 0xFFFFF904, 0x000002DC },
+ { 0x0213F0FD42DC2084, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001C6F, 0xFFFFF686, 0x0000034C, 0x00001C6F, 0xFFFFF686, 0x0000034C },
+ { 0x0213F0FE990820C4, 0x0000328B, 0xFFFFEBA1, 0x000004B4, 0x00001DA3, 0xFFFFF683, 0x00000349, 0x00001DA3, 0xFFFFF683, 0x00000349 },
+ { 0x0213F0FE991828C4, 0x000027DC, 0xFFFFF295, 0x000003BF, 0x000019C1, 0xFFFFF98E, 0x000002E8, 0x000019C1, 0xFFFFF98E, 0x000002E8 },
+ { 0x0213F0FE99184084, 0x00002756, 0xFFFFF1D7, 0x000003DF, 0x000015D9, 0xFFFFFB51, 0x00000298, 0x000015D9, 0xFFFFFB51, 0x00000298 },
+ { 0x0213F0FE99083884, 0x00003526, 0xFFFFE907, 0x00000526, 0x000017AB, 0xFFFFFA12, 0x000002AB, 0x000017AB, 0xFFFFFA12, 0x000002AB },
+ { 0x0213F0FD42DA18E4, 0x0000351B, 0xFFFFE8B7, 0x00000540, 0x00001A86, 0xFFFFF821, 0x00000303, 0x00001A86, 0xFFFFF821, 0x00000303 },
+ { 0x0213F0FE99164144, 0x000024B2, 0xFFFFF34E, 0x000003B1, 0x000018E2, 0xFFFFF926, 0x000002FC, 0x000018E2, 0xFFFFF926, 0x000002FC },
+ { 0x0213F0FD42D828A4, 0x00002F36, 0xFFFFED5D, 0x00000486, 0x0000157A, 0xFFFFFB85, 0x00000293, 0x0000157A, 0xFFFFFB85, 0x00000293 },
+ { 0x0213F0FD42DC50C4, 0x00003A6E, 0xFFFFE456, 0x000005FD, 0x00001F68, 0xFFFFF3D1, 0x000003C3, 0x00001F68, 0xFFFFF3D1, 0x000003C3 },
+ { 0x0213F0FE990A31A4, 0x00002BC3, 0xFFFFED2D, 0x000004A7, 0x00001C3F, 0xFFFFF609, 0x00000364, 0x00001C3F, 0xFFFFF609, 0x00000364 },
+ { 0x0213F0FE990E2084, 0x000032E1, 0xFFFFEA83, 0x000004F6, 0x00001B37, 0xFFFFF842, 0x000002F5, 0x00001B37, 0xFFFFF842, 0x000002F5 },
+ { 0x0213F0FD42D83184, 0x000028E3, 0xFFFFF07F, 0x00000412, 0x00001676, 0xFFFFFA68, 0x000002BE, 0x00001676, 0xFFFFFA68, 0x000002BE },
+ { 0x0213F0FD42D21104, 0x0000444C, 0xFFFFDDAD, 0x00000712, 0x00002634, 0xFFFFEF89, 0x0000046C, 0x00002634, 0xFFFFEF89, 0x0000046C },
+ { 0x0213F0FE990418C4, 0x00003121, 0xFFFFEBBB, 0x000004C6, 0x00001C98, 0xFFFFF72B, 0x0000032D, 0x00001C98, 0xFFFFF72B, 0x0000032D },
+ { 0x0213F0FD42D840A4, 0x00002C31, 0xFFFFEDC4, 0x00000490, 0x0000162D, 0xFFFFFA8E, 0x000002B4, 0x0000162D, 0xFFFFFA8E, 0x000002B4 },
+ { 0x0213F0FD42DA18C4, 0x00002749, 0xFFFFF112, 0x000003FC, 0x00001C85, 0xFFFFF6B8, 0x00000342, 0x00001C85, 0xFFFFF6B8, 0x00000342 },
+ { 0x0213F0FE99044104, 0x00003159, 0xFFFFEB99, 0x000004C2, 0x00001BD0, 0xFFFFF7CA, 0x00000307, 0x00001BD0, 0xFFFFF7CA, 0x00000307 },
+ { 0x0213F0FE99164164, 0x00002610, 0xFFFFF1FD, 0x000003EC, 0x000016BE, 0xFFFFFA53, 0x000002CB, 0x000016BE, 0xFFFFFA53, 0x000002CB },
+ { 0x0213F0FE99023184, 0x000037B5, 0xFFFFE63D, 0x000005B5, 0x00002285, 0xFFFFF25D, 0x000003F7, 0x00002285, 0xFFFFF25D, 0x000003F7 },
+ { 0x0213F0FE990A28A4, 0x00002FEE, 0xFFFFEB47, 0x000004EF, 0x00001CBE, 0xFFFFF64E, 0x00000358, 0x00001CBE, 0xFFFFF64E, 0x00000358 },
+ { 0x0213F0FE99105104, 0x00002E90, 0xFFFFEC48, 0x000004C0, 0x00001A47, 0xFFFFF7D1, 0x0000031A, 0x00001A47, 0xFFFFF7D1, 0x0000031A },
+ { 0x0213F0FD42DA4084, 0x000034AB, 0xFFFFE84A, 0x00000559, 0x00001A72, 0xFFFFF79A, 0x0000031C, 0x00001A72, 0xFFFFF79A, 0x0000031C },
+ { 0x0213F0FE99183884, 0x00002F7B, 0xFFFFECFC, 0x0000049C, 0x00001814, 0xFFFFFA22, 0x000002C2, 0x00001814, 0xFFFFFA22, 0x000002C2 },
+ { 0x0213F0FE99021964, 0x00003618, 0xFFFFE709, 0x00000596, 0x00001EBF, 0xFFFFF482, 0x000003A5, 0x00001EBF, 0xFFFFF482, 0x000003A5 },
+ { 0x0213EA94DE024904, 0x0000341B, 0xFFFFE8B2, 0x0000054F, 0x00001D26, 0xFFFFF578, 0x00000388, 0x00001D26, 0xFFFFF578, 0x00000388 },
+ { 0x0213F0FE99102144, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x000019C0, 0xFFFFF8CC, 0x000002E6, 0x000019C0, 0xFFFFF8CC, 0x000002E6 },
+ { 0x0213F0FE992841A4, 0x00002B76, 0xFFFFEF6C, 0x00000444, 0x00001563, 0xFFFFFBBE, 0x0000028D, 0x00001563, 0xFFFFFBBE, 0x0000028D },
+ { 0x0213F0FD42D81864, 0x00002BA2, 0xFFFFEE31, 0x0000047F, 0x00001A3D, 0xFFFFF7F3, 0x00000320, 0x00001A3D, 0xFFFFF7F3, 0x00000320 },
+ { 0x0213F0FE992C48E4, 0x00003545, 0xFFFFE87A, 0x0000054A, 0x00001B5A, 0xFFFFF7B0, 0x0000030C, 0x00001B5A, 0xFFFFF7B0, 0x0000030C },
+ { 0x0213EA94DE042944, 0x00003879, 0xFFFFE73F, 0x00000578, 0x00001649, 0xFFFFFB57, 0x00000283, 0x00001649, 0xFFFFFB57, 0x00000283 },
+ { 0x0213F0FD42D840C4, 0x00002772, 0xFFFFF0F1, 0x00000410, 0x0000142F, 0xFFFFFBCF, 0x00000287, 0x0000142F, 0xFFFFFBCF, 0x00000287 },
+ { 0x0213F0FD42DA3184, 0x00003228, 0xFFFFE98E, 0x00000535, 0x00001F48, 0xFFFFF495, 0x00000399, 0x00001F48, 0xFFFFF495, 0x00000399 },
+ { 0x0213F0FE990E40E4, 0x00002887, 0xFFFFF119, 0x000003E8, 0x000021AA, 0xFFFFF3F5, 0x000003A5, 0x000021AA, 0xFFFFF3F5, 0x000003A5 },
+ { 0x0213F0FD42DA28A4, 0x0000301F, 0xFFFFEBB2, 0x000004D2, 0x00001C02, 0xFFFFF736, 0x0000032B, 0x00001C02, 0xFFFFF736, 0x0000032B },
+ { 0x0213F0FE991820A4, 0x00002E13, 0xFFFFEE3F, 0x00000468, 0x000016AC, 0xFFFFFB32, 0x0000029E, 0x000016AC, 0xFFFFFB32, 0x0000029E },
+ { 0x0213F0FE99044924, 0x00003478, 0xFFFFE8F9, 0x00000538, 0x00001DAB, 0xFFFFF645, 0x00000345, 0x00001DAB, 0xFFFFF645, 0x00000345 },
+ { 0x0213F0FE990608C4, 0x000030C6, 0xFFFFEB6C, 0x000004D4, 0x0000184A, 0xFFFFF934, 0x000002E1, 0x0000184A, 0xFFFFF934, 0x000002E1 },
+ { 0x0213F0FE990A2044, 0x00002F1B, 0xFFFFEBD3, 0x000004D3, 0x000019E7, 0xFFFFF813, 0x0000030D, 0x000019E7, 0xFFFFF813, 0x0000030D },
+ { 0x0213F0FE99023904, 0x00003214, 0xFFFFEAE9, 0x000004E0, 0x0000178F, 0xFFFFFA1C, 0x000002B1, 0x0000178F, 0xFFFFFA1C, 0x000002B1 },
+ { 0x0213F0FD42DC3144, 0x0000399C, 0xFFFFE738, 0x0000055E, 0x00001EA1, 0xFFFFF5E7, 0x0000035A, 0x00001EA1, 0xFFFFF5E7, 0x0000035A },
+ { 0x0213F0FE990650C4, 0x00003A01, 0xFFFFE5B2, 0x000005B6, 0x00001D95, 0xFFFFF5D2, 0x0000036A, 0x00001D95, 0xFFFFF5D2, 0x0000036A },
+ { 0x0213F0FE99043884, 0x0000310D, 0xFFFFEB78, 0x000004D0, 0x00001C06, 0xFFFFF76E, 0x0000031A, 0x00001C06, 0xFFFFF76E, 0x0000031A },
+ { 0x0213F0FE99063864, 0x00003CD1, 0xFFFFE42F, 0x000005EB, 0x00001933, 0xFFFFF91F, 0x000002D4, 0x00001933, 0xFFFFF91F, 0x000002D4 },
+ { 0x0213F0FD42DA3164, 0x00003119, 0xFFFFEB1B, 0x000004E1, 0x00001FC7, 0xFFFFF46A, 0x000003A2, 0x00001FC7, 0xFFFFF46A, 0x000003A2 },
+ { 0x0213EA94DE0648A4, 0x0000390D, 0xFFFFE566, 0x000005D8, 0x00001EC6, 0xFFFFF4DC, 0x00000391, 0x00001EC6, 0xFFFFF4DC, 0x00000391 },
+ { 0x0213F0FD42DA10C4, 0x00003446, 0xFFFFE858, 0x00000561, 0x00001FDB, 0xFFFFF3FF, 0x000003B9, 0x00001FDB, 0xFFFFF3FF, 0x000003B9 },
+ { 0x0213F0FE99044904, 0x000032BA, 0xFFFFEA07, 0x00000511, 0x00001B25, 0xFFFFF7C9, 0x0000030D, 0x00001B25, 0xFFFFF7C9, 0x0000030D },
+ { 0x0213F0FE990E1864, 0x00002CCF, 0xFFFFEDE5, 0x00000478, 0x00001BC8, 0xFFFFF761, 0x00000326, 0x00001BC8, 0xFFFFF761, 0x00000326 },
+ { 0x0213F0FE99062984, 0x0000400E, 0xFFFFE1CB, 0x00000652, 0x00001AF8, 0xFFFFF7B9, 0x00000312, 0x00001AF8, 0xFFFFF7B9, 0x00000312 },
+ { 0x0213F0FE990408E4, 0x00002F24, 0xFFFFEC2A, 0x000004C7, 0x00001B94, 0xFFFFF748, 0x00000333, 0x00001B94, 0xFFFFF748, 0x00000333 },
+ { 0x0213F0FD42D21924, 0x00003FDA, 0xFFFFE1C1, 0x0000064B, 0x00002427, 0xFFFFF180, 0x0000040C, 0x00002427, 0xFFFFF180, 0x0000040C },
+ { 0x0213F0FE990A18C4, 0x00002F6B, 0xFFFFEBA7, 0x000004DD, 0x00001C25, 0xFFFFF6C1, 0x00000344, 0x00001C25, 0xFFFFF6C1, 0x00000344 },
+ { 0x0213F0FE99182104, 0x00002A53, 0xFFFFF0EE, 0x00000402, 0x000017C6, 0xFFFFFAA0, 0x000002BF, 0x000017C6, 0xFFFFFAA0, 0x000002BF },
+ { 0x0213F0FE99105144, 0x000031F4, 0xFFFFEA34, 0x00000517, 0x000016FF, 0xFFFFFA4E, 0x000002AC, 0x000016FF, 0xFFFFFA4E, 0x000002AC },
+ { 0x0213F0FE99322144, 0x00002E24, 0xFFFFED46, 0x00000489, 0x00001712, 0xFFFFFA5D, 0x000002AC, 0x00001712, 0xFFFFFA5D, 0x000002AC },
+ { 0x0213F0FE99182824, 0x000028CD, 0xFFFFF0E3, 0x0000040E, 0x00001606, 0xFFFFFB37, 0x000002A4, 0x00001606, 0xFFFFFB37, 0x000002A4 },
+ { 0x0213F0FE990220C4, 0x00003184, 0xFFFFEB88, 0x000004C3, 0x000018DA, 0xFFFFF939, 0x000002DB, 0x000018DA, 0xFFFFF939, 0x000002DB },
+ { 0x0213F0FE99162124, 0x0000239B, 0xFFFFF470, 0x00000386, 0x00001714, 0xFFFFFA9F, 0x000002C8, 0x00001714, 0xFFFFFA9F, 0x000002C8 },
+ { 0x0213F0FD42DC38E4, 0x00003641, 0xFFFFE92B, 0x00000515, 0x00001BE2, 0xFFFFF795, 0x0000031B, 0x00001BE2, 0xFFFFF795, 0x0000031B },
+ { 0x0213F0FE992C1144, 0x00003278, 0xFFFFEA17, 0x00000510, 0x00001B71, 0xFFFFF778, 0x0000031D, 0x00001B71, 0xFFFFF778, 0x0000031D },
+ { 0x0213F0FE99062844, 0x000035B9, 0xFFFFE8DA, 0x0000052D, 0x00001A6A, 0xFFFFF83B, 0x000002FF, 0x00001A6A, 0xFFFFF83B, 0x000002FF },
+ { 0x0213F0FE990E18C4, 0x00002E5E, 0xFFFFED32, 0x0000048B, 0x00001E7D, 0xFFFFF60E, 0x0000034E, 0x00001E7D, 0xFFFFF60E, 0x0000034E },
+ { 0x0213F0FE991019A4, 0x00003178, 0xFFFFEA52, 0x00000513, 0x00001AD0, 0xFFFFF793, 0x0000031F, 0x00001AD0, 0xFFFFF793, 0x0000031F },
+ { 0x0213F0FD42D44104, 0x00003A2C, 0xFFFFE346, 0x00000641, 0x000023D0, 0xFFFFF0CE, 0x00000433, 0x000023D0, 0xFFFFF0CE, 0x00000433 },
+ { 0x0213F0FD42D818C4, 0x000028FD, 0xFFFFF02A, 0x0000042B, 0x0000152B, 0xFFFFFB90, 0x00000289, 0x0000152B, 0xFFFFFB90, 0x00000289 },
+ { 0x0213F0FE990E3084, 0x000030DE, 0xFFFFEBDF, 0x000004BE, 0x00001CDC, 0xFFFFF747, 0x0000031C, 0x00001CDC, 0xFFFFF747, 0x0000031C },
+ { 0x0213F0FE99021944, 0x000036CB, 0xFFFFE6EE, 0x00000596, 0x00002096, 0xFFFFF3C2, 0x000003BB, 0x00002096, 0xFFFFF3C2, 0x000003BB },
+ { 0x0213F0FE990C48C4, 0x00003172, 0xFFFFEAC1, 0x000004F4, 0x00001C87, 0xFFFFF6CD, 0x00000337, 0x00001C87, 0xFFFFF6CD, 0x00000337 },
+ { 0x0213F0FD42D24864, 0x00004A18, 0xFFFFDB34, 0x00000758, 0x0000213C, 0xFFFFF3A2, 0x000003AC, 0x0000213C, 0xFFFFF3A2, 0x000003AC },
+ { 0x0213F0FE99022104, 0x000031F3, 0xFFFFEB73, 0x000004C6, 0x00001B23, 0xFFFFF7CB, 0x0000031A, 0x00001B23, 0xFFFFF7CB, 0x0000031A },
+ { 0x0213F0FE990A2924, 0x000031C0, 0xFFFFEABA, 0x000004F7, 0x00001A5A, 0xFFFFF845, 0x000002FF, 0x00001A5A, 0xFFFFF845, 0x000002FF },
+ { 0x0213F0FE99104944, 0x00003B77, 0xFFFFE3B3, 0x00000623, 0x00001BCA, 0xFFFFF6F8, 0x00000333, 0x00001BCA, 0xFFFFF6F8, 0x00000333 },
+ { 0x0213F0FE990A3944, 0x000035AF, 0xFFFFE76D, 0x00000588, 0x00001C16, 0xFFFFF6AB, 0x00000341, 0x00001C16, 0xFFFFF6AB, 0x00000341 },
+ { 0x0213EA94DE0438C4, 0x000032AD, 0xFFFFEA8E, 0x000004F8, 0x00001A3A, 0xFFFFF832, 0x0000030E, 0x00001A3A, 0xFFFFF832, 0x0000030E },
+ { 0x0213F0FE99104884, 0x00002E92, 0xFFFFEBD2, 0x000004DA, 0x00001E04, 0xFFFFF51E, 0x0000038A, 0x00001E04, 0xFFFFF51E, 0x0000038A },
+ { 0x0213F0FD42D440A4, 0x00003E57, 0xFFFFE0F7, 0x0000068F, 0x000021F1, 0xFFFFF1C6, 0x00000411, 0x000021F1, 0xFFFFF1C6, 0x00000411 },
+ { 0x0213F0FE990821A4, 0x00003598, 0xFFFFE8BB, 0x00000535, 0x00001B62, 0xFFFFF764, 0x00000326, 0x00001B62, 0xFFFFF764, 0x00000326 },
+ { 0x0213F0FE990A3884, 0x00002B15, 0xFFFFEDEC, 0x00000487, 0x00001E8B, 0xFFFFF4AB, 0x0000039F, 0x00001E8B, 0xFFFFF4AB, 0x0000039F },
+ { 0x0213EA94DE060904, 0x0000267E, 0xFFFFF1A7, 0x000003E1, 0x000021C1, 0xFFFFF2E9, 0x000003EA, 0x000021C1, 0xFFFFF2E9, 0x000003EA },
+ { 0x0213EA94DE0239A4, 0x00002ED7, 0xFFFFEC88, 0x000004A6, 0x00001DEC, 0xFFFFF57C, 0x00000378, 0x00001DEC, 0xFFFFF57C, 0x00000378 },
+ { 0x0213EA94DE0441A4, 0x00003365, 0xFFFFE946, 0x00000536, 0x000019E9, 0xFFFFF7E0, 0x0000031D, 0x000019E9, 0xFFFFF7E0, 0x0000031D },
+ { 0x0213F0FE991818E4, 0x000029A4, 0xFFFFF0FD, 0x000003FE, 0x0000163F, 0xFFFFFB68, 0x00000299, 0x0000163F, 0xFFFFFB68, 0x00000299 },
+ { 0x0213EA94DE021904, 0x0000348D, 0xFFFFE9F7, 0x00000509, 0x000017A0, 0xFFFFFA59, 0x000002B6, 0x000017A0, 0xFFFFFA59, 0x000002B6 },
+ { 0x0213F0FE990610C4, 0x00003144, 0xFFFFEB23, 0x000004D9, 0x00001C9B, 0xFFFFF664, 0x00000351, 0x00001C9B, 0xFFFFF664, 0x00000351 },
+ { 0x0213EA94DE0620E4, 0x00002E95, 0xFFFFEE1A, 0x00000463, 0x00001707, 0xFFFFFAB7, 0x000002B3, 0x00001707, 0xFFFFFAB7, 0x000002B3 },
+ { 0x0213F0FD42D41864, 0x0000489C, 0xFFFFDA43, 0x000007AC, 0x00002866, 0xFFFFED6B, 0x000004D0, 0x00002866, 0xFFFFED6B, 0x000004D0 },
+ { 0x0213F0FE99161844, 0x00002895, 0xFFFFF10A, 0x0000040A, 0x000013E9, 0xFFFFFC9F, 0x0000026E, 0x000013E9, 0xFFFFFC9F, 0x0000026E },
+ { 0x0213F0FE99061964, 0x000033A0, 0xFFFFE9B1, 0x00000510, 0x00001D96, 0xFFFFF5AE, 0x0000036F, 0x00001D96, 0xFFFFF5AE, 0x0000036F },
+ { 0x0213F0FE99083984, 0x0000327C, 0xFFFFEAEA, 0x000004DD, 0x00001D45, 0xFFFFF649, 0x00000356, 0x00001D45, 0xFFFFF649, 0x00000356 },
+ { 0x0213EA94DE0248A4, 0x000031DF, 0xFFFFE9AB, 0x0000052F, 0x000019C8, 0xFFFFF7B7, 0x00000321, 0x000019C8, 0xFFFFF7B7, 0x00000321 },
+ { 0x0213F0FE991640A4, 0x00002BCC, 0xFFFFEEF4, 0x0000045C, 0x000015CD, 0xFFFFFB58, 0x0000029E, 0x000015CD, 0xFFFFFB58, 0x0000029E },
+ { 0x0213F0FE990638E4, 0x00003534, 0xFFFFEA10, 0x000004EB, 0x00001BB6, 0xFFFFF7B9, 0x00000314, 0x00001BB6, 0xFFFFF7B9, 0x00000314 },
+ { 0x0213F0FE99041984, 0x00002F4F, 0xFFFFEC35, 0x000004B9, 0x0000205D, 0xFFFFF47F, 0x00000392, 0x0000205D, 0xFFFFF47F, 0x00000392 },
+ { 0x0213F0FE990C20A4, 0x00003295, 0xFFFFEB1C, 0x000004D6, 0x000019C1, 0xFFFFF931, 0x000002D5, 0x000019C1, 0xFFFFF931, 0x000002D5 },
+ { 0x0213F0FE99024144, 0x00003557, 0xFFFFE7F7, 0x00000568, 0x00002342, 0xFFFFF1F9, 0x00000405, 0x00002342, 0xFFFFF1F9, 0x00000405 },
+ { 0x0213F0FE990450C4, 0x00003487, 0xFFFFE872, 0x0000055D, 0x000019D7, 0xFFFFF823, 0x0000030C, 0x000019D7, 0xFFFFF823, 0x0000030C },
+ { 0x0213F0FE992C3944, 0x0000378F, 0xFFFFE7A6, 0x00000566, 0x00001875, 0xFFFFFA04, 0x000002AF, 0x00001875, 0xFFFFFA04, 0x000002AF },
+ { 0x0213EA94DE0230E4, 0x00002A67, 0xFFFFF157, 0x000003DD, 0x000017BD, 0xFFFFFA53, 0x000002D1, 0x000017BD, 0xFFFFFA53, 0x000002D1 },
+ { 0x0213F0FD42D220E4, 0x000030B5, 0xFFFFEB32, 0x000004D9, 0x00002129, 0xFFFFF38A, 0x000003BB, 0x00002129, 0xFFFFF38A, 0x000003BB },
+ { 0x0213F0FE990610A4, 0x00003786, 0xFFFFE703, 0x00000584, 0x00001D63, 0xFFFFF5DC, 0x00000367, 0x00001D63, 0xFFFFF5DC, 0x00000367 },
+ { 0x0213F0FD42DA20C4, 0x0000346A, 0xFFFFE93E, 0x0000052C, 0x00001B27, 0xFFFFF79D, 0x0000031F, 0x00001B27, 0xFFFFF79D, 0x0000031F },
+ { 0x0213F0FE990E3024, 0x0000294E, 0xFFFFF0A5, 0x00000409, 0x00001928, 0xFFFFF93B, 0x000002E6, 0x00001928, 0xFFFFF93B, 0x000002E6 },
+ { 0x0213F0FD42D410C4, 0x00003E09, 0xFFFFE0FF, 0x00000694, 0x000025A0, 0xFFFFEF0F, 0x0000048F, 0x000025A0, 0xFFFFEF0F, 0x0000048F },
+ { 0x0213F0FE990A2964, 0x00003197, 0xFFFFEA06, 0x00000520, 0x00001B42, 0xFFFFF73B, 0x0000032A, 0x00001B42, 0xFFFFF73B, 0x0000032A },
+ { 0x0213F0FE99161864, 0x000022CB, 0xFFFFF3FC, 0x000003A3, 0x00001449, 0xFFFFFBD0, 0x00000297, 0x00001449, 0xFFFFFBD0, 0x00000297 },
+ { 0x0213F0FD42D82944, 0x00002A79, 0xFFFFEFD2, 0x00000433, 0x00001585, 0xFFFFFB92, 0x0000028E, 0x00001585, 0xFFFFFB92, 0x0000028E },
+ { 0x0213F0FE990C4184, 0x00003249, 0xFFFFEA92, 0x000004F4, 0x000019CB, 0xFFFFF8CF, 0x000002E1, 0x000019CB, 0xFFFFF8CF, 0x000002E1 },
+ { 0x0213EA94DE0218A4, 0x00002CEA, 0xFFFFEE46, 0x00000463, 0x00001A5E, 0xFFFFF83C, 0x0000030D, 0x00001A5E, 0xFFFFF83C, 0x0000030D },
+ { 0x0213F0FD42DC5144, 0x00003AE2, 0xFFFFE422, 0x00000600, 0x00001C65, 0xFFFFF62F, 0x0000034B, 0x00001C65, 0xFFFFF62F, 0x0000034B },
+ { 0x0213F0FE99181184, 0x000026A0, 0xFFFFF1C2, 0x000003F8, 0x000010E5, 0xFFFFFE56, 0x0000022A, 0x000010E5, 0xFFFFFE56, 0x0000022A },
+ { 0x0213F0FE992829A4, 0x00002A7B, 0xFFFFF063, 0x00000417, 0x000016FC, 0xFFFFFAD7, 0x000002B1, 0x000016FC, 0xFFFFFAD7, 0x000002B1 },
+ { 0x0213F0FE993210C4, 0x00003092, 0xFFFFEAB9, 0x00000507, 0x00001AE3, 0xFFFFF783, 0x00000323, 0x00001AE3, 0xFFFFF783, 0x00000323 },
+ { 0x0213F0FE990438E4, 0x00003265, 0xFFFFEBE8, 0x000004AA, 0x00001D65, 0xFFFFF73F, 0x00000321, 0x00001D65, 0xFFFFF73F, 0x00000321 },
+ { 0x0213EA94DE023084, 0x00002F14, 0xFFFFECC2, 0x000004A4, 0x00001A8D, 0xFFFFF7F3, 0x0000031D, 0x00001A8D, 0xFFFFF7F3, 0x0000031D },
+ { 0x0213F0FD42DC10E4, 0x000035FB, 0xFFFFE6D3, 0x000005AC, 0x00001B19, 0xFFFFF712, 0x00000338, 0x00001B19, 0xFFFFF712, 0x00000338 },
+ { 0x0213F0FD42DA2124, 0x00003519, 0xFFFFE8CC, 0x0000053A, 0x00001A0F, 0xFFFFF86E, 0x000002F5, 0x00001A0F, 0xFFFFF86E, 0x000002F5 },
+ { 0x0213F0FE992C2144, 0x0000364C, 0xFFFFE879, 0x00000541, 0x00001A42, 0xFFFFF8BA, 0x000002E2, 0x00001A42, 0xFFFFF8BA, 0x000002E2 },
+ { 0x0213EA94DE0218C4, 0x000029BA, 0xFFFFF09A, 0x00000408, 0x00001986, 0xFFFFF8D9, 0x000002FE, 0x00001986, 0xFFFFF8D9, 0x000002FE },
+ { 0x0213F0FD42DA38E4, 0x00003507, 0xFFFFE961, 0x00000518, 0x00001B79, 0xFFFFF775, 0x00000325, 0x00001B79, 0xFFFFF775, 0x00000325 },
+ { 0x0213F0FD42DC3184, 0x00003AD5, 0xFFFFE415, 0x00000613, 0x00001CB4, 0xFFFFF66D, 0x00000348, 0x00001CB4, 0xFFFFF66D, 0x00000348 },
+ { 0x0213F0FE991640E4, 0x000023D1, 0xFFFFF42B, 0x0000038F, 0x00001546, 0xFFFFFBA0, 0x0000029F, 0x00001546, 0xFFFFFBA0, 0x0000029F },
+ { 0x0213F0FE990A1924, 0x0000399E, 0xFFFFE518, 0x000005E7, 0x00001990, 0xFFFFF871, 0x000002FB, 0x00001990, 0xFFFFF871, 0x000002FB },
+ { 0x0213F0FD42D82964, 0x00002EDE, 0xFFFFEC93, 0x000004B8, 0x0000152C, 0xFFFFFBB3, 0x0000027E, 0x0000152C, 0xFFFFFBB3, 0x0000027E },
+ { 0x0213EA94DE042964, 0x00003140, 0xFFFFEBC9, 0x000004BB, 0x000016BE, 0xFFFFFB0A, 0x00000288, 0x000016BE, 0xFFFFFB0A, 0x00000288 },
+ { 0x0213F0FE99064064, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x0000185D, 0xFFFFF95A, 0x000002D9, 0x0000185D, 0xFFFFF95A, 0x000002D9 },
+ { 0x0213F0FE99023844, 0x0000389C, 0xFFFFE65A, 0x000005A2, 0x0000195D, 0xFFFFF8C8, 0x000002E8, 0x0000195D, 0xFFFFF8C8, 0x000002E8 },
+ { 0x0213F0FE99042104, 0x0000362B, 0xFFFFE9EC, 0x000004F6, 0x00001605, 0xFFFFFC1C, 0x00000263, 0x00001605, 0xFFFFFC1C, 0x00000263 },
+ { 0x0213F0FE992A1964, 0x00002946, 0xFFFFF04F, 0x00000426, 0x000015BA, 0xFFFFFB2F, 0x000002A3, 0x000015BA, 0xFFFFFB2F, 0x000002A3 },
+ { 0x0213F0FE99082184, 0x0000368E, 0xFFFFE837, 0x0000054A, 0x000017D7, 0xFFFFF9EB, 0x000002BA, 0x000017D7, 0xFFFFF9EB, 0x000002BA },
+ { 0x0213F0FD42DA2844, 0x00002E74, 0xFFFFEBE8, 0x000004DA, 0x00001DD6, 0xFFFFF57E, 0x00000379, 0x00001DD6, 0xFFFFF57E, 0x00000379 },
+ { 0x0213F0FE99041944, 0x0000322D, 0xFFFFEAA8, 0x000004F5, 0x00001B55, 0xFFFFF7DD, 0x0000030B, 0x00001B55, 0xFFFFF7DD, 0x0000030B },
+ { 0x0213F0FE99181904, 0x00002A29, 0xFFFFF07B, 0x00000416, 0x00001671, 0xFFFFFB3E, 0x0000029F, 0x00001671, 0xFFFFFB3E, 0x0000029F },
+ { 0x0213F0FD42DA2104, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x00001815, 0xFFFFF9AE, 0x000002C9, 0x00001815, 0xFFFFF9AE, 0x000002C9 },
+ { 0x0213F0FE990E10E4, 0x0000265F, 0xFFFFF1CB, 0x000003D5, 0x00001ED2, 0xFFFFF539, 0x0000037A, 0x00001ED2, 0xFFFFF539, 0x0000037A },
+ { 0x0213F0FE99162184, 0x000027A8, 0xFFFFF10D, 0x00000413, 0x000014B5, 0xFFFFFBA1, 0x00000299, 0x000014B5, 0xFFFFFBA1, 0x00000299 },
+ { 0x0213F0FE99043064, 0x00002CEE, 0xFFFFEDF6, 0x00000476, 0x00001A99, 0xFFFFF83E, 0x00000305, 0x00001A99, 0xFFFFF83E, 0x00000305 },
+ { 0x0213F0FE990640C4, 0x0000346C, 0xFFFFEA17, 0x000004EF, 0x00001D38, 0xFFFFF69F, 0x0000033D, 0x00001D38, 0xFFFFF69F, 0x0000033D },
+ { 0x0213F0FD42DA2944, 0x00002DBB, 0xFFFFED35, 0x00000490, 0x000018C1, 0xFFFFF930, 0x000002DA, 0x000018C1, 0xFFFFF930, 0x000002DA },
+ { 0x0213F0FE99042924, 0x000038DF, 0xFFFFE8A7, 0x0000051E, 0x00001B59, 0xFFFFF915, 0x000002D3, 0x00001B59, 0xFFFFF915, 0x000002D3 },
+ { 0x0213F0FE99080944, 0x00003384, 0xFFFFE979, 0x00000524, 0x00001AF3, 0xFFFFF74C, 0x0000032F, 0x00001AF3, 0xFFFFF74C, 0x0000032F },
+ { 0x0213F0FE99181864, 0x0000258B, 0xFFFFF2AE, 0x000003CB, 0x0000190C, 0xFFFFF93E, 0x000002EF, 0x0000190C, 0xFFFFF93E, 0x000002EF },
+ { 0x0213F0FE99103884, 0x000034F1, 0xFFFFE84B, 0x0000055E, 0x00001CB8, 0xFFFFF670, 0x0000034A, 0x00001CB8, 0xFFFFF670, 0x0000034A },
+ { 0x0213F0FE990C2104, 0x000030FB, 0xFFFFECD2, 0x00000488, 0x00001BF4, 0xFFFFF821, 0x00000302, 0x00001BF4, 0xFFFFF821, 0x00000302 },
+ { 0x0213F0FE99063044, 0x000036A6, 0xFFFFE815, 0x00000556, 0x000018FD, 0xFFFFF925, 0x000002DF, 0x000018FD, 0xFFFFF925, 0x000002DF },
+ { 0x0213EA94DE023044, 0x0000302A, 0xFFFFEB79, 0x000004E0, 0x00001C11, 0xFFFFF694, 0x00000358, 0x00001C11, 0xFFFFF694, 0x00000358 },
+ { 0x0213F0FE99181124, 0x00002555, 0xFFFFF2C4, 0x000003CB, 0x000017E3, 0xFFFFFA1F, 0x000002CB, 0x000017E3, 0xFFFFFA1F, 0x000002CB },
+ { 0x0213F0FE990A3164, 0x000032A3, 0xFFFFE933, 0x00000544, 0x000019D3, 0xFFFFF81A, 0x00000306, 0x000019D3, 0xFFFFF81A, 0x00000306 },
+ { 0x0213F0FD42D85104, 0x00002B91, 0xFFFFED81, 0x000004A9, 0x0000158B, 0xFFFFFAB9, 0x000002AC, 0x0000158B, 0xFFFFFAB9, 0x000002AC },
+ { 0x0213F0FE990E20C4, 0x00003537, 0xFFFFE912, 0x0000052C, 0x00001C8A, 0xFFFFF754, 0x0000031B, 0x00001C8A, 0xFFFFF754, 0x0000031B },
+ { 0x0213EA94DE063184, 0x000032E1, 0xFFFFEA5A, 0x000004F9, 0x000017B4, 0xFFFFF9D9, 0x000002C2, 0x000017B4, 0xFFFFF9D9, 0x000002C2 },
+ { 0x0213F0FD42D210C4, 0x00003B76, 0xFFFFE330, 0x00000636, 0x000026FB, 0xFFFFEF06, 0x00000481, 0x000026FB, 0xFFFFEF06, 0x00000481 },
+ { 0x0213F0FE99042144, 0x0000320C, 0xFFFFEB84, 0x000004C3, 0x00001A3A, 0xFFFFF8E9, 0x000002DF, 0x00001A3A, 0xFFFFF8E9, 0x000002DF },
+ { 0x0213F0FE99023984, 0x0000317D, 0xFFFFEA1F, 0x00000515, 0x00002100, 0xFFFFF31B, 0x000003DD, 0x00002100, 0xFFFFF31B, 0x000003DD },
+ { 0x0213F0FD42D43164, 0x00003DCB, 0xFFFFE0B4, 0x000006B4, 0x00002160, 0xFFFFF269, 0x000003F0, 0x00002160, 0xFFFFF269, 0x000003F0 },
+ { 0x0213F0FE991618C4, 0x00002737, 0xFFFFF218, 0x000003E1, 0x000015B5, 0xFFFFFB8F, 0x0000029C, 0x000015B5, 0xFFFFFB8F, 0x0000029C },
+ { 0x0213EA94DE023184, 0x0000318F, 0xFFFFEB3F, 0x000004D8, 0x00001938, 0xFFFFF8E9, 0x000002EB, 0x00001938, 0xFFFFF8E9, 0x000002EB },
+ { 0x0213F0FE991048C4, 0x000031BD, 0xFFFFE9DE, 0x00000527, 0x000018A7, 0xFFFFF8CA, 0x000002ED, 0x000018A7, 0xFFFFF8CA, 0x000002ED },
+ { 0x0213F0FD42DA3884, 0x00002F77, 0xFFFFEC2F, 0x000004B4, 0x00001D25, 0xFFFFF61B, 0x0000035D, 0x00001D25, 0xFFFFF61B, 0x0000035D },
+ { 0x0213F0FE990E4904, 0x00002CCA, 0xFFFFEDB3, 0x0000047C, 0x00001FBD, 0xFFFFF4A7, 0x00000391, 0x00001FBD, 0xFFFFF4A7, 0x00000391 },
+ { 0x0213F0FD42D438A4, 0x00003FF6, 0xFFFFE058, 0x000006A2, 0x000024CD, 0xFFFFF026, 0x00000452, 0x000024CD, 0xFFFFF026, 0x00000452 },
+ { 0x0213F0FE990A38E4, 0x00003161, 0xFFFFEAC8, 0x000004F3, 0x00001BB6, 0xFFFFF72A, 0x0000032B, 0x00001BB6, 0xFFFFF72A, 0x0000032B },
+ { 0x0213F0FD42D838A4, 0x00002EA0, 0xFFFFECA6, 0x000004B7, 0x000018C2, 0xFFFFF94E, 0x000002E1, 0x000018C2, 0xFFFFF94E, 0x000002E1 },
+ { 0x0213F0FE99182184, 0x00002F62, 0xFFFFEC9E, 0x000004B8, 0x00001531, 0xFFFFFBCD, 0x00000285, 0x00001531, 0xFFFFFBCD, 0x00000285 },
+ { 0x0213F0FE990440A4, 0x00003013, 0xFFFFEBD6, 0x000004C2, 0x00001B01, 0xFFFFF802, 0x000002FF, 0x00001B01, 0xFFFFF802, 0x000002FF },
+ { 0x0213F0FE99183064, 0x00002972, 0xFFFFF08D, 0x00000417, 0x00001A32, 0xFFFFF8A4, 0x00000305, 0x00001A32, 0xFFFFF8A4, 0x00000305 },
+ { 0x0213F0FD42D820E4, 0x00002E95, 0xFFFFED94, 0x00000487, 0x00001529, 0xFFFFFC26, 0x00000271, 0x00001529, 0xFFFFFC26, 0x00000271 },
+ { 0x0213F0FE990A1084, 0x00002D6A, 0xFFFFEC79, 0x000004C1, 0x00001AE2, 0xFFFFF725, 0x00000337, 0x00001AE2, 0xFFFFF725, 0x00000337 },
+ { 0x0213F0FE99021884, 0x000036B4, 0xFFFFE704, 0x00000591, 0x00001E7E, 0xFFFFF51C, 0x00000383, 0x00001E7E, 0xFFFFF51C, 0x00000383 },
+ { 0x0213F0FE99041844, 0x00002A6F, 0xFFFFEF70, 0x00000443, 0x00001BAA, 0xFFFFF752, 0x00000336, 0x00001BAA, 0xFFFFF752, 0x00000336 },
+ { 0x0213F0FE99183944, 0x00002C66, 0xFFFFEF5F, 0x0000043A, 0x000019F7, 0xFFFFF931, 0x000002EC, 0x000019F7, 0xFFFFF931, 0x000002EC },
+ { 0x0213EA94DE0631C4, 0x00003852, 0xFFFFE6AB, 0x00000590, 0x000019C1, 0xFFFFF8B1, 0x000002E5, 0x000019C1, 0xFFFFF8B1, 0x000002E5 },
+ { 0x0213F0FD42DA3124, 0x00003521, 0xFFFFE932, 0x00000523, 0x000018A9, 0xFFFFF96B, 0x000002D0, 0x000018A9, 0xFFFFF96B, 0x000002D0 },
+ { 0x0213F0FE99062164, 0x000031B9, 0xFFFFEB36, 0x000004D0, 0x00001D65, 0xFFFFF612, 0x0000035D, 0x00001D65, 0xFFFFF612, 0x0000035D },
+ { 0x0213F0FD42D41064, 0x00003ED0, 0xFFFFE135, 0x00000679, 0x00002351, 0xFFFFF0FE, 0x00000433, 0x00002351, 0xFFFFF0FE, 0x00000433 },
+ { 0x0213F0FE990A20E4, 0x000033ED, 0xFFFFE91A, 0x00000541, 0x00001C93, 0xFFFFF6A0, 0x0000034A, 0x00001C93, 0xFFFFF6A0, 0x0000034A },
+ { 0x0213EA94DE021844, 0x0000356F, 0xFFFFE8F7, 0x00000530, 0x000016BF, 0xFFFFFA85, 0x000002AB, 0x000016BF, 0xFFFFFA85, 0x000002AB },
+ { 0x0213F0FE991840E4, 0x00002304, 0xFFFFF4F3, 0x00000364, 0x000017CC, 0xFFFFFA41, 0x000002CA, 0x000017CC, 0xFFFFFA41, 0x000002CA },
+ { 0x0213F0FE99161164, 0x00002887, 0xFFFFEFD7, 0x00000450, 0x00001474, 0xFFFFFB94, 0x00000299, 0x00001474, 0xFFFFFB94, 0x00000299 },
+ { 0x0213F0FE99063064, 0x00003D0B, 0xFFFFE416, 0x000005EF, 0x00001C7E, 0xFFFFF71D, 0x00000325, 0x00001C7E, 0xFFFFF71D, 0x00000325 },
+ { 0x0213F0FE990810E4, 0x00003185, 0xFFFFEAFA, 0x000004E4, 0x00001A12, 0xFFFFF83C, 0x00000303, 0x00001A12, 0xFFFFF83C, 0x00000303 },
+ { 0x0213F0FE990A1944, 0x00003032, 0xFFFFEAE6, 0x000004FC, 0x00001B2A, 0xFFFFF73F, 0x0000032B, 0x00001B2A, 0xFFFFF73F, 0x0000032B },
+ { 0x0213F0FD42D838C4, 0x00002691, 0xFFFFF22D, 0x000003D6, 0x00001700, 0xFFFFFA6E, 0x000002C0, 0x00001700, 0xFFFFFA6E, 0x000002C0 },
+ { 0x0213F0FE990218A4, 0x00002B2F, 0xFFFFEEC4, 0x0000044B, 0x0000215F, 0xFFFFF33F, 0x000003D2, 0x0000215F, 0xFFFFF33F, 0x000003D2 },
+ { 0x0213F0FE990A4184, 0x000034AA, 0xFFFFE706, 0x000005B1, 0x00001B28, 0xFFFFF6B5, 0x00000349, 0x00001B28, 0xFFFFF6B5, 0x00000349 },
+ { 0x0213F0FD42DA2964, 0x0000307E, 0xFFFFEB38, 0x000004E6, 0x00001A22, 0xFFFFF83F, 0x00000300, 0x00001A22, 0xFFFFF83F, 0x00000300 },
+ { 0x0213F0FE990618A4, 0x000038D6, 0xFFFFE6D8, 0x0000057C, 0x00001B24, 0xFFFFF7E4, 0x00000307, 0x00001B24, 0xFFFFF7E4, 0x00000307 },
+ { 0x0213F0FE99183044, 0x00002757, 0xFFFFF1E8, 0x000003DD, 0x000017F5, 0xFFFFFA15, 0x000002C8, 0x000017F5, 0xFFFFFA15, 0x000002C8 },
+ { 0x0213F0FE99083184, 0x000031FC, 0xFFFFEB3E, 0x000004CE, 0x00001B4C, 0xFFFFF7AD, 0x00000319, 0x00001B4C, 0xFFFFF7AD, 0x00000319 },
+ { 0x0213F0FE99301864, 0x00002933, 0xFFFFF073, 0x0000040E, 0x00001C3C, 0xFFFFF701, 0x0000033C, 0x00001C3C, 0xFFFFF701, 0x0000033C },
+ { 0x0213F0FD42D218A4, 0x000040BB, 0xFFFFE066, 0x0000069A, 0x0000257F, 0xFFFFF08A, 0x00000435, 0x0000257F, 0xFFFFF08A, 0x00000435 },
+ { 0x0213F0FE991010A4, 0x0000305B, 0xFFFFEB9B, 0x000004CB, 0x00001996, 0xFFFFF846, 0x00000308, 0x00001996, 0xFFFFF846, 0x00000308 },
+ { 0x0213F0FE99064884, 0x000039C0, 0xFFFFE5D3, 0x000005B0, 0x00001A8D, 0xFFFFF7DA, 0x00000313, 0x00001A8D, 0xFFFFF7DA, 0x00000313 },
+ { 0x0213EA94DE0210A4, 0x00002E23, 0xFFFFED3F, 0x0000048F, 0x0000189D, 0xFFFFF94C, 0x000002DE, 0x0000189D, 0xFFFFF94C, 0x000002DE },
+ { 0x0213EA94DE021984, 0x0000332B, 0xFFFFE9F1, 0x00000516, 0x000018E6, 0xFFFFF8FE, 0x000002EC, 0x000018E6, 0xFFFFF8FE, 0x000002EC },
+ { 0x0213F0FE990838C4, 0x000034A0, 0xFFFFEA44, 0x000004E4, 0x00001ECD, 0xFFFFF5B4, 0x00000364, 0x00001ECD, 0xFFFFF5B4, 0x00000364 },
+ { 0x0213F0FD42D24104, 0x0000448C, 0xFFFFDF34, 0x000006A8, 0x0000231C, 0xFFFFF286, 0x000003D9, 0x0000231C, 0xFFFFF286, 0x000003D9 },
+ { 0x0213EA94DE062144, 0x00002D8C, 0xFFFFEE65, 0x00000456, 0x000018B1, 0xFFFFF9C8, 0x000002C8, 0x000018B1, 0xFFFFF9C8, 0x000002C8 },
+ { 0x0213F0FE99061904, 0x00003527, 0xFFFFE9BF, 0x000004FD, 0x00001D23, 0xFFFFF69F, 0x00000342, 0x00001D23, 0xFFFFF69F, 0x00000342 },
+ { 0x0213F0FD42DC38A4, 0x00002C51, 0xFFFFEDC3, 0x00000483, 0x00001BE0, 0xFFFFF720, 0x0000032D, 0x00001BE0, 0xFFFFF720, 0x0000032D },
+ { 0x0213F0FE990A3044, 0x00002C6C, 0xFFFFECEB, 0x000004B7, 0x00001C86, 0xFFFFF5E7, 0x00000371, 0x00001C86, 0xFFFFF5E7, 0x00000371 },
+ { 0x0213F0FE99045144, 0x000037CF, 0xFFFFE6BE, 0x00000599, 0x000018CD, 0xFFFFF967, 0x000002C7, 0x000018CD, 0xFFFFF967, 0x000002C7 },
+ { 0x0213F0FE99103164, 0x00002E6F, 0xFFFFED1D, 0x0000048E, 0x00001ADC, 0xFFFFF7F4, 0x0000030E, 0x00001ADC, 0xFFFFF7F4, 0x0000030E },
+ { 0x0213F0FD42D42984, 0x00003FF3, 0xFFFFDF13, 0x000006F9, 0x000025BF, 0xFFFFEEEE, 0x00000497, 0x000025BF, 0xFFFFEEEE, 0x00000497 },
+ { 0x0213F0FD42DC5104, 0x00004135, 0xFFFFDF97, 0x000006CC, 0x00001D52, 0xFFFFF541, 0x00000383, 0x00001D52, 0xFFFFF541, 0x00000383 },
+ { 0x0213F0FD42DC20E4, 0x00002EA9, 0xFFFFEDDB, 0x0000045F, 0x0000197C, 0xFFFFF8E1, 0x000002F0, 0x0000197C, 0xFFFFF8E1, 0x000002F0 },
+ { 0x0213EA94DE043084, 0x0000345C, 0xFFFFE922, 0x00000532, 0x00001922, 0xFFFFF8C7, 0x000002F1, 0x00001922, 0xFFFFF8C7, 0x000002F1 },
+ { 0x0213F0FE99064124, 0x000035C4, 0xFFFFE8FE, 0x00000521, 0x00001C87, 0xFFFFF6F3, 0x00000330, 0x00001C87, 0xFFFFF6F3, 0x00000330 },
+ { 0x0213F0FD42D83164, 0x00002888, 0xFFFFF08A, 0x0000041E, 0x0000150F, 0xFFFFFB87, 0x00000291, 0x0000150F, 0xFFFFFB87, 0x00000291 },
+ { 0x0213F0FE990A1124, 0x000035E9, 0xFFFFE657, 0x000005CC, 0x00001BD6, 0xFFFFF664, 0x00000355, 0x00001BD6, 0xFFFFF664, 0x00000355 },
+ { 0x0213F0FE991648E4, 0x00002F94, 0xFFFFEBD0, 0x000004E5, 0x00001333, 0xFFFFFCA7, 0x00000266, 0x00001333, 0xFFFFFCA7, 0x00000266 },
+ { 0x0213F0FE99181964, 0x000029E7, 0xFFFFF009, 0x00000433, 0x0000144A, 0xFFFFFC37, 0x0000027D, 0x0000144A, 0xFFFFFC37, 0x0000027D },
+ { 0x0213F0FE992C1944, 0x00003418, 0xFFFFE979, 0x00000521, 0x00001D33, 0xFFFFF66B, 0x0000034A, 0x00001D33, 0xFFFFF66B, 0x0000034A },
+ { 0x0213EA94DE0440E4, 0x00003656, 0xFFFFE79D, 0x0000057A, 0x000017C2, 0xFFFFF992, 0x000002D4, 0x000017C2, 0xFFFFF992, 0x000002D4 },
+ { 0x0213F0FE990C40C4, 0x00002EB2, 0xFFFFECFE, 0x00000493, 0x00001F2A, 0xFFFFF543, 0x0000037B, 0x00001F2A, 0xFFFFF543, 0x0000037B },
+ { 0x0213F0FE99021124, 0x00002FC1, 0xFFFFEB3F, 0x000004E8, 0x00001CD0, 0xFFFFF5F7, 0x00000364, 0x00001CD0, 0xFFFFF5F7, 0x00000364 },
+ { 0x0213F0FE990C1124, 0x0000307B, 0xFFFFEB66, 0x000004DE, 0x00001953, 0xFFFFF8ED, 0x000002E4, 0x00001953, 0xFFFFF8ED, 0x000002E4 },
+ { 0x0213F0FD42DA1884, 0x00002CAA, 0xFFFFED07, 0x000004AC, 0x0000251C, 0xFFFFF086, 0x0000044D, 0x0000251C, 0xFFFFF086, 0x0000044D },
+ { 0x0213EA94DE043944, 0x00002C94, 0xFFFFEE5F, 0x0000045B, 0x000018D7, 0xFFFFF900, 0x000002EB, 0x000018D7, 0xFFFFF900, 0x000002EB },
+ { 0x0213F0FE99021864, 0x000031F1, 0xFFFFE9BE, 0x0000052E, 0x00001DDF, 0xFFFFF558, 0x00000380, 0x00001DDF, 0xFFFFF558, 0x00000380 },
+ { 0x0213F0FE990E50C4, 0x00002603, 0xFFFFF1E9, 0x000003DA, 0x00001B37, 0xFFFFF75A, 0x0000032F, 0x00001B37, 0xFFFFF75A, 0x0000032F },
+ { 0x0213F0FD42DA3044, 0x00003992, 0xFFFFE4F9, 0x000005EB, 0x00001775, 0xFFFFF9B8, 0x000002C2, 0x00001775, 0xFFFFF9B8, 0x000002C2 },
+ { 0x0213F0FE99184964, 0x000029DA, 0xFFFFF052, 0x0000041F, 0x000016E2, 0xFFFFFA99, 0x000002BB, 0x000016E2, 0xFFFFFA99, 0x000002BB },
+ { 0x0213F0FE99101064, 0x00002FF2, 0xFFFFEB8F, 0x000004DF, 0x00001AF6, 0xFFFFF7A1, 0x00000321, 0x00001AF6, 0xFFFFF7A1, 0x00000321 },
+ { 0x0213F0FE991608E4, 0x00002590, 0xFFFFF222, 0x000003EE, 0x0000130B, 0xFFFFFCC9, 0x00000268, 0x0000130B, 0xFFFFFCC9, 0x00000268 },
+ { 0x0213F0FE99024064, 0x000038A2, 0xFFFFE65F, 0x000005A2, 0x000018B1, 0xFFFFF917, 0x000002E1, 0x000018B1, 0xFFFFF917, 0x000002E1 },
+ { 0x0213F0FD42DC48E4, 0x000035FD, 0xFFFFE73C, 0x0000058D, 0x00001BB3, 0xFFFFF6E1, 0x00000337, 0x00001BB3, 0xFFFFF6E1, 0x00000337 },
+ { 0x0213F0FE991038C4, 0x00002AB7, 0xFFFFEF98, 0x00000429, 0x00001F35, 0xFFFFF539, 0x0000037C, 0x00001F35, 0xFFFFF539, 0x0000037C },
+ { 0x0213F0FE990A0944, 0x000034BA, 0xFFFFE73D, 0x000005A6, 0x000018A6, 0xFFFFF888, 0x000002FB, 0x000018A6, 0xFFFFF888, 0x000002FB },
+ { 0x0213F0FE99063844, 0x000032EA, 0xFFFFEA78, 0x000004F4, 0x00001AB6, 0xFFFFF812, 0x00000308, 0x00001AB6, 0xFFFFF812, 0x00000308 },
+ { 0x0213F0FE990C3044, 0x00002BE9, 0xFFFFEE9A, 0x00000457, 0x00001942, 0xFFFFF8D2, 0x000002F2, 0x00001942, 0xFFFFF8D2, 0x000002F2 },
+ { 0x0213F0FE99105124, 0x00002FAB, 0xFFFFEB76, 0x000004E1, 0x00001DCA, 0xFFFFF57D, 0x00000378, 0x00001DCA, 0xFFFFF57D, 0x00000378 },
+ { 0x0213F0FE992E2844, 0x0000330A, 0xFFFFE9E1, 0x0000051B, 0x00001CC4, 0xFFFFF6DF, 0x00000335, 0x00001CC4, 0xFFFFF6DF, 0x00000335 },
+ { 0x0213F0FE991828A4, 0x000027D8, 0xFFFFF276, 0x000003BF, 0x0000178A, 0xFFFFFABF, 0x000002B5, 0x0000178A, 0xFFFFFABF, 0x000002B5 },
+ { 0x0213F0FD42DC3864, 0x0000340A, 0xFFFFE86D, 0x00000562, 0x00001B85, 0xFFFFF719, 0x0000032F, 0x00001B85, 0xFFFFF719, 0x0000032F },
+ { 0x0213EA94DE063084, 0x00003879, 0xFFFFE73F, 0x00000578, 0x0000161C, 0xFFFFFB6B, 0x00000281, 0x0000161C, 0xFFFFFB6B, 0x00000281 },
+ { 0x0213F0FE99184064, 0x00002879, 0xFFFFF0F8, 0x0000040A, 0x00001749, 0xFFFFFA37, 0x000002CC, 0x00001749, 0xFFFFFA37, 0x000002CC },
+ { 0x0213F0FE99043964, 0x00002C3A, 0xFFFFEEA0, 0x0000044F, 0x00001D57, 0xFFFFF6C2, 0x00000332, 0x00001D57, 0xFFFFF6C2, 0x00000332 },
+ { 0x0213EA94DE021964, 0x000035BB, 0xFFFFE90D, 0x0000052A, 0x000017D9, 0xFFFFF9F5, 0x000002C3, 0x000017D9, 0xFFFFF9F5, 0x000002C3 },
+ { 0x0213EA94DE041124, 0x000031F1, 0xFFFFEAD4, 0x000004ED, 0x00001F10, 0xFFFFF539, 0x0000037D, 0x00001F10, 0xFFFFF539, 0x0000037D },
+ { 0x0213F0FE99102824, 0x00002A1A, 0xFFFFEFAD, 0x00000430, 0x00001D47, 0xFFFFF62F, 0x0000035E, 0x00001D47, 0xFFFFF62F, 0x0000035E },
+ { 0x0213F0FE99164924, 0x00002AF0, 0xFFFFEEDC, 0x00000465, 0x0000145F, 0xFFFFFBEB, 0x00000281, 0x0000145F, 0xFFFFFBEB, 0x00000281 },
+ { 0x0213F0FE99183164, 0x00002657, 0xFFFFF2E0, 0x000003B6, 0x00001664, 0xFFFFFB37, 0x000002A2, 0x00001664, 0xFFFFFB37, 0x000002A2 },
+ { 0x0213F0FD42D03864, 0x00003183, 0xFFFFE9F1, 0x0000052B, 0x00002020, 0xFFFFF3CE, 0x000003C1, 0x00002020, 0xFFFFF3CE, 0x000003C1 },
+ { 0x0213F0FD42C628E4, 0x00003240, 0xFFFFEB65, 0x000004C7, 0x00002425, 0xFFFFF245, 0x000003F3, 0x00002425, 0xFFFFF245, 0x000003F3 },
+ { 0x0213EA94DE321104, 0x000023D0, 0xFFFFF400, 0x00000397, 0x00001345, 0xFFFFFD6B, 0x00000241, 0x00001345, 0xFFFFFD6B, 0x00000241 },
+ { 0x0213F0FD42CE38A4, 0x00003440, 0xFFFFE872, 0x0000055B, 0x00002247, 0xFFFFF296, 0x000003E8, 0x00002247, 0xFFFFF296, 0x000003E8 },
+ { 0x0213F0FD42D04904, 0x00003275, 0xFFFFE970, 0x00000538, 0x00001F94, 0xFFFFF429, 0x000003AD, 0x00001F94, 0xFFFFF429, 0x000003AD },
+ { 0x0213F0FD42C640A4, 0x00003918, 0xFFFFE5DA, 0x000005B6, 0x000024FC, 0xFFFFF106, 0x00000426, 0x000024FC, 0xFFFFF106, 0x00000426 },
+ { 0x0213EA94DE062044, 0x0000334B, 0xFFFFEA39, 0x000004FD, 0x00001983, 0xFFFFF8F6, 0x000002E2, 0x00001983, 0xFFFFF8F6, 0x000002E2 },
+ { 0x0213F0FD42C64984, 0x00003B59, 0xFFFFE4D0, 0x000005DA, 0x00002605, 0xFFFFF090, 0x00000439, 0x00002605, 0xFFFFF090, 0x00000439 },
+ { 0x0213F0FD42D03124, 0x00003251, 0xFFFFEA46, 0x00000511, 0x00002781, 0xFFFFEF84, 0x00000470, 0x00002781, 0xFFFFEF84, 0x00000470 },
+ { 0x0213F0FD42CA3164, 0x00003304, 0xFFFFE926, 0x00000542, 0x00001EE9, 0xFFFFF4E4, 0x0000038B, 0x00001EE9, 0xFFFFF4E4, 0x0000038B },
+ { 0x0213F0FD42CC38C4, 0x00002F4C, 0xFFFFEC0C, 0x000004C4, 0x00001E49, 0xFFFFF578, 0x00000374, 0x00001E49, 0xFFFFF578, 0x00000374 },
+ { 0x0213EA94DE1C2164, 0x00002034, 0xFFFFF692, 0x0000034C, 0x000014B8, 0xFFFFFC5B, 0x00000294, 0x000014B8, 0xFFFFFC5B, 0x00000294 },
+ { 0x0213F0FD42CE4924, 0x0000385F, 0xFFFFE513, 0x000005F3, 0x000024E7, 0xFFFFF053, 0x00000450, 0x000024E7, 0xFFFFF053, 0x00000450 },
+ { 0x0213EA94DE1C40E4, 0x00001D70, 0xFFFFF821, 0x0000030F, 0x00001541, 0xFFFFFBB4, 0x000002B0, 0x00001541, 0xFFFFFBB4, 0x000002B0 },
+ { 0x0213F0FD42D02084, 0x000034EB, 0xFFFFE7FF, 0x00000575, 0x000019B4, 0xFFFFF836, 0x00000308, 0x000019B4, 0xFFFFF836, 0x00000308 },
+ { 0x0213F0FD42D050E4, 0x000037C9, 0xFFFFE5D4, 0x000005CD, 0x000026A1, 0xFFFFEF0C, 0x00000491, 0x000026A1, 0xFFFFEF0C, 0x00000491 },
+ { 0x0213EA94DE121944, 0x00002918, 0xFFFFF148, 0x000003E9, 0x00001A49, 0xFFFFF94C, 0x000002CF, 0x00001A49, 0xFFFFF94C, 0x000002CF },
+ { 0x0213F0FD42CA4064, 0x00002F90, 0xFFFFEAB5, 0x00000514, 0x00001707, 0xFFFFF9C7, 0x000002C4, 0x00001707, 0xFFFFF9C7, 0x000002C4 },
+ { 0x0213EA94DE062064, 0x0000327E, 0xFFFFEA99, 0x000004F4, 0x0000194F, 0xFFFFF929, 0x000002DC, 0x0000194F, 0xFFFFF929, 0x000002DC },
+ { 0x0213F0FD42C64084, 0x0000326F, 0xFFFFE9CF, 0x00000519, 0x00002240, 0xFFFFF299, 0x000003E7, 0x00002240, 0xFFFFF299, 0x000003E7 },
+ { 0x0213EA94DE321124, 0x000022FB, 0xFFFFF4C6, 0x00000371, 0x00001506, 0xFFFFFC73, 0x00000265, 0x00001506, 0xFFFFFC73, 0x00000265 },
+ { 0x0213F0FD42CA3924, 0x00003AD6, 0xFFFFE470, 0x000005FE, 0x00001F03, 0xFFFFF4F3, 0x00000387, 0x00001F03, 0xFFFFF4F3, 0x00000387 },
+ { 0x0213EA94DE201124, 0x00001F11, 0xFFFFF756, 0x00000332, 0x00001666, 0xFFFFFB8A, 0x000002B2, 0x00001666, 0xFFFFFB8A, 0x000002B2 },
+ { 0x0213EA94DE0238A4, 0x00002A5F, 0xFFFFEFA7, 0x00000430, 0x00001943, 0xFFFFF8C6, 0x000002F7, 0x00001943, 0xFFFFF8C6, 0x000002F7 },
+ { 0x0213EA94DE1650E4, 0x0000235E, 0xFFFFF3B4, 0x000003B3, 0x00001489, 0xFFFFFBCF, 0x0000029B, 0x00001489, 0xFFFFFBCF, 0x0000029B },
+ { 0x0213F0FD42CC38A4, 0x00003570, 0xFFFFE780, 0x0000058D, 0x00001B1D, 0xFFFFF767, 0x00000325, 0x00001B1D, 0xFFFFF767, 0x00000325 },
+ { 0x0213EA94DE042064, 0x00003678, 0xFFFFE7C3, 0x00000569, 0x00001831, 0xFFFFF98E, 0x000002C8, 0x00001831, 0xFFFFF98E, 0x000002C8 },
+ { 0x0213EA94DE201864, 0x000020B9, 0xFFFFF625, 0x0000035A, 0x000015C5, 0xFFFFFB8A, 0x000002B5, 0x000015C5, 0xFFFFFB8A, 0x000002B5 },
+ { 0x0213F0FD42C63184, 0x00003985, 0xFFFFE529, 0x000005DD, 0x00002165, 0xFFFFF351, 0x000003C5, 0x00002165, 0xFFFFF351, 0x000003C5 },
+ { 0x0213F0FD42D02064, 0x0000322A, 0xFFFFE99D, 0x00000535, 0x000019A1, 0xFFFFF844, 0x00000305, 0x000019A1, 0xFFFFF844, 0x00000305 },
+ { 0x0213F0FD42D05104, 0x000033ED, 0xFFFFE834, 0x00000571, 0x00002094, 0xFFFFF33A, 0x000003DB, 0x00002094, 0xFFFFF33A, 0x000003DB },
+ { 0x0213EA94DE2040C4, 0x00001D10, 0xFFFFF84D, 0x0000030B, 0x00001659, 0xFFFFFB0A, 0x000002CB, 0x00001659, 0xFFFFFB0A, 0x000002CB },
+ { 0x0213EA94DE1C1124, 0x0000210F, 0xFFFFF644, 0x00000355, 0x00001A4A, 0xFFFFF90F, 0x00000310, 0x00001A4A, 0xFFFFF90F, 0x00000310 },
+ { 0x0213EA94DE164164, 0x00001CA8, 0xFFFFF813, 0x00000316, 0x00001440, 0xFFFFFC1C, 0x0000029D, 0x00001440, 0xFFFFFC1C, 0x0000029D },
+ { 0x0213EA94DE3210C4, 0x00002864, 0xFFFFF15A, 0x000003FA, 0x0000137F, 0xFFFFFD43, 0x00000248, 0x0000137F, 0xFFFFFD43, 0x00000248 },
+ { 0x0213F0FD42D04184, 0x00002CDB, 0xFFFFECFD, 0x000004A7, 0x00002472, 0xFFFFF0E1, 0x00000437, 0x00002472, 0xFFFFF0E1, 0x00000437 },
+ { 0x0213F0FD42CC5104, 0x00003348, 0xFFFFE8CA, 0x00000554, 0x00001E91, 0xFFFFF4D4, 0x00000392, 0x00001E91, 0xFFFFF4D4, 0x00000392 },
+ { 0x0213F0FD42C64944, 0x00003989, 0xFFFFE4BB, 0x000005F8, 0x00001ACB, 0xFFFFF780, 0x00000319, 0x00001ACB, 0xFFFFF780, 0x00000319 },
+ { 0x0213F0FD42CA2104, 0x00003238, 0xFFFFEA09, 0x0000051E, 0x00001F08, 0xFFFFF4F4, 0x0000038C, 0x00001F08, 0xFFFFF4F4, 0x0000038C },
+ { 0x0213EA94DE120904, 0x00002453, 0xFFFFF3B0, 0x0000038D, 0x00001AED, 0xFFFFF8A2, 0x000002EA, 0x00001AED, 0xFFFFF8A2, 0x000002EA },
+ { 0x0213EA94DE1C3024, 0x00002459, 0xFFFFF409, 0x000003A8, 0x000017B5, 0xFFFFFA53, 0x000002E1, 0x000017B5, 0xFFFFFA53, 0x000002E1 },
+ { 0x0213EA94DE021184, 0x0000310D, 0xFFFFEB78, 0x000004D0, 0x00001DC9, 0xFFFFF5D5, 0x00000368, 0x00001DC9, 0xFFFFF5D5, 0x00000368 },
+ { 0x0213EA94DE023104, 0x000031BF, 0xFFFFECA3, 0x00000498, 0x00001DC9, 0xFFFFF717, 0x00000336, 0x00001DC9, 0xFFFFF717, 0x00000336 },
+ { 0x0213F0FD42CE2104, 0x00003896, 0xFFFFE5DD, 0x000005C5, 0x000023E2, 0xFFFFF1A1, 0x00000416, 0x000023E2, 0xFFFFF1A1, 0x00000416 },
+ { 0x0213EA94DE323904, 0x000023CB, 0xFFFFF4C8, 0x00000372, 0x00001C33, 0xFFFFF7D5, 0x0000032A, 0x00001C33, 0xFFFFF7D5, 0x0000032A },
+ { 0x0213F0FD42D020C4, 0x00002F6B, 0xFFFFEBF0, 0x000004CE, 0x00001C89, 0xFFFFF689, 0x0000034D, 0x00001C89, 0xFFFFF689, 0x0000034D },
+ { 0x0213F0FD42CE3904, 0x00003E72, 0xFFFFE211, 0x0000065D, 0x0000218D, 0xFFFFF309, 0x000003DC, 0x0000218D, 0xFFFFF309, 0x000003DC },
+ { 0x0213EA94DE022084, 0x00002612, 0xFFFFF2C3, 0x000003AD, 0x000019F7, 0xFFFFF891, 0x000002FE, 0x000019F7, 0xFFFFF891, 0x000002FE },
+ { 0x0213EA94DE164184, 0x0000205D, 0xFFFFF59F, 0x00000372, 0x000012E6, 0xFFFFFD0A, 0x00000270, 0x000012E6, 0xFFFFFD0A, 0x00000270 },
+ { 0x0213F0FD42CA2124, 0x00002ECB, 0xFFFFEC47, 0x000004BD, 0x00001936, 0xFFFFF8D9, 0x000002E4, 0x00001936, 0xFFFFF8D9, 0x000002E4 },
+ { 0x0213EA94DE064904, 0x00002BDB, 0xFFFFEE6D, 0x00000458, 0x00001852, 0xFFFFF943, 0x000002D9, 0x00001852, 0xFFFFF943, 0x000002D9 },
+ { 0x0213EA94DE124904, 0x00003387, 0xFFFFE958, 0x00000534, 0x00001932, 0xFFFFF8FA, 0x000002E4, 0x00001932, 0xFFFFF8FA, 0x000002E4 },
+ { 0x0213EA94DE0208C4, 0x00002E3C, 0xFFFFED26, 0x00000495, 0x00001858, 0xFFFFF990, 0x000002D1, 0x00001858, 0xFFFFF990, 0x000002D1 },
+ { 0x0213EA94DE022964, 0x000033B8, 0xFFFFEA5C, 0x000004F9, 0x00001BD1, 0xFFFFF76A, 0x0000032E, 0x00001BD1, 0xFFFFF76A, 0x0000032E },
+ { 0x0213EA94DE062984, 0x00002BCE, 0xFFFFEEE9, 0x00000443, 0x00001982, 0xFFFFF90D, 0x000002DF, 0x00001982, 0xFFFFF90D, 0x000002DF },
+ { 0x0213F0FD42D048E4, 0x00003495, 0xFFFFE7D9, 0x0000057B, 0x00001D2A, 0xFFFFF5A5, 0x00000372, 0x00001D2A, 0xFFFFF5A5, 0x00000372 },
+ { 0x0213F0FD42CA38E4, 0x000034B1, 0xFFFFE88D, 0x00000556, 0x00002014, 0xFFFFF43A, 0x000003AA, 0x00002014, 0xFFFFF43A, 0x000003AA },
+ { 0x0213F0FD42CC3124, 0x00002F96, 0xFFFFEC84, 0x000004AD, 0x000024A2, 0xFFFFF1CE, 0x0000040A, 0x000024A2, 0xFFFFF1CE, 0x0000040A },
+ { 0x0213EA94DE161064, 0x0000203B, 0xFFFFF640, 0x00000359, 0x000014EC, 0xFFFFFC14, 0x0000029C, 0x000014EC, 0xFFFFFC14, 0x0000029C },
+ { 0x0213F0FD42D02984, 0x000034E2, 0xFFFFE7B8, 0x00000582, 0x00001938, 0xFFFFF872, 0x000002FA, 0x00001938, 0xFFFFF872, 0x000002FA },
+ { 0x0213EA94DE063124, 0x00002AC7, 0xFFFFF0C1, 0x000003F5, 0x00002268, 0xFFFFF39C, 0x000003C9, 0x00002268, 0xFFFFF39C, 0x000003C9 },
+ { 0x0213F0FD42C63144, 0x000036F6, 0xFFFFE77F, 0x00000571, 0x000027D9, 0xFFFFEF6F, 0x00000461, 0x000027D9, 0xFFFFEF6F, 0x00000461 },
+ { 0x0213EA94DE123124, 0x00002BAB, 0xFFFFF018, 0x00000419, 0x00002126, 0xFFFFF4E2, 0x0000038F, 0x00002126, 0xFFFFF4E2, 0x0000038F },
+ { 0x0213EA94DE323924, 0x000028C4, 0xFFFFF161, 0x000003F8, 0x0000180C, 0xFFFFFA4B, 0x000002C8, 0x0000180C, 0xFFFFFA4B, 0x000002C8 },
+ { 0x0213F0FD42CA2864, 0x00002F48, 0xFFFFEB62, 0x000004EE, 0x00001912, 0xFFFFF8C8, 0x000002EA, 0x00001912, 0xFFFFF8C8, 0x000002EA },
+ { 0x0213F0FD42CE2864, 0x000032DF, 0xFFFFE911, 0x00000545, 0x00001F06, 0xFFFFF485, 0x0000039C, 0x00001F06, 0xFFFFF485, 0x0000039C },
+ { 0x0213F0FD42D04144, 0x000035B8, 0xFFFFE74F, 0x00000590, 0x00001FD7, 0xFFFFF410, 0x000003AF, 0x00001FD7, 0xFFFFF410, 0x000003AF },
+ { 0x0213F0FD42D050C4, 0x00003608, 0xFFFFE6D7, 0x000005A9, 0x000024A6, 0xFFFFF075, 0x00000450, 0x000024A6, 0xFFFFF075, 0x00000450 },
+ { 0x0213F0FD42CA3884, 0x000030AB, 0xFFFFEAED, 0x000004F5, 0x000019EE, 0xFFFFF84E, 0x000002FC, 0x000019EE, 0xFFFFF84E, 0x000002FC },
+ { 0x0213EA94DE0620C4, 0x000030C6, 0xFFFFEC92, 0x0000049E, 0x000019BB, 0xFFFFF8F1, 0x000002F3, 0x000019BB, 0xFFFFF8F1, 0x000002F3 },
+ { 0x0213F0FD42C630A4, 0x00003B27, 0xFFFFE544, 0x000005C1, 0x00002697, 0xFFFFF072, 0x00000438, 0x00002697, 0xFFFFF072, 0x00000438 },
+ { 0x0213EA94DE1248E4, 0x00002F23, 0xFFFFEC48, 0x000004B9, 0x0000199A, 0xFFFFF8CF, 0x000002E9, 0x0000199A, 0xFFFFF8CF, 0x000002E9 },
+ { 0x0213EA94DE0629A4, 0x00002BD7, 0xFFFFEEAC, 0x00000450, 0x00001991, 0xFFFFF8F4, 0x000002E2, 0x00001991, 0xFFFFF8F4, 0x000002E2 },
+ { 0x0213EA94DE022024, 0x00003210, 0xFFFFEB24, 0x000004DE, 0x00001BDF, 0xFFFFF744, 0x00000333, 0x00001BDF, 0xFFFFF744, 0x00000333 },
+ { 0x0213EA94DE244144, 0x00002DDC, 0xFFFFED0D, 0x000004AC, 0x000019D0, 0xFFFFF869, 0x0000030F, 0x000019D0, 0xFFFFF869, 0x0000030F },
+ { 0x0213EA94DE203964, 0x000023E6, 0xFFFFF40C, 0x000003A9, 0x000014EB, 0xFFFFFBC4, 0x000002AF, 0x000014EB, 0xFFFFFBC4, 0x000002AF },
+ { 0x0213F0FD42CA29A4, 0x000030CE, 0xFFFFE9A5, 0x0000053C, 0x00001C45, 0xFFFFF60E, 0x0000035D, 0x00001C45, 0xFFFFF60E, 0x0000035D },
+ { 0x0213EA94DE161084, 0x00001E89, 0xFFFFF73A, 0x00000337, 0x0000157C, 0xFFFFFBC0, 0x000002AA, 0x0000157C, 0xFFFFFBC0, 0x000002AA },
+ { 0x0213F0FD42D04124, 0x000036C6, 0xFFFFE6CF, 0x000005A1, 0x00002457, 0xFFFFF11D, 0x0000042D, 0x00002457, 0xFFFFF11D, 0x0000042D },
+ { 0x0213EA94DE321944, 0x00002815, 0xFFFFF19A, 0x000003F2, 0x000016D2, 0xFFFFFB40, 0x00000299, 0x000016D2, 0xFFFFFB40, 0x00000299 },
+ { 0x0213EA94DE1C19A4, 0x00001FE2, 0xFFFFF660, 0x00000354, 0x000015A7, 0xFFFFFB47, 0x000002C1, 0x000015A7, 0xFFFFFB47, 0x000002C1 },
+ { 0x0213EA94DE161964, 0x00002114, 0xFFFFF634, 0x00000356, 0x000016C1, 0xFFFFFB43, 0x000002B8, 0x000016C1, 0xFFFFFB43, 0x000002B8 },
+ { 0x0213F0FD42CC28C4, 0x000028E3, 0xFFFFF075, 0x00000414, 0x0000203C, 0xFFFFF438, 0x000003B3, 0x0000203C, 0xFFFFF438, 0x000003B3 },
+ { 0x0213EA94DE1C3924, 0x00001EEB, 0xFFFFF7BB, 0x0000031A, 0x00001580, 0xFFFFFBD7, 0x000002AD, 0x00001580, 0xFFFFFBD7, 0x000002AD },
+ { 0x0213EA94DE2408C4, 0x00002BB2, 0xFFFFEE72, 0x00000470, 0x0000192C, 0xFFFFF91E, 0x000002E7, 0x0000192C, 0xFFFFF91E, 0x000002E7 },
+ { 0x0213EA94DE0650E4, 0x00003A3D, 0xFFFFE49D, 0x000005F5, 0x00001A3B, 0xFFFFF7B1, 0x00000320, 0x00001A3B, 0xFFFFF7B1, 0x00000320 },
+ { 0x0213F0FD42CE3164, 0x00002E93, 0xFFFFEC5A, 0x000004B4, 0x000025EB, 0xFFFFF03C, 0x0000044A, 0x000025EB, 0xFFFFF03C, 0x0000044A },
+ { 0x0213F0FD42CA20C4, 0x0000331F, 0xFFFFE97A, 0x00000531, 0x00001A06, 0xFFFFF850, 0x000002FD, 0x00001A06, 0xFFFFF850, 0x000002FD },
+ { 0x0213F0FD42C63964, 0x00003937, 0xFFFFE5A0, 0x000005C7, 0x0000235E, 0xFFFFF234, 0x000003F2, 0x0000235E, 0xFFFFF234, 0x000003F2 },
+ { 0x0213EA94DE1E3924, 0x00001DD0, 0xFFFFF80E, 0x00000319, 0x000015C7, 0xFFFFFB91, 0x000002BC, 0x000015C7, 0xFFFFFB91, 0x000002BC },
+ { 0x0213F0FD42D03964, 0x00003328, 0xFFFFE905, 0x0000054A, 0x00002054, 0xFFFFF3BF, 0x000003C0, 0x00002054, 0xFFFFF3BF, 0x000003C0 },
+ { 0x0213F0FD42CC1104, 0x00002FE5, 0xFFFFEA65, 0x00000520, 0x0000188B, 0xFFFFF8A7, 0x000002F5, 0x0000188B, 0xFFFFF8A7, 0x000002F5 },
+ { 0x0213F0FD42CA38A4, 0x00002ED3, 0xFFFFEC51, 0x000004B9, 0x00001888, 0xFFFFF96A, 0x000002CA, 0x00001888, 0xFFFFF96A, 0x000002CA },
+ { 0x0213F0FD42D03084, 0x00002FCC, 0xFFFFEB60, 0x000004EA, 0x00001F8D, 0xFFFFF436, 0x000003B4, 0x00001F8D, 0xFFFFF436, 0x000003B4 },
+ { 0x0213F0FD42CE4084, 0x0000329F, 0xFFFFE8F7, 0x0000054F, 0x000023DB, 0xFFFFF0EE, 0x0000043A, 0x000023DB, 0xFFFFF0EE, 0x0000043A },
+ { 0x0213EA94DE0438A4, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001AFD, 0xFFFFF781, 0x00000329, 0x00001AFD, 0xFFFFF781, 0x00000329 },
+ { 0x0213EA94DE1E19A4, 0x00001BBF, 0xFFFFF8E2, 0x000002F7, 0x00001722, 0xFFFFFA85, 0x000002DB, 0x00001722, 0xFFFFFA85, 0x000002DB },
+ { 0x0213EA94DE022044, 0x000030E4, 0xFFFFEBE6, 0x000004BB, 0x00001C80, 0xFFFFF6E1, 0x0000033E, 0x00001C80, 0xFFFFF6E1, 0x0000033E },
+ { 0x0213EA94DE122944, 0x000030E2, 0xFFFFECD0, 0x00000492, 0x00001CE0, 0xFFFFF753, 0x0000032F, 0x00001CE0, 0xFFFFF753, 0x0000032F },
+ { 0x0213EA94DE322864, 0x00002513, 0xFFFFF323, 0x000003BC, 0x00001965, 0xFFFFF93C, 0x000002F0, 0x00001965, 0xFFFFF93C, 0x000002F0 },
+ { 0x0213EA94DE1610A4, 0x00002147, 0xFFFFF585, 0x0000037A, 0x000014CC, 0xFFFFFC3B, 0x00000296, 0x000014CC, 0xFFFFFC3B, 0x00000296 },
+ { 0x0213EA94DE322124, 0x00002507, 0xFFFFF432, 0x0000038A, 0x00001890, 0xFFFFFA61, 0x000002C6, 0x00001890, 0xFFFFFA61, 0x000002C6 },
+ { 0x0213EA94DE0638A4, 0x0000339B, 0xFFFFEA7D, 0x000004F0, 0x0000191E, 0xFFFFF944, 0x000002DF, 0x0000191E, 0xFFFFF944, 0x000002DF },
+ { 0x0213F0FD42CC28A4, 0x00002842, 0xFFFFF043, 0x00000427, 0x00001988, 0xFFFFF892, 0x000002F7, 0x00001988, 0xFFFFF892, 0x000002F7 },
+ { 0x0213F0FD42C618A4, 0x0000389D, 0xFFFFE5D8, 0x000005BF, 0x00001EE1, 0xFFFFF4EF, 0x00000387, 0x00001EE1, 0xFFFFF4EF, 0x00000387 },
+ { 0x0213F0FD42CE3184, 0x0000396D, 0xFFFFE4D7, 0x000005F2, 0x000020DA, 0xFFFFF34E, 0x000003CD, 0x000020DA, 0xFFFFF34E, 0x000003CD },
+ { 0x0213F0FD42CA3104, 0x0000355F, 0xFFFFE85A, 0x0000055F, 0x0000281F, 0xFFFFEF28, 0x0000047D, 0x0000281F, 0xFFFFEF28, 0x0000047D },
+ { 0x0213EA94DE1C50E4, 0x00002284, 0xFFFFF46E, 0x00000399, 0x00001498, 0xFFFFFBE3, 0x0000029C, 0x00001498, 0xFFFFFBE3, 0x0000029C },
+ { 0x0213EA94DE023944, 0x000031B6, 0xFFFFEB42, 0x000004D9, 0x00001F54, 0xFFFFF4D2, 0x00000399, 0x00001F54, 0xFFFFF4D2, 0x00000399 },
+ { 0x0213F0FD42C63064, 0x000035CE, 0xFFFFE79D, 0x00000578, 0x00001C78, 0xFFFFF68C, 0x00000344, 0x00001C78, 0xFFFFF68C, 0x00000344 },
+ { 0x0213EA94DE1E4964, 0x00001C0A, 0xFFFFF81B, 0x00000318, 0x00001492, 0xFFFFFBCC, 0x000002A5, 0x00001492, 0xFFFFFBCC, 0x000002A5 },
+ { 0x0213EA94DE022184, 0x00003492, 0xFFFFE95C, 0x00000526, 0x00001A97, 0xFFFFF81B, 0x0000030B, 0x00001A97, 0xFFFFF81B, 0x0000030B },
+ { 0x0213EA94DE163164, 0x00001E89, 0xFFFFF7D0, 0x0000031A, 0x000017A5, 0xFFFFFA99, 0x000002D9, 0x000017A5, 0xFFFFFA99, 0x000002D9 },
+ { 0x0213F0FD42CA48C4, 0x00002DCC, 0xFFFFEBE0, 0x000004DE, 0x000019BA, 0xFFFFF7F5, 0x0000030D, 0x000019BA, 0xFFFFF7F5, 0x0000030D },
+ { 0x0213EA94DE042984, 0x000030EF, 0xFFFFEBC1, 0x000004C0, 0x00001AA9, 0xFFFFF814, 0x0000030A, 0x00001AA9, 0xFFFFF814, 0x0000030A },
+ { 0x0213EA94DE245124, 0x00002EA3, 0xFFFFEBF6, 0x000004D8, 0x00001DCF, 0xFFFFF521, 0x00000399, 0x00001DCF, 0xFFFFF521, 0x00000399 },
+ { 0x0213EA94DE324164, 0x00002B5F, 0xFFFFEEA1, 0x0000046C, 0x000017EB, 0xFFFFF9C9, 0x000002D4, 0x000017EB, 0xFFFFF9C9, 0x000002D4 },
+ { 0x0213EA94DE024104, 0x00002C63, 0xFFFFEE82, 0x00000455, 0x00002268, 0xFFFFF29D, 0x000003F6, 0x00002268, 0xFFFFF29D, 0x000003F6 },
+ { 0x0213EA94DE121904, 0x00002B1A, 0xFFFFF016, 0x0000041C, 0x000019AA, 0xFFFFF988, 0x000002D2, 0x000019AA, 0xFFFFF988, 0x000002D2 },
+ { 0x0213F0FD42CA2964, 0x0000332F, 0xFFFFE934, 0x0000053B, 0x00001E47, 0xFFFFF566, 0x00000374, 0x00001E47, 0xFFFFF566, 0x00000374 },
+ { 0x0213F0FD42CA48E4, 0x00002995, 0xFFFFEEC1, 0x00000465, 0x0000178F, 0xFFFFF995, 0x000002C5, 0x0000178F, 0xFFFFF995, 0x000002C5 },
+ { 0x0213EA94DE201884, 0x00001C2E, 0xFFFFF932, 0x000002E9, 0x000015C2, 0xFFFFFBC5, 0x000002AD, 0x000015C2, 0xFFFFFBC5, 0x000002AD },
+ { 0x0213F0FD42C640E4, 0x00003B08, 0xFFFFE4E8, 0x000005D8, 0x0000209D, 0xFFFFF444, 0x00000398, 0x0000209D, 0xFFFFF444, 0x00000398 },
+ { 0x0213EA94DE0450E4, 0x00002F1F, 0xFFFFEB74, 0x000004EB, 0x00001F4C, 0xFFFFF3D4, 0x000003CE, 0x00001F4C, 0xFFFFF3D4, 0x000003CE },
+ { 0x0213EA94DE043884, 0x00003415, 0xFFFFE89F, 0x00000553, 0x0000186B, 0xFFFFF8E1, 0x000002EF, 0x0000186B, 0xFFFFF8E1, 0x000002EF },
+ { 0x0213F0FD42CC10C4, 0x00003441, 0xFFFFE779, 0x0000059D, 0x000019EA, 0xFFFFF7B2, 0x0000031F, 0x000019EA, 0xFFFFF7B2, 0x0000031F },
+ { 0x0213EA94DE164064, 0x00002174, 0xFFFFF546, 0x00000378, 0x00001456, 0xFFFFFC5F, 0x00000284, 0x00001456, 0xFFFFFC5F, 0x00000284 },
+ { 0x0213F0FD42CE40C4, 0x00003788, 0xFFFFE61E, 0x000005BF, 0x00001DF4, 0xFFFFF562, 0x00000374, 0x00001DF4, 0xFFFFF562, 0x00000374 },
+ { 0x0213EA94DE1E1844, 0x00001C41, 0xFFFFF8C1, 0x000002FC, 0x0000171E, 0xFFFFFA93, 0x000002DE, 0x0000171E, 0xFFFFFA93, 0x000002DE },
+ { 0x0213F0FD42CA3864, 0x00002B15, 0xFFFFEDEC, 0x00000487, 0x000017E4, 0xFFFFF934, 0x000002DF, 0x000017E4, 0xFFFFF934, 0x000002DF },
+ { 0x0213F0FD42CC3144, 0x0000327A, 0xFFFFEA71, 0x000004FF, 0x00001D96, 0xFFFFF63B, 0x00000351, 0x00001D96, 0xFFFFF63B, 0x00000351 },
+ { 0x0213EA94DE1E4064, 0x000023C6, 0xFFFFF3E5, 0x000003B6, 0x000014DE, 0xFFFFFC29, 0x00000294, 0x000014DE, 0xFFFFFC29, 0x00000294 },
+ { 0x0213EA94DE164944, 0x00001F96, 0xFFFFF5FA, 0x00000364, 0x00001397, 0xFFFFFC9D, 0x0000027D, 0x00001397, 0xFFFFFC9D, 0x0000027D },
+ { 0x0213EA94DE063144, 0x00002B51, 0xFFFFEFB5, 0x00000420, 0x00001ACA, 0xFFFFF824, 0x0000030D, 0x00001ACA, 0xFFFFF824, 0x0000030D },
+ { 0x0213EA94DE1E4944, 0x000020DB, 0xFFFFF55B, 0x0000037C, 0x0000153D, 0xFFFFFB5F, 0x000002BA, 0x0000153D, 0xFFFFFB5F, 0x000002BA },
+ { 0x0213EA94DE0221A4, 0x000030BB, 0xFFFFEBDA, 0x000004BC, 0x00001B0E, 0xFFFFF7A8, 0x0000031E, 0x00001B0E, 0xFFFFF7A8, 0x0000031E },
+ { 0x0213F0FD42C62904, 0x000033C4, 0xFFFFEA41, 0x000004FA, 0x000022C6, 0xFFFFF363, 0x000003BC, 0x000022C6, 0xFFFFF363, 0x000003BC },
+ { 0x0213EA94DE240924, 0x00002D47, 0xFFFFEE01, 0x00000477, 0x000021CD, 0xFFFFF36E, 0x000003D6, 0x000021CD, 0xFFFFF36E, 0x000003D6 },
+ { 0x0213EA94DE1E31A4, 0x00001E7B, 0xFFFFF733, 0x00000339, 0x00001668, 0xFFFFFB29, 0x000002BF, 0x00001668, 0xFFFFFB29, 0x000002BF },
+ { 0x0213F0FD42CA2984, 0x00002F7E, 0xFFFFEAFF, 0x000004FC, 0x000018D4, 0xFFFFF8BE, 0x000002E8, 0x000018D4, 0xFFFFF8BE, 0x000002E8 },
+ { 0x0213EA94DE3238A4, 0x00002635, 0xFFFFF2E1, 0x000003BC, 0x000017A4, 0xFFFFFA67, 0x000002C3, 0x000017A4, 0xFFFFFA67, 0x000002C3 },
+ { 0x0213EA94DE1230A4, 0x000026CA, 0xFFFFF2C1, 0x000003B2, 0x00001C3E, 0xFFFFF7AE, 0x0000031F, 0x00001C3E, 0xFFFFF7AE, 0x0000031F },
+ { 0x0213EA94DE1C1064, 0x00002550, 0xFFFFF380, 0x000003B5, 0x000019F5, 0xFFFFF8E7, 0x00000313, 0x000019F5, 0xFFFFF8E7, 0x00000313 },
+ { 0x0213F0FD42CA4904, 0x00002FBC, 0xFFFFEAF8, 0x000004FA, 0x000018CC, 0xFFFFF8C6, 0x000002E8, 0x000018CC, 0xFFFFF8C6, 0x000002E8 },
+ { 0x0213F0FD42D018E4, 0x00002FCC, 0xFFFFEB60, 0x000004EA, 0x00001EFF, 0xFFFFF4DA, 0x0000038F, 0x00001EFF, 0xFFFFF4DA, 0x0000038F },
+ { 0x0213EA94DE164084, 0x000023E6, 0xFFFFF413, 0x000003A1, 0x00001544, 0xFFFFFC16, 0x0000028B, 0x00001544, 0xFFFFFC16, 0x0000028B },
+ { 0x0213F0FD42CE3024, 0x00003251, 0xFFFFEAA2, 0x000004F5, 0x000025B0, 0xFFFFF0DF, 0x00000431, 0x000025B0, 0xFFFFF0DF, 0x00000431 },
+ { 0x0213F0FD42D03984, 0x00002F6F, 0xFFFFEB67, 0x000004E6, 0x00002275, 0xFFFFF249, 0x000003FB, 0x00002275, 0xFFFFF249, 0x000003FB },
+ { 0x0213EA94DE322964, 0x00002597, 0xFFFFF34A, 0x000003B1, 0x00001BCC, 0xFFFFF822, 0x0000031A, 0x00001BCC, 0xFFFFF822, 0x0000031A },
+ { 0x0213F0FD42C63864, 0x00003B1D, 0xFFFFE40E, 0x0000060D, 0x00001F61, 0xFFFFF470, 0x0000039F, 0x00001F61, 0xFFFFF470, 0x0000039F },
+ { 0x0213F0FD42C64144, 0x0000379F, 0xFFFFE6DB, 0x0000058C, 0x00002460, 0xFFFFF170, 0x00000415, 0x00002460, 0xFFFFF170, 0x00000415 },
+ { 0x0213EA94DE165144, 0x00002442, 0xFFFFF2FB, 0x000003D9, 0x00001414, 0xFFFFFBDC, 0x000002A2, 0x00001414, 0xFFFFFBDC, 0x000002A2 },
+ { 0x0213EA94DE0240C4, 0x00003270, 0xFFFFEA0D, 0x0000051C, 0x00001AFD, 0xFFFFF783, 0x00000328, 0x00001AFD, 0xFFFFF783, 0x00000328 },
+ { 0x0213EA94DE161104, 0x00001B23, 0xFFFFF94B, 0x000002EB, 0x000015F1, 0xFFFFFB82, 0x000002B4, 0x000015F1, 0xFFFFFB82, 0x000002B4 },
+ { 0x0213EA94DE323844, 0x000026AE, 0xFFFFF21A, 0x000003DB, 0x00001827, 0xFFFFFA10, 0x000002C8, 0x00001827, 0xFFFFFA10, 0x000002C8 },
+ { 0x0213F0FD42CA4884, 0x00002DCF, 0xFFFFEBD8, 0x000004DB, 0x00001A75, 0xFFFFF719, 0x0000033A, 0x00001A75, 0xFFFFF719, 0x0000033A },
+ { 0x0213F0FD42CE40E4, 0x00003983, 0xFFFFE500, 0x000005EA, 0x000022A6, 0xFFFFF25F, 0x000003F1, 0x000022A6, 0xFFFFF25F, 0x000003F1 },
+ { 0x0213EA94DE1218C4, 0x00002AD5, 0xFFFFF07A, 0x00000406, 0x000019FB, 0xFFFFF961, 0x000002D8, 0x000019FB, 0xFFFFF961, 0x000002D8 },
+ { 0x0213F0FD42CA39A4, 0x00002A43, 0xFFFFEE43, 0x00000474, 0x00001D65, 0xFFFFF538, 0x00000387, 0x00001D65, 0xFFFFF538, 0x00000387 },
+ { 0x0213F0FD42C62084, 0x0000311E, 0xFFFFEAF8, 0x000004E8, 0x00001959, 0xFFFFF8E4, 0x000002DC, 0x00001959, 0xFFFFF8E4, 0x000002DC },
+ { 0x0213F0FD42D031A4, 0x0000339A, 0xFFFFE8A7, 0x00000559, 0x00001A04, 0xFFFFF7E5, 0x00000311, 0x00001A04, 0xFFFFF7E5, 0x00000311 },
+ { 0x0213EA94DE204144, 0x000021B3, 0xFFFFF50F, 0x00000389, 0x00001470, 0xFFFFFBF7, 0x000002A5, 0x00001470, 0xFFFFFBF7, 0x000002A5 },
+ { 0x0213EA94DE021884, 0x00003417, 0xFFFFE9A6, 0x0000051D, 0x000018A4, 0xFFFFF984, 0x000002CF, 0x000018A4, 0xFFFFF984, 0x000002CF },
+ { 0x0213EA94DE202984, 0x00001FED, 0xFFFFF6A2, 0x00000347, 0x00001639, 0xFFFFFB59, 0x000002BB, 0x00001639, 0xFFFFFB59, 0x000002BB },
+ { 0x0213EA94DE1218A4, 0x000032D2, 0xFFFFEB18, 0x000004DC, 0x00001A01, 0xFFFFF95E, 0x000002CF, 0x00001A01, 0xFFFFF95E, 0x000002CF },
+ { 0x0213F0FD42D04084, 0x00003147, 0xFFFFEA3B, 0x00000518, 0x0000241D, 0xFFFFF11C, 0x00000431, 0x0000241D, 0xFFFFF11C, 0x00000431 },
+ { 0x0213EA94DE1C0904, 0x00001D44, 0xFFFFF7E7, 0x0000031A, 0x0000153F, 0xFFFFFBBC, 0x000002A9, 0x0000153F, 0xFFFFFBBC, 0x000002A9 },
+ { 0x0213F0FD42CC4104, 0x00003690, 0xFFFFE6E3, 0x000005A4, 0x000018DE, 0xFFFFF908, 0x000002DD, 0x000018DE, 0xFFFFF908, 0x000002DD },
+ { 0x0213F0FD42CC2184, 0x00003561, 0xFFFFE6F8, 0x000005AB, 0x000018B5, 0xFFFFF8A0, 0x000002F3, 0x000018B5, 0xFFFFF8A0, 0x000002F3 },
+ { 0x0213EA94DE323124, 0x000028F4, 0xFFFFF23A, 0x000003CE, 0x00001BC6, 0xFFFFF881, 0x00000311, 0x00001BC6, 0xFFFFF881, 0x00000311 },
+ { 0x0213F0FD42D03184, 0x000035D7, 0xFFFFE71C, 0x0000059B, 0x00001D49, 0xFFFFF5C8, 0x00000368, 0x00001D49, 0xFFFFF5C8, 0x00000368 },
+ { 0x0213F0FD42CE18A4, 0x0000397E, 0xFFFFE4CB, 0x000005F4, 0x00001989, 0xFFFFF844, 0x000002FD, 0x00001989, 0xFFFFF844, 0x000002FD },
+ { 0x0213F0FD42C62064, 0x00003BAB, 0xFFFFE332, 0x0000063F, 0x00001A69, 0xFFFFF7B9, 0x00000312, 0x00001A69, 0xFFFFF7B9, 0x00000312 },
+ { 0x0213F0FD42D03064, 0x00002F26, 0xFFFFEB82, 0x000004E8, 0x00001D7D, 0xFFFFF590, 0x00000379, 0x00001D7D, 0xFFFFF590, 0x00000379 },
+ { 0x0213EA94DE0631A4, 0x00002FDC, 0xFFFFEBE0, 0x000004C3, 0x00001940, 0xFFFFF8CC, 0x000002EE, 0x00001940, 0xFFFFF8CC, 0x000002EE },
+ { 0x0213EA94DE1C08E4, 0x000021B2, 0xFFFFF558, 0x00000379, 0x00001643, 0xFFFFFB1C, 0x000002C3, 0x00001643, 0xFFFFFB1C, 0x000002C3 },
+ { 0x0213EA94DE321904, 0x00002897, 0xFFFFF181, 0x000003F7, 0x00001990, 0xFFFFF994, 0x000002E2, 0x00001990, 0xFFFFF994, 0x000002E2 },
+ { 0x0213EA94DE1E0924, 0x00001D19, 0xFFFFF829, 0x0000031A, 0x00001558, 0xFFFFFBCA, 0x000002AF, 0x00001558, 0xFFFFFBCA, 0x000002AF },
+ { 0x0213EA94DE043144, 0x00003311, 0xFFFFEAD9, 0x000004E1, 0x00001BDC, 0xFFFFF79E, 0x0000031D, 0x00001BDC, 0xFFFFF79E, 0x0000031D },
+ { 0x0213EA94DE1E29C4, 0x00001E54, 0xFFFFF740, 0x00000333, 0x000016A1, 0xFFFFFAF0, 0x000002C4, 0x000016A1, 0xFFFFFAF0, 0x000002C4 },
+ { 0x0213F0FD42CE3964, 0x00003266, 0xFFFFE9A8, 0x00000527, 0x00002307, 0xFFFFF219, 0x000003FC, 0x00002307, 0xFFFFF219, 0x000003FC },
+ { 0x0213EA94DE321144, 0x00001D1F, 0xFFFFF82B, 0x000002F0, 0x000013F0, 0xFFFFFD0B, 0x0000024E, 0x000013F0, 0xFFFFFD0B, 0x0000024E },
+ { 0x0213F0FD42C648A4, 0x0000312E, 0xFFFFEA67, 0x00000502, 0x0000222A, 0xFFFFF253, 0x000003F9, 0x0000222A, 0xFFFFF253, 0x000003F9 },
+ { 0x0213F0FD42CA4124, 0x000032B2, 0xFFFFE9AD, 0x00000523, 0x00001E97, 0xFFFFF527, 0x0000037F, 0x00001E97, 0xFFFFF527, 0x0000037F },
+ { 0x0213EA94DE1640E4, 0x00001F6A, 0xFFFFF6FC, 0x00000338, 0x0000164B, 0xFFFFFB2C, 0x000002C2, 0x0000164B, 0xFFFFFB2C, 0x000002C2 },
+ { 0x0213EA94DE0228C4, 0x00002603, 0xFFFFF386, 0x00000392, 0x00001EE0, 0xFFFFF601, 0x00000369, 0x00001EE0, 0xFFFFF601, 0x00000369 },
+ { 0x0213EA94DE201164, 0x00001D0C, 0xFFFFF803, 0x00000317, 0x00001345, 0xFFFFFD52, 0x00000260, 0x00001345, 0xFFFFFD52, 0x00000260 },
+ { 0x0213F0FD42CC1884, 0x0000327A, 0xFFFFE8E5, 0x0000055C, 0x00001680, 0xFFFFFA2D, 0x000002B2, 0x00001680, 0xFFFFFA2D, 0x000002B2 },
+ { 0x0213F0FD42CA3964, 0x000032B8, 0xFFFFE91A, 0x0000054A, 0x00001BAB, 0xFFFFF6EC, 0x00000338, 0x00001BAB, 0xFFFFF6EC, 0x00000338 },
+ { 0x0213F0FD42CC3044, 0x00002F79, 0xFFFFEB63, 0x000004EF, 0x000017BB, 0xFFFFF9B1, 0x000002CA, 0x000017BB, 0xFFFFF9B1, 0x000002CA },
+ { 0x0213EA94DE0438E4, 0x00002AE5, 0xFFFFEFCB, 0x0000041D, 0x0000214A, 0xFFFFF3A7, 0x000003C7, 0x0000214A, 0xFFFFF3A7, 0x000003C7 },
+ { 0x0213EA94DE322064, 0x0000212C, 0xFFFFF5BC, 0x0000034F, 0x000017ED, 0xFFFFFA4C, 0x000002C1, 0x000017ED, 0xFFFFFA4C, 0x000002C1 },
+ { 0x0213EA94DE121124, 0x00002BE7, 0xFFFFEF40, 0x0000043C, 0x00001AE2, 0xFFFFF8CF, 0x000002E3, 0x00001AE2, 0xFFFFF8CF, 0x000002E3 },
+ { 0x0213F0FD42D05144, 0x000032DC, 0xFFFFE90F, 0x00000549, 0x00002A2D, 0xFFFFECC9, 0x000004ED, 0x00002A2D, 0xFFFFECC9, 0x000004ED },
+ { 0x0213EA94DE1618A4, 0x00001DE3, 0xFFFFF80D, 0x00000319, 0x000016FA, 0xFFFFFB42, 0x000002BC, 0x000016FA, 0xFFFFFB42, 0x000002BC },
+ { 0x0213EA94DE1E2844, 0x00001F1B, 0xFFFFF6DE, 0x00000346, 0x00001502, 0xFFFFFC23, 0x00000298, 0x00001502, 0xFFFFFC23, 0x00000298 },
+ { 0x0213EA94DE061864, 0x00003203, 0xFFFFEA87, 0x000004FE, 0x0000194E, 0xFFFFF8E3, 0x000002EC, 0x0000194E, 0xFFFFF8E3, 0x000002EC },
+ { 0x0213F0FD42D02144, 0x0000337A, 0xFFFFE8DD, 0x00000551, 0x00001E3C, 0xFFFFF534, 0x00000385, 0x00001E3C, 0xFFFFF534, 0x00000385 },
+ { 0x0213F0FD42CA4864, 0x000036F6, 0xFFFFE62A, 0x000005C5, 0x000023C0, 0xFFFFF117, 0x00000435, 0x000023C0, 0xFFFFF117, 0x00000435 },
+ { 0x0213F0FD42CC2144, 0x00003125, 0xFFFFEA4E, 0x0000051A, 0x00001E6C, 0xFFFFF503, 0x0000038E, 0x00001E6C, 0xFFFFF503, 0x0000038E },
+ { 0x0213EA94DE1C08A4, 0x00001CD4, 0xFFFFF82D, 0x0000030E, 0x0000156D, 0xFFFFFB64, 0x000002B8, 0x0000156D, 0xFFFFFB64, 0x000002B8 },
+ { 0x0213EA94DE0240A4, 0x00002F14, 0xFFFFEC46, 0x000004B8, 0x000017F1, 0xFFFFF977, 0x000002D2, 0x000017F1, 0xFFFFF977, 0x000002D2 },
+ { 0x0213EA94DE0640A4, 0x000031F1, 0xFFFFEAD4, 0x000004ED, 0x0000184C, 0xFFFFF983, 0x000002D4, 0x0000184C, 0xFFFFF983, 0x000002D4 },
+ { 0x0213F0FD42D04984, 0x00002EA9, 0xFFFFEBD7, 0x000004D5, 0x0000288D, 0xFFFFEDDB, 0x000004C0, 0x0000288D, 0xFFFFEDDB, 0x000004C0 },
+ { 0x0213F0FD42CA3984, 0x0000335F, 0xFFFFE82C, 0x00000579, 0x00001DBF, 0xFFFFF512, 0x0000038C, 0x00001DBF, 0xFFFFF512, 0x0000038C },
+ { 0x0213EA94DE201184, 0x0000224F, 0xFFFFF4B5, 0x00000391, 0x0000138C, 0xFFFFFCC3, 0x0000027A, 0x0000138C, 0xFFFFFCC3, 0x0000027A },
+ { 0x0213EA94DE1240A4, 0x0000320D, 0xFFFFEACD, 0x000004F5, 0x00001976, 0xFFFFF913, 0x000002E2, 0x00001976, 0xFFFFF913, 0x000002E2 },
+ { 0x0213EA94DE202104, 0x00001BEB, 0xFFFFF99C, 0x000002E4, 0x000016A4, 0xFFFFFB77, 0x000002C3, 0x000016A4, 0xFFFFFB77, 0x000002C3 },
+ { 0x0213EA94DE063044, 0x0000396E, 0xFFFFE616, 0x000005A9, 0x000018F4, 0xFFFFF91A, 0x000002E3, 0x000018F4, 0xFFFFF91A, 0x000002E3 },
+ { 0x0213EA94DE022864, 0x00003251, 0xFFFFEA8E, 0x000004FA, 0x000018EF, 0xFFFFF910, 0x000002E4, 0x000018EF, 0xFFFFF910, 0x000002E4 },
+ { 0x0213EA94DE1C1924, 0x00001DAF, 0xFFFFF857, 0x0000030D, 0x00001915, 0xFFFFF9D8, 0x000002F7, 0x00001915, 0xFFFFF9D8, 0x000002F7 },
+ { 0x0213EA94DE2041A4, 0x000025B6, 0xFFFFF26B, 0x000003E5, 0x00001531, 0xFFFFFB68, 0x000002AF, 0x00001531, 0xFFFFFB68, 0x000002AF },
+ { 0x0213EA94DE061884, 0x00002B2E, 0xFFFFEF2E, 0x00000440, 0x00001968, 0xFFFFF91A, 0x000002DF, 0x00001968, 0xFFFFF91A, 0x000002DF },
+ { 0x0213EA94DE1C2064, 0x00002305, 0xFFFFF528, 0x00000377, 0x000018A4, 0xFFFFF9EB, 0x000002F0, 0x000018A4, 0xFFFFF9EB, 0x000002F0 },
+ { 0x0213F0FD42CA40C4, 0x000032A1, 0xFFFFE992, 0x0000052E, 0x00001A55, 0xFFFFF826, 0x000002FE, 0x00001A55, 0xFFFFF826, 0x000002FE },
+ { 0x0213EA94DE042184, 0x00002CCD, 0xFFFFEE35, 0x00000462, 0x00001B09, 0xFFFFF7E6, 0x0000030F, 0x00001B09, 0xFFFFF7E6, 0x0000030F },
+ { 0x0213EA94DE323084, 0x00002602, 0xFFFFF2CF, 0x000003C5, 0x000016EE, 0xFFFFFAD4, 0x000002B4, 0x000016EE, 0xFFFFFAD4, 0x000002B4 },
+ { 0x0213F0FD42D01964, 0x00003370, 0xFFFFE891, 0x00000560, 0x000017F0, 0xFFFFF930, 0x000002DF, 0x000017F0, 0xFFFFF930, 0x000002DF },
+ { 0x0213F0FD42CA1884, 0x00002EDC, 0xFFFFEB6D, 0x000004EC, 0x000016E6, 0xFFFFF9ED, 0x000002BC, 0x000016E6, 0xFFFFF9ED, 0x000002BC },
+ { 0x0213EA94DE1228C4, 0x00002A05, 0xFFFFF13D, 0x000003F0, 0x00002065, 0xFFFFF57B, 0x00000378, 0x00002065, 0xFFFFF57B, 0x00000378 },
+ { 0x0213F0FD42CE2044, 0x00002F8A, 0xFFFFEB6E, 0x000004E4, 0x00001E3E, 0xFFFFF50E, 0x0000038D, 0x00001E3E, 0xFFFFF50E, 0x0000038D },
+ { 0x0213F0FD42CA3044, 0x00002BB5, 0xFFFFED6A, 0x000004A1, 0x000017BF, 0xFFFFF937, 0x000002E5, 0x000017BF, 0xFFFFF937, 0x000002E5 },
+ { 0x0213EA94DE201964, 0x0000202C, 0xFFFFF6CE, 0x0000033F, 0x000015EE, 0xFFFFFB83, 0x000002B9, 0x000015EE, 0xFFFFFB83, 0x000002B9 },
+ { 0x0213EA94DE022884, 0x00002C0C, 0xFFFFEF10, 0x0000043F, 0x00001A73, 0xFFFFF83E, 0x0000030C, 0x00001A73, 0xFFFFF83E, 0x0000030C },
+ { 0x0213EA94DE324104, 0x0000234F, 0xFFFFF460, 0x00000385, 0x000018C3, 0xFFFFF9A5, 0x000002DD, 0x000018C3, 0xFFFFF9A5, 0x000002DD },
+ { 0x0213F0FD42CE1904, 0x00003679, 0xFFFFE704, 0x00000595, 0x00002177, 0xFFFFF31A, 0x000003D7, 0x00002177, 0xFFFFF31A, 0x000003D7 },
+ { 0x0213F0FD42CA2924, 0x00003008, 0xFFFFEBB8, 0x000004D5, 0x000024FF, 0xFFFFF112, 0x00000430, 0x000024FF, 0xFFFFF112, 0x00000430 },
+ { 0x0213F0FD42C641A4, 0x00003848, 0xFFFFE6A3, 0x00000594, 0x00002958, 0xFFFFEE37, 0x000004A0, 0x00002958, 0xFFFFEE37, 0x000004A0 },
+ { 0x0213F0FD42CC1924, 0x00002FDF, 0xFFFFEB08, 0x000004FD, 0x00001D77, 0xFFFFF58B, 0x0000037A, 0x00001D77, 0xFFFFF58B, 0x0000037A },
+ { 0x0213EA94DE063064, 0x00002EC8, 0xFFFFED41, 0x00000481, 0x00001949, 0xFFFFF91C, 0x000002DF, 0x00001949, 0xFFFFF91C, 0x000002DF },
+ { 0x0213F0FD42D041A4, 0x000037C1, 0xFFFFE5BA, 0x000005D7, 0x0000252C, 0xFFFFF023, 0x00000460, 0x0000252C, 0xFFFFF023, 0x00000460 },
+ { 0x0213F0FD42CE2944, 0x00003716, 0xFFFFE70C, 0x0000058A, 0x000028CC, 0xFFFFEE57, 0x0000049D, 0x000028CC, 0xFFFFEE57, 0x0000049D },
+ { 0x0213F0FD42CA40E4, 0x000033D1, 0xFFFFE8E8, 0x00000547, 0x00001AB1, 0xFFFFF7E5, 0x00000309, 0x00001AB1, 0xFFFFF7E5, 0x00000309 },
+ { 0x0213F0FD42CC2944, 0x00002D72, 0xFFFFED65, 0x0000048E, 0x00001E0D, 0xFFFFF5A7, 0x00000370, 0x00001E0D, 0xFFFFF5A7, 0x00000370 },
+ { 0x0213EA94DE1C39A4, 0x00002292, 0xFFFFF49F, 0x00000393, 0x000017F4, 0xFFFFF9CD, 0x000002F5, 0x000017F4, 0xFFFFF9CD, 0x000002F5 },
+ { 0x0213EA94DE243044, 0x000026EE, 0xFFFFF18C, 0x000003F7, 0x000018A7, 0xFFFFF95A, 0x000002E5, 0x000018A7, 0xFFFFF95A, 0x000002E5 },
+ { 0x0213EA94DE042164, 0x00002F62, 0xFFFFEC9B, 0x000004A4, 0x0000194E, 0xFFFFF932, 0x000002D9, 0x0000194E, 0xFFFFF932, 0x000002D9 },
+ { 0x0213EA94DE1E3984, 0x00001CE8, 0xFFFFF7FA, 0x0000031C, 0x000014CE, 0xFFFFFBD4, 0x000002AB, 0x000014CE, 0xFFFFFBD4, 0x000002AB },
+ { 0x0213EA94DE1210E4, 0x00002E5A, 0xFFFFEDAB, 0x0000047C, 0x00001A82, 0xFFFFF8F7, 0x000002DE, 0x00001A82, 0xFFFFF8F7, 0x000002DE },
+ { 0x0213F0FD42CC30E4, 0x00003057, 0xFFFFEC34, 0x000004B9, 0x00002296, 0xFFFFF342, 0x000003D0, 0x00002296, 0xFFFFF342, 0x000003D0 },
+ { 0x0213EA94DE0418A4, 0x00002B0F, 0xFFFFEF58, 0x00000434, 0x00001BFD, 0xFFFFF721, 0x00000330, 0x00001BFD, 0xFFFFF721, 0x00000330 },
+ { 0x0213EA94DE2010A4, 0x00001F01, 0xFFFFF751, 0x0000032F, 0x00001502, 0xFFFFFC3E, 0x00000296, 0x00001502, 0xFFFFFC3E, 0x00000296 },
+ { 0x0213F0FD42CA3064, 0x00002FF4, 0xFFFFEAE2, 0x00000503, 0x00001B36, 0xFFFFF736, 0x00000330, 0x00001B36, 0xFFFFF736, 0x00000330 },
+ { 0x0213F0FD42CE2064, 0x00003762, 0xFFFFE5AB, 0x000005DE, 0x000018CB, 0xFFFFF896, 0x000002F4, 0x000018CB, 0xFFFFF896, 0x000002F4 },
+ { 0x0213F0FD42CC2064, 0x00002890, 0xFFFFEF92, 0x00000445, 0x0000191D, 0xFFFFF86F, 0x00000302, 0x0000191D, 0xFFFFF86F, 0x00000302 },
+ { 0x0213EA94DE043064, 0x00002F76, 0xFFFFEC0E, 0x000004BF, 0x00001F7D, 0xFFFFF41A, 0x000003C0, 0x00001F7D, 0xFFFFF41A, 0x000003C0 },
+ { 0x0213EA94DE1E08A4, 0x00001D55, 0xFFFFF7F8, 0x0000031E, 0x000015DF, 0xFFFFFB79, 0x000002B7, 0x000015DF, 0xFFFFFB79, 0x000002B7 },
+ { 0x0213EA94DE204924, 0x00001FE9, 0xFFFFF64A, 0x00000353, 0x000019E8, 0xFFFFF882, 0x0000032A, 0x000019E8, 0xFFFFF882, 0x0000032A },
+ { 0x0213EA94DE063964, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001857, 0xFFFFF968, 0x000002D8, 0x00001857, 0xFFFFF968, 0x000002D8 },
+ { 0x0213F0FD42CA28C4, 0x00003398, 0xFFFFE9A3, 0x00000524, 0x00001FF9, 0xFFFFF458, 0x000003AD, 0x00001FF9, 0xFFFFF458, 0x000003AD },
+ { 0x0213F0FD42CE2964, 0x00003897, 0xFFFFE5BD, 0x000005C8, 0x00002519, 0xFFFFF0BA, 0x00000438, 0x00002519, 0xFFFFF0BA, 0x00000438 },
+ { 0x0213F0FD42D04064, 0x00003234, 0xFFFFE9B1, 0x00000530, 0x000022CC, 0xFFFFF20E, 0x00000409, 0x000022CC, 0xFFFFF20E, 0x00000409 },
+ { 0x0213EA94DE205104, 0x00001FD2, 0xFFFFF641, 0x00000354, 0x000017C9, 0xFFFFF9C0, 0x000002FB, 0x000017C9, 0xFFFFF9C0, 0x000002FB },
+ { 0x0213F0FD42CE48E4, 0x00003234, 0xFFFFE946, 0x0000053D, 0x00002267, 0xFFFFF1F5, 0x0000040D, 0x00002267, 0xFFFFF1F5, 0x0000040D },
+ { 0x0213EA94DE2029A4, 0x00002330, 0xFFFFF474, 0x00000399, 0x00001490, 0xFFFFFC67, 0x00000288, 0x00001490, 0xFFFFFC67, 0x00000288 },
+ { 0x0213F0FD42D03924, 0x000032A3, 0xFFFFE9EB, 0x0000051B, 0x0000234D, 0xFFFFF23C, 0x000003F7, 0x0000234D, 0xFFFFF23C, 0x000003F7 },
+ { 0x0213EA94DE200904, 0x0000217E, 0xFFFFF53A, 0x00000384, 0x00001511, 0xFFFFFBF5, 0x0000029E, 0x00001511, 0xFFFFFBF5, 0x0000029E },
+ { 0x0213F0FD42CE50E4, 0x0000384F, 0xFFFFE562, 0x000005E2, 0x0000295A, 0xFFFFED53, 0x000004D3, 0x0000295A, 0xFFFFED53, 0x000004D3 },
+ { 0x0213F0FD42D05124, 0x00003315, 0xFFFFE8D1, 0x00000552, 0x000025D1, 0xFFFFEFAF, 0x00000471, 0x000025D1, 0xFFFFEFAF, 0x00000471 },
+ { 0x0213F0FD42C64924, 0x00004183, 0xFFFFDF61, 0x000006DA, 0x0000193C, 0xFFFFF88F, 0x000002EC, 0x0000193C, 0xFFFFF88F, 0x000002EC },
+ { 0x0213EA94DE242164, 0x00002DFC, 0xFFFFEDF2, 0x0000047A, 0x00001755, 0xFFFFFAC2, 0x000002AC, 0x00001755, 0xFFFFFAC2, 0x000002AC },
+ { 0x0213F0FD42CA31A4, 0x000033FE, 0xFFFFE774, 0x0000059F, 0x00001E70, 0xFFFFF492, 0x000003A0, 0x00001E70, 0xFFFFF492, 0x000003A0 },
+ { 0x0213F0FD42C629A4, 0x000040D7, 0xFFFFDFB8, 0x000006CE, 0x00001AC8, 0xFFFFF773, 0x0000031D, 0x00001AC8, 0xFFFFF773, 0x0000031D },
+ { 0x0213EA94DE1E1164, 0x00001D02, 0xFFFFF803, 0x00000322, 0x000015FE, 0xFFFFFB71, 0x000002BB, 0x000015FE, 0xFFFFFB71, 0x000002BB },
+ { 0x0213F0FD42D02884, 0x00002EB0, 0xFFFFEC31, 0x000004C4, 0x00001B3C, 0xFFFFF73B, 0x00000330, 0x00001B3C, 0xFFFFF73B, 0x00000330 },
+ { 0x0213F0FD42CA4984, 0x00002D9F, 0xFFFFECBF, 0x000004A8, 0x000022B0, 0xFFFFF23C, 0x000003F9, 0x000022B0, 0xFFFFF23C, 0x000003F9 },
+ { 0x0213F0FD42CC18E4, 0x00002C6A, 0xFFFFEDAC, 0x00000488, 0x00002419, 0xFFFFF159, 0x00000427, 0x00002419, 0xFFFFF159, 0x00000427 },
+ { 0x0213EA94DE1210A4, 0x00002991, 0xFFFFF06C, 0x0000040E, 0x00001AA9, 0xFFFFF8D0, 0x000002E1, 0x00001AA9, 0xFFFFF8D0, 0x000002E1 },
+ { 0x0213EA94DE123904, 0x00002F8E, 0xFFFFED1B, 0x00000493, 0x00001DE4, 0xFFFFF69C, 0x00000347, 0x00001DE4, 0xFFFFF69C, 0x00000347 },
+ { 0x0213EA94DE204184, 0x00002136, 0xFFFFF540, 0x0000037C, 0x000014FF, 0xFFFFFB83, 0x000002B2, 0x000014FF, 0xFFFFFB83, 0x000002B2 },
+ { 0x0213EA94DE0618E4, 0x0000354C, 0xFFFFE97D, 0x0000051A, 0x00001906, 0xFFFFF965, 0x000002DD, 0x00001906, 0xFFFFF965, 0x000002DD },
+ { 0x0213F0FD42C620C4, 0x0000348B, 0xFFFFE94D, 0x0000051F, 0x0000285B, 0xFFFFEF1A, 0x00000473, 0x0000285B, 0xFFFFEF1A, 0x00000473 },
+ { 0x0213EA94DE3218A4, 0x000026E6, 0xFFFFF24E, 0x000003D6, 0x0000141F, 0xFFFFFCCE, 0x00000260, 0x0000141F, 0xFFFFFCCE, 0x00000260 },
+ { 0x0213F0FD42C64164, 0x00003CED, 0xFFFFE2A5, 0x0000064E, 0x00002060, 0xFFFFF3E0, 0x000003B0, 0x00002060, 0xFFFFF3E0, 0x000003B0 },
+ { 0x0213EA94DE021084, 0x000029D4, 0xFFFFEFF7, 0x00000426, 0x00001976, 0xFFFFF8E1, 0x000002EE, 0x00001976, 0xFFFFF8E1, 0x000002EE },
+ { 0x0213F0FD42CA40A4, 0x00003767, 0xFFFFE601, 0x000005CC, 0x00001D22, 0xFFFFF5F4, 0x00000361, 0x00001D22, 0xFFFFF5F4, 0x00000361 },
+ { 0x0213F0FD42C650C4, 0x00003CE8, 0xFFFFE2E8, 0x00000637, 0x0000232C, 0xFFFFF1E7, 0x00000405, 0x0000232C, 0xFFFFF1E7, 0x00000405 },
+ { 0x0213EA94DE201064, 0x000023A8, 0xFFFFF4CD, 0x00000386, 0x00001944, 0xFFFFF983, 0x00000300, 0x00001944, 0xFFFFF983, 0x00000300 },
+ { 0x0213F0FD42CC30A4, 0x00003451, 0xFFFFE8B9, 0x00000551, 0x00001AD7, 0xFFFFF7BF, 0x00000318, 0x00001AD7, 0xFFFFF7BF, 0x00000318 },
+ { 0x0213F0FD42CE2984, 0x0000381B, 0xFFFFE5A0, 0x000005D0, 0x00001E0F, 0xFFFFF521, 0x00000382, 0x00001E0F, 0xFFFFF521, 0x00000382 },
+ { 0x0213EA94DE2038C4, 0x000023A4, 0xFFFFF4A6, 0x00000394, 0x0000171F, 0xFFFFFABB, 0x000002D9, 0x0000171F, 0xFFFFFABB, 0x000002D9 },
+ { 0x0213F0FD42C620A4, 0x00003C2B, 0xFFFFE447, 0x000005F0, 0x0000207F, 0xFFFFF44E, 0x0000039A, 0x0000207F, 0xFFFFF44E, 0x0000039A },
+ { 0x0213F0FD42CC3984, 0x00002F07, 0xFFFFEB70, 0x000004E9, 0x00001765, 0xFFFFF9A5, 0x000002C6, 0x00001765, 0xFFFFF9A5, 0x000002C6 },
+ { 0x0213F0FD42C62984, 0x00003A01, 0xFFFFE4E0, 0x000005E7, 0x0000227A, 0xFFFFF292, 0x000003E5, 0x0000227A, 0xFFFFF292, 0x000003E5 },
+ { 0x0213F0FD42CE20A4, 0x0000376E, 0xFFFFE686, 0x000005A6, 0x00001FCF, 0xFFFFF43B, 0x000003A8, 0x00001FCF, 0xFFFFF43B, 0x000003A8 },
+ { 0x0213F0FFEF5A4984, 0x0000485F, 0xFFFFDCC1, 0x00000713, 0x00002CF8, 0xFFFFEC45, 0x000004DA, 0x00002CF8, 0xFFFFEC45, 0x000004DA },
+ { 0x0213F0FFEF5C3184, 0x0000331C, 0xFFFFE8FF, 0x00000541, 0x00002366, 0xFFFFF19D, 0x00000411, 0x00002366, 0xFFFFF19D, 0x00000411 },
+ { 0x0213F0FFEF643864, 0x00003CF3, 0xFFFFE15A, 0x00000694, 0x00002FB3, 0xFFFFE827, 0x000005B9, 0x00002FB3, 0xFFFFE827, 0x000005B9 },
+ { 0x0213EA94DE321104, 0x000023F3, 0xFFFFF3EA, 0x0000039A, 0x00001345, 0xFFFFFD6B, 0x00000241, 0x00001345, 0xFFFFFD6B, 0x00000241 },
+ { 0x0213F0FFEF5C28A4, 0x000038C0, 0xFFFFE58A, 0x000005CC, 0x000023CA, 0xFFFFF1AA, 0x00000408, 0x000023CA, 0xFFFFF1AA, 0x00000408 },
+ { 0x0213F0FFEF662944, 0x00004976, 0xFFFFDD6A, 0x000006D7, 0x000033C6, 0xFFFFE8EB, 0x0000054D, 0x000033C6, 0xFFFFE8EB, 0x0000054D },
+ { 0x0213F0FFEF644904, 0x00004049, 0xFFFFDF6D, 0x000006D8, 0x00003129, 0xFFFFE716, 0x000005E9, 0x00003129, 0xFFFFE716, 0x000005E9 },
+ { 0x0213F0FFEF661164, 0x000046C2, 0xFFFFDCEB, 0x0000071C, 0x00002E6D, 0xFFFFEA8F, 0x0000052E, 0x00002E6D, 0xFFFFEA8F, 0x0000052E },
+ { 0x0213F0FFEF6238A4, 0x00004080, 0xFFFFE1E1, 0x0000063A, 0x0000396D, 0xFFFFE40A, 0x0000062C, 0x0000396D, 0xFFFFE40A, 0x0000062C },
+ { 0x0213F0FFEF5E2124, 0x00003DE0, 0xFFFFE358, 0x0000060C, 0x00002AA2, 0xFFFFEDBF, 0x000004A0, 0x00002AA2, 0xFFFFEDBF, 0x000004A0 },
+ { 0x0213F0FFEF5E3144, 0x00003FC0, 0xFFFFE2A1, 0x0000061A, 0x000027D8, 0xFFFFEFEC, 0x0000043A, 0x000027D8, 0xFFFFEFEC, 0x0000043A },
+ { 0x0213F0FFEF661924, 0x00003FBF, 0xFFFFE2F5, 0x00000603, 0x000032D7, 0xFFFFE900, 0x00000552, 0x000032D7, 0xFFFFE900, 0x00000552 },
+ { 0x0213F0FFEF5C10E4, 0x000035EE, 0xFFFFE6CA, 0x000005A2, 0x0000247C, 0xFFFFF088, 0x00000446, 0x0000247C, 0xFFFFF088, 0x00000446 },
+ { 0x0213F0FFEF643884, 0x000039C8, 0xFFFFE3AE, 0x0000062A, 0x000028AF, 0xFFFFED24, 0x000004DF, 0x000028AF, 0xFFFFED24, 0x000004DF },
+ { 0x0213F0FFEF5C2884, 0x00003BDE, 0xFFFFE33B, 0x00000632, 0x00001B6C, 0xFFFFF720, 0x00000326, 0x00001B6C, 0xFFFFF720, 0x00000326 },
+ { 0x0213F0FFEF7210A4, 0x00003818, 0xFFFFE57D, 0x000005D4, 0x000020EF, 0xFFFFF327, 0x000003CE, 0x000020EF, 0xFFFFF327, 0x000003CE },
+ { 0x0213F0FFEF5E19A4, 0x000038DA, 0xFFFFE561, 0x000005D3, 0x0000297D, 0xFFFFED6D, 0x000004C5, 0x0000297D, 0xFFFFED6D, 0x000004C5 },
+ { 0x0213F0FFEF684884, 0x000027AC, 0xFFFFF0CE, 0x00000417, 0x00001F5F, 0xFFFFF484, 0x000003B2, 0x00001F5F, 0xFFFFF484, 0x000003B2 },
+ { 0x0213F0FFEF6648A4, 0x00003F02, 0xFFFFE222, 0x00000643, 0x000026D4, 0xFFFFF000, 0x00000443, 0x000026D4, 0xFFFFF000, 0x00000443 },
+ { 0x0213F0FFEF624164, 0x00004303, 0xFFFFDFE3, 0x00000690, 0x0000312C, 0xFFFFE912, 0x00000561, 0x0000312C, 0xFFFFE912, 0x00000561 },
+ { 0x0213F0FFEF600904, 0x000039E5, 0xFFFFE31F, 0x00000657, 0x00001D23, 0xFFFFF51F, 0x00000386, 0x00001D23, 0xFFFFF51F, 0x00000386 },
+ { 0x0213F0FFEF661144, 0x000041FA, 0xFFFFE01B, 0x00000697, 0x00002767, 0xFFFFEF90, 0x00000455, 0x00002767, 0xFFFFEF90, 0x00000455 },
+ { 0x0213F0FFEF6830A4, 0x00002888, 0xFFFFF11C, 0x00000403, 0x00001864, 0xFFFFF9D8, 0x000002D3, 0x00001864, 0xFFFFF9D8, 0x000002D3 },
+ { 0x0213EA94DE201864, 0x0000215C, 0xFFFFF5B6, 0x0000036D, 0x000015C5, 0xFFFFFB8A, 0x000002B5, 0x000015C5, 0xFFFFFB8A, 0x000002B5 },
+ { 0x0213F0FFEF683984, 0x00002FAF, 0xFFFFEC27, 0x000004CA, 0x00002184, 0xFFFFF39C, 0x000003CD, 0x00002184, 0xFFFFF39C, 0x000003CD },
+ { 0x0213F0FFEF5E10C4, 0x00004ACE, 0xFFFFD9A3, 0x000007BC, 0x00001A5D, 0xFFFFF7F6, 0x000002FC, 0x00001A5D, 0xFFFFF7F6, 0x000002FC },
+ { 0x0213F0FFEF5A3044, 0x00003763, 0xFFFFE797, 0x0000055F, 0x000029B5, 0xFFFFEEA1, 0x00000474, 0x000029B5, 0xFFFFEEA1, 0x00000474 },
+ { 0x0213F0FFEF5E3164, 0x00003832, 0xFFFFE6F9, 0x00000575, 0x00002C99, 0xFFFFEC42, 0x000004E3, 0x00002C99, 0xFFFFEC42, 0x000004E3 },
+ { 0x0213F0FFEF604164, 0x000041C9, 0xFFFFDE33, 0x0000071E, 0x0000199D, 0xFFFFF808, 0x000002F9, 0x0000199D, 0xFFFFF808, 0x000002F9 },
+ { 0x0213F0FFEF641164, 0x0000474A, 0xFFFFD96E, 0x00000802, 0x00002A30, 0xFFFFEB57, 0x0000053F, 0x00002A30, 0xFFFFEB57, 0x0000053F },
+ { 0x0213F0FFEF5C31C4, 0x0000312F, 0xFFFFEA6A, 0x00000508, 0x000029D3, 0xFFFFED38, 0x000004D3, 0x000029D3, 0xFFFFED38, 0x000004D3 },
+ { 0x0213F0FFEF7210C4, 0x00003BD6, 0xFFFFE2E7, 0x00000644, 0x00002093, 0xFFFFF37B, 0x000003BD, 0x00002093, 0xFFFFF37B, 0x000003BD },
+ { 0x0213F0FFEF6840E4, 0x00002F94, 0xFFFFECD4, 0x000004A3, 0x00002196, 0xFFFFF40B, 0x000003B5, 0x00002196, 0xFFFFF40B, 0x000003B5 },
+ { 0x0213F0FFEF5E1944, 0x0000369B, 0xFFFFE762, 0x00000571, 0x00002726, 0xFFFFEF99, 0x00000459, 0x00002726, 0xFFFFEF99, 0x00000459 },
+ { 0x0213F0FFEF642064, 0x00003F57, 0xFFFFDF47, 0x000006F4, 0x00002E5F, 0xFFFFE8AE, 0x000005AB, 0x00002E5F, 0xFFFFE8AE, 0x000005AB },
+ { 0x0213EA94DE0A40C4, 0x00004313, 0xFFFFDD81, 0x0000072D, 0x00002468, 0xFFFFF068, 0x00000440, 0x00002468, 0xFFFFF068, 0x00000440 },
+ { 0x0213F0FFEF683044, 0x00002A35, 0xFFFFEFA8, 0x00000441, 0x00001F3F, 0xFFFFF4F3, 0x000003A0, 0x00001F3F, 0xFFFFF4F3, 0x000003A0 },
+ { 0x0213F0FFEF6630A4, 0x00003E33, 0xFFFFE4B0, 0x000005AF, 0x00002802, 0xFFFFF092, 0x00000412, 0x00002802, 0xFFFFF092, 0x00000412 },
+ { 0x0213EA94DE323904, 0x00002815, 0xFFFFF20E, 0x000003DD, 0x00001C33, 0xFFFFF7D5, 0x0000032A, 0x00001C33, 0xFFFFF7D5, 0x0000032A },
+ { 0x0213F0FFEF5A2184, 0x00003CC2, 0xFFFFE43E, 0x000005DE, 0x00002C16, 0xFFFFECED, 0x000004BA, 0x00002C16, 0xFFFFECED, 0x000004BA },
+ { 0x0213F0FFEF5C4084, 0x00003CFA, 0xFFFFE1EE, 0x00000673, 0x00001F7D, 0xFFFFF402, 0x000003AE, 0x00001F7D, 0xFFFFF402, 0x000003AE },
+ { 0x0213F0FFEF622104, 0x0000486E, 0xFFFFDD43, 0x000006EE, 0x000036F0, 0xFFFFE609, 0x000005D5, 0x000036F0, 0xFFFFE609, 0x000005D5 },
+ { 0x0213F0FFEF5C4964, 0x000039FE, 0xFFFFE41F, 0x00000613, 0x0000266C, 0xFFFFEF35, 0x0000047D, 0x0000266C, 0xFFFFEF35, 0x0000047D },
+ { 0x0213EA94DE123124, 0x00002EA4, 0xFFFFEE3B, 0x00000462, 0x00002126, 0xFFFFF4E2, 0x0000038F, 0x00002126, 0xFFFFF4E2, 0x0000038F },
+ { 0x0213F0FFEF683944, 0x00002D2E, 0xFFFFEE7B, 0x00000462, 0x0000229D, 0xFFFFF363, 0x000003D4, 0x0000229D, 0xFFFFF363, 0x000003D4 },
+ { 0x0213F0FFEF5E2844, 0x0000375C, 0xFFFFE695, 0x0000059D, 0x00002319, 0xFFFFF237, 0x000003EE, 0x00002319, 0xFFFFF237, 0x000003EE },
+ { 0x0213F0FFEF7250C4, 0x00004522, 0xFFFFDC71, 0x0000075E, 0x0000247E, 0xFFFFF0A0, 0x0000043C, 0x0000247E, 0xFFFFF0A0, 0x0000043C },
+ { 0x0213EA94DE1248E4, 0x00002E58, 0xFFFFECB9, 0x000004A9, 0x0000199A, 0xFFFFF8CF, 0x000002E9, 0x0000199A, 0xFFFFF8CF, 0x000002E9 },
+ { 0x0213F0FFEF6438E4, 0x00003791, 0xFFFFE5FE, 0x000005B6, 0x000029F5, 0xFFFFED0D, 0x000004CD, 0x000029F5, 0xFFFFED0D, 0x000004CD },
+ { 0x0213EA94DE244144, 0x00002E9E, 0xFFFFEC8D, 0x000004C1, 0x000019D0, 0xFFFFF869, 0x0000030F, 0x000019D0, 0xFFFFF869, 0x0000030F },
+ { 0x0213EA94DE203964, 0x0000237C, 0xFFFFF435, 0x000003A6, 0x000014EB, 0xFFFFFBC4, 0x000002AF, 0x000014EB, 0xFFFFFBC4, 0x000002AF },
+ { 0x0213F0FFEF662924, 0x00003FE5, 0xFFFFE4A2, 0x000005A0, 0x00003416, 0xFFFFE995, 0x00000523, 0x00003416, 0xFFFFE995, 0x00000523 },
+ { 0x0213F0FFEF5C0924, 0x00002B27, 0xFFFFED51, 0x000004A5, 0x000025D1, 0xFFFFEF18, 0x00000492, 0x000025D1, 0xFFFFEF18, 0x00000492 },
+ { 0x0213F0FFEF684904, 0x00002D77, 0xFFFFED79, 0x00000494, 0x00002196, 0xFFFFF352, 0x000003DE, 0x00002196, 0xFFFFF352, 0x000003DE },
+ { 0x0213F0FFEF5C20C4, 0x00003750, 0xFFFFE6AC, 0x00000596, 0x00002524, 0xFFFFF0B5, 0x00000431, 0x00002524, 0xFFFFF0B5, 0x00000431 },
+ { 0x0213EA94DE122944, 0x00002896, 0xFFFFF1BB, 0x000003D9, 0x00001CE0, 0xFFFFF753, 0x0000032F, 0x00001CE0, 0xFFFFF753, 0x0000032F },
+ { 0x0213F0FFEF641984, 0x00003CA7, 0xFFFFE0F7, 0x000006B1, 0x00002CB8, 0xFFFFE9AB, 0x00000587, 0x00002CB8, 0xFFFFE9AB, 0x00000587 },
+ { 0x0213EA94DE322864, 0x00002513, 0xFFFFF323, 0x000003BC, 0x00001965, 0xFFFFF93C, 0x000002F0, 0x00001965, 0xFFFFF93C, 0x000002F0 },
+ { 0x0213F0FFEF662164, 0x00003914, 0xFFFFE683, 0x00000586, 0x00003120, 0xFFFFE9A6, 0x00000543, 0x00003120, 0xFFFFE9A6, 0x00000543 },
+ { 0x0213F0FFEF643904, 0x000040D0, 0xFFFFE007, 0x000006AC, 0x00002B9E, 0xFFFFEBF5, 0x000004FB, 0x00002B9E, 0xFFFFEBF5, 0x000004FB },
+ { 0x0213F0FFEF5A4884, 0x00004412, 0xFFFFDF5F, 0x000006A9, 0x00002A9E, 0xFFFFEDCE, 0x00000498, 0x00002A9E, 0xFFFFEDCE, 0x00000498 },
+ { 0x0213F0FFEF624884, 0x000042A6, 0xFFFFDFEF, 0x00000696, 0x00002E65, 0xFFFFEAAE, 0x00000529, 0x00002E65, 0xFFFFEAAE, 0x00000529 },
+ { 0x0213EA94DE322124, 0x000022E8, 0xFFFFF565, 0x0000035F, 0x00001890, 0xFFFFFA61, 0x000002C6, 0x00001890, 0xFFFFFA61, 0x000002C6 },
+ { 0x0213F0FFEF6239A4, 0x00004637, 0xFFFFDDD8, 0x000006E9, 0x0000349D, 0xFFFFE6C8, 0x000005C7, 0x0000349D, 0xFFFFE6C8, 0x000005C7 },
+ { 0x0213EA94DE263904, 0x00004686, 0xFFFFDC58, 0x0000073D, 0x00003972, 0xFFFFE27B, 0x0000068E, 0x00003972, 0xFFFFE27B, 0x0000068E },
+ { 0x0213F0FFEF6808E4, 0x00002B35, 0xFFFFEE9C, 0x0000046C, 0x00001F5B, 0xFFFFF4A3, 0x000003A9, 0x00001F5B, 0xFFFFF4A3, 0x000003A9 },
+ { 0x0213F0FFEF724144, 0x00003AC9, 0xFFFFE3B2, 0x0000061B, 0x000023A1, 0xFFFFF170, 0x0000040F, 0x000023A1, 0xFFFFF170, 0x0000040F },
+ { 0x0213F0FFEF5E1884, 0x00003C50, 0xFFFFE37E, 0x00000617, 0x0000218F, 0xFFFFF339, 0x000003C4, 0x0000218F, 0xFFFFF339, 0x000003C4 },
+ { 0x0213F0FFEF663044, 0x00003793, 0xFFFFE761, 0x0000055D, 0x000029C7, 0xFFFFEE03, 0x00000496, 0x000029C7, 0xFFFFEE03, 0x00000496 },
+ { 0x0213F0FFEF6438A4, 0x000040B5, 0xFFFFDF78, 0x000006DA, 0x00002DED, 0xFFFFEA20, 0x00000551, 0x00002DED, 0xFFFFEA20, 0x00000551 },
+ { 0x0213F0FFEF601144, 0x000039D6, 0xFFFFE37D, 0x0000063C, 0x00001AED, 0xFFFFF6E2, 0x00000331, 0x00001AED, 0xFFFFF6E2, 0x00000331 },
+ { 0x0213F0FFEF662144, 0x0000431F, 0xFFFFE09B, 0x0000066A, 0x00002BDF, 0xFFFFED93, 0x00000496, 0x00002BDF, 0xFFFFED93, 0x00000496 },
+ { 0x0213F0FFEF623864, 0x00004887, 0xFFFFDC65, 0x00000721, 0x00003669, 0xFFFFE5C4, 0x000005E9, 0x00003669, 0xFFFFE5C4, 0x000005E9 },
+ { 0x0213F0FFEF640924, 0x00004120, 0xFFFFDDAE, 0x00000748, 0x0000303B, 0xFFFFE70D, 0x000005FC, 0x0000303B, 0xFFFFE70D, 0x000005FC },
+ { 0x0213F0FFEF5E28A4, 0x0000415D, 0xFFFFE0BE, 0x0000067B, 0x00002FA7, 0xFFFFEA28, 0x00000538, 0x00002FA7, 0xFFFFEA28, 0x00000538 },
+ { 0x0213F0FFEF681904, 0x00002B12, 0xFFFFEFF9, 0x00000428, 0x00001DDA, 0xFFFFF693, 0x00000356, 0x00001DDA, 0xFFFFF693, 0x00000356 },
+ { 0x0213F0FFEF5E3184, 0x00003ED3, 0xFFFFE28D, 0x0000062D, 0x00002B00, 0xFFFFED4E, 0x000004B3, 0x00002B00, 0xFFFFED4E, 0x000004B3 },
+ { 0x0213F0FFEF6250A4, 0x00004218, 0xFFFFE039, 0x0000068F, 0x00002F84, 0xFFFFEA0C, 0x00000541, 0x00002F84, 0xFFFFEA0C, 0x00000541 },
+ { 0x0213F0FFEF5A3844, 0x00003FF5, 0xFFFFE2A3, 0x00000617, 0x00003017, 0xFFFFEA7A, 0x00000520, 0x00003017, 0xFFFFEA7A, 0x00000520 },
+ { 0x0213F0FFEF5A08A4, 0x00004304, 0xFFFFDFCC, 0x0000069E, 0x00002E0C, 0xFFFFEB51, 0x00000505, 0x00002E0C, 0xFFFFEB51, 0x00000505 },
+ { 0x0213F0FFEF641944, 0x00003D3A, 0xFFFFE17F, 0x00000687, 0x0000284C, 0xFFFFED83, 0x000004CD, 0x0000284C, 0xFFFFED83, 0x000004CD },
+ { 0x0213F0FFEF5E40A4, 0x000042F5, 0xFFFFDF76, 0x000006B2, 0x000027B6, 0xFFFFEF72, 0x00000455, 0x000027B6, 0xFFFFEF72, 0x00000455 },
+ { 0x0213F0FFEF5C38C4, 0x00004267, 0xFFFFDF29, 0x000006D5, 0x0000298F, 0xFFFFEDBD, 0x000004AC, 0x0000298F, 0xFFFFEDBD, 0x000004AC },
+ { 0x0213EA94DE240924, 0x0000303E, 0xFFFFEC00, 0x000004CB, 0x000021CD, 0xFFFFF36E, 0x000003D6, 0x000021CD, 0xFFFFF36E, 0x000003D6 },
+ { 0x0213F0FFEF5E28C4, 0x00003127, 0xFFFFEBDB, 0x000004A6, 0x00002E95, 0xFFFFEB78, 0x000004F3, 0x00002E95, 0xFFFFEB78, 0x000004F3 },
+ { 0x0213EA94DE1C1064, 0x00002655, 0xFFFFF2D9, 0x000003CF, 0x000019F5, 0xFFFFF8E7, 0x00000313, 0x000019F5, 0xFFFFF8E7, 0x00000313 },
+ { 0x0213EA94DE164084, 0x00002372, 0xFFFFF449, 0x0000039B, 0x00001544, 0xFFFFFC16, 0x0000028B, 0x00001544, 0xFFFFFC16, 0x0000028B },
+ { 0x0213F0FFEF6628C4, 0x0000348E, 0xFFFFEB20, 0x000004B2, 0x00002BE8, 0xFFFFEE80, 0x00000467, 0x00002BE8, 0xFFFFEE80, 0x00000467 },
+ { 0x0213F0FFEF5E1104, 0x00004092, 0xFFFFE073, 0x0000069B, 0x00002061, 0xFFFFF403, 0x000003A0, 0x00002061, 0xFFFFF403, 0x000003A0 },
+ { 0x0213F0FFEF7220E4, 0x000039D1, 0xFFFFE55D, 0x000005CC, 0x000025CB, 0xFFFFF0C0, 0x00000428, 0x000025CB, 0xFFFFF0C0, 0x00000428 },
+ { 0x0213F0FFEF5E4884, 0x000042AA, 0xFFFFDF68, 0x000006C2, 0x0000290B, 0xFFFFEE78, 0x00000485, 0x0000290B, 0xFFFFEE78, 0x00000485 },
+ { 0x0213F0FFEF7218C4, 0x0000356F, 0xFFFFE7AC, 0x0000056E, 0x00001BE8, 0xFFFFF6E3, 0x0000032A, 0x00001BE8, 0xFFFFF6E3, 0x0000032A },
+ { 0x0213F0FFEF5E1144, 0x00003525, 0xFFFFE7FF, 0x0000055D, 0x0000242C, 0xFFFFF12E, 0x0000041D, 0x0000242C, 0xFFFFF12E, 0x0000041D },
+ { 0x0213F0FFEF5C48C4, 0x00003360, 0xFFFFE895, 0x00000550, 0x00002175, 0xFFFFF29E, 0x000003E9, 0x00002175, 0xFFFFF29E, 0x000003E9 },
+ { 0x0213F0FFEF6440A4, 0x00003C94, 0xFFFFE1C4, 0x0000067E, 0x00002E28, 0xFFFFE964, 0x0000057F, 0x00002E28, 0xFFFFE964, 0x0000057F },
+ { 0x0213F0FFEF724124, 0x0000431C, 0xFFFFDE4B, 0x000006FF, 0x00002270, 0xFFFFF268, 0x000003E5, 0x00002270, 0xFFFFF268, 0x000003E5 },
+ { 0x0213EA94DE1218C4, 0x00002B67, 0xFFFFF01D, 0x00000414, 0x000019FB, 0xFFFFF961, 0x000002D8, 0x000019FB, 0xFFFFF961, 0x000002D8 },
+ { 0x0213F0FFEF5E3984, 0x0000400B, 0xFFFFE13D, 0x0000066F, 0x000024F3, 0xFFFFF125, 0x00000417, 0x000024F3, 0xFFFFF125, 0x00000417 },
+ { 0x0213F0FFEF5A20A4, 0x00004460, 0xFFFFE00E, 0x0000067B, 0x000023DF, 0xFFFFF2E6, 0x000003BB, 0x000023DF, 0xFFFFF2E6, 0x000003BB },
+ { 0x0213F0FFEF641864, 0x00003AFB, 0xFFFFE2C5, 0x00000650, 0x00002D46, 0xFFFFE9C4, 0x00000571, 0x00002D46, 0xFFFFE9C4, 0x00000571 },
+ { 0x0213F0FFEF622924, 0x00005482, 0xFFFFD5BC, 0x0000081A, 0x00003250, 0xFFFFE961, 0x00000541, 0x00003250, 0xFFFFE961, 0x00000541 },
+ { 0x0213F0FFEF5C2944, 0x00003D27, 0xFFFFE2FA, 0x00000632, 0x00002A4D, 0xFFFFED6A, 0x000004BB, 0x00002A4D, 0xFFFFED6A, 0x000004BB },
+ { 0x0213F0FFEF6018A4, 0x00003E03, 0xFFFFE142, 0x00000690, 0x00001E08, 0xFFFFF555, 0x0000036C, 0x00001E08, 0xFFFFF555, 0x0000036C },
+ { 0x0213F0FFEF5C2064, 0x000031B5, 0xFFFFE97D, 0x00000535, 0x0000232E, 0xFFFFF166, 0x00000422, 0x0000232E, 0xFFFFF166, 0x00000422 },
+ { 0x0213F0FFEF5E18E4, 0x00003753, 0xFFFFE724, 0x00000575, 0x0000281A, 0xFFFFEF1A, 0x0000046B, 0x0000281A, 0xFFFFEF1A, 0x0000046B },
+ { 0x0213EA94DE204144, 0x00002071, 0xFFFFF5C9, 0x0000036F, 0x00001470, 0xFFFFFBF7, 0x000002A5, 0x00001470, 0xFFFFFBF7, 0x000002A5 },
+ { 0x0213F0FFEF683144, 0x00002799, 0xFFFFF223, 0x000003CF, 0x00001CD3, 0xFFFFF74A, 0x00000333, 0x00001CD3, 0xFFFFF74A, 0x00000333 },
+ { 0x0213F0FFEF6610C4, 0x000040DF, 0xFFFFE11C, 0x00000664, 0x000031D4, 0xFFFFE8BC, 0x0000056F, 0x000031D4, 0xFFFFE8BC, 0x0000056F },
+ { 0x0213F0FFEF6440C4, 0x00003A4D, 0xFFFFE3A6, 0x00000627, 0x00002871, 0xFFFFEDA0, 0x000004C0, 0x00002871, 0xFFFFEDA0, 0x000004C0 },
+ { 0x0213F0FFEF681984, 0x00002AF9, 0xFFFFEED7, 0x00000464, 0x0000219B, 0xFFFFF368, 0x000003D6, 0x0000219B, 0xFFFFF368, 0x000003D6 },
+ { 0x0213EA94DE323124, 0x000026D5, 0xFFFFF36C, 0x000003A3, 0x00001BC6, 0xFFFFF881, 0x00000311, 0x00001BC6, 0xFFFFF881, 0x00000311 },
+ { 0x0213F0FFEF5E2044, 0x0000325D, 0xFFFFEA07, 0x0000050B, 0x000026D1, 0xFFFFEFB3, 0x0000045A, 0x000026D1, 0xFFFFEFB3, 0x0000045A },
+ { 0x0213F0FFEF682864, 0x00002F75, 0xFFFFEC64, 0x000004BE, 0x00001EEB, 0xFFFFF559, 0x00000386, 0x00001EEB, 0xFFFFF559, 0x00000386 },
+ { 0x0213F0FFEF5A38A4, 0x00003C2F, 0xFFFFE541, 0x000005A3, 0x000025B6, 0xFFFFF16F, 0x000003FA, 0x000025B6, 0xFFFFF16F, 0x000003FA },
+ { 0x0213F0FFEF684924, 0x00002BC2, 0xFFFFEE89, 0x0000046A, 0x00001D04, 0xFFFFF651, 0x00000361, 0x00001D04, 0xFFFFF651, 0x00000361 },
+ { 0x0213F0FFEF6829A4, 0x00002DD0, 0xFFFFED40, 0x0000049F, 0x00001C8C, 0xFFFFF6B3, 0x00000353, 0x00001C8C, 0xFFFFF6B3, 0x00000353 },
+ { 0x0213EA94DE1C08E4, 0x000021ED, 0xFFFFF530, 0x00000380, 0x00001643, 0xFFFFFB1C, 0x000002C3, 0x00001643, 0xFFFFFB1C, 0x000002C3 },
+ { 0x0213EA94DE321904, 0x000028C7, 0xFFFFF160, 0x000003FD, 0x00001990, 0xFFFFF994, 0x000002E2, 0x00001990, 0xFFFFF994, 0x000002E2 },
+ { 0x0213F0FFEF6610A4, 0x0000431C, 0xFFFFDF9D, 0x000006A3, 0x000034A6, 0xFFFFE6B0, 0x000005C9, 0x000034A6, 0xFFFFE6B0, 0x000005C9 },
+ { 0x0213EA94DE2630A4, 0x00004115, 0xFFFFE0D6, 0x00000667, 0x000031AD, 0xFFFFE850, 0x00000585, 0x000031AD, 0xFFFFE850, 0x00000585 },
+ { 0x0213F0FFEF643924, 0x0000424A, 0xFFFFDEEC, 0x000006E1, 0x0000346A, 0xFFFFE5EA, 0x00000602, 0x0000346A, 0xFFFFE5EA, 0x00000602 },
+ { 0x0213F0FFEF661984, 0x00004990, 0xFFFFDAFA, 0x00000771, 0x00002A9C, 0xFFFFED37, 0x000004BC, 0x00002A9C, 0xFFFFED37, 0x000004BC },
+ { 0x0213F0FFEF6428A4, 0x00003858, 0xFFFFE568, 0x000005D2, 0x00003030, 0xFFFFE8B0, 0x0000058E, 0x00003030, 0xFFFFE8B0, 0x0000058E },
+ { 0x0213F0FFEF684164, 0x00001EDC, 0xFFFFF6CD, 0x00000322, 0x00001FCA, 0xFFFFF4BD, 0x0000039E, 0x00001FCA, 0xFFFFF4BD, 0x0000039E },
+ { 0x0213F0FFEF662124, 0x00004C88, 0xFFFFDBA3, 0x0000071B, 0x000030C4, 0xFFFFEAFD, 0x000004F7, 0x000030C4, 0xFFFFEAFD, 0x000004F7 },
+ { 0x0213F0FFEF680904, 0x00002B9A, 0xFFFFEE41, 0x0000047D, 0x00002131, 0xFFFFF344, 0x000003E5, 0x00002131, 0xFFFFF344, 0x000003E5 },
+ { 0x0213F0FFEF623984, 0x00003E4B, 0xFFFFE33C, 0x000005FA, 0x00003877, 0xFFFFE437, 0x0000062E, 0x00003877, 0xFFFFE437, 0x0000062E },
+ { 0x0213EA94DE322064, 0x00002376, 0xFFFFF444, 0x0000038A, 0x000017ED, 0xFFFFFA4C, 0x000002C1, 0x000017ED, 0xFFFFFA4C, 0x000002C1 },
+ { 0x0213F0FFEF661084, 0x00004517, 0xFFFFDDF4, 0x000006F2, 0x000030DC, 0xFFFFE8EF, 0x00000571, 0x000030DC, 0xFFFFE8EF, 0x00000571 },
+ { 0x0213F0FFEF681944, 0x0000270C, 0xFFFFF1F3, 0x000003DF, 0x0000207B, 0xFFFFF474, 0x000003AD, 0x0000207B, 0xFFFFF474, 0x000003AD },
+ { 0x0213F0FFEF645144, 0x00004086, 0xFFFFDF39, 0x000006E3, 0x00002A24, 0xFFFFEC2B, 0x000004FF, 0x00002A24, 0xFFFFEC2B, 0x000004FF },
+ { 0x0213F0FFEF5C3124, 0x00003BDE, 0xFFFFE45E, 0x000005EB, 0x00002CD5, 0xFFFFEC45, 0x000004DD, 0x00002CD5, 0xFFFFEC45, 0x000004DD },
+ { 0x0213F0FFEF7230E4, 0x00003803, 0xFFFFE714, 0x00000579, 0x0000288A, 0xFFFFEF21, 0x0000046B, 0x0000288A, 0xFFFFEF21, 0x0000046B },
+ { 0x0213F0FFEF601104, 0x00003F50, 0xFFFFE002, 0x000006CD, 0x00001AD4, 0xFFFFF72E, 0x0000031F, 0x00001AD4, 0xFFFFF72E, 0x0000031F },
+ { 0x0213F0FFEF6820E4, 0x00002968, 0xFFFFF100, 0x00000402, 0x00001FB5, 0xFFFFF57C, 0x0000037F, 0x00001FB5, 0xFFFFF57C, 0x0000037F },
+ { 0x0213F0FFEF662104, 0x00004283, 0xFFFFE2A7, 0x000005F5, 0x00003165, 0xFFFFEB0C, 0x000004EC, 0x00003165, 0xFFFFEB0C, 0x000004EC },
+ { 0x0213F0FFEF6431A4, 0x00004253, 0xFFFFDDA8, 0x00000732, 0x00002E5C, 0xFFFFE90A, 0x00000593, 0x00002E5C, 0xFFFFE90A, 0x00000593 },
+ { 0x0213F0FFEF5C50A4, 0x00003551, 0xFFFFE756, 0x0000058D, 0x000029A7, 0xFFFFED0C, 0x000004DE, 0x000029A7, 0xFFFFED0C, 0x000004DE },
+ { 0x0213F0FFEF6428C4, 0x00003728, 0xFFFFE604, 0x000005C4, 0x00002832, 0xFFFFEE64, 0x00000493, 0x00002832, 0xFFFFEE64, 0x00000493 },
+ { 0x0213F0FFEF623964, 0x00004796, 0xFFFFDCC8, 0x00000715, 0x000032AB, 0xFFFFE848, 0x0000057C, 0x000032AB, 0xFFFFE848, 0x0000057C },
+ { 0x0213F0FFEF6210C4, 0x000049DF, 0xFFFFDB24, 0x0000075F, 0x00003076, 0xFFFFE967, 0x0000055C, 0x00003076, 0xFFFFE967, 0x0000055C },
+ { 0x0213F0FFEF721104, 0x00003F13, 0xFFFFE099, 0x000006A8, 0x00002279, 0xFFFFF226, 0x000003F3, 0x00002279, 0xFFFFF226, 0x000003F3 },
+ { 0x0213F0FFEF6430A4, 0x00003E03, 0xFFFFE19F, 0x00000674, 0x00002D66, 0xFFFFEAA7, 0x00000537, 0x00002D66, 0xFFFFEAA7, 0x00000537 },
+ { 0x0213F0FFEF5C4104, 0x000037DA, 0xFFFFE63F, 0x000005A7, 0x00002543, 0xFFFFF0A0, 0x00000431, 0x00002543, 0xFFFFF0A0, 0x00000431 },
+ { 0x0213F0FFEF624944, 0x00003D82, 0xFFFFE3F5, 0x000005D9, 0x0000332F, 0xFFFFE834, 0x00000577, 0x0000332F, 0xFFFFE834, 0x00000577 },
+ { 0x0213EA94DE1228C4, 0x00002915, 0xFFFFF1E0, 0x000003D4, 0x00002065, 0xFFFFF57B, 0x00000378, 0x00002065, 0xFFFFF57B, 0x00000378 },
+ { 0x0213F0FFEF5E4904, 0x000036FC, 0xFFFFE72D, 0x00000577, 0x00002811, 0xFFFFEF30, 0x00000464, 0x00002811, 0xFFFFEF30, 0x00000464 },
+ { 0x0213F0FFEF623184, 0x00004767, 0xFFFFDD30, 0x000006FD, 0x00003703, 0xFFFFE564, 0x000005F8, 0x00003703, 0xFFFFE564, 0x000005F8 },
+ { 0x0213F0FFEF603184, 0x00003094, 0xFFFFEAA9, 0x000004F5, 0x000022E7, 0xFFFFF200, 0x000003FB, 0x000022E7, 0xFFFFF200, 0x000003FB },
+ { 0x0213F0FFEF641144, 0x00003EF0, 0xFFFFDF83, 0x000006ED, 0x00002A27, 0xFFFFEB7C, 0x00000537, 0x00002A27, 0xFFFFEB7C, 0x00000537 },
+ { 0x0213F0FFEF681124, 0x0000243C, 0xFFFFF358, 0x000003AC, 0x00001DC4, 0xFFFFF5E9, 0x00000372, 0x00001DC4, 0xFFFFF5E9, 0x00000372 },
+ { 0x0213F0FFEF722144, 0x0000284B, 0xFFFFF036, 0x0000040F, 0x00001FCD, 0xFFFFF445, 0x00000395, 0x00001FCD, 0xFFFFF445, 0x00000395 },
+ { 0x0213F0FFEF6840C4, 0x00002611, 0xFFFFF285, 0x000003C7, 0x00001CFE, 0xFFFFF6A0, 0x00000355, 0x00001CFE, 0xFFFFF6A0, 0x00000355 },
+ { 0x0213EA94DE1C39A4, 0x00002292, 0xFFFFF49F, 0x00000393, 0x000017F4, 0xFFFFF9CD, 0x000002F5, 0x000017F4, 0xFFFFF9CD, 0x000002F5 },
+ { 0x0213F0FFEF5E38A4, 0x000037F3, 0xFFFFE68D, 0x00000590, 0x00002443, 0xFFFFF1AD, 0x000003FA, 0x00002443, 0xFFFFF1AD, 0x000003FA },
+ { 0x0213F0FFEF682144, 0x00002C01, 0xFFFFEF3F, 0x00000444, 0x0000210A, 0xFFFFF475, 0x000003A7, 0x0000210A, 0xFFFFF475, 0x000003A7 },
+ { 0x0213EA94DE1210E4, 0x00002C0E, 0xFFFFEF0F, 0x00000446, 0x00001A82, 0xFFFFF8F7, 0x000002DE, 0x00001A82, 0xFFFFF8F7, 0x000002DE },
+ { 0x0213F0FFEF5E20C4, 0x00003FA6, 0xFFFFE20A, 0x0000063F, 0x00002E29, 0xFFFFEB21, 0x00000510, 0x00002E29, 0xFFFFEB21, 0x00000510 },
+ { 0x0213F0FFEF5C2164, 0x00003BCD, 0xFFFFE31B, 0x0000063C, 0x000019AF, 0xFFFFF83D, 0x000002F8, 0x000019AF, 0xFFFFF83D, 0x000002F8 },
+ { 0x0213F0FFEF664164, 0x000044C8, 0xFFFFDF08, 0x000006B0, 0x00002E2E, 0xFFFFEB62, 0x000004FD, 0x00002E2E, 0xFFFFEB62, 0x000004FD },
+ { 0x0213F0FFEF5C1884, 0x00003790, 0xFFFFE571, 0x000005E3, 0x00002042, 0xFFFFF35D, 0x000003CF, 0x00002042, 0xFFFFF35D, 0x000003CF },
+ { 0x0213F0FFEF6050E4, 0x000038AC, 0xFFFFE46C, 0x00000609, 0x0000215E, 0xFFFFF22D, 0x00000403, 0x0000215E, 0xFFFFF22D, 0x00000403 },
+ { 0x0213F0FFEF5E29A4, 0x00003A1E, 0xFFFFE536, 0x000005C9, 0x000024F3, 0xFFFFF11A, 0x0000041B, 0x000024F3, 0xFFFFF11A, 0x0000041B },
+ { 0x0213F0FFEF6650E4, 0x0000431A, 0xFFFFDF1B, 0x000006C5, 0x00002F34, 0xFFFFEA02, 0x00000545, 0x00002F34, 0xFFFFEA02, 0x00000545 },
+ { 0x0213F0FFEF641904, 0x000042DC, 0xFFFFDE28, 0x0000070C, 0x00003B53, 0xFFFFE0EA, 0x000006E2, 0x00003B53, 0xFFFFE0EA, 0x000006E2 },
+ { 0x0213F0FFEF683164, 0x0000264B, 0xFFFFF29A, 0x000003C4, 0x000021D0, 0xFFFFF3CE, 0x000003C4, 0x000021D0, 0xFFFFF3CE, 0x000003C4 },
+ { 0x0213F0FFEF5A4064, 0x00004225, 0xFFFFE0E8, 0x00000665, 0x00002B53, 0xFFFFED89, 0x0000049F, 0x00002B53, 0xFFFFED89, 0x0000049F },
+ { 0x0213EA94DE204924, 0x00001FCC, 0xFFFFF63F, 0x00000358, 0x000019E8, 0xFFFFF882, 0x0000032A, 0x000019E8, 0xFFFFF882, 0x0000032A },
+ { 0x0213F0FFEF6240A4, 0x000045E0, 0xFFFFDDD0, 0x000006ED, 0x00003193, 0xFFFFE8BD, 0x00000572, 0x00003193, 0xFFFFE8BD, 0x00000572 },
+ { 0x0213F0FFEF683924, 0x000024FC, 0xFFFFF366, 0x000003A6, 0x00001FE8, 0xFFFFF509, 0x00000394, 0x00001FE8, 0xFFFFF509, 0x00000394 },
+ { 0x0213F0FFEF5C4884, 0x0000378F, 0xFFFFE54B, 0x000005F1, 0x00001C9B, 0xFFFFF5C7, 0x00000368, 0x00001C9B, 0xFFFFF5C7, 0x00000368 },
+ { 0x0213F0FFEF6418A4, 0x00003CF3, 0xFFFFE15A, 0x00000694, 0x00002CDD, 0xFFFFEA44, 0x00000557, 0x00002CDD, 0xFFFFEA44, 0x00000557 },
+ { 0x0213EA94DE200904, 0x000021EC, 0xFFFFF4F4, 0x0000038F, 0x00001511, 0xFFFFFBF5, 0x0000029E, 0x00001511, 0xFFFFFBF5, 0x0000029E },
+ { 0x0213F0FFEF6010A4, 0x00003C8A, 0xFFFFE1C1, 0x00000685, 0x000019C7, 0xFFFFF7E2, 0x00000301, 0x000019C7, 0xFFFFF7E2, 0x00000301 },
+ { 0x0213F0FFEF5E2064, 0x00003908, 0xFFFFE5C7, 0x000005B3, 0x00002793, 0xFFFFEF46, 0x00000465, 0x00002793, 0xFFFFEF46, 0x00000465 },
+ { 0x0213F0FFEF605104, 0x000040A3, 0xFFFFDE61, 0x00000725, 0x00002077, 0xFFFFF2CE, 0x000003E8, 0x00002077, 0xFFFFF2CE, 0x000003E8 },
+ { 0x0213F0FFEF664144, 0x00003DCA, 0xFFFFE34D, 0x00000608, 0x00002D66, 0xFFFFEBDF, 0x000004E8, 0x00002D66, 0xFFFFEBDF, 0x000004E8 },
+ { 0x0213F0FFEF5E50C4, 0x00003085, 0xFFFFEB70, 0x000004C8, 0x000029B1, 0xFFFFEDD9, 0x000004A5, 0x000029B1, 0xFFFFEDD9, 0x000004A5 },
+ { 0x0213EA94DE083884, 0x00004C73, 0xFFFFD676, 0x0000086C, 0x0000280A, 0xFFFFED89, 0x000004C2, 0x0000280A, 0xFFFFED89, 0x000004C2 },
+ { 0x0213EA94DE242164, 0x00002CE5, 0xFFFFEE8C, 0x00000466, 0x00001755, 0xFFFFFAC2, 0x000002AC, 0x00001755, 0xFFFFFAC2, 0x000002AC },
+ { 0x0213F0FFEF621124, 0x0000489F, 0xFFFFDBF1, 0x0000073E, 0x0000332D, 0xFFFFE786, 0x000005AD, 0x0000332D, 0xFFFFE786, 0x000005AD },
+ { 0x0213F0FFEF602864, 0x00003D09, 0xFFFFE193, 0x00000689, 0x00001E82, 0xFFFFF4C0, 0x00000386, 0x00001E82, 0xFFFFF4C0, 0x00000386 },
+ { 0x0213F0FFEF644104, 0x00003E4C, 0xFFFFE131, 0x00000689, 0x00002F4E, 0xFFFFE925, 0x0000057B, 0x00002F4E, 0xFFFFE925, 0x0000057B },
+ { 0x0213F0FFEF5A4084, 0x00003B31, 0xFFFFE53F, 0x000005B3, 0x0000248A, 0xFFFFF211, 0x000003DF, 0x0000248A, 0xFFFFF211, 0x000003DF },
+ { 0x0213F0FFEF644124, 0x000038DD, 0xFFFFE54A, 0x000005C9, 0x00002B6D, 0xFFFFEBDF, 0x00000502, 0x00002B6D, 0xFFFFEBDF, 0x00000502 },
+ { 0x0213F0FFEF684064, 0x00002698, 0xFFFFF1A8, 0x000003F2, 0x00002163, 0xFFFFF34B, 0x000003E3, 0x00002163, 0xFFFFF34B, 0x000003E3 },
+ { 0x0213EA94DE201064, 0x000023A8, 0xFFFFF4CD, 0x00000386, 0x00001944, 0xFFFFF983, 0x00000300, 0x00001944, 0xFFFFF983, 0x00000300 },
+ { 0x0213F0FFEF6418C4, 0x00003EAF, 0xFFFFE0C3, 0x000006A0, 0x000030AB, 0xFFFFE829, 0x000005A6, 0x000030AB, 0xFFFFE829, 0x000005A6 },
+ { 0x0213F0FFEF684944, 0x00002E89, 0xFFFFECA6, 0x000004B6, 0x00001FA0, 0xFFFFF4A8, 0x000003A3, 0x00001FA0, 0xFFFFF4A8, 0x000003A3 },
+ { 0x0213F0FFEF6828A4, 0x000028A4, 0xFFFFF112, 0x00000402, 0x00001F7C, 0xFFFFF545, 0x0000038A, 0x00001F7C, 0xFFFFF545, 0x0000038A },
+ { 0x0213F0FFEF6650A4, 0x00004135, 0xFFFFDFA2, 0x000006C5, 0x0000324C, 0xFFFFE7AA, 0x000005AF, 0x0000324C, 0xFFFFE7AA, 0x000005AF },
+ { 0x0213EA94DE2038C4, 0x00002012, 0xFFFFF693, 0x00000352, 0x0000171F, 0xFFFFFABB, 0x000002D9, 0x0000171F, 0xFFFFFABB, 0x000002D9 },
+ { 0x0213F0FFEF643084, 0x00003D7C, 0xFFFFE1BC, 0x00000671, 0x00002A45, 0xFFFFEC84, 0x000004EC, 0x00002A45, 0xFFFFEC84, 0x000004EC },
+ { 0x0213F0FFEF723064, 0x00004172, 0xFFFFDF58, 0x000006DA, 0x00002504, 0xFFFFF0A6, 0x00000431, 0x00002504, 0xFFFFF0A6, 0x00000431 },
+ { 0x0213F0FE99281944, 0x000029C7, 0xFFFFF087, 0x00000414, 0x00001DCB, 0xFFFFF675, 0x0000035F, 0x00001DCB, 0xFFFFF675, 0x0000035F },
+ { 0x0213F0FE992A29A4, 0x000027F0, 0xFFFFF05A, 0x00000432, 0x00001707, 0xFFFFFA0E, 0x000002D1, 0x00001707, 0xFFFFFA0E, 0x000002D1 },
+ { 0x0213F0FE99222144, 0x00003279, 0xFFFFE9F7, 0x00000511, 0x00001B5E, 0xFFFFF787, 0x00000317, 0x00001B5E, 0xFFFFF787, 0x00000317 },
+ { 0x0213F0FE99322184, 0x000030A5, 0xFFFFEABC, 0x000004FF, 0x000019D1, 0xFFFFF83C, 0x00000304, 0x000019D1, 0xFFFFF83C, 0x00000304 },
+ { 0x0213F0FE99282844, 0x0000283B, 0xFFFFF122, 0x00000402, 0x000019C2, 0xFFFFF8E9, 0x000002FB, 0x000019C2, 0xFFFFF8E9, 0x000002FB },
+ { 0x0213F0FE992C2084, 0x00003376, 0xFFFFE9E1, 0x00000510, 0x000021A7, 0xFFFFF39F, 0x000003BF, 0x000021A7, 0xFFFFF39F, 0x000003BF },
+ { 0x0213F0FE993218C4, 0x000031D2, 0xFFFFEA9C, 0x000004FC, 0x00001F66, 0xFFFFF4E4, 0x00000390, 0x00001F66, 0xFFFFF4E4, 0x00000390 },
+ { 0x0213F0FE991A3864, 0x00003006, 0xFFFFEB18, 0x000004F2, 0x000019B3, 0xFFFFF84E, 0x00000301, 0x000019B3, 0xFFFFF84E, 0x00000301 },
+ { 0x0213F0FE993039A4, 0x0000364F, 0xFFFFE81F, 0x00000556, 0x00002AC9, 0xFFFFED87, 0x000004BD, 0x00002AC9, 0xFFFFED87, 0x000004BD },
+ { 0x0213F0FE992E3844, 0x00003043, 0xFFFFEBAE, 0x000004CD, 0x00001B0C, 0xFFFFF7ED, 0x0000030C, 0x00001B0C, 0xFFFFF7ED, 0x0000030C },
+ { 0x0213F0FE993048A4, 0x000037CE, 0xFFFFE69E, 0x00000596, 0x0000276B, 0xFFFFEF65, 0x0000046E, 0x0000276B, 0xFFFFEF65, 0x0000046E },
+ { 0x0213F0FE992C3104, 0x00003063, 0xFFFFED5E, 0x0000046F, 0x000024AE, 0xFFFFF2C4, 0x000003D8, 0x000024AE, 0xFFFFF2C4, 0x000003D8 },
+ { 0x0213F0FE992E08A4, 0x00002F5D, 0xFFFFEBDC, 0x000004D3, 0x00001EDB, 0xFFFFF50F, 0x0000038E, 0x00001EDB, 0xFFFFF50F, 0x0000038E },
+ { 0x0213F0FE992E48A4, 0x00003148, 0xFFFFEA9A, 0x000004FB, 0x0000192D, 0xFFFFF8E9, 0x000002DF, 0x0000192D, 0xFFFFF8E9, 0x000002DF },
+ { 0x0213F0FE992C2064, 0x00003682, 0xFFFFE7E4, 0x0000055C, 0x0000250E, 0xFFFFF150, 0x0000041A, 0x0000250E, 0xFFFFF150, 0x0000041A },
+ { 0x0213F0FE992A2084, 0x0000284E, 0xFFFFF15A, 0x000003F8, 0x00001CE2, 0xFFFFF6F9, 0x0000034F, 0x00001CE2, 0xFFFFF6F9, 0x0000034F },
+ { 0x0213F0FE993018A4, 0x00003171, 0xFFFFEAE9, 0x000004ED, 0x00001F40, 0xFFFFF513, 0x00000384, 0x00001F40, 0xFFFFF513, 0x00000384 },
+ { 0x0213F0FE99323044, 0x000031BD, 0xFFFFEA64, 0x0000050A, 0x00001EFD, 0xFFFFF4F7, 0x00000390, 0x00001EFD, 0xFFFFF4F7, 0x00000390 },
+ { 0x0213F0FE992E50E4, 0x00003050, 0xFFFFEB29, 0x000004EA, 0x000019B3, 0xFFFFF878, 0x000002F9, 0x000019B3, 0xFFFFF878, 0x000002F9 },
+ { 0x0213F0FE992C1904, 0x00003400, 0xFFFFE9A0, 0x0000051A, 0x00002460, 0xFFFFF1DA, 0x00000409, 0x00002460, 0xFFFFF1DA, 0x00000409 },
+ { 0x0213F0FE992C4884, 0x000034A1, 0xFFFFE86F, 0x00000558, 0x0000255D, 0xFFFFF09E, 0x00000443, 0x0000255D, 0xFFFFF09E, 0x00000443 },
+ { 0x0213F0FE992E48E4, 0x00003103, 0xFFFFEAD7, 0x000004F0, 0x00001896, 0xFFFFF95A, 0x000002CC, 0x00001896, 0xFFFFF95A, 0x000002CC },
+ { 0x0213F0FE993018E4, 0x00003120, 0xFFFFEB9E, 0x000004CB, 0x000021E8, 0xFFFFF3A2, 0x000003C1, 0x000021E8, 0xFFFFF3A2, 0x000003C1 },
+ { 0x0213F0FE991C50E4, 0x00003558, 0xFFFFE812, 0x00000565, 0x0000256E, 0xFFFFF097, 0x00000447, 0x0000256E, 0xFFFFF097, 0x00000447 },
+ { 0x0213F0FE991A2844, 0x00002DA8, 0xFFFFECA8, 0x000004B7, 0x0000180B, 0xFFFFF96D, 0x000002D8, 0x0000180B, 0xFFFFF96D, 0x000002D8 },
+ { 0x0213F0FE992E3064, 0x00003232, 0xFFFFEA66, 0x000004FF, 0x00001DDE, 0xFFFFF5FE, 0x0000035A, 0x00001DDE, 0xFFFFF5FE, 0x0000035A },
+ { 0x0213F0FE993050E4, 0x000034D2, 0xFFFFE89F, 0x00000548, 0x0000246C, 0xFFFFF17F, 0x00000418, 0x0000246C, 0xFFFFF17F, 0x00000418 },
+ { 0x0213F0FE99304904, 0x000033EC, 0xFFFFE954, 0x0000052A, 0x00002323, 0xFFFFF279, 0x000003EE, 0x00002323, 0xFFFFF279, 0x000003EE },
+ { 0x0213F0FE99303884, 0x000033AA, 0xFFFFE955, 0x0000052D, 0x0000229F, 0xFFFFF2B2, 0x000003E7, 0x0000229F, 0xFFFFF2B2, 0x000003E7 },
+ { 0x0213F0FE99324964, 0x00003258, 0xFFFFE9AA, 0x0000052A, 0x00001D5F, 0xFFFFF5D1, 0x0000036B, 0x00001D5F, 0xFFFFF5D1, 0x0000036B },
+ { 0x0213F0FE993029A4, 0x0000323A, 0xFFFFEA5F, 0x00000504, 0x00002108, 0xFFFFF3D5, 0x000003BA, 0x00002108, 0xFFFFF3D5, 0x000003BA },
+ { 0x0213F0FE992C2184, 0x00003216, 0xFFFFEA6B, 0x000004FF, 0x00001D6E, 0xFFFFF640, 0x00000350, 0x00001D6E, 0xFFFFF640, 0x00000350 },
+ { 0x0213F0FE993210E4, 0x000030C5, 0xFFFFEAC4, 0x000004FC, 0x00001924, 0xFFFFF8C2, 0x000002EE, 0x00001924, 0xFFFFF8C2, 0x000002EE },
+ { 0x0213F0FE99305104, 0x000032BB, 0xFFFFE9F1, 0x00000515, 0x00002211, 0xFFFFF31B, 0x000003D5, 0x00002211, 0xFFFFF31B, 0x000003D5 },
+ { 0x0213F0FE993048C4, 0x0000352C, 0xFFFFE85B, 0x00000553, 0x00002410, 0xFFFFF1B4, 0x0000040F, 0x00002410, 0xFFFFF1B4, 0x0000040F },
+ { 0x0213F0FE992238C4, 0x000036A0, 0xFFFFE7E8, 0x0000055D, 0x00002901, 0xFFFFEEB8, 0x00000489, 0x00002901, 0xFFFFEEB8, 0x00000489 },
+ { 0x0213F0FE992C3044, 0x00003340, 0xFFFFE9D9, 0x00000516, 0x00002332, 0xFFFFF27A, 0x000003F0, 0x00002332, 0xFFFFF27A, 0x000003F0 },
+ { 0x0213F0FE991A38A4, 0x00003564, 0xFFFFE86D, 0x0000054E, 0x00002613, 0xFFFFF07C, 0x00000444, 0x00002613, 0xFFFFF07C, 0x00000444 },
+ { 0x0213F0FE99280904, 0x00002AD1, 0xFFFFEF0B, 0x0000045C, 0x00001DEA, 0xFFFFF5C8, 0x00000381, 0x00001DEA, 0xFFFFF5C8, 0x00000381 },
+ { 0x0213F0FE992220E4, 0x000035B0, 0xFFFFE846, 0x00000555, 0x000027BE, 0xFFFFEF5D, 0x00000474, 0x000027BE, 0xFFFFEF5D, 0x00000474 },
+ { 0x0213F0FE992238A4, 0x000032C4, 0xFFFFEA48, 0x00000502, 0x000022C6, 0xFFFFF2DF, 0x000003DE, 0x000022C6, 0xFFFFF2DF, 0x000003DE },
+ { 0x0213F0FE993008C4, 0x00003036, 0xFFFFEB0D, 0x000004F9, 0x00001FE8, 0xFFFFF41A, 0x000003BC, 0x00001FE8, 0xFFFFF41A, 0x000003BC },
+ { 0x0213F0FE991A0904, 0x000030F8, 0xFFFFEA13, 0x00000524, 0x00001B6A, 0xFFFFF6C9, 0x0000034A, 0x00001B6A, 0xFFFFF6C9, 0x0000034A },
+ { 0x0213F0FE993010A4, 0x00002EE2, 0xFFFFEC0C, 0x000004CB, 0x00001A39, 0xFFFFF814, 0x0000030F, 0x00001A39, 0xFFFFF814, 0x0000030F },
+ { 0x0213F0FE991C3184, 0x00003457, 0xFFFFE924, 0x0000052A, 0x00001E9D, 0xFFFFF59C, 0x00000363, 0x00001E9D, 0xFFFFF59C, 0x00000363 },
+ { 0x0213F0FE99322844, 0x000030BF, 0xFFFFEB18, 0x000004ED, 0x00001D37, 0xFFFFF636, 0x0000035C, 0x00001D37, 0xFFFFF636, 0x0000035C },
+ { 0x0213F0FE992E4084, 0x000031AF, 0xFFFFEA75, 0x000004FE, 0x000019F2, 0xFFFFF87A, 0x000002F0, 0x000019F2, 0xFFFFF87A, 0x000002F0 },
+ { 0x0213F0FE99302884, 0x00003642, 0xFFFFE85B, 0x00000547, 0x00002975, 0xFFFFEE98, 0x0000048B, 0x00002975, 0xFFFFEE98, 0x0000048B },
+ { 0x0213F0FE992E2884, 0x00002E8B, 0xFFFFED1E, 0x0000048E, 0x000019C1, 0xFFFFF917, 0x000002D6, 0x000019C1, 0xFFFFF917, 0x000002D6 },
+ { 0x0213F0FE993241A4, 0x000033D9, 0xFFFFE8E1, 0x00000548, 0x0000224B, 0xFFFFF298, 0x000003F4, 0x0000224B, 0xFFFFF298, 0x000003F4 },
+ { 0x0213F0FE992E28C4, 0x000032BC, 0xFFFFEB0F, 0x000004D6, 0x00002488, 0xFFFFF240, 0x000003F2, 0x00002488, 0xFFFFF240, 0x000003F2 },
+ { 0x0213F0FE99304944, 0x000035FD, 0xFFFFE838, 0x00000553, 0x00002762, 0xFFFFEFBC, 0x00000460, 0x00002762, 0xFFFFEFBC, 0x00000460 },
+ { 0x0213F0FE992818A4, 0x0000268B, 0xFFFFF263, 0x000003D1, 0x00001914, 0xFFFFF977, 0x000002E8, 0x00001914, 0xFFFFF977, 0x000002E8 },
+ { 0x0213F0FE992C3184, 0x0000330B, 0xFFFFEA1E, 0x00000505, 0x000020B1, 0xFFFFF44D, 0x0000039E, 0x000020B1, 0xFFFFF44D, 0x0000039E },
+ { 0x0213F0FE99222084, 0x0000326E, 0xFFFFEA26, 0x00000508, 0x00001C17, 0xFFFFF722, 0x00000328, 0x00001C17, 0xFFFFF722, 0x00000328 },
+ { 0x0213F0FE992A31A4, 0x00002A3F, 0xFFFFEEE8, 0x0000046D, 0x00001B2B, 0xFFFFF737, 0x0000034D, 0x00001B2B, 0xFFFFF737, 0x0000034D },
+ { 0x0213F0FE992C4064, 0x00003732, 0xFFFFE765, 0x00000574, 0x00002A6D, 0xFFFFEDA8, 0x000004B7, 0x00002A6D, 0xFFFFEDA8, 0x000004B7 },
+ { 0x0213F0FE99300924, 0x000034D3, 0xFFFFE827, 0x00000569, 0x000027AA, 0xFFFFEEE7, 0x00000492, 0x000027AA, 0xFFFFEEE7, 0x00000492 },
+ { 0x0213F0FE992E40C4, 0x00003306, 0xFFFFEA39, 0x000004FC, 0x00001DCC, 0xFFFFF655, 0x00000344, 0x00001DCC, 0xFFFFF655, 0x00000344 },
+ { 0x0213F0FE99282044, 0x00002A48, 0xFFFFEFCA, 0x00000439, 0x00001DED, 0xFFFFF60D, 0x00000375, 0x00001DED, 0xFFFFF60D, 0x00000375 },
+ { 0x0213F0FE993038C4, 0x000033A3, 0xFFFFEA36, 0x000004F9, 0x0000247C, 0xFFFFF21F, 0x000003F4, 0x0000247C, 0xFFFFF21F, 0x000003F4 },
+ { 0x0213F0FE992C3164, 0x0000311B, 0xFFFFEB76, 0x000004D1, 0x00001EB1, 0xFFFFF5B6, 0x00000366, 0x00001EB1, 0xFFFFF5B6, 0x00000366 },
+ { 0x0213F0FE99324164, 0x00003307, 0xFFFFE97F, 0x0000052A, 0x00001E76, 0xFFFFF54D, 0x0000037C, 0x00001E76, 0xFFFFF54D, 0x0000037C },
+ { 0x0213F0FE991C2144, 0x0000344B, 0xFFFFE9C5, 0x00000509, 0x000020D6, 0xFFFFF486, 0x0000038F, 0x000020D6, 0xFFFFF486, 0x0000038F },
+ { 0x0213F0FE992C3144, 0x000034B9, 0xFFFFEA0B, 0x000004F7, 0x000027B3, 0xFFFFF057, 0x0000043A, 0x000027B3, 0xFFFFF057, 0x0000043A },
+ { 0x0213F0FE99301964, 0x00003360, 0xFFFFE984, 0x00000527, 0x00002238, 0xFFFFF2EE, 0x000003E0, 0x00002238, 0xFFFFF2EE, 0x000003E0 },
+ { 0x0213F0FE99302124, 0x0000315C, 0xFFFFEC05, 0x000004B1, 0x000023C8, 0xFFFFF2CC, 0x000003DE, 0x000023C8, 0xFFFFF2CC, 0x000003DE },
+ { 0x0213F0FE992C2864, 0x0000389B, 0xFFFFE6D5, 0x00000582, 0x00002C6C, 0xFFFFEC92, 0x000004DE, 0x00002C6C, 0xFFFFEC92, 0x000004DE },
+ { 0x0213F0FE992E1124, 0x00003058, 0xFFFFEB30, 0x000004E6, 0x000019B5, 0xFFFFF88B, 0x000002F1, 0x000019B5, 0xFFFFF88B, 0x000002F1 },
+ { 0x0213F0FE992E0904, 0x00002F69, 0xFFFFEB4A, 0x000004F1, 0x00001B82, 0xFFFFF6EC, 0x00000341, 0x00001B82, 0xFFFFF6EC, 0x00000341 },
+ { 0x0213F0FE991A18E4, 0x000031EB, 0xFFFFEA64, 0x00000508, 0x00002059, 0xFFFFF427, 0x000003B0, 0x00002059, 0xFFFFF427, 0x000003B0 },
+ { 0x0213F0FE99224124, 0x000033E2, 0xFFFFE94D, 0x0000052A, 0x000020BF, 0xFFFFF40B, 0x000003AB, 0x000020BF, 0xFFFFF40B, 0x000003AB },
+ { 0x0213F0FE99283184, 0x00002AF9, 0xFFFFEFE9, 0x00000427, 0x00001F72, 0xFFFFF57A, 0x00000383, 0x00001F72, 0xFFFFF57A, 0x00000383 },
+ { 0x0213F0FE992C2824, 0x00003282, 0xFFFFEA88, 0x000004FA, 0x00002561, 0xFFFFF126, 0x0000042B, 0x00002561, 0xFFFFF126, 0x0000042B },
+ { 0x0213F0FE993010E4, 0x0000308A, 0xFFFFEB5D, 0x000004E0, 0x00001E83, 0xFFFFF577, 0x00000378, 0x00001E83, 0xFFFFF577, 0x00000378 },
+ { 0x0213F0FE99324884, 0x0000336E, 0xFFFFE8C8, 0x00000553, 0x0000217C, 0xFFFFF2E1, 0x000003EB, 0x0000217C, 0xFFFFF2E1, 0x000003EB },
+ { 0x0213F0FE991A2164, 0x000034A9, 0xFFFFE838, 0x00000561, 0x000020CE, 0xFFFFF38A, 0x000003C7, 0x000020CE, 0xFFFFF38A, 0x000003C7 },
+ { 0x0213F0FE99222184, 0x00003152, 0xFFFFE9EB, 0x00000522, 0x00001755, 0xFFFFF9A9, 0x000002C6, 0x00001755, 0xFFFFF9A9, 0x000002C6 },
+ { 0x0213F0FE99281884, 0x0000286E, 0xFFFFF136, 0x000003FD, 0x00001BAB, 0xFFFFF7C3, 0x0000032C, 0x00001BAB, 0xFFFFF7C3, 0x0000032C },
+ { 0x0213F0FE99300944, 0x0000316B, 0xFFFFEA02, 0x00000528, 0x00002247, 0xFFFFF24E, 0x00000408, 0x00002247, 0xFFFFF24E, 0x00000408 },
+ { 0x0213F0FE992C08E4, 0x000034CF, 0xFFFFE83D, 0x00000562, 0x00002458, 0xFFFFF130, 0x00000430, 0x00002458, 0xFFFFF130, 0x00000430 },
+ { 0x0213F0FE992C2984, 0x00003352, 0xFFFFE9D1, 0x00000515, 0x0000212A, 0xFFFFF3DC, 0x000003B4, 0x0000212A, 0xFFFFF3DC, 0x000003B4 },
+ { 0x0213F0FE992840A4, 0x00002946, 0xFFFFF09B, 0x00000415, 0x00001DC9, 0xFFFFF650, 0x00000366, 0x00001DC9, 0xFFFFF650, 0x00000366 },
+ { 0x0213F0FE99301124, 0x00003080, 0xFFFFEB47, 0x000004E1, 0x00001BD5, 0xFFFFF73B, 0x00000329, 0x00001BD5, 0xFFFFF73B, 0x00000329 },
+ { 0x0213F0FE991A1884, 0x00002FBD, 0xFFFFEB7B, 0x000004DD, 0x000017FC, 0xFFFFF99E, 0x000002C7, 0x000017FC, 0xFFFFF99E, 0x000002C7 },
+ { 0x0213F0FE99281124, 0x00002A28, 0xFFFFF032, 0x0000041F, 0x00001B19, 0xFFFFF83A, 0x00000312, 0x00001B19, 0xFFFFF83A, 0x00000312 },
+ { 0x0213F0FE992240C4, 0x00003420, 0xFFFFE936, 0x00000530, 0x000023C2, 0xFFFFF203, 0x00000406, 0x000023C2, 0xFFFFF203, 0x00000406 },
+ { 0x0213F0FE99301144, 0x00002F7C, 0xFFFFEBBA, 0x000004D1, 0x0000185D, 0xFFFFF975, 0x000002CA, 0x0000185D, 0xFFFFF975, 0x000002CA },
+ { 0x0213F0FE992E2044, 0x00002C51, 0xFFFFEE3B, 0x0000046F, 0x000019AA, 0xFFFFF8DD, 0x000002ED, 0x000019AA, 0xFFFFF8DD, 0x000002ED },
+ { 0x0213F0FE991A4144, 0x000033D6, 0xFFFFE8F2, 0x0000053D, 0x00001D73, 0xFFFFF5FB, 0x0000035B, 0x00001D73, 0xFFFFF5FB, 0x0000035B },
+ { 0x0213F0FE99323084, 0x000031D9, 0xFFFFEAF7, 0x000004E4, 0x00001EBD, 0xFFFFF5A6, 0x00000368, 0x00001EBD, 0xFFFFF5A6, 0x00000368 },
+ { 0x0213F0FE991A20A4, 0x00003386, 0xFFFFE9CE, 0x00000515, 0x00002422, 0xFFFFF1F3, 0x00000405, 0x00002422, 0xFFFFF1F3, 0x00000405 },
+ { 0x0213F0FE992C50E4, 0x000032FB, 0xFFFFE9BC, 0x00000520, 0x00002301, 0xFFFFF267, 0x000003F7, 0x00002301, 0xFFFFF267, 0x000003F7 },
+ { 0x0213F0FE99322924, 0x000032C2, 0xFFFFEAC0, 0x000004EA, 0x0000250F, 0xFFFFF1A2, 0x00000413, 0x0000250F, 0xFFFFF1A2, 0x00000413 },
+ { 0x0213F0FE991C2944, 0x00003722, 0xFFFFE8A6, 0x00000527, 0x000026E4, 0xFFFFF0F5, 0x0000041C, 0x000026E4, 0xFFFFF0F5, 0x0000041C },
+ { 0x0213F0FE992C48C4, 0x000035A4, 0xFFFFE822, 0x00000558, 0x000022F2, 0xFFFFF288, 0x000003E8, 0x000022F2, 0xFFFFF288, 0x000003E8 },
+ { 0x0213F0FE99280924, 0x00002CD1, 0xFFFFEDC6, 0x0000048C, 0x00001EAF, 0xFFFFF53D, 0x00000396, 0x00001EAF, 0xFFFFF53D, 0x00000396 },
+ { 0x0213F0FE99301164, 0x00003156, 0xFFFFEA60, 0x0000050B, 0x00001BBC, 0xFFFFF704, 0x00000335, 0x00001BBC, 0xFFFFF704, 0x00000335 },
+ { 0x0213F0FE992C5104, 0x000034A1, 0xFFFFE8C0, 0x00000544, 0x00002528, 0xFFFFF105, 0x0000042C, 0x00002528, 0xFFFFF105, 0x0000042C },
+ { 0x0213F0FE99323064, 0x000032CE, 0xFFFFE9D3, 0x00000520, 0x000021FF, 0xFFFFF2FD, 0x000003E4, 0x000021FF, 0xFFFFF2FD, 0x000003E4 },
+ { 0x0213F0FE991A50A4, 0x000034A0, 0xFFFFE823, 0x0000056D, 0x0000256F, 0xFFFFF047, 0x0000045A, 0x0000256F, 0xFFFFF047, 0x0000045A },
+ { 0x0213F0FE99303944, 0x00003109, 0xFFFFEBD6, 0x000004BF, 0x000022D4, 0xFFFFF32D, 0x000003D0, 0x000022D4, 0xFFFFF32D, 0x000003D0 },
+ { 0x0213F0FE992C1164, 0x000030B7, 0xFFFFEAF0, 0x000004F3, 0x00001AEC, 0xFFFFF7A9, 0x0000031B, 0x00001AEC, 0xFFFFF7A9, 0x0000031B },
+ { 0x0213F0FE992C39A4, 0x00003078, 0xFFFFEBA4, 0x000004CF, 0x00001E7A, 0xFFFFF5AF, 0x0000036B, 0x00001E7A, 0xFFFFF5AF, 0x0000036B },
+ { 0x0213F0FE99304124, 0x00003442, 0xFFFFE998, 0x00000518, 0x000025EA, 0xFFFFF0F3, 0x0000042B, 0x000025EA, 0xFFFFF0F3, 0x0000042B },
+ { 0x0213F0FE993021A4, 0x000031CB, 0xFFFFEA80, 0x00000501, 0x000020A3, 0xFFFFF403, 0x000003B2, 0x000020A3, 0xFFFFF403, 0x000003B2 },
+ { 0x0213F0FE992A2984, 0x00002947, 0xFFFFF018, 0x00000433, 0x00001BA5, 0xFFFFF75C, 0x00000340, 0x00001BA5, 0xFFFFF75C, 0x00000340 },
+ { 0x0213F0FE992C3984, 0x000033F9, 0xFFFFE99D, 0x00000518, 0x00002231, 0xFFFFF358, 0x000003C5, 0x00002231, 0xFFFFF358, 0x000003C5 },
+ { 0x0213F0FE99321124, 0x00003131, 0xFFFFEA45, 0x00000513, 0x00001973, 0xFFFFF85E, 0x00000301, 0x00001973, 0xFFFFF85E, 0x00000301 },
+ { 0x0213F0FE991C29A4, 0x00003571, 0xFFFFE8AC, 0x00000539, 0x00002049, 0xFFFFF49C, 0x0000038D, 0x00002049, 0xFFFFF49C, 0x0000038D },
+ { 0x0213F0FE992E3864, 0x0000309E, 0xFFFFEB1D, 0x000004E8, 0x000019ED, 0xFFFFF86E, 0x000002F8, 0x000019ED, 0xFFFFF86E, 0x000002F8 },
+ { 0x0213F0FE99302984, 0x00003091, 0xFFFFEB9B, 0x000004CC, 0x00001D2C, 0xFFFFF6A2, 0x0000033D, 0x00001D2C, 0xFFFFF6A2, 0x0000033D },
+ { 0x0213F0FE993008E4, 0x00003069, 0xFFFFEAFD, 0x000004F8, 0x00001E82, 0xFFFFF51C, 0x0000038D, 0x00001E82, 0xFFFFF51C, 0x0000038D },
+ { 0x0213F0FE992210A4, 0x00003459, 0xFFFFE7F2, 0x00000572, 0x00001DA7, 0xFFFFF552, 0x0000037F, 0x00001DA7, 0xFFFFF552, 0x0000037F },
+ { 0x0213F0FE99321104, 0x0000304B, 0xFFFFEAFB, 0x000004F4, 0x0000191E, 0xFFFFF8BD, 0x000002EE, 0x0000191E, 0xFFFFF8BD, 0x000002EE },
+ { 0x0213F0FE993020C4, 0x0000346E, 0xFFFFEA07, 0x000004FD, 0x00002767, 0xFFFFF058, 0x00000440, 0x00002767, 0xFFFFF058, 0x00000440 },
+ { 0x0213F0FE992E3084, 0x000030B5, 0xFFFFEBC1, 0x000004C1, 0x00001B3C, 0xFFFFF818, 0x000002FD, 0x00001B3C, 0xFFFFF818, 0x000002FD },
+ { 0x0213F0FE99300904, 0x0000321F, 0xFFFFE9EA, 0x00000524, 0x00002380, 0xFFFFF1C2, 0x0000041A, 0x00002380, 0xFFFFF1C2, 0x0000041A },
+ { 0x0213F0FE992E3044, 0x000030DF, 0xFFFFEB37, 0x000004E2, 0x00001E3C, 0xFFFFF5BB, 0x00000369, 0x00001E3C, 0xFFFFF5BB, 0x00000369 },
+ { 0x0213F0FE992848A4, 0x000027E0, 0xFFFFF0E2, 0x00000416, 0x00001A6A, 0xFFFFF820, 0x00000321, 0x00001A6A, 0xFFFFF820, 0x00000321 },
+ { 0x0213F0FE991A1084, 0x00002FA1, 0xFFFFEB63, 0x000004E7, 0x0000196B, 0xFFFFF880, 0x000002FB, 0x0000196B, 0xFFFFF880, 0x000002FB },
+ { 0x0213F0FE991C1084, 0x0000310C, 0xFFFFEAAF, 0x000004FC, 0x000019EF, 0xFFFFF850, 0x000002FD, 0x000019EF, 0xFFFFF850, 0x000002FD },
+ { 0x0213F0FE99323904, 0x0000334A, 0xFFFFEA07, 0x0000050B, 0x00002380, 0xFFFFF26F, 0x000003F0, 0x00002380, 0xFFFFF26F, 0x000003F0 },
+ { 0x0213F0FE99302944, 0x00002FF9, 0xFFFFECDC, 0x00000492, 0x00002297, 0xFFFFF394, 0x000003BF, 0x00002297, 0xFFFFF394, 0x000003BF },
+ { 0x0213F0FE992C2164, 0x0000354B, 0xFFFFE894, 0x00000546, 0x000024C4, 0xFFFFF16C, 0x0000041B, 0x000024C4, 0xFFFFF16C, 0x0000041B },
+ { 0x0213F0FE99220924, 0x00003245, 0xFFFFE92F, 0x00000544, 0x00001829, 0xFFFFF8F1, 0x000002EA, 0x00001829, 0xFFFFF8F1, 0x000002EA },
+ { 0x0213F0FE992E4884, 0x0000302F, 0xFFFFEB51, 0x000004E3, 0x0000199F, 0xFFFFF894, 0x000002F4, 0x0000199F, 0xFFFFF894, 0x000002F4 },
+ { 0x0213F0FE992E18C4, 0x00002F54, 0xFFFFEC86, 0x000004A6, 0x00001A6F, 0xFFFFF891, 0x000002EC, 0x00001A6F, 0xFFFFF891, 0x000002EC },
+ { 0x0213F0FE99284164, 0x00002908, 0xFFFFF0D8, 0x0000040A, 0x00001C9B, 0xFFFFF729, 0x00000342, 0x00001C9B, 0xFFFFF729, 0x00000342 },
+ { 0x0213F0FE99302964, 0x000031D9, 0xFFFFEB40, 0x000004D7, 0x000023F5, 0xFFFFF259, 0x000003F4, 0x000023F5, 0xFFFFF259, 0x000003F4 },
+ { 0x0213F0FE993048E4, 0x000034C8, 0xFFFFE8C6, 0x0000053F, 0x00002313, 0xFFFFF280, 0x000003EC, 0x00002313, 0xFFFFF280, 0x000003EC },
+ { 0x0213F0FE993050C4, 0x000037D1, 0xFFFFE6A1, 0x0000059C, 0x00002C6A, 0xFFFFEBFF, 0x00000504, 0x00002C6A, 0xFFFFEBFF, 0x00000504 },
+ { 0x0213F0FE99321964, 0x000030E9, 0xFFFFEA6B, 0x0000050F, 0x00001A2D, 0xFFFFF7DF, 0x00000316, 0x00001A2D, 0xFFFFF7DF, 0x00000316 },
+ { 0x0213F0FE99302084, 0x0000323D, 0xFFFFEA95, 0x000004F4, 0x00001ED2, 0xFFFFF584, 0x0000036C, 0x00001ED2, 0xFFFFF584, 0x0000036C },
+ { 0x0213F0FE992C3024, 0x000033D6, 0xFFFFE9DB, 0x00000510, 0x000027A7, 0xFFFFEFC7, 0x0000045E, 0x000027A7, 0xFFFFEFC7, 0x0000045E },
+ { 0x0213F0FE991C3164, 0x00003444, 0xFFFFE98A, 0x00000517, 0x000020FD, 0xFFFFF43F, 0x0000039D, 0x000020FD, 0xFFFFF43F, 0x0000039D },
+ { 0x0213F0FE992808E4, 0x00002987, 0xFFFFEFA1, 0x0000044B, 0x00001B06, 0xFFFFF788, 0x0000033C, 0x00001B06, 0xFFFFF788, 0x0000033C },
+ { 0x0213F0FE992C28E4, 0x0000311D, 0xFFFFED20, 0x00000474, 0x000025DA, 0xFFFFF223, 0x000003F0, 0x000025DA, 0xFFFFF223, 0x000003F0 },
+ { 0x0213F0FE992C1124, 0x000032A2, 0xFFFFEA0A, 0x0000050D, 0x00001D48, 0xFFFFF659, 0x0000034A, 0x00001D48, 0xFFFFF659, 0x0000034A },
+ { 0x0213F0FE992208E4, 0x00003110, 0xFFFFE9EA, 0x00000529, 0x00001786, 0xFFFFF958, 0x000002DB, 0x00001786, 0xFFFFF958, 0x000002DB },
+ { 0x0213F0FE992821A4, 0x000027F2, 0xFFFFF174, 0x000003F7, 0x00001C7A, 0xFFFFF72A, 0x00000348, 0x00001C7A, 0xFFFFF72A, 0x00000348 },
+ { 0x0213F0FE991C10E4, 0x000031DB, 0xFFFFEA7D, 0x000004FB, 0x000019C4, 0xFFFFF8B1, 0x000002E6, 0x000019C4, 0xFFFFF8B1, 0x000002E6 },
+ { 0x0213F0FE992C1104, 0x00003158, 0xFFFFEAAC, 0x000004FA, 0x00001BC1, 0xFFFFF737, 0x0000032B, 0x00001BC1, 0xFFFFF737, 0x0000032B },
+ { 0x0213F0FE993010C4, 0x00002F36, 0xFFFFEBF9, 0x000004CA, 0x00001A2A, 0xFFFFF83F, 0x00000303, 0x00001A2A, 0xFFFFF83F, 0x00000303 },
+ { 0x0213F0FE993238A4, 0x000032B4, 0xFFFFEA72, 0x000004FA, 0x000021FF, 0xFFFFF378, 0x000003C5, 0x000021FF, 0xFFFFF378, 0x000003C5 },
+ { 0x0213F0FE99303164, 0x00003262, 0xFFFFEAFA, 0x000004DF, 0x00002441, 0xFFFFF237, 0x000003F6, 0x00002441, 0xFFFFF237, 0x000003F6 },
+ { 0x0213F0FE99303924, 0x0000336A, 0xFFFFEAFB, 0x000004D1, 0x00002746, 0xFFFFF0B8, 0x0000042B, 0x00002746, 0xFFFFF0B8, 0x0000042B },
+ { 0x0213F0FE991A4084, 0x000032E5, 0xFFFFE923, 0x00000541, 0x00001DF0, 0xFFFFF552, 0x00000380, 0x00001DF0, 0xFFFFF552, 0x00000380 },
+ { 0x0213F0FE99304064, 0x000035D1, 0xFFFFE80B, 0x0000055F, 0x00002780, 0xFFFFEF74, 0x0000046F, 0x00002780, 0xFFFFEF74, 0x0000046F },
+ { 0x0213F0FE993028A4, 0x000033EC, 0xFFFFEA48, 0x000004F4, 0x0000269F, 0xFFFFF0D8, 0x0000042A, 0x0000269F, 0xFFFFF0D8, 0x0000042A },
+ { 0x0213F0FE99323884, 0x000030C4, 0xFFFFEB39, 0x000004E2, 0x00001B44, 0xFFFFF7AA, 0x00000318, 0x00001B44, 0xFFFFF7AA, 0x00000318 },
+ { 0x0213F0FE99281144, 0x00002926, 0xFFFFF0AF, 0x0000040E, 0x0000194E, 0xFFFFF959, 0x000002E2, 0x0000194E, 0xFFFFF959, 0x000002E2 },
+ { 0x0213F0FE992C10C4, 0x00003141, 0xFFFFEAAF, 0x000004F6, 0x00001864, 0xFFFFF97C, 0x000002C6, 0x00001864, 0xFFFFF97C, 0x000002C6 },
+ { 0x0213F0FE99301064, 0x000030B2, 0xFFFFEB7C, 0x000004DB, 0x000022CE, 0xFFFFF2B5, 0x000003F0, 0x000022CE, 0xFFFFF2B5, 0x000003F0 },
+ { 0x0213F0FE99301944, 0x0000318C, 0xFFFFEAC7, 0x000004F6, 0x00002113, 0xFFFFF3CA, 0x000003BD, 0x00002113, 0xFFFFF3CA, 0x000003BD },
+ { 0x0213F0FE992E1104, 0x00002FD2, 0xFFFFEB8F, 0x000004D9, 0x00001996, 0xFFFFF89F, 0x000002F1, 0x00001996, 0xFFFFF89F, 0x000002F1 },
+ { 0x0213F0FE991A28A4, 0x0000310D, 0xFFFFEB25, 0x000004E7, 0x00001F67, 0xFFFFF4EF, 0x0000038E, 0x00001F67, 0xFFFFF4EF, 0x0000038E },
+ { 0x0213F0FE992A4964, 0x00002BBC, 0xFFFFEE68, 0x00000477, 0x00002050, 0xFFFFF41D, 0x000003C8, 0x00002050, 0xFFFFF41D, 0x000003C8 },
+ { 0x0213F0FE99302104, 0x00003096, 0xFFFFECED, 0x00000486, 0x000024C9, 0xFFFFF278, 0x000003E7, 0x000024C9, 0xFFFFF278, 0x000003E7 },
+ { 0x0213F0FE992C10A4, 0x00003401, 0xFFFFE8F1, 0x0000053C, 0x00001E75, 0xFFFFF55C, 0x00000376, 0x00001E75, 0xFFFFF55C, 0x00000376 },
+ { 0x0213F0FE99302844, 0x0000319E, 0xFFFFEAB1, 0x000004F8, 0x00001EA3, 0xFFFFF567, 0x00000378, 0x00001EA3, 0xFFFFF567, 0x00000378 },
+ { 0x0213F0FE99322964, 0x000030FD, 0xFFFFEB4C, 0x000004DB, 0x00001CA6, 0xFFFFF6E8, 0x00000335, 0x00001CA6, 0xFFFFF6E8, 0x00000335 },
+ { 0x0213F0FE992E40A4, 0x000030D6, 0xFFFFEB1A, 0x000004E4, 0x00001A0D, 0xFFFFF87D, 0x000002EF, 0x00001A0D, 0xFFFFF87D, 0x000002EF },
+ { 0x0213F0FE992C2124, 0x0000324B, 0xFFFFEB17, 0x000004D9, 0x00002225, 0xFFFFF3A8, 0x000003BA, 0x00002225, 0xFFFFF3A8, 0x000003BA },
+ { 0x0213F0FE99284084, 0x00002A00, 0xFFFFF02E, 0x00000424, 0x00001E21, 0xFFFFF61D, 0x0000036C, 0x00001E21, 0xFFFFF61D, 0x0000036C },
+ { 0x0213F0FE992A48A4, 0x000029CF, 0xFFFFEF53, 0x00000457, 0x00001B11, 0xFFFFF772, 0x0000033D, 0x00001B11, 0xFFFFF772, 0x0000033D },
+ { 0x0213F0FE991A30A4, 0x000032A1, 0xFFFFEA63, 0x000004FB, 0x00001F83, 0xFFFFF516, 0x0000037E, 0x00001F83, 0xFFFFF516, 0x0000037E },
+ { 0x0213F0FE992E20C4, 0x0000305C, 0xFFFFEC14, 0x000004B5, 0x00001D0B, 0xFFFFF6ED, 0x00000332, 0x00001D0B, 0xFFFFF6ED, 0x00000332 },
+ { 0x0213F0FE992C1064, 0x00003467, 0xFFFFE8D5, 0x00000543, 0x0000243F, 0xFFFFF190, 0x00000418, 0x0000243F, 0xFFFFF190, 0x00000418 },
+ { 0x0213F0FE992A2064, 0x00002796, 0xFFFFF133, 0x00000409, 0x00001903, 0xFFFFF91C, 0x000002FC, 0x00001903, 0xFFFFF91C, 0x000002FC },
+ { 0x0213F0FE99302164, 0x000031F6, 0xFFFFEAB7, 0x000004F5, 0x000022B9, 0xFFFFF2D0, 0x000003E6, 0x000022B9, 0xFFFFF2D0, 0x000003E6 },
+ { 0x0213F0FE992E5104, 0x00003196, 0xFFFFEA76, 0x00000503, 0x00001CC5, 0xFFFFF67D, 0x0000034A, 0x00001CC5, 0xFFFFF67D, 0x0000034A },
+ { 0x0213F0FE99321144, 0x00002F9E, 0xFFFFEAD9, 0x00000505, 0x000017C1, 0xFFFFF93D, 0x000002DF, 0x000017C1, 0xFFFFF93D, 0x000002DF },
+ { 0x0213F0FE992E2124, 0x00002FBC, 0xFFFFEC75, 0x000004A8, 0x00001D6D, 0xFFFFF6AC, 0x0000033D, 0x00001D6D, 0xFFFFF6AC, 0x0000033D },
+ { 0x0213F0FE992C38A4, 0x00003541, 0xFFFFE921, 0x00000524, 0x00002662, 0xFFFFF0CB, 0x0000042B, 0x00002662, 0xFFFFF0CB, 0x0000042B },
+ { 0x0213F0FE992A21A4, 0x00002953, 0xFFFFEF76, 0x00000459, 0x00001C05, 0xFFFFF6A0, 0x00000368, 0x00001C05, 0xFFFFF6A0, 0x00000368 },
+ { 0x0213F0FE992C4924, 0x000034BC, 0xFFFFE8DD, 0x00000536, 0x0000210E, 0xFFFFF3F4, 0x000003A8, 0x0000210E, 0xFFFFF3F4, 0x000003A8 },
+ { 0x0213F0FE992C29A4, 0x000034BE, 0xFFFFE916, 0x0000052F, 0x000024A1, 0xFFFFF1A6, 0x00000410, 0x000024A1, 0xFFFFF1A6, 0x00000410 },
+ { 0x0213F0FE99304964, 0x000037B5, 0xFFFFE7A9, 0x0000055B, 0x000028A1, 0xFFFFEF51, 0x00000467, 0x000028A1, 0xFFFFEF51, 0x00000467 },
+ { 0x0213F0FE99301104, 0x00002FC5, 0xFFFFEBBE, 0x000004D1, 0x00001BA5, 0xFFFFF757, 0x00000328, 0x00001BA5, 0xFFFFF757, 0x00000328 },
+ { 0x0213F0FE993040A4, 0x000033CB, 0xFFFFE944, 0x0000052B, 0x00001FBE, 0xFFFFF4B1, 0x0000038C, 0x00001FBE, 0xFFFFF4B1, 0x0000038C },
+ { 0x0213F0FE99301844, 0x000030AE, 0xFFFFEBA0, 0x000004D3, 0x00002268, 0xFFFFF316, 0x000003DD, 0x00002268, 0xFFFFF316, 0x000003DD },
+ { 0x0213F0FE992C20A4, 0x00002F90, 0xFFFFEC5A, 0x000004B0, 0x00001C3A, 0xFFFFF752, 0x00000323, 0x00001C3A, 0xFFFFF752, 0x00000323 },
+ { 0x0213F0FE992E38E4, 0x00003113, 0xFFFFEB91, 0x000004C8, 0x00001E3C, 0xFFFFF623, 0x0000034E, 0x00001E3C, 0xFFFFF623, 0x0000034E },
+ { 0x0213F0FE99323984, 0x0000330B, 0xFFFFE94B, 0x00000539, 0x000020E7, 0xFFFFF37E, 0x000003CD, 0x000020E7, 0xFFFFF37E, 0x000003CD },
+ { 0x0213F0FE992E2864, 0x000031D1, 0xFFFFEACB, 0x000004ED, 0x00001E82, 0xFFFFF5B2, 0x00000365, 0x00001E82, 0xFFFFF5B2, 0x00000365 },
+ { 0x0213F0FE992A3984, 0x00002CD5, 0xFFFFEDC1, 0x0000048D, 0x000020F8, 0xFFFFF3C1, 0x000003D1, 0x000020F8, 0xFFFFF3C1, 0x000003D1 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }
};
int pp_override_get_default_fuse_value(uint64_t key,
- struct phm_fuses_default list[],
struct phm_fuses_default *result)
{
+ const struct phm_fuses_default *list = vega10_fuses_default;
uint32_t i;
- uint64_t temp_serial_numer;
- uint32_t bit;
- const char *temp;
-
- for (i = 0; list[i].key != NULL; i++) {
- temp = list[i].key;
- temp_serial_numer = 0;
- do {
- bit = *temp=='1'? 1 : 0;
- temp_serial_numer = (temp_serial_numer <<1 ) | bit;
- temp++;
- } while (*temp);
- if (key == temp_serial_numer) {
+ for (i = 0; list[i].key != 0; i++) {
+ if (key == list[i].key) {
result->key = list[i].key;
result->VFT2_m1 = list[i].VFT2_m1;
result->VFT2_m2 = list[i].VFT2_m2;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
index 6e8f7a2119c1..c6ba0d64cfb7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
struct phm_fuses_default {
- const char *key;
+ uint64_t key;
uint32_t VFT2_m1;
uint32_t VFT2_m2;
uint32_t VFT2_b;
@@ -40,9 +40,7 @@ struct phm_fuses_default {
uint32_t VFT0_b;
};
-extern struct phm_fuses_default vega10_fuses_default[];
extern int pp_override_get_default_fuse_value(uint64_t key,
- struct phm_fuses_default list[],
struct phm_fuses_default *result);
#endif \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
new file mode 100644
index 000000000000..ffa44bbb218e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "pp_psm.h"
+
+int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ unsigned int i;
+ unsigned int table_entries;
+ struct pp_power_state *state;
+ int size;
+
+ if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->get_power_state_size == NULL)
+ return -EINVAL;
+
+ hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+
+ hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+ sizeof(struct pp_power_state);
+
+ hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
+ if (hwmgr->ps == NULL)
+ return -ENOMEM;
+
+ hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->request_ps == NULL) {
+ kfree(hwmgr->ps);
+ hwmgr->ps = NULL;
+ return -ENOMEM;
+ }
+
+ hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->current_ps == NULL) {
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ return -ENOMEM;
+ }
+
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
+
+ if (state->classification.flags & PP_StateClassificationFlag_Boot) {
+ hwmgr->boot_ps = state;
+ memcpy(hwmgr->current_ps, state, size);
+ memcpy(hwmgr->request_ps, state, size);
+ }
+
+ state->id = i + 1; /* assigned unique num for every power state id */
+
+ if (state->classification.flags & PP_StateClassificationFlag_Uvd)
+ hwmgr->uvd_ps = state;
+ state = (struct pp_power_state *)((unsigned long)state + size);
+ }
+
+ return 0;
+}
+
+int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ kfree(hwmgr->current_ps);
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ hwmgr->current_ps = NULL;
+ return 0;
+}
+
+static int psm_get_ui_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel ui_label,
+ unsigned long *state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->classification.ui_label & ui_label) {
+ *state_id = state->id;
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr,
+ enum PP_StateClassificationFlag flag,
+ unsigned long *state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->classification.flags & flag) {
+ *state_id = state->id;
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->id == state_id) {
+ memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+int psm_set_boot_states(struct pp_hwmgr *hwmgr)
+{
+ unsigned long state_id;
+ int ret = -EINVAL;
+
+ if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
+ &state_id))
+ ret = psm_set_states(hwmgr, state_id);
+
+ return ret;
+}
+
+int psm_set_performance_states(struct pp_hwmgr *hwmgr)
+{
+ unsigned long state_id;
+ int ret = -EINVAL;
+
+ if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
+ &state_id))
+ ret = psm_set_states(hwmgr, state_id);
+
+ return ret;
+}
+
+int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel label_id,
+ struct pp_power_state **state)
+{
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ *state = hwmgr->ps;
+
+restart_search:
+ for (i = 0; i < table_entries; i++) {
+ if ((*state)->classification.ui_label & label_id)
+ return 0;
+ *state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size);
+ }
+
+ switch (label_id) {
+ case PP_StateUILabel_Battery:
+ case PP_StateUILabel_Balanced:
+ label_id = PP_StateUILabel_Performance;
+ goto restart_search;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+ struct pp_power_state *new_ps)
+{
+ struct pp_power_state *pcurrent;
+ struct pp_power_state *requested;
+ bool equal;
+
+ if (skip)
+ return 0;
+
+ phm_display_configuration_changed(hwmgr);
+
+ if (new_ps != NULL)
+ requested = new_ps;
+ else
+ requested = hwmgr->request_ps;
+
+ pcurrent = hwmgr->current_ps;
+
+ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+ if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
+ &pcurrent->hardware, &requested->hardware, &equal)))
+ equal = false;
+
+ if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
+ phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
+ memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
+ }
+
+ phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
new file mode 100644
index 000000000000..fa1b6825036a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef PP_PSM_H
+#define PP_PSM_H
+
+#include "hwmgr.h"
+
+int psm_init_power_state_table(struct pp_hwmgr *hwmgr);
+int psm_fini_power_state_table(struct pp_hwmgr *hwmgr);
+int psm_set_boot_states(struct pp_hwmgr *hwmgr);
+int psm_set_performance_states(struct pp_hwmgr *hwmgr);
+int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel label_id,
+ struct pp_power_state **state);
+int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr,
+ bool skip,
+ struct pp_power_state *new_ps);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 953e0c9ad7cd..c6febbf0bf69 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -470,7 +470,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
* SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
* voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
*/
-bool atomctrl_is_voltage_controled_by_gpio_v3(
+bool atomctrl_is_voltage_controlled_by_gpio_v3(
struct pp_hwmgr *hwmgr,
uint8_t voltage_type,
uint8_t voltage_mode)
@@ -1100,10 +1100,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
}
}
- PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
- "Can't find requested voltage id in vddc_dependency_on_sclk table!",
+ if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
return -EINVAL;
- );
+ }
get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
@@ -1418,3 +1418,83 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
return 0;
}
+
+int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
+{
+ int result;
+ SET_VOLTAGE_PS_ALLOCATION allocation;
+ SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
+ (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
+
+ voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+
+ result = cgs_atom_exec_cmd_table(hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, SetVoltage),
+ voltage_parameters);
+
+ *virtual_voltage_id = voltage_parameters->usVoltageLevel;
+
+ return result;
+}
+
+int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *vddc, uint16_t *vddci,
+ uint16_t virtual_voltage_id,
+ uint16_t efuse_voltage_id)
+{
+ int i, j;
+ int ix;
+ u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+ ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+
+ *vddc = 0;
+ *vddci = 0;
+
+ ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ ix,
+ NULL, NULL, NULL);
+ if (!profile)
+ return -EINVAL;
+
+ if ((profile->asHeader.ucTableFormatRevision >= 2) &&
+ (profile->asHeader.ucTableContentRevision >= 1) &&
+ (profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
+ leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
+ vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
+ vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
+ if (profile->ucElbVDDC_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+ if (vddc_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (efuse_voltage_id <= leakage_bin[j]) {
+ *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
+ vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
+ if (profile->ucElbVDDCI_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+ if (vddci_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (efuse_voltage_id <= leakage_bin[j]) {
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index e9fe2e84006b..c44a92064cf1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -291,7 +291,7 @@ extern uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode);
extern int atomctrl_get_engine_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
extern int atomctrl_get_dfs_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
-extern bool atomctrl_is_voltage_controled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode);
+extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode);
extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
@@ -314,5 +314,11 @@ extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_
extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
uint16_t *load_line);
+
+extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *vddc, uint16_t *vddci,
+ uint16_t virtual_voltage_id,
+ uint16_t efuse_voltage_id);
+extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 84f01fd33aff..a651ebcf44fd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -173,8 +173,6 @@ static int get_vddc_lookup_table(
if (NULL == table)
return -ENOMEM;
- memset(table, 0x00, table_size);
-
table->count = vddc_lookup_pp_tables->ucNumEntries;
for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
@@ -335,8 +333,6 @@ static int get_valid_clk(
if (NULL == table)
return -ENOMEM;
- memset(table, 0x00, table_size);
-
table->count = (uint32_t)clk_volt_pp_table->count;
for (i = 0; i < table->count; i++) {
@@ -390,8 +386,6 @@ static int get_mclk_voltage_dependency_table(
if (NULL == mclk_table)
return -ENOMEM;
- memset(mclk_table, 0x00, table_size);
-
mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
@@ -439,8 +433,6 @@ static int get_sclk_voltage_dependency_table(
if (NULL == sclk_table)
return -ENOMEM;
- memset(sclk_table, 0x00, table_size);
-
sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
for (i = 0; i < tonga_table->ucNumEntries; i++) {
@@ -473,8 +465,6 @@ static int get_sclk_voltage_dependency_table(
if (NULL == sclk_table)
return -ENOMEM;
- memset(sclk_table, 0x00, table_size);
-
sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
for (i = 0; i < polaris_table->ucNumEntries; i++) {
@@ -525,8 +515,6 @@ static int get_pcie_table(
if (pcie_table == NULL)
return -ENOMEM;
- memset(pcie_table, 0x00, table_size);
-
/*
* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
@@ -567,8 +555,6 @@ static int get_pcie_table(
if (pcie_table == NULL)
return -ENOMEM;
- memset(pcie_table, 0x00, table_size);
-
/*
* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
@@ -615,8 +601,6 @@ static int get_cac_tdp_table(
if (NULL == tdp_table)
return -ENOMEM;
- memset(tdp_table, 0x00, table_size);
-
hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == hwmgr->dyn_state.cac_dtp_table) {
@@ -624,8 +608,6 @@ static int get_cac_tdp_table(
return -ENOMEM;
}
- memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
-
if (table->ucRevId < 3) {
const ATOM_Tonga_PowerTune_Table *tonga_table =
(ATOM_Tonga_PowerTune_Table *)table;
@@ -725,8 +707,6 @@ static int get_mm_clock_voltage_table(
if (NULL == mm_table)
return -ENOMEM;
- memset(mm_table, 0x00, table_size);
-
mm_table->count = mm_dependency_table->ucNumEntries;
for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
@@ -850,9 +830,9 @@ static int init_over_drive_limits(
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
{
hwmgr->platform_descriptor.overdriveLimit.engineClock =
- le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+ le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
- le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+ le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
hwmgr->platform_descriptor.minOverdriveVDDC = 0;
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 2716721e5453..afae32ee2b0d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-
+#include <drm/amdgpu_drm.h>
#include "processpptables.h"
#include <atom-types.h>
#include <atombios.h>
@@ -790,6 +790,39 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
return pstate;
}
+static const unsigned char soft_dummy_pp_table[] = {
+ 0xe1, 0x01, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0x4a, 0x00, 0x6c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x42, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x4e, 0x00, 0x88, 0x00, 0x00, 0x9e, 0x00, 0x17, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x18, 0x05, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x8e, 0x01, 0x00, 0x00, 0xb8, 0x01, 0x00, 0x00, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c,
+ 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x70, 0x00, 0x91, 0xf4, 0x00,
+ 0x64, 0x00, 0x40, 0x19, 0x01, 0x5a, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a,
+ 0x00, 0x00, 0x09, 0x30, 0x75, 0x00, 0x30, 0x75, 0x00, 0x40, 0x9c, 0x00, 0x40, 0x9c, 0x00, 0x59,
+ 0xd8, 0x00, 0x59, 0xd8, 0x00, 0x91, 0xf4, 0x00, 0x91, 0xf4, 0x00, 0x0e, 0x28, 0x01, 0x0e, 0x28,
+ 0x01, 0x90, 0x5f, 0x01, 0x90, 0x5f, 0x01, 0x00, 0x77, 0x01, 0x00, 0x77, 0x01, 0xca, 0x91, 0x01,
+ 0xca, 0x91, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01,
+ 0x7c, 0x00, 0x02, 0x70, 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a,
+ 0x00, 0x07, 0x08, 0x08, 0x00, 0x08, 0x00, 0x01, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02, 0x02, 0x03,
+ 0x02, 0x04, 0x02, 0x00, 0x08, 0x40, 0x9c, 0x00, 0x30, 0x75, 0x00, 0x74, 0xb5, 0x00, 0xa0, 0x8c,
+ 0x00, 0x60, 0xea, 0x00, 0x74, 0xb5, 0x00, 0x0e, 0x28, 0x01, 0x60, 0xea, 0x00, 0x90, 0x5f, 0x01,
+ 0x40, 0x19, 0x01, 0xb2, 0xb0, 0x01, 0x90, 0x5f, 0x01, 0xc0, 0xd4, 0x01, 0x00, 0x77, 0x01, 0x5e,
+ 0xff, 0x01, 0xca, 0x91, 0x01, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01, 0x7c, 0x00, 0x02, 0x70,
+ 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a, 0x00, 0x07, 0x00, 0x08,
+ 0x80, 0x00, 0x30, 0x75, 0x00, 0x7e, 0x00, 0x40, 0x9c, 0x00, 0x7c, 0x00, 0x59, 0xd8, 0x00, 0x70,
+ 0x00, 0xdc, 0x0b, 0x01, 0x64, 0x00, 0x80, 0x38, 0x01, 0x5a, 0x00, 0x80, 0x38, 0x01, 0x52, 0x00,
+ 0x80, 0x38, 0x01, 0x4a, 0x00, 0x80, 0x38, 0x01, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c,
+ 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x74, 0x00, 0x91, 0xf4, 0x00,
+ 0x66, 0x00, 0x40, 0x19, 0x01, 0x58, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a,
+ 0x00
+};
static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
struct pp_hwmgr *hwmgr)
@@ -799,12 +832,17 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
uint16_t size;
if (!table_addr) {
- table_addr = cgs_atom_get_data_table(hwmgr->device,
- GetIndexIntoMasterTable(DATA, PowerPlayInfo),
- &size, &frev, &crev);
-
- hwmgr->soft_pp_table = table_addr;
- hwmgr->soft_pp_table_size = size;
+ if (hwmgr->chip_id == CHIP_RAVEN) {
+ table_addr = &soft_dummy_pp_table[0];
+ hwmgr->soft_pp_table = &soft_dummy_pp_table[0];
+ hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table);
+ } else {
+ table_addr = cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+ &size, &frev, &crev);
+ hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table_size = size;
+ }
}
return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
@@ -924,15 +962,14 @@ int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
}
}
- if ((0 == result) &&
- (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot)))
- result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware));
+ if ((0 == result) && (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) {
+ if (hwmgr->chip_family < AMDGPU_FAMILY_RV)
+ result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware));
+ }
return result;
}
-
-
static int init_powerplay_tables(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table
@@ -1615,85 +1652,53 @@ static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id == CHIP_RAVEN)
return 0;
- if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
- kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
- hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
+ hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
- if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.vddci_dependency_on_mclk);
- hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddci_dependency_on_mclk);
+ hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.vddc_dependency_on_mclk);
- hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dependency_on_mclk);
+ hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk);
- hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk);
+ hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.valid_mclk_values) {
- kfree(hwmgr->dyn_state.valid_mclk_values);
- hwmgr->dyn_state.valid_mclk_values = NULL;
- }
+ kfree(hwmgr->dyn_state.valid_mclk_values);
+ hwmgr->dyn_state.valid_mclk_values = NULL;
- if (NULL != hwmgr->dyn_state.valid_sclk_values) {
- kfree(hwmgr->dyn_state.valid_sclk_values);
- hwmgr->dyn_state.valid_sclk_values = NULL;
- }
+ kfree(hwmgr->dyn_state.valid_sclk_values);
+ hwmgr->dyn_state.valid_sclk_values = NULL;
- if (NULL != hwmgr->dyn_state.cac_leakage_table) {
- kfree(hwmgr->dyn_state.cac_leakage_table);
- hwmgr->dyn_state.cac_leakage_table = NULL;
- }
+ kfree(hwmgr->dyn_state.cac_leakage_table);
+ hwmgr->dyn_state.cac_leakage_table = NULL;
- if (NULL != hwmgr->dyn_state.vddc_phase_shed_limits_table) {
- kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table);
- hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table);
+ hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL;
- if (NULL != hwmgr->dyn_state.vce_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table);
- hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.uvd_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.samu_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table);
- hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+ hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.acp_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table);
- hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table) {
- kfree(hwmgr->dyn_state.cac_dtp_table);
- hwmgr->dyn_state.cac_dtp_table = NULL;
- }
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.ppm_parameter_table) {
- kfree(hwmgr->dyn_state.ppm_parameter_table);
- hwmgr->dyn_state.ppm_parameter_table = NULL;
- }
+ kfree(hwmgr->dyn_state.ppm_parameter_table);
+ hwmgr->dyn_state.ppm_parameter_table = NULL;
- if (NULL != hwmgr->dyn_state.vdd_gfx_dependency_on_sclk) {
- kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
- hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
+ hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
- if (NULL != hwmgr->dyn_state.vq_budgeting_table) {
- kfree(hwmgr->dyn_state.vq_budgeting_table);
- hwmgr->dyn_state.vq_budgeting_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vq_budgeting_table);
+ hwmgr->dyn_state.vq_budgeting_table = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 2c3e6baf2524..3e0b267c74a8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -38,20 +38,17 @@
#include "pp_soc15.h"
#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define RAVEN_MINIMUM_ENGINE_CLOCK 800 //8Mhz, the low boundary of engine clock allowed on this chip
+#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT 12
-#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 //100mhz
+#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
#define SMC_RAM_END 0x40000
static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic;
+
+
int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req);
-struct phm_vq_budgeting_record rv_vqtable[] = {
- /* _TBD
- * CUs, SSP low, SSP High, Min Sclk Low, Min Sclk, High, AWD/non-AWD, DCLK, ECLK, Sustainable Sclk, Sustainable CUs */
- { 8, 0, 45, 0, 0, VQ_DisplayConfig_NoneAWD, 80000, 120000, 4, 0 },
-};
static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps)
{
@@ -70,101 +67,27 @@ static const struct rv_power_state *cast_const_rv_ps(
return (struct rv_power_state *)hw_ps;
}
-static int rv_init_vq_budget_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size, i;
- struct phm_vq_budgeting_table *ptable;
- uint32_t num_entries = ARRAY_SIZE(rv_vqtable);
-
- if (hwmgr->dyn_state.vq_budgeting_table != NULL)
- return 0;
-
- table_size = sizeof(struct phm_vq_budgeting_table) +
- sizeof(struct phm_vq_budgeting_record) * (num_entries - 1);
-
- ptable = kzalloc(table_size, GFP_KERNEL);
- if (NULL == ptable)
- return -ENOMEM;
-
- ptable->numEntries = (uint8_t) num_entries;
-
- for (i = 0; i < ptable->numEntries; i++) {
- ptable->entries[i].ulCUs = rv_vqtable[i].ulCUs;
- ptable->entries[i].ulSustainableSOCPowerLimitLow = rv_vqtable[i].ulSustainableSOCPowerLimitLow;
- ptable->entries[i].ulSustainableSOCPowerLimitHigh = rv_vqtable[i].ulSustainableSOCPowerLimitHigh;
- ptable->entries[i].ulMinSclkLow = rv_vqtable[i].ulMinSclkLow;
- ptable->entries[i].ulMinSclkHigh = rv_vqtable[i].ulMinSclkHigh;
- ptable->entries[i].ucDispConfig = rv_vqtable[i].ucDispConfig;
- ptable->entries[i].ulDClk = rv_vqtable[i].ulDClk;
- ptable->entries[i].ulEClk = rv_vqtable[i].ulEClk;
- ptable->entries[i].ulSustainableSclk = rv_vqtable[i].ulSustainableSclk;
- ptable->entries[i].ulSustainableCUs = rv_vqtable[i].ulSustainableCUs;
- }
-
- hwmgr->dyn_state.vq_budgeting_table = ptable;
-
- return 0;
-}
-
static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend);
- struct cgs_system_info sys_info = {0};
- int result;
- rv_hwmgr->ddi_power_gating_disabled = 0;
- rv_hwmgr->bapm_enabled = 1;
rv_hwmgr->dce_slow_sclk_threshold = 30000;
- rv_hwmgr->disable_driver_thermal_policy = 1;
rv_hwmgr->thermal_auto_throttling_treshold = 0;
rv_hwmgr->is_nb_dpm_enabled = 1;
rv_hwmgr->dpm_flags = 1;
- rv_hwmgr->disable_smu_acp_s3_handshake = 1;
- rv_hwmgr->disable_notify_smu_vpu_recovery = 0;
rv_hwmgr->gfx_off_controled_by_driver = false;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicM3Arbiter);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDynamicPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ACP);
+ rv_hwmgr->need_min_deep_sleep_dcefclk = true;
+ rv_hwmgr->num_active_display = 0;
+ rv_hwmgr->deep_sleep_dcefclk = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXDynamicMGPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableVoltageIsland);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_GFX_DMG)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXDynamicMGPowerGating);
- }
-
+ PHM_PlatformCaps_PowerPlaySupport);
return 0;
}
@@ -234,102 +157,88 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
- clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
clock_req.clock_type = amd_pp_dcf_clock;
clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
- if (clocks.dcefClock == 0 && clocks.dcefClockInSR == 0)
- clock_req.clock_freq_in_khz = rv_data->dcf_actual_hard_min_freq;
-
PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
"Attempt to set DCF Clock Failed!", return -EINVAL);
- if(rv_data->need_min_deep_sleep_dcefclk && 0 != clocks.dcefClockInSR)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetMinDeepSleepDcefclk,
- clocks.dcefClockInSR / 100);
- /*
- if(!rv_data->isp_tileA_power_gated || !rv_data->isp_tileB_power_gated) {
- if ((hwmgr->ispArbiter.iclk != 0) && (rv_data->ISPActualHardMinFreq != (hwmgr->ispArbiter.iclk / 100) )) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetHardMinIspclkByFreq, hwmgr->ispArbiter.iclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->ISPActualHardMinFreq),
- }
- } */
-
if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinVcn,
(rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
}
if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
hwmgr->gfx_arbiter.sclk_hard_min / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->soc_actual_hard_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq);
}
if ((hwmgr->gfx_arbiter.gfxclk != 0) &&
(rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinVideoGfxclkFreq,
hwmgr->gfx_arbiter.gfxclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->gfx_actual_soft_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq);
}
if ((hwmgr->gfx_arbiter.fclk != 0) &&
(rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinVideoFclkFreq,
hwmgr->gfx_arbiter.fclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->fabric_actual_soft_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq);
}
return 0;
}
-static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
- uint32_t num_of_active_displays = 0;
- struct cgs_display_info info = {0};
+ struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+ if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) {
+ rv_data->deep_sleep_dcefclk = clock/100;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ rv_data->deep_sleep_dcefclk);
+ }
+ return 0;
+}
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_of_active_displays = info.display_count;
+static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ if (rv_data->num_active_display != count) {
+ rv_data->num_active_display = count;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
- num_of_active_displays);
+ rv_data->num_active_display);
+ }
+
return 0;
}
-static const struct phm_master_table_item rv_set_power_state_list[] = {
- { .tableFunction = rv_tf_set_clock_limit },
- { .tableFunction = rv_tf_set_num_active_display },
- { }
-};
-
-static const struct phm_master_table_header rv_set_power_state_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_set_power_state_list
-};
+static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ return rv_set_clock_limit(hwmgr, input);
+}
-static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
@@ -340,20 +249,13 @@ static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static const struct phm_master_table_item rv_setup_asic_list[] = {
- { .tableFunction = rv_tf_init_power_gate_state },
- { }
-};
-static const struct phm_master_table_header rv_setup_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_setup_asic_list
-};
+static int rv_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ return rv_init_power_gate_state(hwmgr);
+}
-static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
@@ -365,66 +267,42 @@ static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
return 0;
}
-static const struct phm_master_table_item rv_power_down_asic_list[] = {
- { .tableFunction = rv_tf_reset_cc6_data },
- { }
-};
-
-static const struct phm_master_table_header rv_power_down_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_power_down_asic_list
-};
-
+static int rv_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ return rv_reset_cc6_data(hwmgr);
+}
-static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableGfxOff);
return 0;
}
-static const struct phm_master_table_item rv_disable_dpm_list[] = {
- { .tableFunction = rv_tf_disable_gfx_off },
- { },
-};
-
-
-static const struct phm_master_table_header rv_disable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_disable_dpm_list
-};
+static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return rv_disable_gfx_off(hwmgr);
+}
-static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableGfxOff);
return 0;
}
-static const struct phm_master_table_item rv_enable_dpm_list[] = {
- { .tableFunction = rv_tf_enable_gfx_off },
- { },
-};
-
-static const struct phm_master_table_header rv_enable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_enable_dpm_list
-};
+static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return rv_enable_gfx_off(hwmgr);
+}
static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
@@ -434,37 +312,37 @@ static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
/* temporary hardcoded clock voltage breakdown tables */
-DpmClock_t VddDcfClk[]= {
+static const DpmClock_t VddDcfClk[]= {
{ 300, 2600},
{ 600, 3200},
{ 600, 3600},
};
-DpmClock_t VddSocClk[]= {
+static const DpmClock_t VddSocClk[]= {
{ 478, 2600},
{ 722, 3200},
{ 722, 3600},
};
-DpmClock_t VddFClk[]= {
+static const DpmClock_t VddFClk[]= {
{ 400, 2600},
{1200, 3200},
{1200, 3600},
};
-DpmClock_t VddDispClk[]= {
+static const DpmClock_t VddDispClk[]= {
{ 435, 2600},
{ 661, 3200},
{1086, 3600},
};
-DpmClock_t VddDppClk[]= {
+static const DpmClock_t VddDppClk[]= {
{ 435, 2600},
{ 661, 3200},
{ 661, 3600},
};
-DpmClock_t VddPhyClk[]= {
+static const DpmClock_t VddPhyClk[]= {
{ 540, 2600},
{ 810, 3200},
{ 810, 3600},
@@ -472,7 +350,7 @@ DpmClock_t VddPhyClk[]= {
static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct rv_voltage_dependency_table **pptable,
- uint32_t num_entry, DpmClock_t *pclk_dependency_table)
+ uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
uint32_t table_size, i;
struct rv_voltage_dependency_table *ptable;
@@ -505,7 +383,7 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
DpmClocks_t *table = &(rv_data->clock_table);
struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- result = rv_copy_table_from_smc(hwmgr->smumgr, (uint8_t *)table, CLOCKTABLE);
+ result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE);
PP_ASSERT_WITH_CODE((0 == result),
"Attempt to copy clock table from smc failed",
@@ -543,6 +421,26 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMinGfxclkFrequency),
+ "Attempt to get min GFXCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &result),
+ "Attempt to get min GFXCLK Failed!",
+ return -1);
+ rv_data->gfx_min_freq_limit = result * 100;
+
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxGfxclkFrequency),
+ "Attempt to get max GFXCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &result),
+ "Attempt to get max GFXCLK Failed!",
+ return -1);
+ rv_data->gfx_max_freq_limit = result * 100;
+
return 0;
}
@@ -563,9 +461,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
return result;
}
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerPlaySupport);
-
rv_populate_clock_table(hwmgr);
result = rv_get_system_info_data(hwmgr);
@@ -576,40 +471,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
rv_construct_boot_state(hwmgr);
- result = phm_construct_table(hwmgr, &rv_setup_asic_master,
- &(hwmgr->setup_asic));
- if (result != 0) {
- pr_err("Fail to construct setup ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_power_down_asic_master,
- &(hwmgr->power_down_asic));
- if (result != 0) {
- pr_err("Fail to construct power down ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_set_power_state_master,
- &(hwmgr->set_power_state));
- if (result != 0) {
- pr_err("Fail to construct set_power_state\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_disable_dpm_master,
- &(hwmgr->disable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to disable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &rv_enable_dpm_master,
- &(hwmgr->enable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to enable_dynamic_state\n");
- return result;
- }
-
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
RAVEN_MAX_HARDWARE_POWERLEVELS;
@@ -624,8 +485,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- rv_init_vq_budget_table(hwmgr);
-
return result;
}
@@ -634,46 +493,21 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
- phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
- phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
-
- if (pinfo->vdd_dep_on_dcefclk) {
- kfree(pinfo->vdd_dep_on_dcefclk);
- pinfo->vdd_dep_on_dcefclk = NULL;
- }
- if (pinfo->vdd_dep_on_socclk) {
- kfree(pinfo->vdd_dep_on_socclk);
- pinfo->vdd_dep_on_socclk = NULL;
- }
- if (pinfo->vdd_dep_on_fclk) {
- kfree(pinfo->vdd_dep_on_fclk);
- pinfo->vdd_dep_on_fclk = NULL;
- }
- if (pinfo->vdd_dep_on_dispclk) {
- kfree(pinfo->vdd_dep_on_dispclk);
- pinfo->vdd_dep_on_dispclk = NULL;
- }
- if (pinfo->vdd_dep_on_dppclk) {
- kfree(pinfo->vdd_dep_on_dppclk);
- pinfo->vdd_dep_on_dppclk = NULL;
- }
- if (pinfo->vdd_dep_on_phyclk) {
- kfree(pinfo->vdd_dep_on_phyclk);
- pinfo->vdd_dep_on_phyclk = NULL;
- }
-
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
-
- if (NULL != hwmgr->dyn_state.vq_budgeting_table) {
- kfree(hwmgr->dyn_state.vq_budgeting_table);
- hwmgr->dyn_state.vq_budgeting_table = NULL;
- }
+ kfree(pinfo->vdd_dep_on_dcefclk);
+ pinfo->vdd_dep_on_dcefclk = NULL;
+ kfree(pinfo->vdd_dep_on_socclk);
+ pinfo->vdd_dep_on_socclk = NULL;
+ kfree(pinfo->vdd_dep_on_fclk);
+ pinfo->vdd_dep_on_fclk = NULL;
+ kfree(pinfo->vdd_dep_on_dispclk);
+ pinfo->vdd_dep_on_dispclk = NULL;
+ kfree(pinfo->vdd_dep_on_dppclk);
+ pinfo->vdd_dep_on_dppclk = NULL;
+ kfree(pinfo->vdd_dep_on_phyclk);
+ pinfo->vdd_dep_on_phyclk = NULL;
+
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
@@ -687,12 +521,12 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
return 0;
}
-static int rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
return 0;
}
@@ -711,18 +545,9 @@ static int rv_dpm_get_pp_table_entry_callback(
{
struct rv_power_state *rv_ps = cast_rv_ps(hw_ps);
- const ATOM_PPLIB_CZ_CLOCK_INFO *rv_clock_info = clock_info;
-
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
- uint8_t clock_info_index = rv_clock_info->index;
-
- if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
- clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
-
- rv_ps->levels[index].engine_clock = table->entries[clock_info_index].clk;
- rv_ps->levels[index].vddc_index = (uint8_t)table->entries[clock_info_index].v;
+ rv_ps->levels[index].engine_clock = 0;
+ rv_ps->levels[index].vddc_index = 0;
rv_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
@@ -794,43 +619,74 @@ static int rv_force_clock_level(struct pp_hwmgr *hwmgr,
static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
- return 0;
+ struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct rv_voltage_dependency_table *mclk_table =
+ data->clock_vol_info.vdd_dep_on_fclk;
+ int i, now, size = 0;
+
+ switch (type) {
+ case PP_SCLK:
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetGfxclkFrequency),
+ "Attempt to get current GFXCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &now),
+ "Attempt to get current GFXCLK Failed!",
+ return -1);
+
+ size += sprintf(buf + size, "0: %uMhz %s\n",
+ data->gfx_min_freq_limit / 100,
+ ((data->gfx_min_freq_limit / 100)
+ == now) ? "*" : "");
+ size += sprintf(buf + size, "1: %uMhz %s\n",
+ data->gfx_max_freq_limit / 100,
+ ((data->gfx_max_freq_limit / 100)
+ == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetFclkFrequency),
+ "Attempt to get current MEMCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &now),
+ "Attempt to get current MEMCLK Failed!",
+ return -1);
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i,
+ mclk_table->entries[i].clk / 100,
+ ((mclk_table->entries[i].clk / 100)
+ == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
}
static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
- const struct rv_power_state *ps;
struct rv_hwmgr *data;
- uint32_t level_index;
- uint32_t i;
- uint32_t vol_dep_record_index = 0;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
data = (struct rv_hwmgr *)(hwmgr->backend);
- ps = cast_const_rv_ps(state);
- level_index = index > ps->level - 1 ? ps->level - 1 : index;
- level->coreClock = ps->levels[level_index].engine_clock;
-
- if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
- for (i = 1; i < ps->level; i++) {
- if (ps->levels[i].engine_clock > data->dce_slow_sclk_threshold) {
- level->coreClock = ps->levels[i].engine_clock;
- break;
- }
- }
- }
-
- if (level_index == 0) {
- vol_dep_record_index = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
- level->memory_clock =
- data->clock_vol_info.vdd_dep_on_fclk->entries[vol_dep_record_index].clk;
- } else
+ if (index == 0) {
level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+ level->coreClock = data->gfx_min_freq_limit;
+ } else {
+ level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
+ data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
+ level->coreClock = data->gfx_max_freq_limit;
+ }
level->nonLocalMemoryFreq = 0;
level->nonLocalMemoryWidth = 0;
@@ -993,7 +849,7 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
clk_freq);
return result;
@@ -1001,7 +857,8 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
- return -EINVAL;
+ clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
+ return 0;
}
static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
@@ -1023,13 +880,37 @@ static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
+ uint32_t sclk, mclk;
+ int ret = 0;
+
switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
+ if (!ret) {
+ rv_read_arg_from_smc(hwmgr, &sclk);
+ /* in units of 10KHZ */
+ *((uint32_t *)value) = sclk * 100;
+ *size = 4;
+ }
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
+ if (!ret) {
+ rv_read_arg_from_smc(hwmgr, &mclk);
+ /* in units of 10KHZ */
+ *((uint32_t *)value) = mclk * 100;
+ *size = 4;
+ }
+ break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = rv_thermal_get_temperature(hwmgr);
- return 0;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ return ret;
}
static const struct pp_hwmgr_func rv_hwmgr_funcs = {
@@ -1058,6 +939,13 @@ static const struct pp_hwmgr_func rv_hwmgr_funcs = {
.get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage,
.get_max_high_clocks = rv_get_max_high_clocks,
.read_sensor = rv_read_sensor,
+ .set_active_display_count = rv_set_active_display_count,
+ .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk,
+ .dynamic_state_management_enable = rv_enable_dpm_tasks,
+ .power_off_asic = rv_power_off_asic,
+ .asic_setup = rv_setup_asic_task,
+ .power_state_set = rv_set_power_state_tasks,
+ .dynamic_state_management_disable = rv_disable_dpm_tasks,
};
int rv_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index 2472b50e54cf..9dc503055394 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -283,6 +283,8 @@ struct rv_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_min_freq_limit;
+ uint32_t gfx_max_freq_limit;
bool vcn_power_gated;
bool vcn_dpg_mode;
@@ -293,7 +295,9 @@ struct rv_hwmgr {
DpmClocks_t clock_table;
uint32_t active_process_mask;
- bool need_min_deep_sleep_dcefclk; /* disabled by default */
+ bool need_min_deep_sleep_dcefclk;
+ uint32_t deep_sleep_dcefclk;
+ uint32_t num_active_display;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 261b828ad590..69a0678ace98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -27,21 +27,21 @@
static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_UVDDPM_Enable :
PPSMC_MSG_UVDDPM_Disable);
}
static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_VCEDPM_Enable :
PPSMC_MSG_VCEDPM_Disable);
}
static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_SAMUDPM_Enable :
PPSMC_MSG_SAMUDPM_Disable);
}
@@ -70,7 +70,7 @@ static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_UVDPowerOFF);
return 0;
}
@@ -80,10 +80,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDPowerON, 1);
} else {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDPowerON, 0);
}
}
@@ -94,7 +94,7 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
@@ -102,7 +102,7 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
@@ -111,7 +111,7 @@ static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SAMPowerOFF);
return 0;
}
@@ -120,7 +120,7 @@ static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SAMPowerON);
return 0;
}
@@ -140,7 +140,7 @@ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -166,10 +166,9 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
smu7_update_uvd_dpm(hwmgr, false);
}
- return 0;
}
-int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -194,7 +193,6 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_PG_STATE_UNGATE);
smu7_update_vce_dpm(hwmgr, false);
}
- return 0;
}
int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
@@ -237,7 +235,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -247,7 +245,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -260,7 +258,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -271,7 +269,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -284,7 +282,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_RLC_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -297,7 +295,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CP_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -311,7 +309,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
CG_GFX_OTHERS_MGCG_MASK);
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -331,7 +329,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -341,7 +339,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -354,7 +352,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -365,7 +363,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -378,7 +376,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -388,7 +386,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -401,7 +399,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -412,7 +410,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -425,7 +423,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -436,7 +434,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -449,7 +447,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_ROM_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -489,9 +487,9 @@ int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
active_cus = sys_info.value;
if (enable)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
else
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GFX_CU_PG_DISABLE);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index c96ed9ed7eaf..7b54d48b2ce2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -27,8 +27,8 @@
#include "smu7_hwmgr.h"
#include "pp_asicblocks.h"
-int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b526f49be65d..e33ec7fc5d09 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>
+#include <drm/amdgpu_drm.h>
#include "pp_acpi.h"
#include "ppatomctrl.h"
#include "atombios.h"
@@ -163,7 +164,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
return 0;
}
@@ -300,28 +301,28 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
"Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
}
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
PP_ASSERT_WITH_CODE(
(data->vddc_voltage_table.count <= tmp),
"Too many voltage values for VDDC. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddc_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
PP_ASSERT_WITH_CODE(
(data->vddgfx_voltage_table.count <= tmp),
"Too many voltage values for VDDC. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddgfx_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
PP_ASSERT_WITH_CODE(
(data->vddci_voltage_table.count <= tmp),
"Too many voltage values for VDDCI. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddci_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
PP_ASSERT_WITH_CODE(
(data->mvdd_voltage_table.count <= tmp),
"Too many voltage values for MVDD. Trimming to fit state table.",
@@ -387,6 +388,7 @@ static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int i;
/* Clear reset for voting clients before enabling DPM */
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -394,50 +396,26 @@ static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
+ for (i = 0; i < 8; i++)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0 + i * 4,
+ data->voting_rights_clients[i]);
return 0;
}
static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
+ int i;
+
/* Reset voting clients before disabling DPM */
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
+ for (i = 0; i < 8; i++)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
return 0;
}
@@ -493,7 +471,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}
/**
@@ -551,7 +529,7 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
data->pcie_gen_performance = data->pcie_gen_power_saving;
data->pcie_lane_performance = data->pcie_lane_power_saving;
}
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
tmp,
MAX_REGULAR_DPM_NUMBER);
@@ -607,13 +585,20 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
data->dpm_table.pcie_speed_table.count = 6;
}
/* Populate last level for boot PCIE level, but do not increment count. */
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ data->vbios_boot_state.pcie_lane_bootup_value);
+ } else {
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
data->dpm_table.pcie_speed_table.count,
get_pcie_gen_support(data->pcie_gen_cap,
PP_Min_PCIEGen),
get_pcie_lane_support(data->pcie_lane_cap,
PP_Max_PCIELane));
-
+ }
return 0;
}
@@ -625,27 +610,27 @@ static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
phm_reset_single_dpm_table(
&data->dpm_table.sclk_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_GRAPHICS),
MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.mclk_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.vddc_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_VDDC),
MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.vddci_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.mvdd_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_MVDD),
MAX_REGULAR_DPM_NUMBER);
return 0;
@@ -689,7 +674,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
allowed_vdd_sclk_table->entries[i].clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.sclk_table.count++;
}
}
@@ -703,7 +688,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
allowed_vdd_mclk_table->entries[i].clk) {
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.mclk_table.count++;
}
}
@@ -855,7 +840,7 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableVRHotGPIOInterrupt);
return 0;
@@ -873,7 +858,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
return 0;
}
@@ -883,7 +868,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
return 0;
}
@@ -892,12 +877,12 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
PP_ASSERT_WITH_CODE(false,
"Attempt to enable Master Deep Sleep switch failed!",
return -EINVAL);
} else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
+ if (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MASTER_DeepSleep_OFF)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
@@ -912,7 +897,7 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
+ if (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MASTER_DeepSleep_OFF)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
@@ -928,12 +913,12 @@ static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t soft_register_value = 0;
uint32_t handshake_disables_offset = data->soft_regs_start
- + smum_get_offsetof(hwmgr->smumgr,
+ + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, HandshakeDisables);
soft_register_value = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC, handshake_disables_offset);
- soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
+ soft_register_value |= smum_get_mac_definition(hwmgr,
SMU_UVD_MCLK_HANDSHAKE_DISABLE);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
handshake_disables_offset, soft_register_value);
@@ -947,7 +932,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
/* enable SCLK dpm */
if (!data->sclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
"Failed to enable SCLK DPM during DPM Start Function!",
return -EINVAL);
@@ -956,20 +941,31 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
smu7_disable_handshake_uvd(hwmgr);
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ (0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_Enable)),
"Failed to enable MCLK DPM during DPM Start Function!",
return -EINVAL);
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
- udelay(10);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
+ } else {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+ }
}
return 0;
@@ -993,11 +989,15 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
data->soft_regs_start +
- smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+ smum_get_offsetof(hwmgr, SMU_SoftRegisters,
VoltageChangeTimeout), 0x1000);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
SWRST_COMMAND_1, RESETLC, 0x0);
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
+ cgs_write_register(hwmgr->device, 0x1488,
+ (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
+
if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
pr_err("Failed to enable Sclk DPM and Mclk DPM!");
return -EINVAL;
@@ -1006,7 +1006,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
/* enable PCIE dpm */
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ (0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_Enable)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
@@ -1014,7 +1014,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableACDCGPIOInterrupt)),
"Failed to enable AC DC GPIO Interrupt!",
);
@@ -1032,7 +1032,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable SCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
}
/* disable MCLK dpm */
@@ -1040,7 +1040,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable MCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
}
return 0;
@@ -1060,7 +1060,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
/* disable PCIE dpm */
if (!data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
+ (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_Disable) == 0),
"Failed to disable pcie DPM during DPM Stop Function!",
return -EINVAL);
@@ -1072,7 +1072,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
"Trying to disable voltage DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
return 0;
}
@@ -1226,7 +1226,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
+ smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1361,14 +1361,14 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->vddc_vddgfx_delta = 300;
data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+ data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
+ data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
+ data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
@@ -1382,23 +1382,40 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->force_pcie_gen = PP_PCIEGenInvalid;
data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
- if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) {
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
uint8_t tmp1, tmp2;
uint16_t tmp3 = 0;
atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
&tmp3);
tmp3 = (tmp3 >> 5) & 0x3;
data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
+ } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ data->vddc_phase_shed_control = 1;
+ } else {
+ data->vddc_phase_shed_control = 0;
+ }
+
+ if (hwmgr->chip_id == CHIP_HAWAII) {
+ data->thermal_temp_setting.temperature_low = 94500;
+ data->thermal_temp_setting.temperature_high = 95000;
+ data->thermal_temp_setting.temperature_shutdown = 104000;
+ } else {
+ data->thermal_temp_setting.temperature_low = 99500;
+ data->thermal_temp_setting.temperature_high = 100000;
+ data->thermal_temp_setting.temperature_shutdown = 104000;
}
data->fast_watermark_threshold = 100;
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDGFX)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
@@ -1406,25 +1423,24 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDGFX);
- }
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
@@ -1543,7 +1559,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (vddc >= 2000 || vddc == 0)
return -EINVAL;
} else {
- pr_warn("failed to retrieving EVV voltage!\n");
+ pr_debug("failed to retrieving EVV voltage!\n");
continue;
}
@@ -1676,7 +1692,7 @@ static int phm_add_voltage(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((0 != look_up_table->count),
"Lookup Table empty.", return -EINVAL);
- i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
PP_ASSERT_WITH_CODE((i >= look_up_table->count),
"Lookup Table is full.", return -EINVAL);
@@ -2274,7 +2290,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
}
- if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+ if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
return 0;
@@ -2282,40 +2298,65 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
- pp_smu7_thermal_fini(hwmgr);
- if (NULL != hwmgr->backend) {
- kfree(hwmgr->backend);
- hwmgr->backend = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
return 0;
}
+static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
+{
+ uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int i;
+
+ if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
+ for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
+ virtual_voltage_id,
+ efuse_voltage_id) == 0) {
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
+ data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
+ data->vddc_leakage.count++;
+ }
+ if (vddci != 0 && vddci != virtual_voltage_id) {
+ data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
+ data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
+ data->vddci_leakage.count++;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
- int result;
+ int result = 0;
data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
- pp_smu7_thermal_initialize(hwmgr);
-
smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
/* Get leakage voltage based on leakage ID. */
- result = smu7_get_evv_voltages(hwmgr);
-
- if (result) {
- pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
- return -EINVAL;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV)) {
+ result = smu7_get_evv_voltages(hwmgr);
+ if (result) {
+ pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+ return -EINVAL;
+ }
+ } else {
+ smu7_get_elb_voltages(hwmgr);
}
if (hwmgr->pp_table_version == PP_TABLE_V1) {
@@ -2382,7 +2423,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel, level);
}
}
@@ -2395,7 +2436,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2409,7 +2450,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2428,14 +2469,14 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
}
if (!data->mclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
}
@@ -2451,7 +2492,7 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return -EINVAL;
if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_UnForceLevel);
}
@@ -2468,7 +2509,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
(1 << level));
@@ -2478,7 +2519,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2488,7 +2529,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
(level));
}
@@ -2572,51 +2613,16 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = smu7_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = smu7_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -2625,26 +2631,23 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
if (ret)
return ret;
- hwmgr->dpm_level = level;
smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
-
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
- return 0;
+ if (!ret) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+ }
+ return ret;
}
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
@@ -2843,7 +2846,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
-static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct smu7_power_state *smu7_ps;
@@ -2865,7 +2868,7 @@ static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
[smu7_ps->performance_level_count-1].memory_clock;
}
-static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct smu7_power_state *smu7_ps;
@@ -3002,7 +3005,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
[smu7_power_state->performance_level_count++]);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
@@ -3071,11 +3074,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- pr_err("Single MCLK entry VDDCI/MCLK dependency table "
+ pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].vddci !=
data->vbios_boot_state.vddci_bootup_value)
- pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3166,7 +3169,7 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
data->highest_mclk = memory_clock;
PP_ASSERT_WITH_CODE(
- (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
@@ -3219,11 +3222,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- pr_err("Single MCLK entry VDDCI/MCLK dependency table "
+ pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].v !=
data->vbios_boot_state.vddci_bootup_value)
- pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3312,14 +3315,14 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
struct pp_gpu_power *query)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogStart),
"Failed to start pm status log!",
return -1);
msleep_interruptible(20);
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogSample),
"Failed to sample pm status log!",
return -1);
@@ -3353,19 +3356,19 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
AverageGraphicsActivity);
@@ -3532,7 +3535,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SCLKDPM_FreezeLevel),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3544,7 +3547,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze MCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_FreezeLevel),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3762,7 +3765,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3774,8 +3777,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3824,9 +3827,9 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
- return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -3899,10 +3902,7 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
@@ -3911,7 +3911,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
+ return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
}
static int
@@ -3974,12 +3974,12 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
PreVBlankGap), 0x64);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
VBlankTimeout),
(frame_time_in_us - pre_vbi_time_in_us));
@@ -4004,10 +4004,7 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
@@ -4249,21 +4246,21 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
+ if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
switch (type) {
case PP_SCLK:
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
break;
@@ -4276,7 +4273,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
level++;
if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
level);
break;
@@ -4300,7 +4297,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < sclk_table->count; i++) {
@@ -4316,7 +4313,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < mclk_table->count; i++) {
@@ -4353,31 +4350,27 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
-static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- int result = 0;
-
switch (mode) {
case AMD_FAN_CTRL_NONE:
- result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
- result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
- result = smu7_fan_ctrl_set_static_mode(hwmgr, mode);
- if (!result)
- result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+ if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
+ smu7_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
- return result;
}
-static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}
@@ -4606,7 +4599,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (sclk_mask) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.
sclk_dpm_enable_mask &
@@ -4615,7 +4608,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (mclk_mask) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.
mclk_dpm_enable_mask &
@@ -4627,8 +4620,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (smu_data == NULL)
return -EINVAL;
@@ -4640,19 +4632,60 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr->smumgr, PPSMC_MSG_EnableAvfs),
+ hwmgr, PPSMC_MSG_EnableAvfs),
"Failed to enable AVFS!",
return -EINVAL);
} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr->smumgr, PPSMC_MSG_DisableAvfs),
+ hwmgr, PPSMC_MSG_DisableAvfs),
"Failed to disable AVFS!",
return -EINVAL);
return 0;
}
+static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_ADDR_H),
+ mc_addr_hi);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_ADDR_L),
+ mc_addr_low);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
+ virtual_addr_hi);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
+ virtual_addr_low);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
+ size);
+ return 0;
+}
+
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
.backend_fini = &smu7_hwmgr_backend_fini,
@@ -4703,6 +4736,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_power_profile_state = smu7_set_power_profile_state,
.avfs_control = smu7_avfs_control,
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
+ .start_thermal_controller = smu7_start_thermal_controller,
+ .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index f221e17b67e7..e021154aedbd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -182,14 +182,7 @@ struct smu7_hwmgr {
struct smu7_dpm_table dpm_table;
struct smu7_dpm_table golden_dpm_table;
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
+ uint32_t voting_rights_clients[8];
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 1dc31aa72781..85ca16abb626 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -629,51 +629,38 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
uint32_t block_en = 0;
int32_t result = 0;
uint32_t didt_block;
- uint32_t data;
if (hwmgr->chip_id == CHIP_POLARIS11)
didt_block = Polaris11_DIDTBlock_Info;
else
didt_block = DIDTBlock_Info;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_SQRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_SQ_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~SQ_Enable_MASK;
didt_block |= block_en << SQ_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_DBRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DB_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~DB_Enable_MASK;
didt_block |= block_en << DB_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0;
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_TDRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TD_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~TD_Enable_MASK;
didt_block |= block_en << TD_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_TCPRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TCP_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~TCP_Enable_MASK;
didt_block |= block_en << TCP_Enable_SHIFT;
-
if (enable)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
return result;
}
@@ -753,12 +740,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
if (result == 0)
num_se = sys_info.value;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
+ PP_CAP(PHM_PlatformCaps_DBRamping) ||
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
cgs_enter_safe_mode(hwmgr->device, true);
+ cgs_lock_grbm_idx(hwmgr->device, true);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
for (count = 0; count < num_se; count++) {
@@ -775,7 +763,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
} else if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- if (hwmgr->smumgr->is_kicker)
+ if (hwmgr->is_kicker)
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker);
else
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
@@ -793,11 +781,12 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = smum_send_msg_to_smc(hwmgr->smumgr,
+ result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_EnableDpmDidt));
PP_ASSERT_WITH_CODE((0 == result),
"Failed to enable DPM DIDT.", return result);
}
+ cgs_lock_grbm_idx(hwmgr->device, false);
cgs_enter_safe_mode(hwmgr->device, false);
}
@@ -808,10 +797,10 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
+ PP_CAP(PHM_PlatformCaps_DBRamping) ||
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
cgs_enter_safe_mode(hwmgr->device, true);
@@ -820,7 +809,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
"Post DIDT enable clock gating failed.",
return result);
if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = smum_send_msg_to_smc(hwmgr->smumgr,
+ result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableDpmDidt));
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", return result);
@@ -836,10 +825,9 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC)) {
+ if (PP_CAP(PHM_PlatformCaps_CAC)) {
int smc_result;
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_EnableCac));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
@@ -854,9 +842,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC) && data->cac_enabled) {
- int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableCac));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
@@ -872,7 +859,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PkgPwrSetLimit, n);
return 0;
}
@@ -880,7 +867,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
uint32_t target_tdp)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
@@ -899,11 +886,9 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
else
cac_table = hwmgr->dyn_state.cac_dtp_table;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
-
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->enable_tdc_limit_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_TDCLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
@@ -913,14 +898,13 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
}
if (data->enable_pkg_pwr_tracking_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
@@ -937,14 +921,13 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment) &&
- data->power_containment_features) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
int smc_result;
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_TDCLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
@@ -953,7 +936,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableDTE));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
@@ -962,7 +945,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
@@ -987,16 +970,17 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
cac_table = table_info->cac_dtp_table;
else
cac_table = hwmgr->dyn_state.cac_dtp_table;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
- /* SMC requested that target_tdp to be 7 bit fraction in DPM table
- * but message to be 8 bit fraction for messages
- */
- target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+
+ if (hwmgr->chip_id > CHIP_TONGA)
+ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+ else
+ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100;
+
result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index baddb569a8b8..d7aa643cdb51 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -37,9 +37,8 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
- hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
@@ -87,8 +86,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
uint32_t crystal_clock_freq;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution == 0))
+ !hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
return -ENODEV;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -152,13 +150,11 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM))
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM);
@@ -169,12 +165,12 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
} else {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
}
if (!result && hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature);
@@ -187,7 +183,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
hwmgr->fan_ctrl_enabled = false;
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
}
/**
@@ -209,8 +205,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (speed > 100)
speed = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -241,8 +236,7 @@ int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result)
result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
@@ -270,8 +264,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
crystal_clock_freq = smu7_get_xclk(hwmgr);
@@ -367,7 +360,7 @@ static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
*
* @param hwmgr The address of the hardware manager.
*/
-int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
@@ -378,7 +371,7 @@ int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
}
/**
@@ -396,7 +389,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
}
/**
@@ -423,16 +416,14 @@ int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
-static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+static int smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
smu7_fan_ctrl_start_smc_fan_control(hwmgr);
smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
@@ -440,108 +431,34 @@ static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
return 0;
}
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+ int ret = 0;
if (range == NULL)
return -EINVAL;
- return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_disable_alert(hwmgr);
-}
+ smu7_thermal_initialize(hwmgr);
+ ret = smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
+ if (ret)
+ return -EINVAL;
+ smu7_thermal_enable_alert(hwmgr);
+ ret = smum_thermal_avfs_enable(hwmgr);
+ if (ret)
+ return -EINVAL;
-static const struct phm_master_table_item
-phm_thermal_start_thermal_controller_master_list[] = {
- { .tableFunction = tf_smu7_thermal_initialize },
- { .tableFunction = tf_smu7_thermal_set_temperature_range },
- { .tableFunction = tf_smu7_thermal_enable_alert },
- { .tableFunction = smum_thermal_avfs_enable },
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- { .tableFunction = smum_thermal_setup_fan_table },
- { .tableFunction = tf_smu7_thermal_start_smc_fan_control },
- { }
-};
-
-static const struct phm_master_table_header
-phm_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- phm_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-phm_thermal_set_temperature_range_master_list[] = {
- { .tableFunction = tf_smu7_thermal_disable_alert },
- { .tableFunction = tf_smu7_thermal_set_temperature_range },
- { .tableFunction = tf_smu7_thermal_enable_alert },
- { }
-};
-
-static const struct phm_master_table_header
-phm_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- phm_thermal_set_temperature_range_master_list
-};
+ smum_thermal_setup_fan_table(hwmgr);
+ smu7_thermal_start_smc_fan_control(hwmgr);
+ return 0;
+}
+
+
int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
@@ -550,35 +467,3 @@ int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
return 0;
}
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &phm_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &phm_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
-void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr)
-{
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller));
- return;
-} \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
index ba71b608fa75..42c1ba0fad78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -46,14 +46,13 @@ extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *temperature_range);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f8f02e70b8bc..f8d838c2c8ee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -56,7 +56,7 @@
#define HBM_MEMORY_CHANNEL_WIDTH 128
-uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
+static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000
@@ -81,7 +81,7 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask);
-const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
+static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
struct vega10_power_state *cast_phw_vega10_power_state(
struct pp_hw_power_state *hw_ps)
@@ -201,9 +201,6 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_ControlVDDCI);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
sys_info.size = sizeof(struct cgs_system_info);
@@ -381,12 +378,10 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (!data->registry_data.socclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_SOCCLK].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM))
+ if (PP_CAP(PHM_PlatformCaps_UVDDPM))
data->smu_features[GNLD_DPM_UVD].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEDPM))
+ if (PP_CAP(PHM_PlatformCaps_VCEDPM))
data->smu_features[GNLD_DPM_VCE].supported = true;
if (!data->registry_data.pcie_dpm_key_disabled)
@@ -395,9 +390,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (!data->registry_data.dcefclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep) &&
- data->registry_data.sclk_deep_sleep_support) {
+ if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
+ data->registry_data.sclk_deep_sleep_support) {
data->smu_features[GNLD_DS_GFXCLK].supported = true;
data->smu_features[GNLD_DS_SOCCLK].supported = true;
data->smu_features[GNLD_DS_LCLK].supported = true;
@@ -431,8 +425,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion);
- vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version));
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
/* ACG firmware has major version 5 */
if ((data->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
@@ -497,8 +491,7 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (!vega10_get_socclk_for_voltage_evv(hwmgr,
table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
+ if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
for (j = 1; j < socclk_table->count; j++) {
if (socclk_table->entries[j].clk == sclk &&
socclk_table->entries[j].cks_enable == 0) {
@@ -591,61 +584,37 @@ static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
static int vega10_patch_voltage_dependency_tables_with_lookup_table(
struct pp_hwmgr *hwmgr)
{
- uint8_t entry_id;
- uint8_t voltage_id;
+ uint8_t entry_id, voltage_id;
+ unsigned i;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
- table_info->vdd_dep_on_socclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
- table_info->vdd_dep_on_dcefclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
- table_info->vdd_dep_on_pixclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
- table_info->vdd_dep_on_dispclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
- table_info->vdd_dep_on_phyclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+ table_info->vdd_dep_on_mclk;
- for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
- voltage_id = socclk_table->entries[entry_id].vddInd;
- socclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
- voltage_id = gfxclk_table->entries[entry_id].vddInd;
- gfxclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
- voltage_id = dcefclk_table->entries[entry_id].vddInd;
- dcefclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
- voltage_id = pixclk_table->entries[entry_id].vddInd;
- pixclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
+ for (i = 0; i < 6; i++) {
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
+ switch (i) {
+ case 0: vdt = table_info->vdd_dep_on_socclk; break;
+ case 1: vdt = table_info->vdd_dep_on_sclk; break;
+ case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
+ case 3: vdt = table_info->vdd_dep_on_pixclk; break;
+ case 4: vdt = table_info->vdd_dep_on_dispclk; break;
+ case 5: vdt = table_info->vdd_dep_on_phyclk; break;
+ }
- for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
- voltage_id = dspclk_table->entries[entry_id].vddInd;
- dspclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ for (entry_id = 0; entry_id < vdt->count; entry_id++) {
+ voltage_id = vdt->entries[entry_id].vddInd;
+ vdt->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
}
- for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
- voltage_id = phyclk_table->entries[entry_id].vddInd;
- phyclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
+ voltage_id = mm_table->entries[entry_id].vddcInd;
+ mm_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
}
for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
@@ -660,11 +629,6 @@ static int vega10_patch_voltage_dependency_tables_with_lookup_table(
table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
}
- for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
- voltage_id = mm_table->entries[entry_id].vddcInd;
- mm_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
return 0;
@@ -789,6 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct cgs_system_info sys_info = {0};
+ uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
@@ -838,8 +803,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}
/* VDDCI_MEM */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
+ if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
@@ -896,6 +860,16 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ reg = soc15_get_register_offset(DF_HWID, 0,
+ mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
+ mmDF_CS_AON0_DramBaseAddress0);
+ data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
+ "Mem Channel Index Exceeded maximum!",
+ return -EINVAL);
+
return result;
}
@@ -959,7 +933,7 @@ static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
uint32_t features_enabled;
- if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
+ if (!vega10_get_smc_features(hwmgr, &features_enabled)) {
if (features_enabled & SMC_DPM_FEATURES)
return true;
}
@@ -1198,6 +1172,8 @@ static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
{
int i;
+ dpm_table->count = 0;
+
for (i = 0; i < dep_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
dep_table->entries[i].clk) {
@@ -1306,10 +1282,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
/* Initialize Sclk DPM table based on allow Sclk values */
- data->dpm_table.soc_table.count = 0;
- data->dpm_table.gfx_table.count = 0;
- data->dpm_table.dcef_table.count = 0;
-
dpm_table = &(data->dpm_table.soc_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
@@ -1411,10 +1383,8 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
data->odn_dpm_table.odn_core_clock_dpm_levels.
number_of_performance_levels = data->dpm_table.gfx_table.count;
for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
@@ -1818,7 +1788,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
int result = 0;
- uint32_t i, j, reg, mem_channels;
+ uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
@@ -1842,16 +1812,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
i++;
}
- reg = soc15_get_register_offset(DF_HWID, 0,
- mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
- mmDF_CS_AON0_DramBaseAddress0);
- mem_channels = (cgs_read_register(hwmgr->device, reg) &
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
+ pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
pp_table->MemoryChannelWidth =
- cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
- channel_number[mem_channels]);
+ (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
+ channel_number[data->mem_channels]);
pp_table->LowestUclkReservedForUlv =
(uint8_t)(data->lowest_uclk_reserved_for_ulv);
@@ -2311,21 +2275,21 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
uint32_t agc_btc_response;
if (data->smu_features[GNLD_ACG].supported) {
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
- vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
+ vega10_read_arg_from_smc(hwmgr, &agc_btc_response);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
else if (2 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop);
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+ if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = true;
} else {
@@ -2342,13 +2306,11 @@ static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (data->smu_features[GNLD_ACG].supported) {
- if (data->smu_features[GNLD_ACG].enabled) {
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, false,
- data->smu_features[GNLD_ACG].smu_feature_bitmap))
+ if (data->smu_features[GNLD_ACG].supported &&
+ data->smu_features[GNLD_ACG].enabled)
+ if (!vega10_enable_smc_features(hwmgr, false,
+ data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = false;
- }
- }
return 0;
}
@@ -2363,9 +2325,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
if (!result) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- (data->registry_data.regulator_hot_gpio_support)) {
+ if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
+ data->registry_data.regulator_hot_gpio_support) {
pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
@@ -2377,9 +2338,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
pp_table->VR1HotPolarity = 0;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition) &&
- (data->registry_data.ac_dc_switch_gpio_support)) {
+ if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
+ data->registry_data.ac_dc_switch_gpio_support) {
pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
} else {
@@ -2398,16 +2358,16 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
if (data->smu_features[GNLD_AVFS].supported) {
if (enable) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Enable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = true;
} else {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
- data->smu_features[GNLD_AVFS].smu_feature_id),
+ data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Disable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = false;
@@ -2428,15 +2388,15 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
- vega10_read_arg_from_smc(hwmgr->smumgr, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
+ vega10_read_arg_from_smc(hwmgr, &top32);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
- vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
+ vega10_read_arg_from_smc(hwmgr, &bottom32);
serial_number = ((uint64_t)bottom32 << 32) | top32;
- if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
+ if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
avfs_fuse_table->VFT0_b = fuse.VFT0_b;
avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
@@ -2446,7 +2406,7 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
avfs_fuse_table->VFT2_b = fuse.VFT2_b;
avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload FuseOVerride!",
@@ -2585,14 +2545,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
if (0 != boot_up_values.usVddc) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage,
(boot_up_values.usVddc * 4));
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
}
@@ -2618,7 +2578,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
vega10_populate_and_upload_avfs_fuse_override(hwmgr);
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)pp_table, PPTABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -2641,7 +2601,7 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
pr_info("THERMAL Feature Already enabled!");
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"Enable THERMAL Feature Failed!",
@@ -2661,7 +2621,7 @@ static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
pr_info("THERMAL Feature Already disabled!");
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"disable THERMAL Feature Failed!",
@@ -2677,11 +2637,10 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot)) {
+ if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
if (data->smu_features[GNLD_VR0HOT].supported) {
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
@@ -2690,7 +2649,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
} else {
if (data->smu_features[GNLD_VR1HOT].supported) {
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
@@ -2708,7 +2667,7 @@ static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.ulv_support) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"Enable ULV Feature Failed!",
return -1);
@@ -2724,7 +2683,7 @@ static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.ulv_support) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"disable ULV Feature Failed!",
return -EINVAL);
@@ -2740,7 +2699,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -EINVAL);
@@ -2748,7 +2707,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to Enable DS_SOCCLK Feature Failed!",
return -EINVAL);
@@ -2756,7 +2715,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to Enable DS_LCLK Feature Failed!",
return -EINVAL);
@@ -2764,7 +2723,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to Enable DS_DCEFCLK Feature Failed!",
return -EINVAL);
@@ -2780,7 +2739,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to disable DS_GFXCLK Feature Failed!",
return -EINVAL);
@@ -2788,7 +2747,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to disable DS_ Feature Failed!",
return -EINVAL);
@@ -2796,7 +2755,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to disable DS_LCLK Feature Failed!",
return -EINVAL);
@@ -2804,7 +2763,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to disable DS_DCEFCLK Feature Failed!",
return -EINVAL);
@@ -2822,7 +2781,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to disable LED DPM feature failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = false;
@@ -2840,7 +2799,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
+ vega10_enable_smc_features(hwmgr, false, feature_mask);
return 0;
}
@@ -2870,7 +2829,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- if (vega10_enable_smc_features(hwmgr->smumgr,
+ if (vega10_enable_smc_features(hwmgr,
true, feature_mask)) {
for (i = 0; i < GNLD_DPM_MAX; i++) {
if (data->smu_features[i].smu_feature_bitmap &
@@ -2880,22 +2839,21 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = true;
}
if (data->vbios_boot_state.bsoc_vddc_lock) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage, 0);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
+ if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
if (data->smu_features[GNLD_ACDC].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -1);
@@ -2912,13 +2870,13 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
int tmp_result, result = 0;
- tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to configure telemetry!",
return tmp_result);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, 0);
tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
@@ -2926,6 +2884,15 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"DPM is already running right , skipping re-enablement!",
return 0);
+ if ((data->smu_version == 0x001c2c00) ||
+ (data->smu_version == 0x001c2d00)) {
+ tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to set package power PID!",
+ return tmp_result);
+ }
+
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to contruct voltage tables!",
@@ -2936,8 +2903,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to initialize SMC table!",
result = tmp_result);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController)) {
+ if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
tmp_result = vega10_enable_thermal_protection(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable thermal protection!",
@@ -3172,8 +3138,9 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState)) {
+ stable_pstate_sclk_dpm_percentage =
+ data->registry_data.stable_pstate_sclk_dpm_percentage;
PP_ASSERT_WITH_CODE(
data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
@@ -3238,10 +3205,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchForVR);
- force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ForceMclkHigh);
+ disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
+ force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
disable_mclk_switching = (info.display_count > 1) ||
disable_mclk_switching_for_frame_lock ||
@@ -3292,8 +3257,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
vega10_ps->performance_levels[1].mem_clock;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState)) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
@@ -3325,10 +3289,8 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
data->need_update_dpm_table = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
break;
@@ -3412,10 +3374,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
uint32_t dpm_count, clock_percent;
uint32_t i;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
if (!data->need_update_dpm_table &&
!data->apply_optimized_settings &&
@@ -3480,10 +3440,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
dpm_table->
gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
value = sclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
/* Need to do calculation based on the golden DPM table
* as the Heatmap GPU Clock axis is also based on
* the default values
@@ -3537,10 +3495,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
mem_table.dpm_levels[dpm_table->mem_table.count - 1].
value = mclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
PP_ASSERT_WITH_CODE(
golden_dpm_table->mem_table.dpm_levels
@@ -3732,7 +3688,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.gfx_boot_level !=
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
data->smc_state_table.gfx_boot_level),
"Failed to set soft min sclk index!",
@@ -3748,14 +3704,14 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
socclk_idx),
"Failed to set soft min uclk index!",
return -EINVAL);
} else {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
data->smc_state_table.mem_boot_level),
"Failed to set soft min uclk index!",
@@ -3780,7 +3736,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.gfx_max_level !=
data->dpm_table.gfx_table.dpm_state.soft_max_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
data->smc_state_table.gfx_max_level),
"Failed to set soft max sclk index!",
@@ -3794,7 +3750,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.mem_max_level !=
data->dpm_table.mem_table.dpm_state.soft_max_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMaxUclkByIndex,
data->smc_state_table.mem_max_level),
"Failed to set soft max mclk index!",
@@ -3853,7 +3809,7 @@ int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_VCE].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
"Attempt to Enable/Disable DPM VCE Failed!",
@@ -3871,9 +3827,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
+ if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
+ (hwmgr->gfx_arbiter.sclk_threshold !=
data->low_sclk_interrupt_threshold)) {
data->low_sclk_interrupt_threshold =
hwmgr->gfx_arbiter.sclk_threshold;
@@ -3884,7 +3839,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
cpu_to_le32(low_sclk_interrupt_threshold);
/* This message will also enable SmcToHost Interrupt */
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
(uint32_t)low_sclk_interrupt_threshold);
}
@@ -3920,7 +3875,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
"Failed to update SCLK threshold!",
result = tmp_result);
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)pp_table, PPTABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -3931,7 +3886,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
return 0;
}
-static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
@@ -3953,7 +3908,7 @@ static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
[vega10_ps->performance_level_count - 1].gfx_clock;
}
-static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
@@ -3980,12 +3935,12 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
{
uint32_t value;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrPkgPwr),
"Failed to get current package power!",
return -EINVAL);
- vega10_read_arg_from_smc(hwmgr->smumgr, &value);
+ vega10_read_arg_from_smc(hwmgr, &value);
/* power value is an integer */
query->average_gpu_power = value << 8;
@@ -4002,25 +3957,25 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
+ vega10_read_arg_from_smc(hwmgr, &sclk_idx);
*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
+ vega10_read_arg_from_smc(hwmgr, &mclk_idx);
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
+ vega10_read_arg_from_smc(hwmgr, &activity_percent);
*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
*size = 4;
}
@@ -4055,7 +4010,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
bool has_disp)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 0 : 1);
}
@@ -4090,7 +4045,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (!result) {
clk_request = (clk_freq << 16) | clk_select;
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
clk_request);
}
@@ -4160,7 +4115,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
+ hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcefClockInSR /100),
"Attempt to set divider for DCEFCLK Failed!",);
} else {
@@ -4172,7 +4127,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (min_clocks.memoryClock != 0) {
idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
}
@@ -4275,28 +4230,23 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
return 0;
}
-static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- int result = 0;
-
switch (mode) {
case AMD_FAN_CTRL_NONE:
- result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
- result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
- if (!result)
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega10_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
- return result;
}
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
@@ -4306,51 +4256,16 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = vega10_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = vega10_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -4359,27 +4274,25 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
- hwmgr->dpm_level = level;
vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
-
- return 0;
+ if (!ret) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+ }
+ return ret;
}
-static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -4624,7 +4537,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
int i;
- if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
AMD_DPM_FORCED_LEVEL_LOW |
AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
@@ -4697,11 +4610,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.sclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentGfxclkIndex),
"Attempt to get current sclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read sclk index Failed!",
return -1);
@@ -4715,11 +4628,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentUclkIndex),
"Attempt to get current mclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read mclk index Failed!",
return -1);
@@ -4730,11 +4643,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_PCIE:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentLinkIndex),
"Attempt to get current mclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read mclk index Failed!",
return -1);
@@ -4762,7 +4675,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)wm_table, WMTABLE);
PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
@@ -4771,7 +4684,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if (data->water_marks_bitmap & WaterMarksLoaded) {
cgs_get_active_displays_info(hwmgr->device, &info);
num_turned_on_displays = info.display_count;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
}
@@ -4784,7 +4697,7 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_UVD].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
"Attempt to Enable/Disable DPM UVD Failed!",
@@ -4794,20 +4707,20 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
return 0;
}
-static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
data->vce_power_gated = bgate;
- return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
+ vega10_enable_disable_vce_dpm(hwmgr, !bgate);
}
-static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = bgate;
- return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
+ vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
static inline bool vega10_are_power_levels_equal(
@@ -4866,7 +4779,7 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
if (data->display_timing.num_existing_displays != info.display_count)
is_update_required = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
is_update_required = true;
}
@@ -4883,8 +4796,7 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
"DPM is not running right now, no need to disable DPM!",
return 0);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
+ if (PP_CAP(PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
tmp_result = vega10_disable_power_containment(hwmgr);
@@ -4972,7 +4884,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (!data->registry_data.sclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
sclk_idx),
"Failed to set soft min sclk index!",
@@ -4983,7 +4895,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (!data->registry_data.mclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
mclk_idx),
"Failed to set soft min mclk index!",
@@ -5096,6 +5008,65 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
return 0;
}
+static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSystemVirtualDramAddrHigh,
+ virtual_addr_hi);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSystemVirtualDramAddrLow,
+ virtual_addr_low);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramAddrHigh,
+ mc_addr_hi);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramAddrLow,
+ mc_addr_low);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramSize,
+ size);
+ return 0;
+}
+
+static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
+ const void *info)
+{
+ struct cgs_irq_src_funcs *irq_src =
+ (struct cgs_irq_src_funcs *)info;
+
+ if (hwmgr->thermal_controller.ucType ==
+ ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 ||
+ hwmgr->thermal_controller.ucType ==
+ ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
+ "Failed to register high thermal interrupt!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
+ "Failed to register low thermal interrupt!",
+ return -EINVAL);
+ }
+
+ /* Register CTF(GPIO_19) interrupt */
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
+ 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
+ "Failed to register CTF thermal interrupt!",
+ return -EINVAL);
+
+ return 0;
+}
+
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5149,12 +5120,15 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_mclk_od = vega10_get_mclk_od,
.set_mclk_od = vega10_set_mclk_od,
.avfs_control = vega10_avfs_enable,
+ .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
+ .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
+ .start_thermal_controller = vega10_start_thermal_controller,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
hwmgr->pptable_func = &vega10_pptable_funcs;
- pp_vega10_thermal_initialize(hwmgr);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 676cd7735883..8f7358cc3327 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -31,7 +31,6 @@
#include "vega10_ppsmc.h"
#include "vega10_powertune.h"
-extern const uint32_t PhwVega10_Magic;
#define VEGA10_MAX_HARDWARE_POWERLEVELS 2
#define WaterMarksExist 1
@@ -390,6 +389,7 @@ struct vega10_hwmgr {
uint32_t config_telemetry;
uint32_t smu_version;
uint32_t acg_loop_state;
+ uint32_t mem_channels;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index e7fa67063cdc..598a194737a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -854,99 +854,79 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
uint32_t en = (enable ? 1 : 0);
uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~SQ_Enable_MASK;
didt_block_info |= en << SQ_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~DB_Enable_MASK;
didt_block_info |= en << DB_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~TD_Enable_MASK;
didt_block_info |= en << TD_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~TCP_Enable_MASK;
didt_block_info |= en << TCP_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0);
- data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
- data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
- data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
- data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
- data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
- data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
}
}
if (enable) {
/* For Vega10, SMC does not support any mask yet. */
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
}
}
@@ -1040,10 +1020,10 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC))
+ if (PP_CAP(PHM_PlatformCaps_GCEDC))
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
return 0;
@@ -1059,12 +1039,12 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
return 0;
@@ -1159,12 +1139,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
return 0;
@@ -1180,12 +1160,12 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
return 0;
@@ -1263,8 +1243,8 @@ int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
- "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
+ result = vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = true;
}
}
@@ -1310,8 +1290,8 @@ int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
- "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
+ result = vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = false;
}
}
@@ -1364,7 +1344,7 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.enable_pkg_pwr_tracking_feature)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetPptLimit, n);
return 0;
@@ -1381,16 +1361,15 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
(uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
"Attempt to enable PPT feature Failed!",
data->smu_features[GNLD_PPT].supported = false);
if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
"Attempt to enable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
@@ -1409,16 +1388,15 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_PPT].supported = false);
if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
@@ -1430,7 +1408,7 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
@@ -1438,8 +1416,7 @@ int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
int adjust_percent, result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
adjust_percent =
hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index e343df190375..f14c7611fad3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -291,8 +291,7 @@ static int get_mm_clock_voltage_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) *
mm_dependency_table->ucNumEntries;
- mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mm_table = kzalloc(table_size, GFP_KERNEL);
if (!mm_table)
return -ENOMEM;
@@ -519,8 +518,7 @@ static int get_socclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
clk_dep_table->ucNumEntries;
- clk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -554,8 +552,7 @@ static int get_mclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
mclk_dep_table->ucNumEntries;
- mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mclk_table = kzalloc(table_size, GFP_KERNEL);
if (!mclk_table)
return -ENOMEM;
@@ -596,8 +593,7 @@ static int get_gfxclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
clk_dep_table->ucNumEntries;
- clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -663,8 +659,7 @@ static int get_pix_clk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
clk_dep_table->ucNumEntries;
- clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -728,8 +723,7 @@ static int get_dcefclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
num_entries;
- clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -772,8 +766,7 @@ static int get_pcie_table(struct pp_hwmgr *hwmgr,
sizeof(struct phm_ppt_v1_pcie_record) *
atom_pcie_table->ucNumEntries;
- pcie_table = (struct phm_ppt_v1_pcie_table *)
- kzalloc(table_size, GFP_KERNEL);
+ pcie_table = kzalloc(table_size, GFP_KERNEL);
if (!pcie_table)
return -ENOMEM;
@@ -1026,10 +1019,9 @@ static int get_vddc_lookup_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
- table = (phm_ppt_v1_voltage_lookup_table *)
- kzalloc(table_size, GFP_KERNEL);
+ table = kzalloc(table_size, GFP_KERNEL);
- if (NULL == table)
+ if (table == NULL)
return -ENOMEM;
table->count = vddc_lookup_pp_tables->ucNumEntries;
@@ -1138,12 +1130,12 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v2_information), GFP_KERNEL);
- PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
+ PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
"Failed to allocate hwmgr->pptable!", return -ENOMEM);
powerplay_table = get_powerplay_table(hwmgr);
- PP_ASSERT_WITH_CODE((NULL != powerplay_table),
+ PP_ASSERT_WITH_CODE((powerplay_table != NULL),
"Missing PowerPlay Table!", return -1);
result = check_powerplay_tables(hwmgr, powerplay_table);
@@ -1182,7 +1174,6 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
{
- int result = 0;
struct phm_ppt_v2_information *pp_table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -1225,7 +1216,7 @@ static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
kfree(hwmgr->pptable);
hwmgr->pptable = NULL;
- return result;
+ return 0;
}
const struct pp_table_func vega10_pptable_funcs = {
@@ -1238,7 +1229,7 @@ int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
const ATOM_Vega10_State_Array *state_arrays;
const ATOM_Vega10_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
- PP_ASSERT_WITH_CODE((NULL != pp_table),
+ PP_ASSERT_WITH_CODE((pp_table != NULL),
"Missing PowerPlay Table!", return -1);
PP_ASSERT_WITH_CODE((pp_table->sHeader.format_revision >=
ATOM_Vega10_TABLE_REVISION_VEGA10),
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index d44243441d28..dc3761bcb9b6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -31,11 +31,11 @@
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm),
"Attempt to get current RPM from SMC Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
current_rpm),
"Attempt to read current RPM from SMC Failed!",
return -1);
@@ -54,8 +54,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
@@ -105,14 +104,15 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return -1;
- if (data->smu_features[GNLD_FAN_CONTROL].supported)
+ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
result = vega10_get_current_rpm(hwmgr, speed);
- else {
+ } else {
uint32_t reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
- tach_period = (cgs_read_register(hwmgr->device,
- reg) & CG_TACH_STATUS__TACH_PERIOD_MASK) >>
- CG_TACH_STATUS__TACH_PERIOD__SHIFT;
+ tach_period =
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_STATUS,
+ TACH_PERIOD);
if (tach_period == 0)
return -EINVAL;
@@ -141,23 +141,20 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
- (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >>
- CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- hwmgr->tmin = (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL2__TMIN_MASK) >>
- CG_FDO_CTRL2__TMIN__SHIFT;
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE);
+ hwmgr->tmin =
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TMIN_MASK) |
- (0 << CG_FDO_CTRL2__TMIN__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN, 0));
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) |
- (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
return 0;
}
@@ -176,14 +173,13 @@ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
if (!hwmgr->fan_ctrl_is_in_default_mode) {
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) |
- (hwmgr->fan_ctrl_default_mode <<
- CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE,
+ hwmgr->fan_ctrl_default_mode));
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TMIN_MASK) |
- (hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN,
+ hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT));
hwmgr->fan_ctrl_is_in_default_mode = true;
}
@@ -203,7 +199,7 @@ static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
- hwmgr->smumgr, true,
+ hwmgr, true,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
@@ -220,7 +216,7 @@ static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
- hwmgr->smumgr, false,
+ hwmgr, false,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
@@ -279,16 +275,14 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (speed > 100)
speed = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1);
- duty100 = (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL1__FMAX_DUTY100_MASK) >>
- CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0)
return -EINVAL;
@@ -300,9 +294,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK) |
- (duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
@@ -314,18 +307,13 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
*/
int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
- int result;
-
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- } else
- result = vega10_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ return vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ else
+ return vega10_fan_ctrl_set_default_mode(hwmgr);
}
/**
@@ -342,12 +330,11 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
uint32_t reg;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
- (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+ (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return -1;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
if (!result) {
@@ -356,9 +343,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_TACH_STATUS__TACH_PERIOD_MASK) |
- (tach_period << CG_TACH_STATUS__TACH_PERIOD__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_STATUS, TACH_PERIOD,
+ tach_period));
}
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
@@ -374,12 +361,12 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
uint32_t reg;
reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_TACH_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
+ mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
temp = cgs_read_register(hwmgr->device, reg);
- temp = (temp & CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK) >>
- CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT;
+ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
temp = temp & 0x1ff;
@@ -418,20 +405,10 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = cgs_read_register(hwmgr->device, reg);
- val &= (~THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK);
- val |= (5 << THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK);
- val |= (1 << THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK);
- val |= ((high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
- << THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
- val |= ((low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
- << THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
-
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
cgs_write_register(hwmgr->device, reg, val);
@@ -452,19 +429,16 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_TACH_CTRL__EDGE_PER_REV_MASK) |
- ((hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution - 1) <<
- CG_TACH_CTRL__EDGE_PER_REV__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_CTRL, EDGE_PER_REV,
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1));
}
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK) |
- (0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28));
return 0;
}
@@ -484,7 +458,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FW_CTF].enabled)
printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n");
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to Enable FW CTF feature Failed!",
@@ -516,7 +490,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
printk("[Thermal_EnableAlert] FW CTF Already disabled!\n");
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to disable FW CTF feature Failed!",
@@ -554,8 +528,7 @@ int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
int ret;
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -573,7 +546,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
table->FanTargetTemperature = hwmgr->thermal_controller.
advanceFanControlParameters.usTMax;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
(uint32_t)table->FanTargetTemperature);
@@ -602,7 +575,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
table->FanStartTemp = hwmgr->thermal_controller.
advanceFanControlParameters.usZeroRPMStartTemperature;
- ret = vega10_copy_table_to_smc(hwmgr->smumgr,
+ ret = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE);
if (ret)
pr_info("Failed to update Fan Control Table in PPTable!");
@@ -619,123 +592,50 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- }
return 0;
}
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+
+int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+ int ret = 0;
if (range == NULL)
return -EINVAL;
- return vega10_thermal_set_temperature_range(hwmgr, range);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_disable_alert(hwmgr);
-}
+ vega10_thermal_initialize(hwmgr);
+ ret = vega10_thermal_set_temperature_range(hwmgr, range);
+ if (ret)
+ return -EINVAL;
-static struct phm_master_table_item
-vega10_thermal_start_thermal_controller_master_list[] = {
- { .tableFunction = tf_vega10_thermal_initialize },
- { .tableFunction = tf_vega10_thermal_set_temperature_range },
- { .tableFunction = tf_vega10_thermal_enable_alert },
+ vega10_thermal_enable_alert(hwmgr);
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- { .tableFunction = tf_vega10_thermal_setup_fan_table },
- { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
- { }
-};
+ ret = vega10_thermal_setup_fan_table(hwmgr);
+ if (ret)
+ return -EINVAL;
-static struct phm_master_table_header
-vega10_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- vega10_thermal_start_thermal_controller_master_list
-};
+ vega10_thermal_start_smc_fan_control(hwmgr);
-static struct phm_master_table_item
-vega10_thermal_set_temperature_range_master_list[] = {
- { .tableFunction = tf_vega10_thermal_disable_alert },
- { .tableFunction = tf_vega10_thermal_set_temperature_range },
- { .tableFunction = tf_vega10_thermal_enable_alert },
- { }
+ return 0;
};
-struct phm_master_table_header
-vega10_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- vega10_thermal_set_temperature_range_master_list
-};
+
+
int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
@@ -745,32 +645,3 @@ int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
}
return 0;
}
-
-/**
-* Initializes the thermal controller related functions
-* in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &vega10_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &vega10_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr,
- &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
index 776f3a2effc0..82f10bdd5f07 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
@@ -50,13 +50,6 @@ struct vega10_temperature {
#define FDO_PWM_MODE_STATIC_RPM 5
-extern int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-
extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
@@ -69,7 +62,6 @@ extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed);
extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_ctrl_uninitialize_thermal_controller(
struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
@@ -77,9 +69,11 @@ extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
-int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range);
+extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 07e9c0b5915d..95932cc88460 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -31,9 +31,7 @@
#include "dm_pp_interface.h"
extern const struct amd_ip_funcs pp_ip_funcs;
-extern const struct amd_powerplay_funcs pp_dpm_funcs;
-
-#define PP_DPM_DISABLED 0xCCCC
+extern const struct amd_pm_funcs pp_dpm_funcs;
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
@@ -50,94 +48,12 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GPU_POWER,
};
-enum amd_pp_event {
- AMD_PP_EVENT_INITIALIZE = 0,
- AMD_PP_EVENT_UNINITIALIZE,
- AMD_PP_EVENT_POWER_SOURCE_CHANGE,
- AMD_PP_EVENT_SUSPEND,
- AMD_PP_EVENT_RESUME,
- AMD_PP_EVENT_ENTER_REST_STATE,
- AMD_PP_EVENT_EXIT_REST_STATE,
- AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE,
- AMD_PP_EVENT_THERMAL_NOTIFICATION,
- AMD_PP_EVENT_VBIOS_NOTIFICATION,
- AMD_PP_EVENT_ENTER_THERMAL_STATE,
- AMD_PP_EVENT_EXIT_THERMAL_STATE,
- AMD_PP_EVENT_ENTER_FORCED_STATE,
- AMD_PP_EVENT_EXIT_FORCED_STATE,
- AMD_PP_EVENT_ENTER_EXCLUSIVE_MODE,
- AMD_PP_EVENT_EXIT_EXCLUSIVE_MODE,
- AMD_PP_EVENT_ENTER_SCREEN_SAVER,
- AMD_PP_EVENT_EXIT_SCREEN_SAVER,
- AMD_PP_EVENT_VPU_RECOVERY_BEGIN,
- AMD_PP_EVENT_VPU_RECOVERY_END,
- AMD_PP_EVENT_ENABLE_POWER_PLAY,
- AMD_PP_EVENT_DISABLE_POWER_PLAY,
- AMD_PP_EVENT_CHANGE_POWER_SOURCE_UI_LABEL,
- AMD_PP_EVENT_ENABLE_USER2D_PERFORMANCE,
- AMD_PP_EVENT_DISABLE_USER2D_PERFORMANCE,
- AMD_PP_EVENT_ENABLE_USER3D_PERFORMANCE,
- AMD_PP_EVENT_DISABLE_USER3D_PERFORMANCE,
- AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST,
- AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST,
- AMD_PP_EVENT_ENABLE_REDUCED_REFRESH_RATE,
- AMD_PP_EVENT_DISABLE_REDUCED_REFRESH_RATE,
- AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING,
- AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING,
- AMD_PP_EVENT_ENABLE_CGPG,
- AMD_PP_EVENT_DISABLE_CGPG,
- AMD_PP_EVENT_ENTER_TEXT_MODE,
- AMD_PP_EVENT_EXIT_TEXT_MODE,
- AMD_PP_EVENT_VIDEO_START,
- AMD_PP_EVENT_VIDEO_STOP,
- AMD_PP_EVENT_ENABLE_USER_STATE,
- AMD_PP_EVENT_DISABLE_USER_STATE,
- AMD_PP_EVENT_READJUST_POWER_STATE,
- AMD_PP_EVENT_START_INACTIVITY,
- AMD_PP_EVENT_STOP_INACTIVITY,
- AMD_PP_EVENT_LINKED_ADAPTERS_READY,
- AMD_PP_EVENT_ADAPTER_SAFE_TO_DISABLE,
- AMD_PP_EVENT_COMPLETE_INIT,
- AMD_PP_EVENT_CRITICAL_THERMAL_FAULT,
- AMD_PP_EVENT_BACKLIGHT_CHANGED,
- AMD_PP_EVENT_ENABLE_VARI_BRIGHT,
- AMD_PP_EVENT_DISABLE_VARI_BRIGHT,
- AMD_PP_EVENT_ENABLE_VARI_BRIGHT_ON_POWER_XPRESS,
- AMD_PP_EVENT_DISABLE_VARI_BRIGHT_ON_POWER_XPRESS,
- AMD_PP_EVENT_SET_VARI_BRIGHT_LEVEL,
- AMD_PP_EVENT_VARI_BRIGHT_MONITOR_MEASUREMENT,
- AMD_PP_EVENT_SCREEN_ON,
- AMD_PP_EVENT_SCREEN_OFF,
- AMD_PP_EVENT_PRE_DISPLAY_CONFIG_CHANGE,
- AMD_PP_EVENT_ENTER_ULP_STATE,
- AMD_PP_EVENT_EXIT_ULP_STATE,
- AMD_PP_EVENT_REGISTER_IP_STATE,
- AMD_PP_EVENT_UNREGISTER_IP_STATE,
- AMD_PP_EVENT_ENTER_MGPU_MODE,
- AMD_PP_EVENT_EXIT_MGPU_MODE,
- AMD_PP_EVENT_ENTER_MULTI_GPU_MODE,
- AMD_PP_EVENT_PRE_SUSPEND,
- AMD_PP_EVENT_PRE_RESUME,
- AMD_PP_EVENT_ENTER_BACOS,
- AMD_PP_EVENT_EXIT_BACOS,
- AMD_PP_EVENT_RESUME_BACO,
- AMD_PP_EVENT_RESET_BACO,
- AMD_PP_EVENT_PRE_DISPLAY_PHY_ACCESS,
- AMD_PP_EVENT_POST_DISPLAY_PHY_CCESS,
- AMD_PP_EVENT_START_COMPUTE_APPLICATION,
- AMD_PP_EVENT_STOP_COMPUTE_APPLICATION,
- AMD_PP_EVENT_REDUCE_POWER_LIMIT,
- AMD_PP_EVENT_ENTER_FRAME_LOCK,
- AMD_PP_EVENT_EXIT_FRAME_LOOCK,
- AMD_PP_EVENT_LONG_IDLE_REQUEST_BACO,
- AMD_PP_EVENT_LONG_IDLE_ENTER_BACO,
- AMD_PP_EVENT_LONG_IDLE_EXIT_BACO,
- AMD_PP_EVENT_HIBERNATE,
- AMD_PP_EVENT_CONNECTED_STANDBY,
- AMD_PP_EVENT_ENTER_SELF_REFRESH,
- AMD_PP_EVENT_EXIT_SELF_REFRESH,
- AMD_PP_EVENT_START_AVFS_BTC,
- AMD_PP_EVENT_MAX
+enum amd_pp_task {
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ AMD_PP_TASK_ENABLE_USER_STATE,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ AMD_PP_TASK_COMPLETE_INIT,
+ AMD_PP_TASK_MAX
};
struct amd_pp_init {
@@ -295,12 +211,6 @@ enum {
PP_GROUP_MAX
};
-enum pp_clock_type {
- PP_SCLK,
- PP_MCLK,
- PP_PCIE,
-};
-
struct pp_states_info {
uint32_t nums;
uint32_t states[16];
@@ -355,56 +265,13 @@ struct pp_display_clock_request {
support << PP_STATE_SUPPORT_SHIFT |\
state << PP_STATE_SHIFT)
-struct amd_powerplay_funcs {
- int (*get_temperature)(void *handle);
- int (*load_firmware)(void *handle);
- int (*wait_for_fw_loading_complete)(void *handle);
- int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
- enum amd_dpm_forced_level (*get_performance_level)(void *handle);
- enum amd_pm_state_type (*get_current_power_state)(void *handle);
- int (*get_sclk)(void *handle, bool low);
- int (*get_mclk)(void *handle, bool low);
- int (*powergate_vce)(void *handle, bool gate);
- int (*powergate_uvd)(void *handle, bool gate);
- int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
- void *input, void *output);
- int (*set_fan_control_mode)(void *handle, uint32_t mode);
- int (*get_fan_control_mode)(void *handle);
- int (*set_fan_speed_percent)(void *handle, uint32_t percent);
- int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
- int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
- int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
- int (*get_pp_table)(void *handle, char **table);
- int (*set_pp_table)(void *handle, const char *buf, size_t size);
- int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(void *handle);
- int (*set_sclk_od)(void *handle, uint32_t value);
- int (*get_mclk_od)(void *handle);
- int (*set_mclk_od)(void *handle, uint32_t value);
- int (*read_sensor)(void *handle, int idx, void *value, int *size);
- struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx);
- int (*reset_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(void *handle,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(void *handle,
- enum amd_pp_profile_type type);
-};
-
struct amd_powerplay {
+ struct cgs_device *cgs_device;
void *pp_handle;
const struct amd_ip_funcs *ip_funcs;
- const struct amd_powerplay_funcs *pp_funcs;
+ const struct amd_pm_funcs *pp_funcs;
};
-int amd_powerplay_create(struct amd_pp_init *pp_init,
- void **handle);
-
-int amd_powerplay_destroy(void *handle);
-
int amd_powerplay_reset(void *handle);
int amd_powerplay_display_configuration_change(void *handle,
@@ -437,6 +304,5 @@ int amd_powerplay_display_clock_voltage_request(void *handle,
int amd_powerplay_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *output);
-int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
deleted file mode 100644
index b9d84de8a44d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_MANAGER_H_
-#define _EVENT_MANAGER_H_
-
-#include "power_state.h"
-#include "pp_power_source.h"
-#include "hardwaremanager.h"
-#include "pp_asicblocks.h"
-
-struct pp_eventmgr;
-enum amd_pp_event;
-
-enum PEM_EventDataValid {
- PEM_EventDataValid_RequestedStateID = 0,
- PEM_EventDataValid_RequestedUILabel,
- PEM_EventDataValid_NewPowerState,
- PEM_EventDataValid_RequestedPowerSource,
- PEM_EventDataValid_RequestedClocks,
- PEM_EventDataValid_CurrentTemperature,
- PEM_EventDataValid_AsicBlocks,
- PEM_EventDataValid_ODParameters,
- PEM_EventDataValid_PXAdapterPrefs,
- PEM_EventDataValid_PXUserPrefs,
- PEM_EventDataValid_PXSwitchReason,
- PEM_EventDataValid_PXSwitchPhase,
- PEM_EventDataValid_HdVideo,
- PEM_EventDataValid_BacklightLevel,
- PEM_EventDatavalid_VariBrightParams,
- PEM_EventDataValid_VariBrightLevel,
- PEM_EventDataValid_VariBrightImmediateChange,
- PEM_EventDataValid_PercentWhite,
- PEM_EventDataValid_SdVideo,
- PEM_EventDataValid_HTLinkChangeReason,
- PEM_EventDataValid_HWBlocks,
- PEM_EventDataValid_RequestedThermalState,
- PEM_EventDataValid_MvcVideo,
- PEM_EventDataValid_Max
-};
-
-typedef enum PEM_EventDataValid PEM_EventDataValid;
-
-/* Number of bits in ULONG variable */
-#define PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD (sizeof(unsigned long)*8)
-
-/* Number of ULONG entries used by event data valid bits */
-#define PEM_MAX_NUM_EVENTDATAVALID_ULONG_ENTRIES \
- ((PEM_EventDataValid_Max + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD - 1) / \
- PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)
-
-static inline void pem_set_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
-{
- fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] |=
- (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-static inline void pem_unset_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
-{
- fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &=
- ~(1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-static inline unsigned long pem_is_event_data_valid(const unsigned long *fields, PEM_EventDataValid valid_field)
-{
- return fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &
- (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-struct pem_event_data {
- unsigned long valid_fields[100];
- unsigned long requested_state_id;
- enum PP_StateUILabel requested_ui_label;
- struct pp_power_state *pnew_power_state;
- enum pp_power_source requested_power_source;
- struct PP_Clocks requested_clocks;
- bool skip_state_adjust_rules;
- struct phm_asic_blocks asic_blocks;
- /* to doPP_ThermalState requestedThermalState;
- enum ThermalStateRequestSrc requestThermalStateSrc;
- PP_Temperature currentTemperature;*/
-
-};
-
-int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event,
- struct pem_event_data *event_data);
-
-bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr);
-
-#endif /* _EVENT_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
deleted file mode 100644
index 7bd8a7e57080..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENTMGR_H_
-#define _EVENTMGR_H_
-
-#include <linux/mutex.h>
-#include "pp_instance.h"
-#include "hardwaremanager.h"
-#include "eventmanager.h"
-#include "pp_feature.h"
-#include "pp_power_source.h"
-#include "power_state.h"
-
-typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr,
- struct pem_event_data *event_data);
-
-struct action_chain {
- const char *description; /* action chain description for debugging purpose */
- const pem_event_action * const *action_chain; /* pointer to chain of event actions */
-};
-
-struct pem_power_source_ui_state_info {
- enum PP_StateUILabel current_ui_label;
- enum PP_StateUILabel default_ui_lable;
- unsigned long configurable_ui_mapping;
-};
-
-struct pp_clock_range {
- uint32_t min_sclk_khz;
- uint32_t max_sclk_khz;
-
- uint32_t min_mclk_khz;
- uint32_t max_mclk_khz;
-
- uint32_t min_vclk_khz;
- uint32_t max_vclk_khz;
-
- uint32_t min_dclk_khz;
- uint32_t max_dclk_khz;
-
- uint32_t min_aclk_khz;
- uint32_t max_aclk_khz;
-
- uint32_t min_eclk_khz;
- uint32_t max_eclk_khz;
-};
-
-enum pp_state {
- UNINITIALIZED,
- INACTIVE,
- ACTIVE
-};
-
-enum pp_ring_index {
- PP_RING_TYPE_GFX_INDEX = 0,
- PP_RING_TYPE_DMA_INDEX,
- PP_RING_TYPE_DMA1_INDEX,
- PP_RING_TYPE_UVD_INDEX,
- PP_RING_TYPE_VCE0_INDEX,
- PP_RING_TYPE_VCE1_INDEX,
- PP_RING_TYPE_CP1_INDEX,
- PP_RING_TYPE_CP2_INDEX,
- PP_NUM_RINGS,
-};
-
-struct pp_request {
- uint32_t flags;
- uint32_t sclk;
- uint32_t sclk_throttle;
- uint32_t mclk;
- uint32_t vclk;
- uint32_t dclk;
- uint32_t eclk;
- uint32_t aclk;
- uint32_t iclk;
- uint32_t vp8clk;
- uint32_t rsv[32];
-};
-
-struct pp_eventmgr {
- struct pp_hwmgr *hwmgr;
- struct pp_smumgr *smumgr;
-
- struct pp_feature_info features[PP_Feature_Max];
- const struct action_chain *event_chain[AMD_PP_EVENT_MAX];
- struct phm_platform_descriptor *platform_descriptor;
- struct pp_clock_range clock_range;
- enum pp_power_source current_power_source;
- struct pem_power_source_ui_state_info ui_state_info[PP_PowerSource_Max];
- enum pp_state states[PP_NUM_RINGS];
- struct pp_request hi_req;
- struct list_head context_list;
- struct mutex lock;
- bool block_adjust_power_state;
- bool enable_cg;
- bool enable_gfx_cgpg;
- int (*pp_eventmgr_init)(struct pp_eventmgr *eventmgr);
- void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr);
-};
-
-int eventmgr_early_init(struct pp_instance *handle);
-
-#endif /* _EVENTMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
deleted file mode 100644
index 8a31665321a8..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
+++ /dev/null
@@ -1,10299 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _FIJI_PWRVIRUS_H_
-#define _FIJI_PWRVIRUS_H_
-
-#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
-#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
-#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
-#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d
-
-enum PWR_Command
-{
- PwrCmdNull = 0,
- PwrCmdWrite,
- PwrCmdEnd,
- PwrCmdMax
-};
-typedef enum PWR_Command PWR_Command;
-
-struct PWR_Command_Table
-{
- PWR_Command command;
- ULONG data;
- ULONG reg;
-};
-typedef struct PWR_Command_Table PWR_Command_Table;
-
-#define PWR_VIRUS_TABLE_SIZE 10243
-static const PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] =
-{
- { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX },
- { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
- { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX },
- { PwrCmdWrite, 0x0300078c, mmPCIE_DATA },
- { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL },
- { PwrCmdWrite, 0x00000001, mmBIF_CLK_CTRL },
- { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL },
- { PwrCmdWrite, 0x00000003, mmBIF_FB_EN },
- { PwrCmdWrite, 0x00000000, mmBIF_FB_EN },
- { PwrCmdWrite, 0x00000001, mmBIF_DOORBELL_APER_EN },
- { PwrCmdWrite, 0x00000000, mmBIF_DOORBELL_APER_EN },
- { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
- { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
- { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
- { PwrCmdWrite, 0x22000000, mmPCIE_DATA },
- { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
- { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
- /*
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x00000000, mmMC_CITF_CNTL },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET },*/
- { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_LO },
- { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_HI },
- { PwrCmdWrite, 0x00000000, mmRLC_CSIB_LENGTH },
- /*
- { PwrCmdWrite, 0x00000000, mmMC_VM_MX_L1_TLB_CNTL },
- { PwrCmdWrite, 0x00000001, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR },
- { PwrCmdWrite, 0x00000000, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },*/
- { PwrCmdWrite, 0x00000000, mmVM_CONTEXT0_CNTL },
- { PwrCmdWrite, 0x00000000, mmVM_CONTEXT1_CNTL },
- /*
- { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_BASE },
- { PwrCmdWrite, 0x00000002, mmMC_VM_AGP_BOT },
- { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_TOP },*/
- { PwrCmdWrite, 0x04000000, mmATC_VM_APERTURE0_LOW_ADDR },
- { PwrCmdWrite, 0x0400ff20, mmATC_VM_APERTURE0_HIGH_ADDR },
- { PwrCmdWrite, 0x00000002, mmATC_VM_APERTURE0_CNTL },
- { PwrCmdWrite, 0x0000ffff, mmATC_VM_APERTURE0_CNTL2 },
- { PwrCmdWrite, 0x00000001, mmATC_VM_APERTURE1_LOW_ADDR },
- { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_HIGH_ADDR },
- { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL },
- { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL2 },
- //{ PwrCmdWrite, 0x00000000, mmMC_ARB_RAMCFG },
- { PwrCmdWrite, 0x12011003, mmGB_ADDR_CONFIG },
- { PwrCmdWrite, 0x00800010, mmGB_TILE_MODE0 },
- { PwrCmdWrite, 0x00800810, mmGB_TILE_MODE1 },
- { PwrCmdWrite, 0x00801010, mmGB_TILE_MODE2 },
- { PwrCmdWrite, 0x00801810, mmGB_TILE_MODE3 },
- { PwrCmdWrite, 0x00802810, mmGB_TILE_MODE4 },
- { PwrCmdWrite, 0x00802808, mmGB_TILE_MODE5 },
- { PwrCmdWrite, 0x00802814, mmGB_TILE_MODE6 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE7 },
- { PwrCmdWrite, 0x00000004, mmGB_TILE_MODE8 },
- { PwrCmdWrite, 0x02000008, mmGB_TILE_MODE9 },
- { PwrCmdWrite, 0x02000010, mmGB_TILE_MODE10 },
- { PwrCmdWrite, 0x06000014, mmGB_TILE_MODE11 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE12 },
- { PwrCmdWrite, 0x02400008, mmGB_TILE_MODE13 },
- { PwrCmdWrite, 0x02400010, mmGB_TILE_MODE14 },
- { PwrCmdWrite, 0x02400030, mmGB_TILE_MODE15 },
- { PwrCmdWrite, 0x06400014, mmGB_TILE_MODE16 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE17 },
- { PwrCmdWrite, 0x0040000c, mmGB_TILE_MODE18 },
- { PwrCmdWrite, 0x0100000c, mmGB_TILE_MODE19 },
- { PwrCmdWrite, 0x0100001c, mmGB_TILE_MODE20 },
- { PwrCmdWrite, 0x01000034, mmGB_TILE_MODE21 },
- { PwrCmdWrite, 0x01000024, mmGB_TILE_MODE22 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE23 },
- { PwrCmdWrite, 0x0040001c, mmGB_TILE_MODE24 },
- { PwrCmdWrite, 0x01000020, mmGB_TILE_MODE25 },
- { PwrCmdWrite, 0x01000038, mmGB_TILE_MODE26 },
- { PwrCmdWrite, 0x02c00008, mmGB_TILE_MODE27 },
- { PwrCmdWrite, 0x02c00010, mmGB_TILE_MODE28 },
- { PwrCmdWrite, 0x06c00014, mmGB_TILE_MODE29 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE30 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE31 },
- { PwrCmdWrite, 0x000000a8, mmGB_MACROTILE_MODE0 },
- { PwrCmdWrite, 0x000000a4, mmGB_MACROTILE_MODE1 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE2 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE3 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE4 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE5 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE6 },
- { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE7 },
- { PwrCmdWrite, 0x000000ee, mmGB_MACROTILE_MODE8 },
- { PwrCmdWrite, 0x000000ea, mmGB_MACROTILE_MODE9 },
- { PwrCmdWrite, 0x000000e9, mmGB_MACROTILE_MODE10 },
- { PwrCmdWrite, 0x000000e5, mmGB_MACROTILE_MODE11 },
- { PwrCmdWrite, 0x000000e4, mmGB_MACROTILE_MODE12 },
- { PwrCmdWrite, 0x000000e0, mmGB_MACROTILE_MODE13 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE14 },
- { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE15 },
- { PwrCmdWrite, 0x00900000, mmHDP_NONSURFACE_BASE },
- { PwrCmdWrite, 0x00008000, mmHDP_NONSURFACE_INFO },
- { PwrCmdWrite, 0x3fffffff, mmHDP_NONSURFACE_SIZE },
- { PwrCmdWrite, 0x00000003, mmBIF_FB_EN },
- //{ PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET },
- { PwrCmdWrite, 0x00000000, mmSRBM_CNTL },
- { PwrCmdWrite, 0x00020000, mmSRBM_CNTL },
- { PwrCmdWrite, 0x80000000, mmATC_VMID0_PASID_MAPPING },
- { PwrCmdWrite, 0x00000000, mmATC_VMID_PASID_MAPPING_UPDATE_STATUS },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0xe0000000, mmGRBM_GFX_INDEX },
- { PwrCmdWrite, 0x00000000, mmCGTS_TCC_DISABLE },
- { PwrCmdWrite, 0x00000000, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x76543210, mmTCP_CHAN_STEER_LO },
- { PwrCmdWrite, 0xfedcba98, mmTCP_CHAN_STEER_HI },
- { PwrCmdWrite, 0x00000000, mmDB_DEBUG2 },
- { PwrCmdWrite, 0x00000000, mmDB_DEBUG },
- { PwrCmdWrite, 0x00002b16, mmCP_QUEUE_THRESHOLDS },
- { PwrCmdWrite, 0x00006030, mmCP_MEQ_THRESHOLDS },
- { PwrCmdWrite, 0x01000104, mmSPI_CONFIG_CNTL_1 },
- { PwrCmdWrite, 0x98184020, mmPA_SC_FIFO_SIZE },
- { PwrCmdWrite, 0x00000001, mmVGT_NUM_INSTANCES },
- { PwrCmdWrite, 0x00000000, mmCP_PERFMON_CNTL },
- { PwrCmdWrite, 0x01180000, mmSQ_CONFIG },
- { PwrCmdWrite, 0x00000000, mmVGT_CACHE_INVALIDATION },
- { PwrCmdWrite, 0x00000000, mmSQ_THREAD_TRACE_BASE },
- { PwrCmdWrite, 0x0000df80, mmSQ_THREAD_TRACE_MASK },
- { PwrCmdWrite, 0x02249249, mmSQ_THREAD_TRACE_MODE },
- { PwrCmdWrite, 0x00000000, mmPA_SC_LINE_STIPPLE_STATE },
- { PwrCmdWrite, 0x00000000, mmCB_PERFCOUNTER0_SELECT1 },
- { PwrCmdWrite, 0x06000100, mmCGTT_VGT_CLK_CTRL },
- { PwrCmdWrite, 0x00000007, mmPA_CL_ENHANCE },
- { PwrCmdWrite, 0x00000001, mmPA_SC_ENHANCE },
- { PwrCmdWrite, 0x00ffffff, mmPA_SC_FORCE_EOV_MAX_CNTS },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000010, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000020, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000030, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000040, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000050, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000060, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000070, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000080, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000090, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000a0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000b0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000c0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000d0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000e0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000f0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmRLC_PG_CNTL },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS2 },
- { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x0000000e, mmSH_MEM_APE1_BASE },
- { PwrCmdWrite, 0x0000020d, mmSH_MEM_APE1_LIMIT },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_RB_VMID },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_SRM_CNTL },
- { PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL },
- { PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL },
- { PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE },
- { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO },
- { PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdEnd, 0x00000000, 0x00000000 },
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a4c8b09b6f14..57a0467b7267 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -283,6 +283,8 @@ static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps
(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1)))));
}
+#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c))
+
#define PP_PCIEGenInvalid 0xffff
enum PP_PCIEGen {
PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */
@@ -295,7 +297,7 @@ typedef enum PP_PCIEGen PP_PCIEGen;
#define PP_Min_PCIEGen PP_PCIEGen1
#define PP_Max_PCIEGen PP_PCIEGen3
#define PP_Min_PCIELane 1
-#define PP_Max_PCIELane 32
+#define PP_Max_PCIELane 16
enum phm_clock_Type {
PHM_DispClock = 1,
@@ -373,8 +375,6 @@ struct phm_odn_clock_levels {
extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
-extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
-extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 91b0105e8240..004a40e88bde 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -32,6 +32,7 @@
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
#include "power_state.h"
+#include "cgs_linux.h"
struct pp_instance;
struct pp_hwmgr;
@@ -61,10 +62,6 @@ struct vi_dpm_table {
struct vi_dpm_level dpm_level[1];
};
-enum PP_Result {
- PP_Result_TableImmediateExit = 0x13,
-};
-
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
#define PCIE_PERF_REQ_GEN1 2
@@ -103,17 +100,6 @@ enum PHM_BackEnd_Magic {
PHM_Rv_Magic = 0x20161121
};
-
-#define PHM_PCIE_POWERGATING_TARGET_GFX 0
-#define PHM_PCIE_POWERGATING_TARGET_DDI 1
-#define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE 2
-#define PHM_PCIE_POWERGATING_TARGET_PHY 3
-
-typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result);
-
-typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr);
-
struct phm_set_power_state_input {
const struct pp_hw_power_state *pcurrent_state;
const struct pp_hw_power_state *pnew_state;
@@ -149,30 +135,6 @@ struct phm_gfx_arbiter {
uint32_t fclk;
};
-/* Entries in the master tables */
-struct phm_master_table_item {
- phm_check_function isFunctionNeededInRuntimeTable;
- phm_table_function tableFunction;
-};
-
-enum phm_master_table_flag {
- PHM_MasterTableFlag_None = 0,
- PHM_MasterTableFlag_ExitOnError = 1,
-};
-
-/* The header of the master tables */
-struct phm_master_table_header {
- uint32_t storage_size;
- uint32_t flags;
- const struct phm_master_table_item *master_list;
-};
-
-struct phm_runtime_table_header {
- uint32_t storage_size;
- bool exit_error;
- phm_table_function *function_list;
-};
-
struct phm_clock_array {
uint32_t count;
uint32_t values[1];
@@ -216,19 +178,6 @@ struct phm_phase_shedding_limits_record {
uint32_t Mclk;
};
-
-extern int phm_dispatch_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input, void *output);
-
-extern int phm_construct_table(struct pp_hwmgr *hwmgr,
- const struct phm_master_table_header *master_table,
- struct phm_runtime_table_header *rt_table);
-
-extern int phm_destroy_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table);
-
-
struct phm_uvd_clock_voltage_dependency_record {
uint32_t vclk;
uint32_t dclk;
@@ -286,6 +235,39 @@ struct phm_vce_clock_voltage_dependency_table {
struct phm_vce_clock_voltage_dependency_record entries[1];
};
+struct pp_smumgr_func {
+ int (*smu_init)(struct pp_hwmgr *hwmgr);
+ int (*smu_fini)(struct pp_hwmgr *hwmgr);
+ int (*start_smu)(struct pp_hwmgr *hwmgr);
+ int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr,
+ uint32_t firmware);
+ int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
+ int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
+ uint32_t firmware);
+ int (*get_argument)(struct pp_hwmgr *hwmgr);
+ int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
+ int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+ int (*download_pptable_settings)(struct pp_hwmgr *hwmgr,
+ void **table);
+ int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr);
+ int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+ int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+ int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+ int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+ int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+ int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+ int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+ uint32_t (*get_mac_definition)(uint32_t value);
+ bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
+ int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request);
+ bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
+};
+
struct pp_hwmgr_func {
int (*backend_init)(struct pp_hwmgr *hw_mgr);
int (*backend_fini)(struct pp_hwmgr *hw_mgr);
@@ -311,10 +293,10 @@ struct pp_hwmgr_func {
unsigned long, struct pp_power_state *);
int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
- int (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
- int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
- int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
- int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
+ void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
+ void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
+ uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
+ uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
int (*power_state_set)(struct pp_hwmgr *hwmgr,
const void *state);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
@@ -328,8 +310,8 @@ struct pp_hwmgr_func {
int (*get_temperature)(struct pp_hwmgr *hwmgr);
int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr);
int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
- int (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
- int (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
+ void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
+ uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent);
int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent);
@@ -378,6 +360,15 @@ struct pp_hwmgr_func {
struct amd_pp_profile *request);
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
+ int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
+ int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
+ int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size);
};
struct pp_table_func {
@@ -745,7 +736,7 @@ struct pp_hwmgr {
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
- bool block_hw_access;
+ enum amd_dpm_forced_level request_dpm_level;
struct phm_gfx_arbiter gfx_arbiter;
struct phm_acp_arbiter acp_arbiter;
struct phm_uvd_arbiter uvd_arbiter;
@@ -754,19 +745,17 @@ struct pp_hwmgr {
void *pptable;
struct phm_platform_descriptor platform_descriptor;
void *backend;
+
+ void *smu_backend;
+ const struct pp_smumgr_func *smumgr_funcs;
+ bool is_kicker;
+ bool reload_fw;
+
enum PP_DAL_POWERLEVEL dal_power_level;
struct phm_dynamic_state_info dyn_state;
- struct phm_runtime_table_header setup_asic;
- struct phm_runtime_table_header power_down_asic;
- struct phm_runtime_table_header disable_dynamic_state_management;
- struct phm_runtime_table_header enable_dynamic_state_management;
- struct phm_runtime_table_header set_power_state;
- struct phm_runtime_table_header enable_clock_power_gatings;
- struct phm_runtime_table_header display_configuration_changed;
- struct phm_runtime_table_header start_thermal_controller;
- struct phm_runtime_table_header set_temperature_range;
const struct pp_hwmgr_func *hwmgr_func;
const struct pp_table_func *pptable_func;
+
struct pp_power_state *ps;
enum pp_power_source power_source;
uint32_t num_ps;
@@ -784,26 +773,44 @@ struct pp_hwmgr {
struct amd_pp_display_configuration display_config;
uint32_t feature_mask;
- /* power profile */
+ /* UMD Pstate */
struct amd_pp_profile gfx_power_profile;
struct amd_pp_profile compute_power_profile;
struct amd_pp_profile default_gfx_power_profile;
struct amd_pp_profile default_compute_power_profile;
enum amd_pp_profile_type current_power_profile;
+ bool en_umd_pstate;
+};
+
+struct cgs_irq_src_funcs {
+ cgs_irq_source_set_func_t set;
+ cgs_irq_handler_func_t handler;
};
extern int hwmgr_early_init(struct pp_instance *handle);
extern int hwmgr_hw_init(struct pp_instance *handle);
extern int hwmgr_hw_fini(struct pp_instance *handle);
+extern int hwmgr_hw_suspend(struct pp_instance *handle);
+extern int hwmgr_hw_resume(struct pp_instance *handle);
+extern int hwmgr_handle_task(struct pp_instance *handle,
+ enum amd_pp_task task_id,
+ void *input, void *output);
extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t value, uint32_t mask);
-extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
uint32_t mask);
+extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask);
+extern int phm_wait_for_indirect_register_unequal(
+ struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port, uint32_t index,
+ uint32_t value, uint32_t mask);
extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
@@ -888,5 +895,58 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t
PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field) )
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ index, value, mask) \
+ phm_wait_for_register_unequal(hwmgr, \
+ index, value, mask)
+
+#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
+ PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ mm##reg, value, mask)
+
+#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
+ PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
#endif /* _HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
index 0de443612312..6a53b7e74ccd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -29,10058 +29,1764 @@
#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d
-enum PWR_Command {
- PwrCmdNull = 0,
- PwrCmdWrite,
- PwrCmdEnd,
- PwrCmdMax
-};
-
-typedef enum PWR_Command PWR_Command;
-
struct PWR_Command_Table {
- PWR_Command command;
uint32_t data;
uint32_t reg;
};
typedef struct PWR_Command_Table PWR_Command_Table;
+struct PWR_DFY_Section {
+ uint32_t dfy_cntl;
+ uint32_t dfy_addr_hi, dfy_addr_lo;
+ uint32_t dfy_size;
+ uint32_t dfy_data[];
+};
+
+typedef struct PWR_DFY_Section PWR_DFY_Section;
+
+static const PWR_Command_Table pwr_virus_table_pre[] = {
+ { 0x00000000, mmRLC_CNTL },
+ { 0x00000002, mmRLC_SRM_CNTL },
+ { 0x15000000, mmCP_ME_CNTL },
+ { 0x50000000, mmCP_MEC_CNTL },
+ { 0x80000004, mmCP_DFY_CNTL },
+ { 0x0840800a, mmCP_RB0_CNTL },
+ { 0xf30fff0f, mmTCC_CTRL },
+ { 0x00000002, mmTCC_EXE_DISABLE },
+ { 0x000000ff, mmTCP_ADDR_CONFIG },
+ { 0x540ff000, mmCP_CPC_IC_BASE_LO },
+ { 0x000000b4, mmCP_CPC_IC_BASE_HI },
+ { 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
+ { 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
+ { 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00000000, 0xFFFFFFFF },
+};
+
+static const PWR_DFY_Section pwr_virus_section1 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x540fe800,
+ .dfy_data = {
+ 0x7e000200, 0x7e020201, 0x7e040204, 0x7e060205, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0xbf810000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54106f00, 0x000400b4, 0x00004000, 0x00804fac, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 416
+};
+
+static const PWR_DFY_Section pwr_virus_section2 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x540fef00,
+ .dfy_data = {
+ 0xc0031502, 0x00001e00, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 16
+};
-#define PWR_VIRUS_TABLE_SIZE 10031
+static const PWR_DFY_Section pwr_virus_section3 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x540ff000,
+ .dfy_data = {
+ 0xc424000b, 0x80000145, 0x94800001, 0x94c00001, 0x95000001, 0x95400001, 0x95800001, 0xdc810000,
+ 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xc4080061, 0xd8400013, 0xd8000003, 0xc40c0001,
+ 0x24ccffff, 0x3cd08000, 0x9500fffd, 0x1cd0ffcf, 0x7d018001, 0xc4140004, 0x050c0019, 0xd8400008,
+ 0x84c00000, 0x80000023, 0x80000067, 0x8000006a, 0x8000006d, 0x80000079, 0x80000084, 0x8000008f,
+ 0x80000099, 0x800000a0, 0x800000af, 0xd8400053, 0xc4080007, 0x388c0001, 0x08880002, 0x04100003,
+ 0x94c00005, 0x98800003, 0x04100004, 0x8000002d, 0x04100005, 0x8c00003f, 0x8c000043, 0x28cc0000,
+ 0xccc00050, 0x8c000055, 0x28080001, 0xcc000004, 0x7d808001, 0xd8400013, 0xd88130b8, 0xcd400008,
+ 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, 0xcc800005, 0xdc080000, 0x80000168, 0xc40c000e,
+ 0x28cc0008, 0xccc00013, 0x90000000, 0xcd013278, 0xc4113278, 0x95000001, 0x24cc0700, 0xd8400029,
+ 0xc4113255, 0xcd01324f, 0xc4113254, 0x1d10ffdf, 0xcd013254, 0x10cc0014, 0x1d10c017, 0x7d0d000a,
+ 0xd8400013, 0xd8400008, 0xcd0130b7, 0x14cc0010, 0x90000000, 0xd9c00036, 0x8000005d, 0xd8400013,
+ 0xc00c4000, 0xccc130b5, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc40c0021, 0x14d00011, 0x9500fffe,
+ 0xdc030000, 0xd800000c, 0xd800000d, 0xc40c005e, 0x94c01b10, 0xd8400013, 0x90000000, 0xc00e0080,
+ 0xccc130b5, 0x8000013b, 0xc00e0800, 0xccc130b5, 0x8000013b, 0xd8400053, 0x04100006, 0x8c00003f,
+ 0x8c000043, 0x28cc0000, 0xccc00050, 0x8c000055, 0x280c0008, 0xccc00052, 0xd8000021, 0x28180039,
+ 0x80000034, 0xd8400053, 0x04100007, 0x8c00003f, 0x8c000043, 0x28cc0001, 0xccc00050, 0x8c000055,
+ 0x280c0010, 0xccc00052, 0x28180039, 0x80000034, 0xd8400053, 0x04100008, 0x8c00003f, 0x8c000043,
+ 0x28cc0003, 0xccc00050, 0x8c000055, 0x280c0020, 0xccc00052, 0x28180039, 0x80000034, 0xdc030000,
+ 0xd8000069, 0x28080001, 0xc428000d, 0x7ca88004, 0xcc800079, 0x04280001, 0xcc00006f, 0x8000013b,
+ 0x80000034, 0x04100010, 0x8c00003f, 0x8c000043, 0xccc00078, 0x8c000055, 0x28180080, 0x80000034,
+ 0x04100001, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xcd013278, 0xc4113278, 0x95000001, 0xc00c4000,
+ 0xc4113254, 0x1d10c017, 0xd8400013, 0xd8400008, 0xccc130b5, 0xcd0130b7, 0x8000013b, 0x95c00001,
+ 0x96000001, 0x96400001, 0x96800001, 0x96c00001, 0x97000001, 0x97400001, 0x97800001, 0x97c00001,
+ 0xdc810000, 0xc40c000c, 0xcd4c0380, 0xcdcc0388, 0x55dc0020, 0xcdcc038c, 0xce0c0390, 0x56200020,
+ 0xce0c0394, 0xce4c0398, 0x56640020, 0xce4c039c, 0xce8c03a0, 0x56a80020, 0xce8c03a4, 0xcecc03a8,
+ 0x56ec0020, 0xcecc03ac, 0xcf0c03b0, 0x57300020, 0xcf0c03b4, 0xcf4c03b8, 0x57740020, 0xcf4c03bc,
+ 0xcf8c03c0, 0x57b80020, 0xcf8c03c4, 0xcfcc03c8, 0x57fc0020, 0xcfcc03cc, 0xd9000033, 0xc41c0009,
+ 0x25dc0010, 0x95c0fffe, 0xd8400013, 0xc41c000c, 0x05dc002f, 0xcdc12009, 0xc41d200a, 0xd8400013,
+ 0xcc012009, 0xd9000034, 0x25e01c00, 0x12200013, 0x25e40300, 0x12640008, 0x25e800c0, 0x12a80002,
+ 0x25ec003f, 0x7e25c00a, 0x7eae400a, 0x7de5c00a, 0xddc10000, 0xc02ee000, 0xcec1c200, 0xc40c005f,
+ 0xccc00037, 0x24d000ff, 0x31100006, 0x9500007b, 0x8c000190, 0xdc1c0000, 0xd8400013, 0xcdc1c200,
+ 0xc40c000c, 0xc4df0388, 0xc4d7038c, 0x51540020, 0x7d5dc01a, 0xc4e30390, 0xc4d70394, 0x51540020,
+ 0x7d62001a, 0xc4e70398, 0xc4d7039c, 0x51540020, 0x7d66401a, 0xc4eb03a0, 0xc4d703a4, 0x51540020,
+ 0x7d6a801a, 0xc4ef03a8, 0xc4d703ac, 0x51540020, 0x7d6ec01a, 0xc4f303b0, 0xc4d703b4, 0x51540020,
+ 0x7d73001a, 0xc4f703b8, 0xc4d703bc, 0x51540020, 0x7d77401a, 0xc4fb03c0, 0xc4d703c4, 0x51540020,
+ 0x7d7b801a, 0xc4ff03c8, 0xc4d703cc, 0x51540020, 0x7d7fc01a, 0xdc080000, 0xcc800013, 0xc4d70380,
+ 0xc4080001, 0x1c88001c, 0xcd400008, 0xc40c0083, 0x94c00010, 0xdc0e0000, 0x94c0000e, 0xc40c0082,
+ 0x24d00001, 0x9900000b, 0x18cc01e3, 0x3cd00004, 0x95000008, 0xc40c0085, 0x18cc006a, 0x98c00005,
+ 0xc40c0082, 0x18cc01e3, 0x3cd00004, 0x9900fffa, 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000,
+ 0xcc800004, 0xdc080000, 0x90000000, 0xc4080001, 0x1c88001c, 0xcd400008, 0xdc180000, 0xdc140000,
+ 0xdc100000, 0xdc0c0000, 0xcc800004, 0xdc080000, 0x90000000, 0xd8400051, 0xc428000c, 0x04180018,
+ 0x32640002, 0x9a80001f, 0x9a40001e, 0xcd800013, 0xc4293265, 0x040c0000, 0x1aac0027, 0x2aa80080,
+ 0xce813265, 0x9ac00017, 0xd80002f1, 0x04080002, 0x08880001, 0xd8080250, 0xd8080258, 0xd8080230,
+ 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, 0xd8080278, 0xd8080280, 0xd8080228,
+ 0xd8000367, 0x9880fff3, 0x04080010, 0x08880001, 0xd80c0309, 0xd80c0319, 0x04cc0001, 0x9880fffc,
+ 0x7c408001, 0x88000000, 0xc00e0100, 0xd8400013, 0xd8400008, 0xccc130b5, 0x8000016e, 0xc4180032,
+ 0x29980008, 0xcd800013, 0x95800001, 0x7c40c001, 0x18d0003f, 0x24d4001f, 0x24d80001, 0x155c0001,
+ 0x05e80180, 0x9900000b, 0x202c003d, 0xcd800010, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x86800000,
+ 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0xc4200007, 0x0a200001, 0xce000010, 0x80001b70,
+ 0x7c40c001, 0x8c000190, 0xc410001b, 0xd8000032, 0xd8000031, 0x9900091a, 0x7c408001, 0x88000000,
+ 0x24d000ff, 0x05280196, 0x18d4fe04, 0x29540008, 0xcd400013, 0x86800000, 0x800001b4, 0x8000032b,
+ 0x80000350, 0x80000352, 0x8000035f, 0x80000701, 0x8000047c, 0x8000019f, 0x80000800, 0xc419325b,
+ 0x1d98001f, 0xcd81325b, 0x8c00003f, 0xc4140004, 0xd8400008, 0x04100002, 0x8c000043, 0x28cc0002,
+ 0xccc00050, 0xc43c0044, 0x27fc0003, 0x9bc00002, 0x97c00006, 0xc00c4000, 0xccc130b5, 0x8c000055,
+ 0xd8400013, 0xd88130b8, 0xcd400008, 0x90000000, 0xd8400008, 0xcd400013, 0x7d40c001, 0xd8400028,
+ 0xd8400029, 0xd9400036, 0xc4193256, 0xc41d3254, 0x15540008, 0xcd400009, 0xcd40005b, 0xcd40005e,
+ 0xcd40005d, 0xd840006d, 0xc421325a, 0xc42d3249, 0x11540015, 0x19a4003c, 0x1998003f, 0x1af0007d,
+ 0x11dc000b, 0x1264001f, 0x15dc000d, 0x7d65400a, 0x13300018, 0x1a38003f, 0x7dd5c00a, 0x7df1c00a,
+ 0xcd800045, 0xcdc00100, 0xc411326a, 0xc415326b, 0xc419326c, 0xc41d326d, 0xc425326e, 0xc4293279,
+ 0xce800077, 0xcd000056, 0xcd400057, 0xcd800058, 0xcdc00059, 0xc4193265, 0x259c8000, 0x99c00004,
+ 0xce40005a, 0x29988000, 0xcd813265, 0xc4113248, 0x2510000f, 0xcd000073, 0xc418000d, 0xc411326f,
+ 0x17300019, 0x97000009, 0x25140fff, 0x95400007, 0xd800003a, 0x8c001b6d, 0xc4153279, 0xcd400077,
+ 0xcd00005f, 0xd8000075, 0x26f00001, 0x15100010, 0x7d190004, 0xcd000035, 0x97000035, 0x1af07fe8,
+ 0xd8800013, 0xd8400010, 0xd8400008, 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001,
+ 0x04300010, 0xdf430000, 0x7c434001, 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078,
+ 0xdf030000, 0xd4412e40, 0xd8400013, 0xcc41c030, 0xcc41c031, 0xc43dc031, 0xccc00013, 0x04343000,
+ 0xc4113246, 0xc41d3245, 0xcf413267, 0x51100020, 0x7dd1c01a, 0xc4353267, 0x45dc0160, 0xc810001f,
+ 0x1b4c0057, 0x1b700213, 0x1b740199, 0x7f4f400a, 0x7f73400a, 0x55180020, 0x2198003f, 0xd1c00025,
+ 0xcf400024, 0xcd000026, 0xcd800026, 0xd8400027, 0x9bc00001, 0x248dfffe, 0xd8800013, 0xccc12e00,
+ 0x7c434001, 0x7c434001, 0x8c00142b, 0xc43c000e, 0x1af4007d, 0x2bfc0008, 0x33740003, 0x26d80001,
+ 0xcfc00013, 0x1ae8003e, 0x9680000c, 0xc4253277, 0x26680001, 0x96800009, 0x2a640002, 0xce413277,
+ 0xd8400013, 0xc4253348, 0xce413348, 0xc4253348, 0x96400001, 0xcfc00013, 0x9b400003, 0x958000d8,
+ 0x80000315, 0xc4253277, 0x04303000, 0x26680001, 0xcf013267, 0xc4193246, 0xc41d3245, 0xc4313267,
+ 0x96800041, 0x51980020, 0x1b342010, 0x7d9d801a, 0x1714000c, 0x25540800, 0x1b30c012, 0x459801b0,
+ 0x7d77400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0xd180001e, 0xd8400021, 0x04240010, 0x199c01e2,
+ 0x7e5e4002, 0x3e5c0004, 0x3e540002, 0xc428000f, 0x9a80ffff, 0x95c00006, 0xc80c0011, 0xc8140011,
+ 0x54d00020, 0x55580020, 0x80000282, 0x95400015, 0xc80c0011, 0x0a640002, 0x041c0001, 0x45980008,
+ 0x54d00020, 0x96400004, 0xc8140011, 0x45980004, 0x041c0000, 0xcf00001c, 0xd180001e, 0xd8400021,
+ 0xc428000f, 0x9a80ffff, 0x99c00003, 0xc8180011, 0x80000282, 0xc8140011, 0x55580020, 0x80000282,
+ 0x45980004, 0xc80c0011, 0xcf00001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8100011,
+ 0xc8140011, 0x55580020, 0xd8400013, 0xccc1334e, 0xcd01334f, 0xcd413350, 0xcd813351, 0xd881334d,
+ 0xcfc00013, 0xc4193273, 0xc41d3275, 0xc40d3271, 0xc4113270, 0xc4153274, 0x50cc0020, 0x7cd0c01a,
+ 0x7cdcc011, 0x05900008, 0xcd00006a, 0xcdc0006b, 0xc41d3272, 0x7d594002, 0x54d00020, 0xd8800013,
+ 0xccc12e23, 0xcd012e24, 0xcdc12e25, 0xcfc00013, 0xc4193246, 0xc41d3245, 0xc4313267, 0x15540002,
+ 0x51980020, 0x7d9d801a, 0xc81c001f, 0x1b340057, 0x1b280213, 0x1b300199, 0x45980198, 0x7f37000a,
+ 0x7f2b000a, 0x55e40020, 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0xcd40000d,
+ 0xcd40000a, 0xc40d3249, 0x20cc003c, 0xccc13249, 0xc4113274, 0xdd430000, 0xc01e0001, 0x29dc0002,
+ 0x04280000, 0xd8000036, 0xcc400078, 0xcc400078, 0x2d540002, 0x95400022, 0x078c0000, 0x07d40000,
+ 0x8c00120d, 0x8c001239, 0x8c001232, 0x04f80000, 0x057c0000, 0xcdc00013, 0xc414000d, 0xc41c0019,
+ 0x7dd5c005, 0x25dc0001, 0xd840007c, 0xd8400074, 0xd8400069, 0xc40c005e, 0x94c018a6, 0xd4412e22,
+ 0xd800007c, 0xc40c005e, 0x94c018a2, 0x95c00007, 0xc40c0019, 0x7cd4c005, 0x24cc0001, 0x94c00008,
+ 0x9680fffc, 0x800002e3, 0xc40c0057, 0x7cd0c002, 0x94c00003, 0x9680fffd, 0x800002e3, 0xd8000069,
+ 0xcfc00013, 0xcd013273, 0xcd013275, 0xd8000074, 0xc414005e, 0x9540188f, 0xcfc00013, 0xc40d3249,
+ 0xc013cfff, 0x7cd0c009, 0xccc13249, 0x9680000b, 0xc40c0077, 0x38d00001, 0x99000006, 0x04cc0002,
+ 0xdcc30000, 0xc40c005e, 0x94c01882, 0xd4400078, 0xd800000d, 0x80000304, 0x7c41c001, 0x7c41c001,
+ 0xd840002f, 0xc41c0015, 0x95c0ffff, 0xd8400030, 0xc41c0016, 0x95c0ffff, 0xd8000030, 0xc41c0016,
+ 0x99c0ffff, 0xd800002f, 0xc41c0015, 0x99c0ffff, 0xc81c001f, 0x49980198, 0x55e40020, 0x459801a0,
+ 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0x04302000, 0xcfc00013, 0xcf013267,
+ 0xc4313267, 0x96800004, 0x97000001, 0xd8000036, 0x80000329, 0xd8800013, 0xcc812e00, 0x04302000,
+ 0xcfc00013, 0xcf013267, 0xc4313267, 0x97000001, 0xc4193256, 0xc42d3249, 0x16ec001f, 0xd8000028,
+ 0xd800002b, 0x1998003e, 0xcec00031, 0xd8000036, 0xd8000010, 0x97800004, 0xd8400010, 0xce00000a,
+ 0x1a18003e, 0xcd800008, 0x90000000, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000,
+ 0x7d43c001, 0xcd400013, 0xc4093249, 0x1888003e, 0x94800015, 0xd8400074, 0x8c000671, 0xcd400013,
+ 0x9a400006, 0xc419324c, 0x259c0001, 0x1598001f, 0x95c0000d, 0x9580000c, 0x99000003, 0xd8400036,
+ 0x04100001, 0xc40c0021, 0x14d80011, 0x24dc00ff, 0x31e00002, 0x31dc0003, 0x9580fff0, 0x9a000003,
+ 0x99c00002, 0xd9c00036, 0x94800004, 0xd8000074, 0xc418005e, 0x95801827, 0xcf800008, 0x90000000,
+ 0xd8800036, 0x90000000, 0xd8c00036, 0xc424000b, 0x32640002, 0x9a400004, 0xc4180014, 0x9580ffff,
+ 0xd840002f, 0xc40c0021, 0x14dc0011, 0x95c0fffe, 0xccc00037, 0x8c000190, 0x90000000, 0xd8400008,
+ 0xd800006d, 0xc41d3246, 0xc4193245, 0x51dc0020, 0x7d9d801a, 0xd8400028, 0xd8400029, 0xc420000b,
+ 0x32200002, 0x9a0000ad, 0x04200032, 0xd9000010, 0xde030000, 0xd8400033, 0x04080000, 0xc43c0009,
+ 0x27fc0002, 0x97c0fffe, 0xc42c0015, 0x96c0ffff, 0xd800002e, 0xc42d3249, 0x1af4003e, 0x9740004d,
+ 0xc428000d, 0xc4080060, 0x7ca88005, 0x24880001, 0x7f4b4009, 0x97400046, 0xc4313274, 0xc4100057,
+ 0x7d33400c, 0x97400009, 0x28240100, 0x7e6a4004, 0xce400079, 0x1eecffdd, 0xcec13249, 0xcf013273,
+ 0xcf013275, 0x800003c3, 0xc429326f, 0x1aa80030, 0x96800006, 0x28240001, 0xc428000d, 0x06a80008,
+ 0x7e6a8004, 0xce800035, 0xc41d3272, 0x25cc0001, 0x10cc0004, 0x19e80042, 0x25dc0006, 0x11dc0001,
+ 0x7e8e800a, 0x7de9c00a, 0xc40d3271, 0xc4293270, 0x50cc0020, 0x7ce8c01a, 0x7cd30011, 0x11e80007,
+ 0x2aa80000, 0xce80001c, 0xd300001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc4300011, 0x1b30003f,
+ 0x33300000, 0xc4240059, 0x1660001f, 0x7e320009, 0xc0328000, 0x7e72400a, 0x0430000c, 0x9a000002,
+ 0x04300008, 0xc02ac000, 0x7d310002, 0x17300002, 0x2aa87600, 0x7cd0c011, 0xcdc00024, 0xd0c00025,
+ 0xce800026, 0x04280222, 0xce800026, 0x96000002, 0xce400026, 0xd8400027, 0xc4280058, 0x22ec003d,
+ 0xcec13249, 0xcd013273, 0xce813275, 0xd800007b, 0xc8380018, 0x57b00020, 0x04343108, 0xc429325d,
+ 0x040c3000, 0x13740008, 0x2374007e, 0x32a80003, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
+ 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0x94800003, 0xd4400078, 0x800003e7, 0x04200022, 0xde030000,
+ 0xccc00024, 0xd1800025, 0xcf400026, 0xd4400026, 0xd8400027, 0x04200010, 0xde030000, 0xccc00024,
+ 0x45980104, 0xd1800025, 0xd4400026, 0xcf800026, 0xcf000026, 0xd8400027, 0x49980104, 0x9a80000a,
+ 0xc81c001f, 0x45980168, 0x55e00020, 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027,
+ 0x800003f2, 0x8c000448, 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xc40d3249,
+ 0x18cc003e, 0xd8400030, 0xc42c0016, 0x96c0ffff, 0xd8000030, 0xc42c0016, 0x9ac0ffff, 0xd800002f,
+ 0xc42c0015, 0x9ac0ffff, 0xd8400034, 0xc4300025, 0xc4340024, 0xc4380081, 0xcf813279, 0xcf41326e,
+ 0xcf01326d, 0x94c0000d, 0x254c0700, 0xc424001e, 0x10cc0010, 0x1a641fe8, 0x28cc0726, 0x2a640200,
+ 0xd8400013, 0xccc1237b, 0x2264003f, 0xcd400013, 0xd8813260, 0xce41325b, 0xc4240033, 0xc4280034,
+ 0xd9000036, 0xd8000010, 0x8c001427, 0x96400006, 0xde430000, 0xce40000c, 0xc40c005e, 0x94c01755,
+ 0xd4400078, 0x9680000a, 0xce80000a, 0x06a80002, 0xd8400010, 0xde830000, 0xce80000d, 0xc40c005e,
+ 0x94c0174c, 0xd4400078, 0xd8000010, 0x8c00142b, 0xc4393265, 0x2bb80040, 0xd8400032, 0xcf813265,
+ 0xc4200012, 0x9a00ffff, 0xc4100044, 0x19180024, 0xc8100072, 0x551c003f, 0x99c00003, 0x95800010,
+ 0x8000043d, 0xc00c8000, 0xd840006c, 0x28200000, 0x8000043f, 0xc00c4000, 0x282000f0, 0xcd400013,
+ 0xd8400008, 0xc4113255, 0xcd01324f, 0xd8400013, 0xd88130b8, 0xccc130b5, 0xce000053, 0x90000000,
+ 0x195c00e8, 0xc4100004, 0x2555fff0, 0xc0360001, 0x042c0000, 0x29540001, 0xd8400008, 0x04240000,
+ 0x04280004, 0xc420000b, 0x32200002, 0x9a000009, 0xcd400013, 0xcec1c200, 0xc5e124dc, 0x0aa80001,
+ 0x7ef6c001, 0x7e624001, 0x96000001, 0x9a80fff9, 0xc02ee000, 0xcd400013, 0x2555fff0, 0xcec1c200,
+ 0x29540008, 0xc81c001f, 0xcd400013, 0x55e00020, 0xc42d3255, 0xc4353259, 0xd8013260, 0x45980158,
+ 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, 0x49980158, 0x45980170, 0xc4200012,
+ 0x16200010, 0x9a00fffe, 0xccc00024, 0xd1800025, 0xc429324f, 0xce400026, 0xce800026, 0xcec00026,
+ 0xcf400026, 0xd8400027, 0xcd000008, 0x90000000, 0xc40d325b, 0x7d43c001, 0x195400e8, 0x1154000a,
+ 0x18dc00e8, 0x05e80488, 0x18d0006c, 0x18f807f0, 0x18e40077, 0x18ec0199, 0x7e6e400a, 0x86800000,
+ 0x8000048e, 0x80000494, 0x800004de, 0x80000685, 0x80000686, 0x800006ac, 0x1ccc001f, 0xccc1325b,
+ 0xc411325d, 0x251001ef, 0xcd01325d, 0x90000000, 0xc4293254, 0x1264000a, 0xc4300004, 0x7d79400a,
+ 0x7e7a400a, 0x52a8001e, 0x15180001, 0x7d69401a, 0x202c007d, 0xcec1325b, 0x95000008, 0x95800028,
+ 0xc42d3267, 0xc4193246, 0xc41d3245, 0x1aec0028, 0xc40d325c, 0x800004cc, 0xc42d3256, 0xc419324e,
+ 0x26e8003f, 0x1aec003e, 0x12f4000e, 0xc41d324d, 0xc40d324f, 0x7d75401a, 0x04100002, 0x7d290004,
+ 0x7f8f4001, 0x7f52800f, 0x51980020, 0x7d9d801a, 0x50e00002, 0x51980008, 0x9a800002, 0x800004d1,
+ 0x7d0dc002, 0x6665fc00, 0x7e5e401a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000,
+ 0xce400002, 0x7f534002, 0x6665fc00, 0x7e76401a, 0xd1800002, 0xce400002, 0x800004d7, 0xc42d325a,
+ 0xc4193258, 0x1aec003e, 0xc41d3257, 0xc4213259, 0x12f4000e, 0x7d75401a, 0x51980020, 0x52200002,
+ 0x7d9d801a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, 0xce400002, 0x202c003d,
+ 0xcf000008, 0xcfc00013, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x90000000, 0xc4193260, 0x259c0007,
+ 0x15980004, 0x05e804e3, 0x86800000, 0x800004e7, 0x800004f0, 0x80000505, 0x8000016a, 0xc4380004,
+ 0xcfc00013, 0xd8400008, 0xc435325d, 0xd801325b, 0x277401ef, 0xcf41325d, 0xcf800008, 0x90000000,
+ 0xc4380004, 0xd8400008, 0x8c000671, 0x9640fff4, 0x17e00008, 0xc418000d, 0xce000009, 0xd84131db,
+ 0xcf800008, 0xcd800009, 0xc430001e, 0xcfc00013, 0xc42d325b, 0x1b301ff8, 0x2b300400, 0x2330003f,
+ 0x26edf000, 0x7ef2c00a, 0xd8413260, 0xcec1325b, 0x90000000, 0x05a80507, 0x86800000, 0x8000050c,
+ 0x80000528, 0x8000057d, 0x800005c2, 0x800005f3, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013,
+ 0x9a400012, 0x1bd400e8, 0xc42c004a, 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000c, 0xc4100019,
+ 0x7d150005, 0x25100001, 0x99000008, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277,
+ 0xd801326f, 0x80000624, 0x04240012, 0x1be00fe4, 0xce413260, 0xce000066, 0xcf800008, 0x90000000,
+ 0xd8400068, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, 0x9a400013, 0x1bd400e8, 0xc42c004a,
+ 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000d, 0xc4100019, 0x7d150005, 0x25100001, 0x99000009,
+ 0xd8400067, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, 0xd801326f, 0x80000624,
+ 0x1bd400e8, 0xc42c0060, 0x7ed6c005, 0x26ec0001, 0xc4113271, 0xc4153270, 0xc4193272, 0xc41d3273,
+ 0x04280022, 0x51100020, 0x7d51401a, 0xc4113274, 0xc4213275, 0xc4253276, 0xc4313248, 0xd1400061,
+ 0x2730000f, 0x13300010, 0x7db1800a, 0xcd800060, 0x96c00002, 0x05dc0008, 0xcdc00062, 0x042c3000,
+ 0xcd000063, 0xce000064, 0xce400065, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xce813260,
+ 0x52ec0020, 0x7ef2c01a, 0xc820001f, 0x1b700057, 0x1b680213, 0x1b740199, 0x46ec0188, 0x7f73400a,
+ 0x7f6b400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
+ 0xc418000d, 0x17e00008, 0xce000009, 0xcec13267, 0xc42d3267, 0x26e01000, 0x9a00fffe, 0xd8400013,
+ 0xd9c131fc, 0xcd800009, 0xcf800008, 0x96c00001, 0x90000000, 0xc4380004, 0xd8400008, 0xc4113277,
+ 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0x29dc0001, 0x25140001, 0x191807e4,
+ 0x192007ec, 0x95400004, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x9580000e, 0x09980001, 0x041c0001,
+ 0x95800005, 0x09980001, 0x51dc0001, 0x69dc0001, 0x9980fffd, 0x7de20014, 0x561c0020, 0xd8400013,
+ 0xce013344, 0xcdc13345, 0xcfc00013, 0x95400022, 0x042c3000, 0xcec13267, 0xc42d3246, 0xc4313245,
+ 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, 0xc419334e, 0xc41d334f, 0xc4213350,
+ 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, 0x1b740199, 0x46ec01b0, 0x7f6b400a,
+ 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, 0xcdc00026, 0xce000026, 0xce400026,
+ 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, 0x04280032, 0xce813260, 0xd8800068,
+ 0xcf800008, 0x90000000, 0xc4380004, 0xd8400008, 0x2010007d, 0xcd01325b, 0xc411325b, 0x1910003e,
+ 0x9500fffe, 0x04100040, 0xcd00001b, 0xd8400021, 0xc410000f, 0x9900ffff, 0x04100060, 0xcd00001b,
+ 0xd8400021, 0xc410000f, 0x9900ffff, 0xcfc00013, 0x2010003d, 0xcd01325b, 0xc4113277, 0x25140001,
+ 0x191807e4, 0x9540000b, 0x2511fffd, 0xcd013277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
+ 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x95800005, 0xd8400013, 0xd8013344, 0xd8013345,
+ 0xcfc00013, 0xc4180050, 0xc41c0052, 0x04280042, 0xcd813273, 0xcdc13275, 0xce813260, 0xd9000068,
+ 0xd8400067, 0xcf800008, 0x90000000, 0x07d40000, 0x8c00120d, 0x8c00124f, 0x8c001232, 0x057c0000,
+ 0x042c3000, 0xc4380004, 0xcfc00013, 0xd8400008, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267,
+ 0x52ec0020, 0x7ef2c01a, 0x1b680057, 0x1b700213, 0x1b740199, 0xc820001f, 0x46ec0190, 0x7f6b400a,
+ 0x7f73400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
+ 0xcfc00013, 0xcec13267, 0xc4153249, 0x2154003d, 0xc41c0019, 0x1bd800e8, 0x7dd9c005, 0x25dc0001,
+ 0xc42c004a, 0xcd80005e, 0xc420004d, 0xcec0005e, 0x11dc0010, 0x7e1e000a, 0xcd413249, 0xce01326f,
+ 0x28340001, 0x05980008, 0x7f598004, 0xcd800035, 0x1be800e8, 0xc42c004a, 0xce80005e, 0xd801327a,
+ 0xd800005f, 0xd8000075, 0xd800007f, 0xc424004c, 0xce41326e, 0xcec0005e, 0x28240100, 0x7e6a4004,
+ 0xce400079, 0xc435325d, 0x277401ef, 0x04240020, 0xce41325e, 0xd801325b, 0xd8013260, 0xcf41325d,
+ 0xda000068, 0xcf800008, 0x90000000, 0xc4113277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
+ 0x11dc0008, 0x29dc0001, 0x25140001, 0x9540002d, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x042c3000,
+ 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe,
+ 0xc419334e, 0xc41d334f, 0xc4213350, 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213,
+ 0x1b740199, 0x46ec01b0, 0x7f6b400a, 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026,
+ 0xcdc00026, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001,
+ 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013,
+ 0x90000000, 0xc430000b, 0x33300002, 0x04240000, 0x9b000010, 0x1be000e8, 0x042c0000, 0xc0360001,
+ 0x04280004, 0xd8400013, 0xcec1c200, 0xc63124dc, 0x0aa80001, 0x7ef6c001, 0x7e724001, 0x97000001,
+ 0x9a80fff9, 0xc02ee000, 0xd8400013, 0xcec1c200, 0x90000000, 0x90000000, 0xc4253260, 0x7fc14001,
+ 0xc40d3249, 0x18cc003e, 0x98c00005, 0x194c1c03, 0xccc0003b, 0xc40c002d, 0x80000697, 0xc420004a,
+ 0x194c00e8, 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x98c00003,
+ 0x8c0007e0, 0x95c00008, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, 0xcf01325b,
+ 0x90000000, 0xcd400013, 0xd801325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x25100007, 0x31100005,
+ 0x9900008e, 0xc40c0007, 0xd9000010, 0x8000075e, 0x202c007d, 0xcec1325b, 0xc4293265, 0xc4353254,
+ 0x26a9feff, 0xc4380004, 0xd8400008, 0x1374000b, 0xc40c000d, 0xd8000009, 0x1774000d, 0xd8400013,
+ 0xc41d30b8, 0xcfc00013, 0x95c00008, 0xc411325d, 0xd801325b, 0xccc00009, 0xcf800008, 0x251001ef,
+ 0xcd01325d, 0x90000000, 0xce813265, 0xcf400100, 0xc00ac006, 0xc00e0000, 0x28880700, 0x28cc0014,
+ 0x8c0006de, 0x14cc0010, 0x30d4000f, 0x04cc0001, 0x10cc0010, 0x28cc0014, 0x99400009, 0xd8400013,
+ 0xc41530b8, 0xcfc00013, 0xc4193265, 0x19980028, 0x99400003, 0x99800002, 0x800006c8, 0xcfc00013,
+ 0xc411325d, 0xd801325b, 0xcf800008, 0x251001ef, 0xcd01325d, 0x90000000, 0x15600008, 0xce000009,
+ 0xc8380023, 0xc4180081, 0x11a00002, 0x7fa38011, 0xc4100026, 0x05980008, 0x7d1a0002, 0x282c2002,
+ 0x3e280008, 0xcec00013, 0xc4300027, 0x042c0008, 0xd3800025, 0xcf000024, 0x202400d0, 0x7ca48001,
+ 0xcc800026, 0xccc00026, 0x28240006, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800004, 0x32280000,
+ 0x9a800002, 0x9a000000, 0xd8400027, 0x24d8003f, 0xd840003c, 0xcec0003a, 0xd8800013, 0xcd81a2a4,
+ 0x90000000, 0xc41d325d, 0x25dc0007, 0xc40d3249, 0x18cc003e, 0x94c0000a, 0xc420004a, 0x194c00e8,
+ 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x80000712, 0x194c1c03,
+ 0xccc0003b, 0xc40c002d, 0x05e80714, 0x86800000, 0x8000071c, 0x80000720, 0x80000747, 0x8000071d,
+ 0x800007c4, 0x80000732, 0x80000745, 0x80000744, 0x90000000, 0x98c00006, 0x8000072e, 0x90000000,
+ 0x98c00003, 0x8c0007e0, 0x95c0000c, 0xcd400013, 0xc4253265, 0x2a64008c, 0xce413265, 0xc430001e,
+ 0x1b301fe8, 0x2b300400, 0x2330003f, 0xd8013260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010,
+ 0x04240000, 0x8000075e, 0x98c0fff1, 0x8c0007e0, 0x95c00002, 0x80000723, 0xcd400013, 0xc41f02f1,
+ 0x95c00004, 0xd8013247, 0xd801325d, 0x80000743, 0xd8813247, 0xd801325d, 0xc4100004, 0xd8400008,
+ 0xd8400013, 0xd88130b8, 0xcd000008, 0x90000000, 0x04100001, 0x98c0ffde, 0x8000072e, 0x98c00003,
+ 0x8c0007e0, 0x95c00012, 0xc4340004, 0xd8400008, 0x15600008, 0xc418000d, 0xce000009, 0xd8400013,
+ 0xd84131db, 0xcf400008, 0xcd800009, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013,
+ 0xd8413260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, 0x04240000, 0xcd400013, 0x041c3000,
+ 0xcdc13267, 0xc41d3267, 0xc41d3265, 0x25dc8000, 0x95c00007, 0xc41c004a, 0x195800e8, 0xcd80005e,
+ 0xc418004c, 0xcd81326e, 0xcdc0005e, 0xc41d3265, 0x25dd7fff, 0xcdc13265, 0xc41d3246, 0xc4193245,
+ 0xc42d3267, 0x51e00020, 0x7e1a001a, 0x46200200, 0x04283247, 0x04300033, 0x1af80057, 0x1af40213,
+ 0x042c000c, 0x7f7b400a, 0x7f6f400a, 0xcf400024, 0xd2000025, 0xcd800026, 0xcdc00026, 0xc6990000,
+ 0x329c325d, 0x99c00008, 0x329c3269, 0x99c00006, 0x329c3267, 0x95c00005, 0xc01defff, 0x7d9d8009,
+ 0x8000078a, 0x25980000, 0x0b300001, 0x06a80001, 0xcd800026, 0x9b00fff2, 0xd8400027, 0xc43c0012,
+ 0x9bc0ffff, 0xcd400013, 0xd801325b, 0xc431325a, 0xc03e7ff0, 0x7f3f0009, 0xcf01325a, 0xc4313249,
+ 0x1f30001f, 0xcf013249, 0xc03e4000, 0xcfc13254, 0xcd400013, 0xd8013254, 0xc431325d, 0xd801324f,
+ 0xd8013255, 0xd8013247, 0xd801325d, 0x1b300028, 0x8c00120d, 0x8c001219, 0x8c001232, 0xc4380004,
+ 0xd8400008, 0xd8400013, 0x9900000d, 0xd88130b8, 0x9700000b, 0xc43d30b5, 0x1bf0003a, 0x9b000b80,
+ 0x203c003a, 0xc430000e, 0x27300700, 0x13300014, 0x2b300001, 0xcf0130b7, 0xcfc130b5, 0x46200008,
+ 0xcf400024, 0xd2000025, 0xd8000026, 0xd8400027, 0x043c2000, 0xcd400013, 0xcfc13267, 0xc43d3267,
+ 0x9bc00001, 0xccc00010, 0xcf800008, 0x90000000, 0xc4080007, 0xd9000010, 0xc4193260, 0x259c0003,
+ 0x31dc0003, 0x95c00014, 0x040c3000, 0xd8400008, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
+ 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0xc4193246, 0xc41d3245, 0x51980020, 0x7d9d801a, 0x8c000448,
+ 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xcc800010, 0xd801325d, 0x90000000,
+ 0xc418000b, 0x31980002, 0x041c0000, 0x9980001c, 0x19580066, 0x15600008, 0x040c0000, 0xc0120001,
+ 0x11980003, 0x04240004, 0x7da18001, 0xc4200007, 0xc4340004, 0xd9000010, 0xd8400008, 0xd8400013,
+ 0xccc1c200, 0xc41d24db, 0x7cd0c001, 0x0a640001, 0x7dd9c005, 0x25dc0001, 0x99c00002, 0x9a40fff8,
+ 0xc418005e, 0x9580137b, 0xc00ee000, 0xd8400013, 0xccc1c200, 0xce000010, 0xcf400008, 0x90000000,
+ 0xd840004f, 0xc4113269, 0x19080070, 0x190c00e8, 0x2510003f, 0x2518000f, 0xcd813268, 0x05a80809,
+ 0x86800000, 0x8000080e, 0x8000080f, 0x80000898, 0x80000946, 0x800009e1, 0x80000a5a, 0x04a80811,
+ 0x86800000, 0x80000815, 0x80000834, 0x8000085e, 0x8000085e, 0x04341001, 0xcf400013, 0xc4380004,
+ 0xd8400008, 0xc42d3045, 0xcec1c091, 0x31300021, 0x9700000b, 0xd84002f1, 0xd8400013, 0xc43130b8,
+ 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, 0xcf800008, 0x9b000241, 0x8000084a, 0xcf400013,
+ 0xd8400008, 0xc43130b6, 0x9b000003, 0xc02f0001, 0xcec130b6, 0xc4252087, 0x5668001a, 0x26a80005,
+ 0x9a80fffd, 0xcf400013, 0xd80130b6, 0x8000084a, 0xc4380004, 0xd8400008, 0x04341001, 0xcf400013,
+ 0xc431ecaa, 0x27300080, 0x9b000010, 0xc02e0001, 0xcec130b6, 0xcf400013, 0xd80130b6, 0x31300021,
+ 0x9700000a, 0xd84002f1, 0xd8400013, 0xc43130b8, 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a,
+ 0xcf800008, 0x9b00021d, 0xdd410000, 0x040c0005, 0xd84802e9, 0x8c001a41, 0xc43b02f1, 0x9b800006,
+ 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0xcf800008, 0xcec80278, 0x56f00020, 0xcf080280,
+ 0x8c001608, 0xdc140000, 0xcd400013, 0xd8813247, 0xd80802e9, 0x8000085e, 0xcd400013, 0x31100011,
+ 0x950001fa, 0xc02e0001, 0x2aec0008, 0xc01c0020, 0xc0180001, 0xc00c0007, 0x11a40006, 0x7de6000a,
+ 0x10e40008, 0x7e26000a, 0x7e2e000a, 0xce000013, 0xc4113254, 0x1d10ffdf, 0x2110003e, 0xcd013254,
+ 0xd801324f, 0xd8013255, 0x1d10ff9e, 0xcd013254, 0xd8013247, 0xd801325d, 0xd801325e, 0xc0245301,
+ 0xce413249, 0xd801325f, 0xc425326c, 0xc0121fff, 0x29108eff, 0x7e524009, 0xce41326c, 0xc425325a,
+ 0xc0127ff0, 0x7e524009, 0xce41325a, 0xc425325b, 0xc0131fff, 0x7e524009, 0xce41325b, 0xd801326d,
+ 0xd801326e, 0xd8013279, 0x94c00003, 0x08cc0001, 0x80000866, 0xc00c0007, 0x95800003, 0x09980001,
+ 0x80000866, 0xc0100010, 0x7dd2400c, 0x9a400004, 0xc0180003, 0x7dd1c002, 0x80000866, 0x80000a5a,
+ 0x04a8089a, 0x86800000, 0x8000089e, 0x800008fa, 0x80000945, 0x80000945, 0x31300022, 0x97000007,
+ 0xc4380004, 0xd8400008, 0xd8400013, 0xc43130b8, 0x27300001, 0xcf800008, 0xcd400013, 0x04183000,
+ 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
+ 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
+ 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000036, 0x45980008, 0xd180001e,
+ 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002f, 0xc43c0004, 0xd8400008, 0xd8400013,
+ 0x13b80001, 0xc79d3300, 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e,
+ 0x964012a4, 0x7c028009, 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x800008d2,
+ 0xc4180006, 0x9980ffff, 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001,
+ 0x9980fffd, 0xc02620c0, 0xce41c078, 0xce81c080, 0xcc01c081, 0xcf01c082, 0x57240020, 0xce41c083,
+ 0xc0260400, 0x7e6e400a, 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x800008d2, 0xc4180006, 0x9980ffff,
+ 0xcdf93300, 0xce393301, 0xcfc00008, 0xcd400013, 0xc43c0004, 0xd8400008, 0x04182000, 0xcd813267,
+ 0xcfc00008, 0x80000903, 0x31240022, 0x96400008, 0x04100001, 0xc4380004, 0xd8400008, 0xd8400013,
+ 0xc43130b8, 0x27300001, 0xcf800008, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x7ec30011,
+ 0x32f80000, 0x9b800011, 0x043c0020, 0x04280000, 0x67180001, 0x0bfc0001, 0x57300001, 0x95800006,
+ 0x8c001628, 0x9a400003, 0xd981325d, 0x80000915, 0xd9c1325d, 0x06a80001, 0x9bc0fff6, 0x7f818001,
+ 0x8c001606, 0x7d838001, 0x94800010, 0xcd400013, 0xc41d3259, 0xc421325a, 0x16240014, 0x12640014,
+ 0x1a2801f0, 0x12a80010, 0x2620ffff, 0x7e2a000a, 0x7de1c001, 0x7e5e400a, 0x9b800002, 0x2264003f,
+ 0xce41325a, 0xd8013259, 0xc40c0007, 0xd9000010, 0x8c00075e, 0xc4af0228, 0x043c0000, 0x66d80001,
+ 0x95800010, 0x04300002, 0x1330000d, 0x13f40014, 0x7f73400a, 0xcf400013, 0x04380040, 0xcf80001b,
+ 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff,
+ 0x07fc0001, 0x56ec0001, 0x33e80010, 0x9680ffec, 0x80000a5a, 0x80000a5a, 0x04a80948, 0x86800000,
+ 0x8000094c, 0x8000099b, 0x800009e0, 0x800009e0, 0xc43c0004, 0xd8400008, 0xcd400013, 0x04183000,
+ 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
+ 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
+ 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000033, 0x45980008, 0xd180001e,
+ 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002c, 0xd8400013, 0x13b80001, 0xc79d3300,
+ 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, 0x964011fe, 0x7c028009,
+ 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x80000978, 0xc4180006, 0x9980ffff,
+ 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, 0x9980fffd, 0xc0260010,
+ 0xce41c078, 0xcf01c080, 0x57240020, 0xce41c081, 0xce81c082, 0xcc01c083, 0xc0260800, 0x7e6e400a,
+ 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x80000978, 0xc4180006, 0x9980ffff, 0xcdf93300, 0xce393301,
+ 0x04182000, 0xcd813267, 0xcfc00008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, 0x7dda801a,
+ 0x7d41c001, 0x7e838011, 0xd84802e9, 0x8c001802, 0x469c0390, 0xc4313267, 0x04183000, 0xcd813267,
+ 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
+ 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, 0x45dc0004, 0xd1c0001e,
+ 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4240011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
+ 0x9980ffff, 0xc4280011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc42c0011,
+ 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4300011, 0x45dc0004, 0xd1c0001e,
+ 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4340011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
+ 0x9980ffff, 0xc4380011, 0xcd400013, 0x04182000, 0xcd813267, 0x043c0001, 0x8c0014df, 0x80000a5a,
+ 0x80000a5a, 0x31280014, 0xce8802ef, 0x9a800062, 0x31280034, 0x9a800060, 0x04a809e8, 0x86800000,
+ 0x800009ec, 0x80000a45, 0x80000a59, 0x80000a59, 0xcd400013, 0xc4113246, 0xc4193245, 0x51100020,
+ 0x7d91801a, 0x45980400, 0xc4b30258, 0xc4a70250, 0x53300020, 0x7e72401a, 0xc4313267, 0x1b342010,
+ 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0x042c0020,
+ 0x66740001, 0x97400041, 0xcd400013, 0x04383000, 0xcf813267, 0xc4393267, 0x9b800001, 0xd180001e,
+ 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4300011, 0x1b38007e, 0x33b40003, 0x9b400003, 0x4598001c,
+ 0x9740002f, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc40c0011, 0x45980004,
+ 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x45980004, 0xd180001e, 0xd8400021,
+ 0xc438000f, 0x9b80ffff, 0xc4340011, 0xcf4002eb, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f,
+ 0x9b80ffff, 0xc4340011, 0xcf4002ec, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff,
+ 0xc4340011, 0xcf4002ed, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4340011,
+ 0xcf4002ee, 0x45980004, 0xcd400013, 0x04382000, 0xcf813267, 0xd84802e9, 0x8c001715, 0xcd400013,
+ 0x04382000, 0xcf813267, 0x56640001, 0x0aec0001, 0x9ac0ffbc, 0xc4380004, 0xd8400008, 0x04341001,
+ 0xcf400013, 0x94800005, 0xc431ecaa, 0x27300080, 0x97000002, 0x80000a55, 0xc43130b6, 0x233c0032,
+ 0xcfc130b6, 0xcf400013, 0xcf0130b6, 0xc49302ef, 0x99000003, 0xcd400013, 0xd8413247, 0xcf800008,
+ 0x80000a5a, 0x80000a5a, 0xcd400013, 0x04180001, 0x5198001f, 0xcd813268, 0xc4193269, 0x2598000f,
+ 0x9980fffe, 0xd80002f1, 0xcd400013, 0xd8013268, 0xd800004f, 0x90000000, 0xcd400013, 0x04380001,
+ 0x53b8001f, 0x7db9801a, 0xcd813268, 0x80000a5e, 0xd8400029, 0xc40c005e, 0x94c01106, 0xd8800013,
+ 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xd8400029, 0xc40c005e, 0x94c010fd,
+ 0x7c40c001, 0x50640020, 0x7ce4c01a, 0xd0c00072, 0xc80c0072, 0x58e801fc, 0x12a80009, 0x2aa80000,
+ 0xd0c0001e, 0xce80001c, 0xd8400021, 0xc424000f, 0x9a40ffff, 0x04240010, 0x18dc01e2, 0x7e5e4002,
+ 0x3e5c0003, 0x3e540002, 0x95c00006, 0xc8180011, 0xc8100011, 0xc8100011, 0x55140020, 0x80000aa2,
+ 0x9540000a, 0xc8180011, 0x44cc0008, 0x55900020, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
+ 0xc4140011, 0x80000aa2, 0x44cc0004, 0xc4180011, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
+ 0xc8100011, 0x55140020, 0xd8800013, 0xcd812e01, 0xcd012e02, 0xcd412e03, 0xcc412e00, 0xc428000e,
+ 0x2aa80008, 0xce800013, 0xc4253249, 0x2264003f, 0xce413249, 0xce800013, 0xc4253249, 0x96400001,
+ 0xd800002a, 0xc410001a, 0xc40c0021, 0xc4140028, 0x95000005, 0x1e64001f, 0xce800013, 0xce413249,
+ 0x80001b70, 0x14d00010, 0xc4180030, 0xc41c0007, 0x99000004, 0x99400009, 0x9980000c, 0x80000ab1,
+ 0xccc00037, 0x8c000190, 0xc420001c, 0xd8000032, 0x9a0010ac, 0x80000aa7, 0xd880003f, 0x95c00002,
+ 0xd8c0003f, 0x80001082, 0xd8800040, 0x95c00002, 0xd8c00040, 0x800010de, 0xc010ffff, 0x18d403f7,
+ 0x7d0cc009, 0xc41b0367, 0x7d958004, 0x7d85800a, 0xdc1e0000, 0x90000000, 0xc424000b, 0x32640002,
+ 0x7c40c001, 0x18d001fc, 0x05280adc, 0x86800000, 0x80000af1, 0x80000adf, 0x80000ae7, 0x8c000ace,
+ 0xd8c00013, 0x96400002, 0xd8400013, 0xcd8d2000, 0x99c00010, 0x7c408001, 0x88000000, 0x18d803f7,
+ 0xc010ffff, 0x7d0cc009, 0x04140000, 0x11940014, 0x29544001, 0x9a400002, 0x29544003, 0xcd400013,
+ 0x80000af4, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44d2000, 0x7c408001, 0x88000000, 0xc424000b,
+ 0x32640002, 0x7c40c001, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44dc000, 0x7c408001, 0x88000000,
+ 0x7c40c001, 0x18d0003c, 0x95000006, 0x8c000ace, 0xd8800013, 0xcd8d2c00, 0x99c00003, 0x80000b0a,
+ 0xd8800013, 0xd44d2c00, 0x7c408001, 0x88000000, 0x7c40c001, 0x28148004, 0x24d800ff, 0xccc00019,
+ 0xcd400013, 0xd4593240, 0x7c408001, 0x88000000, 0xd8400029, 0xc40c005e, 0x94c0105e, 0x7c410001,
+ 0x50540020, 0x7c418001, 0x2198003f, 0x199c0034, 0xc40c0007, 0x95c00028, 0xc428000e, 0x2aa80008,
+ 0xce800013, 0xc42d324f, 0xc4313255, 0x7ef3400c, 0x9b400021, 0xd800002a, 0x80001b70, 0xc40c0007,
+ 0x14e80001, 0x9a8000af, 0xd9000010, 0x041c0002, 0x042c01c8, 0x8c000d61, 0xccc00010, 0xd8400029,
+ 0xc40c005e, 0x94c01043, 0x7c410001, 0x50540020, 0x7c418001, 0x18a01fe8, 0x3620005c, 0x9a00000e,
+ 0x2464003f, 0xd8400013, 0xc6290ce7, 0x16ac001f, 0x96c00004, 0x26ac003f, 0x7ee6c00d, 0x96c00005,
+ 0x06200001, 0x2620000f, 0x9a00fff8, 0x8000016a, 0xce000367, 0xc424005e, 0x9640102e, 0xc428000e,
+ 0x199c0037, 0x19a00035, 0x2aa80008, 0xce800013, 0x95c0005d, 0xd800002a, 0xc42d3256, 0xc431325a,
+ 0x2330003f, 0x16f8001f, 0x9780000d, 0xc4253248, 0xc035f0ff, 0x7e764009, 0x19b401f8, 0x13740008,
+ 0x7e76400a, 0xce800013, 0xce413248, 0xcf01325a, 0xce800013, 0xc431325a, 0x97000001, 0x7d15001a,
+ 0xd1000072, 0xc8100072, 0x55140020, 0x199c0034, 0xd8400010, 0xd8400029, 0x9b800004, 0x1ae4003e,
+ 0xce400008, 0x80000b7c, 0xc4353254, 0x16a80008, 0x1aec003c, 0x19a4003f, 0x12a80015, 0x12ec001f,
+ 0x1374000b, 0x7eae800a, 0xc02e4000, 0x1774000d, 0x7eae800a, 0xce400008, 0x7f6b400a, 0x95c00005,
+ 0xc43d3248, 0x1bfc01e8, 0x13fc0018, 0x7dbd800a, 0x1d98ff15, 0x592c00fc, 0xcd80000a, 0x12e00016,
+ 0x7da1800a, 0x592c007e, 0x12e00015, 0x7da1800a, 0xd1000001, 0xcd800001, 0x11a0000c, 0x1264001e,
+ 0x1620000c, 0x7e26000a, 0x7e32000a, 0x12e4001b, 0x7e26000a, 0x5924007e, 0x12640017, 0x7e26000a,
+ 0x19a4003c, 0x12640018, 0x7e26000a, 0xd800002a, 0xce01325a, 0xcd013257, 0xcd413258, 0xc429325a,
+ 0xc40c005e, 0x94c00fdb, 0x96800001, 0x95c00003, 0x7c40c001, 0x7c410001, 0x9780f5ca, 0xcf400100,
+ 0xc40c0007, 0xd9000010, 0x8c00120d, 0x8c001219, 0x8c001232, 0xccc00010, 0x8c001b6d, 0x7c408001,
+ 0x88000000, 0xc42d324e, 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x52ec0008,
+ 0x07740003, 0x04240002, 0x269c003f, 0x7e5e4004, 0x7f67000f, 0x97000003, 0x7f674002, 0x0b740001,
+ 0x53740002, 0x7ef6c011, 0x1ab42010, 0x1ab8c006, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f7b400a,
+ 0x7f6b400a, 0xcf40001c, 0xd2c0001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4180011, 0x9a000003,
+ 0x8c000bec, 0x80000b47, 0xc42c001d, 0xc4313256, 0x1b34060b, 0x1b300077, 0x7f370009, 0x13300017,
+ 0x04340100, 0x26ec00ff, 0xc03a8004, 0x7ef6c00a, 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16,
+ 0xc40c0032, 0xc410001d, 0x28cc0008, 0xccc00013, 0xc415325b, 0x7c418001, 0x7c418001, 0x18580037,
+ 0x251000ff, 0xc421325d, 0x262001ef, 0xce01325d, 0x99800004, 0x7d15400a, 0xcd41325b, 0x80000168,
+ 0x1d54001f, 0xcd41325b, 0x7c408001, 0x88000000, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
+ 0x7eae800a, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0xcd280200, 0xcd680208,
+ 0xcda80210, 0x9b00000c, 0x9b400014, 0x9b800017, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
+ 0x7eae800a, 0xc6930200, 0xc6970208, 0xc69b0210, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037,
+ 0x8c000190, 0xd8000032, 0x90000000, 0xd8000028, 0xd800002b, 0x80000168, 0xd900003f, 0x97c00002,
+ 0xd940003f, 0x80001082, 0xd9000040, 0x97c00002, 0xd9400040, 0x800010de, 0xc40c0021, 0x14fc0011,
+ 0x24f800ff, 0x33b80001, 0x97c0fffc, 0x9b800007, 0xccc00037, 0x8c000190, 0xd8000032, 0xd8000028,
+ 0xd800002b, 0x80001b70, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, 0x04140000,
+ 0xc418000e, 0x29980008, 0x7d83c001, 0xcd800013, 0xc4093249, 0x1888003e, 0x94800020, 0xd8400074,
+ 0x8c000671, 0x9a400009, 0xc418000e, 0x29980008, 0xcd800013, 0xc419324c, 0x259c0001, 0x1598001f,
+ 0x95c00016, 0x95800015, 0x99000003, 0xd8400036, 0x04100001, 0xc40c0021, 0x14d80011, 0x24e000ff,
+ 0x321c0002, 0x32200001, 0x9580ffee, 0x99c00014, 0x96000004, 0xccc00037, 0x04140001, 0x80000c30,
+ 0x9480000a, 0xd8000074, 0xc418005e, 0x95800f29, 0xcf800008, 0x80000c16, 0x94800004, 0xd8000074,
+ 0xc418005e, 0x95800f23, 0xd9c00036, 0x99400002, 0xccc00037, 0xcf800008, 0x80000c16, 0x94800004,
+ 0xd8000074, 0xc418005e, 0x95800f1a, 0xccc00037, 0xd8800036, 0x80001b70, 0x041c0003, 0x042c01c8,
+ 0x8c000d61, 0xc4200007, 0xc40c0077, 0x94c00001, 0x7c418001, 0xc428000e, 0x9600f502, 0x0a200001,
+ 0x98c0f500, 0x2aa80008, 0xce000010, 0x9a000f05, 0xce800013, 0xc431325a, 0xc42d3256, 0x1f30001f,
+ 0x16e4001f, 0xcf01325a, 0xc431325a, 0x97000001, 0x9640f4f4, 0xc434000b, 0x33740002, 0x9b40f4f1,
+ 0xc4353254, 0x16a80008, 0x1aec003c, 0x12a80015, 0x12ec001f, 0x1374000b, 0x7eae800a, 0xc02e4000,
+ 0x1774000d, 0x7eae800a, 0x7f6b400a, 0xcf400100, 0x12780001, 0x2bb80001, 0xc00ac005, 0xc00e0002,
+ 0x28cc8000, 0x28884900, 0x28cc0014, 0x80000ff3, 0xc43c0007, 0x7c40c001, 0x17fc0001, 0xd8400013,
+ 0x9bc00004, 0xd8400029, 0xc424005e, 0x96400ee1, 0xcc41c40a, 0xcc41c40c, 0xcc41c40d, 0x7c414001,
+ 0x24d0007f, 0x15580010, 0x255400ff, 0xcd01c411, 0xcd81c40f, 0xcd41c40e, 0xcc41c410, 0x7c414001,
+ 0x7c418001, 0x04200000, 0x18e80033, 0x18ec0034, 0xcc41c414, 0xcc41c415, 0xcd81c413, 0xcd41c412,
+ 0x18dc0032, 0x7c030011, 0x7c038011, 0x95c00027, 0x96c00002, 0xc431c417, 0xc435c416, 0x96800004,
+ 0x96c00002, 0xc439c419, 0xc43dc418, 0xc41c000e, 0x29dc0008, 0xcdc00013, 0xcf413261, 0x96c00002,
+ 0xcf013262, 0x96800004, 0xcfc13263, 0x96c00002, 0xcf813264, 0x18dc0030, 0xc43c0007, 0x95c00017,
+ 0x17fc0001, 0x9ac00005, 0x7d77000c, 0x9bc00015, 0x9700000a, 0x80000cd6, 0x51b80020, 0x53300020,
+ 0x7f97801a, 0x7f37001a, 0x7f3b000c, 0x9bc0000d, 0x97800002, 0x80000cd6, 0x9a000018, 0xd8400013,
+ 0x28200001, 0x80000ca7, 0x18dc0031, 0x95c00003, 0xc435c40b, 0x9740fffd, 0xd800002a, 0x80001b70,
+ 0xc4280032, 0x2aa80008, 0xce800013, 0xc40d325b, 0x97000002, 0x800012c2, 0xc438001d, 0x1bb81ff0,
+ 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0xc428000e, 0xc43c0007,
+ 0x2aa80008, 0xc438001d, 0xce800013, 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077,
+ 0x7ff3c00a, 0x80000cf4, 0xc43d325a, 0x1bfc0677, 0x13fc0017, 0x04300100, 0x1bb81fe8, 0x7f73400a,
+ 0xc032800b, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, 0x80000c16, 0xc43c0007, 0x7c40c001,
+ 0x18d42011, 0x17fc0001, 0x18d001e8, 0x24cc007f, 0x7cd4c00a, 0x9bc00004, 0xd8400029, 0xc428005e,
+ 0x96800e6c, 0x7c414001, 0x50580020, 0x7d59401a, 0xd1400072, 0xc8140072, 0x596001fc, 0x12200009,
+ 0x7ce0c00a, 0x7c418001, 0x505c0020, 0x7d9d801a, 0x7c41c001, 0x50600020, 0x7de1c01a, 0x7c420001,
+ 0xccc0001b, 0xd140001d, 0xd180001f, 0xd1c00020, 0xd8400021, 0x95000010, 0x04300000, 0xc428000f,
+ 0x9a80ffff, 0xc8240010, 0x7e5e800c, 0x9bc00015, 0x9a80000c, 0x9b000024, 0x28300001, 0x122c0004,
+ 0x06ec0001, 0x0aec0001, 0x9ac0ffff, 0xd8400021, 0x80000d1f, 0xc428000f, 0x9a80ffff, 0xc8240010,
+ 0x566c0020, 0xc428000e, 0x2aa80008, 0xce800013, 0xce413261, 0xcec13262, 0xd800002a, 0x80001b70,
+ 0xc4340032, 0x2b740008, 0xcf400013, 0xc40d325b, 0x96800005, 0x566c0020, 0xce413261, 0xcec13262,
+ 0x800012c2, 0xc438001d, 0x1bb81fe8, 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d,
+ 0x80001b70, 0xc43c0007, 0xc438001d, 0xc428000e, 0x2aa80008, 0xce800013, 0x13f4000c, 0x9bc00006,
+ 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x80000d57, 0xc43d325a, 0x1bfc0677, 0x13fc0017,
+ 0x04300100, 0x1bb81fe8, 0x7f73400a, 0xc0328009, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b,
+ 0x80000c16, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0xc4253246, 0xc4113245, 0x04143000, 0xcd413267,
+ 0x52640020, 0x7e51001a, 0xc4153267, 0x7d2d0011, 0x19640057, 0x19580213, 0x19600199, 0x7da6400a,
+ 0x7e26400a, 0xd1000025, 0xce400024, 0xcdc00026, 0xd8400027, 0x04142000, 0xcfc00013, 0xcd413267,
+ 0xc4153267, 0x99400001, 0x90000000, 0x7c40c001, 0x18d001e8, 0x18d40030, 0x18d80034, 0x05280d83,
+ 0x7c420001, 0x7c424001, 0x86800000, 0x80000d8a, 0x8000016a, 0x80000d95, 0x80000db1, 0x8000016a,
+ 0x80000d95, 0x80000dbc, 0x11540010, 0x7e010001, 0x8c00187c, 0x7d75400a, 0xcd400013, 0xd4610000,
+ 0x9580f3d8, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0xd8000016, 0x526c0020, 0x18e80058,
+ 0x7e2ec01a, 0xd2c00072, 0xc82c0072, 0x5ae0073a, 0x7ea2800a, 0x9940000a, 0xce800024, 0xd2c00025,
+ 0xd4400026, 0xd8400027, 0x9580f3c6, 0xc4380012, 0x9b80ffff, 0x7c408001, 0x88000000, 0xdc3a0000,
+ 0x0bb80001, 0xce800024, 0xd2c00025, 0xcc400026, 0xd8400027, 0x9b80fffb, 0x9980fff5, 0x7c408001,
+ 0x88000000, 0xc02a0001, 0x2aa80001, 0x16200002, 0xce800013, 0xce01c405, 0xd441c406, 0x9580f3b1,
+ 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, 0x32640002, 0x9a40000b, 0x11540010,
+ 0x29540002, 0xcd400013, 0xd4610000, 0x9580f3a5, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001,
+ 0x88000000, 0xd4400078, 0x80000168, 0xd8400029, 0xc40c005e, 0x94c00da7, 0x7c40c001, 0x50500020,
+ 0x7cd0c01a, 0xd0c00072, 0xc8280072, 0x5aac007e, 0x12d80017, 0x7c41c001, 0x7d9d800a, 0x56a00020,
+ 0x2620ffff, 0x7da1800a, 0x51980020, 0x7e82400a, 0x7e58c01a, 0x19d4003d, 0x28182002, 0x99400030,
+ 0x8c00104f, 0xc430000d, 0xc4340035, 0xd800002a, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005,
+ 0xc011000f, 0xc4240004, 0x11a00002, 0x7c908009, 0x12640004, 0x7d614011, 0xc4100026, 0x05980008,
+ 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, 0x20880188, 0x54ec0020, 0x7cb4800a, 0xc4300027,
+ 0x04380008, 0xd1400025, 0xcf000024, 0x20240090, 0x7ca48001, 0xcc800026, 0xccc00026, 0xcec00026,
+ 0xcec00026, 0x28240004, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800005, 0x32280000, 0x9a800002,
+ 0x9a000000, 0x7c018001, 0xd8400027, 0xd8000016, 0xcf80003a, 0xd901a2a4, 0x80001037, 0xc418000e,
+ 0x29980008, 0xcd800013, 0xc421326c, 0x1624001f, 0x9a40fffe, 0xd841325f, 0xd8800033, 0xc43c0009,
+ 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xd8800034, 0xc429325f,
+ 0x26ac0001, 0x9ac0fffe, 0x26ac0002, 0x96c00003, 0xd800002a, 0x80001b70, 0xc43c0007, 0xc430001e,
+ 0xd8800033, 0x13f4000c, 0x1b301ff0, 0x2b300300, 0x2330003f, 0x7f37000a, 0x9680000b, 0xc43c0009,
+ 0x27fc0004, 0x97c0fffe, 0xd8400039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xcf01325b, 0xd8800034,
+ 0x80000c16, 0xd8800034, 0x8c0001a2, 0x80001b70, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
+ 0x18ac0024, 0x2b304000, 0x7c40c001, 0xcec00008, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a,
+ 0x29980008, 0xcd800013, 0xc4113249, 0x1910003e, 0x99000002, 0xd840003d, 0x7c410001, 0xd4400078,
+ 0x51100020, 0xcf01326c, 0x7cd0c01a, 0xc421326c, 0x12a80014, 0x2220003f, 0x7e2a000a, 0xcd800013,
+ 0xce01326c, 0xd8800033, 0xc43c0009, 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022,
+ 0x9bc0ffff, 0xd8800034, 0x80001190, 0x7c40c001, 0x18dc003d, 0x95c00004, 0x041c0001, 0x042c01c8,
+ 0x8c000d61, 0x18d40030, 0x18d001e8, 0x18fc0034, 0x24e8000f, 0x06a80e71, 0x7c418001, 0x7c41c001,
+ 0x86800000, 0x80000edd, 0x80000e91, 0x80000e91, 0x80000ea1, 0x80000eaa, 0x80000e7c, 0x80000e7f,
+ 0x80000e7f, 0x80000e87, 0x80000e8f, 0x8000016a, 0x51dc0020, 0x7d9e001a, 0x80000ee6, 0xc420000e,
+ 0x2a200008, 0xce000013, 0xc4213262, 0xc4253261, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc420000e,
+ 0x2a200008, 0xce000013, 0xc4213264, 0xc4253263, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc820001f,
+ 0x80000ee6, 0x18e82005, 0x51e00020, 0x2aa80000, 0x7da1801a, 0xd1800072, 0xc8180072, 0x59a001fc,
+ 0x12200009, 0x7ea2800a, 0xce80001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8200011,
+ 0x80000ee6, 0x15980002, 0xd8400013, 0xcd81c400, 0xc421c401, 0x95400041, 0xc425c401, 0x52640020,
+ 0x7e26001a, 0x80000ee6, 0x31ac2580, 0x9ac00011, 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d,
+ 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005,
+ 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, 0xc4340004, 0xd8400008, 0x80000ede, 0x39ac7c06,
+ 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002,
+ 0x80000ebc, 0x39acc335, 0x3db0c336, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9002, 0x3db09001,
+ 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9012, 0x3db09011, 0x9ac00003, 0x97000002, 0x80000ebc,
+ 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000ebc, 0xc4340004, 0xd8400013, 0xc5a10000,
+ 0x95400005, 0x05980001, 0xc5a50000, 0x52640020, 0x7e26001a, 0xcf400008, 0x05280eea, 0x7c418001,
+ 0x7c41c001, 0x86800000, 0x80000ef1, 0x8000016a, 0x80000efe, 0x80000f11, 0x80000f2e, 0x80000efe,
+ 0x80000f1f, 0xc4340004, 0xd8400013, 0xce190000, 0x95400005, 0x05980001, 0x56200020, 0xce190000,
+ 0xcf400008, 0x97c0f26f, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x51ec0020, 0x18e80058,
+ 0x7daec01a, 0xd2c00072, 0xc82c0072, 0x5af8073a, 0x7eba800a, 0xd2c00025, 0xce800024, 0xce000026,
+ 0x95400003, 0x56240020, 0xce400026, 0xd8400027, 0x97c0f25c, 0xc4380012, 0x9b80ffff, 0x7c408001,
+ 0x88000000, 0xc02a0001, 0x2aa80001, 0x15980002, 0xce800013, 0xcd81c405, 0xce01c406, 0x95400003,
+ 0x56240020, 0xce41c406, 0x97c0f24e, 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b,
+ 0x32640002, 0x9a40f247, 0xd8800013, 0xce190000, 0x95400004, 0x05980001, 0x56200020, 0xce190000,
+ 0x97c0f240, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x31ac2580, 0x9ac00011,
+ 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009,
+ 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004,
+ 0xc4340004, 0xd8400008, 0x80000ef2, 0x39ac7c06, 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000f40,
+ 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, 0x80000f40, 0x39acc335, 0x3db0c336, 0x9ac00003,
+ 0x97000002, 0x80000f40, 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9002,
+ 0x3db09002, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9012, 0x3db09012, 0x9ac00003, 0x97000002,
+ 0x80000f40, 0x80000ef1, 0xc40c0006, 0x98c0ffff, 0x7c40c001, 0x7c410001, 0x7c414001, 0x7c418001,
+ 0x7c41c001, 0x7c43c001, 0x95c00001, 0xc434000e, 0x2b740008, 0x2b780001, 0xcf400013, 0xd8c1325e,
+ 0xcf80001a, 0xd8400013, 0x7c034001, 0x7c038001, 0x18e0007d, 0x32240003, 0x9a400006, 0x32240000,
+ 0x9a400004, 0xcd01c080, 0xcd41c081, 0x80000f88, 0x51640020, 0x7e52401a, 0xd2400072, 0xc8280072,
+ 0xce81c080, 0x56ac0020, 0x26f0ffff, 0xcf01c081, 0x1af000fc, 0x1334000a, 0x24e02000, 0x7f63400a,
+ 0x18e00074, 0x32240003, 0x9a400006, 0x32240000, 0x9a400004, 0xcd81c082, 0xcdc1c083, 0x80000f9d,
+ 0x51e40020, 0x7e5a401a, 0xd2400072, 0xc8280072, 0xce81c082, 0x56ac0020, 0x26f0ffff, 0xcf01c083,
+ 0x1af000fc, 0x13380016, 0x18e00039, 0x12200019, 0x7fa3800a, 0x7fb7800a, 0x18e0007d, 0x1220001d,
+ 0x7fa3800a, 0x18e00074, 0x12200014, 0x7fa3800a, 0xcf81c078, 0xcfc1c084, 0x80000c16, 0x7c40c001,
+ 0x18dc003d, 0x95c00004, 0x041c0000, 0x042c01c8, 0x8c000d61, 0x18d001e8, 0x31140005, 0x99400003,
+ 0x31140006, 0x95400002, 0x8c00104f, 0x05280fb7, 0x28140002, 0xcd400013, 0x86800000, 0x80000fbe,
+ 0x80000fbe, 0x80000fc2, 0x80000fbe, 0x80000fd1, 0x80000ff2, 0x80000ff2, 0x24cc003f, 0xccc1a2a4,
+ 0x7c408001, 0x88000000, 0x7c414001, 0x18e80039, 0x52a8003b, 0x50580020, 0x24cc003f, 0x7d59401a,
+ 0xd1400072, 0xc8140072, 0x7d69401a, 0xc41c0017, 0x99c0ffff, 0xd140004b, 0xccc1a2a4, 0x7c408001,
+ 0x88000000, 0xc414000d, 0x04180001, 0x24cc003f, 0x7d958004, 0xcd800035, 0xccc1a2a4, 0xc43c000e,
+ 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x97c00002, 0xd8400074, 0xc4100019, 0x7d150005,
+ 0x25100001, 0x9500000b, 0x97c0fffc, 0xc4180021, 0x159c0011, 0x259800ff, 0x31a00003, 0x31a40001,
+ 0x7e25800a, 0x95c0fff5, 0x9580fff4, 0x80000fef, 0xc411326f, 0x1d100010, 0xcd01326f, 0x97c00002,
+ 0xd8000074, 0x80001b70, 0x04380000, 0xc430000d, 0xc8140023, 0xc4180081, 0x13300005, 0xc011000f,
+ 0xc4240004, 0x33b40003, 0x97400003, 0xc0340008, 0x80000ffe, 0xc4340035, 0x11a00002, 0x7c908009,
+ 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x282c2002,
+ 0x208801a8, 0x3e280008, 0x7cb4800a, 0xcec00013, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024,
+ 0x20240030, 0x7ca48001, 0xcc800026, 0xccc00026, 0x9b800013, 0xcc400026, 0x7c414001, 0x28340000,
+ 0xcf400013, 0x507c0020, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
+ 0xcd400026, 0xcfc00026, 0xd4400026, 0x9a80000e, 0x32280000, 0x9a80000b, 0x8000102f, 0xcc000026,
+ 0xcc000026, 0xcc000026, 0xcc000026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
+ 0x7c018001, 0xcc000026, 0xd8400027, 0x1cccfe08, 0xd8800013, 0xcec0003a, 0xccc1a2a4, 0xc43c000e,
+ 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x9bc00007, 0xc428000e, 0x16a80008, 0xce800009,
+ 0xc42c005e, 0x96c00b33, 0xd840003c, 0xc4200025, 0x7da2400f, 0x7da28002, 0x7e1ac002, 0x0aec0001,
+ 0x96400002, 0x7d2ac002, 0x3ef40010, 0x9b40f11d, 0x04380030, 0xcf81325e, 0x80000c16, 0xde410000,
+ 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xddc10000, 0xde010000, 0xc40c000e, 0x7c024001,
+ 0x28cc0008, 0xccc00013, 0xc8100086, 0x5510003f, 0xc40d3249, 0x18cc003e, 0x98c00003, 0x99000011,
+ 0x80001075, 0x9900000c, 0xc40c0026, 0xc4100081, 0xc4140025, 0x7d15800f, 0x7d15c002, 0x7d520002,
+ 0x0a200001, 0x95800002, 0x7cde0002, 0x3e20001a, 0x9a000009, 0x040c0030, 0xccc1325e, 0x80001071,
+ 0xd9c00036, 0xd8400029, 0xc40c005e, 0x94c00b01, 0x04240001, 0xdc200000, 0xdc1c0000, 0xdc180000,
+ 0xdc140000, 0xdc100000, 0xdc0c0000, 0x96400004, 0xdc240000, 0xdc0c0000, 0x80000c16, 0xdc240000,
+ 0x90000000, 0xcc40003f, 0xd8c00010, 0xc4080029, 0xcc80003b, 0xc418000e, 0x18a800e5, 0x1d980008,
+ 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0x18a400e5, 0x12500009, 0x248c0008, 0x94c00006,
+ 0x200c006d, 0x7cd0c00a, 0xccc1326c, 0xc421326c, 0x96000001, 0xcd800013, 0x200c0228, 0x7cd0c00a,
+ 0xccc1326c, 0xc421326c, 0x96000001, 0xc40c002a, 0xc410002b, 0x18881fe8, 0x18d4072c, 0x18cc00d1,
+ 0x7cd4c00a, 0x3094000d, 0x38d80000, 0x311c0003, 0x99400006, 0x30940007, 0x1620001f, 0x9940001d,
+ 0x9a000023, 0x800010c4, 0x9580001a, 0x99c00019, 0xccc00041, 0x25140001, 0xc418002c, 0x9940000d,
+ 0x259c007f, 0x95c00013, 0x19a00030, 0xcdc0001b, 0xd8400021, 0xd8400022, 0xc430000f, 0x17300001,
+ 0x9b00fffe, 0x9a000012, 0xd8400023, 0x800010cb, 0x199c0fe8, 0xcdc0001b, 0xd8400021, 0xd8400023,
+ 0xc430000f, 0x17300001, 0x9b00fffe, 0x800010cb, 0xd8c00010, 0xd8000022, 0xd8000023, 0xc430005e,
+ 0x97000aac, 0x7c408001, 0x88000000, 0xc43c000e, 0xc434002e, 0x2bfc0008, 0x2020002c, 0xcfc00013,
+ 0xce01326c, 0x17780001, 0x27740001, 0x07a810d8, 0xcf400010, 0xc421326c, 0x96000001, 0x86800000,
+ 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0x8000104c, 0xcc400040, 0xd8800010, 0xc4180032,
+ 0x29980008, 0xcd800013, 0x200c007d, 0xccc1325b, 0xc411325b, 0x95000001, 0x7c408001, 0x88000000,
+ 0x28240007, 0xde430000, 0xd4400078, 0x80001190, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
+ 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0xc40d3249, 0x18cc003e,
+ 0x98c00002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x7c40c001, 0x7c410001, 0x7c414001,
+ 0x192400fd, 0x50580020, 0x7d59401a, 0x7c41c001, 0x06681110, 0x7c420001, 0xcc400078, 0x18ac0024,
+ 0x19180070, 0x19100078, 0xcec00008, 0x18f40058, 0x5978073a, 0x7f7b400a, 0x97000001, 0x86800000,
+ 0x80001117, 0x80001118, 0x80001122, 0x8000112d, 0x80001130, 0x80001133, 0x8000016a, 0x8000117b,
+ 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, 0xcf400024, 0xcdc00026,
+ 0xd8400027, 0x8000117b, 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025,
+ 0xcf400024, 0xcdc00026, 0xce000026, 0xd8400027, 0x8000117b, 0xc81c001f, 0x55e00020, 0x80001122,
+ 0xc81c0020, 0x55e00020, 0x80001122, 0x8c00116b, 0xd8400013, 0xc02a0200, 0x7e8e8009, 0x22a8003d,
+ 0x22a80074, 0x2774001c, 0x13740014, 0x7eb6800a, 0x25ecffff, 0x55700020, 0x15f40010, 0x13740002,
+ 0x275c001f, 0x95c00027, 0x7c018001, 0x7f41c001, 0x15dc0002, 0x39e00008, 0x25dc0007, 0x7dc1c01e,
+ 0x05dc0001, 0x96000004, 0x05e40008, 0x8c00116e, 0x80001168, 0x7dc2001e, 0x06200001, 0x05e40008,
+ 0x7e62000e, 0x9a000004, 0x7da58001, 0x8c00116e, 0x80001165, 0x7dc2001e, 0x06200001, 0x7e1a0001,
+ 0x05cc0008, 0x7e0d000e, 0x95000007, 0x7e02401e, 0x06640001, 0x06640008, 0x05d80008, 0x8c00116e,
+ 0x80001168, 0x7dc2401e, 0x06640001, 0x7da58001, 0x8c00116e, 0x05e00008, 0x7da2000c, 0x9600ffe6,
+ 0x17640002, 0x8c00116e, 0x80001190, 0xc4200006, 0x9a00ffff, 0x90000000, 0x8c00116b, 0xc420000e,
+ 0x2a200001, 0xce00001a, 0xce81c078, 0xcec1c080, 0xcc01c081, 0xcd41c082, 0xcf01c083, 0x12640002,
+ 0x22640435, 0xce41c084, 0x90000000, 0x0528117e, 0x312c0003, 0x86800000, 0x80001190, 0x80001185,
+ 0x80001182, 0x80001182, 0xc4300012, 0x9b00ffff, 0x9ac0000c, 0xc03a0400, 0xc4340004, 0xd8400013,
+ 0xd8400008, 0xc418000e, 0x15980008, 0x1198001c, 0x7d81c00a, 0xcdc130b7, 0xcf8130b5, 0xcf400008,
+ 0x04240008, 0xc418000e, 0xc41c0049, 0x19a000e8, 0x29a80008, 0x7de2c00c, 0xce800013, 0xc421325e,
+ 0x26200010, 0xc415326d, 0x9a000006, 0xc420007d, 0x96000004, 0x96c00003, 0xce40003e, 0x800011a3,
+ 0x7d654001, 0xcd41326d, 0x7c020001, 0x96000005, 0xc4100026, 0xc4240081, 0xc4140025, 0x800011b6,
+ 0xc4253279, 0xc415326d, 0xc431326c, 0x2730003f, 0x3b380006, 0x97800004, 0x3f38000b, 0x9b800004,
+ 0x800011b4, 0x04300006, 0x800011b4, 0x0430000b, 0x04380002, 0x7fb10004, 0x7e57000f, 0x7e578002,
+ 0x7d67c002, 0x0be40001, 0x97000002, 0x7d3a4002, 0x202c002c, 0xc421325e, 0x04280020, 0xcec1326c,
+ 0x26200010, 0x3e640010, 0x96000003, 0x96400002, 0xce81325e, 0xc4300028, 0xc434002e, 0x17780001,
+ 0x27740001, 0x07a811cf, 0x9b00feb8, 0xcf400010, 0xc414005e, 0x954009a7, 0x86800000, 0x80000168,
+ 0x80000aa7, 0x80000bfc, 0x800012e9, 0x80000168, 0x8c00120d, 0x7c40c001, 0xccc1c07c, 0xcc41c07d,
+ 0xcc41c08c, 0x7c410001, 0xcc41c079, 0xcd01c07e, 0x7c414001, 0x18f0012f, 0x18f40612, 0x18cc00c1,
+ 0x7f73400a, 0x7cf7400a, 0x39600004, 0x9a000002, 0xc0140004, 0x11600001, 0x18fc003e, 0x9740001c,
+ 0xcf400041, 0xc425c07f, 0x97c00003, 0x166c001f, 0x800011ee, 0x1a6c003e, 0x96c00006, 0x04200002,
+ 0x0a200001, 0x9a00ffff, 0xd8400013, 0x800011e8, 0xc428002c, 0x96800010, 0x26ac007f, 0xcec0001b,
+ 0xd8400021, 0x1ab00030, 0x1aac0fe8, 0xc434000f, 0x9b40ffff, 0x97000008, 0xcec0001b, 0xd8400021,
+ 0xc434000f, 0x9b40ffff, 0x80001205, 0x0a200001, 0x9a00ffff, 0xd8400013, 0xc425c07f, 0x166c001f,
+ 0x11600001, 0x9ac0fffa, 0x8c001232, 0x7c408001, 0x88000000, 0xd8000033, 0xc438000b, 0xc43c0009,
+ 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, 0x7ffbc00c, 0x97c0fffd,
+ 0x90000000, 0xc03a2800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380040,
+ 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f,
+ 0x9b80ffff, 0x04380002, 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010,
+ 0x9bc0fffa, 0x90000000, 0xd8400013, 0xd801c07f, 0xd8400013, 0xc43dc07f, 0xcfc00078, 0xd8000034,
+ 0x90000000, 0xc03ae000, 0xcf81c200, 0xc03a0800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079,
+ 0xcc01c07e, 0x04380040, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380002, 0x0bb80001,
+ 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, 0xc03ae000,
+ 0xcf81c200, 0xc03a4000, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380002,
+ 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000,
+ 0xc40c0007, 0x30d00002, 0x99000052, 0xd8400029, 0xc424005e, 0x9640090f, 0x7c410001, 0xc428000e,
+ 0x1514001f, 0x19180038, 0x2aa80008, 0x99400030, 0x30dc0001, 0xce800013, 0x99c0000a, 0xc42d324e,
+ 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x1ab0c006, 0x52ec0008, 0x8000127f,
+ 0xc42d3258, 0xc4313257, 0x52ec0020, 0x7ef2c01a, 0xc4353259, 0xc429325a, 0x1ab0c012, 0x07740001,
+ 0x04240002, 0x26a0003f, 0x7e624004, 0x7f67800f, 0x97800002, 0x04340000, 0x53740002, 0x7ef6c011,
+ 0x1ab42010, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f73400a, 0x7f6b400a, 0xcf40001c, 0xd2c0001e,
+ 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x1514001f, 0x99400006, 0x9980000a, 0x8c0012e1,
+ 0xc40c0007, 0x04100000, 0x80001267, 0xd800002a, 0xc424005e, 0x964008d7, 0xd9800036, 0x80000c16,
+ 0xc42c001d, 0x95c00005, 0xc431325a, 0x1b300677, 0x11dc000c, 0x800012aa, 0xc4313256, 0x1b34060b,
+ 0x1b300077, 0x7f37000a, 0x13300017, 0x04340100, 0x26ec00ff, 0xc03a8002, 0x7ef6c00a, 0x7edec00a,
+ 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, 0xc4140032, 0xc410001d, 0x29540008, 0xcd400013,
+ 0xc40d325b, 0x1858003f, 0x251000ff, 0x99800007, 0x7d0cc00a, 0xccc1325b, 0xc411325d, 0x251001ef,
+ 0xcd01325d, 0x80000168, 0x18d0006c, 0x18d407f0, 0x9900000e, 0x04100002, 0xc4193256, 0xc41d324f,
+ 0x2598003f, 0x7d190004, 0x7d5d4001, 0x7d52000f, 0x9a000003, 0xcd41324f, 0x800012d8, 0x7d514002,
+ 0xcd41324f, 0x800012d8, 0xc4193259, 0xc41d325a, 0x7d958001, 0x7dd5c002, 0xcd813259, 0xcdc1325a,
+ 0xc411325d, 0x251001ef, 0xcd01325d, 0x1ccc001e, 0xccc1325b, 0xc40d325b, 0x94c00001, 0x7c408001,
+ 0x88000000, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0x9b000004, 0x9b40000c,
+ 0x9b80000f, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, 0x8c000190, 0xd8000032, 0x90000000,
+ 0xd8000028, 0xd800002b, 0x80000168, 0xd980003f, 0x97c00002, 0xd9c0003f, 0x80001082, 0xd9800040,
+ 0x97c00002, 0xd9c00040, 0x800010de, 0xc43c0007, 0x33f80003, 0x97800051, 0xcc80003b, 0x24b00008,
+ 0xc418000e, 0x1330000a, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013,
+ 0xc4353249, 0x1b74003e, 0x9b400002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x97000001,
+ 0x7c434001, 0x1b4c00f8, 0x7c410001, 0x7c414001, 0x50700020, 0x04e81324, 0x18ac0024, 0x7c41c001,
+ 0x50600020, 0xcc400078, 0x30e40004, 0x9a400007, 0x7d71401a, 0x596401fc, 0x12640009, 0x1b74008d,
+ 0x7e76400a, 0x2a640000, 0xcec00008, 0x86800000, 0x8000016a, 0x8000016a, 0x8000016a, 0x8000016a,
+ 0x8000132c, 0x8000133b, 0x80001344, 0x8000016a, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42530b5,
+ 0x1a68003a, 0x9a80fffe, 0x2024003a, 0xc418000e, 0x25980700, 0x11980014, 0x7d19000a, 0xcd0130b7,
+ 0xce4130b5, 0xcf400008, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, 0x9a80ffff,
+ 0xc4240011, 0x7de6800f, 0x9a80ffea, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f,
+ 0x9a80ffff, 0xc8240011, 0x7de1c01a, 0x7de6800f, 0x9a80ffe0, 0x80001190, 0x8c00104f, 0x28182002,
+ 0xc430000d, 0xc4340035, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, 0xc4240004, 0x11a00002,
+ 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008,
+ 0x7cb4800a, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, 0x20240030, 0x7ca48001, 0xcc800026,
+ 0x7c434001, 0x1b4c00f8, 0xcf400026, 0xcc400026, 0x28340000, 0xcf400013, 0x7c414001, 0x507c0020,
+ 0x30e40004, 0x9a400005, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
+ 0xcd400026, 0xcfc00026, 0xd4400026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
+ 0x7c018001, 0xd8400027, 0xd8800013, 0x04380028, 0xcec0003a, 0xcf81a2a4, 0x80001037, 0xd8400029,
+ 0xc40c005e, 0x94c007eb, 0x7c40c001, 0x50500020, 0x7d0d001a, 0xd1000072, 0xc8100072, 0x591c01fc,
+ 0x11dc0009, 0x45140210, 0x595801fc, 0x11980009, 0x29dc0000, 0xcdc0001c, 0xd140001e, 0xd8400021,
+ 0xc418000f, 0x9980ffff, 0xc4200011, 0x1624001f, 0x96400069, 0xc40c000e, 0x28cc0008, 0xccc00013,
+ 0xce013249, 0x1a307fe8, 0xcf00000a, 0x23304076, 0xd1000001, 0xcf000001, 0xc41d3254, 0xc4253256,
+ 0x18cc00e8, 0x10cc0015, 0x4514020c, 0xd140001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011,
+ 0xce013248, 0x1a2001e8, 0x12200014, 0x2a204001, 0xce000013, 0x1a64003c, 0x1264001f, 0x11dc0009,
+ 0x15dc000b, 0x7dcdc00a, 0x7e5dc00a, 0xcdc00100, 0xd8800013, 0xd8400010, 0xd800002a, 0xd8400008,
+ 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, 0x04300010, 0xdf430000, 0x7c434001,
+ 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, 0xdf030000, 0xd4412e40, 0xd8400013,
+ 0xcc41c030, 0xcc41c031, 0x248dfffe, 0xccc12e00, 0xd8800013, 0xcc812e00, 0x7c434001, 0x7c434001,
+ 0x8c00142b, 0xd8000010, 0xc40c000e, 0x28cc0008, 0xccc00013, 0x45140248, 0xd140001e, 0xd8400021,
+ 0xc418000f, 0x9980ffff, 0xc8200011, 0xce013257, 0x56200020, 0xce013258, 0x0434000c, 0xdb000024,
+ 0xd1400025, 0xd8000026, 0xd8000026, 0xd8400027, 0x45540008, 0xd140001e, 0xd8400021, 0xc418000f,
+ 0x9980ffff, 0xc8200011, 0xce013259, 0x56200020, 0xc0337fff, 0x7f220009, 0xce01325a, 0x55300020,
+ 0x7d01c001, 0x042c01d0, 0x8c000d61, 0x06ec0004, 0x7f01c001, 0x8c000d61, 0x041c0002, 0x042c01c8,
+ 0x8c000d61, 0xc4380012, 0x9b80ffff, 0xd800002a, 0x80000aa7, 0xd800002a, 0x7c408001, 0x88000000,
+ 0xd8400029, 0x7c40c001, 0x50500020, 0x8c001427, 0x7cd0c01a, 0xc4200007, 0xd0c00072, 0xc8240072,
+ 0xd240001e, 0x7c414001, 0x19682011, 0x5a6c01fc, 0x12ec0009, 0x7eeac00a, 0x2aec0000, 0xcec0001c,
+ 0xd8400021, 0xc430000f, 0x9b00ffff, 0xc4180011, 0x7c438001, 0x99800007, 0xdf830000, 0xcfa0000c,
+ 0x8c00142b, 0xd4400078, 0xd800002a, 0x80001b70, 0x8c00142b, 0xd800002a, 0x80001b70, 0xd8000012,
+ 0xc43c0008, 0x9bc0ffff, 0x90000000, 0xd8400012, 0xc43c0008, 0x97c0ffff, 0x90000000, 0xc4380007,
+ 0x7c40c001, 0x17b80001, 0x18d40038, 0x7c410001, 0x9b800004, 0xd8400029, 0xc414005e, 0x9540073d,
+ 0x18c80066, 0x7c414001, 0x30880001, 0x7c418001, 0x94800008, 0x8c00187c, 0xcf400013, 0xc42c0004,
+ 0xd8400008, 0xcd910000, 0xcec00008, 0x7d410001, 0x043c0000, 0x7c41c001, 0x7c420001, 0x04240001,
+ 0x06200001, 0x4220000c, 0x0a640001, 0xcc000078, 0x9a40fffe, 0x24e80007, 0x24ec0010, 0xd8400013,
+ 0x9ac00006, 0xc42c0004, 0xd8400008, 0xc5310000, 0xcec00008, 0x80001465, 0x51540020, 0x7d15001a,
+ 0xd1000072, 0xc82c0072, 0xd2c0001e, 0x18f02011, 0x5aec01fc, 0x12ec0009, 0x7ef2c00a, 0x2aec0000,
+ 0xcec0001c, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc4300011, 0x96800012, 0x12a80001, 0x0aa80001,
+ 0x06a8146a, 0x7f1f0009, 0x86800000, 0x7f1b400f, 0x80001478, 0x7f1b400e, 0x80001478, 0x7f1b400c,
+ 0x8000147a, 0x7f1b400d, 0x8000147a, 0x7f1b400f, 0x8000147a, 0x7f1b400e, 0x8000147a, 0x7f334002,
+ 0x97400014, 0x8000147b, 0x9b400012, 0x9b800005, 0x9bc0001f, 0x7e024001, 0x043c0001, 0x8000144a,
+ 0xc40c0032, 0xc438001d, 0x28cc0008, 0xccc00013, 0xc43d325b, 0x1bb81ff0, 0x7fbfc00a, 0xcfc1325b,
+ 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0x94800007, 0x8c00187c, 0xcf400013, 0xc42c0004,
+ 0xd8400008, 0xcd910000, 0xcec00008, 0x9b800003, 0xd800002a, 0x80001b70, 0xc40c0032, 0x28cc0008,
+ 0xccc00013, 0xc40d325b, 0x800012c2, 0xc40c000e, 0xc43c0007, 0xc438001d, 0x28cc0008, 0xccc00013,
+ 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x800014a9, 0xc43d325a,
+ 0x1bfc0677, 0x04300100, 0x1bb81ff0, 0x7f73400a, 0xc0328007, 0x7fb7800a, 0x13fc0017, 0x7ff3c00a,
+ 0x7ffbc00a, 0xcfc1325b, 0xc03a0002, 0xc4340004, 0xd8400013, 0xd8400008, 0xcf8130b5, 0xcf400008,
+ 0x80000c16, 0x043c0000, 0xc414000e, 0x29540008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020,
+ 0x7dd9c01a, 0x45dc0390, 0xc4313267, 0x04183000, 0xcd813267, 0x1b380057, 0x1b340213, 0x1b300199,
+ 0x7f7b400a, 0x7f73400a, 0xcf400024, 0xd1c00025, 0xcc800026, 0x7c420001, 0xce000026, 0x7c424001,
+ 0xce400026, 0x7c428001, 0xce800026, 0x7c42c001, 0xcec00026, 0x7c430001, 0xcf000026, 0x7c434001,
+ 0xcf400026, 0x7c438001, 0xcf800026, 0xd8400027, 0xcd400013, 0x04182000, 0xcd813267, 0xd840004f,
+ 0x1a0800fd, 0x109c000a, 0xc4193265, 0x7dd9c00a, 0xcdc13265, 0x2620ffff, 0xce080228, 0x9880000e,
+ 0xce480250, 0xce880258, 0xd8080230, 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270,
+ 0xd8080278, 0xd8080280, 0xd800004f, 0x97c0ec75, 0x90000000, 0x040c0000, 0x041c0010, 0x26180001,
+ 0x09dc0001, 0x16200001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80230, 0xd8080238, 0xd8080240,
+ 0xd8080248, 0x040c0000, 0xce480250, 0xce880258, 0x52a80020, 0x7e6a401a, 0x041c0020, 0x66580001,
+ 0x09dc0001, 0x56640001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80260, 0xd8080268, 0xd8080270,
+ 0xd8080278, 0xd8080280, 0x040c0000, 0xcec80288, 0xcf080290, 0xcec80298, 0xcf0802a0, 0x040c0000,
+ 0x041c0010, 0xcf4802a8, 0x27580001, 0x09dc0001, 0x17740001, 0x95800002, 0x04cc0001, 0x99c0fffb,
+ 0xccc802b0, 0xd80802b8, 0x178c000b, 0x27b8003f, 0x7cf8c001, 0xcf8802c0, 0xccc802c8, 0xcf8802d0,
+ 0xcf8802d8, 0xd800004f, 0x97c00002, 0x90000000, 0x7c408001, 0x88000000, 0xc40c000e, 0x28cc0008,
+ 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c418001, 0x25b8ffff, 0xc4930240, 0xc48f0238, 0x04cc0001,
+ 0x24cc000f, 0x7cd2800c, 0x9a80000b, 0xc5230309, 0x2620ffff, 0x7e3a400c, 0x9a400004, 0x05100001,
+ 0x2510000f, 0x80001539, 0xcd08034b, 0xd4400078, 0x80000168, 0xc48f0230, 0xc4930240, 0x98c00004,
+ 0xcd880353, 0x8c00163f, 0xc49b0353, 0xc4930238, 0xc48f0228, 0x05100001, 0x2510000f, 0x7cd14005,
+ 0x25540001, 0x99400004, 0x05100001, 0x2510000f, 0x8000154f, 0xc48f0230, 0x7c41c001, 0xcd080238,
+ 0xcd08034b, 0x08cc0001, 0x2598ffff, 0x3d200008, 0xccc80230, 0xcd900309, 0xd8100319, 0x04340801,
+ 0x2198003f, 0xcf400013, 0xcd910ce7, 0xc4190ce6, 0x7d918005, 0x25980001, 0x9580fffd, 0x7d918004,
+ 0xcd810ce6, 0x9a000003, 0xcdd1054f, 0x8000156e, 0x090c0008, 0xcdcd050e, 0x040c0000, 0x110c0014,
+ 0x28cc4001, 0xccc00013, 0xcc41230a, 0xcc41230b, 0xcc41230c, 0xcc41230d, 0xcc480329, 0xcc48032a,
+ 0xcc4802e0, 0xd8000055, 0xc48f02e0, 0x24d8003f, 0x09940001, 0x44100001, 0x9580002c, 0x95400005,
+ 0x09540001, 0x51100001, 0x69100001, 0x8000157f, 0x24cc003f, 0xc4970290, 0xc49b0288, 0x51540020,
+ 0x7d59401a, 0xc49b02a0, 0xc49f0298, 0x51980020, 0x7d9d801a, 0x041c0040, 0x04200000, 0x7dcdc002,
+ 0x7d924019, 0x7d26400c, 0x09dc0001, 0x9a400008, 0x51100001, 0x06200001, 0x99c0fffa, 0xc48f0230,
+ 0xc4930240, 0x8c00163f, 0x80001579, 0x7d010021, 0x7d914019, 0xc4930238, 0x55580020, 0xcd480298,
+ 0xcd8802a0, 0x10d40010, 0x12180016, 0xc51f0309, 0x7d95800a, 0x7d62000a, 0x7dd9c00a, 0xd8400013,
+ 0xcdd00309, 0xce113320, 0xc48f02e0, 0xc49b02b0, 0x18dc01e8, 0x7dd9400e, 0xc48f0230, 0xc4930240,
+ 0x95c0001d, 0x95400003, 0x8c00163f, 0x800015aa, 0xc48f0238, 0xc4a302b8, 0x12240004, 0x7e5e400a,
+ 0xc4ab02a8, 0x04100000, 0xce4c0319, 0x7d9d8002, 0x7ea14005, 0x25540001, 0x99400004, 0x06200001,
+ 0x2620000f, 0x800015bc, 0x09dc0001, 0x04240001, 0x7e624004, 0x06200001, 0x7d25000a, 0x2620000f,
+ 0x99c0fff4, 0xd8400013, 0xcd0d3330, 0xce0802b8, 0xcd8802b0, 0xc4ab02e0, 0x1aa807f0, 0xc48f02d0,
+ 0xc49702d8, 0xc49b02c8, 0xc49f02c0, 0x96800028, 0x7d4e000f, 0x9600000b, 0x7d964002, 0x7e6a000f,
+ 0x96000003, 0x7d694001, 0x800015e9, 0x7cde4002, 0x7e6a000f, 0x96000008, 0x7de94001, 0x800015e9,
+ 0x7cd64002, 0x7e6a000e, 0x96000003, 0x7d694001, 0x800015e9, 0xc48f0230, 0xc4930240, 0x8c00163f,
+ 0x800015cd, 0xc4930238, 0x7d698002, 0xcd4802d8, 0x129c0008, 0xc50f0319, 0x11a0000e, 0x11140001,
+ 0xc4340004, 0xd8400008, 0xd8400013, 0x7e1e000a, 0x1198000a, 0xcd953300, 0x7e0e000a, 0x12a8000a,
+ 0xce953301, 0xce100319, 0xcf400008, 0xc4b70280, 0xc4b30278, 0x7f73800a, 0x536c0020, 0x7ef2c01a,
+ 0x9780eb68, 0x8c001608, 0xd8080278, 0xd8080280, 0x7c408001, 0x88000000, 0x043c0003, 0x80001609,
+ 0x043c0001, 0x30b40000, 0x9b400011, 0xc4b70258, 0xc4b30250, 0x53780020, 0x7fb3801a, 0x7faf8019,
+ 0x04300020, 0x04280000, 0x67b40001, 0x0b300001, 0x57b80001, 0x97400002, 0x06a80001, 0x9b00fffb,
+ 0xc4bb0260, 0x7fab8001, 0xcf880260, 0x04300020, 0x04280000, 0x66f40001, 0x0b300001, 0x56ec0001,
+ 0x97400005, 0x8c001628, 0xc4353247, 0x7f7f4009, 0x9b40fffe, 0x06a80001, 0x9b00fff7, 0x90000000,
+ 0x269c0007, 0x11dc0008, 0x29dc0008, 0x26a00018, 0x12200003, 0x7de1c00a, 0x26a00060, 0x06200020,
+ 0x16200001, 0x7de1c00a, 0xcdc00013, 0x90000000, 0x269c0018, 0x26a00007, 0x26a40060, 0x11dc0006,
+ 0x12200006, 0x16640001, 0x29dc0008, 0x7de1c00a, 0x7de5c00a, 0xcdc00013, 0x90000000, 0xc4b70228,
+ 0x05100001, 0x04cc0001, 0x2510000f, 0xccc80230, 0x7f514005, 0x25540001, 0x99400004, 0x05100001,
+ 0x2510000f, 0x80001644, 0xc4b30248, 0xcd080240, 0x7f130005, 0x27300001, 0x9b000002, 0x8c001688,
+ 0x8c00120d, 0x8c001219, 0x8c001232, 0x04300001, 0x04340801, 0x7f130004, 0xcf400013, 0xcf01051e,
+ 0xc42d051f, 0x7ed2c005, 0x26ec0001, 0x96c0fffd, 0xcf01051f, 0xd8000055, 0xc5170309, 0x195c07f0,
+ 0x196007f6, 0x04340000, 0x95c00008, 0x09dc0001, 0x04340001, 0x95c00005, 0x09dc0001, 0x53740001,
+ 0x6b740001, 0x80001665, 0xc4a702a0, 0xc4ab0298, 0x52640020, 0x7e6a401a, 0x7f634014, 0x7e76401a,
+ 0xc4300004, 0xd8400008, 0xd8400013, 0x56680020, 0xd8113320, 0xce480298, 0xce8802a0, 0xc5170319,
+ 0xc4b702b0, 0x255c000f, 0x7f5f4001, 0xd8113330, 0xcf4802b0, 0x11340001, 0x195c07e8, 0x196007ee,
+ 0xd8353300, 0x7e1e4001, 0xd8353301, 0xce4802d0, 0xd8100309, 0xd8100319, 0xcf000008, 0x90000000,
+ 0xc4970258, 0xc48f0250, 0x51540020, 0x7cd4c01a, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a,
+ 0x04140020, 0x04280000, 0x64d80001, 0x09540001, 0x54cc0001, 0x95800060, 0x8c001628, 0xc4193247,
+ 0x25980001, 0x9580005c, 0x7dc24001, 0xc41d3248, 0x25dc000f, 0x7dd2000c, 0x96000057, 0xc41d3255,
+ 0xc435324f, 0x7df5c00c, 0x99c00004, 0xc4193265, 0x25980040, 0x9580fffe, 0xc439325b, 0x1bb0003f,
+ 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, 0x1bb000e4,
+ 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x800016f1, 0xce400013, 0xc033ffff,
+ 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe, 0xd8c00033,
+ 0xc4300009, 0x27300008, 0x9700fffe, 0x1a7003e6, 0x27380003, 0x13b80004, 0x27300003, 0x13300003,
+ 0x7fb38001, 0x1a7000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, 0x1a700064,
+ 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x0b300003, 0x800016df, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
+ 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xce400013, 0xc431325d,
+ 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xd841325d, 0x2030007b, 0xcf01325b,
+ 0x800016f2, 0xd841325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9c, 0x8c001608,
+ 0xd8080278, 0xd8080280, 0x90000000, 0xd840004f, 0xc414000e, 0x29540008, 0xcd400013, 0xc43d3265,
+ 0x1bc800ea, 0xd80802e9, 0x7c40c001, 0x18fc0064, 0x9bc00042, 0xc4193246, 0xc41d3245, 0x51980020,
+ 0x7dd9801a, 0x45980400, 0xc4313267, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x9bc00001, 0x1b380057,
+ 0x1b340213, 0x1b300199, 0x7f7b400a, 0x7f73400a, 0xcf400024, 0x14f4001d, 0xc4bf02e9, 0x9bc0001c,
+ 0x7c410001, 0x192807fa, 0xc4bf0258, 0xc4a70250, 0x53fc0020, 0x7e7e401a, 0x042c0000, 0x04300000,
+ 0x667c0001, 0x56640001, 0x06ec0001, 0x97c0fffd, 0x07300001, 0x0aec0001, 0x7eebc00c, 0x06ec0001,
+ 0x97c0fff8, 0x0b300001, 0x43300007, 0x53300002, 0x7db30011, 0xd3000025, 0xc03ec005, 0x2bfca200,
+ 0xcfc00026, 0xccc00026, 0xcd000026, 0x192807fa, 0xc01f007f, 0x7d1d0009, 0x2110007d, 0x8c001628,
+ 0x203c003f, 0xcfc13256, 0x8c0017f5, 0xcd013254, 0x18fc01e8, 0xcfc13248, 0x8c00185b, 0xd8413247,
+ 0x0b740001, 0x9b40ffd5, 0xd800004f, 0xc4bf02e9, 0x97c0ea24, 0x90000000, 0x14d4001d, 0xc4930260,
+ 0x7d52400e, 0xc49f0258, 0xc4a30250, 0x51dc0020, 0x7de1801a, 0x96400017, 0x7d534002, 0xc4af0270,
+ 0x7dae4005, 0x26640001, 0x32e0001f, 0x9a400006, 0x06ec0001, 0x96000002, 0x042c0000, 0xcec80270,
+ 0x8000174f, 0x0b740001, 0x8c00178a, 0x05100001, 0x9b40fff3, 0xc4af0280, 0xc4b30278, 0x52ec0020,
+ 0x7ef2c01a, 0x8c001608, 0xd8080278, 0xd8080280, 0xc4ab0268, 0x7daa4005, 0x26640001, 0x32a0001f,
+ 0x9a400005, 0x06a80001, 0x96000002, 0x24280000, 0x80001765, 0x7c410001, 0xc01f007f, 0x09540001,
+ 0x7d1d0009, 0x2110007d, 0x8c001628, 0xd8013256, 0x8c0017f2, 0xcd013254, 0xc4113248, 0x15100004,
+ 0x11100004, 0xc4b3034b, 0x7f13000a, 0xcf013248, 0xc4930260, 0x8c001855, 0x32a4001f, 0xd8413247,
+ 0xd800004f, 0x09100001, 0x06a80001, 0x96400002, 0x24280000, 0xcd080260, 0xce880268, 0x9940ffc0,
+ 0x7c408001, 0x88000000, 0x7ec28001, 0x8c001628, 0x32e0001f, 0xc4253247, 0x26640001, 0x9640005e,
+ 0xc4293265, 0xc4253255, 0xc431324f, 0x7e72400c, 0x26a80040, 0x9a400002, 0x9680fff7, 0xc429325b,
+ 0x1aa4003f, 0x96400049, 0x1aa400e8, 0x32680003, 0x9a800046, 0x32640002, 0x9640000a, 0xc4293260,
+ 0x1aa400e4, 0x32640004, 0x96400040, 0xc425325d, 0x26640010, 0x9a40fffe, 0x800017e2, 0xcdc00013,
+ 0xc027ffff, 0x2e6400ff, 0xc429325b, 0x7e6a4009, 0xce41325b, 0xc429325b, 0x26a800ff, 0x9a80fffe,
+ 0xd8c00033, 0xc4240009, 0x26640008, 0x9640fffe, 0x19e403e6, 0x26680003, 0x12a80004, 0x26640003,
+ 0x12640003, 0x7ea68001, 0x19e400e8, 0x7ea68001, 0x12640001, 0x7ea68001, 0x06a80002, 0xd8400013,
+ 0x19e40064, 0x32640002, 0x96400009, 0x16a40005, 0x06640003, 0xce412082, 0xcc01203f, 0xd8400013,
+ 0xcc01203f, 0x0a640003, 0x800017d0, 0x16a40005, 0xce412082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x12640005, 0x7ea64002, 0xc4292083, 0x7ea68005, 0x26a80001, 0x9a80ffdf, 0xd8c00034, 0xcdc00013,
+ 0xc425325d, 0x26640010, 0x9a40fffe, 0xc429325b, 0x26a400ff, 0x9a40ffca, 0xd841325d, 0x2024007b,
+ 0xce41325b, 0x800017e3, 0xd841325d, 0xc4a70280, 0xc4ab0278, 0x52640020, 0x7e6a401a, 0x04280001,
+ 0x7eae8014, 0x7e6a401a, 0x56680020, 0xce480278, 0xce880280, 0x06ec0001, 0x96000002, 0x042c0000,
+ 0xcec80270, 0x90000000, 0x7c438001, 0x7c420001, 0x800017fe, 0xc4bf02e9, 0x9bc00006, 0x7c438001,
+ 0x7c420001, 0xcf800026, 0xce000026, 0x800017fe, 0xc43b02eb, 0xc42302ec, 0xcf813245, 0xce013246,
+ 0x52200020, 0x7fa3801a, 0x47b8020c, 0x15e00008, 0x1220000a, 0x2a206032, 0x513c001e, 0x7e3e001a,
+ 0xc4bf02e9, 0x9bc00005, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x8000180f, 0xcd400013, 0xc4313267,
+ 0x1b3c0077, 0x1b300199, 0x7ff3000a, 0x1330000a, 0x2b300032, 0x043c3000, 0xcfc13267, 0xc43d3267,
+ 0xd200000b, 0xc4200007, 0xd3800002, 0xcf000002, 0xd8000040, 0x96000002, 0xd8400040, 0xd8400018,
+ 0x043c2000, 0xcfc13267, 0xd8000018, 0xd8800010, 0xcdc00013, 0x7dc30001, 0xdc1e0000, 0x04380032,
+ 0xcf80000e, 0x8c001427, 0xcc413248, 0xc43d3269, 0x27fc000f, 0x33fc0003, 0x97c00011, 0x043c001f,
+ 0xdfc30000, 0xd4413249, 0x7c43c001, 0x7c43c001, 0x043c0024, 0x0bfc0021, 0xdfc30000, 0xd441326a,
+ 0x173c0008, 0x1b300303, 0x7f3f0001, 0x043c0001, 0x7ff3c004, 0xcfc13084, 0x80001842, 0x043c0024,
+ 0xdfc30000, 0xd4413249, 0x7c43c001, 0x23fc003f, 0xcfc1326d, 0x0bb80026, 0xdf830000, 0xd441326e,
+ 0x7c438001, 0x7c438001, 0xc4393265, 0x1fb8ffc6, 0xddc30000, 0xcf813265, 0x9a000003, 0xcdc0000c,
+ 0x80001852, 0xcdc0000d, 0xce000010, 0x8c00142b, 0x90000000, 0x7c41c001, 0x7c420001, 0xcdc13252,
+ 0xce013253, 0x8c001628, 0x80001878, 0xc49f02e9, 0x99c00018, 0x7c41c001, 0x7c420001, 0xcdc13252,
+ 0xce013253, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x97c0ffff,
+ 0xcdc00026, 0xce000026, 0xd8400027, 0xc41c0012, 0x99c0ffff, 0xc43c000e, 0x2bfc0008, 0xcfc00013,
+ 0x043c2000, 0xcfc13267, 0x8c001628, 0x80001878, 0xc41f02ed, 0xc42302ee, 0xcdc13252, 0xce013253,
+ 0x04200001, 0x7e2a0004, 0xce013084, 0x90000000, 0x28340001, 0x313c0bcc, 0x9bc00010, 0x393c051f,
+ 0x9bc00004, 0x3d3c050e, 0x9bc0000c, 0x97c0000c, 0x393c0560, 0x9bc00004, 0x3d3c054f, 0x9bc00007,
+ 0x97c00007, 0x393c1538, 0x9bc00005, 0x3d3c1537, 0x9bc00002, 0x97c00002, 0x2b740800, 0x90000000,
+ 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e8007c, 0x7c42c001,
+ 0x06a8189a, 0x86800000, 0x8000189e, 0x800018c5, 0x800018f2, 0x8000016a, 0x7c414001, 0x18d0007e,
+ 0x50580020, 0x09200001, 0x7d59401a, 0xd1400072, 0xc8140072, 0x09240002, 0x7c418001, 0x7c41c001,
+ 0x99000011, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42130b5, 0x1a24002c, 0x9a40fffe, 0x2020002c,
+ 0xc418000d, 0x1198001c, 0x10cc0004, 0x14cc0004, 0x7cd8c00a, 0xccc130b7, 0xce0130b5, 0xcf400008,
+ 0x80000168, 0xd1400025, 0x5978073a, 0x2bb80002, 0xcf800024, 0xcd800026, 0xcdc00026, 0xd8400027,
+ 0x9600e8a8, 0xc4300012, 0x9b00ffff, 0x9640e8a5, 0x800018a9, 0x04140000, 0xc55b0309, 0x3d5c0010,
+ 0x05540001, 0x2598ffff, 0x09780001, 0x7dad800c, 0x99c0ffd2, 0x9580fff9, 0xc4970258, 0xc4930250,
+ 0x51540020, 0x7d15001a, 0x04140020, 0x04280000, 0x442c0000, 0x65180001, 0x09540001, 0x55100001,
+ 0x9580000b, 0x8c001628, 0xc41d3248, 0x04300001, 0x7f2b0014, 0x25dc000f, 0x7df9c00c, 0x95c00004,
+ 0x7ef2c01a, 0xd8c13260, 0xd901325d, 0x06a80001, 0x9940fff1, 0x04140020, 0x04280000, 0x66d80001,
+ 0x09540001, 0x56ec0001, 0x95800005, 0x8c001628, 0xc421325d, 0x26240007, 0x9a40fffe, 0x06a80001,
+ 0x9940fff7, 0x8000189e, 0x04140020, 0x04280000, 0x09540001, 0x8c001628, 0xc41d3254, 0xc023007f,
+ 0x19e4003e, 0x7de1c009, 0x7dee000c, 0x96400008, 0x96000007, 0xd8c13260, 0xd901325d, 0xc421325d,
+ 0x261c0007, 0x99c0fffe, 0x8000189e, 0x06a80001, 0x9940fff0, 0x8000189e, 0xc40c000e, 0x28cc0008,
+ 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e00064, 0x06281911, 0x14f4001d, 0x24cc0003,
+ 0x86800000, 0x80001915, 0x800019af, 0x80001a2b, 0x8000016a, 0xcc48032b, 0xcc480333, 0xcc48033b,
+ 0xcc480343, 0x98800011, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267,
+ 0x04203000, 0xce013267, 0xc4213267, 0x9a000001, 0x1b3c0057, 0x1b200213, 0x1b300199, 0x7e3e000a,
+ 0x7e32000a, 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278,
+ 0x52ec0020, 0x7ef2c01a, 0x04180000, 0x04140020, 0x04280000, 0x7f438001, 0x8c001628, 0xc41d3247,
+ 0x25dc0001, 0x95c00068, 0xc4213254, 0x1a1c003e, 0x95c00065, 0xc01f007f, 0x7e1e0009, 0x97800062,
+ 0x0bb80001, 0x43bc0008, 0x7fcbc001, 0xc7df032b, 0x7e1fc00c, 0x97c0fffa, 0x043c0101, 0x94c00002,
+ 0x043c0102, 0xc439325b, 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002,
+ 0x97000009, 0xc4393260, 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe,
+ 0x80001994, 0x8c001628, 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b,
+ 0x27b800ff, 0x9b80fffe, 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003,
+ 0x13b80004, 0x27300003, 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001,
+ 0x07b80002, 0xd8400013, 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082,
+ 0xcc01203f, 0xd8400013, 0xcc01203f, 0x0b300003, 0x80001982, 0x17b00005, 0xcf012082, 0xcc01203f,
+ 0xd8400013, 0xcc01203f, 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf,
+ 0xd8c00034, 0xcdc00013, 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffcb,
+ 0xcfc1325d, 0x2030007b, 0xcf01325b, 0x80001995, 0xcfc1325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a,
+ 0x98800009, 0x41bc0007, 0x53fc0002, 0x7e7fc011, 0xd3c00025, 0xd8000026, 0xd8400027, 0xc43c0012,
+ 0x9bc0ffff, 0x653c0001, 0x7dbd8001, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff8f, 0xc43c000e,
+ 0x2bfc0008, 0xcfc00013, 0x043c2000, 0xcfc13267, 0xd8080278, 0xd8080280, 0x80000168, 0x7c410001,
+ 0x04140000, 0xc55b0309, 0x3d5c0010, 0x2598ffff, 0x05540001, 0x7d91800c, 0x95c00003, 0xd4400078,
+ 0x80000168, 0x9580fff8, 0x09780001, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280,
+ 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x09540001, 0x55100001,
+ 0x9580005d, 0x8c001628, 0xc4253247, 0x26640001, 0x04200101, 0x96400058, 0x7dc24001, 0xc41d3248,
+ 0x25dc000f, 0x7df9c00c, 0x95c00053, 0x94c00002, 0x04200102, 0x7e41c001, 0xc425325b, 0x1a70003f,
+ 0x97000049, 0x1a7000e8, 0x33240003, 0x9a400046, 0x33300002, 0x9700000a, 0xc4253260, 0x1a7000e4,
+ 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001a21, 0xcdc00013, 0xc033ffff,
+ 0x2f3000ff, 0xc425325b, 0x7f270009, 0xcf01325b, 0xc425325b, 0x266400ff, 0x9a40fffe, 0xd8c00033,
+ 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27240003, 0x12640004, 0x27300003, 0x13300003,
+ 0x7e724001, 0x19f000e8, 0x7e724001, 0x13300001, 0x7e724001, 0x06640002, 0xd8400013, 0x19f00064,
+ 0x33300002, 0x97000009, 0x16700005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x0b300003, 0x80001a0f, 0x16700005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
+ 0x7e730002, 0xc4252083, 0x7e724005, 0x26640001, 0x9a40ffdf, 0xd8c00034, 0xcdc00013, 0xc431325d,
+ 0x27300010, 0x9b00fffe, 0xc425325b, 0x267000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, 0xcf01325b,
+ 0x80001a22, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9f, 0xd4400078,
+ 0xd8080278, 0xd8080280, 0x80000168, 0x8c001a31, 0xd4400078, 0xd8080278, 0xd8080280, 0x7c408001,
+ 0x88000000, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, 0x04203000,
+ 0xce013267, 0xc4213267, 0x9a000001, 0x1b180057, 0x1b200213, 0x1b300199, 0x7e1a000a, 0x7e32000a,
+ 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, 0x52ec0020,
+ 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x95800060, 0x8c001628, 0xc4193247, 0x25980001,
+ 0x04200101, 0x94c00005, 0x30f00005, 0x04200005, 0x9b000002, 0x04200102, 0x95800056, 0xc439325b,
+ 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260,
+ 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001aa2, 0xcdc00013,
+ 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe,
+ 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, 0x13b80004, 0x27300003,
+ 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013,
+ 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013,
+ 0xcc01203f, 0x0b300003, 0x80001a90, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xcdc00013,
+ 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xce01325d, 0x2030007b,
+ 0xcf00325b, 0x80001aa3, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0xc49b02e9, 0x99800005,
+ 0xd2400025, 0x4664001c, 0xd8000026, 0xd8400027, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff9c,
+ 0xc49b02e9, 0x99800008, 0xc430000e, 0x2b300008, 0xcf000013, 0x04302000, 0xcf013267, 0xc4313267,
+ 0x97000001, 0x90000000, 0x244c00ff, 0xcc4c0200, 0x7c408001, 0x88000000, 0xc44f0200, 0xc410000b,
+ 0xc414000c, 0x7d158010, 0x059cc000, 0xd8400013, 0xccdd0000, 0x7c408001, 0x88000000, 0xc40c0037,
+ 0x94c0ffff, 0xcc000049, 0xc40c003a, 0x94c0ffff, 0x7c40c001, 0x24d00001, 0x9500e69a, 0x18d0003b,
+ 0x18d40021, 0x99400006, 0xd840004a, 0xc40c003c, 0x94c0ffff, 0x14cc0001, 0x94c00028, 0xd8000033,
+ 0xc438000b, 0xc43c0009, 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078,
+ 0x7ffbc00c, 0x97c0fffd, 0x99000004, 0xc0120840, 0x282c0040, 0x80001ae8, 0xc0121841, 0x282c001a,
+ 0xcd01c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04200004, 0xcec0001b, 0xd8400021,
+ 0x0a200001, 0x9a00ffff, 0xc425c07f, 0x166c001f, 0x04200004, 0x9ac0fffb, 0xc434000f, 0x9b40ffff,
+ 0xd801c07f, 0xd8400013, 0xc425c07f, 0xce400078, 0xd8000034, 0x9940e66b, 0xd800004a, 0x7c408001,
+ 0x88000000, 0xc40c0036, 0x24d00001, 0x9900fffe, 0x18cc0021, 0xccc00047, 0xcc000046, 0xc40c0039,
+ 0x94c0ffff, 0xc40c003d, 0x98c0ffff, 0x7c40c001, 0x24d003ff, 0x18d47fea, 0x18d87ff4, 0xcd00004c,
+ 0xcd40004e, 0xcd80004d, 0xd8400013, 0xcd41c405, 0xc02a0001, 0x2aa80001, 0xce800013, 0xcd01c406,
+ 0xcc01c406, 0xcc01c406, 0xc40c0006, 0x98c0ffff, 0xc414000e, 0x29540008, 0x295c0001, 0xcd400013,
+ 0xd8c1325e, 0xcdc0001a, 0x11980002, 0x4110000c, 0xc0160800, 0x7d15000a, 0xc0164010, 0xd8400013,
+ 0xcd41c078, 0xcc01c080, 0xcc01c081, 0xcd81c082, 0xcc01c083, 0xcd01c084, 0xc40c0006, 0x98c0ffff,
+ 0xd8400048, 0xc40c003b, 0x94c0ffff, 0x80000c16, 0xd8400013, 0xd801c40a, 0xd901c40d, 0xd801c410,
+ 0xd801c40e, 0xd801c40f, 0xc40c0040, 0x04140001, 0x09540001, 0x9940ffff, 0x04140096, 0xd8400013,
+ 0xccc1c400, 0xc411c401, 0x9500fffa, 0xc424003e, 0x04d00001, 0x11100002, 0xcd01c40c, 0xc0180034,
+ 0xcd81c411, 0xd841c414, 0x0a540001, 0xcd41c412, 0x2468000f, 0xc419c416, 0x41980003, 0xc41c003f,
+ 0x7dda0001, 0x12200002, 0x10cc0002, 0xccc1c40c, 0xd901c411, 0xce41c412, 0xd8800013, 0xce292e40,
+ 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xc43c0007, 0xdc120000, 0x31144000,
+ 0x95400005, 0xdc030000, 0xd800002a, 0xcc3c000c, 0x80001b70, 0x33f80003, 0xd4400078, 0x9780e601,
+ 0x188cfff0, 0x04e40002, 0x80001190, 0x7c408001, 0x88000000, 0xc424005e, 0x96400006, 0x90000000,
+ 0xc424005e, 0x96400003, 0x7c408001, 0x88000000, 0x80001b74, 0x80000168, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 7440
+};
+
+static const PWR_DFY_Section pwr_virus_section4 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x54106500,
+ .dfy_data = {
+ 0x7e000200, 0x7e020204, 0xc00a0505, 0x00000000, 0xbf8c007f, 0xb8900904, 0xb8911a04, 0xb8920304,
+ 0xb8930b44, 0x921c0d0c, 0x921c1c13, 0x921d0c12, 0x811c1d1c, 0x811c111c, 0x921cff1c, 0x00000400,
+ 0x921dff10, 0x00000100, 0x81181d1c, 0x7e040218, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 240
+};
+
+static const PWR_DFY_Section pwr_virus_section5 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x54106900,
+ .dfy_data = {
+ 0x7e080200, 0x7e100204, 0xbefc00ff, 0x00010000, 0x24200087, 0x262200ff, 0x000001f0, 0x20222282,
+ 0x28182111, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 384
+};
+
+static const PWR_DFY_Section pwr_virus_section6 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x54116f00,
+ .dfy_data = {
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4540fe8, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000041, 0x0000000c, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54116f00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb454105e, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x000000c0, 0x00000010, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54117300, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541065, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000500, 0x0000001c, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54117700, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541069, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000444, 0x0000008a, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54117b00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 1024
+};
-static const PWR_Command_Table pwr_virus_table[PWR_VIRUS_TABLE_SIZE] = {
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL },
- { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL },
- { PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL },
- { PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE },
- { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO },
- { PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdEnd, 0x00000000, 0x00000000 },
+static const PWR_Command_Table pwr_virus_table_post[] = {
+ { 0x00000000, mmCP_MEC_CNTL },
+ { 0x00000000, mmCP_MEC_CNTL },
+ { 0x00000004, mmSRBM_GFX_CNTL },
+ { 0x54116f00, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000005, mmSRBM_GFX_CNTL },
+ { 0x54117300, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000006, mmSRBM_GFX_CNTL },
+ { 0x54117700, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000007, mmSRBM_GFX_CNTL },
+ { 0x54117b00, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000004, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000104, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000204, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000304, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000404, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000504, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000604, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000704, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000005, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000105, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000205, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000305, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000405, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000505, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000605, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000705, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000006, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000106, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000206, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000306, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000406, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000506, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000606, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000706, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000007, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000107, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000207, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000307, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000407, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000507, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000607, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000707, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000008, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000108, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000208, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000308, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000408, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000508, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000608, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000708, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000009, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000109, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000209, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000309, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000409, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000509, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000609, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000709, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000004, mmSRBM_GFX_CNTL },
+ { 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
+ { 0x00000000, mmGRBM_STATUS },
+ { 0x00000000, mmGRBM_STATUS },
+ { 0x00000000, mmGRBM_STATUS },
+ { 0x00000000, 0xFFFFFFFF },
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
index 4c3b537a714f..7d1eec5d2e7a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
@@ -23,22 +23,15 @@
#ifndef _PP_INSTANCE_H_
#define _PP_INSTANCE_H_
-#include "smumgr.h"
#include "hwmgr.h"
-#include "eventmgr.h"
-
-#define PP_VALID 0x1F1F1F1F
struct pp_instance {
- uint32_t pp_valid;
uint32_t chip_family;
uint32_t chip_id;
bool pm_en;
uint32_t feature_mask;
void *device;
- struct pp_smumgr *smu_mgr;
struct pp_hwmgr *hwmgr;
- struct pp_eventmgr *eventmgr;
struct mutex pp_lock;
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 901c960cfe21..2b3497135bbd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -70,7 +70,12 @@
#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26
#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27
#define PPSMC_MSG_SetSoftMinVcn 0x28
-#define PPSMC_Message_Count 0x29
+#define PPSMC_MSG_GetGfxclkFrequency 0x2A
+#define PPSMC_MSG_GetFclkFrequency 0x2B
+#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
+#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
+#define PPSMC_MSG_SoftReset 0x2E
+#define PPSMC_Message_Count 0x2F
typedef uint16_t PPSMC_Result;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
index 08cd70c75d8b..9ad1cefff79f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#ifndef SMU72_H
#define SMU72_H
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
index b2edbc0c3c4d..2aefbb85f620 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#ifndef SMU72_DISCRETE_H
#define SMU72_DISCRETE_H
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 5d61cc9d4554..b1b27b2128f6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -23,23 +23,13 @@
#ifndef _SMUMGR_H_
#define _SMUMGR_H_
#include <linux/types.h>
-#include "pp_instance.h"
#include "amd_powerplay.h"
-
-struct pp_smumgr;
-struct pp_instance;
-struct pp_hwmgr;
+#include "hwmgr.h"
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
-extern const struct pp_smumgr_func cz_smu_funcs;
-extern const struct pp_smumgr_func iceland_smu_funcs;
-extern const struct pp_smumgr_func tonga_smu_funcs;
-extern const struct pp_smumgr_func fiji_smu_funcs;
-extern const struct pp_smumgr_func polaris10_smu_funcs;
-extern const struct pp_smumgr_func vega10_smu_funcs;
-extern const struct pp_smumgr_func rv_smu_funcs;
+
enum AVFS_BTC_STATUS {
AVFS_BTC_BOOT = 0,
@@ -85,6 +75,11 @@ enum SMU_MEMBER {
VceBootLevel,
SamuBootLevel,
LowSclkInterruptThreshold,
+ DRAM_LOG_ADDR_H,
+ DRAM_LOG_ADDR_L,
+ DRAM_LOG_PHY_ADDR_H,
+ DRAM_LOG_PHY_ADDR_L,
+ DRAM_LOG_BUFF_SIZE,
};
@@ -100,216 +95,44 @@ enum SMU_MAC_DEFINITION {
SMU_UVD_MCLK_HANDSHAKE_DISABLE,
};
+extern int smum_get_argument(struct pp_hwmgr *hwmgr);
-struct pp_smumgr_func {
- int (*smu_init)(struct pp_smumgr *smumgr);
- int (*smu_fini)(struct pp_smumgr *smumgr);
- int (*start_smu)(struct pp_smumgr *smumgr);
- int (*check_fw_load_finish)(struct pp_smumgr *smumgr,
- uint32_t firmware);
- int (*request_smu_load_fw)(struct pp_smumgr *smumgr);
- int (*request_smu_load_specific_fw)(struct pp_smumgr *smumgr,
- uint32_t firmware);
- int (*get_argument)(struct pp_smumgr *smumgr);
- int (*send_msg_to_smc)(struct pp_smumgr *smumgr, uint16_t msg);
- int (*send_msg_to_smc_with_parameter)(struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter);
- int (*download_pptable_settings)(struct pp_smumgr *smumgr,
- void **table);
- int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
- int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
- int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
- int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
- int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
- int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
- int (*init_smc_table)(struct pp_hwmgr *hwmgr);
- int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
- int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
- int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
- uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
- uint32_t (*get_mac_definition)(uint32_t value);
- bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
- int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
- bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr);
-};
-
-struct pp_smumgr {
- uint32_t chip_family;
- uint32_t chip_id;
- void *device;
- void *backend;
- uint32_t usec_timeout;
- bool reload_fw;
- const struct pp_smumgr_func *smumgr_funcs;
- bool is_kicker;
-};
-
-extern int smum_early_init(struct pp_instance *handle);
+extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
-extern int smum_get_argument(struct pp_smumgr *smumgr);
+extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
-extern int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-extern int smum_upload_powerplay_table(struct pp_smumgr *smumgr);
-
-extern int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
-
-extern int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-extern int smum_wait_on_register(struct pp_smumgr *smumgr,
- uint32_t index, uint32_t value, uint32_t mask);
-
-extern int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
- uint32_t index, uint32_t value, uint32_t mask);
-
-extern int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
-
-extern void smum_wait_for_indirect_register_unequal(
- struct pp_smumgr *smumgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
extern int smu_allocate_memory(void *device, uint32_t size,
enum cgs_gpu_mem_type type,
uint32_t byte_align, uint64_t *mc_addr,
void **kptr, void *handle);
extern int smu_free_memory(void *device, void *handle);
-extern int vega10_smum_init(struct pp_smumgr *smumgr);
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
-extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+extern uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr,
uint32_t type, uint32_t member);
-extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
+extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value);
extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
-extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr);
-
-#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
-
-#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
-
-#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_on_indirect_register(smumgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
- SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field) )
-
-#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- index, value, mask) \
- smum_wait_for_register_unequal(smumgr, \
- index, value, mask)
-
-#define SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, value, mask) \
- SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- mm##reg, value, mask)
-
-#define SMUM_WAIT_FIELD_UNEQUAL(smumgr, reg, field, fieldval) \
- SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_GET_FIELD(value, reg, field) \
- (((value) & SMUM_FIELD_MASK(reg, field)) \
- >> SMUM_FIELD_SHIFT(reg, field))
-
-#define SMUM_READ_FIELD(device, reg, field) \
- SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
-
-#define SMUM_SET_FIELD(value, reg, field, field_val) \
- (((value) & ~SMUM_FIELD_MASK(reg, field)) | \
- (SMUM_FIELD_MASK(reg, field) & ((field_val) << \
- SMUM_FIELD_SHIFT(reg, field))))
-
-#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
- SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_on_indirect_register(smumgr, \
- mm##port##_INDEX_0, index, value, mask)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_for_indirect_register_unequal(smumgr, \
- mm##port##_INDEX_0, index, value, mask)
-
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-
-/*Operations on named fields.*/
-
-#define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
- SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define SMUM_WRITE_FIELD(device, reg, field, fieldval) \
- cgs_write_register(device, mm##reg, \
- SMUM_SET_FIELD(cgs_read_register(device, mm##reg), reg, field, fieldval))
-
-#define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-
-#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-
-#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
- smum_wait_for_indirect_register_unequal(smumgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
- SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr);
-#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field) )
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index cb070ebc7de1..247c97397a27 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -124,12 +124,15 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_NumOfDisplays 0x56
#define PPSMC_MSG_ReadSerialNumTop32 0x58
#define PPSMC_MSG_ReadSerialNumBottom32 0x59
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x5A
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x5B
#define PPSMC_MSG_RunAcgBtc 0x5C
#define PPSMC_MSG_RunAcgInClosedLoop 0x5D
#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
#define PPSMC_MSG_InitializeAcg 0x5F
#define PPSMC_MSG_GetCurrPkgPwr 0x61
-#define PPSMC_Message_Count 0x62
+#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68
+#define PPSMC_Message_Count 0x69
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index e7ad45297b1d..98e701e4f553 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -1,11 +1,31 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
- polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
- smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+ polaris10_smumgr.o iceland_smumgr.o \
+ smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 51adf04ab4b3..4d672cd15785 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -1,16 +1,9 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
@@ -18,90 +11,231 @@
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+#include <linux/types.h>
+#include "smumgr.h"
#include "pp_debug.h"
-#include "iceland_smc.h"
-#include "smu7_dyn_defaults.h"
-
+#include "ci_smumgr.h"
+#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-#include "smu71_discrete.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "processpptables.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
-#include "iceland_smumgr.h"
+#include "processpptables.h"
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
-#define VDDC_VDDCI_DELTA 200
-
-#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
-#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
-#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
-#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
-
-static const struct iceland_pt_defaults defaults_iceland = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
- * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
- */
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+
+static const struct ci_pt_defaults defaults_hawaii_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt = {
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-/* 35W - XT, XTL */
-static const struct iceland_pt_defaults defaults_icelandxt = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC,
- * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
- * BAPM_TEMP_GRADIENT
- */
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
- { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
- { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
-};
-/* 25W - PRO, LE */
-static const struct iceland_pt_defaults defaults_icelandpro = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC,
- * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
- * BAPM_TEMP_GRADIENT
- */
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
- { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
- { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+static const struct ci_pt_defaults defaults_saturn_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+
+static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+ uint32_t smc_addr, uint32_t limit)
+{
+ if ((0 != (3 & smc_addr))
+ || ((smc_addr + 3) >= limit)) {
+ pr_err("smc_addr invalid \n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ return 0;
+}
+
+static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ int result;
+ uint32_t data = 0;
+ uint32_t original_data;
+ uint32_t addr = 0;
+ uint32_t extra_shift;
+
+ if ((3 & smc_start_address)
+ || ((smc_start_address + byte_count) >= limit)) {
+ pr_err("smc_start_address invalid \n");
+ return -EINVAL;
+ }
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+
+ data = 0;
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* Bytes are written into the SMC addres space with the MSB first. */
+ data = (0x100 * data) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ }
+
+ return 0;
+}
+
+
+static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
+{
+ static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
+ uint32_t *value, uint32_t limit)
+{
+ int result;
+
+ result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+ return 0;
+}
+
+static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ int ret;
+
+ if (!ci_is_smc_ram_running(hwmgr))
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
+
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ pr_info("\n failed to send message %x ret is %d\n", msg, ret);
+
+ return 0;
+}
+
+static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
+ return ci_send_msg_to_smc(hwmgr, msg);
+}
+
+static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
@@ -111,27 +245,282 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
dev_id = (uint32_t)sys_info.value;
switch (dev_id) {
- case DEVICE_ID_VI_ICELAND_M_6900:
- case DEVICE_ID_VI_ICELAND_M_6903:
- smu_data->power_tune_defaults = &defaults_icelandxt;
+ case 0x67BA:
+ case 0x66B1:
+ smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
-
- case DEVICE_ID_VI_ICELAND_M_6901:
- case DEVICE_ID_VI_ICELAND_M_6902:
- smu_data->power_tune_defaults = &defaults_icelandpro;
+ case 0x67B8:
+ case 0x66B0:
+ smu_data->power_tune_defaults = &defaults_hawaii_xt;
+ break;
+ case 0x6640:
+ case 0x6641:
+ case 0x6646:
+ case 0x6647:
+ smu_data->power_tune_defaults = &defaults_saturn_xt;
break;
+ case 0x6649:
+ case 0x6650:
+ case 0x6651:
+ case 0x6658:
+ case 0x665C:
+ case 0x665D:
+ case 0x67A0:
+ case 0x67A1:
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B9:
+ case 0x67BE:
default:
- smu_data->power_tune_defaults = &defaults_iceland;
- pr_warn("Unknown V.I. Device ID.\n");
+ smu_data->power_tune_defaults = &defaults_bonaire_xt;
break;
}
- return;
}
-static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+ return 0;
+}
+
+static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ss_info;
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ss_info)) {
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ss_info.speed_spectrum_rate);
+ uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+}
+
+static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ if (clock < min) {
+ pr_info("Engine clock can't satisfy stutter requirement!\n");
+ return 0;
+ }
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
+
+static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU7_Discrete_GraphicsLevel *level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+
+ result = ci_calculate_sclk_params(hwmgr, clock, level);
+
+ /* populate graphics levels */
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
+ (uint32_t *)(&level->MinVddc));
+ if (result) {
+ pr_err("vdd_dep_on_sclk table is NULL\n");
+ return result;
+ }
+
+ level->SclkFrequency = clock;
+ level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ clock,
+ &level->MinVddcPhases);
+
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ level->EnabledForThrottle = 1;
+ level->UpH = 0;
+ level->DownH = 0;
+ level->VoltageDownH = 0;
+ level->PowerThrottle = 0;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId =
+ ci_get_sleep_divider_id_from_clock(clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result = 0;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = ci_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ if (i == (dpm_table->sclk_table.count - 1))
+ smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ result = ci_copy_bytes_to_smc(hwmgr, array,
+ (u8 *)levels, array_size,
+ SMC_RAM_END);
+
+ return result;
+
+}
+
+static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
@@ -141,11 +530,11 @@ static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
smu_data->power_tune_table.TDC_VDDC_PkgLimit =
@@ -157,15 +546,15 @@ static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
uint32_t temp;
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (ci_read_smc_sram_dword(hwmgr,
fuse_table_offset +
- offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
@@ -176,52 +565,29 @@ static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offs
return 0;
}
-static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- return 0;
-}
-
-static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 8; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
+ uint16_t tmp;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- return 0;
-}
-
-static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
+ else
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
return 0;
}
-static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
"The CAC Leakage table does not exist!", return -EINVAL);
@@ -230,22 +596,24 @@ static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
"CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
- for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
+ } else {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
}
- } else {
- PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
}
return 0;
}
-static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t *vid = smu_data->power_tune_table.VddCVid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -253,117 +621,154 @@ static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
"There should never be more than 8 entries for VddcVid!!!",
return -EINVAL);
- for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
}
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
+ smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
+
return 0;
}
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+ return 0;
+}
+
+static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t pm_fuse_table_offset;
+ int ret = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
+ if (ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END)) {
+ pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
+ return -EINVAL;
+ }
/* DW0 - DW3 */
- if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate bapm vddc vid Failed!",
- return -EINVAL);
-
+ ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
/* DW4 - DW5 */
- if (iceland_populate_vddc_vid(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate vddc vid Failed!",
- return -EINVAL);
-
+ ret |= ci_populate_vddc_vid(hwmgr);
/* DW6 */
- if (iceland_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
+ ret |= ci_populate_svi_load_line(hwmgr);
/* DW7 */
- if (iceland_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ ret |= ci_populate_tdc_limit(hwmgr);
/* DW8 */
- if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != iceland_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW16 */
- if (iceland_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW17 */
- if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW18 */
- if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
- return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
+
+ ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
(uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
+ sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
}
- return 0;
+ return ret;
}
-static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, uint32_t *vol)
+static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
- uint32_t i = 0;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
- /* clock - voltage dependency table is empty table */
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- /* find first sclk bigger than request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- *vol = allowed_clock_voltage_table->entries[i].v;
- return 0;
- }
+ dpm_table->DTETjOffset = 0;
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
}
- /* sclk is bigger than max sclk in the dependence table */
- *vol = allowed_clock_voltage_table->entries[i - 1].v;
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
return 0;
}
-static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
uint16_t *lo)
{
@@ -372,7 +777,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
*hi = tab->value * VOLTAGE_SCALE;
*lo = tab->value * VOLTAGE_SCALE;
- /* SCLK/VDDC Dependency Table has to exist. */
PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
"The SCLK/VDDC Dependency Table does not exist.\n",
return -EINVAL);
@@ -382,11 +786,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
return 0;
}
- /*
- * Since voltage in the sclk/vddc dependency table is not
- * necessarily in ascending order because of ELB voltage
- * patching, loop through entire list to find exact voltage.
- */
for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
vol_found = true;
@@ -402,10 +801,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
}
}
- /*
- * If voltage is not found in the first pass, loop again to
- * find the best match, equal or higher value.
- */
if (!vol_found) {
for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
@@ -429,29 +824,29 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
pp_atomctrl_voltage_table_entry *tab,
- SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+ SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
int result;
- result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ result = ci_get_std_voltage_value_sidd(hwmgr, tab,
&smc_voltage_tab->StdVoltageHiSidd,
&smc_voltage_tab->StdVoltageLoSidd);
- if (0 != result) {
+ if (result) {
smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
}
smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
- CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
return 0;
}
-static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
unsigned int count;
int result;
@@ -459,7 +854,7 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
table->VddcLevelCount = data->vddc_voltage_table.count;
for (count = 0; count < table->VddcLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->vddc_voltage_table.entries[count]),
&(table->VddcLevel[count]));
PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
@@ -467,7 +862,7 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
/* GPIO voltage control */
if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ else
table->VddcLevel[count].Smio = 0;
}
@@ -476,8 +871,8 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t count;
@@ -486,7 +881,7 @@ static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
table->VddciLevelCount = data->vddci_voltage_table.count;
for (count = 0; count < table->VddciLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->vddci_voltage_table.entries[count]),
&(table->VddciLevel[count]));
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
@@ -501,8 +896,8 @@ static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t count;
@@ -510,8 +905,8 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->mvdd_voltage_table.entries[count]),
&table->MvddLevel[count]);
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
@@ -527,28 +922,28 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
}
-static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result;
- result = iceland_populate_smc_vddc_table(hwmgr, table);
+ result = ci_populate_smc_vddc_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate VDDC voltage table to SMC", return -EINVAL);
- result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ result = ci_populate_smc_vdd_ci_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate VDDCI voltage table to SMC", return -EINVAL);
- result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ result = ci_populate_smc_mvdd_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate MVDD voltage table to SMC", return -EINVAL);
return 0;
}
-static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU71_Discrete_Ulv *state)
+static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU7_Discrete_Ulv *state)
{
uint32_t voltage_response_time, ulv_voltage;
int result;
@@ -591,33 +986,28 @@ static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_Ulv *ulv_level)
+static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_Ulv *ulv_level)
{
- return iceland_populate_ulv_level(hwmgr, ulv_level);
+ return ci_populate_ulv_level(hwmgr, ulv_level);
}
-static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t i;
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
table->LinkLevel[i].PcieGenSpeed =
(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
table->LinkLevel[i].PcieLaneCount =
(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
}
smu_data->smc_state_table.LinkLevelCount =
@@ -628,294 +1018,15 @@ static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discret
return 0;
}
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider*/
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
- const struct phm_phase_shedding_limits_table *pl,
- uint32_t sclk, uint32_t *p_shed)
-{
- unsigned int i;
-
- /* use the minimum phase shedding */
- *p_shed = 1;
-
- for (i = 0; i < pl->count; i++) {
- if (sclk < pl->entries[i].Sclk) {
- *p_shed = i;
- break;
- }
- }
- return 0;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
- SMU71_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
- /* populate graphics levels*/
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
- hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
- &graphic_level->MinVddc);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for VDDC \
- engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
- graphic_level->MinVddcPhases = 1;
-
- if (data->vddc_phase_shed_control)
- iceland_populate_phase_value_based_on_sclk(hwmgr,
- hwmgr->dyn_state.vddc_phase_shed_limits_table,
- engine_clock,
- &graphic_level->MinVddcPhases);
-
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 100;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- smu7_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_in_sr);
-
- /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (0 == result) {
- graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
-
- uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
- SMU71_MAX_LEVELS_GRAPHICS;
-
- SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
-
- uint32_t i;
- uint8_t highest_pcie_level_enabled = 0;
- uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
- uint8_t count = 0;
- int result = 0;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = iceland_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result != 0)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now. */
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (highest_pcie_level_enabled + 1))) != 0) {
- highest_pcie_level_enabled++;
- }
-
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
- count++;
- }
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++) {
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
- }
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
-
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress,
- (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_calculate_mclk_params(
+static int ci_calculate_mclk_params(
struct pp_hwmgr *hwmgr,
uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *mclk,
+ SMU7_Discrete_MemoryLevel *mclk,
bool strobe_mode,
bool dllStateOn
)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
@@ -934,10 +1045,8 @@ static int iceland_calculate_mclk_params(
PP_ASSERT_WITH_CODE(0 == result,
"Error retrieving Memory Clock Parameters from VBIOS.", return result);
- /* MPLL_FUNC_CNTL setup*/
mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
- /* MPLL_FUNC_CNTL_1 setup*/
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
@@ -945,12 +1054,10 @@ static int iceland_calculate_mclk_params(
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
- /* MPLL_AD_FUNC_CNTL setup*/
mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
if (data->is_memory_gddr5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
@@ -959,21 +1066,6 @@ static int iceland_calculate_mclk_params(
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
pp_atomctrl_internal_ss_info ss_info;
uint32_t freq_nom;
uint32_t tmp;
@@ -990,14 +1082,7 @@ static int iceland_calculate_mclk_params(
tmp = tmp * tmp;
if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss.Info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
uint32_t clkv =
(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
@@ -1007,7 +1092,6 @@ static int iceland_calculate_mclk_params(
}
}
- /* MCLK_PWRMGT_CNTL setup */
mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
@@ -1016,7 +1100,6 @@ static int iceland_calculate_mclk_params(
MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
- /* Save the result data to outpupt memory level structure */
mclk->MclkFrequency = memory_clock;
mclk->MpllFuncCntl = mpll_func_cntl;
mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
@@ -1031,48 +1114,45 @@ static int iceland_calculate_mclk_params(
return 0;
}
-static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
bool strobe_mode)
{
uint8_t mc_para_index;
if (strobe_mode) {
- if (memory_clock < 12500) {
+ if (memory_clock < 12500)
mc_para_index = 0x00;
- } else if (memory_clock > 47500) {
+ else if (memory_clock > 47500)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- }
} else {
- if (memory_clock < 65000) {
+ if (memory_clock < 65000)
mc_para_index = 0x00;
- } else if (memory_clock > 135000) {
+ else if (memory_clock > 135000)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
}
return mc_para_index;
}
-static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
uint8_t mc_para_index;
- if (memory_clock < 10000) {
+ if (memory_clock < 10000)
mc_para_index = 0;
- } else if (memory_clock >= 80000) {
+ else if (memory_clock >= 80000)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
- }
return mc_para_index;
}
-static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
uint32_t memory_clock, uint32_t *p_shed)
{
unsigned int i;
@@ -1089,10 +1169,10 @@ static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, co
return 0;
}
-static int iceland_populate_single_memory_level(
+static int ci_populate_single_memory_level(
struct pp_hwmgr *hwmgr,
uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *memory_level
+ SMU7_Discrete_MemoryLevel *memory_level
)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1104,16 +1184,14 @@ static int iceland_populate_single_memory_level(
uint32_t mclk_strobe_mode_threshold = 40000;
if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ result = ci_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
PP_ASSERT_WITH_CODE((0 == result),
"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
}
- if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
- memory_level->MinVddci = memory_level->MinVddc;
- } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddci_dependency_on_mclk,
memory_clock,
&memory_level->MinVddci);
@@ -1121,18 +1199,27 @@ static int iceland_populate_single_memory_level(
"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
}
+ if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.mvdd_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
+ }
+
memory_level->MinVddcPhases = 1;
if (data->vddc_phase_shed_control) {
- iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
memory_clock, &memory_level->MinVddcPhases);
}
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
@@ -1148,7 +1235,7 @@ static int iceland_populate_single_memory_level(
cgs_get_active_displays_info(hwmgr->device, &info);
data->display_timing.num_existing_displays = info.display_count;
- /* stutter mode not support on iceland */
+ /* stutter mode not support on ci */
/* decide strobe mode*/
memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
@@ -1156,7 +1243,7 @@ static int iceland_populate_single_memory_level(
/* decide EDC mode and memory clock ratio*/
if (data->is_memory_gddr5) {
- memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
memory_level->StrobeEnable);
if ((mclk_edc_enable_threshold != 0) &&
@@ -1170,7 +1257,7 @@ static int iceland_populate_single_memory_level(
}
if (memory_level->StrobeEnable) {
- if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
else
@@ -1179,11 +1266,11 @@ static int iceland_populate_single_memory_level(
dll_state_on = data->dll_default_on;
} else {
memory_level->StrobeRatio =
- iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ ci_get_ddr3_mclk_frequency_ratio(memory_clock);
dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
}
- result = iceland_calculate_mclk_params(hwmgr,
+ result = ci_calculate_mclk_params(hwmgr,
memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
if (0 == result) {
@@ -1209,23 +1296,18 @@ static int iceland_populate_single_memory_level(
return result;
}
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int result;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
- SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
uint32_t i;
memset(levels, 0x00, level_array_size);
@@ -1233,39 +1315,42 @@ int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->mclk_table.count; i++) {
PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
"can not populate memory level as memory clock is zero", return -EINVAL);
- result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
&(smu_data->smc_state_table.MemoryLevel[i]));
- if (0 != result) {
+ if (0 != result)
return result;
- }
}
- /* Only enable level 0 for now.*/
smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
- /*
- * in order to prevent MC activity from stutter mode to push DPM up.
- * the UVD change complements this by putting the MCLK in a higher state
- * by default such that we are not effected by up threshold or and MCLK DPM latency.
- */
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ if ((dpm_table->mclk_table.count >= 2)
+ && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
+ smu_data->smc_state_table.MemoryLevel[1].MinVddci =
+ smu_data->smc_state_table.MemoryLevel[0].MinVddci;
+ smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
+ smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
+ }
smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+ result = ci_copy_bytes_to_smc(hwmgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
SMC_RAM_END);
return result;
}
-static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
- SMU71_Discrete_VoltageLevel *voltage)
+static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
{
const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1291,15 +1376,14 @@ static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
return 0;
}
-static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result = 0;
const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t vddc_phase_shed_control = 0;
- SMU71_Discrete_VoltageLevel voltage_level;
+ SMU7_Discrete_VoltageLevel voltage_level;
uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
@@ -1314,7 +1398,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
else
table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
- table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
/* assign zero for now*/
table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
@@ -1346,7 +1430,6 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->ACPILevel.CcPwrDynRm = 0;
table->ACPILevel.CcPwrDynRm1 = 0;
-
/* For various features to be enabled/disabled while this level is active.*/
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
/* SCLK frequency in units of 10KHz*/
@@ -1360,6 +1443,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
@@ -1373,7 +1457,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
}
- if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
table->MemoryACPILevel.MinMvdd =
PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
else
@@ -1418,9 +1502,9 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.EnabledForThrottle = 0;
table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
@@ -1433,35 +1517,145 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = 0;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ table->UvdLevelCount = (uint8_t)(uvd_table->count);
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ uvd_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ uvd_table->entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ uvd_table->entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
+ }
+
+ return result;
}
-static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+ table->VceLevelCount = (uint8_t)(vce_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ vce_table->entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_acp_clock_voltage_dependency_table *acp_table =
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+
+ table->AcpLevelCount = (uint8_t)(acp_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
+ table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_samu_clock_voltage_dependency_table *samu_table =
+ hwmgr->dyn_state.samu_clock_voltage_dependency_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(samu_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
+ table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_memory_timing_parameters(
+static int ci_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
uint32_t memory_clock,
- struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
)
{
uint32_t dramTiming;
@@ -1486,42 +1680,34 @@ static int iceland_populate_memory_timing_parameters(
return 0;
}
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- * This function is to be called from the SetPowerState table.
- */
-static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
int result = 0;
- SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
uint32_t i, j;
- memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+ memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = iceland_populate_memory_timing_parameters
+ result = ci_populate_memory_timing_parameters
(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
data->dpm_table.mclk_table.dpm_levels[j].value,
&arb_regs.entries[i][j]);
- if (0 != result) {
+ if (0 != result)
break;
- }
}
}
if (0 == result) {
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->arb_table_start,
(uint8_t *)&arb_regs,
- sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
SMC_RAM_END
);
}
@@ -1529,12 +1715,13 @@ static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
return result;
}
-static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
@@ -1562,26 +1749,22 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
}
table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- table->BootVddci = table->BootVddc;
- else
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
-
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
return result;
}
-static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
- SMU71_Discrete_MCRegisters *mc_reg_table)
+static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
{
- const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+ const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
uint32_t i, j;
for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
if (smu_data->mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
"Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
mc_reg_table->address[i].s0 =
PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
@@ -1596,10 +1779,9 @@ static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
return 0;
}
-/*convert register values from driver to SMC format */
-static void iceland_convert_mc_registers(
- const struct iceland_mc_reg_entry *entry,
- SMU71_Discrete_MCRegisterSet *data,
+static void ci_convert_mc_registers(
+ const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
uint32_t num_entries, uint32_t valid_flag)
{
uint32_t i, j;
@@ -1612,13 +1794,13 @@ static void iceland_convert_mc_registers(
}
}
-static int iceland_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
+static int ci_convert_mc_reg_table_entry_to_smc(
+ struct pp_hwmgr *hwmgr,
const uint32_t memory_clock,
- SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data
)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t i = 0;
for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
@@ -1631,15 +1813,15 @@ static int iceland_convert_mc_reg_table_entry_to_smc(
if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
--i;
- iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
mc_reg_table_data, smu_data->mc_reg_table.last,
smu_data->mc_reg_table.validflag);
return 0;
}
-static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_MCRegisters *mc_regs)
+static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_regs)
{
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1647,8 +1829,8 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = iceland_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
+ res = ci_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
data->dpm_table.mclk_table.dpm_levels[i].value,
&mc_regs->data[i]
);
@@ -1660,10 +1842,9 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
return result;
}
-static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t address;
int32_t result;
@@ -1672,45 +1853,43 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
return 0;
- memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+ memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
- result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
if (result != 0)
return result;
+ address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
- address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+ return ci_copy_bytes_to_smc(hwmgr, address,
(uint8_t *)&smu_data->mc_regs.data[0],
- sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
SMC_RAM_END);
}
-static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
- result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
+ result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for the MC register addresses!", return result;);
- result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for driver state!", return result;);
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
- (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+ return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
}
-static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t count, level;
count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
@@ -1736,107 +1915,48 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
- struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
- struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
- const uint16_t *def1, *def2;
- int i, j, k;
-
-
- /*
- * TDP number of fraction bits are changed from 8 to 7 for Iceland
- * as requested by SMC team
- */
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
-
- dpm_table->DTETjOffset = 0;
-
- dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
- /* The following are for new Iceland Multi-input fan/thermal control */
- if (NULL != ppm) {
- dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
- dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
- } else {
- dpm_table->PPM_PkgPwrLimit = 0;
- dpm_table->PPM_TemperatureLimit = 0;
- }
-
- CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
- CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
-
- dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
- def1 = defaults->bapmti_r;
- def2 = defaults->bapmti_rc;
-
- for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU71_DTE_SOURCES; j++) {
- for (k = 0; k < SMU71_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
- dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
- def1++;
- def2++;
- }
- }
- }
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
return 0;
}
-static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *tab)
+static int ci_start_smc(struct pp_hwmgr *hwmgr)
{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ /* set smc instruct start point at 0x0 */
+ ci_program_jump_on_start(hwmgr);
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
- tab->SVI2Enable |= VDDC_ON_SVI2;
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- tab->SVI2Enable |= VDDCI_ON_SVI2;
- else
- tab->MergedVddci = 1;
+ /* enable smc clock */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
- tab->SVI2Enable |= MVDD_ON_SVI2;
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
- (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
+ INTERRUPTS_ENABLED, 1);
return 0;
}
-/**
- * Initializes the SMC table and uploads it
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pInput the pointer to input data (PowerState)
- * @return always 0
- */
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
-
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ u32 i;
- iceland_initialize_power_tune_defaults(hwmgr);
+ ci_initialize_power_tune_defaults(hwmgr);
memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
- iceland_populate_smc_voltage_tables(hwmgr, table);
- }
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ ci_populate_smc_voltage_tables(hwmgr, table);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition))
@@ -1850,67 +1970,75 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
if (data->is_memory_gddr5)
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
if (data->ulv_supported) {
- result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result;);
+ "Failed to initialize ULV state!", return result);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixCG_ULV_PARAMETER, 0x40035);
}
- result = iceland_populate_smc_link_level(hwmgr, table);
+ result = ci_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result;);
+ "Failed to initialize Graphics Level!", return result);
- result = iceland_populate_all_graphic_levels(hwmgr);
+ result = ci_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result;);
+ "Failed to initialize Memory Level!", return result);
- result = iceland_populate_all_memory_levels(hwmgr);
+ result = ci_populate_smc_link_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result;);
+ "Failed to initialize Link Level!", return result);
- result = iceland_populate_smc_acpi_level(hwmgr, table);
+ result = ci_populate_smc_acpi_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result;);
+ "Failed to initialize ACPI Level!", return result);
- result = iceland_populate_smc_vce_level(hwmgr, table);
+ result = ci_populate_smc_vce_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result;);
+ "Failed to initialize VCE Level!", return result);
- result = iceland_populate_smc_acp_level(hwmgr, table);
+ result = ci_populate_smc_acp_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result;);
+ "Failed to initialize ACP Level!", return result);
- result = iceland_populate_smc_samu_level(hwmgr, table);
+ result = ci_populate_smc_samu_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
+ "Failed to initialize SAMU Level!", return result);
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
- result = iceland_program_memory_timing_parameters(hwmgr);
+ result = ci_program_memory_timing_parameters(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result;);
+ "Failed to Write ARB settings for the initial state.", return result);
- result = iceland_populate_smc_uvd_level(hwmgr, table);
+ result = ci_populate_smc_uvd_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result;);
+ "Failed to initialize UVD Level!", return result);
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
- result = iceland_populate_smc_boot_level(hwmgr, table);
+ result = ci_populate_smc_boot_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result;);
+ "Failed to initialize Boot Level!", return result);
- result = iceland_populate_smc_initial_state(hwmgr);
+ result = ci_populate_smc_initial_state(hwmgr);
PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
- result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
table->GraphicsVoltageChangeEnable = 1;
table->GraphicsThermThrottleEnable = 1;
table->GraphicsInterval = 1;
@@ -1927,17 +2055,35 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->MemoryVoltageChangeEnable = 1;
table->MemoryInterval = 1;
table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
table->PCIeGenInterval = 1;
- result = iceland_populate_smc_svi2_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate SVI2 setting!", return result);
+ ci_populate_smc_svi2_config(hwmgr, table);
+
+ for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
+ CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
table->ThermGpio = 17;
table->SclkStepSize = 0x4000;
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
@@ -1947,6 +2093,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
@@ -1955,47 +2102,32 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
- SMC_RAM_END);
+ result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
+ SMC_RAM_END);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to upload dpm data to SMC memory!", return result;);
- /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.ulv_setting_starts,
- (uint8_t *)&(smu_data->ulv_setting),
- sizeof(SMU71_Discrete_Ulv),
- SMC_RAM_END);
-
-
- result = iceland_populate_initial_mc_reg_table(hwmgr);
+ result = ci_populate_initial_mc_reg_table(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate initialize MC Reg table!", return result);
- result = iceland_populate_pm_fuses(hwmgr);
+ result = ci_populate_pm_fuses(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to populate PM fuses to SMC memory!", return result);
+ ci_start_smc(hwmgr);
+
return 0;
}
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
- SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
@@ -2012,7 +2144,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
- if (0 == smu7_data->fan_table_start) {
+ if (0 == ci_data->fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
@@ -2062,29 +2194,26 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
- /* fan_table.FanControl_GL_Flag = 1; */
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+ res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
return 0;
}
-
-static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return iceland_program_memory_timing_parameters(hwmgr);
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return ci_program_memory_timing_parameters(hwmgr);
return 0;
}
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
@@ -2100,21 +2229,21 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable,
- LowSclkInterruptThreshold),
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable,
+ LowSclkInterruptT),
(uint8_t *)&low_sclk_interrupt_threshold,
sizeof(uint32_t),
SMC_RAM_END);
}
- result = iceland_update_and_upload_mc_reg_table(hwmgr);
+ result = ci_update_and_upload_mc_reg_table(hwmgr);
PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
- result = iceland_program_mem_timing_parameters(hwmgr);
+ result = ci_program_mem_timing_parameters(hwmgr);
PP_ASSERT_WITH_CODE((result == 0),
"Failed to program memory timing parameters!",
);
@@ -2122,161 +2251,200 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
return result;
}
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
{
switch (type) {
case SMU_SoftRegisters:
switch (member) {
case HandshakeDisables:
- return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ return offsetof(SMU7_SoftRegisters, HandshakeDisables);
case VoltageChangeTimeout:
- return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
- return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
case PreVBlankGap:
- return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ return offsetof(SMU7_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
- return offsetof(SMU71_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+ return offsetof(SMU7_SoftRegisters, VBlankTimeout);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
case SMU_Discrete_DpmTable:
switch (member) {
case LowSclkInterruptThreshold:
- return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
}
}
- pr_warn("can't get the offset of type %x member %x\n", type, member);
+ pr_debug("can't get the offset of type %x member %x\n", type, member);
return 0;
}
-uint32_t iceland_get_mac_definition(uint32_t value)
+static uint32_t ci_get_mac_definition(uint32_t value)
{
switch (value) {
case SMU_MAX_LEVELS_GRAPHICS:
- return SMU71_MAX_LEVELS_GRAPHICS;
+ return SMU7_MAX_LEVELS_GRAPHICS;
case SMU_MAX_LEVELS_MEMORY:
- return SMU71_MAX_LEVELS_MEMORY;
+ return SMU7_MAX_LEVELS_MEMORY;
case SMU_MAX_LEVELS_LINK:
- return SMU71_MAX_LEVELS_LINK;
+ return SMU7_MAX_LEVELS_LINK;
case SMU_MAX_ENTRIES_SMIO:
- return SMU71_MAX_ENTRIES_SMIO;
+ return SMU7_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
- return SMU71_MAX_LEVELS_VDDC;
+ return SMU7_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
- return SMU71_MAX_LEVELS_VDDCI;
+ return SMU7_MAX_LEVELS_VDDCI;
case SMU_MAX_LEVELS_MVDD:
- return SMU71_MAX_LEVELS_MVDD;
+ return SMU7_MAX_LEVELS_MVDD;
}
- pr_warn("can't get the mac of %x\n", value);
+ pr_debug("can't get the mac of %x\n", value);
return 0;
}
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
+{
+ uint32_t byte_count, start_addr;
+ uint8_t *src;
+ uint32_t data;
+
+ struct cgs_firmware_info info = {0};
+
+ cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
+ hwmgr->is_kicker = info.is_kicker;
+ byte_count = info.image_size;
+ src = (uint8_t *)info.kptr;
+ start_addr = info.ucode_start_address;
+
+ if (byte_count > SMC_RAM_END) {
+ pr_err("SMC address is beyond the SMC RAM area.\n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
+ for (; byte_count >= 4; byte_count -= 4) {
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ src += 4;
+ }
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+ if (0 != byte_count) {
+ pr_err("SMC size must be divisible by 4\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
+{
+ if (ci_is_smc_ram_running(hwmgr)) {
+ pr_info("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
+ boot_seq_done, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
+ pre_fetcher_en, 1);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+ return ci_load_smc_ucode(hwmgr);
+}
+
+static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- uint32_t tmp;
+ uint32_t tmp = 0;
int result;
bool error = false;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, DpmTable),
+ if (ci_upload_firmware(hwmgr))
+ return -EINVAL;
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->dpm_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->dpm_table_start = tmp;
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, SoftRegisters),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
if (0 == result) {
data->soft_regs_start = tmp;
- smu7_data->soft_regs_start = tmp;
+ ci_data->soft_regs_start = tmp;
}
error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->mc_reg_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->mc_reg_table_start = tmp;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, FanTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->fan_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->fan_table_start = tmp;
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->arb_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->arb_table_start = tmp;
error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, Version),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, Version),
&tmp, SMC_RAM_END);
- if (0 == result) {
+ if (0 == result)
hwmgr->microcode_version_info.SMC = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, UlvSettings),
- &tmp, SMC_RAM_END);
-
- if (0 == result) {
- smu7_data->ulv_setting_starts = tmp;
- }
error |= (0 != result);
return error ? 1 : 0;
}
-/*---------------------------MC----------------------------*/
-
-static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
{
return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
}
-static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
{
bool result = true;
@@ -2369,32 +2537,32 @@ static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
return result;
}
-static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
uint32_t i;
uint16_t address;
for (i = 0; i < table->last; i++) {
table->mc_reg_address[i].s0 =
- iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
? address : table->mc_reg_address[i].s1;
}
return 0;
}
-static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
- struct iceland_mc_reg_table *ni_table)
+static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct ci_mc_reg_table *ni_table)
{
uint8_t i, j;
- PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
"Invalid VramInfo table.", return -EINVAL);
- for (i = 0; i < table->last; i++) {
+ for (i = 0; i < table->last; i++)
ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- }
+
ni_table->last = table->last;
for (i = 0; i < table->num_entries; i++) {
@@ -2411,26 +2579,15 @@ static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *tabl
return 0;
}
-/**
- * VBIOS omits some information to reduce size, we need to recover them here.
- * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
- * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
- * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. need to set these data for each clock range
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return always 0
- */
-static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
- struct iceland_mc_reg_table *table)
+static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct ci_mc_reg_table *table)
{
uint8_t i, j, k;
uint32_t temp_reg;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
switch (table->mc_reg_address[i].s1) {
@@ -2445,7 +2602,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
}
j++;
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
@@ -2456,15 +2613,14 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (!data->is_memory_gddr5) {
+ if (!data->is_memory_gddr5)
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
@@ -2472,7 +2628,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
}
@@ -2488,7 +2644,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
break;
@@ -2503,14 +2659,15 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+static int ci_set_valid_flag(struct ci_mc_reg_table *table)
{
uint8_t i, j;
+
for (i = 0; i < table->last; i++) {
for (j = 1; j < table->num_entries; j++) {
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
+ table->validflag |= (1 << i);
break;
}
}
@@ -2519,13 +2676,13 @@ static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
return 0;
}
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *table;
- struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
- uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+ struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = ci_get_memory_modile_index(hwmgr);
table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
@@ -2559,24 +2716,103 @@ int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (0 == result)
- result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+ result = ci_copy_vbios_smc_reg_table(table, ni_table);
if (0 == result) {
- iceland_set_s0_mc_reg_index(ni_table);
- result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ ci_set_s0_mc_reg_index(ni_table);
+ result = ci_set_mc_special_registers(hwmgr, ni_table);
}
if (0 == result)
- iceland_set_valid_flag(ni_table);
+ ci_set_valid_flag(ni_table);
kfree(table);
return result;
}
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return ci_is_smc_ram_running(hwmgr);
}
+
+static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpH = request->up_hyst;
+ levels[i].DownH = request->down_hyst;
+ }
+
+ return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
+
+static int ci_smu_init(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct ci_smumgr *ci_priv = NULL;
+
+ ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
+
+ if (ci_priv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ ci_priv->activity_target[i] = 30;
+
+ hwmgr->smu_backend = ci_priv;
+
+ return 0;
+}
+
+static int ci_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ return 0;
+}
+
+static int ci_start_smu(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+const struct pp_smumgr_func ci_smu_funcs = {
+ .smu_init = ci_smu_init,
+ .smu_fini = ci_smu_fini,
+ .start_smu = ci_start_smu,
+ .check_fw_load_finish = NULL,
+ .request_smu_load_fw = NULL,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = ci_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_offsetof = ci_get_offsetof,
+ .process_firmware_header = ci_process_firmware_header,
+ .init_smc_table = ci_init_smc_table,
+ .update_sclk_threshold = ci_update_sclk_threshold,
+ .thermal_setup_fan_table = ci_thermal_setup_fan_table,
+ .populate_all_graphic_levels = ci_populate_all_graphic_levels,
+ .populate_all_memory_levels = ci_populate_all_memory_levels,
+ .get_mac_definition = ci_get_mac_definition,
+ .initialize_mc_reg_table = ci_initialize_mc_reg_table,
+ .is_dpm_running = ci_is_dpm_running,
+ .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
new file mode 100644
index 000000000000..8189cfa17c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _CI_SMUMANAGER_H_
+#define _CI_SMUMANAGER_H_
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+#include "smu7_discrete.h"
+#include <pp_endian.h>
+#include "ppatomctrl.h"
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+struct ci_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ uint8_t last;
+ uint8_t num_entries;
+ uint16_t validflag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_smumgr {
+ uint32_t soft_regs_start;
+ uint32_t dpm_table_start;
+ uint32_t mc_reg_table_start;
+ uint32_t fan_table_start;
+ uint32_t arb_table_start;
+ uint32_t ulv_setting_starts;
+ struct SMU7_Discrete_DpmTable smc_state_table;
+ struct SMU7_Discrete_PmFuses power_tune_table;
+ const struct ci_pt_defaults *power_tune_defaults;
+ SMU7_Discrete_MCRegisters mc_regs;
+ struct ci_mc_reg_table mc_reg_table;
+ uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+
+};
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 652aaa43e95c..78ab0556e48f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -52,53 +52,52 @@ static const enum cz_scratch_entry firmware_list[] = {
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
-static int cz_smum_get_argument(struct pp_smumgr *smumgr)
+static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- return cgs_read_register(smumgr->device,
+ return cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_ARG_0);
}
-static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
- uint16_t msg)
+static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
if (result != 0) {
pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
return result;
}
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
return 0;
}
/* Send a message to the SMC, and wait for its response.*/
-static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- result = cz_send_msg_to_smc_async(smumgr, msg);
+ result = cz_send_msg_to_smc_async(hwmgr, msg);
if (result != 0)
return result;
- return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
}
-static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
+static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t limit)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
if (0 != (3 & smc_address)) {
@@ -111,39 +110,39 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
return -EINVAL;
}
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
SMN_MP1_SRAM_START_ADDR + smc_address);
return 0;
}
-static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
+static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t value, uint32_t limit)
{
int result;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = cz_set_smc_sram_address(smumgr, smc_address, limit);
+ result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
if (!result)
- cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
return result;
}
-static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc(smumgr, msg);
+ return cz_send_msg_to_smc(hwmgr, msg);
}
-static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
+static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
uint32_t firmware)
{
int i;
@@ -151,19 +150,19 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
- for (i = 0; i < smumgr->usec_timeout; i++) {
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
if (firmware ==
- (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
+ (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
break;
udelay(1);
}
- if (i >= smumgr->usec_timeout) {
+ if (i >= hwmgr->usec_timeout) {
pr_err("SMU check loaded firmware failed.\n");
return -EINVAL;
}
@@ -171,7 +170,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
return 0;
}
-static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
+static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
{
uint32_t reg_data;
uint32_t tmp;
@@ -179,44 +178,44 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
struct cgs_firmware_info info = {0};
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
- ret = cgs_get_firmware_info(smumgr->device,
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+ ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
if (ret)
return -EINVAL;
/* Disable MEC parsing/prefetching */
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_MEC_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_CPC_IC_BASE_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
reg_data = smu_lower_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
reg_data = smu_upper_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
return 0;
}
-static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
enum cz_scratch_entry firmware_enum)
{
uint8_t ret = 0;
@@ -226,7 +225,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_SDMA0;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_SDMA0;
else
ret = UCODE_ID_SDMA1;
@@ -244,7 +243,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_CP_MEC_JT1;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_CP_MEC_JT1;
else
ret = UCODE_ID_CP_MEC_JT2;
@@ -326,17 +325,17 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
}
static int cz_smu_populate_single_scratch_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
uint8_t type, bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = type;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -363,17 +362,17 @@ static int cz_smu_populate_single_scratch_task(
}
static int cz_smu_populate_single_ucode_load_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -392,22 +391,22 @@ static int cz_smu_populate_single_ucode_load_task(
return 0;
}
-static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_SAVE, true);
return 0;
}
-static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
+static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
{
int i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
@@ -416,17 +415,17 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_SAVE, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_SAVE, true);
@@ -434,121 +433,120 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
}
-static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id == CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
else
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
/* populate scratch */
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_LOAD, true);
return 0;
}
-static int cz_smu_construct_toc_for_power_profiling(
- struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
return 0;
}
-static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_used_count = 0;
- cz_smu_initialize_toc_empty_job_list(smumgr);
- cz_smu_construct_toc_for_rlc_aram_save(smumgr);
- cz_smu_construct_toc_for_vddgfx_enter(smumgr);
- cz_smu_construct_toc_for_vddgfx_exit(smumgr);
- cz_smu_construct_toc_for_power_profiling(smumgr);
- cz_smu_construct_toc_for_bootup(smumgr);
- cz_smu_construct_toc_for_clock_table(smumgr);
+ cz_smu_initialize_toc_empty_job_list(hwmgr);
+ cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
+ cz_smu_construct_toc_for_power_profiling(hwmgr);
+ cz_smu_construct_toc_for_bootup(hwmgr);
+ cz_smu_construct_toc_for_clock_table(hwmgr);
return 0;
}
-static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
+static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
uint32_t firmware_type;
uint32_t i;
int ret;
@@ -559,12 +557,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
- firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
+ firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
firmware_list[i]);
ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
ucode_id, &info);
if (ret == 0) {
@@ -585,12 +583,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
}
static int cz_smu_populate_single_scratch_entry(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry scratch_type,
uint32_t ulsize_byte,
struct cz_buffer_entry *entry)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
long long mc_addr =
((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
| cz_smu->smu_buffer.mc_addr_low;
@@ -611,9 +609,9 @@ static int cz_smu_populate_single_scratch_entry(
return 0;
}
-static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
+static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -624,25 +622,25 @@ static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
*table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
return 0;
}
-static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
+static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -651,63 +649,63 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
break;
}
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
return 0;
}
-static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
+static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
uint32_t smc_address;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
- cz_smu_populate_firmware_entries(smumgr);
+ cz_smu_populate_firmware_entries(hwmgr);
- cz_smu_construct_toc(smumgr);
+ cz_smu_construct_toc(hwmgr);
smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);
+ cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrHi,
cz_smu->toc_buffer.mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrLo,
cz_smu->toc_buffer.mc_addr_low);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_aram);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_power_profiling_index);
- return cz_send_msg_to_smc_with_parameter(smumgr,
+ return cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_initialize_index);
}
-static int cz_start_smu(struct pp_smumgr *smumgr)
+static int cz_start_smu(struct pp_hwmgr *hwmgr)
{
int ret = 0;
uint32_t fw_to_check = 0;
@@ -721,23 +719,23 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
UCODE_ID_CP_MEC_JT1_MASK |
UCODE_ID_CP_MEC_JT2_MASK;
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
- ret = cz_request_smu_load_fw(smumgr);
+ ret = cz_request_smu_load_fw(hwmgr);
if (ret)
pr_err("SMU firmware load failed\n");
- cz_check_fw_load_finish(smumgr, fw_to_check);
+ cz_check_fw_load_finish(hwmgr, fw_to_check);
- ret = cz_load_mec_firmware(smumgr);
+ ret = cz_load_mec_firmware(hwmgr);
if (ret)
pr_err("Mec Firmware load failed\n");
return ret;
}
-static int cz_smu_init(struct pp_smumgr *smumgr)
+static int cz_smu_init(struct pp_hwmgr *hwmgr)
{
uint64_t mc_addr = 0;
int ret = 0;
@@ -747,7 +745,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
if (cz_smu == NULL)
return -ENOMEM;
- smumgr->backend = cz_smu;
+ hwmgr->smu_backend = cz_smu;
cz_smu->toc_buffer.data_size = 4096;
cz_smu->smu_buffer.data_size =
@@ -757,7 +755,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->toc_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -770,7 +768,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -783,7 +781,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -791,14 +789,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -806,7 +804,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
sizeof(struct SMU8_MultimediaPowerLogData),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -814,7 +812,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
sizeof(struct SMU8_Fusion_ClkTable),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -825,18 +823,18 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_fini(struct pp_smumgr *smumgr)
+static int cz_smu_fini(struct pp_hwmgr *hwmgr)
{
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
if (cz_smu) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->toc_buffer.handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->smu_buffer.handle);
kfree(cz_smu);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
deleted file mode 100644
index 8712f093d6d9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ /dev/null
@@ -1,2498 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "fiji_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "fiji_smumgr.h"
-#include "pppcielanes.h"
-#include "smu7_ppsmc.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "smu7_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define VDDC_VDDCI_DELTA 300
-#define MC_CG_ARB_FREQ_F1 0x0b
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
- {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
- {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
- {1, 0xF, 0xFD,
- /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
- 0x19, 5, 45}
-};
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- * This is the unit expected by SMC firmware
- */
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- *voltage = *mvdd = 0;
-
-
- /* clock - voltage dependency table is empty table */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find first sclk bigger than request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
-{
- switch (line) {
- case SMU7_I2CLineID_DDC1:
- *scl = SMU7_I2C_DDC1CLK;
- *sda = SMU7_I2C_DDC1DATA;
- break;
- case SMU7_I2CLineID_DDC2:
- *scl = SMU7_I2C_DDC2CLK;
- *sda = SMU7_I2C_DDC2DATA;
- break;
- case SMU7_I2CLineID_DDC3:
- *scl = SMU7_I2C_DDC3CLK;
- *sda = SMU7_I2C_DDC3DATA;
- break;
- case SMU7_I2CLineID_DDC4:
- *scl = SMU7_I2C_DDC4CLK;
- *sda = SMU7_I2C_DDC4DATA;
- break;
- case SMU7_I2CLineID_DDC5:
- *scl = SMU7_I2C_DDC5CLK;
- *sda = SMU7_I2C_DDC5DATA;
- break;
- case SMU7_I2CLineID_DDC6:
- *scl = SMU7_I2C_DDC6CLK;
- *sda = SMU7_I2C_DDC6DATA;
- break;
- case SMU7_I2CLineID_SCLSDA:
- *scl = SMU7_I2C_SCL;
- *sda = SMU7_I2C_SDA;
- break;
- case SMU7_I2CLineID_DDCVGA:
- *scl = SMU7_I2C_DDCVGACLK;
- *sda = SMU7_I2C_DDCVGADATA;
- break;
- default:
- *scl = 0;
- *sda = 0;
- break;
- }
-}
-
-static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &fiji_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
-}
-
-static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table =
- &hwmgr->thermal_controller.advanceFanControlParameters;
- uint8_t uc_scl, uc_sda;
-
- /* TDP number of fraction bits are changed from 8 to 7 for Fiji
- * as requested by SMC team
- */
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
- /* The following are for new Fiji Multi-input fan/thermal control */
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid1 * 256);
- dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid2 * 256);
- dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrVddc * 256);
- dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrMvdd * 256);
- dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitPlx * 256);
-
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
- dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainLiquid));
- dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrVddc));
- dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
- dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainPlx));
- dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHbm));
-
- dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
- dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
- dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
- dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
- get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Liquid_I2C_LineSCL = uc_scl;
- dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Vr_I2C_LineSCL = uc_scl;
- dpm_table->Vr_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Plx_I2C_LineSCL = uc_scl;
- dpm_table->Plx_I2C_LineSDA = uc_sda;
-
- return 0;
-}
-
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- /* TDC number of fraction bits are changed from 8 to 7
- * for Fiji as requested by SMC team
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- smu_data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- smu_data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- 0 == hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
- return 0;
-}
-
-static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- uint32_t pm_fuse_table_offset;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- /* DW6 */
- if (fiji_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
- /* DW7 */
- if (fiji_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
- /* DW8 */
- if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != fiji_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW14 */
- if (fiji_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (fiji_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW19 */
- if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW20 */
- if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* tables is already swapped, so in order to use the value from it,
- * we need to swap it back.
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
-
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result;
-
- result = fiji_populate_cac_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_Ulv *state)
-{
- int result = 0;
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = 1;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
- }
- return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t ref_clock;
- uint32_t ref_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
- ref_clock = atomctrl_get_reference_clock(hwmgr);
- ref_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup */
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- struct pp_atomctrl_internal_ss_info ssInfo;
-
- uint32_t vco_freq = clock * dividers.uc_pll_post_div;
- if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
- vco_freq, &ssInfo)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- *
- * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
- */
- uint32_t clk_s = ref_clock * 5 /
- (ref_divider * ssInfo.speed_spectrum_rate);
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
- fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
- CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
- }
- }
-
- sclk->SclkFrequency = clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU73_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = fiji_calculate_sclk_params(hwmgr, clock, level);
-
- /* populate graphics levels */
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- (uint32_t *)(&level->MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
-
- level->SclkFrequency = clock;
- level->ActivityLevel = sclk_al_threshold;
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
- threshold = clock * data->fast_watermark_threshold / 100;
-
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
- return 0;
-}
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- struct SMU73_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = fiji_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &levels[i]);
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now.*/
- levels[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0))
- hightest_pcie_level_enabled++;
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP Bit[31:24] - 0x0
- * Bit[27:24] \96 DDR3 Frequency ratio
- * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz, 800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
- if (mem_clock <= 10000)
- return 0x0;
- if (mem_clock <= 15000)
- return 0x1;
- if (mem_clock <= 20000)
- return 0x2;
- if (mem_clock <= 25000)
- return 0x3;
- if (mem_clock <= 30000)
- return 0x4;
- if (mem_clock <= 35000)
- return 0x5;
- if (mem_clock <= 40000)
- return 0x6;
- if (mem_clock <= 45000)
- return 0x7;
- if (mem_clock <= 50000)
- return 0x8;
- if (mem_clock <= 55000)
- return 0x9;
- if (mem_clock <= 60000)
- return 0xa;
- if (mem_clock <= 65000)
- return 0xb;
- if (mem_clock <= 70000)
- return 0xc;
- if (mem_clock <= 75000)
- return 0xd;
- if (mem_clock <= 80000)
- return 0xe;
- /* mem_clock > 800MHz */
- return 0xf;
-}
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the memory clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
- struct pp_atomctrl_memory_clock_param mem_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to get Memory PLL Dividers.",
- );
-
- /* Save the result data to outpupt memory level structure */
- mclk->MclkFrequency = clock;
- mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
- mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
-
- return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- uint32_t mclk_stutter_mode_threshold = 60000;
-
- if (table_info->vdd_dep_on_mclk) {
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
-
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- /* enable stutter mode if all the follow condition applied
- * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
- * &(data->DisplayTiming.numExistingDisplays));
- */
- data->display_timing.num_existing_displays = 1;
-
- if (mclk_stutter_mode_threshold &&
- (clock <= mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
- SMU73_MAX_LEVELS_MEMORY;
- struct SMU73_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = fiji_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now. */
- levels[0].EnabledForActivity = 1;
-
- /* in order to prevent MC activity from stutter mode to push DPM up.
- * the UVD change complements this by putting the MCLK in
- * a higher state by default such that we are not effected by
- * up threshold or and MCLK DPM latency.
- */
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high */
- levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param voltage the SMC VOLTAGE structure to be populated
-*/
-static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find mvdd value which clock is more than request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- table->ACPILevel.SclkFrequency =
- data->dpm_table.sclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value " \
- "in Clock Dependency Table",
- );
- } else {
- table->ACPILevel.SclkFrequency =
- data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
- );
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
-
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!fiji_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /*retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t)(mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burstTime;
- ULONG state, trrds, trrdl;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
- trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
- trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
- arb_regs->TRRDS = (uint8_t)trrds;
- arb_regs->TRRDL = (uint8_t)trrdl;
-
- return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = fiji_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result)
- break;
- }
- }
-
- if (!result)
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU73_Discrete_MCArbDramTimingTable),
- SMC_RAM_END);
- return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
- }
- return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- smu_data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- smu_data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- /* Read SMU_Eefuse to read and calculate RO and determine
- * if the part is SS or FF. if RO >= 1660MHz, part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
-
- /* Populate Stretch amount */
- smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][0];
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
- GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (fiji_clock_stretch_amount_conversion
- [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
- /* Check the allowed frequency against the sclk level[j].
- * Sclk's endianness has already been converted,
- * and it's in 10Khz unit,
- * as opposed to Data table, which is in Mhz unit.
- */
- if (clock_freq >=
- (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq <
- (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
- /* This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table only
- * individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct SMU73_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-
- return 0;
-}
-
-static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
-{
- pp_atomctrl_voltage_table param_led_dpm;
- int result = 0;
- u32 mask = 0;
-
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
- &param_led_dpm);
- if (result == 0) {
- int i, j;
- u32 tmp = param_led_dpm.mask_low;
-
- for (i = 0, j = 0; i < 32; i++) {
- if (tmp & 1) {
- mask |= (i << (8 * j));
- if (++j >= 3)
- break;
- }
- tmp >>= 1;
- }
- }
- if (mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_LedConfig,
- mask);
- return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
-int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
- fiji_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
- fiji_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = fiji_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, 0x40035);
- }
-
- result = fiji_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = fiji_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = fiji_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = fiji_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result);
-
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = fiji_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = fiji_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = fiji_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
- result = fiji_populate_smc_initailial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = fiji_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = fiji_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
- /* For porlarity read GPIOPAD_A with assigned Gpio pin
- * since VBIOS will program this register to set 'inactive state',
- * driver can then determine 'active state' from this and
- * program SMU with correct polarity
- */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
- SMC_RAM_END);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- result = fiji_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload arb data to SMC memory!", return result);
-
- result = fiji_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate PM fuses to SMC memory!", return result);
-
- result = fiji_setup_dpm_led_config(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup dpm led config", return result);
-
- fiji_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (smu_data->smu7_data.fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
-
-
-int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
- return 0;
-
- ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return fiji_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
- result = fiji_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters!",
- );
- return result;
-}
-
-uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU73_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU73_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU73_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t fiji_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU73_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU73_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU73_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU73_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU73_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU73_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU73_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU73_MAX_LEVELS_MVDD;
- }
-
- pr_warn("can't get the mac of %x\n", value);
- return 0;
-}
-
-
-static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
- UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
-
-static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- smu_data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- fiji_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- fiji_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- fiji_update_samu_smc_table(hwmgr);
- break;
- default:
- break;
- }
- return 0;
-}
-
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (0 == result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-
- /* Program additional LP registers
- * that are no longer programmed by VBIOS
- */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
- return 0;
-}
-
-bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU73_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
deleted file mode 100644
index d9c72d992e30..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_SMC_H
-#define FIJI_SMC_H
-
-#include "smumgr.h"
-#include "smu73.h"
-
-struct fiji_pt_defaults {
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-};
-
-int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
-int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
-uint32_t fiji_get_mac_definition(uint32_t value);
-int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
-int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
-int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 6ae948fc524f..f572beff197f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -23,6 +23,7 @@
#include "pp_debug.h"
#include "smumgr.h"
+#include "smu7_dyn_defaults.h"
#include "smu73.h"
#include "smu_ucode_xfer_vi.h"
#include "fiji_smumgr.h"
@@ -37,14 +38,54 @@
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-#include "fiji_pwrvirus.h"
-#include "fiji_smc.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "hardwaremanager.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
#define FIJI_SMC_SIZE 0x20000
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define VDDC_VDDCI_DELTA 300
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
+ * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* [FF, SS] type, [] 4 voltage ranges, and
+ * [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+ {1, 0xF, 0xFD,
+ /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+ 0x19, 5, 45}
+};
+
static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
/* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */
/* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
@@ -58,147 +99,114 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
{ 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
};
-static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0); */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for ROM firmware to initialize interrupt hendler */
- /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
+ /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND,
SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
INTERRUPTS_ENABLED, 1);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS) != 1) {
PP_ASSERT_WITH_CODE(false,
"SMU Firmware start failed!", return -1);
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
-{
- int i;
- int result = -EINVAL;
- uint32_t reg, data;
-
- const PWR_Command_Table *pvirus = PwrVirusTable;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
- switch (pvirus->command) {
- case PwrCmdWrite:
- reg = pvirus->reg;
- data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
- break;
-
- case PwrCmdEnd:
- result = 0;
- break;
-
- default:
- pr_info("Table Exit with Invalid Command!");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -EINVAL;
- break;
- }
- pvirus++;
- }
-
- return result;
-}
-
-static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
+static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
@@ -206,23 +214,23 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
}
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
/* clear reset */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
return result;
}
-static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
int32_t vr_config;
uint32_t table_start;
uint32_t level_addr, vr_config_addr;
uint32_t level_size = sizeof(avfs_graphics_level);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable),
&table_start, 0x40000),
@@ -237,7 +245,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_addr = table_start +
offsetof(SMU73_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr,
(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
"vr_config value over to SMC",
@@ -245,7 +253,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr,
(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1;);
@@ -253,9 +261,9 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -265,17 +273,17 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
if (!smu_started)
break;
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
" table over to SMU",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not setup "
"Pwr Virus for AVFS ",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Failure at "
"fiji_start_avfs_btc. AVFS Disabled",
return -EINVAL;);
@@ -293,64 +301,64 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
return 0;
}
-static int fiji_start_smu(struct pp_smumgr *smumgr)
+static int fiji_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr)
- || cgs_is_virtualization_enabled(smumgr->device))) {
- fiji_avfs_event_mgr(smumgr, false);
+ if (!(smu7_is_smc_ram_running(hwmgr)
+ || cgs_is_virtualization_enabled(hwmgr->device))) {
+ fiji_avfs_event_mgr(hwmgr, false);
/* Check if SMU is running in protected mode */
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = fiji_start_smu_in_non_protection_mode(smumgr);
+ result = fiji_start_smu_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = fiji_start_smu_in_protection_mode(smumgr);
+ result = fiji_start_smu_in_protection_mode(hwmgr);
if (result)
return result;
}
- fiji_avfs_event_mgr(smumgr, true);
+ fiji_avfs_event_mgr(hwmgr, true);
}
/* To initialize all clock gating before RLC loaded and running.*/
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
/* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/
- smu7_read_smc_sram_dword(smumgr,
+ smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters),
&(priv->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
- if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
+ if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) {
if (efuse)
return true;
@@ -358,14 +366,7 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
return false;
}
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
-static int fiji_smu_init(struct pp_smumgr *smumgr)
+static int fiji_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct fiji_smumgr *fiji_priv = NULL;
@@ -375,9 +376,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
if (fiji_priv == NULL)
return -ENOMEM;
- smumgr->backend = fiji_priv;
+ hwmgr->smu_backend = fiji_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
@@ -386,6 +387,2334 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ *voltage = *mvdd = 0;
+
+
+ /* clock - voltage dependency table is empty table */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
+
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case SMU7_I2CLineID_DDC1:
+ *scl = SMU7_I2C_DDC1CLK;
+ *sda = SMU7_I2C_DDC1DATA;
+ break;
+ case SMU7_I2CLineID_DDC2:
+ *scl = SMU7_I2C_DDC2CLK;
+ *sda = SMU7_I2C_DDC2DATA;
+ break;
+ case SMU7_I2CLineID_DDC3:
+ *scl = SMU7_I2C_DDC3CLK;
+ *sda = SMU7_I2C_DDC3DATA;
+ break;
+ case SMU7_I2CLineID_DDC4:
+ *scl = SMU7_I2C_DDC4CLK;
+ *sda = SMU7_I2C_DDC4DATA;
+ break;
+ case SMU7_I2CLineID_DDC5:
+ *scl = SMU7_I2C_DDC5CLK;
+ *sda = SMU7_I2C_DDC5DATA;
+ break;
+ case SMU7_I2CLineID_DDC6:
+ *scl = SMU7_I2C_DDC6CLK;
+ *sda = SMU7_I2C_DDC6DATA;
+ break;
+ case SMU7_I2CLineID_SCLSDA:
+ *scl = SMU7_I2C_SCL;
+ *sda = SMU7_I2C_SDA;
+ break;
+ case SMU7_I2CLineID_DDCVGA:
+ *scl = SMU7_I2C_DDCVGACLK;
+ *sda = SMU7_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &fiji_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+
+}
+
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ uint8_t uc_scl, uc_sda;
+
+ /* TDP number of fraction bits are changed from 8 to 7 for Fiji
+ * as requested by SMC team
+ */
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+ /* The following are for new Fiji Multi-input fan/thermal control */
+ dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+ dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+ dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrVddc * 256);
+ dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+ dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitPlx * 256);
+
+ dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+ dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainLiquid));
+ dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+ dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+ dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainPlx));
+ dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+ dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+ dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+ dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+ dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+ get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Liquid_I2C_LineSCL = uc_scl;
+ dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Vr_I2C_LineSCL = uc_scl;
+ dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Plx_I2C_LineSCL = uc_scl;
+ dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+ return 0;
+}
+
+
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ /* TDC number of fraction bits are changed from 8 to 7
+ * for Fiji as requested by SMC team
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ 0 == hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ uint32_t pm_fuse_table_offset;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (fiji_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (fiji_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != fiji_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (fiji_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (fiji_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW20 */
+ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* tables is already swapped, so in order to use the value from it,
+ * we need to swap it back.
+ * We are populating vddc CAC data to BapmVddc table
+ * in split and merged mode
+ */
+
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = fiji_populate_cac_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_Ulv *state)
+{
+ int result = 0;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+ }
+ return result;
+}
+
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ssInfo;
+
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ssInfo)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ *
+ * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+ */
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ssInfo.speed_spectrum_rate);
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU73_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t threshold, mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = fiji_calculate_sclk_params(hwmgr, clock, level);
+
+ /* populate graphics levels */
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ (uint32_t *)(&level->MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+
+ level->SclkFrequency = clock;
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+
+ threshold = clock * data->fast_watermark_threshold / 100;
+
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+ return 0;
+}
+
+static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = fiji_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now.*/
+ levels[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP Bit[31:24] - 0x0
+ * Bit[27:24] \96 DDR3 Frequency ratio
+ * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
+ * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
+ * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
+ * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
+ * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
+ * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
+ * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
+ * 400 < 0x7 <= 450MHz, 800 < 0xF
+ */
+static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
+{
+ if (mem_clock <= 10000)
+ return 0x0;
+ if (mem_clock <= 15000)
+ return 0x1;
+ if (mem_clock <= 20000)
+ return 0x2;
+ if (mem_clock <= 25000)
+ return 0x3;
+ if (mem_clock <= 30000)
+ return 0x4;
+ if (mem_clock <= 35000)
+ return 0x5;
+ if (mem_clock <= 40000)
+ return 0x6;
+ if (mem_clock <= 45000)
+ return 0x7;
+ if (mem_clock <= 50000)
+ return 0x8;
+ if (mem_clock <= 55000)
+ return 0x9;
+ if (mem_clock <= 60000)
+ return 0xa;
+ if (mem_clock <= 65000)
+ return 0xb;
+ if (mem_clock <= 70000)
+ return 0xc;
+ if (mem_clock <= 75000)
+ return 0xd;
+ if (mem_clock <= 80000)
+ return 0xe;
+ /* mem_clock > 800MHz */
+ return 0xf;
+}
+
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+ struct pp_atomctrl_memory_clock_param mem_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to get Memory PLL Dividers.",
+ );
+
+ /* Save the result data to outpupt memory level structure */
+ mclk->MclkFrequency = clock;
+ mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
+ mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
+
+ return result;
+}
+
+static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ uint32_t mclk_stutter_mode_threshold = 60000;
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ /* enable stutter mode if all the follow condition applied
+ * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+ * &(data->DisplayTiming.numExistingDisplays));
+ */
+ data->display_timing.num_existing_displays = 1;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+ SMU73_MAX_LEVELS_MEMORY;
+ struct SMU73_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = fiji_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now. */
+ levels[0].EnabledForActivity = 1;
+
+ /* in order to prevent MC activity from stutter mode to push DPM up.
+ * the UVD change complements this by putting the MCLK in
+ * a higher state by default such that we are not effected by
+ * up threshold or and MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high */
+ levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find mvdd value which clock is more than request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (!data->sclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ table->ACPILevel.SclkFrequency =
+ data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ table->ACPILevel.SclkFrequency,
+ (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value " \
+ "in Clock Dependency Table",
+ );
+ } else {
+ table->ACPILevel.SclkFrequency =
+ data->vbios_boot_state.sclk_bootup_value;
+ table->ACPILevel.MinVoltage =
+ data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+ }
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ if (!data->mclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
+ );
+ } else {
+ table->MemoryACPILevel.MclkFrequency =
+ data->vbios_boot_state.mclk_bootup_value;
+ table->MemoryACPILevel.MinVoltage =
+ data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+ }
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!fiji_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->VceLevel[count].MinVoltage |=
+ ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /*retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t)(mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burstTime;
+ ULONG state, trrds, trrdl;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
+ state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
+ trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+ trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+ arb_regs->TRRDS = (uint8_t)trrds;
+ arb_regs->TRRDL = (uint8_t)trrdl;
+
+ return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = fiji_populate_memory_timing_parameters(hwmgr,
+ data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result)
+ break;
+ }
+ }
+
+ if (!result)
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU73_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+ }
+ return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read SMU_Eefuse to read and calculate RO and determine
+ * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (fiji_clock_stretch_amount_conversion
+ [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+ /* Check the allowed frequency against the sclk level[j].
+ * Sclk's endianness has already been converted,
+ * and it's in 10Khz unit,
+ * as opposed to Data table, which is in Mhz unit.
+ */
+ if (clock_freq >=
+ (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq <
+ (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Workaround compute SDMA instability: disable lowest SCLK
+ * DPM level. Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available), Hysteresis:
+ * 0ms up, 5ms down
+ */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+
+ return 0;
+}
+
+static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
+{
+ pp_atomctrl_voltage_table param_led_dpm;
+ int result = 0;
+ u32 mask = 0;
+
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
+ &param_led_dpm);
+ if (result == 0) {
+ int i, j;
+ u32 tmp = param_led_dpm.mask_low;
+
+ for (i = 0, j = 0; i < 32; i++) {
+ if (tmp & 1) {
+ mask |= (i << (8 * j));
+ if (++j >= 3)
+ break;
+ }
+ tmp >>= 1;
+ }
+ }
+ if (mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_LedConfig,
+ mask);
+ return 0;
+}
+
+static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+
+ fiji_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ fiji_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = fiji_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = fiji_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = fiji_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = fiji_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = fiji_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = fiji_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = fiji_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+ result = fiji_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = fiji_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = fiji_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = fiji_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = fiji_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = fiji_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = fiji_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+ /* For porlarity read GPIOPAD_A with assigned Gpio pin
+ * since VBIOS will program this register to set 'inactive state',
+ * driver can then determine 'active state' from this and
+ * program SMU with correct polarity
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = fiji_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = fiji_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ result = fiji_setup_dpm_led_config(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to setup dpm led config", return result);
+
+ fiji_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+
+static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+ if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ return 0;
+
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return fiji_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ result = fiji_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+ return result;
+}
+
+static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t fiji_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU73_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU73_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU73_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU73_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU73_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU73_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU73_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU73_MAX_LEVELS_MVDD;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+ UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ fiji_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ fiji_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ fiji_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+
+ /* Program additional LP registers
+ * that are no longer programmed by VBIOS
+ */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+ return 0;
+}
+
+static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
const struct pp_smumgr_func fiji_smu_funcs = {
.smu_init = &fiji_smu_init,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 175bf9f8ef9c..279647772578 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -28,6 +28,15 @@
#include "smu7_smumgr.h"
+struct fiji_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+};
+
struct fiji_smumgr {
struct smu7_smumgr smu7_data;
struct SMU73_Discrete_DpmTable smc_state_table;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 0bf2def3b659..34128822b8fb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -30,64 +30,133 @@
#include "smumgr.h"
#include "iceland_smumgr.h"
-#include "smu_ucode_xfer_vi.h"
+
#include "ppsmc.h"
+
+#include "cgs_common.h"
+
+#include "smu7_dyn_defaults.h"
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "processpptables.h"
+
+
#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"
-#include "cgs_common.h"
-#include "iceland_smc.h"
+#include "smu71_discrete.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
#define ICELAND_SMC_SIZE 0x20000
-static int iceland_start_smc(struct pp_smumgr *smumgr)
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
+
+static const struct iceland_pt_defaults defaults_iceland = {
+ /*
+ * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+ * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static const struct iceland_pt_defaults defaults_icelandxt = {
+ /*
+ * sviLoadLIneEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static const struct iceland_pt_defaults defaults_icelandpro = {
+ /*
+ * sviLoadLIneEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+static int iceland_start_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
return 0;
}
-static void iceland_reset_smc(struct pp_smumgr *smumgr)
+static void iceland_reset_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
}
-static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 1);
}
-static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 0);
}
-static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr)
{
/* set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* enable smc clock */
- iceland_start_smc_clock(smumgr);
+ iceland_start_smc_clock(hwmgr);
/* de-assert reset */
- iceland_start_smc(smumgr);
+ iceland_start_smc(hwmgr);
- SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
INTERRUPTS_ENABLED, 1);
return 0;
}
-static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr,
uint32_t length, const uint8_t *src,
uint32_t limit, uint32_t start_addr)
{
@@ -96,34 +165,34 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
while (byte_count >= 4) {
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
}
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
-static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr)
{
uint32_t val;
struct cgs_firmware_info info = {0};
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
/* load SMC firmware */
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
if (info.image_size & 3) {
@@ -137,68 +206,61 @@ static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
}
/* wait for smc boot up */
- SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* clear firmware interrupt enable flag */
- val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL, val | 1);
/* stop smc clock */
- iceland_stop_smc_clock(smumgr);
+ iceland_stop_smc_clock(hwmgr);
/* reset smc */
- iceland_reset_smc(smumgr);
- iceland_upload_smc_firmware_data(smumgr, info.image_size,
+ iceland_reset_smc(hwmgr);
+ iceland_upload_smc_firmware_data(hwmgr, info.image_size,
(uint8_t *)info.kptr, ICELAND_SMC_SIZE,
info.ucode_start_address);
return 0;
}
-static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr,
uint32_t firmwareType)
{
return 0;
}
-static int iceland_start_smu(struct pp_smumgr *smumgr)
+static int iceland_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
pr_info("smu not running, upload firmware again \n");
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-static int iceland_smu_init(struct pp_smumgr *smumgr)
+static int iceland_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct iceland_smumgr *iceland_priv = NULL;
@@ -208,9 +270,9 @@ static int iceland_smu_init(struct pp_smumgr *smumgr)
if (iceland_priv == NULL)
return -ENOMEM;
- smumgr->backend = iceland_priv;
+ hwmgr->smu_backend = iceland_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
@@ -219,6 +281,2413 @@ static int iceland_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ switch (dev_id) {
+ case DEVICE_ID_VI_ICELAND_M_6900:
+ case DEVICE_ID_VI_ICELAND_M_6903:
+ smu_data->power_tune_defaults = &defaults_icelandxt;
+ break;
+
+ case DEVICE_ID_VI_ICELAND_M_6901:
+ case DEVICE_ID_VI_ICELAND_M_6902:
+ smu_data->power_tune_defaults = &defaults_icelandpro;
+ break;
+ default:
+ smu_data->power_tune_defaults = &defaults_iceland;
+ pr_warn("Unknown V.I. Device ID.\n");
+ break;
+ }
+ return;
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 8; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+ "The CAC Leakage table does not exist!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+ "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+ "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ }
+ } else {
+ PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+ }
+
+ return 0;
+}
+
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t *vid = smu_data->power_tune_table.VddCVid;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+ "There should never be more than 8 entries for VddcVid!!!",
+ return -EINVAL);
+
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+ }
+
+ return 0;
+}
+
+
+
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW0 - DW3 */
+ if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate bapm vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW4 - DW5 */
+ if (iceland_populate_vddc_vid(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (iceland_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (iceland_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != iceland_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW16 */
+ if (iceland_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW18 */
+ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ /* clock - voltage dependency table is empty table */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+ return 0;
+}
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+ uint16_t *lo)
+{
+ uint16_t v_index;
+ bool vol_found = false;
+ *hi = tab->value * VOLTAGE_SCALE;
+ *lo = tab->value * VOLTAGE_SCALE;
+
+ /* SCLK/VDDC Dependency Table has to exist. */
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+ "The SCLK/VDDC Dependency Table does not exist.\n",
+ return -EINVAL);
+
+ if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+ pr_warn("CAC Leakage Table does not exist, using vddc.\n");
+ return 0;
+ }
+
+ /*
+ * Since voltage in the sclk/vddc dependency table is not
+ * necessarily in ascending order because of ELB voltage
+ * patching, loop through entire list to find exact voltage.
+ */
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+ } else {
+ pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ /*
+ * If voltage is not found in the first pass, loop again to
+ * find the best match, equal or higher value.
+ */
+ if (!vol_found) {
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+ } else {
+ pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ if (!vol_found)
+ pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab,
+ SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+ int result;
+
+ result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ &smc_voltage_tab->StdVoltageHiSidd,
+ &smc_voltage_tab->StdVoltageLoSidd);
+ if (0 != result) {
+ smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+ smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+
+ return 0;
+}
+
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddc_voltage_table.entries[count]),
+ &(table->VddcLevel[count]));
+ PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
+
+ /* GPIO voltage control */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+ table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->VddcLevel[count].Smio = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddci_voltage_table.entries[count]),
+ &(table->VddciLevel[count]));
+ PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->mvdd_voltage_table.entries[count]),
+ &table->MvddLevel[count]);
+ PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+ table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+ return 0;
+}
+
+
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = iceland_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDC voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate MVDD voltage table to SMC", return -EINVAL);
+
+ return 0;
+}
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU71_Discrete_Ulv *state)
+{
+ uint32_t voltage_response_time, ulv_voltage;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+ PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+ if (ulv_voltage == 0) {
+ data->ulv_supported = false;
+ return 0;
+ }
+
+ if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffset = 0;
+ else
+ /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+ state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+ } else {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffsetVid = 0;
+ else /* used in SVI2 Mode */
+ state->VddcOffsetVid = (uint8_t)(
+ (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+ * VOLTAGE_VID_OFFSET_SCALE2
+ / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_Ulv *ulv_level)
+{
+ return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+ &graphic_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for VDDC \
+ engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ graphic_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ iceland_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 100;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+ SMU71_MAX_LEVELS_GRAPHICS;
+
+ SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = iceland_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0) {
+ highest_pcie_level_enabled++;
+ }
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+ count++;
+ }
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+ }
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int iceland_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+ /* Save the result data to outpupt memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 47500) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ }
+ } else {
+ if (memory_clock < 65000) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 135000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+ }
+
+ return mc_para_index;
+}
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000) {
+ mc_para_index = 0;
+ } else if (memory_clock >= 80000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+ }
+
+ return mc_para_index;
+}
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+ uint32_t memory_clock, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (memory_clock < pl->entries[i].Mclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *memory_level
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+ }
+
+ if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+ memory_level->MinVddci = memory_level->MinVddc;
+ } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddci_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinVddci);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control) {
+ iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ memory_clock, &memory_level->MinVddcPhases);
+ }
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ /* stutter mode not support on iceland */
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ else
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ } else
+ dll_state_on = data->dll_default_on;
+ } else {
+ memory_level->StrobeRatio =
+ iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = iceland_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (0 == result) {
+ memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+ memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+ SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero", return -EINVAL);
+ result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (0 != result) {
+ return result;
+ }
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ /*
+ * in order to prevent MC activity from stutter mode to push DPM up.
+ * the UVD change complements this by putting the MCLK in a higher state
+ * by default such that we are not effected by up threshold or and MCLK DPM latency.
+ */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU71_Discrete_VoltageLevel *voltage)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find mvdd value which clock is more than request */
+ for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+ if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+ "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t vddc_phase_shed_control = 0;
+
+ SMU71_Discrete_VoltageLevel voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (data->acpi_vddc)
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ /* assign zero for now*/
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+ else {
+ if (data->acpi_vddci != 0)
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+ }
+
+ if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = iceland_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (0 != result) {
+ break;
+ }
+ }
+ }
+
+ if (0 == result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Graphics DPM level 0!");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Memory DPM level 0!");
+ result = 0;
+ }
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->BootVddci = table->BootVddc;
+ else
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ return result;
+}
+
+static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/*convert register values from driver to SMC format */
+static void iceland_convert_mc_registers(
+ const struct iceland_mc_reg_entry *entry,
+ SMU71_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
+ const uint32_t memory_clock,
+ SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = iceland_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(hwmgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+ result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for driver state!", return result;);
+
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t count, level;
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+ >= data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+ >= data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
+
+
+ /*
+ * TDP number of fraction bits are changed from 8 to 7 for Iceland
+ * as requested by SMC team
+ */
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+ dpm_table->DTETjOffset = 0;
+
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ /* The following are for new Iceland Multi-input fan/thermal control */
+ if (NULL != ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU71_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *tab)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ tab->SVI2Enable |= VDDC_ON_SVI2;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ tab->SVI2Enable |= VDDCI_ON_SVI2;
+ else
+ tab->MergedVddci = 1;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+ tab->SVI2Enable |= MVDD_ON_SVI2;
+
+ PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+ (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+ return 0;
+}
+
+static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+
+
+ iceland_initialize_power_tune_defaults(hwmgr);
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+ iceland_populate_smc_voltage_tables(hwmgr, table);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+ if (data->ulv_supported) {
+ result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = iceland_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result;);
+
+ result = iceland_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result;);
+
+ result = iceland_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result;);
+
+ result = iceland_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result;);
+
+ result = iceland_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result;);
+
+ result = iceland_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result;);
+
+ result = iceland_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result;);
+
+ /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
+ /* need to populate the ARB settings for the initial state. */
+ result = iceland_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result;);
+
+ result = iceland_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result;);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ result = iceland_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result;);
+
+ result = iceland_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+ result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+
+ table->TemperatureLimitHigh =
+ (data->thermal_temp_setting.temperature_high *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ table->TemperatureLimitLow =
+ (data->thermal_temp_setting.temperature_low *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+
+ result = iceland_populate_smc_svi2_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate SVI2 setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result;);
+
+ /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.ulv_setting_starts,
+ (uint8_t *)&(smu_data->ulv_setting),
+ sizeof(SMU71_Discrete_Ulv),
+ SMC_RAM_END);
+
+
+ result = iceland_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate initialize MC Reg table!", return result);
+
+ result = iceland_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ return 0;
+}
+
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (0 == smu7_data->fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ /* fan_table.FanControl_GL_Flag = 1; */
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+ return 0;
+}
+
+
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return iceland_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+ result = iceland_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t iceland_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU71_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU71_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU71_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU71_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU71_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU71_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU71_MAX_LEVELS_MVDD;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->dpm_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ data->soft_regs_start = tmp;
+ smu7_data->soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->mc_reg_table_start = tmp;
+ }
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->fan_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->arb_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ hwmgr->microcode_version_info.SMC = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, UlvSettings),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->ulv_setting_starts = tmp;
+ }
+
+ error |= (0 != result);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ? address : table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct iceland_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++) {
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+ }
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j;
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (0 == result)
+ result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (0 == result) {
+ iceland_set_s0_mc_reg_index(ni_table);
+ result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (0 == result)
+ iceland_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
const struct pp_smumgr_func iceland_smu_funcs = {
.smu_init = &iceland_smu_init,
.smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
index 8eae01b37c40..802472530d34 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -39,7 +39,7 @@ struct iceland_pt_defaults {
uint8_t tdc_waterfall_ctl;
uint8_t dte_ambient_temp_base;
uint32_t display_cac;
- uint32_t bamp_temp_gradient;
+ uint32_t bapm_temp_gradient;
uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
deleted file mode 100644
index 99a00bd39256..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ /dev/null
@@ -1,2364 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "polaris10_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "polaris10_smumgr.h"
-#include "pppcielanes.h"
-
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
-#include "smu7_ppsmc.h"
-#include "smu7_smumgr.h"
-
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VDDC_VDDCI_DELTA 200
-#define MC_CG_ARB_FREQ_F1 0x0b
-
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
- { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
- { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
-
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
- {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
- {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
- {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
-
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
- /* clock - voltage dependency table is empty table */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find first sclk bigger than request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
-
-static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table =
- &hwmgr->thermal_controller.advanceFanControlParameters;
- int i, j, k;
- const uint16_t *pdef1;
- const uint16_t *pdef2;
-
- table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
- table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
- pdef1 = defaults->BAPMTI_R;
- pdef2 = defaults->BAPMTI_RC;
-
- for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU74_DTE_SOURCES; j++) {
- for (k = 0; k < SMU74_DTE_SINKS; k++) {
- table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
- table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- smu_data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- smu_data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
-/* TO DO move to hwmgr */
- if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
- || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
-
-static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- if (polaris10_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
-
- if (polaris10_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
-
- if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- if (0 != polaris10_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- if (polaris10_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- if (polaris10_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count, level;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- count = data->mvdd_voltage_table.count;
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; level++) {
- table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[level].Smio =
- (uint8_t) level;
- table->Smio[level] |=
- data->mvdd_voltage_table.entries[level].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
- }
-
- return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count, level;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- count = data->vddci_voltage_table.count;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; ++level) {
- table->SmioTable1.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
- table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
- table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* tables is already swapped, so in order to use the value from it,
- * we need to swap it back.
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- polaris10_populate_smc_vddci_table(hwmgr, table);
- polaris10_populate_smc_mvdd_table(hwmgr, table);
- polaris10_populate_cac_table(hwmgr, table);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_Ulv *state)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker)
- state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
- else
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
-
-/* To Do move to hwmgr */
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-
-static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t i, ref_clk;
-
- struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
-
- ref_clk = smu7_get_xclk(hwmgr);
-
- if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
- table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
- return;
- }
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
- smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
-
- table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
- table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, SMU_SclkSetting *sclk_setting)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct pp_atomctrl_clock_dividers_ai dividers;
- uint32_t ref_clock;
- uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
- uint8_t i;
- int result;
- uint64_t temp;
-
- sclk_setting->SclkFrequency = clock;
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
- if (result == 0) {
- sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
- sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
- sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
- sclk_setting->PllRange = dividers.ucSclkPllRange;
- sclk_setting->Sclk_slew_rate = 0x400;
- sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
- sclk_setting->Pcc_down_slew_rate = 0xffff;
- sclk_setting->SSc_En = dividers.ucSscEnable;
- sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
- sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
- sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
- return result;
- }
-
- ref_clock = smu7_get_xclk(hwmgr);
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- if (clock > smu_data->range_table[i].trans_lower_frequency
- && clock <= smu_data->range_table[i].trans_upper_frequency) {
- sclk_setting->PllRange = i;
- break;
- }
- }
-
- sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw_frac = temp & 0xffff;
-
- pcc_target_percent = 10; /* Hardcode 10% for now. */
- pcc_target_freq = clock - (clock * pcc_target_percent / 100);
- sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-
- ss_target_percent = 2; /* Hardcode 2% for now. */
- sclk_setting->SSc_En = 0;
- if (ss_target_percent) {
- sclk_setting->SSc_En = 1;
- ss_target_freq = clock - (clock * ss_target_percent / 100);
- sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw1_frac = temp & 0xffff;
- }
-
- return 0;
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU74_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMU_SclkSetting curr_sclk_setting = { 0 };
-
- result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
- /* populate graphics levels */
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
-
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
- level->ActivityLevel = sclk_al_threshold;
-
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- if (data->update_up_hyst)
- level->UpHyst = (uint8_t)data->up_hyst;
- if (data->update_down_hyst)
- level->DownHyst = (uint8_t)data->down_hyst;
-
- level->SclkSetting = curr_sclk_setting;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
- return 0;
-}
-
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- struct SMU74_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
-
- result = polaris10_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SPLLShutdownSupport))
- smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
-
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
- while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0))
- hightest_pcie_level_enabled++;
-
- while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- struct cgs_display_info info = {0, 0, NULL};
- uint32_t mclk_stutter_mode_threshold = 40000;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (table_info->vdd_dep_on_mclk) {
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->MclkFrequency = clock;
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- data->display_timing.num_existing_displays = info.display_count;
-
- if (mclk_stutter_mode_threshold &&
- (clock <= mclk_stutter_mode_threshold) &&
- (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
- SMU74_MAX_LEVELS_MEMORY;
- struct SMU74_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = polaris10_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (i == dpm_table->mclk_table.count - 1) {
- levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- levels[i].EnabledForActivity = 1;
- }
- if (result)
- return result;
- }
-
- /* In order to prevent MC activity from stutter mode to push DPM up,
- * the UVD change complements this by putting the MCLK in
- * a higher state by default such that we are not affected by
- * up threshold or and MCLK DPM latency.
- */
- levels[0].ActivityLevel = 0x1f;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param voltage the SMC VOLTAGE structure to be populated
-*/
-static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find mvdd value which clock is more than request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- uint32_t sclk_frequency;
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- sclk_frequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",
- );
-
- result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
- PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- table->ACPILevel.DeepSleepDivId = 0;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
-
-
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",
- );
-
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
- table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- table->MemoryACPILevel.StutterEnable = false;
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-
- table->VceLevel[count].MinVoltage |=
- (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /*retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burst_time;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burst_time;
-
- return 0;
-}
-
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
- result = polaris10_populate_memory_timing_parameters(hwmgr,
- hw_data->dpm_table.sclk_table.dpm_levels[i].value,
- hw_data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result == 0)
- result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
- if (result != 0)
- return result;
- }
- }
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU74_Discrete_MCArbDramTimingTable),
- SMC_RAM_END);
- return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
- }
-
- return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- hw_data->vbios_boot_state.sclk_bootup_value) {
- smu_data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- hw_data->vbios_boot_state.mclk_bootup_value) {
- smu_data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- /* Read SMU_Eefuse to read and calculate RO and determine
- * if the part is SS or FF. if RO >= 1660MHz, part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (67 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
-
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
- } else {
- min = 1100;
- max = 2100;
- }
-
- ro = efuse * (max - min) / 255 + min;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
- (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
- volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
- (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
- } else {
- volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
- (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
- volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
- (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
- }
-
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
-
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
- offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-
-static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- int result = 0;
- struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
- AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
- AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
- uint32_t tmp, i;
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
-
- if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return result;
-
- result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
- if (0 == result) {
- table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
- table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
- table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
- table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
- table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
- table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
- table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
- table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
- table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
- table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
- table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
- table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
- table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
- table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
- table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
- AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
- AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
- AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
- AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
- AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
- AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
- AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
- for (i = 0; i < NUM_VFT_COLUMNS; i++) {
- AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
- AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
- }
-
- result = smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
- &tmp, SMC_RAM_END);
-
- smu7_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_meanNsigma,
- sizeof(AVFS_meanNsigma_t),
- SMC_RAM_END);
-
- result = smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
- &tmp, SMC_RAM_END);
- smu7_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_SclkOffset,
- sizeof(AVFS_Sclk_Offset_t),
- SMC_RAM_END);
-
- data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
- data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
- }
- return result;
-}
-
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
- /* This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table only
- * individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &polaris10_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-
-}
-
-static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct SMU74_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
- pp_atomctrl_clock_dividers_vi dividers;
-
- polaris10_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
- polaris10_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (hw_data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = polaris10_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
- }
-
- result = polaris10_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = polaris10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = polaris10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = polaris10_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = polaris10_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = polaris10_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = polaris10_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = polaris10_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
- result = polaris10_populate_smc_initailial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = polaris10_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- result = polaris10_populate_avfs_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
-
- table->CurrSclkPllRange = 0xff;
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = polaris10_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
- /* For porlarity read GPIOPAD_A with assigned Gpio pin
- * since VBIOS will program this register to set 'inactive state',
- * driver can then determine 'active state' from this and
- * program SMU with correct polarity
- */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
- & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
- && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- /* Populate BIF_SCLK levels into SMC DPM table */
- for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
- PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
- if (i == 0)
- table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- else
- table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- }
-
- for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
- SMC_RAM_END);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- result = polaris10_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload arb data to SMC memory!", return result);
-
- result = polaris10_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate PM fuses to SMC memory!", return result);
-
- polaris10_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return polaris10_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return 0;
-
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-
- ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
- 0 : -1;
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (smu_data->smu7_data.fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
-
-static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
- UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
-
-static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- smu_data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
-static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- int max_entry, i;
-
- max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU74_MAX_LEVELS_LINK :
- pcie_table->count;
- /* Setup BIF_SCLK levels */
- for (i = 0; i < max_entry; i++)
- smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
- return 0;
-}
-
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- polaris10_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- polaris10_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- polaris10_update_samu_smc_table(hwmgr);
- break;
- case SMU_BIF_TABLE:
- polaris10_update_bif_smc_table(hwmgr);
- default:
- break;
- }
- return 0;
-}
-
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to update SCLK threshold!", return result);
-
- result = polaris10_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters!",
- );
-
- return result;
-}
-
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU74_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU74_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU74_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t polaris10_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU74_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU74_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU74_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU74_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU74_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU74_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU74_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU74_MAX_LEVELS_MVDD;
- case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
- return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
- }
-
- pr_warn("can't get the mac of %x\n", value);
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (0 == result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU74_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 75f43dadc56b..bd6be7793ca7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -35,13 +35,47 @@
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
-#include "polaris10_smc.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VDDC_VDDCI_DELTA 200
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
@@ -60,46 +94,13 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
-{
- int i;
- int result = -EINVAL;
- uint32_t reg, data;
-
- const PWR_Command_Table *pvirus = pwr_virus_table;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
- switch (pvirus->command) {
- case PwrCmdWrite:
- reg = pvirus->reg;
- data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
- break;
-
- case PwrCmdEnd:
- result = 0;
- break;
-
- default:
- pr_info("Table Exit with Invalid Command!");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -EINVAL;
- break;
- }
- pvirus++;
- }
-
- return result;
-}
-
-static int polaris10_perform_btc(struct pp_smumgr *smumgr)
+static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -107,16 +108,16 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
if (smu_data->avfs.avfs_btc_param > 1) {
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
}
return result;
}
-static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -127,7 +128,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_size = sizeof(avfs_graphics_level_polaris10);
u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
&dpm_table_start, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -138,14 +139,14 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address,
(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
return -1);
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_graphics_level_polaris10),
graphics_level_size, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -153,7 +154,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
return -1);
@@ -162,7 +163,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1);
@@ -172,9 +173,9 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
static int
-polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -183,20 +184,20 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
return -EINVAL);
if (smu_data->avfs.avfs_btc_param > 1) {
pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
return -EINVAL);
}
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
return -EINVAL);
smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
@@ -215,146 +216,146 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
return 0;
}
-static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+ /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/* Call Test SMU message with 0x20000 offset to trigger SMU start */
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait done bit to be set */
/* Check pass/failed indicator */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS))
PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu(struct pp_smumgr *smumgr)
+static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
SMU_VFT_INTACT = false;
- smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
- smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+ smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+ smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
if (smu_data->protected_mode == 0) {
- result = polaris10_start_smu_in_non_protection_mode(smumgr);
+ result = polaris10_start_smu_in_non_protection_mode(hwmgr);
} else {
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
/* If failed, try with different security Key. */
if (result != 0) {
smu_data->smu7_data.security_hard_key ^= 1;
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
}
}
if (result != 0)
PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
- polaris10_avfs_event_mgr(smumgr, true);
+ polaris10_avfs_event_mgr(hwmgr, true);
} else
SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
- polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
+ polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
- smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+ smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse;
- efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
efuse &= 0x00000001;
if (efuse)
return true;
@@ -362,7 +363,7 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
return false;
}
-static int polaris10_smu_init(struct pp_smumgr *smumgr)
+static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data;
int i;
@@ -371,9 +372,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
if (smu_data == NULL)
return -ENOMEM;
- smumgr->backend = smu_data;
+ hwmgr->smu_backend = smu_data;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
@@ -382,6 +383,2195 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ *voltage = *mvdd = 0;
+
+ /* clock - voltage dependency table is empty table */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
+
+static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ int i, j, k;
+ const uint16_t *pdef1;
+ const uint16_t *pdef2;
+
+ table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+ table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+ pdef1 = defaults->BAPMTI_R;
+ pdef2 = defaults->BAPMTI_RC;
+
+ for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU74_DTE_SINKS; k++) {
+ table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+ table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+/* TO DO move to hwmgr */
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+ if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ if (0 != polaris10_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+ table->SmioTable2.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* tables is already swapped, so in order to use the value from it,
+ * we need to swap it back.
+ * We are populating vddc CAC data to BapmVddc table
+ * in split and merged mode
+ */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ polaris10_populate_smc_vddci_table(hwmgr, table);
+ polaris10_populate_smc_mvdd_table(hwmgr, table);
+ polaris10_populate_cac_table(hwmgr, table);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_Ulv *state)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
+ state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
+ else
+ state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+
+/* To Do move to hwmgr */
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t i, ref_clk;
+
+ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+ ref_clk = smu7_get_xclk(hwmgr);
+
+ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+ table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+ return;
+ }
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+ smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+}
+
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_clock_dividers_ai dividers;
+ uint32_t ref_clock;
+ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+ uint8_t i;
+ int result;
+ uint64_t temp;
+
+ sclk_setting->SclkFrequency = clock;
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+ if (result == 0) {
+ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+ sclk_setting->PllRange = dividers.ucSclkPllRange;
+ sclk_setting->Sclk_slew_rate = 0x400;
+ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+ sclk_setting->Pcc_down_slew_rate = 0xffff;
+ sclk_setting->SSc_En = dividers.ucSscEnable;
+ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+ return result;
+ }
+
+ ref_clock = smu7_get_xclk(hwmgr);
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ if (clock > smu_data->range_table[i].trans_lower_frequency
+ && clock <= smu_data->range_table[i].trans_upper_frequency) {
+ sclk_setting->PllRange = i;
+ break;
+ }
+ }
+
+ sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw_frac = temp & 0xffff;
+
+ pcc_target_percent = 10; /* Hardcode 10% for now. */
+ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+ sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+ ss_target_percent = 2; /* Hardcode 2% for now. */
+ sclk_setting->SSc_En = 0;
+ if (ss_target_percent) {
+ sclk_setting->SSc_En = 1;
+ ss_target_freq = clock - (clock * ss_target_percent / 100);
+ sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw1_frac = temp & 0xffff;
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU74_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMU_SclkSetting curr_sclk_setting = { 0 };
+
+ result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+ /* populate graphics levels */
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ &level->MinVoltage, &mvdd);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+ level->ActivityLevel = sclk_al_threshold;
+
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ if (data->update_up_hyst)
+ level->UpHyst = (uint8_t)data->up_hyst;
+ if (data->update_down_hyst)
+ level->DownHyst = (uint8_t)data->down_hyst;
+
+ level->SclkSetting = curr_sclk_setting;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+ return 0;
+}
+
+static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+ result = polaris10_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport))
+ smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ struct cgs_display_info info = {0, 0, NULL};
+ uint32_t mclk_stutter_mode_threshold = 40000;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ &mem_level->MinVoltage, &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->MclkFrequency = clock;
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+ SMU74_MAX_LEVELS_MEMORY;
+ struct SMU74_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = polaris10_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (i == dpm_table->mclk_table.count - 1) {
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ levels[i].EnabledForActivity = 1;
+ }
+ if (result)
+ return result;
+ }
+
+ /* In order to prevent MC activity from stutter mode to push DPM up,
+ * the UVD change complements this by putting the MCLK in
+ * a higher state by default such that we are not affected by
+ * up threshold or and MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = 0x1f;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find mvdd value which clock is more than request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint32_t sclk_frequency;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
+ result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
+ PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ table->ACPILevel.DeepSleepDivId = 0;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!polaris10_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ table->MemoryACPILevel.StutterEnable = false;
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
+ table->VceLevel[count].MinVoltage |=
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /*retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burst_time;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burst_time;
+
+ return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+ result = polaris10_populate_memory_timing_parameters(hwmgr,
+ hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+ hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result == 0)
+ result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+ if (result != 0)
+ return result;
+ }
+ }
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU74_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ hw_data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ hw_data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read SMU_Eefuse to read and calculate RO and determine
+ * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (67 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
+
+ ro = efuse * (max - min) / 255 + min;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+
+ if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, SMC_RAM_END);
+
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ SMC_RAM_END);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, SMC_RAM_END);
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ SMC_RAM_END);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+ data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ }
+ return result;
+}
+
+static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &polaris10_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Workaround compute SDMA instability: disable lowest SCLK
+ * DPM level. Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available), Hysteresis:
+ * 0ms up, 5ms down
+ */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
+static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ pp_atomctrl_clock_dividers_vi dividers;
+
+ polaris10_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+ polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (hw_data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = polaris10_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+ }
+
+ result = polaris10_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = polaris10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = polaris10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = polaris10_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = polaris10_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = polaris10_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = polaris10_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = polaris10_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = polaris10_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = polaris10_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+ table->CurrSclkPllRange = 0xff;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = polaris10_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+ /* For porlarity read GPIOPAD_A with assigned Gpio pin
+ * since VBIOS will program this register to set 'inactive state',
+ * driver can then determine 'active state' from this and
+ * program SMU with correct polarity
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+ & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+ && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ /* Populate BIF_SCLK levels into SMC DPM table */
+ for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+ PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+ if (i == 0)
+ table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ else
+ table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ }
+
+ for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = polaris10_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = polaris10_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ polaris10_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return polaris10_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
+ ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
+ 0 : -1;
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
+ UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ int max_entry, i;
+
+ max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+ SMU74_MAX_LEVELS_LINK :
+ pcie_table->count;
+ /* Setup BIF_SCLK levels */
+ for (i = 0; i < max_entry; i++)
+ smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+ return 0;
+}
+
+static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ polaris10_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ polaris10_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ polaris10_update_samu_smc_table(hwmgr);
+ break;
+ case SMU_BIF_TABLE:
+ polaris10_update_bif_smc_table(hwmgr);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to update SCLK threshold!", return result);
+
+ result = polaris10_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU74_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU74_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU74_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU74_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU74_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU74_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU74_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU74_MAX_LEVELS_MVDD;
+ case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+ return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
const struct pp_smumgr_func polaris10_smu_funcs = {
.smu_init = polaris10_smu_init,
.smu_fini = smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
index ce0a30388ea1..b98ade676d12 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
@@ -48,20 +48,20 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010028
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -69,97 +69,97 @@ bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
return false;
}
-static uint32_t rv_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
-int rv_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
-int rv_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
-int rv_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -167,16 +167,16 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL;);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -188,11 +188,11 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
return 0;
}
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -204,17 +204,17 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -223,15 +223,15 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
-static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
+static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&smc_driver_if_version),
"Attempt to read SMC IF Version Number Failed!",
return -EINVAL);
@@ -243,9 +243,9 @@ static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
}
/* sdma is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerUpSdma),
"Attempt to power up sdma Failed!",
return -EINVAL);
@@ -253,9 +253,9 @@ static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerDownSdma),
"Attempt to power down sdma Failed!",
return -EINVAL);
@@ -264,9 +264,9 @@ static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
}
/* vcn is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0),
"Attempt to power up vcn Failed!",
return -EINVAL);
@@ -274,9 +274,9 @@ static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0),
"Attempt to power down vcn Failed!",
return -EINVAL);
@@ -284,38 +284,38 @@ static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smu_fini(struct pp_smumgr *smumgr)
+static int rv_smu_fini(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
if (priv) {
- rv_smc_disable_sdma(smumgr);
- rv_smc_disable_vcn(smumgr);
- cgs_free_gpu_mem(smumgr->device,
+ rv_smc_disable_sdma(hwmgr);
+ rv_smc_disable_vcn(hwmgr);
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[CLOCKTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int rv_start_smu(struct pp_smumgr *smumgr)
+static int rv_start_smu(struct pp_hwmgr *hwmgr)
{
- if (rv_verify_smc_interface(smumgr))
+ if (rv_verify_smc_interface(hwmgr))
return -EINVAL;
- if (rv_smc_enable_sdma(smumgr))
+ if (rv_smc_enable_sdma(hwmgr))
return -EINVAL;
- if (rv_smc_enable_vcn(smumgr))
+ if (rv_smc_enable_vcn(hwmgr))
return -EINVAL;
return 0;
}
-static int rv_smu_init(struct pp_smumgr *smumgr)
+static int rv_smu_init(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv;
uint64_t mc_addr;
@@ -327,10 +327,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[WMTABLE].version = 0x01;
@@ -355,7 +355,7 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(DpmClocks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -365,10 +365,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for CLOCKTABLE.",
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
index 262c8ded87c0..58888400f1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
@@ -51,11 +51,11 @@ struct rv_smumgr {
struct smu_table_array smu_tables;
};
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr);
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index c49a6f22002f..7f5359a97ef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -25,27 +25,28 @@
#include "pp_debug.h"
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
+#include "smu7_common.h"
+
+#include "polaris10_pwrvirus.h"
#define SMU7_SMC_SIZE 0x20000
-static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
return 0;
}
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
uint32_t data;
uint32_t addr;
@@ -59,7 +60,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
addr = smc_start_address;
while (byte_count >= 4) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*dest = PP_SMC_TO_HOST_UL(data);
@@ -69,7 +70,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
if (byte_count) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*pdata = PP_SMC_TO_HOST_UL(data);
/* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
dest_byte = (uint8_t *)dest;
@@ -81,7 +82,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
int result;
@@ -99,12 +100,12 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
/* Bytes are written into the SMC addres space with the MSB first. */
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
src += 4;
byte_count -= 4;
@@ -115,13 +116,13 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data = 0;
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
extra_shift = 8 * (4 - byte_count);
@@ -135,53 +136,53 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data |= (original_data & ~((~0UL) << extra_shift));
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
}
return 0;
}
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
- smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+ smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
return 0;
}
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int ret;
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send message %x ret is %d \n", msg, ret);
@@ -189,53 +190,53 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
return 0;
}
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
return 0;
}
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
return -EINVAL;
}
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc(smumgr, msg);
+ return smu7_send_msg_to_smc(hwmgr, msg);
}
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+ return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
}
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+ if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
pr_info("Failed to send Message.\n");
return 0;
}
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
{
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
return 0;
}
@@ -289,29 +290,29 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
}
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
return 0;
}
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);
return 0;
}
@@ -354,14 +355,14 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
return result;
}
-static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
uint32_t fw_type,
struct SMU_Entry *entry)
{
int result = 0;
struct cgs_firmware_info info = {0};
- result = cgs_get_firmware_info(smumgr->device,
+ result = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(fw_type),
&info);
@@ -374,7 +375,7 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
entry->meta_data_addr_low = 0;
/* digest need be excluded out */
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
info.image_size -= 20;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
@@ -389,30 +390,30 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
return 0;
}
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
if (smu_data->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
0x0);
- if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
- if (!cgs_is_virtualization_enabled(smumgr->device)) {
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
+ if (!cgs_is_virtualization_enabled(hwmgr->device)) {
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
smu_data->smu_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
smu_data->smu_buffer.mc_addr_low);
}
@@ -439,122 +440,162 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
toc->num_entries = 0;
toc->structure_version = 1;
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ if (cgs_is_virtualization_enabled(hwmgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
- if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
return result;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
uint32_t ret;
- ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
fw_mask, fw_mask);
-
return ret;
}
-int smu7_reload_firmware(struct pp_smumgr *smumgr)
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
- return smumgr->smumgr_funcs->start_smu(smumgr);
+ return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
-static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
uint32_t byte_count = length;
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
for (; byte_count >= 4; byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct cgs_firmware_info info = {0};
if (smu_data->security_hard_key == 1)
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
else
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
- smumgr->is_kicker = info.is_kicker;
+ hwmgr->is_kicker = info.is_kicker;
- result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+ result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
}
-int smu7_init(struct pp_smumgr *smumgr)
+static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
+{
+ int i;
+ uint32_t reg, data;
+
+ for (i = 0; i < size; i++) {
+ reg = pvirus->reg;
+ data = pvirus->data;
+ if (reg != 0xffffffff)
+ cgs_write_register(hwmgr->device, reg, data);
+ else
+ break;
+ pvirus++;
+ }
+}
+
+static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
+{
+ int i;
+
+ cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
+ cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
+ cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
+ for (i = 0; i < section->dfy_size; i++)
+ cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
+}
+
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
+{
+ execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
+ execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));
+
+ return 0;
+}
+
+int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
uint8_t *internal_buf;
uint64_t mc_addr = 0;
/* Allocate memory for backend private data */
- smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -568,16 +609,16 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != smu_data->header),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->header_buffer.handle);
return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
smu_data->smu_buffer.data_size = 200*4096;
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -591,12 +632,12 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != internal_buf),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->smu_buffer.handle);
return -EINVAL);
- if (smum_is_hw_avfs_present(smumgr))
+ if (smum_is_hw_avfs_present(hwmgr))
smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
else
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
@@ -605,12 +646,10 @@ int smu7_init(struct pp_smumgr *smumgr)
}
-int smu7_smu_fini(struct pp_smumgr *smumgr)
+int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index ee5e32d2921e..c87263bc0caa 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -60,32 +60,34 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
uint32_t *dest, uint32_t byte_count, uint32_t limit);
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t *value, uint32_t limit);
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t value, uint32_t limit);
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
-int smu7_reload_firmware(struct pp_smumgr *smumgr);
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
-int smu7_init(struct pp_smumgr *smumgr);
-int smu7_smu_fini(struct pp_smumgr *smumgr);
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr);
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr);
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr);
+int smu7_init(struct pp_hwmgr *hwmgr);
+int smu7_smu_fini(struct pp_hwmgr *hwmgr);
-#endif \ No newline at end of file
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 3bdf6478de7f..867388456530 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <drm/amdgpu_drm.h>
-#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
@@ -46,88 +45,18 @@ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
-int smum_early_init(struct pp_instance *handle)
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
- if (smumgr == NULL)
- return -ENOMEM;
-
- smumgr->device = handle->device;
- smumgr->chip_family = handle->chip_family;
- smumgr->chip_id = handle->chip_id;
- smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
- smumgr->reload_fw = 1;
- handle->smu_mgr = smumgr;
-
- switch (smumgr->chip_family) {
- case AMDGPU_FAMILY_CZ:
- smumgr->smumgr_funcs = &cz_smu_funcs;
- break;
- case AMDGPU_FAMILY_VI:
- switch (smumgr->chip_id) {
- case CHIP_TOPAZ:
- smumgr->smumgr_funcs = &iceland_smu_funcs;
- break;
- case CHIP_TONGA:
- smumgr->smumgr_funcs = &tonga_smu_funcs;
- break;
- case CHIP_FIJI:
- smumgr->smumgr_funcs = &fiji_smu_funcs;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- smumgr->smumgr_funcs = &polaris10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_AI:
- switch (smumgr->chip_id) {
- case CHIP_VEGA10:
- smumgr->smumgr_funcs = &vega10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_RV:
- switch (smumgr->chip_id) {
- case CHIP_RAVEN:
- smumgr->smumgr_funcs = &rv_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- kfree(smumgr);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
- return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_avfs_enable)
+ return hwmgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
return 0;
}
-int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
- return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_setup_fan_table)
+ return hwmgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
return 0;
}
@@ -135,8 +64,8 @@ int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
- return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold)
+ return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr);
return 0;
}
@@ -144,163 +73,75 @@ int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
- return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+ if (NULL != hwmgr->smumgr_funcs->update_smc_table)
+ return hwmgr->smumgr_funcs->update_smc_table(hwmgr, type);
return 0;
}
-uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member)
{
- if (NULL != smumgr->smumgr_funcs->get_offsetof)
- return smumgr->smumgr_funcs->get_offsetof(type, member);
+ if (NULL != hwmgr->smumgr_funcs->get_offsetof)
+ return hwmgr->smumgr_funcs->get_offsetof(type, member);
return 0;
}
int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
- return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->process_firmware_header)
+ return hwmgr->smumgr_funcs->process_firmware_header(hwmgr);
return 0;
}
-int smum_get_argument(struct pp_smumgr *smumgr)
+int smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->get_argument)
- return smumgr->smumgr_funcs->get_argument(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->get_argument)
+ return hwmgr->smumgr_funcs->get_argument(hwmgr);
return 0;
}
-uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
{
- if (NULL != smumgr->smumgr_funcs->get_mac_definition)
- return smumgr->smumgr_funcs->get_mac_definition(value);
+ if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
+ return hwmgr->smumgr_funcs->get_mac_definition(value);
return 0;
}
-int smum_download_powerplay_table(struct pp_smumgr *smumgr,
- void **table)
+int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table)
{
- if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
- return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
+ if (NULL != hwmgr->smumgr_funcs->download_pptable_settings)
+ return hwmgr->smumgr_funcs->download_pptable_settings(hwmgr,
table);
return 0;
}
-int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
+int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
- return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->upload_pptable_settings)
+ return hwmgr->smumgr_funcs->upload_pptable_settings(hwmgr);
return 0;
}
-int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
+ if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
+ return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
}
-int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL ||
- smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
- return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
- smumgr, msg, parameter);
-}
-
-/*
- * Returns once the part of the register indicated by the mask has
- * reached the given value.
- */
-int smum_wait_on_register(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device, index);
- if ((cur_value & mask) == (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL)
+ if (hwmgr == NULL ||
+ hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device,
- index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-
-/*
- * Returns once the part of the register indicated by the mask
- * has reached the given value.The indirect space is described by
- * giving the memory-mapped index of the indirect index register.
- */
-int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(smumgr->device, indirect_port, index);
- return smum_wait_on_register(smumgr, indirect_port + 1,
- mask, value);
-}
-
-void smum_wait_for_indirect_register_unequal(
- struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return;
- cgs_write_register(smumgr->device, indirect_port, index);
- smum_wait_for_register_unequal(smumgr, indirect_port + 1,
- value, mask);
+ return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+ hwmgr, msg, parameter);
}
int smu_allocate_memory(void *device, uint32_t size,
@@ -316,7 +157,7 @@ int smu_allocate_memory(void *device, uint32_t size,
return -EINVAL;
ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
- 0, 0, (cgs_handle_t *)handle);
+ (cgs_handle_t *)handle);
if (ret)
return -ENOMEM;
@@ -356,24 +197,24 @@ int smu_free_memory(void *device, void *handle)
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
- return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->init_smc_table)
+ return hwmgr->smumgr_funcs->init_smc_table(hwmgr);
return 0;
}
int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
return 0;
}
int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_memory_levels)
+ return hwmgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
return 0;
}
@@ -381,16 +222,16 @@ int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
/*this interface is needed by island ci/vi */
int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
- return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->initialize_mc_reg_table)
+ return hwmgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
return 0;
}
bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
- return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->is_dpm_running)
+ return hwmgr->smumgr_funcs->is_dpm_running(hwmgr);
return true;
}
@@ -398,17 +239,17 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
- if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels(
+ if (hwmgr->smumgr_funcs->populate_requested_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_requested_graphic_levels(
hwmgr, request);
return 0;
}
-bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
+bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
- if (smumgr->smumgr_funcs->is_hw_avfs_present)
- return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
+ if (hwmgr->smumgr_funcs->is_hw_avfs_present)
+ return hwmgr->smumgr_funcs->is_hw_avfs_present(hwmgr);
return false;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
deleted file mode 100644
index 65d3a4893958..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ /dev/null
@@ -1,3275 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- *
- */
-
-#include "pp_debug.h"
-#include "tonga_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "tonga_smumgr.h"
-#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-
-#include "smu72_discrete.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define VDDC_VDDCI_DELTA 200
-
-
-static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
- */
- {1, 0xF, 0xFD, 0x19,
- 5, 45, 0, 0xB0000,
- {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
- 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
- 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
- },
-};
-
-/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
- {600, 1050, 3, 0},
- {600, 1050, 6, 1}
-};
-
-/* [FF, SS] type, [] 4 voltage ranges,
- * and [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
-};
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
-static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
- {0, 1, 3, 2, 4, 5},
- {0, 2, 4, 5, 6, 5}
-};
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- * This is the unit expected by SMC firmware
- */
-
-
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* clock - voltage dependency table is empty table */
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- /* find first sclk bigger than request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- voltage->VddGfx = phm_get_voltage_index(
- pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i].vddgfx);
- voltage->Vddc = phm_get_voltage_index(
- pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i].vddc);
-
- if (allowed_clock_voltage_table->entries[i].vddci)
- voltage->Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
- else
- voltage->Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
-
-
- if (allowed_clock_voltage_table->entries[i].mvdd)
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
-
- voltage->Phases = 1;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddgfx);
- voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddc);
-
- if (allowed_clock_voltage_table->entries[i-1].vddci)
- voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i-1].vddci);
-
- if (allowed_clock_voltage_table->entries[i-1].mvdd)
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
-
- return 0;
-}
-
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- table->VddcLevelCount = data->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- table->VddcTable[count] =
- PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
- }
- return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
- for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
- table->VddGfxTable[count] =
- PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
- }
- return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- table->VddciLevelCount = data->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- table->SmioTable1.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
- table->SmioTable1.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->vddci_voltage_table.entries[count].smio_low;
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- table->SmioTable2.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->mvdd_voltage_table.entries[count].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
- }
-
- return 0;
-}
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
- pptable_info->vddgfx_lookup_table;
- struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
- pptable_info->vddc_lookup_table;
-
- /* table is already swapped, so in order to use the value from it
- * we need to swap it back.
- */
- uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
- uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
- for (count = 0; count < vddc_level_count; count++) {
- /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
- index = phm_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
-
- if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
- /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
- for (count = 0; count < vddgfx_level_count; count++) {
- index = phm_get_voltage_index(vddgfx_lookup_table,
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
- }
- } else {
- for (count = 0; count < vddc_level_count; count++) {
- index = phm_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddGfxVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddGfxVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
- }
-
- return 0;
-}
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-
-static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result;
-
- result = tonga_populate_smc_vddc_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDC voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDCI voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDGFX voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_mvdd_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate MVDD voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_cac_tables(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU72_Discrete_Ulv *state)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
-
-static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU72_Discrete_DpmTable *table)
-{
- return tonga_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t i;
-
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider*/
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
- SMU72_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- uint32_t mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
- /* populate graphics levels*/
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_sclk, engine_clock,
- &graphic_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((!result),
- "can not find VDDC voltage value for VDDC "
- "engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 0;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- smu7_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_in_sr);
-
- /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (!result) {
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
- uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
- uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
-
- uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS;
-
- SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
-
- uint32_t i, max_entry;
- uint8_t highest_pcie_level_enabled = 0;
- uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
- uint8_t count = 0;
- int result = 0;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = tonga_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result != 0)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now. */
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- }
- } else {
- if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
- pr_err("Pcie Dpm Enablemask is 0 !");
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(highest_pcie_level_enabled+1))) != 0)) {
- highest_pcie_level_enabled++;
- }
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<lowest_pcie_level_enabled)) == 0)) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
- count++;
- }
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
- (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_calculate_mclk_params(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dllStateOn
- )
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
- uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
- uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
- uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
- uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
- uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
- uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
- uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
- pp_atomctrl_memory_clock_param mpll_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_si(hwmgr,
- memory_clock, &mpll_param, strobe_mode);
- PP_ASSERT_WITH_CODE(
- !result,
- "Error retrieving Memory Clock Parameters from VBIOS.",
- return result);
-
- /* MPLL_FUNC_CNTL setup*/
- mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
- mpll_param.bw_ctrl);
-
- /* MPLL_FUNC_CNTL_1 setup*/
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKF,
- mpll_param.mpll_fb_divider.cl_kf);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKFRAC,
- mpll_param.mpll_fb_divider.clk_frac);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, VCO_MODE,
- mpll_param.vco_mode);
-
- /* MPLL_AD_FUNC_CNTL setup*/
- mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
- MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
- mpll_param.mpll_post_divider);
-
- if (data->is_memory_gddr5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_SEL,
- mpll_param.yclk_sel);
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
- mpll_param.mpll_post_divider);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
- pp_atomctrl_internal_ss_info ss_info;
- uint32_t freq_nom;
- uint32_t tmp;
- uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
- /* for GDDR5 for all modes and DDR3 */
- if (1 == mpll_param.qdr)
- freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
- /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
-
- if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss.Info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
- uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
- uint32_t clkv =
- (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
- ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
- mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
- }
- }
-
- /* MCLK_PWRMGT_CNTL setup */
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
- /* Save the result data to outpupt memory level structure */
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
-
-static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
- bool strobe_mode)
-{
- uint8_t mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500)
- mc_para_index = 0x00;
- else if (memory_clock > 47500)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- } else {
- if (memory_clock < 65000)
- mc_para_index = 0x00;
- else if (memory_clock > 135000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
-
- return mc_para_index;
-}
-
-static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
- uint8_t mc_para_index;
-
- if (memory_clock < 10000)
- mc_para_index = 0;
- else if (memory_clock >= 80000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
-
- return mc_para_index;
-}
-
-
-static int tonga_populate_single_memory_level(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *memory_level
- )
-{
- uint32_t mvdd = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- bool dll_state_on;
- struct cgs_display_info info = {0};
- uint32_t mclk_edc_wr_enable_threshold = 40000;
- uint32_t mclk_stutter_mode_threshold = 30000;
- uint32_t mclk_edc_enable_threshold = 40000;
- uint32_t mclk_strobe_mode_threshold = 40000;
-
- if (NULL != pptable_info->vdd_dep_on_mclk) {
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_mclk,
- memory_clock,
- &memory_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE(
- !result,
- "can not find MinVddc voltage value from memory VDDC "
- "voltage dependency table",
- return result);
- }
-
- if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
- memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
- else
- memory_level->MinMvdd = mvdd;
-
- memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
-
- /* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- memory_level->StutterEnable = 0;
- memory_level->StrobeEnable = 0;
- memory_level->EdcReadEnable = 0;
- memory_level->EdcWriteEnable = 0;
- memory_level->RttEnable = 0;
-
- /* default set to low watermark. Highest level will be set to high later.*/
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
-
- if ((mclk_stutter_mode_threshold != 0) &&
- (memory_clock <= mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled)
- && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
- && (data->display_timing.num_existing_displays <= 2)
- && (data->display_timing.num_existing_displays != 0))
- memory_level->StutterEnable = 1;
-
- /* decide strobe mode*/
- memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
- (memory_clock <= mclk_strobe_mode_threshold);
-
- /* decide EDC mode and memory clock ratio*/
- if (data->is_memory_gddr5) {
- memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
- memory_level->StrobeEnable);
-
- if ((mclk_edc_enable_threshold != 0) &&
- (memory_clock > mclk_edc_enable_threshold)) {
- memory_level->EdcReadEnable = 1;
- }
-
- if ((mclk_edc_wr_enable_threshold != 0) &&
- (memory_clock > mclk_edc_wr_enable_threshold)) {
- memory_level->EdcWriteEnable = 1;
- }
-
- if (memory_level->StrobeEnable) {
- if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
- ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- } else {
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
- }
-
- } else {
- dll_state_on = data->dll_default_on;
- }
- } else {
- memory_level->StrobeRatio =
- tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- }
-
- result = tonga_calculate_mclk_params(hwmgr,
- memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
- /* MCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
- /* Indicates maximum activity level for this performance level.*/
- CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
- }
-
- return result;
-}
-
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int result;
-
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_address =
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size =
- sizeof(SMU72_Discrete_MemoryLevel) *
- SMU72_MAX_LEVELS_MEMORY;
- SMU72_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = tonga_populate_single_memory_level(
- hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &(smu_data->smc_state_table.MemoryLevel[i]));
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now.*/
- smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
- /*
- * in order to prevent MC activity from stutter mode to push DPM up.
- * the UVD change complements this by putting the MCLK in a higher state
- * by default such that we are not effected by up threshold or and MCLK DPM latency.
- */
- smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
- smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
-
-static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find mvdd value which clock is more than request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- /* Always round to higher voltage. */
- smio_pattern->Voltage =
- data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
-
- SMIO_Pattern voltage_level;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
- /* The ACPI state should not do DPM on DC (or ever).*/
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- table->ACPILevel.MinVoltage =
- smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
-
- /* assign zero for now*/
- table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* divider ID for required SCLK*/
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
-
- /* For various features to be enabled/disabled while this level is active.*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- /* SCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
- table->MemoryACPILevel.MinVoltage =
- smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
-
- /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
- if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- /* Force reset on DLL*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
- /* Disable DLL in ACPIState*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
- /* Enable DLL bypass signal*/
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK0_BYPASS, 0);
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK1_BYPASS, 0);
-
- table->MemoryACPILevel.DllCntl =
- PP_HOST_TO_SMC_UL(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl =
- PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
- table->MemoryACPILevel.MpllDqFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
- table->MemoryACPILevel.MpllSs1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
- table->MemoryACPILevel.MpllSs2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- /* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = 0;
- table->MemoryACPILevel.StrobeEnable = 0;
- table->MemoryACPILevel.EdcReadEnable = 0;
- table->MemoryACPILevel.EdcWriteEnable = 0;
- table->MemoryACPILevel.RttEnable = 0;
-
- return result;
-}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t) (mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->UvdLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->UvdLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->UvdLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(
- hwmgr,
- table->UvdLevel[count].VclkFrequency,
- &dividers);
-
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for Vclk clock",
- return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for Dclk clock",
- return result);
-
- table->UvdLevel[count].DclkDivider =
- (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- }
-
- return result;
-
-}
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t) (mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency =
- mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->VceLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->VceLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->VceLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t) (mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->AcpLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->AcpLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->AcpLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_memory_timing_parameters(
- struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint32_t memory_clock,
- struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
- )
-{
- uint32_t dramTiming;
- uint32_t dramTiming2;
- uint32_t burstTime;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- engine_clock, memory_clock);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
- return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- * This function is to be called from the SetPowerState table.
- */
-static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- int result = 0;
- SMU72_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
-
- memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = tonga_populate_memory_timing_parameters
- (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
-
- if (result)
- break;
- }
- }
-
- if (!result) {
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU72_Discrete_MCArbDramTimingTable),
- SMC_RAM_END
- );
- }
-
- return result;
-}
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table*/
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
-
- if (result != 0) {
- smu_data->smc_state_table.GraphicsBootLevel = 0;
- pr_err("[powerplay] VBIOS did not find boot engine "
- "clock value in dependency table. "
- "Using Graphics DPM level 0 !");
- result = 0;
- }
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
-
- if (result != 0) {
- smu_data->smc_state_table.MemoryBootLevel = 0;
- pr_err("[powerplay] VBIOS did not find boot "
- "engine clock value in dependency table."
- "Using Memory DPM level 0 !");
- result = 0;
- }
-
- table->BootVoltage.Vddc =
- phm_get_voltage_id(&(data->vddc_voltage_table),
- data->vbios_boot_state.vddc_bootup_value);
- table->BootVoltage.VddGfx =
- phm_get_voltage_id(&(data->vddgfx_voltage_table),
- data->vbios_boot_state.vddgfx_bootup_value);
- table->BootVoltage.Vddci =
- phm_get_voltage_id(&(data->vddci_voltage_table),
- data->vbios_boot_state.vddci_bootup_value);
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return result;
-}
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- uint32_t hw_revision, dev_id;
- struct cgs_system_info sys_info = {0};
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- sys_info.size = sizeof(struct cgs_system_info);
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- hw_revision = (uint32_t)sys_info.value;
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
-
- /* Read SMU_Eefuse to read and calculate RO and determine
- * if the part is SS or FF. if RO >= 1660MHz, part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
-
- /* Populate Stretch amount */
- smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
- volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
- (sclk_table->entries[i].clk/100) / 10000) * 1000 /
- (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
- volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
- (sclk_table->entries[i].clk/100) / 100000) * 1000 /
- (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
- } else {
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- }
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- tonga_clock_stretcher_lookup_table[stretch_amount2][0];
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- tonga_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
- GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (tonga_clock_stretch_amount_conversion
- [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
- /* Check the allowed frequency against the sclk level[j].
- * Sclk's endianness has already been converted,
- * and it's in 10Khz unit,
- * as opposed to Data table, which is in Mhz unit.
- */
- if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
- cks_setting |= 0x2;
- if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
- cks_setting |= 0x1;
- }
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- /* Splitted mode */
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= config;
- } else {
- pr_err("VDDC and VDDGFX should "
- "be both on SVI2 control in splitted mode !\n");
- }
- } else {
- /* Merged mode */
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- pr_err("VDDC should be on "
- "SVI2 control in merged mode !\n");
- }
- }
-
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- }
-
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
- /*
- * This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table only
- * individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result != 0)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-
-static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- int i, j, k;
- const uint16_t *pdef1, *pdef2;
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 256));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range !",
- );
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
- dpm_table->BAPM_TEMP_GRADIENT =
- PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
- pdef1 = defaults->bapmti_r;
- pdef2 = defaults->bapmti_rc;
-
- for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU72_DTE_SOURCES; j++) {
- for (k = 0; k < SMU72_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] =
- PP_HOST_TO_SMC_US(*pdef1);
- dpm_table->BAPMTI_RC[i][j][k] =
- PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
-
-static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* TDC number of fraction bits are changed from 8 to 7
- * for Fiji as requested by SMC team
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->tdc_vddc_throttle_release_limit_perc;
- smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
-
- return 0;
-}
-
-static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 "
- "(SviLoadLineEn) from SMC Failed !",
- return -EINVAL);
- else
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
-
- return 0;
-}
-
-static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
-
-static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed !",
- return -EINVAL);
-
- /* DW6 */
- if (tonga_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed !",
- return -EINVAL);
- /* DW7 */
- if (tonga_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed !",
- return -EINVAL);
- /* DW8 */
- if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl Failed !",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (tonga_populate_temperature_scaler(hwmgr) != 0)
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed !",
- return -EINVAL);
-
- /* DW13-DW14 */
- if (tonga_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan "
- "Control parameters Failed !",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (tonga_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed !",
- return -EINVAL);
-
- /* DW19 */
- if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML "
- "Min and Max Vid Failed !",
- return -EINVAL);
-
- /* DW20 */
- if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(
- false,
- "Attempt to populate BapmVddCBaseLeakage "
- "Hi and Lo Sidd Failed !",
- return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed !",
- return -EINVAL);
- }
- return 0;
-}
-
-static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
- SMU72_Discrete_MCRegisters *mc_reg_table)
-{
- const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
-
- uint32_t i, j;
-
- for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
- if (smu_data->mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(
- i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
- "Index of mc_reg_table->address[] array "
- "out of boundary",
- return -EINVAL);
- mc_reg_table->address[i].s0 =
- PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (uint8_t)i;
-
- return 0;
-}
-
-/*convert register values from driver to SMC format */
-static void tonga_convert_mc_registers(
- const struct tonga_mc_reg_entry *entry,
- SMU72_Discrete_MCRegisterSet *data,
- uint32_t num_entries, uint32_t valid_flag)
-{
- uint32_t i, j;
-
- for (i = 0, j = 0; j < num_entries; j++) {
- if (valid_flag & 1<<j) {
- data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
- i++;
- }
- }
-}
-
-static int tonga_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
- const uint32_t memory_clock,
- SMU72_Discrete_MCRegisterSet *mc_reg_table_data
- )
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- uint32_t i = 0;
-
- for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
- if (memory_clock <=
- smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
- break;
- }
- }
-
- if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
- --i;
-
- tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, smu_data->mc_reg_table.last,
- smu_data->mc_reg_table.validflag);
-
- return 0;
-}
-
-static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_MCRegisters *mc_regs)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- int res;
- uint32_t i;
-
- for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = tonga_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
- data->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_regs->data[i]
- );
-
- if (0 != res)
- result = res;
- }
-
- return result;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t address;
- int32_t result;
-
- if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
- return 0;
-
-
- memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
-
- if (result != 0)
- return result;
-
-
- address = smu_data->smu7_data.mc_reg_table_start +
- (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
- return smu7_copy_bytes_to_smc(
- hwmgr->smumgr, address,
- (uint8_t *)&smu_data->mc_regs.data[0],
- sizeof(SMU72_Discrete_MCRegisterSet) *
- data->dpm_table.mclk_table.count,
- SMC_RAM_END);
-}
-
-static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
-
- memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
- result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize MCRegTable for the MC register addresses !",
- return result;);
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize MCRegTable for driver state !",
- return result;);
-
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
- (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
-}
-
-static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &tonga_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
-}
-
-static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct SMU72_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
- * Initializes the SMC table and uploads it
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pInput the pointer to input data (PowerState)
- * @return always 0
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- uint8_t i;
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
-
- memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
-
- tonga_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
- tonga_populate_smc_voltage_tables(hwmgr, table);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
- if (i == 1 || i == 0)
- table->SystemFlags |= 0x40;
-
- if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = tonga_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ULV state !",
- return result;);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, 0x40035);
- }
-
- result = tonga_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Link Level !", return result);
-
- result = tonga_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Graphics Level !", return result);
-
- result = tonga_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Memory Level !", return result);
-
- result = tonga_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ACPI Level !", return result);
-
- result = tonga_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize VCE Level !", return result);
-
- result = tonga_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ACP Level !", return result);
-
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level !", return result);
-
- /* Since only the initial state is completely set up at this
- * point (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = tonga_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to Write ARB settings for the initial state.",
- return result;);
-
- result = tonga_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize UVD Level !", return result);
-
- result = tonga_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Boot Level !", return result);
-
- tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate BAPM Parameters !", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = tonga_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate Clock Stretcher Data Table !",
- return result;);
- }
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
-
- /*
- * Cail reads current link status and reports it as cap (we cannot
- * change this due to some previous issues we had)
- * SMC drops the link status to lowest level after enabling
- * DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
- * but this time Cail reads current link status which was set to low by
- * SMC and reports it as cap to powerplay
- * To avoid it, we set PCIeBootLinkLevel to highest dpm level
- */
- PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
-
- table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
- table->PCIeGenInterval = 1;
-
- result = tonga_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate VRConfig setting !", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
-
- if (0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr,
- THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
- table->ThermOutPolarity =
- (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
-
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal)){
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- }
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
- SMC_RAM_END);
-
- PP_ASSERT_WITH_CODE(!result,
- "Failed to upload dpm data to SMC memory !", return result;);
-
- result = tonga_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to upload arb data to SMC memory !", return result);
-
- tonga_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((!result),
- "Failed to populate initialize pm fuses !", return result);
-
- result = tonga_populate_initial_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((!result),
- "Failed to populate initialize MC Reg table !", return result);
-
- tonga_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- return 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (0 == smu_data->smu7_data.fan_table_start) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (0 == duty100) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- fan_table.FanControl_GL_Flag = 1;
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table,
- (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- return 0;
-}
-
-
-static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return tonga_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
-
- result = tonga_update_and_upload_mc_reg_table(hwmgr);
-
- PP_ASSERT_WITH_CODE((!result),
- "Failed to upload MC reg table !",
- return result);
-
- result = tonga_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters !",
- );
-
- return result;
-}
-
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU72_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU72_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU72_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t tonga_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU72_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU72_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU72_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU72_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU72_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU72_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU72_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU72_MAX_LEVELS_MVDD;
- }
- pr_warn("can't get the mac value %x\n", value);
-
- return 0;
-}
-
-
-static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
-
-static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- tonga_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- tonga_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- tonga_update_samu_smc_table(hwmgr);
- break;
- default:
- break;
- }
- return 0;
-}
-
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (result != 0);
-
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (result != 0);
-
- return error ? 1 : 0;
-}
-
-/*---------------------------MC----------------------------*/
-
-static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
- return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
-{
- bool result = true;
-
- switch (in_reg) {
- case mmMC_SEQ_RAS_TIMING:
- *out_reg = mmMC_SEQ_RAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_DLL_STBY:
- *out_reg = mmMC_SEQ_DLL_STBY_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD0:
- *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD1:
- *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CTRL:
- *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
-
- case mmMC_SEQ_CAS_TIMING:
- *out_reg = mmMC_SEQ_CAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING:
- *out_reg = mmMC_SEQ_MISC_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING2:
- *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CMD:
- *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CTL:
- *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D0:
- *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D1:
- *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D0:
- *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D1:
- *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
-
- case mmMC_PMG_CMD_EMRS:
- *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS1:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
-
- case mmMC_SEQ_PMG_TIMING:
- *out_reg = mmMC_SEQ_PMG_TIMING_LP;
- break;
-
- case mmMC_PMG_CMD_MRS2:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_2:
- *out_reg = mmMC_SEQ_WR_CTL_2_LP;
- break;
-
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
-{
- uint32_t i;
- uint16_t address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
- &address) ?
- address :
- table->mc_reg_address[i].s1;
- }
- return 0;
-}
-
-static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
- struct tonga_mc_reg_table *ni_table)
-{
- uint8_t i, j;
-
- PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
- "Invalid VramInfo table.", return -EINVAL);
-
- for (i = 0; i < table->last; i++)
- ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-
- ni_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ni_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- ni_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
-
- ni_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size, we need to recover them here.
- * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to
- * mmMC_PMG_CMD_EMRS /_LP[15:0]. Bit[15:0] MRS, need to be update
- * mmMC_PMG_CMD_MRS/_LP[15:0]
- * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to
- * mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. need to set these data for each clock range
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return always 0
- */
-static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
- struct tonga_mc_reg_table *table)
-{
- uint8_t i, j, k;
- uint32_t temp_reg;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- switch (table->mc_reg_address[i].s1) {
-
- case mmMC_SEQ_MISC1:
- temp_reg = cgs_read_register(hwmgr->device,
- mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
- if (!data->is_memory_gddr5)
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- if (!data->is_memory_gddr5) {
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- }
-
- break;
-
- case mmMC_SEQ_RESERVE_M:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- break;
-
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
-
-static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
-{
- uint8_t i, j;
-
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
- break;
- }
- }
- }
-
- return 0;
-}
-
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- pp_atomctrl_mc_reg_table *table;
- struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
- uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
- table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
- if (table == NULL)
- return -ENOMEM;
-
- /* Program additional LP registers that are no longer programmed by VBIOS */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
- result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
- if (!result)
- result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
- if (!result) {
- tonga_set_s0_mc_reg_index(ni_table);
- result = tonga_set_mc_special_registers(hwmgr, ni_table);
- }
-
- if (!result)
- tonga_set_valid_flag(ni_table);
-
- kfree(table);
-
- return result;
-}
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU72_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
deleted file mode 100644
index 962860f13f24..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _TONGA_SMC_H
-#define _TONGA_SMC_H
-
-#include "smumgr.h"
-#include "smu72.h"
-
-
-#define ASICID_IS_TONGA_P(wDID, bRID) \
- (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
- || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
-
-
-struct tonga_pt_defaults {
- uint8_t svi_load_line_en;
- uint8_t svi_load_line_vddC;
- uint8_t tdc_vddc_throttle_release_limit_perc;
- uint8_t tdc_mawt;
- uint8_t tdc_waterfall_ctl;
- uint8_t dte_ambient_temp_base;
- uint32_t display_cac;
- uint32_t bamp_temp_gradient;
- uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
- uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
-};
-
-int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
-uint32_t tonga_get_mac_definition(uint32_t value);
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index c35f4c35c9ca..0a8e48bff219 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,141 +33,193 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "cgs_common.h"
-#include "tonga_smc.h"
#include "smu7_smumgr.h"
+#include "smu7_dyn_defaults.h"
-static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+
+#include "atombios.h"
+
+#include "pppcielanes.h"
+#include "pp_endian.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ {1, 0xF, 0xFD, 0x19,
+ 5, 45, 0, 0xB0000,
+ {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+ 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+ 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+ },
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0},
+ {600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5},
+ {0, 2, 4, 5, 6, 5}
+};
+
+static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result;
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/**
* Call Test SMU message with 0x20000 offset to trigger SMU start
*/
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) {
pr_err("SMU Firmware start failed\n");
return -EINVAL;
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return 0;
}
-
-static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/*Clear firmware interrupt enable flag*/
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/*De-assert reset*/
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int tonga_start_smu(struct pp_smumgr *smumgr)
+static int tonga_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr) ||
- cgs_is_virtualization_enabled(smumgr->device))) {
+ if (!(smu7_is_smc_ram_running(hwmgr) ||
+ cgs_is_virtualization_enabled(hwmgr->device))) {
/*Check if SMU is running in protected mode*/
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = tonga_start_in_non_protection_mode(smumgr);
+ result = tonga_start_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = tonga_start_in_protection_mode(smumgr);
+ result = tonga_start_in_protection_mode(hwmgr);
if (result)
return result;
}
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-static int tonga_smu_init(struct pp_smumgr *smumgr)
+static int tonga_smu_init(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *tonga_priv = NULL;
int i;
@@ -176,9 +228,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr)
if (tonga_priv == NULL)
return -ENOMEM;
- smumgr->backend = tonga_priv;
+ hwmgr->smu_backend = tonga_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
@@ -187,6 +239,3053 @@ static int tonga_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+
+static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* clock - voltage dependency table is empty table */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ voltage->VddGfx = phm_get_voltage_index(
+ pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(
+ pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddc);
+
+ if (allowed_clock_voltage_table->entries[i].vddci)
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+ else
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+ if (allowed_clock_voltage_table->entries[i].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+ voltage->Phases = 1;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddc);
+
+ if (allowed_clock_voltage_table->entries[i-1].vddci)
+ voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i-1].vddci);
+
+ if (allowed_clock_voltage_table->entries[i-1].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+ return 0;
+}
+
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ table->VddcTable[count] =
+ PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+ }
+ return 0;
+}
+
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+ for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+ table->VddGfxTable[count] =
+ PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+ }
+ return 0;
+}
+
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ table->SmioTable1.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+ table->SmioTable1.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->vddci_voltage_table.entries[count].smio_low;
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ table->SmioTable2.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->mvdd_voltage_table.entries[count].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+ }
+
+ return 0;
+}
+
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+ pptable_info->vddgfx_lookup_table;
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+ pptable_info->vddc_lookup_table;
+
+ /* table is already swapped, so in order to use the value from it
+ * we need to swap it back.
+ */
+ uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+ uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+ for (count = 0; count < vddc_level_count; count++) {
+ /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+
+ if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
+ /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+ for (count = 0; count < vddgfx_level_count; count++) {
+ index = phm_get_voltage_index(vddgfx_lookup_table,
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+ }
+ } else {
+ for (count = 0; count < vddc_level_count; count++) {
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddGfxVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddGfxVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = tonga_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDC voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDCI voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDGFX voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate MVDD voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_cac_tables(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_Ulv *state)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_DpmTable *table)
+{
+ return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_sclk, engine_clock,
+ &graphic_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find VDDC voltage value for VDDC "
+ "engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 0;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (!result) {
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+ uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+
+ SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = tonga_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ }
+ } else {
+ if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+ pr_err("Pcie Dpm Enablemask is 0 !");
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(highest_pcie_level_enabled+1))) != 0)) {
+ highest_pcie_level_enabled++;
+ }
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<lowest_pcie_level_enabled)) == 0)) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+ count++;
+ }
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int tonga_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "Error retrieving Memory Clock Parameters from VBIOS.",
+ return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+ mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF,
+ mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC,
+ mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE,
+ mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+ mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+ /* Save the result data to outpupt memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+
+ return mc_para_index;
+}
+
+static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+ return mc_para_index;
+}
+
+
+static int tonga_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *memory_level
+ )
+{
+ uint32_t mvdd = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_stutter_mode_threshold = 30000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (NULL != pptable_info->vdd_dep_on_mclk) {
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_mclk,
+ memory_clock,
+ &memory_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "can not find MinVddc voltage value from memory VDDC "
+ "voltage dependency table",
+ return result);
+ }
+
+ if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+ memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else
+ memory_level->MinMvdd = mvdd;
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if ((mclk_stutter_mode_threshold != 0) &&
+ (memory_clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled)
+ && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+ && (data->display_timing.num_existing_displays <= 2)
+ && (data->display_timing.num_existing_displays != 0))
+ memory_level->StutterEnable = 1;
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ } else {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ } else {
+ dll_state_on = data->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio =
+ tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = tonga_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_address =
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size =
+ sizeof(SMU72_Discrete_MemoryLevel) *
+ SMU72_MAX_LEVELS_MEMORY;
+ SMU72_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = tonga_populate_single_memory_level(
+ hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ /*
+ * in order to prevent MC activity from stutter mode to push DPM up.
+ * the UVD change complements this by putting the MCLK in a higher state
+ * by default such that we are not effected by up threshold or and MCLK DPM latency.
+ */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find mvdd value which clock is more than request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ smio_pattern->Voltage =
+ data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+
+ SMIO_Pattern voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ table->ACPILevel.MinVoltage =
+ smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+ /* assign zero for now*/
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVoltage =
+ smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+ /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+ if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t) (mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->UvdLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->UvdLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->UvdLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(
+ hwmgr,
+ table->UvdLevel[count].VclkFrequency,
+ &dividers);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Vclk clock",
+ return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Dclk clock",
+ return result);
+
+ table->UvdLevel[count].DclkDivider =
+ (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ }
+
+ return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t) (mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->VceLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->VceLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->VceLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t) (mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->AcpLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->AcpLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->AcpLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->SamuLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->SamuLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->SamuLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ SMU72_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = tonga_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (result)
+ break;
+ }
+ }
+
+ if (!result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU72_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ pr_err("[powerplay] VBIOS did not find boot engine "
+ "clock value in dependency table. "
+ "Using Graphics DPM level 0 !");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ pr_err("[powerplay] VBIOS did not find boot "
+ "engine clock value in dependency table."
+ "Using Memory DPM level 0 !");
+ result = 0;
+ }
+
+ table->BootVoltage.Vddc =
+ phm_get_voltage_id(&(data->vddc_voltage_table),
+ data->vbios_boot_state.vddc_bootup_value);
+ table->BootVoltage.VddGfx =
+ phm_get_voltage_id(&(data->vddgfx_voltage_table),
+ data->vbios_boot_state.vddgfx_bootup_value);
+ table->BootVoltage.Vddci =
+ phm_get_voltage_id(&(data->vddci_voltage_table),
+ data->vbios_boot_state.vddci_bootup_value);
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return result;
+}
+
+static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ uint32_t hw_revision, dev_id;
+ struct cgs_system_info sys_info = {0};
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ hw_revision = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ /* Read SMU_Eefuse to read and calculate RO and determine
+ * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
+ volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
+ (sclk_table->entries[i].clk/100) / 10000) * 1000 /
+ (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
+ volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
+ (sclk_table->entries[i].clk/100) / 100000) * 1000 /
+ (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
+ } else {
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ }
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (tonga_clock_stretch_amount_conversion
+ [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+ /* Check the allowed frequency against the sclk level[j].
+ * Sclk's endianness has already been converted,
+ * and it's in 10Khz unit,
+ * as opposed to Data table, which is in Mhz unit.
+ */
+ if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ /* Splitted mode */
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= config;
+ } else {
+ pr_err("VDDC and VDDGFX should "
+ "be both on SVI2 control in splitted mode !\n");
+ }
+ } else {
+ /* Merged mode */
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ pr_err("VDDC should be on "
+ "SVI2 control in merged mode !\n");
+ }
+ }
+
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ }
+
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+ /*
+ * This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result != 0)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
+static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ int i, j, k;
+ const uint16_t *pdef1, *pdef2;
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range !",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ dpm_table->BAPM_TEMP_GRADIENT =
+ PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ pdef1 = defaults->bapmti_r;
+ pdef2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU72_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef1);
+ dpm_table->BAPMTI_RC[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* TDC number of fraction bits are changed from 8 to 7
+ * for Fiji as requested by SMC team
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 "
+ "(SviLoadLineEn) from SMC Failed !",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed !",
+ return -EINVAL);
+
+ /* DW6 */
+ if (tonga_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed !",
+ return -EINVAL);
+ /* DW7 */
+ if (tonga_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed !",
+ return -EINVAL);
+ /* DW8 */
+ if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl Failed !",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (tonga_populate_temperature_scaler(hwmgr) != 0)
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed !",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (tonga_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan "
+ "Control parameters Failed !",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (tonga_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed !",
+ return -EINVAL);
+
+ /* DW20 */
+ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(
+ false,
+ "Attempt to populate BapmVddCBaseLeakage "
+ "Hi and Lo Sidd Failed !",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed !",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(
+ i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array "
+ "out of boundary",
+ return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/*convert register values from driver to SMC format */
+static void tonga_convert_mc_registers(
+ const struct tonga_mc_reg_entry *entry,
+ SMU72_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+ struct pp_hwmgr *hwmgr,
+ const uint32_t memory_clock,
+ SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = tonga_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start +
+ (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(
+ hwmgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU72_Discrete_MCRegisterSet) *
+ data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+ result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for the MC register addresses !",
+ return result;);
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for driver state !",
+ return result;);
+
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &tonga_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct SMU72_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Workaround compute SDMA instability: disable lowest SCLK
+ * DPM level. Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available), Hysteresis:
+ * 0ms up, 5ms down
+ */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
+static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ uint8_t i;
+ pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ tonga_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ tonga_populate_smc_voltage_tables(hwmgr, table);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
+ if (i == 1 || i == 0)
+ table->SystemFlags |= 0x40;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = tonga_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ULV state !",
+ return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = tonga_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Link Level !", return result);
+
+ result = tonga_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Graphics Level !", return result);
+
+ result = tonga_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Memory Level !", return result);
+
+ result = tonga_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACPI Level !", return result);
+
+ result = tonga_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize VCE Level !", return result);
+
+ result = tonga_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACP Level !", return result);
+
+ result = tonga_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize SAMU Level !", return result);
+
+ /* Since only the initial state is completely set up at this
+ * point (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = tonga_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to Write ARB settings for the initial state.",
+ return result;);
+
+ result = tonga_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize UVD Level !", return result);
+
+ result = tonga_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Boot Level !", return result);
+
+ tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate BAPM Parameters !", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = tonga_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate Clock Stretcher Data Table !",
+ return result;);
+ }
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+
+ /*
+ * Cail reads current link status and reports it as cap (we cannot
+ * change this due to some previous issues we had)
+ * SMC drops the link status to lowest level after enabling
+ * DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
+ * but this time Cail reads current link status which was set to low by
+ * SMC and reports it as cap to powerplay
+ * To avoid it, we set PCIeBootLinkLevel to highest dpm level
+ */
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+ table->PCIeGenInterval = 1;
+
+ result = tonga_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate VRConfig setting !", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+
+ if (0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+ table->ThermOutPolarity =
+ (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO*/
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ }
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload dpm data to SMC memory !", return result;);
+
+ result = tonga_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload arb data to SMC memory !", return result);
+
+ tonga_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to populate initialize pm fuses !", return result);
+
+ result = tonga_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to populate initialize MC Reg table !", return result);
+
+ tonga_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (0 == smu_data->smu7_data.fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ fan_table.FanControl_GL_Flag = 1;
+
+ res = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table,
+ (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ return 0;
+}
+
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return tonga_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to upload MC reg table !",
+ return result);
+
+ result = tonga_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters !",
+ );
+
+ return result;
+}
+
+static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t tonga_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU72_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU72_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU72_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU72_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU72_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU72_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU72_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU72_MAX_LEVELS_MVDD;
+ }
+ pr_warn("can't get the mac value %x\n", value);
+
+ return 0;
+}
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ tonga_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ tonga_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ tonga_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (result != 0);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (result != 0);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+ &address) ?
+ address :
+ table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct tonga_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++)
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device,
+ mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (!result)
+ result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (!result) {
+ tonga_set_s0_mc_reg_index(ni_table);
+ result = tonga_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (!result)
+ tonga_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU72_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
const struct pp_smumgr_func tonga_smu_funcs = {
.smu_init = &tonga_smu_init,
.smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 8c4f761d5bc8..5d70a00348e2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -25,8 +25,26 @@
#define _TONGA_SMUMGR_H_
#include "smu72_discrete.h"
-
#include "smu7_smumgr.h"
+#include "smu72.h"
+
+
+#define ASICID_IS_TONGA_P(wDID, bRID) \
+ (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+ || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
+
+struct tonga_pt_defaults {
+ uint8_t svi_load_line_en;
+ uint8_t svi_load_line_vddC;
+ uint8_t tdc_vddc_throttle_release_limit_perc;
+ uint8_t tdc_mawt;
+ uint8_t tdc_waterfall_ctl;
+ uint8_t dte_ambient_temp_base;
+ uint32_t display_cac;
+ uint32_t bapm_temp_gradient;
+ uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+ uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+};
struct tonga_mc_reg_entry {
uint32_t mclk_max;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 408514c965a0..2f979fb86824 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -53,20 +53,20 @@
#define smnMP0_FW_INTF 0x3010104
#define smnMP1_PUB_CTRL 0x3010b14
-static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
+static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -80,20 +80,20 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
* @param smumgr the address of the powerplay hardware manager.
* @return TRUE SMC has responded, FALSE otherwise.
*/
-static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
/*
@@ -102,43 +102,43 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
/*
* Send a message to the SMC, and wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -146,32 +146,32 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
/*
* Send a message to the SMC with parameter
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -180,51 +180,51 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
/*
* Send a message to the SMC with parameter, do not wait for response
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int vega10_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+ struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- return vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ return vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
}
/*
* Retrieve an argument from SMC.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param arg pointer to store the argument from SMC.
* @return Always return 0.
*/
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
/*
* Copy table from SMC into driver FB
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the driver's table ID to copy from
*/
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -232,16 +232,16 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -255,14 +255,14 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
/*
* Copy table from Driver FB into SMC
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the table to copy from
*/
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -274,17 +274,17 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -293,87 +293,87 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask)
{
int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
PPSMC_MSG_DisableSmuFeatures;
- return vega10_send_msg_to_smc_with_parameter(smumgr,
+ return vega10_send_msg_to_smc_with_parameter(hwmgr,
msg, feature_mask);
}
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled)
{
if (features_enabled == NULL)
return -EINVAL;
- if (!vega10_send_msg_to_smc(smumgr,
+ if (!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeatures)) {
- vega10_read_arg_from_smc(smumgr, features_enabled);
+ vega10_read_arg_from_smc(hwmgr, features_enabled);
return 0;
}
return -EINVAL;
}
-int vega10_set_tools_address(struct pp_smumgr *smumgr)
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high ||
priv->smu_tables.entry[TOOLSTABLE].table_addr_low) {
- if (!vega10_send_msg_to_smc_with_parameter(smumgr,
+ if (!vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
priv->smu_tables.entry[TOOLSTABLE].table_addr_high))
- vega10_send_msg_to_smc_with_parameter(smumgr,
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
priv->smu_tables.entry[TOOLSTABLE].table_addr_low);
}
return 0;
}
-static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
+static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
+ vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version);
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
dev_id = (uint32_t)sys_info.value;
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
rev_id = (uint32_t)sys_info.value;
if (!((dev_id == 0x687f) &&
@@ -392,7 +392,7 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
return 0;
}
-static int vega10_smu_init(struct pp_smumgr *smumgr)
+static int vega10_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv;
uint64_t mc_addr;
@@ -401,7 +401,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
int ret;
struct cgs_firmware_info info = {0};
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
&info);
if (ret || !info.kptr)
@@ -412,10 +412,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for pptable */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(PPTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -425,8 +425,8 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for pptable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -441,7 +441,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[PPTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -451,10 +451,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -469,7 +469,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for AVFS table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -479,12 +479,12 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -500,7 +500,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
tools_size = 0x19000;
if (tools_size) {
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
tools_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -522,7 +522,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
}
/* allocate space for AVFS Fuse table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsFuseOverride_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -532,16 +532,16 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs fuse table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -558,36 +558,36 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static int vega10_smu_fini(struct pp_smumgr *smumgr)
+static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
if (priv->smu_tables.entry[TOOLSTABLE].table)
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int vega10_start_smu(struct pp_smumgr *smumgr)
+static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr),
+ PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
"Failed to verify SMC interface!",
return -EINVAL);
- vega10_set_tools_address(smumgr);
+ vega10_set_tools_address(hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 821425c1e4e0..0695455b21b2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -52,19 +52,19 @@ struct vega10_smumgr {
struct smu_table_array smu_tables;
};
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask);
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled);
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
-int vega10_set_tools_address(struct pp_smumgr *smumgr);
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 283a0dc25e84..07129e6c31a9 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a25f6c72f219..92ec663fdada 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -133,6 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
entity->rq = rq;
entity->sched = sched;
+ spin_lock_init(&entity->rq_lock);
spin_lock_init(&entity->queue_lock);
r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
if (r)
@@ -204,18 +205,38 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
- struct amd_sched_rq *rq = entity->rq;
+ int r;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
-
/**
* The client will not queue more IBs during this fini, consume existing
- * queued IBs
+ * queued IBs or discard them on SIGKILL
*/
- wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+ if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+ r = -ERESTARTSYS;
+ else
+ r = wait_event_killable(sched->job_scheduled,
+ amd_sched_entity_is_idle(entity));
+ amd_sched_entity_set_rq(entity, NULL);
+ if (r) {
+ struct amd_sched_job *job;
+
+ /* Park the kernel for a moment to make sure it isn't processing
+ * our enity.
+ */
+ kthread_park(sched->thread);
+ kthread_unpark(sched->thread);
+ while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
+ struct amd_sched_fence *s_fence = job->s_fence;
+ amd_sched_fence_scheduled(s_fence);
+ dma_fence_set_error(&s_fence->finished, -ESRCH);
+ amd_sched_fence_finished(s_fence);
+ dma_fence_put(&s_fence->finished);
+ sched->ops->free_job(job);
+ }
- amd_sched_rq_remove_entity(rq, entity);
+ }
kfifo_free(&entity->job_queue);
}
@@ -236,6 +257,24 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
dma_fence_put(f);
}
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq)
+{
+ if (entity->rq == rq)
+ return;
+
+ spin_lock(&entity->rq_lock);
+
+ if (entity->rq)
+ amd_sched_rq_remove_entity(entity->rq, entity);
+
+ entity->rq = rq;
+ if (rq)
+ amd_sched_rq_add_entity(rq, entity);
+
+ spin_unlock(&entity->rq_lock);
+}
+
bool amd_sched_dependency_optimized(struct dma_fence* fence,
struct amd_sched_entity *entity)
{
@@ -293,7 +332,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
}
static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
struct amd_sched_job *sched_job;
@@ -333,14 +372,15 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
/* first job wakes up scheduler */
if (first) {
/* Add the entity to the run queue */
+ spin_lock(&entity->rq_lock);
amd_sched_rq_add_entity(entity->rq, entity);
+ spin_unlock(&entity->rq_lock);
amd_sched_wakeup(sched);
}
return added;
}
-/* job_finish is called after hw fence signaled, and
- * the job had already been deleted from ring_mirror_list
+/* job_finish is called after hw fence signaled
*/
static void amd_sched_job_finish(struct work_struct *work)
{
@@ -366,6 +406,7 @@ static void amd_sched_job_finish(struct work_struct *work)
schedule_delayed_work(&next->work_tdr, sched->timeout);
}
spin_unlock(&sched->job_list_lock);
+ dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -381,6 +422,9 @@ static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
+ dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
+ amd_sched_job_finish_cb);
+
spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
@@ -473,8 +517,6 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -545,6 +587,7 @@ static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;
+ dma_fence_get(&s_fence->finished);
atomic_dec(&sched->hw_rq_count);
amd_sched_fence_finished(s_fence);
@@ -585,7 +628,7 @@ static int amd_sched_main(void *param)
if (!entity)
continue;
- sched_job = amd_sched_entity_pop_job(entity);
+ sched_job = amd_sched_entity_peek_job(entity);
if (!sched_job)
continue;
@@ -596,6 +639,7 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
+
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f9d8f28efd16..52c8e5447624 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -39,6 +39,7 @@ struct amd_sched_rq;
struct amd_sched_entity {
struct list_head list;
struct amd_sched_rq *rq;
+ spinlock_t rq_lock;
struct amd_gpu_scheduler *sched;
spinlock_t queue_lock;
@@ -115,9 +116,14 @@ struct amd_sched_backend_ops {
enum amd_sched_priority {
AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
+ AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
+ AMD_SCHED_PRIORITY_NORMAL,
+ AMD_SCHED_PRIORITY_HIGH_SW,
+ AMD_SCHED_PRIORITY_HIGH_HW,
AMD_SCHED_PRIORITY_KERNEL,
- AMD_SCHED_PRIORITY_MAX
+ AMD_SCHED_PRIORITY_MAX,
+ AMD_SCHED_PRIORITY_INVALID = -1,
+ AMD_SCHED_PRIORITY_UNSET = -2
};
/**
@@ -150,6 +156,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq);
int amd_sched_fence_slab_init(void);
void amd_sched_fence_slab_fini(void);
@@ -167,4 +175,11 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
bool amd_sched_dependency_optimized(struct dma_fence* fence,
struct amd_sched_entity *entity);
void amd_sched_job_kickout(struct amd_sched_job *s_job);
+
+static inline enum amd_sched_priority
+amd_sched_get_job_priority(struct amd_sched_job *job)
+{
+ return (job->s_entity->rq - job->sched->sched_rq);
+}
+
#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 289eda54e5aa..074fd4ea7ece 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
#include <linux/of_reserved_mem.h>
@@ -32,7 +33,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
}
static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 72b22b805412..5a5427bbd70e 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -317,9 +317,8 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
formats, ARRAY_SIZE(formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
+ if (ret)
return ERR_PTR(ret);
- }
drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
hdlcd->plane = plane;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index f9bda7b0d2ec..0afb53b1f4e9 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/console.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
@@ -25,6 +26,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include "hdlcd_drv.h"
@@ -106,7 +108,7 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = hdlcd_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -353,7 +355,7 @@ err_unload:
err_free:
drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -378,7 +380,7 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
}
@@ -431,9 +433,11 @@ static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
return 0;
drm_kms_helper_poll_disable(drm);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
hdlcd->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(hdlcd->state)) {
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
return PTR_ERR(hdlcd->state);
}
@@ -450,8 +454,8 @@ static int __maybe_unused hdlcd_pm_resume(struct device *dev)
return 0;
drm_atomic_helper_resume(drm, hdlcd->state);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
- pm_runtime_set_active(dev);
return 0;
}
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 3615d18a7ddf..904fff80917b 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -65,8 +65,8 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
/* We rely on firmware to set mclk to a sensible level. */
clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
- hwdev->modeset(hwdev, &vm);
- hwdev->leave_config_mode(hwdev);
+ hwdev->hw->modeset(hwdev, &vm);
+ hwdev->hw->leave_config_mode(hwdev);
drm_crtc_vblank_on(crtc);
}
@@ -77,8 +77,12 @@ static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
struct malidp_hw_device *hwdev = malidp->dev;
int err;
+ /* always disable planes on the CRTC that is being turned off */
+ drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
drm_crtc_vblank_off(crtc);
- hwdev->enter_config_mode(hwdev);
+ hwdev->hw->enter_config_mode(hwdev);
+
clk_disable_unprepare(hwdev->pxlclk);
err = pm_runtime_put(crtc->dev->dev);
@@ -319,7 +323,7 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
mclk_calc:
drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
- ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+ ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
if (ret < 0)
return -EINVAL;
return 0;
@@ -475,7 +479,7 @@ static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.vsync_irq);
+ hwdev->hw->map.de_irq_map.vsync_irq);
return 0;
}
@@ -485,7 +489,7 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.vsync_irq);
+ hwdev->hw->map.de_irq_map.vsync_irq);
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 1a57cc28955e..91f2b0191368 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -26,6 +26,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include "malidp_drv.h"
@@ -46,10 +47,10 @@ static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
* directly.
*/
malidp_hw_write(hwdev, gamma_write_mask,
- hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+ hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
malidp_hw_write(hwdev, data[i],
- hwdev->map.coeffs_base +
+ hwdev->hw->map.coeffs_base +
MALIDP_COEF_TABLE_DATA);
}
@@ -102,7 +103,7 @@ void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
malidp_hw_write(hwdev,
mc->coloradj_coeffs[i],
- hwdev->map.coeffs_base +
+ hwdev->hw->map.coeffs_base +
MALIDP_COLOR_ADJ_COEF + 4 * i);
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
@@ -119,8 +120,8 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
struct malidp_hw_device *hwdev = malidp->dev;
struct malidp_se_config *s = &cs->scaler_config;
struct malidp_se_config *old_s = &old_cs->scaler_config;
- u32 se_control = hwdev->map.se_base +
- ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ u32 se_control = hwdev->hw->map.se_base +
+ ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
0x10 : 0xC);
u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
@@ -134,7 +135,7 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
return;
}
- hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+ hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
val = malidp_hw_read(hwdev, se_control);
val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
@@ -169,9 +170,9 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
int ret;
atomic_set(&malidp->config_valid, 0);
- hwdev->set_config_valid(hwdev);
+ hwdev->hw->set_config_valid(hwdev);
/* don't wait for config_valid flag if we are in config mode */
- if (hwdev->in_config_mode(hwdev))
+ if (hwdev->hw->in_config_mode(hwdev))
return 0;
ret = wait_event_interruptible_timeout(malidp->wq,
@@ -249,7 +250,7 @@ static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
};
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = malidp_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -454,7 +455,7 @@ static int malidp_runtime_pm_suspend(struct device *dev)
struct malidp_hw_device *hwdev = malidp->dev;
/* we can only suspend if the hardware is in config mode */
- WARN_ON(!hwdev->in_config_mode(hwdev));
+ WARN_ON(!hwdev->hw->in_config_mode(hwdev));
hwdev->pm_suspended = true;
clk_disable_unprepare(hwdev->mclk);
@@ -499,11 +500,7 @@ static int malidp_bind(struct device *dev)
if (!hwdev)
return -ENOMEM;
- /*
- * copy the associated data from malidp_drm_of_match to avoid
- * having to keep a reference to the OF node after binding
- */
- memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+ hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
malidp->dev = hwdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -567,13 +564,13 @@ static int malidp_bind(struct device *dev)
goto query_hw_fail;
}
- ret = hwdev->query_hw(hwdev);
+ ret = hwdev->hw->query_hw(hwdev);
if (ret) {
DRM_ERROR("Invalid HW configuration\n");
goto query_hw_fail;
}
- version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+ version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
(version >> 12) & 0xf, (version >> 8) & 0xf);
@@ -588,7 +585,7 @@ static int malidp_bind(struct device *dev)
for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
- malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+ malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
atomic_set(&malidp->config_valid, 0);
init_waitqueue_head(&malidp->wq);
@@ -670,7 +667,7 @@ query_hw_fail:
malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
alloc_fail:
of_reserved_mem_device_release(dev);
@@ -703,7 +700,7 @@ static void malidp_unbind(struct device *dev)
malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 17bca99e8ac8..2bfb542135ac 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -183,7 +183,7 @@ static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
break;
/*
@@ -203,7 +203,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
@@ -216,7 +216,7 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
{
u32 status;
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
return true;
@@ -407,7 +407,7 @@ static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
break;
/*
@@ -427,7 +427,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
@@ -440,7 +440,7 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
{
u32 status;
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
return true;
@@ -616,7 +616,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev)
return 0;
}
-const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
[MALIDP_500] = {
.map = {
.coeffs_base = MALIDP500_COEFFS_BASE,
@@ -751,7 +751,7 @@ static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 ir
{
u32 base = malidp_get_block_base(hwdev, block);
- if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+ if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
else
malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
@@ -762,12 +762,14 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev;
+ struct malidp_hw *hw;
const struct malidp_irq_map *de;
u32 status, mask, dc_status;
irqreturn_t ret = IRQ_NONE;
hwdev = malidp->dev;
- de = &hwdev->map.de_irq_map;
+ hw = hwdev->hw;
+ de = &hw->map.de_irq_map;
/*
* if we are suspended it is likely that we were invoked because
@@ -778,8 +780,8 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
return IRQ_NONE;
/* first handle the config valid IRQ */
- dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
- if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+ dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+ if (dc_status & hw->map.dc_irq_map.vsync_irq) {
/* we have a page flip event */
atomic_set(&malidp->config_valid, 1);
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
@@ -832,11 +834,11 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
/* first enable the DC block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->map.dc_irq_map.irq_mask);
+ hwdev->hw->map.dc_irq_map.irq_mask);
/* now enable the DE block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.irq_mask);
+ hwdev->hw->map.de_irq_map.irq_mask);
return 0;
}
@@ -847,9 +849,9 @@ void malidp_de_irq_fini(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.irq_mask);
+ hwdev->hw->map.de_irq_map.irq_mask);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->map.dc_irq_map.irq_mask);
+ hwdev->hw->map.dc_irq_map.irq_mask);
}
static irqreturn_t malidp_se_irq(int irq, void *arg)
@@ -857,6 +859,8 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_hw *hw = hwdev->hw;
+ const struct malidp_irq_map *se = &hw->map.se_irq_map;
u32 status, mask;
/*
@@ -867,12 +871,12 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
if (hwdev->pm_suspended)
return IRQ_NONE;
- status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
- if (!(status & hwdev->map.se_irq_map.irq_mask))
+ status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
+ if (!(status & se->irq_mask))
return IRQ_NONE;
- mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
- status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+ mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
+ status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
status &= mask;
/* ToDo: status decoding and firing up of VSYNC and page flip events */
@@ -905,7 +909,7 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
}
malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->map.se_irq_map.irq_mask);
+ hwdev->hw->map.se_irq_map.irq_mask);
return 0;
}
@@ -916,5 +920,5 @@ void malidp_se_irq_fini(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->map.se_irq_map.irq_mask);
+ hwdev->hw->map.se_irq_map.irq_mask);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 849ad9a30c3a..b0690ebb3565 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -120,18 +120,14 @@ struct malidp_hw_regmap {
/* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
#define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
-struct malidp_hw_device {
- const struct malidp_hw_regmap map;
- void __iomem *regs;
+struct malidp_hw_device;
- /* APB clock */
- struct clk *pclk;
- /* AXI clock */
- struct clk *aclk;
- /* main clock for display core */
- struct clk *mclk;
- /* pixel clock for display core */
- struct clk *pxlclk;
+/*
+ * Static structure containing hardware specific data and pointers to
+ * functions that behave differently between various versions of the IP.
+ */
+struct malidp_hw {
+ const struct malidp_hw_regmap map;
/*
* Validate the driver instance against the hardware bits
@@ -182,15 +178,6 @@ struct malidp_hw_device {
struct videomode *vm);
u8 features;
-
- u8 min_line_size;
- u16 max_line_size;
-
- /* track the device PM state */
- bool pm_suspended;
-
- /* size of memory used for rotating layers, up to two banks available */
- u32 rotation_memory[2];
};
/* Supported variants of the hardware */
@@ -202,7 +189,33 @@ enum {
MALIDP_MAX_DEVICES
};
-extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES];
+
+/*
+ * Structure used by the driver during runtime operation.
+ */
+struct malidp_hw_device {
+ struct malidp_hw *hw;
+ void __iomem *regs;
+
+ /* APB clock */
+ struct clk *pclk;
+ /* AXI clock */
+ struct clk *aclk;
+ /* main clock for display core */
+ struct clk *mclk;
+ /* pixel clock for display core */
+ struct clk *pxlclk;
+
+ u8 min_line_size;
+ u16 max_line_size;
+
+ /* track the device PM state */
+ bool pm_suspended;
+
+ /* size of memory used for rotating layers, up to two banks available */
+ u32 rotation_memory[2];
+};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
{
@@ -240,9 +253,9 @@ static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
{
switch (block) {
case MALIDP_SE_BLOCK:
- return hwdev->map.se_base;
+ return hwdev->hw->map.se_base;
case MALIDP_DC_BLOCK:
- return hwdev->map.dc_base;
+ return hwdev->hw->map.dc_base;
}
return 0;
@@ -275,7 +288,7 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
unsigned int pitch)
{
- return !(pitch & (hwdev->map.bus_align_bytes - 1));
+ return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
}
/* U16.16 */
@@ -308,8 +321,8 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
};
u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
- u32 image_enh = hwdev->map.se_base +
- ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ u32 image_enh = hwdev->hw->map.se_base +
+ ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
int i;
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 94e7e3fa3408..e7419797bbd1 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -57,7 +57,7 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
struct malidp_plane *mp = to_malidp_plane(plane);
if (mp->base.fb)
- drm_framebuffer_unreference(mp->base.fb);
+ drm_framebuffer_put(mp->base.fb);
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
@@ -185,8 +185,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
fb = state->fb;
- ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- fb->format->format);
+ ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
+ mp->layer->id,
+ fb->format->format);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
@@ -211,7 +212,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
* third plane stride register.
*/
if (ms->n_planes == 3 &&
- !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+ !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
(state->fb->pitches[1] != state->fb->pitches[2]))
return -EINVAL;
@@ -229,9 +230,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
- val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
- state->crtc_w,
- fb->format->format);
+ val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
+ state->crtc_w,
+ fb->format->format);
if (val < 0)
return val;
@@ -251,7 +252,7 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
return;
if (num_planes == 3)
- num_strides = (mp->hwdev->features &
+ num_strides = (mp->hwdev->hw->features &
MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
for (i = 0; i < num_strides; ++i)
@@ -264,13 +265,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct malidp_plane *mp;
- const struct malidp_hw_regmap *map;
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u32 src_w, src_h, dest_w, dest_h, val;
int i;
mp = to_malidp_plane(plane);
- map = &mp->hwdev->map;
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
@@ -363,7 +362,7 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm->dev_private;
- const struct malidp_hw_regmap *map = &malidp->dev->map;
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index 1ab4cf863bf7..ecf25cf9f9f5 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -5,5 +5,3 @@ armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
obj-$(CONFIG_DRM_ARMADA) := armada.o
-
-CFLAGS_armada_trace.o := -I$(src)
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index ad3d2ebf95c9..41a784f5a5e6 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -9,7 +9,6 @@
*/
#include <linux/clk.h>
#include <linux/io.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2a4d163ac76f..2e065facdce7 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -298,7 +298,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
if (force) {
/* Display is disabled, so just drop the old fb */
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
return;
}
@@ -321,7 +321,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
* the best. The worst that will happen is the buffer gets
* reused before it has finished being displayed.
*/
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
}
static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
@@ -577,7 +577,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
unsigned i;
bool interlaced;
- drm_framebuffer_reference(crtc->primary->fb);
+ drm_framebuffer_get(crtc->primary->fb);
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
@@ -718,7 +718,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
MAX_SCHEDULE_TIMEOUT);
/* Take a reference to the new fb as we're using it */
- drm_framebuffer_reference(crtc->primary->fb);
+ drm_framebuffer_get(crtc->primary->fb);
/* Update the base in the CRTC */
armada_drm_crtc_update_regs(dcrtc, regs);
@@ -742,7 +742,7 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
* primary plane.
*/
if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
+ drm_framebuffer_put(plane->fb);
/* Power down the Y/U/V FIFOs */
sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
@@ -947,13 +947,13 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
/* Must be a kernel-mapped object */
if (!obj->addr) {
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return -EINVAL;
}
if (obj->obj.size < w * h * 4) {
DRM_ERROR("buffer is too small\n");
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return -ENOMEM;
}
}
@@ -961,7 +961,7 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
if (dcrtc->cursor_obj) {
dcrtc->cursor_obj->update = NULL;
dcrtc->cursor_obj->update_data = NULL;
- drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
+ drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
}
dcrtc->cursor_obj = obj;
dcrtc->cursor_w = w;
@@ -997,7 +997,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
struct armada_private *priv = crtc->dev->dev_private;
if (dcrtc->cursor_obj)
- drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
+ drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
@@ -1045,12 +1045,12 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
* Ensure that we hold a reference on the new framebuffer.
* This has to match the behaviour in mode_set.
*/
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
if (ret) {
/* Undo our reference above */
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
kfree(work);
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 0b3227c039d7..e857b88a9799 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -9,7 +9,6 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
@@ -26,7 +25,7 @@ static void armada_drm_unref_work(struct work_struct *work)
struct drm_framebuffer *fb;
while (kfifo_get(&priv->fb_unref, &fb))
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
}
/* Must be called with dev->event_lock held */
@@ -70,8 +69,6 @@ static struct drm_driver armada_drm_driver = {
.gem_prime_export = armada_gem_prime_export,
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
- .dumb_map_offset = armada_gem_dumb_map_offset,
- .dumb_destroy = armada_gem_dumb_destroy,
.gem_vm_ops = &armada_gem_vm_ops,
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 92e6b08ea64a..a38d5a0892a9 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -5,7 +5,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "armada_drm.h"
@@ -18,7 +17,7 @@ static void armada_fb_destroy(struct drm_framebuffer *fb)
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
drm_framebuffer_cleanup(&dfb->fb);
- drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+ drm_gem_object_put_unlocked(&dfb->obj->obj);
kfree(dfb);
}
@@ -95,7 +94,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
* the above call, but the caller will drop their reference
* to it. Hence we need to take our own reference.
*/
- drm_gem_object_reference(&obj->obj);
+ drm_gem_object_get(&obj->obj);
return dfb;
}
@@ -144,12 +143,12 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err;
}
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return &dfb->fb;
err_unref:
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
err:
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 29c7d047b152..a2ce83f84800 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -52,13 +51,13 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
ret = armada_gem_linear_back(dev, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return ret;
}
ptr = armada_gem_map_object(dev, obj);
if (!ptr) {
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return -ENOMEM;
}
@@ -68,7 +67,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
* A reference is now held by the framebuffer object if
* successful, otherwise this drops the ref for the error path.
*/
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
if (IS_ERR(dfb))
return PTR_ERR(dfb);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a76ca21d063b..a97f509743a5 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -8,7 +8,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
-#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
@@ -266,46 +265,10 @@ int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return ret;
}
-int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- struct armada_gem_object *obj;
- int ret = 0;
-
- obj = armada_gem_object_lookup(file, handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object\n");
- return -EINVAL;
- }
-
- /* Don't allow imported objects to be mapped */
- if (obj->obj.import_attach) {
- ret = -EINVAL;
- goto err_unref;
- }
-
- ret = drm_gem_create_mmap_offset(&obj->obj);
- if (ret == 0) {
- *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
- DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
- }
-
- err_unref:
- drm_gem_object_unreference_unlocked(&obj->obj);
-
- return ret;
-}
-
-int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -334,7 +297,7 @@ int armada_gem_create_ioctl(struct drm_device *dev, void *data,
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return ret;
}
@@ -351,13 +314,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (!dobj->obj.filp) {
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return -EINVAL;
}
addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, args->offset);
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
if (IS_ERR_VALUE(addr))
return addr;
@@ -412,7 +375,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
unref:
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return ret;
}
@@ -561,7 +524,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
* Importing our own dmabuf(s) increases the
* refcount on the gem object itself.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return obj;
}
}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index 6e524e0676bb..1ac90792b166 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -35,10 +35,6 @@ struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
size_t);
int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *);
-int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
- uint32_t, uint64_t *);
-int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
- uint32_t);
struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index edc44910d79f..b411b608821a 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -177,7 +177,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
* Take a reference on the new framebuffer - we want to
* hold on to it while the hardware is displaying it.
*/
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
if (plane->fb)
armada_ovl_retire_fb(dplane, plane->fb);
@@ -278,7 +278,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane,
fb = xchg(&dplane->old_fb, NULL);
if (fb)
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index 1e9f55fc8735..8dbfea7a00fe 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -63,5 +63,5 @@ TRACE_EVENT(armada_ovl_plane_work,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/armada
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6f3849ec0c1d..9555a3542022 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -713,7 +713,7 @@ static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connect
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 74d66e11f688..c6e8061ffcfc 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -458,7 +458,7 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 4237b0446721..6833ee253cfa 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 6a91e62da2f4..a24a18fbd65a 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -213,7 +213,7 @@ bochs_connector_best_encoder(struct drm_connector *connector)
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index adf9ae0e0b7c..3b99d5a06c16 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -71,7 +71,7 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
- depends on OF
+ depends on OF && RC_CORE
select DRM_KMS_HELPER
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
@@ -84,6 +84,14 @@ config DRM_SII902X
---help---
Silicon Image sii902x bridge chip driver.
+config DRM_SII9234
+ tristate "Silicon Image SII9234 HDMI/MHL bridge"
+ depends on OF
+ ---help---
+ Say Y here if you want support for the MHL interface.
+ It is an I2C driver, that detects connection of MHL bridge
+ and starts encapsulation of HDMI signal.
+
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 60dab87e4783..373eb28f31ed 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
+obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 2fed567f9943..592b9d2ec034 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -21,3 +21,11 @@ config DRM_I2C_ADV7533
default y
help
Support for the Analog Devices ADV7533 DSI to HDMI encoder.
+
+config DRM_I2C_ADV7511_CEC
+ bool "ADV7511/33 HDMI CEC driver"
+ depends on DRM_I2C_ADV7511
+ select CEC_CORE
+ default y
+ help
+ When selected the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index 5ba675534f6e..5bb384938a71 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,4 +1,5 @@
adv7511-y := adv7511_drv.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
+adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index fe18a5d2d84b..d034b2cb5eee 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -195,6 +195,25 @@
#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
+#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
+#define ADV7511_REG_CEC_TX_FRAME_LEN 0x10
+#define ADV7511_REG_CEC_TX_ENABLE 0x11
+#define ADV7511_REG_CEC_TX_RETRY 0x12
+#define ADV7511_REG_CEC_TX_LOW_DRV_CNT 0x14
+#define ADV7511_REG_CEC_RX_FRAME_HDR 0x15
+#define ADV7511_REG_CEC_RX_FRAME_DATA0 0x16
+#define ADV7511_REG_CEC_RX_FRAME_LEN 0x25
+#define ADV7511_REG_CEC_RX_ENABLE 0x26
+#define ADV7511_REG_CEC_RX_BUFFERS 0x4a
+#define ADV7511_REG_CEC_LOG_ADDR_MASK 0x4b
+#define ADV7511_REG_CEC_LOG_ADDR_0_1 0x4c
+#define ADV7511_REG_CEC_LOG_ADDR_2 0x4d
+#define ADV7511_REG_CEC_CLK_DIV 0x4e
+#define ADV7511_REG_CEC_SOFT_RESET 0x50
+
+#define ADV7533_REG_CEC_OFFSET 0x70
+
enum adv7511_input_clock {
ADV7511_INPUT_CLOCK_1X,
ADV7511_INPUT_CLOCK_2X,
@@ -297,6 +316,8 @@ enum adv7511_type {
ADV7533,
};
+#define ADV7511_MAX_ADDRS 3
+
struct adv7511 {
struct i2c_client *i2c_main;
struct i2c_client *i2c_edid;
@@ -328,8 +349,6 @@ struct adv7511 {
enum adv7511_sync_polarity hsync_polarity;
bool rgb;
- struct edid *edid;
-
struct gpio_desc *gpio_pd;
struct regulator_bulk_data *supplies;
@@ -343,15 +362,36 @@ struct adv7511 {
enum adv7511_type type;
struct platform_device *audio_pdev;
+
+ struct cec_adapter *cec_adap;
+ u8 cec_addr[ADV7511_MAX_ADDRS];
+ u8 cec_valid_addrs;
+ bool cec_enabled_adap;
+ struct clk *cec_clk;
+ u32 cec_clk_freq;
};
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
+void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return 0;
+}
+#endif
+
#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
-void adv7533_uninit_cec(struct adv7511 *adv);
-int adv7533_init_cec(struct adv7511 *adv);
+int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
@@ -374,11 +414,7 @@ static inline int adv7533_patch_registers(struct adv7511 *adv)
return -ENODEV;
}
-static inline void adv7533_uninit_cec(struct adv7511 *adv)
-{
-}
-
-static inline int adv7533_init_cec(struct adv7511 *adv)
+static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
{
return -ENODEV;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 67469c26bae8..1b4783d45c53 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -210,7 +210,7 @@ static const struct hdmi_codec_ops adv7511_codec_ops = {
.get_dai_id = adv7511_hdmi_i2s_get_dai_id,
};
-static struct hdmi_codec_pdata codec_data = {
+static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
new file mode 100644
index 000000000000..a20a45c0b353
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -0,0 +1,349 @@
+/*
+ * adv7511_cec.c - Analog Devices ADV7511/33 cec driver
+ *
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <media/cec.h>
+
+#include "adv7511.h"
+
+#define ADV7511_INT1_CEC_MASK \
+ (ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | \
+ ADV7511_INT1_CEC_TX_RETRY_TIMEOUT | ADV7511_INT1_CEC_RX_READY1)
+
+static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ unsigned int val;
+
+ if (regmap_read(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_ENABLE + offset, &val))
+ return;
+
+ if ((val & 0x01) == 0)
+ return;
+
+ if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
+ cec_transmit_attempt_done(adv7511->cec_adap,
+ CEC_TX_STATUS_ARB_LOST);
+ return;
+ }
+ if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
+ u8 status;
+ u8 err_cnt = 0;
+ u8 nack_cnt = 0;
+ u8 low_drive_cnt = 0;
+ unsigned int cnt;
+
+ /*
+ * We set this status bit since this hardware performs
+ * retransmissions.
+ */
+ status = CEC_TX_STATUS_MAX_RETRIES;
+ if (regmap_read(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_LOW_DRV_CNT + offset, &cnt)) {
+ err_cnt = 1;
+ status |= CEC_TX_STATUS_ERROR;
+ } else {
+ nack_cnt = cnt & 0xf;
+ if (nack_cnt)
+ status |= CEC_TX_STATUS_NACK;
+ low_drive_cnt = cnt >> 4;
+ if (low_drive_cnt)
+ status |= CEC_TX_STATUS_LOW_DRIVE;
+ }
+ cec_transmit_done(adv7511->cec_adap, status,
+ 0, nack_cnt, low_drive_cnt, err_cnt);
+ return;
+ }
+ if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
+ cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
+ return;
+ }
+}
+
+void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY |
+ ADV7511_INT1_CEC_TX_ARBIT_LOST |
+ ADV7511_INT1_CEC_TX_RETRY_TIMEOUT;
+ struct cec_msg msg = {};
+ unsigned int len;
+ unsigned int val;
+ u8 i;
+
+ if (irq1 & irq_tx_mask)
+ adv_cec_tx_raw_status(adv7511, irq1);
+
+ if (!(irq1 & ADV7511_INT1_CEC_RX_READY1))
+ return;
+
+ if (regmap_read(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_FRAME_LEN + offset, &len))
+ return;
+
+ msg.len = len & 0x1f;
+
+ if (msg.len > 16)
+ msg.len = 16;
+
+ if (!msg.len)
+ return;
+
+ for (i = 0; i < msg.len; i++) {
+ regmap_read(adv7511->regmap_cec,
+ i + ADV7511_REG_CEC_RX_FRAME_HDR + offset, &val);
+ msg.msg[i] = val;
+ }
+
+ /* toggle to re-enable rx 1 */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 1);
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
+ cec_received_msg(adv7511->cec_adap, &msg);
+}
+
+static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+
+ if (adv7511->i2c_cec == NULL)
+ return -EIO;
+
+ if (!adv7511->cec_enabled_adap && enable) {
+ /* power up cec section */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_CLK_DIV + offset,
+ 0x03, 0x01);
+ /* legacy mode and clear all rx buffers */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0x07);
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
+ /* initially disable tx */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_ENABLE + offset, 1, 0);
+ /* enabled irqs: */
+ /* tx: ready */
+ /* tx: arbitration lost */
+ /* tx: retry timeout */
+ /* rx: ready 1 */
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1), 0x3f,
+ ADV7511_INT1_CEC_MASK);
+ } else if (adv7511->cec_enabled_adap && !enable) {
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1), 0x3f, 0);
+ /* disable address mask 1-3 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x70, 0x00);
+ /* power down cec section */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_CLK_DIV + offset,
+ 0x03, 0x00);
+ adv7511->cec_valid_addrs = 0;
+ }
+ adv7511->cec_enabled_adap = enable;
+ return 0;
+}
+
+static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ unsigned int i, free_idx = ADV7511_MAX_ADDRS;
+
+ if (!adv7511->cec_enabled_adap)
+ return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
+
+ if (addr == CEC_LOG_ADDR_INVALID) {
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x70, 0);
+ adv7511->cec_valid_addrs = 0;
+ return 0;
+ }
+
+ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+ bool is_valid = adv7511->cec_valid_addrs & (1 << i);
+
+ if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
+ free_idx = i;
+ if (is_valid && adv7511->cec_addr[i] == addr)
+ return 0;
+ }
+ if (i == ADV7511_MAX_ADDRS) {
+ i = free_idx;
+ if (i == ADV7511_MAX_ADDRS)
+ return -ENXIO;
+ }
+ adv7511->cec_addr[i] = addr;
+ adv7511->cec_valid_addrs |= 1 << i;
+
+ switch (i) {
+ case 0:
+ /* enable address mask 0 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x10, 0x10);
+ /* set address for mask 0 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
+ 0x0f, addr);
+ break;
+ case 1:
+ /* enable address mask 1 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x20, 0x20);
+ /* set address for mask 1 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
+ 0xf0, addr << 4);
+ break;
+ case 2:
+ /* enable address mask 2 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x40, 0x40);
+ /* set address for mask 1 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_2 + offset,
+ 0x0f, addr);
+ break;
+ }
+ return 0;
+}
+
+static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ u8 len = msg->len;
+ unsigned int i;
+
+ /*
+ * The number of retries is the number of attempts - 1, but retry
+ * at least once. It's not clear if a value of 0 is allowed, so
+ * let's do at least one retry.
+ */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_RETRY + offset,
+ 0x70, max(1, attempts - 1) << 4);
+
+ /* blocking, clear cec tx irq status */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INT(1), 0x38, 0x38);
+
+ /* write data */
+ for (i = 0; i < len; i++)
+ regmap_write(adv7511->regmap_cec,
+ i + ADV7511_REG_CEC_TX_FRAME_HDR + offset,
+ msg->msg[i]);
+
+ /* set length (data + header) */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_FRAME_LEN + offset, len);
+ /* start transmit, enable tx */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_ENABLE + offset, 0x01);
+ return 0;
+}
+
+static const struct cec_adap_ops adv7511_cec_adap_ops = {
+ .adap_enable = adv7511_cec_adap_enable,
+ .adap_log_addr = adv7511_cec_adap_log_addr,
+ .adap_transmit = adv7511_cec_adap_transmit,
+};
+
+static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
+{
+ adv7511->cec_clk = devm_clk_get(dev, "cec");
+ if (IS_ERR(adv7511->cec_clk)) {
+ int ret = PTR_ERR(adv7511->cec_clk);
+
+ adv7511->cec_clk = NULL;
+ return ret;
+ }
+ clk_prepare_enable(adv7511->cec_clk);
+ adv7511->cec_clk_freq = clk_get_rate(adv7511->cec_clk);
+ return 0;
+}
+
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ int ret = adv7511_cec_parse_dt(dev, adv7511);
+
+ if (ret)
+ goto err_cec_parse_dt;
+
+ adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
+ adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
+ if (IS_ERR(adv7511->cec_adap)) {
+ ret = PTR_ERR(adv7511->cec_adap);
+ goto err_cec_alloc;
+ }
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
+ /* cec soft reset */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_SOFT_RESET + offset, 0x00);
+
+ /* legacy mode */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0x00);
+
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_CLK_DIV + offset,
+ ((adv7511->cec_clk_freq / 750000) - 1) << 2);
+
+ ret = cec_register_adapter(adv7511->cec_adap, dev);
+ if (ret)
+ goto err_cec_register;
+ return 0;
+
+err_cec_register:
+ cec_delete_adapter(adv7511->cec_adap);
+ adv7511->cec_adap = NULL;
+err_cec_alloc:
+ dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+ ret);
+err_cec_parse_dt:
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return ret == -EPROBE_DEFER ? ret : 0;
+}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index b2431aee7887..efa29db5fc2b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -11,12 +11,15 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <media/cec.h>
+
#include "adv7511.h"
/* ADI recommended values for proper operation. */
@@ -199,17 +202,14 @@ static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
static void adv7511_set_config_csc(struct adv7511 *adv7511,
struct drm_connector *connector,
- bool rgb)
+ bool rgb, bool hdmi_mode)
{
struct adv7511_video_config config;
bool output_format_422, output_format_ycbcr;
unsigned int mode;
uint8_t infoframe[17];
- if (adv7511->edid)
- config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
- else
- config.hdmi_mode = false;
+ config.hdmi_mode = hdmi_mode;
hdmi_avi_infoframe_init(&config.avi_infoframe);
@@ -339,8 +339,10 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
*/
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD);
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
- ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR,
+ ADV7511_INT1_DDC_ERROR);
}
/*
@@ -376,6 +378,9 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR, 0);
regcache_mark_dirty(adv7511->regmap);
}
@@ -426,6 +431,8 @@ static void adv7511_hpd_work(struct work_struct *work)
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
+ if (status == connector_status_disconnected)
+ cec_phys_addr_invalidate(adv7511->cec_adap);
drm_kms_helper_hotplug_event(adv7511->connector.dev);
}
}
@@ -456,6 +463,10 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
wake_up_all(&adv7511->wq);
}
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ adv7511_cec_irq_process(adv7511, irq1);
+#endif
+
return 0;
}
@@ -589,15 +600,16 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- kfree(adv7511->edid);
- adv7511->edid = edid;
- if (!edid)
- return 0;
drm_mode_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
+ drm_detect_hdmi_monitor(edid));
+
+ cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
+
+ kfree(edid);
return count;
}
@@ -833,7 +845,11 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
return -ENODEV;
}
- adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ if (adv->i2c_main->irq)
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
ret = drm_connector_init(bridge->dev, &adv->connector,
&adv7511_connector_funcs,
@@ -919,6 +935,65 @@ static void adv7511_uninit_regulators(struct adv7511 *adv)
regulator_bulk_disable(adv->num_supplies, adv->supplies);
}
+static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ if (adv7511->type == ADV7533)
+ reg -= ADV7533_REG_CEC_OFFSET;
+
+ switch (reg) {
+ case ADV7511_REG_CEC_RX_FRAME_HDR:
+ case ADV7511_REG_CEC_RX_FRAME_DATA0...
+ ADV7511_REG_CEC_RX_FRAME_DATA0 + 14:
+ case ADV7511_REG_CEC_RX_FRAME_LEN:
+ case ADV7511_REG_CEC_RX_BUFFERS:
+ case ADV7511_REG_CEC_TX_LOW_DRV_CNT:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config adv7511_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = adv7511_cec_register_volatile,
+};
+
+static int adv7511_init_cec_regmap(struct adv7511 *adv)
+{
+ int ret;
+
+ adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
+ adv->i2c_main->addr - 1);
+ if (!adv->i2c_cec)
+ return -ENOMEM;
+ i2c_set_clientdata(adv->i2c_cec, adv);
+
+ adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
+ &adv7511_cec_regmap_config);
+ if (IS_ERR(adv->regmap_cec)) {
+ ret = PTR_ERR(adv->regmap_cec);
+ goto err;
+ }
+
+ if (adv->type == ADV7533) {
+ ret = adv7533_patch_cec_registers(adv);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ i2c_unregister_device(adv->i2c_cec);
+ return ret;
+}
+
static int adv7511_parse_dt(struct device_node *np,
struct adv7511_link_config *config)
{
@@ -1092,11 +1167,9 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto uninit_regulators;
}
- if (adv7511->type == ADV7533) {
- ret = adv7533_init_cec(adv7511);
- if (ret)
- goto err_i2c_unregister_edid;
- }
+ ret = adv7511_init_cec_regmap(adv7511);
+ if (ret)
+ goto err_i2c_unregister_edid;
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
@@ -1111,10 +1184,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
}
- /* CEC is unused for now */
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
-
adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
@@ -1122,17 +1191,22 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (adv7511->type == ADV7511)
adv7511_set_link_config(adv7511, &link_config);
+ ret = adv7511_cec_init(dev, adv7511);
+ if (ret)
+ goto err_unregister_cec;
+
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
-
return 0;
err_unregister_cec:
- adv7533_uninit_cec(adv7511);
+ i2c_unregister_device(adv7511->i2c_cec);
+ if (adv7511->cec_clk)
+ clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
@@ -1145,10 +1219,11 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533) {
+ if (adv7511->type == ADV7533)
adv7533_detach_dsi(adv7511);
- adv7533_uninit_cec(adv7511);
- }
+ i2c_unregister_device(adv7511->i2c_cec);
+ if (adv7511->cec_clk)
+ clk_disable_unprepare(adv7511->cec_clk);
adv7511_uninit_regulators(adv7511);
@@ -1156,9 +1231,9 @@ static int adv7511_remove(struct i2c_client *i2c)
adv7511_audio_exit(adv7511);
- i2c_unregister_device(adv7511->i2c_edid);
+ cec_unregister_adapter(adv7511->cec_adap);
- kfree(adv7511->edid);
+ i2c_unregister_device(adv7511->i2c_edid);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index ac804f81e2f6..185b6d842166 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -32,14 +32,6 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
-static const struct regmap_config adv7533_cec_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = 0xff,
- .cache_type = REGCACHE_RBTREE,
-};
-
static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
@@ -145,37 +137,11 @@ int adv7533_patch_registers(struct adv7511 *adv)
ARRAY_SIZE(adv7533_fixed_registers));
}
-void adv7533_uninit_cec(struct adv7511 *adv)
-{
- i2c_unregister_device(adv->i2c_cec);
-}
-
-int adv7533_init_cec(struct adv7511 *adv)
+int adv7533_patch_cec_registers(struct adv7511 *adv)
{
- int ret;
-
- adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
- adv->i2c_main->addr - 1);
- if (!adv->i2c_cec)
- return -ENOMEM;
-
- adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
- &adv7533_cec_regmap_config);
- if (IS_ERR(adv->regmap_cec)) {
- ret = PTR_ERR(adv->regmap_cec);
- goto err;
- }
-
- ret = regmap_register_patch(adv->regmap_cec,
+ return regmap_register_patch(adv->regmap_cec,
adv7533_cec_fixed_registers,
ARRAY_SIZE(adv7533_cec_fixed_registers));
- if (ret)
- goto err;
-
- return 0;
-err:
- adv7533_uninit_cec(adv);
- return ret;
}
int adv7533_attach_dsi(struct adv7511 *adv)
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 5dd3f1cd074a..a8905049b9da 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
return 0;
}
+ pm_runtime_get_sync(dp->dev);
edid = drm_get_edid(connector, &dp->aux.ddc);
+ pm_runtime_put(dp->dev);
if (edid) {
drm_mode_connector_update_edid_property(&dp->connector,
edid);
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 0903ba574f61..75b0d3f6e4de 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -13,13 +13,37 @@
#include <linux/of_graph.h>
+struct lvds_encoder {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+ struct lvds_encoder *lvds_encoder = container_of(bridge,
+ struct lvds_encoder,
+ bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+ bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+ .attach = lvds_encoder_attach,
+};
+
static int lvds_encoder_probe(struct platform_device *pdev)
{
struct device_node *port;
struct device_node *endpoint;
struct device_node *panel_node;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct lvds_encoder *lvds_encoder;
+
+ lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+ GFP_KERNEL);
+ if (!lvds_encoder)
+ return -ENOMEM;
/* Locate the panel DT node. */
port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
+ lvds_encoder->panel_bridge =
+ devm_drm_panel_bridge_add(&pdev->dev,
+ panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds_encoder->panel_bridge))
+ return PTR_ERR(lvds_encoder->panel_bridge);
+
+ /* The panel_bridge bridge is attached to the panel's of_node,
+ * but we need a bridge attached to our of_node for our user
+ * to look up.
+ */
+ lvds_encoder->bridge.of_node = pdev->dev.of_node;
+ lvds_encoder->bridge.funcs = &funcs;
+ drm_bridge_add(&lvds_encoder->bridge);
- platform_set_drvdata(pdev, bridge);
+ platform_set_drvdata(pdev, lvds_encoder);
return 0;
}
static int lvds_encoder_remove(struct platform_device *pdev)
{
- struct drm_bridge *bridge = platform_get_drvdata(pdev);
+ struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&lvds_encoder->bridge);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e0cca19b4044..6d99d4a3beb3 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -188,7 +188,15 @@ EXPORT_SYMBOL(drm_panel_bridge_add);
*/
void drm_panel_bridge_remove(struct drm_bridge *bridge)
{
- struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct panel_bridge *panel_bridge;
+
+ if (!bridge)
+ return;
+
+ if (bridge->funcs != &panel_bridge_bridge_funcs)
+ return;
+
+ panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
devm_kfree(panel_bridge->panel->dev, bridge);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
new file mode 100644
index 000000000000..c77000626c22
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2017 Samsung Electronics
+ *
+ * Authors:
+ * Tomasz Stanislawski <t.stanislaws@samsung.com>
+ * Maciej Purski <m.purski@samsung.com>
+ *
+ * Based on sii9234 driver created by:
+ * Adam Hampson <ahampson@sta.samsung.com>
+ * Erik Gilling <konkers@android.com>
+ * Shankar Bandal <shankar.b@samsung.com>
+ * Dharam Kumar <dharam.kr@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program
+ *
+ */
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define CBUS_DEVCAP_OFFSET 0x80
+
+#define SII9234_MHL_VERSION 0x11
+#define SII9234_SCRATCHPAD_SIZE 0x10
+#define SII9234_INT_STAT_SIZE 0x33
+
+#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
+#define MHL_HPD_OUT_OVR_EN BIT(4)
+#define MHL_HPD_OUT_OVR_VAL BIT(5)
+#define MHL_INIT_TIMEOUT 0x0C
+
+/* MHL Tx registers and bits */
+#define MHL_TX_SRST 0x05
+#define MHL_TX_SYSSTAT_REG 0x09
+#define MHL_TX_INTR1_REG 0x71
+#define MHL_TX_INTR4_REG 0x74
+#define MHL_TX_INTR1_ENABLE_REG 0x75
+#define MHL_TX_INTR4_ENABLE_REG 0x78
+#define MHL_TX_INT_CTRL_REG 0x79
+#define MHL_TX_TMDS_CCTRL 0x80
+#define MHL_TX_DISC_CTRL1_REG 0x90
+#define MHL_TX_DISC_CTRL2_REG 0x91
+#define MHL_TX_DISC_CTRL3_REG 0x92
+#define MHL_TX_DISC_CTRL4_REG 0x93
+#define MHL_TX_DISC_CTRL5_REG 0x94
+#define MHL_TX_DISC_CTRL6_REG 0x95
+#define MHL_TX_DISC_CTRL7_REG 0x96
+#define MHL_TX_DISC_CTRL8_REG 0x97
+#define MHL_TX_STAT2_REG 0x99
+#define MHL_TX_MHLTX_CTL1_REG 0xA0
+#define MHL_TX_MHLTX_CTL2_REG 0xA1
+#define MHL_TX_MHLTX_CTL4_REG 0xA3
+#define MHL_TX_MHLTX_CTL6_REG 0xA5
+#define MHL_TX_MHLTX_CTL7_REG 0xA6
+
+#define RSEN_STATUS BIT(2)
+#define HPD_CHANGE_INT BIT(6)
+#define RSEN_CHANGE_INT BIT(5)
+#define RGND_READY_INT BIT(6)
+#define VBUS_LOW_INT BIT(5)
+#define CBUS_LKOUT_INT BIT(4)
+#define MHL_DISC_FAIL_INT BIT(3)
+#define MHL_EST_INT BIT(2)
+#define HPD_CHANGE_INT_MASK BIT(6)
+#define RSEN_CHANGE_INT_MASK BIT(5)
+
+#define RGND_READY_MASK BIT(6)
+#define CBUS_LKOUT_MASK BIT(4)
+#define MHL_DISC_FAIL_MASK BIT(3)
+#define MHL_EST_MASK BIT(2)
+
+#define SKIP_GND BIT(6)
+
+#define ATT_THRESH_SHIFT 0x04
+#define ATT_THRESH_MASK (0x03 << ATT_THRESH_SHIFT)
+#define USB_D_OEN BIT(3)
+#define DEGLITCH_TIME_MASK 0x07
+#define DEGLITCH_TIME_2MS 0
+#define DEGLITCH_TIME_4MS 1
+#define DEGLITCH_TIME_8MS 2
+#define DEGLITCH_TIME_16MS 3
+#define DEGLITCH_TIME_40MS 4
+#define DEGLITCH_TIME_50MS 5
+#define DEGLITCH_TIME_60MS 6
+#define DEGLITCH_TIME_128MS 7
+
+#define USB_D_OVR BIT(7)
+#define USB_ID_OVR BIT(6)
+#define DVRFLT_SEL BIT(5)
+#define BLOCK_RGND_INT BIT(4)
+#define SKIP_DEG BIT(3)
+#define CI2CA_POL BIT(2)
+#define CI2CA_WKUP BIT(1)
+#define SINGLE_ATT BIT(0)
+
+#define USB_D_ODN BIT(5)
+#define VBUS_CHECK BIT(2)
+#define RGND_INTP_MASK 0x03
+#define RGND_INTP_OPEN 0
+#define RGND_INTP_2K 1
+#define RGND_INTP_1K 2
+#define RGND_INTP_SHORT 3
+
+/* HDMI registers */
+#define HDMI_RX_TMDS0_CCTRL1_REG 0x10
+#define HDMI_RX_TMDS_CLK_EN_REG 0x11
+#define HDMI_RX_TMDS_CH_EN_REG 0x12
+#define HDMI_RX_PLL_CALREFSEL_REG 0x17
+#define HDMI_RX_PLL_VCOCAL_REG 0x1A
+#define HDMI_RX_EQ_DATA0_REG 0x22
+#define HDMI_RX_EQ_DATA1_REG 0x23
+#define HDMI_RX_EQ_DATA2_REG 0x24
+#define HDMI_RX_EQ_DATA3_REG 0x25
+#define HDMI_RX_EQ_DATA4_REG 0x26
+#define HDMI_RX_TMDS_ZONE_CTRL_REG 0x4C
+#define HDMI_RX_TMDS_MODE_CTRL_REG 0x4D
+
+/* CBUS registers */
+#define CBUS_INT_STATUS_1_REG 0x08
+#define CBUS_INTR1_ENABLE_REG 0x09
+#define CBUS_MSC_REQ_ABORT_REASON_REG 0x0D
+#define CBUS_INT_STATUS_2_REG 0x1E
+#define CBUS_INTR2_ENABLE_REG 0x1F
+#define CBUS_LINK_CONTROL_2_REG 0x31
+#define CBUS_MHL_STATUS_REG_0 0xB0
+#define CBUS_MHL_STATUS_REG_1 0xB1
+
+#define BIT_CBUS_RESET BIT(3)
+#define SET_HPD_DOWNSTREAM BIT(6)
+
+/* TPI registers */
+#define TPI_DPD_REG 0x3D
+
+/* Timeouts in msec */
+#define T_SRC_VBUS_CBUS_TO_STABLE 200
+#define T_SRC_CBUS_FLOAT 100
+#define T_SRC_CBUS_DEGLITCH 2
+#define T_SRC_RXSENSE_DEGLITCH 110
+
+#define MHL1_MAX_CLK 75000 /* in kHz */
+
+#define I2C_TPI_ADDR 0x3D
+#define I2C_HDMI_ADDR 0x49
+#define I2C_CBUS_ADDR 0x64
+
+enum sii9234_state {
+ ST_OFF,
+ ST_D3,
+ ST_RGND_INIT,
+ ST_RGND_1K,
+ ST_RSEN_HIGH,
+ ST_MHL_ESTABLISHED,
+ ST_FAILURE_DISCOVERY,
+ ST_FAILURE,
+};
+
+struct sii9234 {
+ struct i2c_client *client[4];
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct gpio_desc *gpio_reset;
+ int i2c_error;
+ struct regulator_bulk_data supplies[4];
+
+ struct mutex lock; /* Protects fields below and device registers */
+ enum sii9234_state state;
+};
+
+enum sii9234_client_id {
+ I2C_MHL,
+ I2C_TPI,
+ I2C_HDMI,
+ I2C_CBUS,
+};
+
+static const char * const sii9234_client_name[] = {
+ [I2C_MHL] = "MHL",
+ [I2C_TPI] = "TPI",
+ [I2C_HDMI] = "HDMI",
+ [I2C_CBUS] = "CBUS",
+};
+
+static int sii9234_writeb(struct sii9234 *ctx, int id, int offset,
+ int value)
+{
+ int ret;
+ struct i2c_client *client = ctx->client[id];
+
+ if (ctx->i2c_error)
+ return ctx->i2c_error;
+
+ ret = i2c_smbus_write_byte_data(client, offset, value);
+ if (ret < 0)
+ dev_err(ctx->dev, "writeb: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+
+ return ret;
+}
+
+static int sii9234_writebm(struct sii9234 *ctx, int id, int offset,
+ int value, int mask)
+{
+ int ret;
+ struct i2c_client *client = ctx->client[id];
+
+ if (ctx->i2c_error)
+ return ctx->i2c_error;
+
+ ret = i2c_smbus_write_byte(client, offset);
+ if (ret < 0) {
+ dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+ return ret;
+ }
+
+ value = (value & mask) | (ret & ~mask);
+
+ ret = i2c_smbus_write_byte_data(client, offset, value);
+ if (ret < 0) {
+ dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+ }
+
+ return ret;
+}
+
+static int sii9234_readb(struct sii9234 *ctx, int id, int offset)
+{
+ int ret;
+ struct i2c_client *client = ctx->client[id];
+
+ if (ctx->i2c_error)
+ return ctx->i2c_error;
+
+ ret = i2c_smbus_write_byte(client, offset);
+ if (ret < 0) {
+ dev_err(ctx->dev, "readb: %4s[0x%02x]\n",
+ sii9234_client_name[id], offset);
+ ctx->i2c_error = ret;
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(ctx->dev, "readb: %4s[0x%02x]\n",
+ sii9234_client_name[id], offset);
+ ctx->i2c_error = ret;
+ }
+
+ return ret;
+}
+
+static int sii9234_clear_error(struct sii9234 *ctx)
+{
+ int ret = ctx->i2c_error;
+
+ ctx->i2c_error = 0;
+
+ return ret;
+}
+
+#define mhl_tx_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_MHL, offset, value)
+#define mhl_tx_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_MHL, offset, value, mask)
+#define mhl_tx_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_MHL, offset)
+#define cbus_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_CBUS, offset, value)
+#define cbus_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_CBUS, offset, value, mask)
+#define cbus_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_CBUS, offset)
+#define hdmi_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_HDMI, offset, value)
+#define hdmi_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_HDMI, offset, value, mask)
+#define hdmi_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_HDMI, offset)
+#define tpi_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_TPI, offset, value)
+#define tpi_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_TPI, offset, value, mask)
+#define tpi_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_TPI, offset)
+
+static u8 sii9234_tmds_control(struct sii9234 *ctx, bool enable)
+{
+ mhl_tx_writebm(ctx, MHL_TX_TMDS_CCTRL, enable ? ~0 : 0,
+ BIT_TMDS_CCTRL_TMDS_OE);
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, enable ? ~0 : 0,
+ MHL_HPD_OUT_OVR_EN | MHL_HPD_OUT_OVR_VAL);
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_cbus_reset(struct sii9234 *ctx)
+{
+ int i;
+
+ mhl_tx_writebm(ctx, MHL_TX_SRST, ~0, BIT_CBUS_RESET);
+ msleep(T_SRC_CBUS_DEGLITCH);
+ mhl_tx_writebm(ctx, MHL_TX_SRST, 0, BIT_CBUS_RESET);
+
+ for (i = 0; i < 4; i++) {
+ /*
+ * Enable WRITE_STAT interrupt for writes to all
+ * 4 MSC Status registers.
+ */
+ cbus_writeb(ctx, 0xE0 + i, 0xF2);
+ /*
+ * Enable SET_INT interrupt for writes to all
+ * 4 MSC Interrupt registers.
+ */
+ cbus_writeb(ctx, 0xF0 + i, 0xF2);
+ }
+
+ return sii9234_clear_error(ctx);
+}
+
+/* Require to chek mhl imformation of samsung in cbus_init_register */
+static int sii9234_cbus_init(struct sii9234 *ctx)
+{
+ cbus_writeb(ctx, 0x07, 0xF2);
+ cbus_writeb(ctx, 0x40, 0x03);
+ cbus_writeb(ctx, 0x42, 0x06);
+ cbus_writeb(ctx, 0x36, 0x0C);
+ cbus_writeb(ctx, 0x3D, 0xFD);
+ cbus_writeb(ctx, 0x1C, 0x01);
+ cbus_writeb(ctx, 0x1D, 0x0F);
+ cbus_writeb(ctx, 0x44, 0x02);
+ /* Setup our devcap */
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEV_STATE, 0x00);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_MHL_VERSION,
+ SII9234_MHL_VERSION);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_CAT,
+ MHL_DCAP_CAT_SOURCE);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_H, 0x01);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_L, 0x41);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VID_LINK_MODE,
+ MHL_DCAP_VID_LINK_RGB444 | MHL_DCAP_VID_LINK_YCBCR444);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VIDEO_TYPE,
+ MHL_DCAP_VT_GRAPHICS);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_LOG_DEV_MAP,
+ MHL_DCAP_LD_GUI);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_BANDWIDTH, 0x0F);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_FEATURE_FLAG,
+ MHL_DCAP_FEATURE_RCP_SUPPORT | MHL_DCAP_FEATURE_RAP_SUPPORT
+ | MHL_DCAP_FEATURE_SP_SUPPORT);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_H, 0x0);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_L, 0x0);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_SCRATCHPAD_SIZE,
+ SII9234_SCRATCHPAD_SIZE);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_INT_STAT_SIZE,
+ SII9234_INT_STAT_SIZE);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_RESERVED, 0);
+ cbus_writebm(ctx, 0x31, 0x0C, 0x0C);
+ cbus_writeb(ctx, 0x30, 0x01);
+ cbus_writebm(ctx, 0x3C, 0x30, 0x38);
+ cbus_writebm(ctx, 0x22, 0x0D, 0x0F);
+ cbus_writebm(ctx, 0x2E, 0x15, 0x15);
+ cbus_writeb(ctx, CBUS_INTR1_ENABLE_REG, 0);
+ cbus_writeb(ctx, CBUS_INTR2_ENABLE_REG, 0);
+
+ return sii9234_clear_error(ctx);
+}
+
+static void force_usb_id_switch_open(struct sii9234 *ctx)
+{
+ /* Disable CBUS discovery */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0, 0x01);
+ /* Force USB ID switch to open */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86);
+ /* Force upstream HPD to 0 when not in MHL mode. */
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x30);
+}
+
+static void release_usb_id_switch_open(struct sii9234 *ctx)
+{
+ msleep(T_SRC_CBUS_FLOAT);
+ /* Clear USB ID switch to open */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR);
+ /* Enable CBUS discovery */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 0x01);
+}
+
+static int sii9234_power_init(struct sii9234 *ctx)
+{
+ /* Force the SiI9234 into the D0 state. */
+ tpi_writeb(ctx, TPI_DPD_REG, 0x3F);
+ /* Enable TxPLL Clock */
+ hdmi_writeb(ctx, HDMI_RX_TMDS_CLK_EN_REG, 0x01);
+ /* Enable Tx Clock Path & Equalizer */
+ hdmi_writeb(ctx, HDMI_RX_TMDS_CH_EN_REG, 0x15);
+ /* Power Up TMDS */
+ mhl_tx_writeb(ctx, 0x08, 0x35);
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_hdmi_init(struct sii9234 *ctx)
+{
+ hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1);
+ hdmi_writeb(ctx, HDMI_RX_PLL_CALREFSEL_REG, 0x03);
+ hdmi_writeb(ctx, HDMI_RX_PLL_VCOCAL_REG, 0x20);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA0_REG, 0x8A);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA1_REG, 0x6A);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA2_REG, 0xAA);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA3_REG, 0xCA);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA4_REG, 0xEA);
+ hdmi_writeb(ctx, HDMI_RX_TMDS_ZONE_CTRL_REG, 0xA0);
+ hdmi_writeb(ctx, HDMI_RX_TMDS_MODE_CTRL_REG, 0x00);
+ mhl_tx_writeb(ctx, MHL_TX_TMDS_CCTRL, 0x34);
+ hdmi_writeb(ctx, 0x45, 0x44);
+ hdmi_writeb(ctx, 0x31, 0x0A);
+ hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1);
+
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_mhl_tx_ctl_int(struct sii9234 *ctx)
+{
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0xD0);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL2_REG, 0xFC);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL4_REG, 0xEB);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL7_REG, 0x0C);
+
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_reset(struct sii9234 *ctx)
+{
+ int ret;
+
+ sii9234_clear_error(ctx);
+
+ ret = sii9234_power_init(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_cbus_reset(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_hdmi_init(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_mhl_tx_ctl_int(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* Enable HDCP Compliance safety */
+ mhl_tx_writeb(ctx, 0x2B, 0x01);
+ /* CBUS discovery cycle time for each drive and float = 150us */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0x04, 0x06);
+ /* Clear bit 6 (reg_skip_rgnd) */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL2_REG, (1 << 7) /* Reserved */
+ | 2 << ATT_THRESH_SHIFT | DEGLITCH_TIME_50MS);
+ /*
+ * Changed from 66 to 65 for 94[1:0] = 01 = 5k reg_cbusmhl_pup_sel
+ * 1.8V CBUS VTH & GND threshold
+ * to meet CTS 3.3.7.2 spec
+ */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77);
+ cbus_writebm(ctx, CBUS_LINK_CONTROL_2_REG, ~0, MHL_INIT_TIMEOUT);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL6_REG, 0xA0);
+ /* RGND & single discovery attempt (RGND blocking) */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL6_REG, BLOCK_RGND_INT |
+ DVRFLT_SEL | SINGLE_ATT);
+ /* Use VBUS path of discovery state machine */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL8_REG, 0);
+ /* 0x92[3] sets the CBUS / ID switch */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR);
+ /*
+ * To allow RGND engine to operate correctly.
+ * When moving the chip from D2 to D0 (power up, init regs)
+ * the values should be
+ * 94[1:0] = 01 reg_cbusmhl_pup_sel[1:0] should be set for 5k
+ * 93[7:6] = 10 reg_cbusdisc_pup_sel[1:0] should be
+ * set for 10k (default)
+ * 93[5:4] = 00 reg_cbusidle_pup_sel[1:0] = open (default)
+ */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86);
+ /*
+ * Change from CC to 8C to match 5K
+ * to meet CTS 3.3.72 spec
+ */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C);
+ /* Configure the interrupt as active high */
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x06);
+
+ msleep(25);
+
+ /* Release usb_id switch */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR);
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL1_REG, 0x27);
+
+ ret = sii9234_clear_error(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_cbus_init(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto soft reset on SCDT = 0 */
+ mhl_tx_writeb(ctx, 0x05, 0x04);
+ /* HDMI Transcode mode enable */
+ mhl_tx_writeb(ctx, 0x0D, 0x1C);
+ mhl_tx_writeb(ctx, MHL_TX_INTR4_ENABLE_REG,
+ RGND_READY_MASK | CBUS_LKOUT_MASK
+ | MHL_DISC_FAIL_MASK | MHL_EST_MASK);
+ mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG, 0x60);
+
+ /* This point is very important before measure RGND impedance */
+ force_usb_id_switch_open(ctx);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, 0, 0xF0);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL5_REG, 0, 0x03);
+ release_usb_id_switch_open(ctx);
+
+ /* Force upstream HPD to 0 when not in MHL mode */
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 1 << 5);
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, ~0, 1 << 4);
+
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_goto_d3(struct sii9234 *ctx)
+{
+ int ret;
+
+ dev_dbg(ctx->dev, "sii9234: detection started d3\n");
+
+ ret = sii9234_reset(ctx);
+ if (ret < 0)
+ goto exit;
+
+ hdmi_writeb(ctx, 0x01, 0x03);
+ tpi_writebm(ctx, TPI_DPD_REG, 0, 1);
+ /* I2C above is expected to fail because power goes down */
+ sii9234_clear_error(ctx);
+
+ ctx->state = ST_D3;
+
+ return 0;
+ exit:
+ dev_err(ctx->dev, "%s failed\n", __func__);
+ return -1;
+}
+
+static int sii9234_hw_on(struct sii9234 *ctx)
+{
+ return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii9234_hw_off(struct sii9234 *ctx)
+{
+ gpiod_set_value(ctx->gpio_reset, 1);
+ msleep(20);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii9234_hw_reset(struct sii9234 *ctx)
+{
+ gpiod_set_value(ctx->gpio_reset, 1);
+ msleep(20);
+ gpiod_set_value(ctx->gpio_reset, 0);
+}
+
+static void sii9234_cable_in(struct sii9234 *ctx)
+{
+ int ret;
+
+ mutex_lock(&ctx->lock);
+ if (ctx->state != ST_OFF)
+ goto unlock;
+ ret = sii9234_hw_on(ctx);
+ if (ret < 0)
+ goto unlock;
+
+ sii9234_hw_reset(ctx);
+ sii9234_goto_d3(ctx);
+ /* To avoid irq storm, when hw is in meta state */
+ enable_irq(to_i2c_client(ctx->dev)->irq);
+
+unlock:
+ mutex_unlock(&ctx->lock);
+}
+
+static void sii9234_cable_out(struct sii9234 *ctx)
+{
+ mutex_lock(&ctx->lock);
+
+ if (ctx->state == ST_OFF)
+ goto unlock;
+
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ tpi_writeb(ctx, TPI_DPD_REG, 0);
+ /* Turn on&off hpd festure for only QCT HDMI */
+ sii9234_hw_off(ctx);
+
+ ctx->state = ST_OFF;
+
+unlock:
+ mutex_unlock(&ctx->lock);
+}
+
+static enum sii9234_state sii9234_rgnd_ready_irq(struct sii9234 *ctx)
+{
+ int value;
+
+ if (ctx->state == ST_D3) {
+ int ret;
+
+ dev_dbg(ctx->dev, "RGND_READY_INT\n");
+ sii9234_hw_reset(ctx);
+
+ ret = sii9234_reset(ctx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "sii9234_reset() failed\n");
+ return ST_FAILURE;
+ }
+
+ return ST_RGND_INIT;
+ }
+
+ /* Got interrupt in inappropriate state */
+ if (ctx->state != ST_RGND_INIT)
+ return ST_FAILURE;
+
+ value = mhl_tx_readb(ctx, MHL_TX_STAT2_REG);
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ if ((value & RGND_INTP_MASK) != RGND_INTP_1K) {
+ dev_warn(ctx->dev, "RGND is not 1k\n");
+ return ST_RGND_INIT;
+ }
+ dev_dbg(ctx->dev, "RGND 1K!!\n");
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C);
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, 0x05);
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ msleep(T_SRC_VBUS_CBUS_TO_STABLE);
+ return ST_RGND_1K;
+}
+
+static enum sii9234_state sii9234_mhl_established(struct sii9234 *ctx)
+{
+ dev_dbg(ctx->dev, "mhl est interrupt\n");
+
+ /* Discovery override */
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0x10);
+ /* Increase DDC translation layer timer (byte mode) */
+ cbus_writeb(ctx, 0x07, 0x32);
+ cbus_writebm(ctx, 0x44, ~0, 1 << 1);
+ /* Keep the discovery enabled. Need RGND interrupt */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 1);
+ mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG,
+ RSEN_CHANGE_INT_MASK | HPD_CHANGE_INT_MASK);
+
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ return ST_MHL_ESTABLISHED;
+}
+
+static enum sii9234_state sii9234_hpd_change(struct sii9234 *ctx)
+{
+ int value;
+
+ value = cbus_readb(ctx, CBUS_MSC_REQ_ABORT_REASON_REG);
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ if (value & SET_HPD_DOWNSTREAM) {
+ /* Downstream HPD High, Enable TMDS */
+ sii9234_tmds_control(ctx, true);
+ } else {
+ /* Downstream HPD Low, Disable TMDS */
+ sii9234_tmds_control(ctx, false);
+ }
+
+ return ctx->state;
+}
+
+static enum sii9234_state sii9234_rsen_change(struct sii9234 *ctx)
+{
+ int value;
+
+ /* Work_around code to handle wrong interrupt */
+ if (ctx->state != ST_RGND_1K) {
+ dev_err(ctx->dev, "RSEN_HIGH without RGND_1K\n");
+ return ST_FAILURE;
+ }
+ value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG);
+ if (value < 0)
+ return ST_FAILURE;
+
+ if (value & RSEN_STATUS) {
+ dev_dbg(ctx->dev, "MHL cable connected.. RSEN High\n");
+ return ST_RSEN_HIGH;
+ }
+ dev_dbg(ctx->dev, "RSEN lost\n");
+ /*
+ * Once RSEN loss is confirmed,we need to check
+ * based on cable status and chip power status,whether
+ * it is SINK Loss(HDMI cable not connected, TV Off)
+ * or MHL cable disconnection
+ * TODO: Define the below mhl_disconnection()
+ */
+ msleep(T_SRC_RXSENSE_DEGLITCH);
+ value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG);
+ if (value < 0)
+ return ST_FAILURE;
+ dev_dbg(ctx->dev, "sys_stat: %x\n", value);
+
+ if (value & RSEN_STATUS) {
+ dev_dbg(ctx->dev, "RSEN recovery\n");
+ return ST_RSEN_HIGH;
+ }
+ dev_dbg(ctx->dev, "RSEN Really LOW\n");
+ /* To meet CTS 3.3.22.2 spec */
+ sii9234_tmds_control(ctx, false);
+ force_usb_id_switch_open(ctx);
+ release_usb_id_switch_open(ctx);
+
+ return ST_FAILURE;
+}
+
+static irqreturn_t sii9234_irq_thread(int irq, void *data)
+{
+ struct sii9234 *ctx = data;
+ int intr1, intr4;
+ int intr1_en, intr4_en;
+ int cbus_intr1, cbus_intr2;
+
+ dev_dbg(ctx->dev, "%s\n", __func__);
+
+ mutex_lock(&ctx->lock);
+
+ intr1 = mhl_tx_readb(ctx, MHL_TX_INTR1_REG);
+ intr4 = mhl_tx_readb(ctx, MHL_TX_INTR4_REG);
+ intr1_en = mhl_tx_readb(ctx, MHL_TX_INTR1_ENABLE_REG);
+ intr4_en = mhl_tx_readb(ctx, MHL_TX_INTR4_ENABLE_REG);
+ cbus_intr1 = cbus_readb(ctx, CBUS_INT_STATUS_1_REG);
+ cbus_intr2 = cbus_readb(ctx, CBUS_INT_STATUS_2_REG);
+
+ if (sii9234_clear_error(ctx))
+ goto done;
+
+ dev_dbg(ctx->dev, "irq %02x/%02x %02x/%02x %02x/%02x\n",
+ intr1, intr1_en, intr4, intr4_en, cbus_intr1, cbus_intr2);
+
+ if (intr4 & RGND_READY_INT)
+ ctx->state = sii9234_rgnd_ready_irq(ctx);
+ if (intr1 & RSEN_CHANGE_INT)
+ ctx->state = sii9234_rsen_change(ctx);
+ if (intr4 & MHL_EST_INT)
+ ctx->state = sii9234_mhl_established(ctx);
+ if (intr1 & HPD_CHANGE_INT)
+ ctx->state = sii9234_hpd_change(ctx);
+ if (intr4 & CBUS_LKOUT_INT)
+ ctx->state = ST_FAILURE;
+ if (intr4 & MHL_DISC_FAIL_INT)
+ ctx->state = ST_FAILURE_DISCOVERY;
+
+ done:
+ /* Clean interrupt status and pending flags */
+ mhl_tx_writeb(ctx, MHL_TX_INTR1_REG, intr1);
+ mhl_tx_writeb(ctx, MHL_TX_INTR4_REG, intr4);
+ cbus_writeb(ctx, CBUS_MHL_STATUS_REG_0, 0xFF);
+ cbus_writeb(ctx, CBUS_MHL_STATUS_REG_1, 0xFF);
+ cbus_writeb(ctx, CBUS_INT_STATUS_1_REG, cbus_intr1);
+ cbus_writeb(ctx, CBUS_INT_STATUS_2_REG, cbus_intr2);
+
+ sii9234_clear_error(ctx);
+
+ if (ctx->state == ST_FAILURE) {
+ dev_dbg(ctx->dev, "try to reset after failure\n");
+ sii9234_hw_reset(ctx);
+ sii9234_goto_d3(ctx);
+ }
+
+ if (ctx->state == ST_FAILURE_DISCOVERY) {
+ dev_err(ctx->dev, "discovery failed, no power for MHL?\n");
+ tpi_writebm(ctx, TPI_DPD_REG, 0, 1);
+ ctx->state = ST_D3;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sii9234_init_resources(struct sii9234 *ctx,
+ struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ int ret;
+
+ if (!ctx->dev->of_node) {
+ dev_err(ctx->dev, "not DT device\n");
+ return -ENODEV;
+ }
+
+ ctx->gpio_reset = devm_gpiod_get(ctx->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(ctx->dev, "failed to get reset gpio from DT\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ctx->supplies[0].supply = "avcc12";
+ ctx->supplies[1].supply = "avcc33";
+ ctx->supplies[2].supply = "iovcc18";
+ ctx->supplies[3].supply = "cvcc12";
+ ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
+ if (ret) {
+ dev_err(ctx->dev, "regulator_bulk failed\n");
+ return ret;
+ }
+
+ ctx->client[I2C_MHL] = client;
+
+ ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
+ if (!ctx->client[I2C_TPI]) {
+ dev_err(ctx->dev, "failed to create TPI client\n");
+ return -ENODEV;
+ }
+
+ ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
+ if (!ctx->client[I2C_HDMI]) {
+ dev_err(ctx->dev, "failed to create HDMI RX client\n");
+ goto fail_tpi;
+ }
+
+ ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
+ if (!ctx->client[I2C_CBUS]) {
+ dev_err(ctx->dev, "failed to create CBUS client\n");
+ goto fail_hdmi;
+ }
+
+ return 0;
+
+fail_hdmi:
+ i2c_unregister_device(ctx->client[I2C_HDMI]);
+fail_tpi:
+ i2c_unregister_device(ctx->client[I2C_TPI]);
+
+ return -ENODEV;
+}
+
+static void sii9234_deinit_resources(struct sii9234 *ctx)
+{
+ i2c_unregister_device(ctx->client[I2C_CBUS]);
+ i2c_unregister_device(ctx->client[I2C_HDMI]);
+ i2c_unregister_device(ctx->client[I2C_TPI]);
+}
+
+static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii9234, bridge);
+}
+
+static enum drm_mode_status sii9234_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > MHL1_MAX_CLK)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_bridge_funcs sii9234_bridge_funcs = {
+ .mode_valid = sii9234_mode_valid,
+};
+
+static int sii9234_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct sii9234 *ctx;
+ struct device *dev = &client->dev;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ mutex_init(&ctx->lock);
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(dev, "I2C adapter lacks SMBUS feature\n");
+ return -EIO;
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "no irq provided\n");
+ return -EINVAL;
+ }
+
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii9234_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "sii9234", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to install IRQ handler\n");
+ return ret;
+ }
+
+ ret = sii9234_init_resources(ctx, client);
+ if (ret < 0)
+ return ret;
+
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sii9234_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ sii9234_cable_in(ctx);
+
+ return 0;
+}
+
+static int sii9234_remove(struct i2c_client *client)
+{
+ struct sii9234 *ctx = i2c_get_clientdata(client);
+
+ sii9234_cable_out(ctx);
+ drm_bridge_remove(&ctx->bridge);
+ sii9234_deinit_resources(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id sii9234_dt_match[] = {
+ { .compatible = "sil,sii9234" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sii9234_dt_match);
+
+static const struct i2c_device_id sii9234_id[] = {
+ { "SII9234", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sii9234_id);
+
+static struct i2c_driver sii9234_driver = {
+ .driver = {
+ .name = "sii9234",
+ .of_match_table = sii9234_dt_match,
+ },
+ .probe = sii9234_probe,
+ .remove = sii9234_remove,
+ .id_table = sii9234_id,
+};
+
+module_i2c_driver(sii9234_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 5131bfb94f06..b7eb704d0a8a 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -28,6 +28,8 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <media/rc-core.h>
+
#include "sil-sii8620.h"
#define SII8620_BURST_BUF_LEN 288
@@ -58,6 +60,7 @@ enum sii8620_mt_state {
struct sii8620 {
struct drm_bridge bridge;
struct device *dev;
+ struct rc_dev *rc_dev;
struct clk *clk_xtal;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_int;
@@ -431,6 +434,16 @@ static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
}
+static void sii8620_mt_rcpk(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPK, code);
+}
+
+static void sii8620_mt_rcpe(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPE, code);
+}
+
static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
@@ -1753,6 +1766,25 @@ static void sii8620_send_features(struct sii8620 *ctx)
sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf));
}
+static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
+{
+ bool pressed = !(scancode & MHL_RCP_KEY_RELEASED_MASK);
+
+ scancode &= MHL_RCP_KEY_ID_MASK;
+
+ if (!ctx->rc_dev) {
+ dev_dbg(ctx->dev, "RCP input device not initialized\n");
+ return false;
+ }
+
+ if (pressed)
+ rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
+ else
+ rc_keyup(ctx->rc_dev);
+
+ return true;
+}
+
static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
{
u8 ints[MHL_INT_SIZE];
@@ -1804,19 +1836,25 @@ static void sii8620_msc_mt_done(struct sii8620 *ctx)
static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
{
- struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+ struct sii8620_mt_msg *msg;
u8 buf[2];
- if (!msg)
- return;
-
sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
switch (buf[0]) {
case MHL_MSC_MSG_RAPK:
+ msg = sii8620_msc_msg_first(ctx);
+ if (!msg)
+ return;
msg->ret = buf[1];
ctx->mt_state = MT_STATE_DONE;
break;
+ case MHL_MSC_MSG_RCP:
+ if (!sii8620_rcp_consume(ctx, buf[1]))
+ sii8620_mt_rcpe(ctx,
+ MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE);
+ sii8620_mt_rcpk(ctx, buf[1]);
+ break;
default:
dev_err(ctx->dev, "%s message type %d,%d not supported",
__func__, buf[0], buf[1]);
@@ -2102,11 +2140,57 @@ static void sii8620_cable_in(struct sii8620 *ctx)
enable_irq(to_i2c_client(ctx->dev)->irq);
}
+static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
+{
+ struct rc_dev *rc_dev;
+ int ret;
+
+ rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
+ if (!rc_dev) {
+ dev_err(ctx->dev, "Failed to allocate RC device\n");
+ ctx->error = -ENOMEM;
+ return;
+ }
+
+ rc_dev->input_phys = "sii8620/input0";
+ rc_dev->input_id.bustype = BUS_VIRTUAL;
+ rc_dev->map_name = RC_MAP_CEC;
+ rc_dev->allowed_protocols = RC_PROTO_BIT_CEC;
+ rc_dev->driver_name = "sii8620";
+ rc_dev->device_name = "sii8620";
+
+ ret = rc_register_device(rc_dev);
+
+ if (ret) {
+ dev_err(ctx->dev, "Failed to register RC device\n");
+ ctx->error = ret;
+ rc_free_device(ctx->rc_dev);
+ return;
+ }
+ ctx->rc_dev = rc_dev;
+}
+
static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
{
return container_of(bridge, struct sii8620, bridge);
}
+static int sii8620_attach(struct drm_bridge *bridge)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+
+ sii8620_init_rcp_input_dev(ctx);
+
+ return sii8620_clear_error(ctx);
+}
+
+static void sii8620_detach(struct drm_bridge *bridge)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+
+ rc_unregister_device(ctx->rc_dev);
+}
+
static bool sii8620_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -2151,6 +2235,8 @@ end:
}
static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+ .attach = sii8620_attach,
+ .detach = sii8620_detach,
.mode_fixup = sii8620_mode_fixup,
};
@@ -2217,8 +2303,8 @@ static int sii8620_remove(struct i2c_client *client)
struct sii8620 *ctx = i2c_get_clientdata(client);
disable_irq(to_i2c_client(ctx->dev)->irq);
- drm_bridge_remove(&ctx->bridge);
sii8620_hw_off(ctx);
+ drm_bridge_remove(&ctx->bridge);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index bf14214fa464..b72259bf6e2f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -138,6 +138,7 @@ struct dw_hdmi {
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct clk *cec_clk;
struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
@@ -2382,6 +2383,26 @@ __dw_hdmi_probe(struct platform_device *pdev,
goto err_isfr;
}
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+ if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+ hdmi->cec_clk = NULL;
+ } else if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+ ret);
+
+ hdmi->cec_clk = NULL;
+ goto err_iahb;
+ } else {
+ ret = clk_prepare_enable(hdmi->cec_clk);
+ if (ret) {
+ dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+ ret);
+ goto err_iahb;
+ }
+ }
+
/* Product and revision IDs */
hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
| (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
@@ -2518,6 +2539,8 @@ err_iahb:
cec_notifier_put(hdmi->cec_notifier);
clk_disable_unprepare(hdmi->iahb_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
err_res:
@@ -2541,6 +2564,8 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 63c7a01b7053..d9cca4fd66ec 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -30,19 +30,20 @@
#include <video/mipi_display.h>
#define DSI_VERSION 0x00
+
#define DSI_PWR_UP 0x04
#define RESET 0
#define POWERUP BIT(0)
#define DSI_CLKMGR_CFG 0x08
-#define TO_CLK_DIVIDSION(div) (((div) & 0xff) << 8)
-#define TX_ESC_CLK_DIVIDSION(div) (((div) & 0xff) << 0)
+#define TO_CLK_DIVISION(div) (((div) & 0xff) << 8)
+#define TX_ESC_CLK_DIVISION(div) ((div) & 0xff)
#define DSI_DPI_VCID 0x0c
-#define DPI_VID(vid) (((vid) & 0x3) << 0)
+#define DPI_VCID(vcid) ((vcid) & 0x3)
#define DSI_DPI_COLOR_CODING 0x10
-#define EN18_LOOSELY BIT(8)
+#define LOOSELY18_EN BIT(8)
#define DPI_COLOR_CODING_16BIT_1 0x0
#define DPI_COLOR_CODING_16BIT_2 0x1
#define DPI_COLOR_CODING_16BIT_3 0x2
@@ -61,22 +62,25 @@
#define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16)
#define INVACT_LPCMD_TIME(p) ((p) & 0xff)
+#define DSI_DBI_VCID 0x1c
#define DSI_DBI_CFG 0x20
+#define DSI_DBI_PARTITIONING_EN 0x24
#define DSI_DBI_CMDSIZE 0x28
#define DSI_PCKHDL_CFG 0x2c
-#define EN_CRC_RX BIT(4)
-#define EN_ECC_RX BIT(3)
-#define EN_BTA BIT(2)
-#define EN_EOTP_RX BIT(1)
-#define EN_EOTP_TX BIT(0)
+#define CRC_RX_EN BIT(4)
+#define ECC_RX_EN BIT(3)
+#define BTA_EN BIT(2)
+#define EOTP_RX_EN BIT(1)
+#define EOTP_TX_EN BIT(0)
+
+#define DSI_GEN_VCID 0x30
#define DSI_MODE_CFG 0x34
#define ENABLE_VIDEO_MODE 0
#define ENABLE_CMD_MODE BIT(0)
#define DSI_VID_MODE_CFG 0x38
-#define FRAME_BTA_ACK BIT(14)
#define ENABLE_LOW_POWER (0x3f << 8)
#define ENABLE_LOW_POWER_MASK (0x3f << 8)
#define VID_MODE_TYPE_NON_BURST_SYNC_PULSES 0x0
@@ -85,8 +89,13 @@
#define VID_MODE_TYPE_MASK 0x3
#define DSI_VID_PKT_SIZE 0x3c
-#define VID_PKT_SIZE(p) (((p) & 0x3fff) << 0)
-#define VID_PKT_MAX_SIZE 0x3fff
+#define VID_PKT_SIZE(p) ((p) & 0x3fff)
+
+#define DSI_VID_NUM_CHUNKS 0x40
+#define VID_NUM_CHUNKS(c) ((c) & 0x1fff)
+
+#define DSI_VID_NULL_SIZE 0x44
+#define VID_NULL_SIZE(b) ((b) & 0x1fff)
#define DSI_VID_HSA_TIME 0x48
#define DSI_VID_HBP_TIME 0x4c
@@ -95,6 +104,8 @@
#define DSI_VID_VBP_LINES 0x58
#define DSI_VID_VFP_LINES 0x5c
#define DSI_VID_VACTIVE_LINES 0x60
+#define DSI_EDPI_CMD_SIZE 0x64
+
#define DSI_CMD_MODE_CFG 0x68
#define MAX_RD_PKT_SIZE_LP BIT(24)
#define DCS_LW_TX_LP BIT(19)
@@ -108,8 +119,8 @@
#define GEN_SW_2P_TX_LP BIT(10)
#define GEN_SW_1P_TX_LP BIT(9)
#define GEN_SW_0P_TX_LP BIT(8)
-#define EN_ACK_RQST BIT(1)
-#define EN_TEAR_FX BIT(0)
+#define ACK_RQST_EN BIT(1)
+#define TEAR_FX_EN BIT(0)
#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
DCS_LW_TX_LP | \
@@ -125,27 +136,31 @@
GEN_SW_0P_TX_LP)
#define DSI_GEN_HDR 0x6c
+/* TODO These 2 defines will be reworked thanks to mipi_dsi_create_packet() */
#define GEN_HDATA(data) (((data) & 0xffff) << 8)
-#define GEN_HDATA_MASK (0xffff << 8)
#define GEN_HTYPE(type) (((type) & 0xff) << 0)
-#define GEN_HTYPE_MASK 0xff
#define DSI_GEN_PLD_DATA 0x70
#define DSI_CMD_PKT_STATUS 0x74
-#define GEN_CMD_EMPTY BIT(0)
-#define GEN_CMD_FULL BIT(1)
-#define GEN_PLD_W_EMPTY BIT(2)
-#define GEN_PLD_W_FULL BIT(3)
-#define GEN_PLD_R_EMPTY BIT(4)
-#define GEN_PLD_R_FULL BIT(5)
#define GEN_RD_CMD_BUSY BIT(6)
+#define GEN_PLD_R_FULL BIT(5)
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_CMD_EMPTY BIT(0)
#define DSI_TO_CNT_CFG 0x78
#define HSTX_TO_CNT(p) (((p) & 0xffff) << 16)
#define LPRX_TO_CNT(p) ((p) & 0xffff)
+#define DSI_HS_RD_TO_CNT 0x7c
+#define DSI_LP_RD_TO_CNT 0x80
+#define DSI_HS_WR_TO_CNT 0x84
+#define DSI_LP_WR_TO_CNT 0x88
#define DSI_BTA_TO_CNT 0x8c
+
#define DSI_LPCLK_CTRL 0x94
#define AUTO_CLKLANE_CTRL BIT(1)
#define PHY_TXREQUESTCLKHS BIT(0)
@@ -154,6 +169,7 @@
#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
+/* TODO Next register is slightly different between 1.30 & 1.31 IP version */
#define DSI_PHY_TMR_CFG 0x9c
#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
@@ -170,12 +186,15 @@
#define PHY_UNSHUTDOWNZ BIT(0)
#define DSI_PHY_IF_CFG 0xa4
-#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
+#define N_LANES(n) (((n) - 1) & 0x3)
+
+#define DSI_PHY_ULPS_CTRL 0xa8
+#define DSI_PHY_TX_TRIGGERS 0xac
#define DSI_PHY_STATUS 0xb0
-#define LOCK BIT(0)
-#define STOP_STATE_CLK_LANE BIT(2)
+#define PHY_STOP_STATE_CLK_LANE BIT(2)
+#define PHY_LOCK BIT(0)
#define DSI_PHY_TST_CTRL0 0xb4
#define PHY_TESTCLK BIT(1)
@@ -187,12 +206,13 @@
#define PHY_TESTEN BIT(16)
#define PHY_UNTESTEN 0
#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
-#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
+#define PHY_TESTDIN(n) ((n) & 0xff)
#define DSI_INT_ST0 0xbc
#define DSI_INT_ST1 0xc0
#define DSI_INT_MSK0 0xc4
#define DSI_INT_MSK1 0xc8
+#define DSI_PHY_TMR_RD_CFG 0xf4
#define PHY_STATUS_TIMEOUT_US 10000
#define CMD_PKT_STATUS_TIMEOUT_US 20000
@@ -201,7 +221,6 @@ struct dw_mipi_dsi {
struct drm_bridge bridge;
struct mipi_dsi_host dsi_host;
struct drm_bridge *panel_bridge;
- bool is_panel_bridge;
struct device *dev;
void __iomem *base;
@@ -277,7 +296,6 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
- dsi->is_panel_bridge = true;
}
dsi->panel_bridge = bridge;
@@ -292,8 +310,7 @@ static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
- if (dsi->is_panel_bridge)
- drm_panel_bridge_remove(dsi->panel_bridge);
+ drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
drm_bridge_remove(&dsi->bridge);
@@ -307,7 +324,7 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
u32 val = 0;
if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
- val |= EN_ACK_RQST;
+ val |= ACK_RQST_EN;
if (lpm)
val |= CMD_MODE_ALL_LP;
@@ -506,8 +523,8 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
* timeout clock division should be computed with the
* high speed transmission counter timeout and byte lane...
*/
- dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVIDSION(10) |
- TX_ESC_CLK_DIVIDSION(esc_clk_division));
+ dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVISION(10) |
+ TX_ESC_CLK_DIVISION(esc_clk_division));
}
static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
@@ -520,7 +537,7 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
color = DPI_COLOR_CODING_24BIT;
break;
case MIPI_DSI_FMT_RGB666:
- color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
+ color = DPI_COLOR_CODING_18BIT_2 | LOOSELY18_EN;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
color = DPI_COLOR_CODING_18BIT_1;
@@ -535,7 +552,7 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
val |= HSYNC_ACTIVE_LOW;
- dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
+ dsi_write(dsi, DSI_DPI_VCID, DPI_VCID(dsi->channel));
dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
dsi_write(dsi, DSI_DPI_CFG_POL, val);
/*
@@ -550,7 +567,7 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
{
- dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
+ dsi_write(dsi, DSI_PCKHDL_CFG, CRC_RX_EN | ECC_RX_EN | BTA_EN);
}
static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
@@ -571,7 +588,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
/*
* TODO dw drv improvements
* compute high speed transmission counter timeout according
- * to the timeout clock division (TO_CLK_DIVIDSION) and byte lane...
+ * to the timeout clock division (TO_CLK_DIVISION) and byte lane...
*/
dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
/*
@@ -684,13 +701,13 @@ static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi)
dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
- ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
+ ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val,
+ val & PHY_LOCK, 1000, PHY_STATUS_TIMEOUT_US);
if (ret < 0)
DRM_DEBUG_DRIVER("failed to wait phy lock state\n");
ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & STOP_STATE_CLK_LANE, 1000,
+ val, val & PHY_STOP_STATE_CLK_LANE, 1000,
PHY_STATUS_TIMEOUT_US);
if (ret < 0)
DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n");
@@ -865,15 +882,14 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
* Note that the reset was not defined in the initial device tree, so
* we have to be prepared for it not being found.
*/
- apb_rst = devm_reset_control_get(dev, "apb");
+ apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb");
if (IS_ERR(apb_rst)) {
ret = PTR_ERR(apb_rst);
- if (ret == -ENOENT) {
- apb_rst = NULL;
- } else {
+
+ if (ret != -EPROBE_DEFER)
dev_err(dev, "Unable to get reset control: %d\n", ret);
- return ERR_PTR(ret);
- }
+
+ return ERR_PTR(ret);
}
if (apb_rst) {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8571cfd877c5..8636e7eeb731 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -97,7 +97,7 @@
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
#define DP0_MISC 0x0658
-#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
#define BPC_8 (1 << 5)
@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
tmp = (tmp << 8) | buf[i];
i++;
if (((i % 4) == 0) || (i == size)) {
- tc_write(DP0_AUXWDATA(i >> 2), tmp);
+ tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
tmp = 0;
}
}
@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
if (ret < 0)
goto err_dpcd_read;
- if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
- goto err_dpcd_inval;
+ if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+ dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
+ tc->link.base.rate = 270000;
+ }
+
+ if (tc->link.base.num_lanes > 2) {
+ dev_dbg(tc->dev, "Falling to 2 lanes\n");
+ tc->link.base.num_lanes = 2;
+ }
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
if (ret < 0)
@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
err_dpcd_read:
dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
return ret;
-err_dpcd_inval:
- dev_err(tc->dev, "invalid DPCD\n");
- return -EINVAL;
}
static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
+ /*
+ * Recommended maximum number of symbols transferred in a transfer unit:
+ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+ * (output active video bandwidth in bytes))
+ * Must be less than tu_size.
+ */
+ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
- /* LCD Ctl Frame Size */
- tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+ /*
+ * LCD Ctl Frame Size
+ * datasheet is not clear of vsdelay in case of DPI
+ * assume we do not need any delay when DPI is a source of
+ * sync signals
+ */
+ tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
- tc_write(HTIM01, (left_margin << 16) | /* H back porch */
- (hsync_len << 0)); /* Hsync */
- tc_write(HTIM02, (right_margin << 16) | /* H front porch */
- (mode->hdisplay << 0)); /* width */
+ tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+ (ALIGN(hsync_len, 2) << 0)); /* Hsync */
+ tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
+ (ALIGN(mode->hdisplay, 2) << 0)); /* width */
tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
(vsync_len << 0)); /* Vsync */
tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
/* DP Main Stream Attributes */
vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
tc_write(DP0_VIDSYNCDELAY,
- (0x003e << 16) | /* thresh_dly */
+ (max_tu_symbol << 16) | /* thresh_dly */
(vid_sync_dly << 0));
tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
- /*
- * Recommended maximum number of symbols transferred in a transfer unit:
- * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
- * (output active video bandwidth in bytes))
- * Must be less than tu_size.
- */
- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
- tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+ tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+ BPC_8);
return 0;
err:
@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
unsigned int rate;
u32 dp_phy_ctrl;
int timeout;
- bool aligned;
- bool ready;
u32 value;
int ret;
u8 tmp[8];
@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
if (ret < 0)
goto err_dpcd_read;
- ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
- DP_CHANNEL_EQ_BITS)); /* Lane0 */
- aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
- } while ((--timeout) && !(ready && aligned));
+ } while ((--timeout) &&
+ !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
if (timeout == 0) {
/* Read DPCD 0x200-0x201 */
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
if (ret < 0)
goto err_dpcd_read;
+ dev_err(dev, "channel(s) EQ not ok\n");
dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
tmp[1]);
@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
tmp[6]);
- if (!ready)
- dev_err(dev, "Lane0/1 not ready\n");
- if (!aligned)
- dev_err(dev, "Lane0/1 not aligned\n");
return -EAGAIN;
}
@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
static int tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- /* Accept any mode */
+ /* DPI interface clock limitation: upto 154 MHz */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index a4c4a465b385..cd23b1b28259 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -457,7 +457,7 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index c89953449e96..737f02885c28 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -70,7 +70,6 @@ int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
return 0;
}
-
EXPORT_SYMBOL(drm_agp_info);
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
@@ -95,18 +94,18 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
-int drm_agp_acquire(struct drm_device * dev)
+int drm_agp_acquire(struct drm_device *dev)
{
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
- if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+ dev->agp->bridge = agp_backend_acquire(dev->pdev);
+ if (!dev->agp->bridge)
return -ENODEV;
dev->agp->acquired = 1;
return 0;
}
-
EXPORT_SYMBOL(drm_agp_acquire);
/**
@@ -135,7 +134,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
-int drm_agp_release(struct drm_device * dev)
+int drm_agp_release(struct drm_device *dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -161,7 +160,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data,
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
-int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -171,7 +170,6 @@ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
dev->agp->enabled = 1;
return 0;
}
-
EXPORT_SYMBOL(drm_agp_enable);
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
@@ -203,12 +201,14 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
return -ENOMEM;
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
- if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
+ memory = agp_allocate_memory(dev->agp->bridge, pages, type);
+ if (!memory) {
kfree(entry);
return -ENOMEM;
}
@@ -244,8 +244,8 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
-static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
- unsigned long handle)
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
+ unsigned long handle)
{
struct drm_agp_mem *entry;
@@ -275,9 +275,8 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (!entry->bound)
+ entry = drm_agp_lookup_entry(dev, request->handle);
+ if (!entry || !entry->bound)
return -EINVAL;
ret = drm_unbind_agp(entry->memory);
if (ret == 0)
@@ -316,12 +315,12 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
+ entry = drm_agp_lookup_entry(dev, request->handle);
+ if (!entry || entry->bound)
return -EINVAL;
page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
- if ((retcode = drm_bind_agp(entry->memory, page)))
+ retcode = drm_bind_agp(entry->memory, page);
+ if (retcode)
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
@@ -359,7 +358,8 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ entry = drm_agp_lookup_entry(dev, request->handle);
+ if (!entry)
return -EINVAL;
if (entry->bound)
drm_unbind_agp(entry->memory);
@@ -373,7 +373,6 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
EXPORT_SYMBOL(drm_agp_free);
-
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -398,11 +397,13 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
struct drm_agp_head *head = NULL;
- if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
return NULL;
head->bridge = agp_find_bridge(dev->pdev);
if (!head->bridge) {
- if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+ head->bridge = agp_backend_acquire(dev->pdev);
+ if (!head->bridge) {
kfree(head);
return NULL;
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 2fd383d7253a..c2da5585e201 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -163,13 +163,6 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
crtc->funcs->atomic_destroy_state(crtc,
state->crtcs[i].state);
- if (state->crtcs[i].commit) {
- kfree(state->crtcs[i].commit->event);
- state->crtcs[i].commit->event = NULL;
- drm_crtc_commit_put(state->crtcs[i].commit);
- }
-
- state->crtcs[i].commit = NULL;
state->crtcs[i].ptr = NULL;
state->crtcs[i].state = NULL;
}
@@ -189,9 +182,6 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
- if (!obj)
- continue;
-
obj->funcs->atomic_destroy_state(obj,
state->private_objs[i].state);
state->private_objs[i].ptr = NULL;
@@ -199,6 +189,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
}
state->num_private_objs = 0;
+ if (state->fake_commit) {
+ drm_crtc_commit_put(state->fake_commit);
+ state->fake_commit = NULL;
+ }
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -721,7 +715,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_fb_id) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_put(fb);
@@ -737,7 +731,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return -EINVAL;
} else if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
@@ -1152,7 +1146,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
@@ -1818,7 +1812,7 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, uint64_t user_data)
+ struct drm_crtc *crtc, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
@@ -1828,7 +1822,8 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
- e->event.user_data = user_data;
+ e->event.vbl.crtc_id = crtc->base.id;
+ e->event.vbl.user_data = user_data;
return e;
}
@@ -2082,7 +2077,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
struct drm_pending_vblank_event *e;
- e = create_vblank_event(dev, arg->user_data);
+ e = create_vblank_event(crtc, arg->user_data);
if (!e)
return -ENOMEM;
@@ -2237,7 +2232,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
return -EINVAL;
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state = drm_atomic_state_alloc(dev);
if (!state)
@@ -2262,7 +2257,7 @@ retry:
goto out;
}
- obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
+ obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj) {
ret = -ENOENT;
goto out;
@@ -2350,8 +2345,9 @@ out:
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0028591f3f95..b16f1d69a0bb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -860,6 +860,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
+ int ret;
/* Shut down everything that needs a full modeset. */
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -883,6 +884,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->disable(crtc);
else
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (!(dev->irq_enabled && dev->num_crtcs))
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
+ if (ret == 0)
+ drm_crtc_vblank_put(crtc);
}
}
@@ -1216,7 +1225,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
return;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+ if (!new_crtc_state->active)
continue;
ret = drm_crtc_vblank_get(crtc);
@@ -1262,12 +1271,12 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- struct drm_crtc_state *unused;
+ struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
- for_each_new_crtc_in_state(old_state, crtc, unused, i) {
- struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ struct drm_crtc_commit *commit = new_crtc_state->commit;
int ret;
if (!commit)
@@ -1388,35 +1397,31 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- struct drm_crtc_commit *commit;
- struct drm_plane *__plane, *plane = NULL;
- struct drm_plane_state *__plane_state, *plane_state = NULL;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
const struct drm_plane_helper_funcs *funcs;
- int i, j, n_planes = 0;
+ int i, n_planes = 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
return -EINVAL;
}
- for_each_new_plane_in_state(state, __plane, __plane_state, i) {
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
n_planes++;
- plane = __plane;
- plane_state = __plane_state;
- }
/* FIXME: we support only single plane updates for now */
- if (!plane || n_planes != 1)
+ if (n_planes != 1)
return -EINVAL;
- if (!plane_state->crtc)
+ if (!new_plane_state->crtc)
return -EINVAL;
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
- if (plane_state->fence)
+ if (new_plane_state->fence)
return -EINVAL;
/*
@@ -1424,31 +1429,11 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- if (plane->crtc != crtc)
- continue;
-
- spin_lock(&crtc->commit_lock);
- commit = list_first_entry_or_null(&crtc->commit_list,
- struct drm_crtc_commit,
- commit_entry);
- if (!commit) {
- spin_unlock(&crtc->commit_lock);
- continue;
- }
- spin_unlock(&crtc->commit_lock);
-
- if (!crtc->state->state)
- continue;
-
- for_each_plane_in_state(crtc->state->state, __plane,
- __plane_state, j) {
- if (__plane == plane)
- return -EINVAL;
- }
- }
+ if (old_plane_state->commit &&
+ !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ return -EBUSY;
- return funcs->atomic_async_check(plane, plane_state);
+ return funcs->atomic_async_check(plane, new_plane_state);
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);
@@ -1633,8 +1618,7 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
return -EBUSY;
}
} else if (i == 1) {
- stall_commit = commit;
- drm_crtc_commit_get(stall_commit);
+ stall_commit = drm_crtc_commit_get(commit);
break;
}
@@ -1668,6 +1652,38 @@ static void release_crtc_commit(struct completion *completion)
drm_crtc_commit_put(commit);
}
+static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
+{
+ init_completion(&commit->flip_done);
+ init_completion(&commit->hw_done);
+ init_completion(&commit->cleanup_done);
+ INIT_LIST_HEAD(&commit->commit_entry);
+ kref_init(&commit->ref);
+ commit->crtc = crtc;
+}
+
+static struct drm_crtc_commit *
+crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ if (crtc) {
+ struct drm_crtc_state *new_crtc_state;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ return new_crtc_state->commit;
+ }
+
+ if (!state->fake_commit) {
+ state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
+ if (!state->fake_commit)
+ return NULL;
+
+ init_commit(state->fake_commit, NULL);
+ }
+
+ return state->fake_commit;
+}
+
/**
* drm_atomic_helper_setup_commit - setup possibly nonblocking commit
* @state: new modeset state to be committed
@@ -1697,7 +1713,7 @@ static void release_crtc_commit(struct completion *completion)
* drm_atomic_helper_commit_cleanup_done().
*
* This is all implemented by in drm_atomic_helper_commit(), giving drivers a
- * complete and esay-to-use default implementation of the atomic_commit() hook.
+ * complete and easy-to-use default implementation of the atomic_commit() hook.
*
* The tracking of asynchronously executed and still pending commits is done
* using the core structure &drm_crtc_commit.
@@ -1716,6 +1732,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
int i, ret;
@@ -1724,14 +1744,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
if (!commit)
return -ENOMEM;
- init_completion(&commit->flip_done);
- init_completion(&commit->hw_done);
- init_completion(&commit->cleanup_done);
- INIT_LIST_HEAD(&commit->commit_entry);
- kref_init(&commit->ref);
- commit->crtc = crtc;
+ init_commit(commit, crtc);
- state->crtcs[i].commit = commit;
+ new_crtc_state->commit = commit;
ret = stall_checks(crtc, nonblock);
if (ret)
@@ -1765,25 +1780,45 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
drm_crtc_commit_get(commit);
}
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
+ for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
+ /* Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones. */
+ if (nonblock && old_conn_state->commit &&
+ !try_wait_for_completion(&old_conn_state->commit->flip_done))
+ return -EBUSY;
+ /* commit tracked through new_crtc_state->commit, no need to do it explicitly */
+ if (new_conn_state->crtc)
+ continue;
-static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_commit *commit;
- int i = 0;
+ commit = crtc_or_fake_commit(state, old_conn_state->crtc);
+ if (!commit)
+ return -ENOMEM;
- list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
- /* skip the first entry, that's the current commit */
- if (i == 1)
- return commit;
- i++;
+ new_conn_state->commit = drm_crtc_commit_get(commit);
+ }
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ /* Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones. */
+ if (nonblock && old_plane_state->commit &&
+ !try_wait_for_completion(&old_plane_state->commit->flip_done))
+ return -EBUSY;
+
+ /*
+ * Unlike connectors, always track planes explicitly for
+ * async pageflip support.
+ */
+ commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
+ if (!commit)
+ return -ENOMEM;
+
+ new_plane_state->commit = drm_crtc_commit_get(commit);
}
- return NULL;
+ return 0;
}
+EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits
@@ -1792,7 +1827,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
* This function waits for all preceeding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
- * by calling drm_crtc_vblank_send_event() on the &drm_crtc_state.event).
+ * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -1800,17 +1835,17 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *old_conn_state;
struct drm_crtc_commit *commit;
int i;
long ret;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- spin_lock(&crtc->commit_lock);
- commit = preceeding_commit(crtc);
- if (commit)
- drm_crtc_commit_get(commit);
- spin_unlock(&crtc->commit_lock);
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ commit = old_crtc_state->commit;
if (!commit)
continue;
@@ -1828,8 +1863,48 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
if (ret == 0)
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
+ }
- drm_crtc_commit_put(commit);
+ for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ commit = old_conn_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
+ conn->base.id, conn->name);
+
+ /* Currently no support for overwriting flips, hence
+ * stall for previous one to execute completely. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
+ conn->base.id, conn->name);
+ }
+
+ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ commit = old_plane_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
+ plane->base.id, plane->name);
+
+ /* Currently no support for overwriting flips, hence
+ * stall for previous one to execute completely. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
+ plane->base.id, plane->name);
}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
@@ -1852,19 +1927,34 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- commit = old_state->crtcs[i].commit;
+ for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ commit = new_crtc_state->commit;
if (!commit)
continue;
+ /*
+ * copy new_crtc_state->commit to old_crtc_state->commit,
+ * it's unsafe to touch new_crtc_state after hw_done,
+ * but we still need to do so in cleanup_done().
+ */
+ if (old_crtc_state->commit)
+ drm_crtc_commit_put(old_crtc_state->commit);
+
+ old_crtc_state->commit = drm_crtc_commit_get(commit);
+
/* backend must have consumed any event by now */
WARN_ON(new_crtc_state->event);
complete_all(&commit->hw_done);
}
+
+ if (old_state->fake_commit) {
+ complete_all(&old_state->fake_commit->hw_done);
+ complete_all(&old_state->fake_commit->flip_done);
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
@@ -1882,39 +1972,25 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_commit *commit;
int i;
- long ret;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- commit = old_state->crtcs[i].commit;
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ commit = old_crtc_state->commit;
if (WARN_ON(!commit))
continue;
complete_all(&commit->cleanup_done);
WARN_ON(!try_wait_for_completion(&commit->hw_done));
- /* commit_list borrows our reference, need to remove before we
- * clean up our drm_atomic_state. But only after it actually
- * completed, otherwise subsequent commits won't stall properly. */
- if (try_wait_for_completion(&commit->flip_done))
- goto del_commit;
-
- /* We must wait for the vblank event to signal our completion
- * before releasing our reference, since the vblank work does
- * not hold a reference of its own. */
- ret = wait_for_completion_timeout(&commit->flip_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
- crtc->base.id, crtc->name);
-
-del_commit:
spin_lock(&crtc->commit_lock);
list_del(&commit->commit_entry);
spin_unlock(&crtc->commit_lock);
}
+
+ if (old_state->fake_commit)
+ complete_all(&old_state->fake_commit->cleanup_done);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -2294,20 +2370,44 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_private_state *old_obj_state, *new_obj_state;
if (stall) {
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- spin_lock(&crtc->commit_lock);
- commit = list_first_entry_or_null(&crtc->commit_list,
- struct drm_crtc_commit, commit_entry);
- if (commit)
- drm_crtc_commit_get(commit);
- spin_unlock(&crtc->commit_lock);
+ /*
+ * We have to stall for hw_done here before
+ * drm_atomic_helper_wait_for_dependencies() because flip
+ * depth > 1 is not yet supported by all drivers. As long as
+ * obj->state is directly dereferenced anywhere in the drivers
+ * atomic_commit_tail function, then it's unsafe to swap state
+ * before drm_atomic_helper_commit_hw_done() is called.
+ */
+
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ commit = old_crtc_state->commit;
if (!commit)
continue;
ret = wait_for_completion_interruptible(&commit->hw_done);
- drm_crtc_commit_put(commit);
+ if (ret)
+ return ret;
+ }
+ for_each_old_connector_in_state(state, connector, old_conn_state, i) {
+ commit = old_conn_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ return ret;
+ }
+
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
+ commit = old_plane_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
return ret;
}
@@ -2332,13 +2432,13 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
state->crtcs[i].state = old_crtc_state;
crtc->state = new_crtc_state;
- if (state->crtcs[i].commit) {
+ if (new_crtc_state->commit) {
spin_lock(&crtc->commit_lock);
- list_add(&state->crtcs[i].commit->commit_entry,
+ list_add(&new_crtc_state->commit->commit_entry,
&crtc->commit_list);
spin_unlock(&crtc->commit_lock);
- state->crtcs[i].commit->event = NULL;
+ new_crtc_state->commit->event = NULL;
}
}
@@ -3115,7 +3215,7 @@ struct drm_encoder *
drm_atomic_helper_best_encoder(struct drm_connector *connector)
{
WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
@@ -3187,6 +3287,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->connectors_changed = false;
state->color_mgmt_changed = false;
state->zpos_changed = false;
+ state->commit = NULL;
state->event = NULL;
state->pageflip_flags = 0;
}
@@ -3225,6 +3326,12 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
*/
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
+ if (state->commit) {
+ kfree(state->commit->event);
+ state->commit->event = NULL;
+ drm_crtc_commit_put(state->commit);
+ }
+
drm_property_blob_put(state->mode_blob);
drm_property_blob_put(state->degamma_lut);
drm_property_blob_put(state->ctm);
@@ -3287,6 +3394,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
drm_framebuffer_get(state->fb);
state->fence = NULL;
+ state->commit = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -3328,6 +3436,9 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
if (state->fence)
dma_fence_put(state->fence);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
@@ -3406,6 +3517,7 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
memcpy(state, connector->state, sizeof(*state));
if (state->crtc)
drm_connector_get(connector);
+ state->commit = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
@@ -3532,6 +3644,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
{
if (state->crtc)
drm_connector_put(state->connector);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 7ff697389d74..aad468d170a7 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -31,6 +31,7 @@
#include <drm/drmP.h>
#include "drm_internal.h"
#include "drm_legacy.h"
+#include <drm/drm_lease.h>
/**
* DOC: master and authentication
@@ -93,7 +94,7 @@ int drm_authmagic(struct drm_device *dev, void *data,
return file ? 0 : -EINVAL;
}
-static struct drm_master *drm_master_create(struct drm_device *dev)
+struct drm_master *drm_master_create(struct drm_device *dev)
{
struct drm_master *master;
@@ -107,6 +108,14 @@ static struct drm_master *drm_master_create(struct drm_device *dev)
idr_init(&master->magic_map);
master->dev = dev;
+ /* initialize the tree of output resource lessees */
+ master->lessor = NULL;
+ master->lessee_id = 0;
+ INIT_LIST_HEAD(&master->lessees);
+ INIT_LIST_HEAD(&master->lessee_list);
+ idr_init(&master->leases);
+ idr_init(&master->lessee_idr);
+
return master;
}
@@ -189,6 +198,12 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
+ if (file_priv->master->lessor != NULL) {
+ DRM_DEBUG_LEASE("Attempt to set lessee %d as master\n", file_priv->master->lessee_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = drm_set_master(dev, file_priv, false);
out_unlock:
mutex_unlock(&dev->master_mutex);
@@ -270,6 +285,13 @@ void drm_master_release(struct drm_file *file_priv)
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
out:
+ if (drm_core_check_feature(dev, DRIVER_MODESET) && file_priv->is_master) {
+ /* Revoke any leases held by this or lessees, but only if
+ * this is the "real" master
+ */
+ drm_lease_revoke(master);
+ }
+
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
@@ -288,7 +310,7 @@ out:
*/
bool drm_is_current_master(struct drm_file *fpriv)
{
- return fpriv->is_master && fpriv->master == fpriv->minor->dev->master;
+ return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master;
}
EXPORT_SYMBOL(drm_is_current_master);
@@ -310,12 +332,18 @@ static void drm_master_destroy(struct kref *kref)
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_device *dev = master->dev;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_lease_destroy(master);
+
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
+ idr_destroy(&master->leases);
+ idr_destroy(&master->lessee_idr);
+
kfree(master->unique);
kfree(master);
}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index dc8cdfe1dcac..1638bfe9627c 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -67,17 +67,12 @@ static LIST_HEAD(bridge_list);
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
- *
- * RETURNS:
- * Unconditionally returns Zero.
*/
-int drm_bridge_add(struct drm_bridge *bridge)
+void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
-
- return 0;
}
EXPORT_SYMBOL(drm_bridge_add);
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index fe0982708e95..0d002b045bd2 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -230,7 +230,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
@@ -308,7 +308,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index ba9f36cef68c..9ae236036e32 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -152,6 +152,25 @@ static void drm_connector_free(struct kref *kref)
connector->funcs->destroy(connector);
}
+void drm_connector_free_work_fn(struct work_struct *work)
+{
+ struct drm_connector *connector, *n;
+ struct drm_device *dev =
+ container_of(work, struct drm_device, mode_config.connector_free_work);
+ struct drm_mode_config *config = &dev->mode_config;
+ unsigned long flags;
+ struct llist_node *freed;
+
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ freed = llist_del_all(&config->connector_free_list);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+ llist_for_each_entry_safe(connector, n, freed, free_node) {
+ drm_mode_object_unregister(dev, &connector->base);
+ connector->funcs->destroy(connector);
+ }
+}
+
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@@ -234,6 +253,10 @@ int drm_connector_init(struct drm_device *dev,
config->link_status_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->non_desktop_property,
+ 0);
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
@@ -525,6 +548,25 @@ void drm_connector_list_iter_begin(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_connector_list_iter_begin);
+/*
+ * Extra-safe connector put function that works in any context. Should only be
+ * used from the connector_iter functions, where we never really expect to
+ * actually release the connector when dropping our final reference.
+ */
+static void
+__drm_connector_put_safe(struct drm_connector *conn)
+{
+ struct drm_mode_config *config = &conn->dev->mode_config;
+
+ lockdep_assert_held(&config->connector_list_lock);
+
+ if (!refcount_dec_and_test(&conn->base.refcount.refcount))
+ return;
+
+ llist_add(&conn->free_node, &config->connector_free_list);
+ schedule_work(&config->connector_free_work);
+}
+
/**
* drm_connector_list_iter_next - return next connector
* @iter: connectr_list iterator
@@ -554,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
/* loop until it's not a zombie connector */
} while (!kref_get_unless_zero(&iter->conn->base.refcount));
- spin_unlock_irqrestore(&config->connector_list_lock, flags);
if (old_conn)
- drm_connector_put(old_conn);
+ __drm_connector_put_safe(old_conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
return iter->conn;
}
@@ -574,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
*/
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
+ struct drm_mode_config *config = &iter->dev->mode_config;
+ unsigned long flags;
+
iter->dev = NULL;
- if (iter->conn)
- drm_connector_put(iter->conn);
+ if (iter->conn) {
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ __drm_connector_put_safe(iter->conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+ }
lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -615,7 +663,6 @@ static const struct drm_prop_enum_list drm_link_status_enum_list[] = {
{ DRM_MODE_LINK_STATUS_GOOD, "Good" },
{ DRM_MODE_LINK_STATUS_BAD, "Bad" },
};
-DRM_ENUM_NAME_FN(drm_get_link_status_name, drm_link_status_enum_list)
/**
* drm_display_info_set_bus_formats - set the supported bus formats
@@ -720,6 +767,29 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core. This is the only standard connector
* property that userspace can change.
+ *
+ * Note that this property cannot be set through the MODE_ATOMIC ioctl,
+ * userspace must use "ACTIVE" on the CRTC instead.
+ *
+ * WARNING:
+ *
+ * For userspace also running on legacy drivers the "DPMS" semantics are a
+ * lot more complicated. First, userspace cannot rely on the "DPMS" value
+ * returned by the GETCONNECTOR actually reflecting reality, because many
+ * drivers fail to update it. For atomic drivers this is taken care of in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * The second issue is that the DPMS state is only well-defined when the
+ * connector is connected to a CRTC. In atomic the DRM core enforces that
+ * "ACTIVE" is off in such a case, no such checks exists for "DPMS".
+ *
+ * Finally, when enabling an output using the legacy SETCONFIG ioctl then
+ * "DPMS" is forced to ON. But see above, that might not be reflected in
+ * the software value on legacy drivers.
+ *
+ * Summarizing: Only set "DPMS" when the connector is known to be enabled,
+ * assume that a successful SETCONFIG call also sets "DPMS" to on, and
+ * never read back the value of "DPMS" because it can be incorrect.
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
@@ -741,6 +811,10 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* value of link-status is "GOOD". If something fails during or after modeset,
* the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
* should update this value using drm_mode_connector_set_link_status_property().
+ * non_desktop:
+ * Indicates the output should be ignored for purposes of displaying a
+ * standard desktop environment or console. This is most likely because
+ * the output device is not rectilinear.
*
* Connectors also have one standardized atomic property:
*
@@ -789,6 +863,11 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.link_status_property = prop;
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.non_desktop_property = prop;
+
return 0;
}
@@ -1172,6 +1251,23 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ /* Set the display info, using edid if available, otherwise
+ * reseting the values to defaults. This duplicates the work
+ * done in drm_add_edid_modes, but that function is not
+ * consistently called before this one in all drivers and the
+ * computation is cheap enough that it seems better to
+ * duplicate it rather than attempt to ensure some arbitrary
+ * ordering of calls.
+ */
+ if (edid)
+ drm_add_display_info(connector, edid);
+ else
+ drm_reset_display_info(connector);
+
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
size,
@@ -1288,7 +1384,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
- connector = drm_connector_lookup(dev, out_resp->connector_id);
+ connector = drm_connector_lookup(dev, file_priv, out_resp->connector_id);
if (!connector)
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5af25ce5bf7c..f0556e654116 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -402,7 +402,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_resp->crtc_id);
if (!crtc)
return -ENOENT;
@@ -569,7 +569,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
- crtc = drm_crtc_find(dev, crtc_req->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
return -ENOENT;
@@ -577,7 +577,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
mutex_lock(&crtc->dev->mode_config.mutex);
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
if (ret)
@@ -595,7 +595,7 @@ retry:
/* Make refcounting symmetric with the lookup path. */
drm_framebuffer_get(fb);
} else {
- fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
@@ -680,7 +680,7 @@ retry:
goto out;
}
- connector = drm_connector_lookup(dev, out_id);
+ connector = drm_connector_lookup(dev, file_priv, out_id);
if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
@@ -717,8 +717,9 @@ out:
kfree(connector_set);
drm_mode_destroy(dev, mode);
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index eab36a460638..5a84c3bc915d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -562,12 +562,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
- save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+ save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!save_encoder_crtcs)
return -ENOMEM;
- save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+ save_connector_encoders = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_encoder *), GFP_KERNEL);
if (!save_connector_encoders) {
kfree(save_encoder_crtcs);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a43582076b20..af00f42ba269 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -106,6 +106,7 @@ int drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
void drm_mode_object_register(struct drm_device *dev,
struct drm_mode_object *obj);
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id, uint32_t type);
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
@@ -141,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
uint64_t value);
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
+void drm_connector_free_work_fn(struct work_struct *work);
/* IOCTL */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index f9e26dda56d6..9dd879589a2c 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -155,7 +155,7 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
int ret = 0;
if (drm_drv_uses_atomic_modeset(crtc->dev)) {
- ret = drm_modeset_lock_interruptible(&crtc->mutex, NULL);
+ ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index d34e5096887a..053044201e31 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -263,12 +263,6 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
return aux_dev;
}
-static int auxdev_wait_atomic_t(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
@@ -283,7 +277,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount);
- wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t,
+ wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
TASK_UNINTERRUPTIBLE);
minor = aux_dev->index;
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 0ef9011a1856..02a50929af67 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
{
u8 data;
int ret = 0;
+ int retry;
if (!mode) {
DRM_ERROR("NULL input\n");
@@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
}
/* Read Status: i2c over aux */
- ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
- &data, sizeof(data));
+ for (retry = 0; retry < 6; retry++) {
+ if (retry)
+ usleep_range(500, 1000);
+
+ ret = drm_dp_dual_mode_read(adapter,
+ DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+ &data, sizeof(data));
+ if (!ret)
+ break;
+ }
+
if (ret < 0) {
- DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+ DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 08af8d6b844b..b3d68964b407 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -137,8 +137,10 @@ EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
- case 162000:
default:
+ WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
+ DP_LINK_BW_1_62);
+ case 162000:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
@@ -151,8 +153,9 @@ EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
switch (link_bw) {
- case DP_LINK_BW_1_62:
default:
+ WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
+ case DP_LINK_BW_1_62:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 41b492f99955..70dcfa58d3c2 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -294,6 +294,12 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+ idx++;
+ break;
}
raw->cur_len = idx;
}
@@ -538,6 +544,21 @@ fail_len:
return false;
}
+static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+
+ repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen) {
+ DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
+ idx, raw->curlen);
+ return false;
+ }
+ return true;
+}
+
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *msg)
{
@@ -567,6 +588,9 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
case DP_ALLOCATE_PAYLOAD:
return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
default:
DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
return false;
@@ -693,6 +717,22 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
return 0;
}
+static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+ int port_num, bool power_up)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ if (power_up)
+ req.req_type = DP_POWER_UP_PHY;
+ else
+ req.req_type = DP_POWER_DOWN_PHY;
+
+ req.u.port_num.port_number = port_num;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
@@ -1724,6 +1764,40 @@ fail_put:
return ret;
}
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, bool power_up)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int len, ret;
+
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ drm_dp_put_port(port);
+ return -ENOMEM;
+ }
+
+ txmsg->dst = port->parent;
+ len = build_power_updown_phy(txmsg, port->port_num, power_up);
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1)
+ ret = -EINVAL;
+ else
+ ret = 0;
+ }
+ kfree(txmsg);
+ drm_dp_put_port(port);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
+
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be38ac7050d4..a934fd5e7e55 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -57,7 +57,8 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)");
module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
@@ -286,13 +287,13 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
spin_lock_irqsave(&drm_minor_lock, flags);
minor = idr_find(&drm_minors_idr, minor_id);
if (minor)
- drm_dev_ref(minor->dev);
+ drm_dev_get(minor->dev);
spin_unlock_irqrestore(&drm_minor_lock, flags);
if (!minor) {
return ERR_PTR(-ENODEV);
} else if (drm_dev_is_unplugged(minor->dev)) {
- drm_dev_unref(minor->dev);
+ drm_dev_put(minor->dev);
return ERR_PTR(-ENODEV);
}
@@ -301,7 +302,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
void drm_minor_release(struct drm_minor *minor)
{
- drm_dev_unref(minor->dev);
+ drm_dev_put(minor->dev);
}
/**
@@ -326,11 +327,11 @@ void drm_minor_release(struct drm_minor *minor)
* When cleaning up a device instance everything needs to be done in reverse:
* First unpublish the device instance with drm_dev_unregister(). Then clean up
* any other resources allocated at device initialization and drop the driver's
- * reference to &drm_device using drm_dev_unref().
+ * reference to &drm_device using drm_dev_put().
*
* Note that the lifetime rules for &drm_device instance has still a lot of
* historical baggage. Hence use the reference counting provided by
- * drm_dev_ref() and drm_dev_unref() only carefully.
+ * drm_dev_get() and drm_dev_put() only carefully.
*
* It is recommended that drivers embed &struct drm_device into their own device
* structure, which is supported through drm_dev_init().
@@ -345,7 +346,7 @@ void drm_minor_release(struct drm_minor *minor)
* Cleans up all DRM device, calling drm_lastclose().
*
* Note: Use of this function is deprecated. It will eventually go away
- * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
+ * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
* instead to make sure that the device isn't userspace accessible any more
* while teardown is in progress, ensuring that userspace can't access an
* inconsistent state.
@@ -360,7 +361,7 @@ void drm_put_dev(struct drm_device *dev)
}
drm_dev_unregister(dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);
@@ -386,7 +387,7 @@ void drm_dev_unplug(struct drm_device *dev)
mutex_lock(&drm_global_mutex);
drm_device_set_unplugged(dev);
if (dev->open_count == 0)
- drm_dev_unref(dev);
+ drm_dev_put(dev);
mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -475,8 +476,8 @@ static void drm_fs_inode_free(struct inode *inode)
* initialization sequence to make sure userspace can't access an inconsistent
* state.
*
- * The initial ref-count of the object is 1. Use drm_dev_ref() and
- * drm_dev_unref() to take and drop further ref-counts.
+ * The initial ref-count of the object is 1. Use drm_dev_get() and
+ * drm_dev_put() to take and drop further ref-counts.
*
* Note that for purely virtual devices @parent can be NULL.
*
@@ -626,8 +627,8 @@ EXPORT_SYMBOL(drm_dev_fini);
* initialization sequence to make sure userspace can't access an inconsistent
* state.
*
- * The initial ref-count of the object is 1. Use drm_dev_ref() and
- * drm_dev_unref() to take and drop further ref-counts.
+ * The initial ref-count of the object is 1. Use drm_dev_get() and
+ * drm_dev_put() to take and drop further ref-counts.
*
* Note that for purely virtual devices @parent can be NULL.
*
@@ -670,36 +671,49 @@ static void drm_dev_release(struct kref *ref)
}
/**
- * drm_dev_ref - Take reference of a DRM device
+ * drm_dev_get - Take reference of a DRM device
* @dev: device to take reference of or NULL
*
* This increases the ref-count of @dev by one. You *must* already own a
- * reference when calling this. Use drm_dev_unref() to drop this reference
+ * reference when calling this. Use drm_dev_put() to drop this reference
* again.
*
* This function never fails. However, this function does not provide *any*
* guarantee whether the device is alive or running. It only provides a
* reference to the object and the memory associated with it.
*/
-void drm_dev_ref(struct drm_device *dev)
+void drm_dev_get(struct drm_device *dev)
{
if (dev)
kref_get(&dev->ref);
}
-EXPORT_SYMBOL(drm_dev_ref);
+EXPORT_SYMBOL(drm_dev_get);
/**
- * drm_dev_unref - Drop reference of a DRM device
+ * drm_dev_put - Drop reference of a DRM device
* @dev: device to drop reference of or NULL
*
* This decreases the ref-count of @dev by one. The device is destroyed if the
* ref-count drops to zero.
*/
-void drm_dev_unref(struct drm_device *dev)
+void drm_dev_put(struct drm_device *dev)
{
if (dev)
kref_put(&dev->ref, drm_dev_release);
}
+EXPORT_SYMBOL(drm_dev_put);
+
+/**
+ * drm_dev_unref - Drop reference of a DRM device
+ * @dev: device to drop reference of or NULL
+ *
+ * This is a compatibility alias for drm_dev_put() and should not be used by new
+ * code.
+ */
+void drm_dev_unref(struct drm_device *dev)
+{
+ drm_dev_put(dev);
+}
EXPORT_SYMBOL(drm_dev_unref);
static int create_compat_control_link(struct drm_device *dev)
@@ -839,7 +853,7 @@ EXPORT_SYMBOL(drm_dev_register);
*
* Unregister the DRM device from the system. This does the reverse of
* drm_dev_register() but does not deallocate the device. The caller must call
- * drm_dev_unref() to drop their final reference.
+ * drm_dev_put() to drop their final reference.
*
* A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
* which can be called while there are still open users of @dev.
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6bb6337be920..cb487148359a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -82,6 +82,8 @@
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
+/* Non desktop display (i.e. HMD) */
+#define EDID_QUIRK_NON_DESKTOP (1 << 12)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -157,6 +159,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+
+ /* HTC Vive VR Headset */
+ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -1533,6 +1538,10 @@ static void connector_bad_edid(struct drm_connector *connector,
* level, drivers must make all reasonable efforts to expose it as an I2C
* adapter and use drm_get_edid() instead of abusing this function.
*
+ * The EDID may be overridden using debugfs override_edid or firmare EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_do_get_edid(struct drm_connector *connector,
@@ -1542,6 +1551,17 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
{
int i, j = 0, valid_extensions = 0;
u8 *edid, *new;
+ struct edid *override = NULL;
+
+ if (connector->override_edid)
+ override = drm_edid_duplicate((const struct edid *)
+ connector->edid_blob_ptr->data);
+
+ if (!override)
+ override = drm_load_edid_firmware(connector);
+
+ if (!IS_ERR_OR_NULL(override))
+ return override;
if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
@@ -1711,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, const char *vendor)
+static bool edid_vendor(const struct edid *edid, const char *vendor)
{
char edid_vendor[3];
@@ -1729,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
*
* This tells subsequent routines what fixes they need to apply.
*/
-static u32 edid_get_quirks(struct edid *edid)
+static u32 edid_get_quirks(const struct edid *edid)
{
const struct edid_quirk *quirk;
int i;
@@ -2793,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
{
u8 *edid_ext = NULL;
int i;
@@ -2815,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
return edid_ext;
}
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, CEA_EXT);
}
-static u8 *drm_find_displayid_extension(struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, DISPLAYID_EXT);
}
@@ -4343,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
}
static void drm_parse_cea_ext(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
struct drm_display_info *info = &connector->display_info;
const u8 *edid_ext;
@@ -4377,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
-static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid)
+/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
+ * all of the values which would have been set from EDID
+ */
+void
+drm_reset_display_info(struct drm_connector *connector)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ info->width_mm = 0;
+ info->height_mm = 0;
+
+ info->bpc = 0;
+ info->color_formats = 0;
+ info->cea_rev = 0;
+ info->max_tmds_clock = 0;
+ info->dvi_dual = false;
+
+ info->non_desktop = 0;
+}
+EXPORT_SYMBOL_GPL(drm_reset_display_info);
+
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
{
struct drm_display_info *info = &connector->display_info;
+ u32 quirks = edid_get_quirks(edid);
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -4392,11 +4434,15 @@ static void drm_add_display_info(struct drm_connector *connector,
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+
+ DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+
if (edid->revision < 3)
- return;
+ return quirks;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
- return;
+ return quirks;
drm_parse_cea_ext(connector, edid);
@@ -4416,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
- return;
+ return quirks;
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4451,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ return quirks;
}
+EXPORT_SYMBOL_GPL(drm_add_display_info);
static int validate_displayid(u8 *displayid, int length, int idx)
{
@@ -4605,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
- quirks = edid_get_quirks(edid);
-
/*
* CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid);
+ quirks = drm_add_display_info(connector, edid);
/*
* EDID spec says modes should be preferred in this order:
@@ -4809,7 +4855,8 @@ void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable)
+ bool rgb_quant_range_selectable,
+ bool is_hdmi2_sink)
{
/*
* CEA-861:
@@ -4833,8 +4880,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
* YQ-field to match the RGB Quantization Range being transmitted
* (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
* set YQ=1) and the Sink shall ignore the YQ-field."
+ *
+ * Unfortunate certain sinks (eg. VIZ Model 67/E261VA) get confused
+ * by non-zero YQ when receiving RGB. There doesn't seem to be any
+ * good way to tell which version of CEA-861 the sink supports, so
+ * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
+ * on on CEA-861-F.
*/
- if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+ if (!is_hdmi2_sink ||
+ rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
else
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 1c0495acf341..a4915099aaa9 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -31,6 +31,22 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
+/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
+int __drm_set_edid_firmware_path(const char *path)
+{
+ scnprintf(edid_firmware, sizeof(edid_firmware), "%s", path);
+
+ return 0;
+}
+EXPORT_SYMBOL(__drm_set_edid_firmware_path);
+
+/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
+int __drm_get_edid_firmware_path(char *buf, size_t bufsize)
+{
+ return scnprintf(buf, bufsize, "%s", edid_firmware);
+}
+EXPORT_SYMBOL(__drm_get_edid_firmware_path);
+
#define GENERIC_EDIDS 6
static const char * const generic_edid_name[GENERIC_EDIDS] = {
"edid/800x600.bin",
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 0708779840d2..59e0ebe733f8 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -220,13 +220,13 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+ encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id);
if (!encoder)
return -ENOENT;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
crtc = drm_encoder_get_crtc(encoder);
- if (crtc)
+ if (crtc && drm_lease_held(file_priv, crtc->base.id))
enc_resp->crtc_id = crtc->base.id;
else
enc_resp->crtc_id = 0;
@@ -234,7 +234,8 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
enc_resp->encoder_type = encoder->encoder_type;
enc_resp->encoder_id = encoder->base.id;
- enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv,
+ encoder->possible_crtcs);
enc_resp->possible_clones = encoder->possible_clones;
return 0;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index f2ee88363015..0e3c14174d08 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -38,7 +38,7 @@ struct drm_fbdev_cma {
* Provides helper functions for creating a cma (contiguous memory allocator)
* backed framebuffer.
*
- * drm_fb_cma_create() is used in the &drm_mode_config_funcs.fb_create
+ * drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
* callback function to create a cma backed framebuffer.
*
* An fbdev framebuffer backed by cma is also available by calling
@@ -61,8 +61,8 @@ struct drm_fbdev_cma {
* }
*
* static struct drm_framebuffer_funcs driver_fb_funcs = {
- * .destroy = drm_fb_cma_destroy,
- * .create_handle = drm_fb_cma_create_handle,
+ * .destroy = drm_gem_fb_destroy,
+ * .create_handle = drm_gem_fb_create_handle,
* .dirty = driver_fb_dirty,
* };
*
@@ -80,57 +80,6 @@ static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
return container_of(helper, struct drm_fbdev_cma, fb_helper);
}
-void drm_fb_cma_destroy(struct drm_framebuffer *fb)
-{
- drm_gem_fb_destroy(fb);
-}
-EXPORT_SYMBOL(drm_fb_cma_destroy);
-
-int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned int *handle)
-{
- return drm_gem_fb_create_handle(fb, file_priv, handle);
-}
-EXPORT_SYMBOL(drm_fb_cma_create_handle);
-
-/**
- * drm_fb_cma_create_with_funcs() - helper function for the
- * &drm_mode_config_funcs.fb_create
- * callback
- * @dev: DRM device
- * @file_priv: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
- * @funcs: vtable to be used for the new framebuffer object
- *
- * This can be used to set &drm_framebuffer_funcs for drivers that need the
- * &drm_framebuffer_funcs.dirty callback. Use drm_fb_cma_create() if you don't
- * need to change &drm_framebuffer_funcs.
- */
-struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
- const struct drm_framebuffer_funcs *funcs)
-{
- return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, funcs);
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
-
-/**
- * drm_fb_cma_create() - &drm_mode_config_funcs.fb_create callback function
- * @dev: DRM device
- * @file_priv: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
- *
- * If your hardware has special alignment or pitch requirements these should be
- * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
- * you need to set &drm_framebuffer_funcs.dirty.
- */
-struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_create);
-
/**
* drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
* @fb: The framebuffer
@@ -181,26 +130,6 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
-/**
- * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
- * @plane: Which plane
- * @state: Plane state attach fence to
- *
- * This should be set as the &struct drm_plane_helper_funcs.prepare_fb hook.
- *
- * This function checks if the plane FB has an dma-buf attached, extracts
- * the exclusive fence and attaches it to plane state for the atomic helper
- * to wait on.
- *
- * There is no need for cleanup_fb for CMA based framebuffer drivers.
- */
-int drm_fb_cma_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- return drm_gem_fb_prepare_fb(plane, state);
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
-
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b8f013ffa65..e56166334455 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -910,6 +910,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!drm_fbdev_emulation || !fb_helper)
return;
+ cancel_work_sync(&fb_helper->resume_work);
+ cancel_work_sync(&fb_helper->dirty_work);
+
info = fb_helper->fbdev;
if (info) {
if (info->cmap.len)
@@ -918,9 +921,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
fb_helper->fbdev = NULL;
- cancel_work_sync(&fb_helper->resume_work);
- cancel_work_sync(&fb_helper->dirty_work);
-
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
DRM_INFO("Cannot find any crtc or sizes\n");
+
+ /* First time: disable all crtc's.. */
+ if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+ restore_fbdev_mode(fb_helper);
return -EAGAIN;
}
@@ -2033,6 +2037,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
+ if (connector->display_info.non_desktop)
+ return false;
+
if (strict)
enable = connector->status == connector_status_connected;
else
@@ -2052,7 +2059,8 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
+ connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+
any_enabled |= enabled[i];
}
@@ -2266,7 +2274,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;
- crtcs = kzalloc(fb_helper->connector_count *
+ crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index af279844d7ce..279c1035c12d 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -381,7 +381,7 @@ int drm_mode_rmfb(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, *id);
+ fb = drm_framebuffer_lookup(dev, file_priv, *id);
if (!fb)
return -ENOENT;
@@ -450,7 +450,7 @@ int drm_mode_getfb(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, r->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
@@ -515,7 +515,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, r->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
@@ -681,6 +681,7 @@ EXPORT_SYMBOL(drm_framebuffer_init);
/**
* drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
* @dev: drm device
+ * @file_priv: drm file to check for lease against.
* @id: id of the fb object
*
* If successful, this grabs an additional reference to the framebuffer -
@@ -688,12 +689,13 @@ EXPORT_SYMBOL(drm_framebuffer_init);
* again, using drm_framebuffer_put().
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
- obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB);
+ obj = __drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_FB);
if (obj)
fb = obj_to_fb(obj);
return fb;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c55f338e380b..55d6182555c7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -282,15 +282,6 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_gem_object *obj;
- /* This is gross. The idr system doesn't let us try a delete and
- * return an error code. It just spews if you fail at deleting.
- * So, we have to grab a lock around finding the object and then
- * doing the delete on it and dropping the refcount, or the user
- * could race us to double-decrement the refcount and cause a
- * use-after-free later. Given the frequency of our handle lookups,
- * we may want to use ida for number allocation and a hash table
- * for the pointers, anyway.
- */
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
@@ -334,6 +325,12 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
if (!obj)
return -ENOENT;
+ /* Don't allow imported objects to be mapped */
+ if (obj->import_attach) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
@@ -537,7 +534,7 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
* Note that you are not allowed to change gfp-zones during runtime. That is,
* shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
* set during initialization. If you have special zone constraints, set them
- * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
* to keep pages in the required zone during swap-in.
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 373e33f22be4..020e7668dfab 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -112,7 +112,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!cma_obj->vaddr) {
- dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
+ dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
size);
ret = -ENOMEM;
goto error;
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index d54a083dc5dd..aa8cb9bfa499 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -27,19 +27,24 @@
* DOC: overview
*
* This library provides helpers for drivers that don't subclass
- * &drm_framebuffer and and use &drm_gem_object for their backing storage.
+ * &drm_framebuffer and use &drm_gem_object for their backing storage.
*
* Drivers without additional needs to validate framebuffers can simply use
- * drm_gem_fb_create() and everything is wired up automatically. But all
- * parts can be used individually.
+ * drm_gem_fb_create() and everything is wired up automatically. Other drivers
+ * can use all parts independently.
*/
/**
- * drm_gem_fb_get_obj() - Get GEM object for framebuffer
- * @fb: The framebuffer
- * @plane: Which plane
+ * drm_gem_fb_get_obj() - Get GEM object backing the framebuffer
+ * @fb: Framebuffer
+ * @plane: Plane index
*
- * Returns the GEM object for given framebuffer.
+ * No additional reference is taken beyond the one that the &drm_frambuffer
+ * already holds.
+ *
+ * Returns:
+ * Pointer to &drm_gem_object for the given framebuffer and plane index or NULL
+ * if it does not exist.
*/
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane)
@@ -82,7 +87,7 @@ drm_gem_fb_alloc(struct drm_device *dev,
/**
* drm_gem_fb_destroy - Free GEM backed framebuffer
- * @fb: DRM framebuffer
+ * @fb: Framebuffer
*
* Frees a GEM backed framebuffer with its backing buffer(s) and the structure
* itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
@@ -102,12 +107,13 @@ EXPORT_SYMBOL(drm_gem_fb_destroy);
/**
* drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
- * @fb: DRM framebuffer
- * @file: drm file
- * @handle: handle created
+ * @fb: Framebuffer
+ * @file: DRM file to register the handle for
+ * @handle: Pointer to return the created handle
*
+ * This function creates a handle for the GEM object backing the framebuffer.
* Drivers can use this as their &drm_framebuffer_funcs->create_handle
- * callback.
+ * callback. The GETFB IOCTL calls into this callback.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -120,18 +126,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
EXPORT_SYMBOL(drm_gem_fb_create_handle);
/**
- * drm_gem_fb_create_with_funcs() - helper function for the
+ * drm_gem_fb_create_with_funcs() - Helper function for the
* &drm_mode_config_funcs.fb_create
* callback
* @dev: DRM device
- * @file: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
* This can be used to set &drm_framebuffer_funcs for drivers that need the
* &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
* need to change &drm_framebuffer_funcs.
* The function does buffer size validation.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
@@ -154,7 +163,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!objs[i]) {
- DRM_DEV_ERROR(dev->dev, "Failed to lookup GEM\n");
+ DRM_DEBUG_KMS("Failed to lookup GEM object\n");
ret = -ENOENT;
goto err_gem_object_put;
}
@@ -192,15 +201,26 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
};
/**
- * drm_gem_fb_create() - &drm_mode_config_funcs.fb_create callback function
+ * drm_gem_fb_create() - Helper function for the
+ * &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
- * @file: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ *
+ * This function creates a new framebuffer object described by
+ * &drm_mode_fb_cmd2. This description includes handles for the buffer(s)
+ * backing the framebuffer.
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. The function does buffer size
* validation. Use drm_gem_fb_create_with_funcs() if you need to set
* &drm_framebuffer_funcs.dirty.
+ *
+ * Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
+ * The ADDFB2 IOCTL calls into this callback.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
@@ -212,15 +232,15 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
/**
- * drm_gem_fb_prepare_fb() - Prepare gem framebuffer
- * @plane: Which plane
- * @state: Plane state attach fence to
+ * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
+ * @plane: Plane
+ * @state: Plane state the fence will be attached to
*
- * This can be used as the &drm_plane_helper_funcs.prepare_fb hook.
- *
- * This function checks if the plane FB has an dma-buf attached, extracts
- * the exclusive fence and attaches it to plane state for the atomic helper
- * to wait on.
+ * This function prepares a GEM backed framebuffer for scanout by checking if
+ * the plane framebuffer has a DMA-BUF attached. If it does, it extracts the
+ * exclusive fence and attaches it to the plane state for the atomic helper to
+ * wait on. This function can be used as the &drm_plane_helper_funcs.prepare_fb
+ * callback.
*
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* gem based framebuffer drivers which have their buffers always pinned in
@@ -232,7 +252,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct dma_buf *dma_buf;
struct dma_fence *fence;
- if ((plane->state->fb == state->fb) || !state->fb)
+ if (plane->state->fb == state->fb || !state->fb)
return 0;
dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
@@ -246,17 +266,19 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
/**
- * drm_gem_fbdev_fb_create - Create a drm_framebuffer for fbdev emulation
+ * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
+ * emulation
* @dev: DRM device
* @sizes: fbdev size description
- * @pitch_align: optional pitch alignment
+ * @pitch_align: Optional pitch alignment
* @obj: GEM object backing the framebuffer
* @funcs: vtable to be used for the new framebuffer object
*
- * This function creates a framebuffer for use with fbdev emulation.
+ * This function creates a framebuffer from a &drm_fb_helper_surface_size
+ * description for use in the &drm_fb_helper_funcs.fb_probe callback.
*
* Returns:
- * Pointer to a drm_framebuffer on success or an error pointer on failure.
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fbdev_fb_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index fbc3f308fa19..c9d5a6cd4d41 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -55,7 +55,6 @@ int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_vblank.c */
-extern unsigned int drm_timestamp_monotonic;
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
void drm_vblank_cleanup(struct drm_device *dev);
@@ -71,6 +70,12 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
/* drm_auth.c */
int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index a9ae6dd2d593..4aafe4802099 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -235,7 +235,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
/* Only some caps make sense with UMS/render-only drivers. */
switch (req->capability) {
case DRM_CAP_TIMESTAMP_MONOTONIC:
- req->value = drm_timestamp_monotonic;
+ req->value = 1;
return 0;
case DRM_CAP_PRIME:
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
@@ -663,6 +663,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 6e35a56a6102..93e2b30fe1a5 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <drm/drmP.h>
#include "drm_crtc_helper_internal.h"
@@ -33,6 +34,33 @@ MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
+#if IS_ENABLED(CONFIG_DRM_LOAD_EDID_FIRMWARE)
+
+/* Backward compatibility for drm_kms_helper.edid_firmware */
+static int edid_firmware_set(const char *val, const struct kernel_param *kp)
+{
+ DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n");
+
+ return __drm_set_edid_firmware_path(val);
+}
+
+static int edid_firmware_get(char *buffer, const struct kernel_param *kp)
+{
+ return __drm_get_edid_firmware_path(buffer, PAGE_SIZE);
+}
+
+static const struct kernel_param_ops edid_firmware_ops = {
+ .set = edid_firmware_set,
+ .get = edid_firmware_get,
+};
+
+module_param_cb(edid_firmware, &edid_firmware_ops, NULL, 0644);
+__MODULE_PARM_TYPE(edid_firmware, "charp");
+MODULE_PARM_DESC(edid_firmware,
+ "DEPRECATED. Use drm.edid_firmware module parameter instead.");
+
+#endif
+
static int __init drm_kms_helper_init(void)
{
int ret;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
new file mode 100644
index 000000000000..59849f02e2ad
--- /dev/null
+++ b/drivers/gpu/drm/drm_lease.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright © 2017 Keith Packard <keithp@keithp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include "drm_internal.h"
+#include "drm_legacy.h"
+#include "drm_crtc_internal.h"
+#include <drm/drm_lease.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_crtc_helper.h>
+
+#define drm_for_each_lessee(lessee, lessor) \
+ list_for_each_entry((lessee), &(lessor)->lessees, lessee_list)
+
+static uint64_t drm_lease_idr_object;
+
+/**
+ * drm_lease_owner - return ancestor owner drm_master
+ * @master: drm_master somewhere within tree of lessees and lessors
+ *
+ * RETURN:
+ *
+ * drm_master at the top of the tree (i.e, with lessor NULL
+ */
+struct drm_master *drm_lease_owner(struct drm_master *master)
+{
+ while (master->lessor != NULL)
+ master = master->lessor;
+ return master;
+}
+EXPORT_SYMBOL(drm_lease_owner);
+
+/**
+ * _drm_find_lessee - find lessee by id (idr_mutex held)
+ * @master: drm_master of lessor
+ * @id: lessee_id
+ *
+ * RETURN:
+ *
+ * drm_master of the lessee if valid, NULL otherwise
+ */
+
+static struct drm_master*
+_drm_find_lessee(struct drm_master *master, int lessee_id)
+{
+ lockdep_assert_held(&master->dev->mode_config.idr_mutex);
+ return idr_find(&drm_lease_owner(master)->lessee_idr, lessee_id);
+}
+
+/**
+ * _drm_lease_held_master - check to see if an object is leased (or owned) by master (idr_mutex held)
+ * @master: the master to check the lease status of
+ * @id: the id to check
+ *
+ * Checks if the specified master holds a lease on the object. Return
+ * value:
+ *
+ * true 'master' holds a lease on (or owns) the object
+ * false 'master' does not hold a lease.
+ */
+static int _drm_lease_held_master(struct drm_master *master, int id)
+{
+ lockdep_assert_held(&master->dev->mode_config.idr_mutex);
+ if (master->lessor)
+ return idr_find(&master->leases, id) != NULL;
+ return true;
+}
+
+/**
+ * _drm_has_leased - check to see if an object has been leased (idr_mutex held)
+ * @master: the master to check the lease status of
+ * @id: the id to check
+ *
+ * Checks if any lessee of 'master' holds a lease on 'id'. Return
+ * value:
+ *
+ * true Some lessee holds a lease on the object.
+ * false No lessee has a lease on the object.
+ */
+static bool _drm_has_leased(struct drm_master *master, int id)
+{
+ struct drm_master *lessee;
+
+ lockdep_assert_held(&master->dev->mode_config.idr_mutex);
+ drm_for_each_lessee(lessee, master)
+ if (_drm_lease_held_master(lessee, id))
+ return true;
+ return false;
+}
+
+/**
+ * _drm_lease_held - check drm_mode_object lease status (idr_mutex held)
+ * @master: the drm_master
+ * @id: the object id
+ *
+ * Checks if the specified master holds a lease on the object. Return
+ * value:
+ *
+ * true 'master' holds a lease on (or owns) the object
+ * false 'master' does not hold a lease.
+ */
+bool _drm_lease_held(struct drm_file *file_priv, int id)
+{
+ if (file_priv == NULL || file_priv->master == NULL)
+ return true;
+
+ return _drm_lease_held_master(file_priv->master, id);
+}
+EXPORT_SYMBOL(_drm_lease_held);
+
+/**
+ * drm_lease_held - check drm_mode_object lease status (idr_mutex not held)
+ * @master: the drm_master
+ * @id: the object id
+ *
+ * Checks if the specified master holds a lease on the object. Return
+ * value:
+ *
+ * true 'master' holds a lease on (or owns) the object
+ * false 'master' does not hold a lease.
+ */
+bool drm_lease_held(struct drm_file *file_priv, int id)
+{
+ struct drm_master *master;
+ bool ret;
+
+ if (file_priv == NULL || file_priv->master == NULL)
+ return true;
+
+ master = file_priv->master;
+ mutex_lock(&master->dev->mode_config.idr_mutex);
+ ret = _drm_lease_held_master(master, id);
+ mutex_unlock(&master->dev->mode_config.idr_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_lease_held);
+
+/**
+ * drm_lease_filter_crtcs - restricted crtc set to leased values (idr_mutex not held)
+ * @file_priv: requestor file
+ * @crtcs: bitmask of crtcs to check
+ *
+ * Reconstructs a crtc mask based on the crtcs which are visible
+ * through the specified file.
+ */
+uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
+{
+ struct drm_master *master;
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ int count_in, count_out;
+ uint32_t crtcs_out = 0;
+
+ if (file_priv == NULL || file_priv->master == NULL)
+ return crtcs_in;
+
+ master = file_priv->master;
+ dev = master->dev;
+
+ count_in = count_out = 0;
+ mutex_lock(&master->dev->mode_config.idr_mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (_drm_lease_held_master(master, crtc->base.id)) {
+ uint32_t mask_in = 1ul << count_in;
+ if ((crtcs_in & mask_in) != 0) {
+ uint32_t mask_out = 1ul << count_out;
+ crtcs_out |= mask_out;
+ }
+ count_out++;
+ }
+ count_in++;
+ }
+ mutex_unlock(&master->dev->mode_config.idr_mutex);
+ return crtcs_out;
+}
+EXPORT_SYMBOL(drm_lease_filter_crtcs);
+
+/*
+ * drm_lease_create - create a new drm_master with leased objects (idr_mutex not held)
+ * @lessor: lease holder (or owner) of objects
+ * @leases: objects to lease to the new drm_master
+ *
+ * Uses drm_master_create to allocate a new drm_master, then checks to
+ * make sure all of the desired objects can be leased, atomically
+ * leasing them to the new drmmaster.
+ *
+ * ERR_PTR(-EACCESS) some other master holds the title to any object
+ * ERR_PTR(-ENOENT) some object is not a valid DRM object for this device
+ * ERR_PTR(-EBUSY) some other lessee holds title to this object
+ * ERR_PTR(-EEXIST) same object specified more than once in the provided list
+ * ERR_PTR(-ENOMEM) allocation failed
+ */
+static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr *leases)
+{
+ struct drm_device *dev = lessor->dev;
+ int error;
+ struct drm_master *lessee;
+ int object;
+ int id;
+ void *entry;
+
+ DRM_DEBUG_LEASE("lessor %d\n", lessor->lessee_id);
+
+ lessee = drm_master_create(lessor->dev);
+ if (!lessee) {
+ DRM_DEBUG_LEASE("drm_master_create failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ /* Insert the new lessee into the tree */
+ id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+ if (id < 0) {
+ error = id;
+ goto out_lessee;
+ }
+
+ lessee->lessee_id = id;
+ lessee->lessor = drm_master_get(lessor);
+ list_add_tail(&lessee->lessee_list, &lessor->lessees);
+
+ idr_for_each_entry(leases, entry, object) {
+ error = 0;
+ if (!idr_find(&dev->mode_config.crtc_idr, object))
+ error = -ENOENT;
+ else if (!_drm_lease_held_master(lessor, object))
+ error = -EACCES;
+ else if (_drm_has_leased(lessor, object))
+ error = -EBUSY;
+
+ if (error != 0) {
+ DRM_DEBUG_LEASE("object %d failed %d\n", object, error);
+ goto out_lessee;
+ }
+ }
+
+ /* Move the leases over */
+ lessee->leases = *leases;
+ DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return lessee;
+
+out_lessee:
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ drm_master_put(&lessee);
+
+ return ERR_PTR(error);
+}
+
+/**
+ * drm_lease_destroy - a master is going away (idr_mutex not held)
+ * @master: the drm_master being destroyed
+ *
+ * All lessees will have been destroyed as they
+ * hold a reference on their lessor. Notify any
+ * lessor for this master so that it can check
+ * the list of lessees.
+ */
+void drm_lease_destroy(struct drm_master *master)
+{
+ struct drm_device *dev = master->dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ DRM_DEBUG_LEASE("drm_lease_destroy %d\n", master->lessee_id);
+
+ /* This master is referenced by all lessees, hence it cannot be destroyed
+ * until all of them have been
+ */
+ WARN_ON(!list_empty(&master->lessees));
+
+ /* Remove this master from the lessee idr in the owner */
+ if (master->lessee_id != 0) {
+ DRM_DEBUG_LEASE("remove master %d from device list of lessees\n", master->lessee_id);
+ idr_remove(&(drm_lease_owner(master)->lessee_idr), master->lessee_id);
+ }
+
+ /* Remove this master from any lessee list it may be on */
+ list_del(&master->lessee_list);
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ if (master->lessor) {
+ /* Tell the master to check the lessee list */
+ drm_sysfs_hotplug_event(dev);
+ drm_master_put(&master->lessor);
+ }
+
+ DRM_DEBUG_LEASE("drm_lease_destroy done %d\n", master->lessee_id);
+}
+
+/**
+ * _drm_lease_revoke - revoke access to all leased objects (idr_mutex held)
+ * @master: the master losing its lease
+ */
+static void _drm_lease_revoke(struct drm_master *top)
+{
+ int object;
+ void *entry;
+ struct drm_master *master = top;
+
+ lockdep_assert_held(&top->dev->mode_config.idr_mutex);
+
+ /*
+ * Walk the tree starting at 'top' emptying all leases. Because
+ * the tree is fully connected, we can do this without recursing
+ */
+ for (;;) {
+ DRM_DEBUG_LEASE("revoke leases for %p %d\n", master, master->lessee_id);
+
+ /* Evacuate the lease */
+ idr_for_each_entry(&master->leases, entry, object)
+ idr_remove(&master->leases, object);
+
+ /* Depth-first list walk */
+
+ /* Down */
+ if (!list_empty(&master->lessees)) {
+ master = list_first_entry(&master->lessees, struct drm_master, lessee_list);
+ } else {
+ /* Up */
+ while (master != top && master == list_last_entry(&master->lessor->lessees, struct drm_master, lessee_list))
+ master = master->lessor;
+
+ if (master == top)
+ break;
+
+ /* Over */
+ master = list_entry(master->lessee_list.next, struct drm_master, lessee_list);
+ }
+ }
+}
+
+/**
+ * drm_lease_revoke - revoke access to all leased objects (idr_mutex not held)
+ * @top: the master losing its lease
+ */
+void drm_lease_revoke(struct drm_master *top)
+{
+ mutex_lock(&top->dev->mode_config.idr_mutex);
+ _drm_lease_revoke(top);
+ mutex_unlock(&top->dev->mode_config.idr_mutex);
+}
+
+static int validate_lease(struct drm_device *dev,
+ struct drm_file *lessor_priv,
+ int object_count,
+ struct drm_mode_object **objects)
+{
+ int o;
+ int has_crtc = -1;
+ int has_connector = -1;
+ int has_plane = -1;
+
+ /* we want to confirm that there is at least one crtc, plane
+ connector object. */
+
+ for (o = 0; o < object_count; o++) {
+ if (objects[o]->type == DRM_MODE_OBJECT_CRTC && has_crtc == -1) {
+ has_crtc = o;
+ }
+ if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR && has_connector == -1)
+ has_connector = o;
+
+ if (lessor_priv->universal_planes) {
+ if (objects[o]->type == DRM_MODE_OBJECT_PLANE && has_plane == -1)
+ has_plane = o;
+ }
+ }
+ if (has_crtc == -1 || has_connector == -1)
+ return -EINVAL;
+ if (lessor_priv->universal_planes && has_plane == -1)
+ return -EINVAL;
+ return 0;
+}
+
+static int fill_object_idr(struct drm_device *dev,
+ struct drm_file *lessor_priv,
+ struct idr *leases,
+ int object_count,
+ u32 *object_ids)
+{
+ struct drm_mode_object **objects;
+ u32 o;
+ int ret;
+ objects = kcalloc(object_count, sizeof(struct drm_mode_object *),
+ GFP_KERNEL);
+ if (!objects)
+ return -ENOMEM;
+
+ /* step one - get references to all the mode objects
+ and check for validity. */
+ for (o = 0; o < object_count; o++) {
+ if ((int) object_ids[o] < 0) {
+ ret = -EINVAL;
+ goto out_free_objects;
+ }
+
+ objects[o] = drm_mode_object_find(dev, lessor_priv,
+ object_ids[o],
+ DRM_MODE_OBJECT_ANY);
+ if (!objects[o]) {
+ ret = -ENOENT;
+ goto out_free_objects;
+ }
+
+ if (!drm_mode_object_lease_required(objects[o]->type)) {
+ ret = -EINVAL;
+ goto out_free_objects;
+ }
+ }
+
+ ret = validate_lease(dev, lessor_priv, object_count, objects);
+ if (ret)
+ goto out_free_objects;
+
+ /* add their IDs to the lease request - taking into account
+ universal planes */
+ for (o = 0; o < object_count; o++) {
+ struct drm_mode_object *obj = objects[o];
+ u32 object_id = objects[o]->id;
+ DRM_DEBUG_LEASE("Adding object %d to lease\n", object_id);
+
+ /*
+ * We're using an IDR to hold the set of leased
+ * objects, but we don't need to point at the object's
+ * data structure from the lease as the main crtc_idr
+ * will be used to actually find that. Instead, all we
+ * really want is a 'leased/not-leased' result, for
+ * which any non-NULL pointer will work fine.
+ */
+ ret = idr_alloc(leases, &drm_lease_idr_object , object_id, object_id + 1, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_DEBUG_LEASE("Object %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
+ goto out_free_objects;
+ }
+ if (obj->type == DRM_MODE_OBJECT_CRTC && !lessor_priv->universal_planes) {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_DEBUG_LEASE("Object primary plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
+ goto out_free_objects;
+ }
+ if (crtc->cursor) {
+ ret = idr_alloc(leases, &drm_lease_idr_object, crtc->cursor->base.id, crtc->cursor->base.id + 1, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_DEBUG_LEASE("Object cursor plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
+ goto out_free_objects;
+ }
+ }
+ }
+ }
+
+ ret = 0;
+out_free_objects:
+ for (o = 0; o < object_count; o++) {
+ if (objects[o])
+ drm_mode_object_put(objects[o]);
+ }
+ kfree(objects);
+ return ret;
+}
+
+/**
+ * drm_mode_create_lease_ioctl - create a new lease
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_create_lease
+ * @file_priv: the file being manipulated
+ *
+ * The master associated with the specified file will have a lease
+ * created containing the objects specified in the ioctl structure.
+ * A file descriptor will be allocated for that and returned to the
+ * application.
+ */
+int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessor_priv)
+{
+ struct drm_mode_create_lease *cl = data;
+ size_t object_count;
+ int ret = 0;
+ struct idr leases;
+ struct drm_master *lessor = lessor_priv->master;
+ struct drm_master *lessee = NULL;
+ struct file *lessee_file = NULL;
+ struct file *lessor_file = lessor_priv->filp;
+ struct drm_file *lessee_priv;
+ int fd = -1;
+ uint32_t *object_ids;
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* Do not allow sub-leases */
+ if (lessor->lessor)
+ return -EINVAL;
+
+ /* need some objects */
+ if (cl->object_count == 0)
+ return -EINVAL;
+
+ if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK)))
+ return -EINVAL;
+
+ object_count = cl->object_count;
+
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
+ if (IS_ERR(object_ids))
+ return PTR_ERR(object_ids);
+
+ idr_init(&leases);
+
+ /* fill and validate the object idr */
+ ret = fill_object_idr(dev, lessor_priv, &leases,
+ object_count, object_ids);
+ kfree(object_ids);
+ if (ret) {
+ idr_destroy(&leases);
+ return ret;
+ }
+
+ /* Allocate a file descriptor for the lease */
+ fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
+ if (fd < 0) {
+ idr_destroy(&leases);
+ return fd;
+ }
+
+ DRM_DEBUG_LEASE("Creating lease\n");
+ lessee = drm_lease_create(lessor, &leases);
+
+ if (IS_ERR(lessee)) {
+ ret = PTR_ERR(lessee);
+ goto out_leases;
+ }
+
+ /* Clone the lessor file to create a new file for us */
+ DRM_DEBUG_LEASE("Allocating lease file\n");
+ path_get(&lessor_file->f_path);
+ lessee_file = alloc_file(&lessor_file->f_path,
+ lessor_file->f_mode,
+ fops_get(lessor_file->f_inode->i_fop));
+
+ if (IS_ERR(lessee_file)) {
+ ret = PTR_ERR(lessee_file);
+ goto out_lessee;
+ }
+
+ /* Initialize the new file for DRM */
+ DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
+ ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
+ if (ret)
+ goto out_lessee_file;
+
+ lessee_priv = lessee_file->private_data;
+
+ /* Change the file to a master one */
+ drm_master_put(&lessee_priv->master);
+ lessee_priv->master = lessee;
+ lessee_priv->is_master = 1;
+ lessee_priv->authenticated = 1;
+
+ /* Hook up the fd */
+ fd_install(fd, lessee_file);
+
+ /* Pass fd back to userspace */
+ DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
+ cl->fd = fd;
+ cl->lessee_id = lessee->lessee_id;
+
+ DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
+ return 0;
+
+out_lessee_file:
+ fput(lessee_file);
+
+out_lessee:
+ drm_master_put(&lessee);
+
+out_leases:
+ put_unused_fd(fd);
+ idr_destroy(&leases);
+
+ DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
+ return ret;
+}
+
+/**
+ * drm_mode_list_lessees_ioctl - list lessee ids
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_list_lessees
+ * @lessor_priv: the file being manipulated
+ *
+ * Starting from the master associated with the specified file,
+ * the master with the provided lessee_id is found, and then
+ * an array of lessee ids associated with leases from that master
+ * are returned.
+ */
+
+int drm_mode_list_lessees_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessor_priv)
+{
+ struct drm_mode_list_lessees *arg = data;
+ __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr);
+ __u32 count_lessees = arg->count_lessees;
+ struct drm_master *lessor = lessor_priv->master, *lessee;
+ int count;
+ int ret = 0;
+
+ if (arg->pad)
+ return -EINVAL;
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ count = 0;
+ drm_for_each_lessee(lessee, lessor) {
+ /* Only list un-revoked leases */
+ if (!idr_is_empty(&lessee->leases)) {
+ if (count_lessees > count) {
+ DRM_DEBUG_LEASE("Add lessee %d\n", lessee->lessee_id);
+ ret = put_user(lessee->lessee_id, lessee_ids + count);
+ if (ret)
+ break;
+ }
+ count++;
+ }
+ }
+
+ DRM_DEBUG_LEASE("Lessor leases to %d\n", count);
+ if (ret == 0)
+ arg->count_lessees = count;
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret;
+}
+
+/**
+ * drm_mode_get_lease_ioctl - list leased objects
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_get_lease
+ * @file_priv: the file being manipulated
+ *
+ * Return the list of leased objects for the specified lessee
+ */
+
+int drm_mode_get_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessee_priv)
+{
+ struct drm_mode_get_lease *arg = data;
+ __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr);
+ __u32 count_objects = arg->count_objects;
+ struct drm_master *lessee = lessee_priv->master;
+ struct idr *object_idr;
+ int count;
+ void *entry;
+ int object;
+ int ret = 0;
+
+ if (arg->pad)
+ return -EINVAL;
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ if (lessee->lessor == NULL)
+ /* owner can use all objects */
+ object_idr = &lessee->dev->mode_config.crtc_idr;
+ else
+ /* lessee can only use allowed object */
+ object_idr = &lessee->leases;
+
+ count = 0;
+ idr_for_each_entry(object_idr, entry, object) {
+ if (count_objects > count) {
+ DRM_DEBUG_LEASE("adding object %d\n", object);
+ ret = put_user(object, object_ids + count);
+ if (ret)
+ break;
+ }
+ count++;
+ }
+
+ DRM_DEBUG("lease holds %d objects\n", count);
+ if (ret == 0)
+ arg->count_objects = count;
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret;
+}
+
+/**
+ * drm_mode_revoke_lease_ioctl - revoke lease
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_revoke_lease
+ * @file_priv: the file being manipulated
+ *
+ * This removes all of the objects from the lease without
+ * actually getting rid of the lease itself; that way all
+ * references to it still work correctly
+ */
+int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessor_priv)
+{
+ struct drm_mode_revoke_lease *arg = data;
+ struct drm_master *lessor = lessor_priv->master;
+ struct drm_master *lessee;
+ int ret = 0;
+
+ DRM_DEBUG_LEASE("revoke lease for %d\n", arg->lessee_id);
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ lessee = _drm_find_lessee(lessor, arg->lessee_id);
+
+ /* No such lessee */
+ if (!lessee) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ /* Lease is not held by lessor */
+ if (lessee->lessor != lessor) {
+ ret = -EACCES;
+ goto fail;
+ }
+
+ _drm_lease_revoke(lessee);
+
+fail:
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 61a1c8ea74bc..c3c79ee6119e 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
+ struct drm_mm *mm = old->mm;
+
DRM_MM_BUG_ON(!old->allocated);
*new = *old;
list_replace(&old->node_list, &new->node_list);
- rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+ rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
rb_replace_node(&old->rb_hole_size,
&new->rb_hole_size,
- &old->mm->holes_size);
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
- &old->mm->holes_addr);
+ &mm->holes_addr);
}
old->allocated = false;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 74f6ff5df656..256de7313612 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -122,10 +122,12 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr);
drm_for_each_crtc(crtc, dev) {
- if (count < card_res->count_crtcs &&
- put_user(crtc->base.id, crtc_id + count))
- return -EFAULT;
- count++;
+ if (drm_lease_held(file_priv, crtc->base.id)) {
+ if (count < card_res->count_crtcs &&
+ put_user(crtc->base.id, crtc_id + count))
+ return -EFAULT;
+ count++;
+ }
}
card_res->count_crtcs = count;
@@ -143,12 +145,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (count < card_res->count_connectors &&
- put_user(connector->base.id, connector_id + count)) {
- drm_connector_list_iter_end(&conn_iter);
- return -EFAULT;
+ if (drm_lease_held(file_priv, connector->base.id)) {
+ if (count < card_res->count_connectors &&
+ put_user(connector->base.id, connector_id + count)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return -EFAULT;
+ }
+ count++;
}
- count++;
}
card_res->count_connectors = count;
drm_connector_list_iter_end(&conn_iter);
@@ -378,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
+ init_llist_head(&dev->mode_config.connector_free_list);
+ INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+
drm_mode_create_standard_properties(dev);
/* Just to be sure */
@@ -385,7 +392,6 @@ void drm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_connector = 0;
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
- dev->mode_config.num_overlay_plane = 0;
dev->mode_config.num_total_plane = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
@@ -428,6 +434,8 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_connector_put(connector);
}
drm_connector_list_iter_end(&conn_iter);
+ /* connector_iter drops references in a work item. */
+ flush_work(&dev->mode_config.connector_free_work);
if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 1055533792f3..ce4d2fb32810 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -104,7 +104,27 @@ void drm_mode_object_unregister(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
+/**
+ * drm_lease_required - check types which must be leased to be used
+ * @type: type of object
+ *
+ * Returns whether the provided type of drm_mode_object must
+ * be owned or leased to be used by a process.
+ */
+bool drm_mode_object_lease_required(uint32_t type)
+{
+ switch(type) {
+ case DRM_MODE_OBJECT_CRTC:
+ case DRM_MODE_OBJECT_CONNECTOR:
+ case DRM_MODE_OBJECT_PLANE:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
@@ -116,6 +136,10 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
if (obj && obj->id != id)
obj = NULL;
+ if (obj && drm_mode_object_lease_required(obj->type) &&
+ !_drm_lease_held(file_priv, obj->id))
+ obj = NULL;
+
if (obj && obj->free_cb) {
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
@@ -128,6 +152,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
+ * @file_priv: drm file
* @id: id of the mode object
* @type: type of the mode object
*
@@ -136,11 +161,12 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
* by callind drm_mode_object_put().
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
- obj = __drm_mode_object_find(dev, id, type);
+ obj = __drm_mode_object_find(dev, file_priv, id, type);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
@@ -247,8 +273,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_set_value);
-int __drm_object_property_get_value(struct drm_mode_object *obj,
- struct drm_property *property, uint64_t *val)
+static int __drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *val)
{
int i;
@@ -358,7 +385,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -ENOENT;
goto out;
@@ -480,7 +507,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ arg_obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!arg_obj)
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index af4e906c630d..963e23db0fe7 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -39,23 +39,28 @@
*
* The basic usage pattern is to::
*
- * drm_modeset_acquire_init(&ctx)
+ * drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
* retry:
* foreach (lock in random_ordered_set_of_locks) {
- * ret = drm_modeset_lock(lock, &ctx)
+ * ret = drm_modeset_lock(lock, ctx)
* if (ret == -EDEADLK) {
- * drm_modeset_backoff(&ctx);
- * goto retry;
+ * ret = drm_modeset_backoff(ctx);
+ * if (!ret)
+ * goto retry;
* }
+ * if (ret)
+ * goto out;
* }
* ... do stuff ...
- * drm_modeset_drop_locks(&ctx);
- * drm_modeset_acquire_fini(&ctx);
+ * out:
+ * drm_modeset_drop_locks(ctx);
+ * drm_modeset_acquire_fini(ctx);
*
* If all that is needed is a single modeset lock, then the &struct
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
- * by passing a NULL instead of ctx in the drm_modeset_lock()
- * call and, when done, by calling drm_modeset_unlock().
+ * by passing a NULL instead of ctx in the drm_modeset_lock() call or
+ * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
+ * call drm_modeset_unlock().
*
* On top of these per-object locks using &ww_mutex there's also an overall
* &drm_mode_config.mutex, for protecting everything else. Mostly this means
@@ -88,7 +93,7 @@ void drm_modeset_lock_all(struct drm_device *dev)
struct drm_modeset_acquire_ctx *ctx;
int ret;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
if (WARN_ON(!ctx))
return;
@@ -178,7 +183,11 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
- * @flags: for future
+ * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
+ *
+ * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
+ * all calls to drm_modeset_lock() will perform an interruptible
+ * wait.
*/
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags)
@@ -186,6 +195,9 @@ void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
memset(ctx, 0, sizeof(*ctx));
ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
INIT_LIST_HEAD(&ctx->locked);
+
+ if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
+ ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
@@ -261,8 +273,19 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
return ret;
}
-static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
- bool interruptible)
+/**
+ * drm_modeset_backoff - deadlock avoidance backoff
+ * @ctx: the acquire context
+ *
+ * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
+ * you must call this function to drop all currently held locks and
+ * block until the contended lock becomes available.
+ *
+ * This function returns 0 on success, or -ERESTARTSYS if this context
+ * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
+ * wait has been interrupted.
+ */
+int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
struct drm_modeset_lock *contended = ctx->contended;
@@ -273,36 +296,11 @@ static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
drm_modeset_drop_locks(ctx);
- return modeset_lock(contended, ctx, interruptible, true);
-}
-
-/**
- * drm_modeset_backoff - deadlock avoidance backoff
- * @ctx: the acquire context
- *
- * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
- * you must call this function to drop all currently held locks and
- * block until the contended lock becomes available.
- */
-void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
-{
- modeset_backoff(ctx, false);
+ return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);
/**
- * drm_modeset_backoff_interruptible - deadlock avoidance backoff
- * @ctx: the acquire context
- *
- * Interruptible version of drm_modeset_backoff()
- */
-int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
-{
- return modeset_backoff(ctx, true);
-}
-EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
-
-/**
* drm_modeset_lock_init - initialize lock
* @lock: lock to init
*/
@@ -324,14 +322,18 @@ EXPORT_SYMBOL(drm_modeset_lock_init);
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff().
*
+ * If the @ctx is not NULL and initialized with
+ * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
+ * -ERESTARTSYS when interrupted.
+ *
* If @ctx is NULL then the function call behaves like a normal,
- * non-nesting mutex_lock() call.
+ * uninterruptible non-nesting mutex_lock() call.
*/
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
- return modeset_lock(lock, ctx, false, false);
+ return modeset_lock(lock, ctx, ctx->interruptible, false);
ww_mutex_lock(&lock->mutex, NULL);
return 0;
@@ -339,21 +341,19 @@ int drm_modeset_lock(struct drm_modeset_lock *lock,
EXPORT_SYMBOL(drm_modeset_lock);
/**
- * drm_modeset_lock_interruptible - take modeset lock
+ * drm_modeset_lock_single_interruptible - take a single modeset lock
* @lock: lock to take
- * @ctx: acquire ctx
*
- * Interruptible version of drm_modeset_lock()
+ * This function behaves as drm_modeset_lock() with a NULL context,
+ * but performs interruptible waits.
+ *
+ * This function returns 0 on success, or -ERESTARTSYS when interrupted.
*/
-int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
- struct drm_modeset_acquire_ctx *ctx)
+int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
- if (ctx)
- return modeset_lock(lock, ctx, true, false);
-
return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
-EXPORT_SYMBOL(drm_modeset_lock_interruptible);
+EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
/**
* drm_modeset_unlock - drop modeset lock
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 8dafbdfcd2ea..4c191c050e7d 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -233,6 +233,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!panel && !bridge)
return -EINVAL;
+ if (panel)
+ *panel = NULL;
remote = of_graph_get_remote_node(np, port, endpoint);
if (!remote)
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1235c9877d6f..4db9c515b74f 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -274,7 +274,7 @@ err_agp:
drm_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
- drm_dev_unref(dev);
+ drm_dev_put(dev);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 7a00351d5b5d..37a93cdffb4a 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -241,8 +241,6 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
list_add_tail(&plane->head, &config->plane_list);
plane->index = config->num_total_plane++;
- if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- config->num_overlay_plane++;
drm_object_attach_property(&plane->base,
config->plane_type_property,
@@ -353,8 +351,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
list_del(&plane->head);
dev->mode_config.num_total_plane--;
- if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- dev->mode_config.num_overlay_plane--;
WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
if (plane->state && plane->funcs->atomic_destroy_state)
@@ -462,43 +458,35 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
- int copied = 0;
- unsigned num_planes;
+ int count = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
config = &dev->mode_config;
-
- if (file_priv->universal_planes)
- num_planes = config->num_total_plane;
- else
- num_planes = config->num_overlay_plane;
+ plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr);
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
- if (num_planes &&
- (plane_resp->count_planes >= num_planes)) {
- plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
-
- /* Plane lists are invariant, no locking needed. */
- drm_for_each_plane(plane, dev) {
- /*
- * Unless userspace set the 'universal planes'
- * capability bit, only advertise overlays.
- */
- if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
- !file_priv->universal_planes)
- continue;
-
- if (put_user(plane->base.id, plane_ptr + copied))
+ drm_for_each_plane(plane, dev) {
+ /*
+ * Unless userspace set the 'universal planes'
+ * capability bit, only advertise overlays.
+ */
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+ !file_priv->universal_planes)
+ continue;
+
+ if (drm_lease_held(file_priv, plane->base.id)) {
+ if (count < plane_resp->count_planes &&
+ put_user(plane->base.id, plane_ptr + count))
return -EFAULT;
- copied++;
+ count++;
}
}
- plane_resp->count_planes = num_planes;
+ plane_resp->count_planes = count;
return 0;
}
@@ -513,14 +501,14 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- plane = drm_plane_find(dev, plane_resp->plane_id);
+ plane = drm_plane_find(dev, file_priv, plane_resp->plane_id);
if (!plane)
return -ENOENT;
drm_modeset_lock(&plane->mutex, NULL);
- if (plane->state && plane->state->crtc)
+ if (plane->state && plane->state->crtc && drm_lease_held(file_priv, plane->state->crtc->base.id))
plane_resp->crtc_id = plane->state->crtc->base.id;
- else if (!plane->state && plane->crtc)
+ else if (!plane->state && plane->crtc && drm_lease_held(file_priv, plane->crtc->base.id))
plane_resp->crtc_id = plane->crtc->base.id;
else
plane_resp->crtc_id = 0;
@@ -534,7 +522,9 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
drm_modeset_unlock(&plane->mutex);
plane_resp->plane_id = plane->base.id;
- plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv,
+ plane->possible_crtcs);
+
plane_resp->gamma_size = 0;
/*
@@ -667,7 +657,7 @@ static int setplane_internal(struct drm_plane *plane,
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
if (ret)
@@ -678,8 +668,9 @@ retry:
fail:
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -702,7 +693,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
- plane = drm_plane_find(dev, plane_req->plane_id);
+ plane = drm_plane_find(dev, file_priv, plane_req->plane_id);
if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
@@ -710,14 +701,14 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
if (plane_req->fb_id) {
- fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
return -ENOENT;
}
- crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, plane_req->crtc_id);
if (!crtc) {
drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
@@ -828,13 +819,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
- crtc = drm_crtc_find(dev, req->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
}
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret)
@@ -876,8 +867,9 @@ retry:
}
out:
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
@@ -942,7 +934,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
return -EINVAL;
- crtc = drm_crtc_find(dev, page_flip->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, page_flip->crtc_id);
if (!crtc)
return -ENOENT;
@@ -985,7 +977,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
return -EINVAL;
}
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret)
@@ -1003,7 +995,7 @@ retry:
goto out;
}
- fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, page_flip->fb_id);
if (!fb) {
ret = -ENOENT;
goto out;
@@ -1037,7 +1029,8 @@ retry:
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
- e->event.user_data = page_flip->user_data;
+ e->event.vbl.user_data = page_flip->user_data;
+ e->event.vbl.crtc_id = crtc->base.id;
ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
if (ret) {
kfree(e);
@@ -1074,8 +1067,9 @@ out:
crtc->primary->old_fb = NULL;
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 06aee1741e96..759ed93f4ba8 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -354,7 +354,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
- connector_list = kzalloc(num_connectors * sizeof(*connector_list),
+ connector_list = kcalloc(num_connectors, sizeof(*connector_list),
GFP_KERNEL);
if (!connector_list)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 22408badc617..8de93a226c24 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -318,7 +318,7 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
if (IS_ERR(dma_buf))
return dma_buf;
- drm_dev_ref(dev);
+ drm_dev_get(dev);
drm_gem_object_get(exp_info->priv);
return dma_buf;
@@ -342,7 +342,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
/* drop the reference on the export fd holds */
drm_gem_object_put_unlocked(obj);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 904966cde32b..6dc2dde5b672 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -99,7 +99,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
/* Step 2: Validate against encoders and crtcs */
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct drm_encoder *encoder = drm_encoder_find(dev, ids[i]);
+ struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
struct drm_crtc *crtc;
if (!encoder)
@@ -353,8 +353,6 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
* drm_mode_probed_add(). New modes start their life with status as OK.
* Modes are added from a single source using the following priority order.
*
- * - debugfs 'override_edid' (used for testing only)
- * - firmware EDID (drm_load_edid_firmware())
* - &drm_connector_helper_funcs.get_modes vfunc
* - if the connector status is connector_status_connected, standard
* VESA DMT modes up to 1024x768 are automatically added
@@ -483,22 +481,7 @@ retry:
goto prune;
}
- if (connector->override_edid) {
- struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
-
- count = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- } else {
- struct edid *edid = drm_load_edid_firmware(connector);
- if (!IS_ERR_OR_NULL(edid)) {
- drm_mode_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- kfree(edid);
- }
- if (count == 0)
- count = (*connector_funcs->get_modes)(connector);
- }
+ count = (*connector_funcs->get_modes)(connector);
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index bc5128203056..bae50e6b819d 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -450,7 +450,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- property = drm_property_find(dev, out_resp->prop_id);
+ property = drm_property_find(dev, file_priv, out_resp->prop_id);
if (!property)
return -ENOENT;
@@ -634,7 +634,7 @@ struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
struct drm_mode_object *obj;
struct drm_property_blob *blob = NULL;
- obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+ obj = __drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_BLOB);
if (obj)
blob = obj_to_blob(obj);
return blob;
@@ -897,7 +897,7 @@ bool drm_property_change_valid_get(struct drm_property *property,
if (value == 0)
return true;
- *ref = __drm_mode_object_find(property->dev, value,
+ *ref = __drm_mode_object_find(property->dev, NULL, value,
property->values[0]);
return *ref != NULL;
}
diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c
index 935653eb3616..657ea5ab6c3f 100644
--- a/drivers/gpu/drm/drm_scdc_helper.c
+++ b/drivers/gpu/drm/drm_scdc_helper.c
@@ -134,7 +134,6 @@ EXPORT_SYMBOL(drm_scdc_write);
* Returns:
* True if the scrambling is enabled, false otherwise.
*/
-
bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
{
u8 status;
@@ -142,7 +141,7 @@ bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status);
if (ret < 0) {
- DRM_ERROR("Failed to read scrambling status, error %d\n", ret);
+ DRM_ERROR("Failed to read scrambling status: %d\n", ret);
return false;
}
@@ -162,7 +161,6 @@ EXPORT_SYMBOL(drm_scdc_get_scrambling_status);
* Returns:
* True if scrambling is set/reset successfully, false otherwise.
*/
-
bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
{
u8 config;
@@ -170,7 +168,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_ERROR("Failed to read tmds config, err=%d\n", ret);
+ DRM_ERROR("Failed to read TMDS config: %d\n", ret);
return false;
}
@@ -181,7 +179,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
- DRM_ERROR("Failed to enable scrambling, error %d\n", ret);
+ DRM_ERROR("Failed to enable scrambling: %d\n", ret);
return false;
}
@@ -225,7 +223,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_ERROR("Failed to read tmds config, err=%d\n", ret);
+ DRM_ERROR("Failed to read TMDS config: %d\n", ret);
return false;
}
@@ -236,7 +234,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
- DRM_ERROR("Failed to set TMDS clock ratio, error %d\n", ret);
+ DRM_ERROR("Failed to set TMDS clock ratio: %d\n", ret);
return false;
}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0422b8c2c2e7..f776fc1cc543 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_syncobj_free);
-static int drm_syncobj_create(struct drm_file *file_private,
- u32 *handle, uint32_t flags)
+/**
+ * drm_syncobj_create - create a new syncobj
+ * @out_syncobj: returned syncobj
+ * @flags: DRM_SYNCOBJ_* flags
+ * @fence: if non-NULL, the syncobj will represent this fence
+ */
+int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
+ struct dma_fence *fence)
{
int ret;
struct drm_syncobj *syncobj;
@@ -284,6 +290,25 @@ static int drm_syncobj_create(struct drm_file *file_private,
}
}
+ if (fence)
+ drm_syncobj_replace_fence(syncobj, fence);
+
+ *out_syncobj = syncobj;
+ return 0;
+}
+EXPORT_SYMBOL(drm_syncobj_create);
+
+/**
+ * drm_syncobj_get_handle - get a handle from a syncobj
+ */
+int drm_syncobj_get_handle(struct drm_file *file_private,
+ struct drm_syncobj *syncobj, u32 *handle)
+{
+ int ret;
+
+ /* take a reference to put in the idr */
+ drm_syncobj_get(syncobj);
+
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
@@ -299,6 +324,22 @@ static int drm_syncobj_create(struct drm_file *file_private,
*handle = ret;
return 0;
}
+EXPORT_SYMBOL(drm_syncobj_get_handle);
+
+static int drm_syncobj_create_as_handle(struct drm_file *file_private,
+ u32 *handle, uint32_t flags)
+{
+ int ret;
+ struct drm_syncobj *syncobj;
+
+ ret = drm_syncobj_create(&syncobj, flags, NULL);
+ if (ret)
+ return ret;
+
+ ret = drm_syncobj_get_handle(file_private, syncobj, handle);
+ drm_syncobj_put(syncobj);
+ return ret;
+}
static int drm_syncobj_destroy(struct drm_file *file_private,
u32 handle)
@@ -345,33 +386,38 @@ static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
return 0;
}
-static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
- u32 handle, int *p_fd)
+int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
- struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret;
int fd;
- if (!syncobj)
- return -EINVAL;
-
fd = get_unused_fd_flags(O_CLOEXEC);
- if (fd < 0) {
- drm_syncobj_put(syncobj);
+ if (fd < 0)
return fd;
- }
if (!syncobj->file) {
ret = drm_syncobj_alloc_file(syncobj);
- if (ret)
- goto out_put_fd;
+ if (ret) {
+ put_unused_fd(fd);
+ return ret;
+ }
}
fd_install(fd, syncobj->file);
- drm_syncobj_put(syncobj);
*p_fd = fd;
return 0;
-out_put_fd:
- put_unused_fd(fd);
+}
+EXPORT_SYMBOL(drm_syncobj_get_fd);
+
+static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
+ u32 handle, int *p_fd)
+{
+ struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
+ int ret;
+
+ if (!syncobj)
+ return -EINVAL;
+
+ ret = drm_syncobj_get_fd(syncobj, p_fd);
drm_syncobj_put(syncobj);
return ret;
}
@@ -417,8 +463,8 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
return 0;
}
-int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
+ int fd, int handle)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -438,8 +484,8 @@ int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return 0;
}
-int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+static int drm_syncobj_export_sync_file(struct drm_file *file_private,
+ int handle, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -522,8 +568,8 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
- return drm_syncobj_create(file_private,
- &args->handle, args->flags);
+ return drm_syncobj_create_as_handle(file_private,
+ &args->handle, args->flags);
}
int
@@ -799,7 +845,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
}
static int drm_syncobj_array_find(struct drm_file *file_private,
- void *user_handles, uint32_t count_handles,
+ void __user *user_handles,
+ uint32_t count_handles,
struct drm_syncobj ***syncobjs_out)
{
uint32_t i, *handles;
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index a8370775ed50..baccc63db106 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -62,5 +62,5 @@ TRACE_EVENT(drm_vblank_event_delivered,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 70f2b9593edc..3717b3df34a4 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -78,28 +78,20 @@
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- struct timeval *tvblank, bool in_vblank_irq);
+ ktime_t *tvblank, bool in_vblank_irq);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-/*
- * Default to use monotonic timestamps for wait-for-vblank and page-flip
- * complete events.
- */
-unsigned int drm_timestamp_monotonic = 1;
-
static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
static void store_vblank(struct drm_device *dev, unsigned int pipe,
u32 vblank_count_inc,
- struct timeval *t_vblank, u32 last)
+ ktime_t t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -108,7 +100,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
vblank->last = last;
write_seqlock(&vblank->seqlock);
- vblank->time = *t_vblank;
+ vblank->time = t_vblank;
vblank->count += vblank_count_inc;
write_sequnlock(&vblank->seqlock);
}
@@ -151,7 +143,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
{
u32 cur_vblank;
bool rc;
- struct timeval t_vblank;
+ ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
spin_lock(&dev->vblank_time_lock);
@@ -171,13 +163,13 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
* interrupt and assign 0 for now, to mark the vblanktimestamp as invalid.
*/
if (!rc)
- t_vblank = (struct timeval) {0, 0};
+ t_vblank = 0;
/*
* +1 to make sure user will never see the same
* vblank counter value before and after a modeset
*/
- store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
+ store_vblank(dev, pipe, 1, t_vblank, cur_vblank);
spin_unlock(&dev->vblank_time_lock);
}
@@ -200,7 +192,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank, diff;
bool rc;
- struct timeval t_vblank;
+ ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
int framedur_ns = vblank->framedur_ns;
@@ -225,11 +217,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
/* trust the hw counter when it's around */
diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
} else if (rc && framedur_ns) {
- const struct timeval *t_old;
- u64 diff_ns;
-
- t_old = &vblank->time;
- diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
+ u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
/*
* Figure out how many vblanks we've missed based
@@ -263,7 +251,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
}
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
- " current=%u, diff=%u, hw=%u hw_last=%u\n",
+ " current=%llu, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
if (diff == 0) {
@@ -278,9 +266,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
* for now, to mark the vblanktimestamp as invalid.
*/
if (!rc && !in_vblank_irq)
- t_vblank = (struct timeval) {0, 0};
+ t_vblank = 0;
- store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
+ store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
@@ -311,8 +299,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u32 vblank;
unsigned long flags;
- WARN(!dev->driver->get_vblank_timestamp,
- "This function requires support for accurate vblank timestamps.");
+ WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+ "This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -379,9 +367,9 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
-static void vblank_disable_fn(unsigned long arg)
+static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
@@ -448,8 +436,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
vblank->dev = dev;
vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
- setup_timer(&vblank->disable_timer, vblank_disable_fn,
- (unsigned long)vblank);
+ timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
seqlock_init(&vblank->seqlock);
}
@@ -556,7 +543,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* @pipe: index of CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
- * @vblank_time: Pointer to struct timeval which should receive the timestamp
+ * @vblank_time: Pointer to time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
@@ -584,10 +571,10 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe,
int *max_error,
- struct timeval *vblank_time,
+ ktime_t *vblank_time,
bool in_vblank_irq)
{
- struct timeval tv_etime;
+ struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
struct drm_crtc *crtc;
@@ -676,41 +663,31 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
mode->crtc_clock);
- if (!drm_timestamp_monotonic)
- etime = ktime_mono_to_real(etime);
-
/* save this only for debugging purposes */
- tv_etime = ktime_to_timeval(etime);
+ ts_etime = ktime_to_timespec64(etime);
+ ts_vblank_time = ktime_to_timespec64(*vblank_time);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
etime = ktime_sub_ns(etime, delta_ns);
- *vblank_time = ktime_to_timeval(etime);
+ *vblank_time = etime;
- DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
pipe, hpos, vpos,
- (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
- (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
- duration_ns/1000, i);
+ (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
+ (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
+ duration_ns / 1000, i);
return true;
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
-static struct timeval get_drm_timestamp(void)
-{
- ktime_t now;
-
- now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
- return ktime_to_timeval(now);
-}
-
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval
* @dev: DRM device
* @pipe: index of CRTC whose vblank timestamp to retrieve
- * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @tvblank: Pointer to target time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
@@ -728,7 +705,7 @@ static struct timeval get_drm_timestamp(void)
*/
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- struct timeval *tvblank, bool in_vblank_irq)
+ ktime_t *tvblank, bool in_vblank_irq)
{
bool ret = false;
@@ -744,7 +721,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* Return current monotonic/gettimeofday timestamp as best estimate.
*/
if (!ret)
- *tvblank = get_drm_timestamp();
+ *tvblank = ktime_get();
return ret;
}
@@ -762,21 +739,35 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* Returns:
* The software vblank counter.
*/
-u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+u64 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
-static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime)
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ * system timestamp corresponding to that vblank counter value.
+ * @dev: DRM device
+ * @pipe: index of CRTC whose counter to retrieve
+ * @vblanktime: Pointer to ktime_t to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * This is the legacy version of drm_crtc_vblank_count_and_time().
+ */
+static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+ ktime_t *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- u32 vblank_count;
+ u64 vblank_count;
unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs)) {
- *vblanktime = (struct timeval) { 0 };
+ *vblanktime = 0;
return 0;
}
@@ -793,15 +784,15 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value
* @crtc: which counter to retrieve
- * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ * @vblanktime: Pointer to time to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
*/
-u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime)
+u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ ktime_t *vblanktime)
{
return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
vblanktime);
@@ -810,15 +801,30 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
- unsigned long seq, struct timeval *now)
+ u64 seq, ktime_t now)
{
- e->event.sequence = seq;
- e->event.tv_sec = now->tv_sec;
- e->event.tv_usec = now->tv_usec;
-
- trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe,
- e->event.sequence);
+ struct timespec64 tv;
+ switch (e->event.base.type) {
+ case DRM_EVENT_VBLANK:
+ case DRM_EVENT_FLIP_COMPLETE:
+ tv = ktime_to_timespec64(now);
+ e->event.vbl.sequence = seq;
+ /*
+ * e->event is a user space structure, with hardcoded unsigned
+ * 32-bit seconds/microseconds. This is safe as we always use
+ * monotonic timestamps since linux-4.15
+ */
+ e->event.vbl.tv_sec = tv.tv_sec;
+ e->event.vbl.tv_usec = tv.tv_nsec / 1000;
+ break;
+ case DRM_EVENT_CRTC_SEQUENCE:
+ if (seq)
+ e->event.seq.sequence = seq;
+ e->event.seq.time_ns = ktime_to_ns(now);
+ break;
+ }
+ trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
drm_send_event_locked(dev, &e->base);
}
@@ -869,8 +875,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
assert_spin_locked(&dev->event_lock);
e->pipe = pipe;
- e->event.sequence = drm_vblank_count(dev, pipe);
- e->event.crtc_id = crtc->base.id;
+ e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
list_add_tail(&e->base.link, &dev->vblank_event_list);
}
EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
@@ -890,19 +895,19 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
struct drm_device *dev = crtc->dev;
- unsigned int seq, pipe = drm_crtc_index(crtc);
- struct timeval now;
+ u64 seq;
+ unsigned int pipe = drm_crtc_index(crtc);
+ ktime_t now;
if (dev->num_crtcs > 0) {
seq = drm_vblank_count_and_time(dev, pipe, &now);
} else {
seq = 0;
- now = get_drm_timestamp();
+ now = ktime_get();
}
e->pipe = pipe;
- e->event.crtc_id = crtc->base.id;
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
@@ -1013,7 +1018,7 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
if (drm_vblank_offdelay == 0)
return;
else if (drm_vblank_offdelay < 0)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
@@ -1100,9 +1105,10 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
- struct timeval now;
+
+ ktime_t now;
unsigned long irqflags;
- unsigned int seq;
+ u64 seq;
if (WARN_ON(pipe >= dev->num_crtcs))
return;
@@ -1137,11 +1143,11 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
if (e->pipe != pipe)
continue;
DRM_DEBUG("Sending premature vblank event on disable: "
- "wanted %u, current %u\n",
- e->event.sequence, seq);
+ "wanted %llu, current %llu\n",
+ e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -1310,20 +1316,21 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static inline bool vblank_passed(u32 seq, u32 ref)
+static inline bool vblank_passed(u64 seq, u64 ref)
{
return (seq - ref) <= (1 << 23);
}
static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
+ u64 req_seq,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e;
- struct timeval now;
+ ktime_t now;
unsigned long flags;
- unsigned int seq;
+ u64 seq;
int ret;
e = kzalloc(sizeof(*e), GFP_KERNEL);
@@ -1334,8 +1341,14 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
e->pipe = pipe;
e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof(e->event);
- e->event.user_data = vblwait->request.signal;
+ e->event.base.length = sizeof(e->event.vbl);
+ e->event.vbl.user_data = vblwait->request.signal;
+ e->event.vbl.crtc_id = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ if (crtc)
+ e->event.vbl.crtc_id = crtc->base.id;
+ }
spin_lock_irqsave(&dev->event_lock, flags);
@@ -1358,21 +1371,20 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
seq = drm_vblank_count_and_time(dev, pipe, &now);
- DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n",
- vblwait->request.sequence, seq, pipe);
+ DRM_DEBUG("event on vblank count %llu, current %llu, crtc %u\n",
+ req_seq, seq, pipe);
- trace_drm_vblank_event_queued(file_priv, pipe,
- vblwait->request.sequence);
+ trace_drm_vblank_event_queued(file_priv, pipe, req_seq);
- e->event.sequence = vblwait->request.sequence;
- if (vblank_passed(seq, vblwait->request.sequence)) {
+ e->sequence = req_seq;
+ if (vblank_passed(seq, req_seq)) {
drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
vblwait->reply.sequence = seq;
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
- vblwait->reply.sequence = vblwait->request.sequence;
+ vblwait->reply.sequence = req_seq;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -1398,13 +1410,49 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
_DRM_VBLANK_NEXTONMISS));
}
+/*
+ * Widen a 32-bit param to 64-bits.
+ *
+ * \param narrow 32-bit value (missing upper 32 bits)
+ * \param near 64-bit value that should be 'close' to near
+ *
+ * This function returns a 64-bit value using the lower 32-bits from
+ * 'narrow' and constructing the upper 32-bits so that the result is
+ * as close as possible to 'near'.
+ */
+
+static u64 widen_32_to_64(u32 narrow, u64 near)
+{
+ return near + (s32) (narrow - near);
+}
+
+static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
+ struct drm_wait_vblank_reply *reply)
+{
+ ktime_t now;
+ struct timespec64 ts;
+
+ /*
+ * drm_wait_vblank_reply is a UAPI structure that uses 'long'
+ * to store the seconds. This is safe as we always use monotonic
+ * timestamps since linux-4.15.
+ */
+ reply->sequence = drm_vblank_count_and_time(dev, pipe, &now);
+ ts = ktime_to_timespec64(now);
+ reply->tval_sec = (u32)ts.tv_sec;
+ reply->tval_usec = ts.tv_nsec / 1000;
+}
+
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_crtc *crtc;
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
int ret;
- unsigned int flags, seq, pipe, high_pipe;
+ u64 req_seq, seq;
+ unsigned int pipe_index;
+ unsigned int flags, pipe, high_pipe;
if (!dev->irq_enabled)
return -EINVAL;
@@ -1425,9 +1473,25 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
if (high_pipe)
- pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ pipe_index = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
else
- pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ pipe_index = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+
+ /* Convert lease-relative crtc index into global crtc index */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ pipe = 0;
+ drm_for_each_crtc(crtc, dev) {
+ if (drm_lease_held(file_priv, crtc->base.id)) {
+ if (pipe_index == 0)
+ break;
+ pipe_index--;
+ }
+ pipe++;
+ }
+ } else {
+ pipe = pipe_index;
+ }
+
if (pipe >= dev->num_crtcs)
return -EINVAL;
@@ -1439,12 +1503,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (dev->vblank_disable_immediate &&
drm_wait_vblank_is_query(vblwait) &&
READ_ONCE(vblank->enabled)) {
- struct timeval now;
-
- vblwait->reply.sequence =
- drm_vblank_count_and_time(dev, pipe, &now);
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
+ drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
return 0;
}
@@ -1457,9 +1516,12 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
- vblwait->request.sequence += seq;
+ req_seq = seq + vblwait->request.sequence;
+ vblwait->request.sequence = req_seq;
vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ break;
case _DRM_VBLANK_ABSOLUTE:
+ req_seq = widen_32_to_64(vblwait->request.sequence, seq);
break;
default:
ret = -EINVAL;
@@ -1467,31 +1529,30 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- vblank_passed(seq, vblwait->request.sequence))
- vblwait->request.sequence = seq + 1;
+ vblank_passed(seq, req_seq)) {
+ req_seq = seq + 1;
+ vblwait->request.type &= ~_DRM_VBLANK_NEXTONMISS;
+ vblwait->request.sequence = req_seq;
+ }
if (flags & _DRM_VBLANK_EVENT) {
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
*/
- return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
+ return drm_queue_vblank_event(dev, pipe, req_seq, vblwait, file_priv);
}
- if (vblwait->request.sequence != seq) {
- DRM_DEBUG("waiting on vblank count %u, crtc %u\n",
- vblwait->request.sequence, pipe);
+ if (req_seq != seq) {
+ DRM_DEBUG("waiting on vblank count %llu, crtc %u\n",
+ req_seq, pipe);
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
vblank_passed(drm_vblank_count(dev, pipe),
- vblwait->request.sequence) ||
+ req_seq) ||
!READ_ONCE(vblank->enabled));
}
if (ret != -EINTR) {
- struct timeval now;
-
- vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
+ drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
DRM_DEBUG("crtc %d returning %u to client\n",
pipe, vblwait->reply.sequence);
@@ -1507,8 +1568,8 @@ done:
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned int seq;
+ ktime_t now;
+ u64 seq;
assert_spin_locked(&dev->event_lock);
@@ -1517,15 +1578,15 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != pipe)
continue;
- if (!vblank_passed(seq, e->event.sequence))
+ if (!vblank_passed(seq, e->sequence))
continue;
- DRM_DEBUG("vblank event on %u, current %u\n",
- e->event.sequence, seq);
+ DRM_DEBUG("vblank event on %llu, current %llu\n",
+ e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
}
trace_drm_vblank_event(pipe, seq);
@@ -1588,7 +1649,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->event_lock, irqflags);
if (disable_irq)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
return true;
}
@@ -1611,3 +1672,166 @@ bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_handle_vblank);
+
+/*
+ * Get crtc VBLANK count.
+ *
+ * \param dev DRM device
+ * \param data user arguement, pointing to a drm_crtc_get_sequence structure.
+ * \param file_priv drm file private for the user's open file descriptor
+ */
+
+int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+ struct drm_vblank_crtc *vblank;
+ int pipe;
+ struct drm_crtc_get_sequence *get_seq = data;
+ ktime_t now;
+ bool vblank_enabled;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (!dev->irq_enabled)
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
+ if (!crtc)
+ return -ENOENT;
+
+ pipe = drm_crtc_index(crtc);
+
+ vblank = &dev->vblank[pipe];
+ vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
+
+ if (!vblank_enabled) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret) {
+ DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ return ret;
+ }
+ }
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->state)
+ get_seq->active = crtc->state->enable;
+ else
+ get_seq->active = crtc->enabled;
+ drm_modeset_unlock(&crtc->mutex);
+ get_seq->sequence = drm_vblank_count_and_time(dev, pipe, &now);
+ get_seq->sequence_ns = ktime_to_ns(now);
+ if (!vblank_enabled)
+ drm_crtc_vblank_put(crtc);
+ return 0;
+}
+
+/*
+ * Queue a event for VBLANK sequence
+ *
+ * \param dev DRM device
+ * \param data user arguement, pointing to a drm_crtc_queue_sequence structure.
+ * \param file_priv drm file private for the user's open file descriptor
+ */
+
+int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+ struct drm_vblank_crtc *vblank;
+ int pipe;
+ struct drm_crtc_queue_sequence *queue_seq = data;
+ ktime_t now;
+ struct drm_pending_vblank_event *e;
+ u32 flags;
+ u64 seq;
+ u64 req_seq;
+ int ret;
+ unsigned long spin_flags;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (!dev->irq_enabled)
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
+ if (!crtc)
+ return -ENOENT;
+
+ flags = queue_seq->flags;
+ /* Check valid flag bits */
+ if (flags & ~(DRM_CRTC_SEQUENCE_RELATIVE|
+ DRM_CRTC_SEQUENCE_NEXT_ON_MISS))
+ return -EINVAL;
+
+ pipe = drm_crtc_index(crtc);
+
+ vblank = &dev->vblank[pipe];
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret) {
+ DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ goto err_free;
+ }
+
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+ req_seq = queue_seq->sequence;
+
+ if (flags & DRM_CRTC_SEQUENCE_RELATIVE)
+ req_seq += seq;
+
+ if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && vblank_passed(seq, req_seq))
+ req_seq = seq + 1;
+
+ e->pipe = pipe;
+ e->event.base.type = DRM_EVENT_CRTC_SEQUENCE;
+ e->event.base.length = sizeof(e->event.seq);
+ e->event.seq.user_data = queue_seq->user_data;
+
+ spin_lock_irqsave(&dev->event_lock, spin_flags);
+
+ /*
+ * drm_crtc_vblank_off() might have been called after we called
+ * drm_crtc_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
+ * vblank disable, so no need for further locking. The reference from
+ * drm_crtc_vblank_get() protects against vblank disable from another source.
+ */
+ if (!READ_ONCE(vblank->enabled)) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
+ &e->event.base);
+
+ if (ret)
+ goto err_unlock;
+
+ e->sequence = req_seq;
+
+ if (vblank_passed(seq, req_seq)) {
+ drm_crtc_vblank_put(crtc);
+ send_vblank_event(dev, e, seq, now);
+ queue_seq->sequence = seq;
+ } else {
+ /* drm_handle_vblank_events will call drm_vblank_put */
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+ queue_seq->sequence = req_seq;
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, spin_flags);
+ return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, spin_flags);
+ drm_crtc_vblank_put(crtc);
+err_free:
+ kfree(e);
+ return ret;
+}
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 38b477b5fbf9..a29b8f59eb15 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -7,8 +7,6 @@ config DRM_ETNAVIV
select SHMEM
select SYNC_FILE
select TMPFS
- select IOMMU_API
- select IOMMU_SUPPORT
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
select DMA_CMA if HAVE_DMA_CONTIGUOUS
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index ab3f551831d7..1281c8d4fae5 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -11,6 +11,7 @@ etnaviv-y := \
etnaviv_gpu.o \
etnaviv_iommu_v2.o \
etnaviv_iommu.o \
- etnaviv_mmu.o
+ etnaviv_mmu.o \
+ etnaviv_perfmon.o
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index ed9588f36bc9..9e7098e3207f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -250,6 +250,42 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
}
}
+/* Append a 'sync point' to the ring buffer. */
+void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
+{
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ unsigned int waitlink_offset = buffer->user_size - 16;
+ u32 dwords, target;
+
+ /*
+ * We need at most 3 dwords in the return target:
+ * 1 event + 1 end + 1 wait + 1 link.
+ */
+ dwords = 4;
+ target = etnaviv_buffer_reserve(gpu, buffer, dwords);
+
+ /* Signal sync point event */
+ CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+ VIVS_GL_EVENT_FROM_PE);
+
+ /* Stop the FE to 'pause' the GPU */
+ CMD_END(buffer);
+
+ /* Append waitlink */
+ CMD_WAIT(buffer);
+ CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
+ buffer->user_size - 4);
+
+ /*
+ * Kick off the 'sync point' command by replacing the previous
+ * WAIT with a link to the address in the ring buffer.
+ */
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(dwords),
+ target);
+}
+
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 633e0f07cbac..66ac79558bbd 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -19,6 +19,7 @@
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
#define SUBALLOC_SIZE SZ_256K
#define SUBALLOC_GRANULE SZ_4K
@@ -87,9 +88,10 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos)
+ size_t nr_bos, size_t nr_pmrs)
{
struct etnaviv_cmdbuf *cmdbuf;
+ struct etnaviv_perfmon_request *pmrs;
size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
sizeof(*cmdbuf));
int granule_offs, order, ret;
@@ -98,6 +100,12 @@ etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
if (!cmdbuf)
return NULL;
+ sz = sizeof(*pmrs) * nr_pmrs;
+ pmrs = kzalloc(sz, GFP_KERNEL);
+ if (!pmrs)
+ goto out_free_cmdbuf;
+
+ cmdbuf->pmrs = pmrs;
cmdbuf->suballoc = suballoc;
cmdbuf->size = size;
@@ -124,6 +132,10 @@ retry:
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
return cmdbuf;
+
+out_free_cmdbuf:
+ kfree(cmdbuf);
+ return NULL;
}
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
@@ -139,6 +151,7 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
suballoc->free_space = 1;
mutex_unlock(&suballoc->lock);
wake_up_all(&suballoc->free_event);
+ kfree(cmdbuf->pmrs);
kfree(cmdbuf);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index 80d78076c679..b6348b9f2a9d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -21,6 +21,7 @@
struct etnaviv_gpu;
struct etnaviv_cmdbuf_suballoc;
+struct etnaviv_perfmon_request;
struct etnaviv_cmdbuf {
/* suballocator this cmdbuf is allocated from */
@@ -38,6 +39,9 @@ struct etnaviv_cmdbuf {
u32 exec_state;
/* per GPU in-flight list */
struct list_head node;
+ /* perfmon requests */
+ unsigned int nr_pmrs;
+ struct etnaviv_perfmon_request *pmrs;
/* BOs attached to this command buffer */
unsigned int nr_bos;
struct etnaviv_vram_mapping *bo_map[0];
@@ -49,7 +53,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos);
+ size_t nr_bos, size_t nr_pmrs);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 2cb4773823c2..491eddf9b150 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -23,6 +23,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
@@ -451,6 +452,46 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
return ret;
}
+static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_pm_domain *args = data;
+ struct etnaviv_gpu *gpu;
+
+ /* reject as long as the feature isn't stable */
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_pm_query_dom(gpu, args);
+}
+
+static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_pm_signal *args = data;
+ struct etnaviv_gpu *gpu;
+
+ /* reject as long as the feature isn't stable */
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_pm_query_sig(gpu, args);
+}
+
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
@@ -463,6 +504,8 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 058389f93b69..d249acb6da08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -26,7 +26,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
-#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/sizes.h>
@@ -92,15 +91,12 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
-struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
- u32 size, u32 flags);
-struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
- u32 size, u32 flags);
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
+void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 57881167ccd2..daee3f1196df 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -704,25 +704,6 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
-struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
- u32 size, u32 flags)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = __etnaviv_gem_new(dev, size, flags);
- if (IS_ERR(obj))
- return obj;
-
- ret = etnaviv_gem_obj_add(dev, obj);
- if (ret < 0) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
- }
-
- return obj;
-}
-
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
struct etnaviv_gem_object **res)
@@ -779,7 +760,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
up_read(&mm->mmap_sem);
if (ret < 0) {
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
return ERR_PTR(ret);
}
@@ -852,7 +833,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
}
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -886,7 +867,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
if (etnaviv_obj->pages) {
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- release_pages(etnaviv_obj->pages, npages, 0);
+ release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
put_task_struct(etnaviv_obj->userptr.task);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46dfe0737f43..ff911541a190 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -21,6 +21,7 @@
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
+#include "etnaviv_perfmon.h"
/*
* Cmdstream submission:
@@ -283,6 +284,54 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
return 0;
}
+static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
+ struct etnaviv_cmdbuf *cmdbuf,
+ const struct drm_etnaviv_gem_submit_pmr *pmrs,
+ u32 nr_pms)
+{
+ u32 i;
+
+ for (i = 0; i < nr_pms; i++) {
+ const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
+ struct etnaviv_gem_submit_bo *bo;
+ int ret;
+
+ ret = submit_bo(submit, r->read_idx, &bo);
+ if (ret)
+ return ret;
+
+ /* at offset 0 a sequence number gets stored used for userspace sync */
+ if (r->read_offset == 0) {
+ DRM_ERROR("perfmon request: offset is 0");
+ return -EINVAL;
+ }
+
+ if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
+ DRM_ERROR("perfmon request: offset %u outside object", i);
+ return -EINVAL;
+ }
+
+ if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
+ DRM_ERROR("perfmon request: flags are not valid");
+ return -EINVAL;
+ }
+
+ if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
+ DRM_ERROR("perfmon request: domain or signal not valid");
+ return -EINVAL;
+ }
+
+ cmdbuf->pmrs[i].flags = r->flags;
+ cmdbuf->pmrs[i].domain = r->domain;
+ cmdbuf->pmrs[i].signal = r->signal;
+ cmdbuf->pmrs[i].sequence = r->sequence;
+ cmdbuf->pmrs[i].offset = r->read_offset;
+ cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
+ }
+
+ return 0;
+}
+
static void submit_cleanup(struct etnaviv_gem_submit *submit)
{
unsigned i;
@@ -306,6 +355,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_submit *args = data;
struct drm_etnaviv_gem_submit_reloc *relocs;
+ struct drm_etnaviv_gem_submit_pmr *pmrs;
struct drm_etnaviv_gem_submit_bo *bos;
struct etnaviv_gem_submit *submit;
struct etnaviv_cmdbuf *cmdbuf;
@@ -347,11 +397,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
+ pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
ALIGN(args->stream_size, 8) + 8,
- args->nr_bos);
- if (!bos || !relocs || !stream || !cmdbuf) {
+ args->nr_bos, args->nr_pmrs);
+ if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
ret = -ENOMEM;
goto err_submit_cmds;
}
@@ -373,6 +424,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_cmds;
}
+ ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
+ args->nr_pmrs * sizeof(*pmrs));
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+ cmdbuf->nr_pmrs = args->nr_pmrs;
+
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
if (ret) {
@@ -441,6 +500,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
+ ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
+ if (ret)
+ goto out;
+
memcpy(cmdbuf->vaddr, stream, args->stream_size);
cmdbuf->user_size = ALIGN(args->stream_size, 8);
@@ -496,6 +559,8 @@ err_submit_cmds:
kvfree(bos);
if (relocs)
kvfree(relocs);
+ if (pmrs)
+ kvfree(pmrs);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 4b152e0d31a6..e19cbe05da2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -25,6 +25,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
@@ -420,9 +421,10 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
gpu->base_rate_shader >> gpu->freq_scale);
} else {
unsigned int fscale = 1 << (6 - gpu->freq_scale);
- u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+ u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
+ clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
etnaviv_gpu_load_clock(gpu, clock);
}
}
@@ -433,24 +435,14 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
unsigned long timeout;
bool failed = true;
- /* TODO
- *
- * - clock gating
- * - puls eater
- * - what about VG?
- */
-
/* We hope that the GPU resets in under one second */
timeout = jiffies + msecs_to_jiffies(1000);
while (time_is_after_jiffies(timeout)) {
/* enable clock */
- etnaviv_gpu_update_clock(gpu);
-
- control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
-
- /* Wait for stable clock. Vivante's code waited for 1ms */
- usleep_range(1000, 10000);
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+ etnaviv_gpu_load_clock(gpu, control);
/* isolate the GPU. */
control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
@@ -461,7 +453,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
/* wait for reset. */
- msleep(1);
+ usleep_range(10, 20);
/* reset soft reset bit. */
control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
@@ -490,6 +482,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
continue;
}
+ /* disable debug registers, as they are not normally needed */
+ control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
failed = false;
break;
}
@@ -721,7 +717,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/* Create buffer: */
- gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
+ gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
if (!gpu->buffer) {
ret = -ENOMEM;
dev_err(gpu->dev, "could not create command buffer\n");
@@ -739,10 +735,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Setup event management */
spin_lock_init(&gpu->event_spinlock);
init_completion(&gpu->event_free);
- for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
- gpu->event[i].used = false;
+ bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
complete(&gpu->event_free);
- }
/* Now program the hardware */
mutex_lock(&gpu->lock);
@@ -926,7 +921,7 @@ static void recover_worker(struct work_struct *work)
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
recover_work);
unsigned long flags;
- unsigned int i;
+ unsigned int i = 0;
dev_err(gpu->dev, "hangcheck recover!\n");
@@ -945,14 +940,12 @@ static void recover_worker(struct work_struct *work)
/* complete all events, the GPU won't do it after the reset */
spin_lock_irqsave(&gpu->event_spinlock, flags);
- for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
- if (!gpu->event[i].used)
- continue;
+ for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) {
dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
- gpu->event[i].used = false;
complete(&gpu->event_free);
}
+ bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
gpu->completed_fence = gpu->active_fence;
@@ -1140,30 +1133,45 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
* event management:
*/
-static unsigned int event_alloc(struct etnaviv_gpu *gpu)
+static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
+ unsigned int *events)
{
- unsigned long ret, flags;
- unsigned int i, event = ~0U;
+ unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
+ unsigned i, acquired = 0;
- ret = wait_for_completion_timeout(&gpu->event_free,
- msecs_to_jiffies(10 * 10000));
- if (!ret)
- dev_err(gpu->dev, "wait_for_completion_timeout failed");
+ for (i = 0; i < nr_events; i++) {
+ unsigned long ret;
- spin_lock_irqsave(&gpu->event_spinlock, flags);
+ ret = wait_for_completion_timeout(&gpu->event_free, timeout);
- /* find first free event */
- for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
- if (gpu->event[i].used == false) {
- gpu->event[i].used = true;
- event = i;
- break;
+ if (!ret) {
+ dev_err(gpu->dev, "wait_for_completion_timeout failed");
+ goto out;
}
+
+ acquired++;
+ timeout = ret;
+ }
+
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
+
+ for (i = 0; i < nr_events; i++) {
+ int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
+
+ events[i] = event;
+ memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
+ set_bit(event, gpu->event_bitmap);
}
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
- return event;
+ return 0;
+
+out:
+ for (i = 0; i < acquired; i++)
+ complete(&gpu->event_free);
+
+ return -EBUSY;
}
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
@@ -1172,12 +1180,12 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
spin_lock_irqsave(&gpu->event_spinlock, flags);
- if (gpu->event[event].used == false) {
+ if (!test_bit(event, gpu->event_bitmap)) {
dev_warn(gpu->dev, "event %u is already marked as free",
event);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
} else {
- gpu->event[event].used = false;
+ clear_bit(event, gpu->event_bitmap);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
complete(&gpu->event_free);
@@ -1311,12 +1319,71 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
pm_runtime_put_autosuspend(gpu->dev);
}
+static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event, unsigned int flags)
+{
+ const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ unsigned int i;
+
+ for (i = 0; i < cmdbuf->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+
+ if (pmr->flags == flags)
+ etnaviv_perfmon_process(gpu, pmr);
+ }
+}
+
+static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ u32 val;
+
+ /* disable clock gating */
+ val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+
+ /* enable debug register */
+ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+
+ sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
+}
+
+static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ unsigned int i;
+ u32 val;
+
+ sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
+
+ for (i = 0; i < cmdbuf->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+
+ *pmr->bo_vma = pmr->sequence;
+ }
+
+ /* disable debug register */
+ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+
+ /* enable clock gating */
+ val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+}
+
+
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
struct dma_fence *fence;
- unsigned int event, i;
+ unsigned int i, nr_events = 1, event[3];
int ret;
ret = etnaviv_gpu_pm_get_sync(gpu);
@@ -1332,10 +1399,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
*
*/
- event = event_alloc(gpu);
- if (unlikely(event == ~0U)) {
- DRM_ERROR("no free event\n");
- ret = -EBUSY;
+ /*
+ * if there are performance monitor requests we need to have
+ * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
+ * requests.
+ * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
+ * and update the sequence number for userspace.
+ */
+ if (cmdbuf->nr_pmrs)
+ nr_events = 3;
+
+ ret = event_alloc(gpu, nr_events, event);
+ if (ret) {
+ DRM_ERROR("no free events\n");
goto out_pm_put;
}
@@ -1343,12 +1419,14 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
- event_free(gpu, event);
+ for (i = 0; i < nr_events; i++)
+ event_free(gpu, event[i]);
+
ret = -ENOMEM;
goto out_unlock;
}
- gpu->event[event].fence = fence;
+ gpu->event[event[0]].fence = fence;
submit->fence = dma_fence_get(fence);
gpu->active_fence = submit->fence->seqno;
@@ -1358,7 +1436,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
gpu->lastctx = cmdbuf->ctx;
}
- etnaviv_buffer_queue(gpu, event, cmdbuf);
+ if (cmdbuf->nr_pmrs) {
+ gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
+ gpu->event[event[1]].cmdbuf = cmdbuf;
+ etnaviv_sync_point_queue(gpu, event[1]);
+ }
+
+ etnaviv_buffer_queue(gpu, event[0], cmdbuf);
+
+ if (cmdbuf->nr_pmrs) {
+ gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
+ gpu->event[event[2]].cmdbuf = cmdbuf;
+ etnaviv_sync_point_queue(gpu, event[2]);
+ }
cmdbuf->fence = fence;
list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
@@ -1394,6 +1484,24 @@ out_pm_put:
return ret;
}
+static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+
+ event->sync_point(gpu, event);
+ etnaviv_gpu_start_fe(gpu, addr + 2, 2);
+}
+
+static void sync_point_worker(struct work_struct *work)
+{
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ sync_point_work);
+
+ etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
+ event_free(gpu, gpu->sync_point_event);
+}
+
/*
* Init/Cleanup:
*/
@@ -1440,7 +1548,15 @@ static irqreturn_t irq_handler(int irq, void *data)
dev_dbg(gpu->dev, "event %u\n", event);
+ if (gpu->event[event].sync_point) {
+ gpu->sync_point_event = event;
+ etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
+ }
+
fence = gpu->event[event].fence;
+ if (!fence)
+ continue;
+
gpu->event[event].fence = NULL;
dma_fence_signal(fence);
@@ -1645,6 +1761,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
INIT_LIST_HEAD(&gpu->active_cmd_list);
INIT_WORK(&gpu->retire_work, retire_worker);
+ INIT_WORK(&gpu->sync_point_work, sync_point_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 689cb8f3680c..4f10f147297a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -88,13 +88,17 @@ struct etnaviv_chip_identity {
};
struct etnaviv_event {
- bool used;
struct dma_fence *fence;
+ struct etnaviv_cmdbuf *cmdbuf;
+
+ void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_cmdbuf;
+#define ETNA_NR_EVENTS 30
+
struct etnaviv_gpu {
struct drm_device *drm;
struct thermal_cooling_device *cooling;
@@ -112,7 +116,8 @@ struct etnaviv_gpu {
u32 memory_base;
/* event management: */
- struct etnaviv_event event[30];
+ DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
+ struct etnaviv_event event[ETNA_NR_EVENTS];
struct completion event_free;
spinlock_t event_spinlock;
@@ -133,6 +138,10 @@ struct etnaviv_gpu {
/* worker for handling active-list retiring: */
struct work_struct retire_work;
+ /* worker for handling 'sync' points: */
+ struct work_struct sync_point_work;
+ int sync_point_event;
+
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 7a7c97f599d7..14e24ac6573f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -31,174 +30,115 @@
#define GPU_MEM_START 0x80000000
-struct etnaviv_iommu_domain_pgtable {
- u32 *pgtable;
- dma_addr_t paddr;
+struct etnaviv_iommuv1_domain {
+ struct etnaviv_iommu_domain base;
+ u32 *pgtable_cpu;
+ dma_addr_t pgtable_dma;
};
-struct etnaviv_iommu_domain {
- struct iommu_domain domain;
- struct device *dev;
- void *bad_page_cpu;
- dma_addr_t bad_page_dma;
- struct etnaviv_iommu_domain_pgtable pgtable;
- spinlock_t map_lock;
-};
-
-static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
-{
- return container_of(domain, struct etnaviv_iommu_domain, domain);
-}
-
-static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
- size_t size)
-{
- pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
- if (!pgtable->pgtable)
- return -ENOMEM;
-
- return 0;
-}
-
-static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
- size_t size)
+static struct etnaviv_iommuv1_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
- dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
-}
-
-static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
- unsigned long iova)
-{
- /* calcuate index into page table */
- unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
- phys_addr_t paddr;
-
- paddr = pgtable->pgtable[index];
-
- return paddr;
+ return container_of(domain, struct etnaviv_iommuv1_domain, base);
}
-static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
- unsigned long iova, phys_addr_t paddr)
-{
- /* calcuate index into page table */
- unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
-
- pgtable->pgtable[index] = paddr;
-}
-
-static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
+static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
{
u32 *p;
- int ret, i;
-
- etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
- SZ_4K,
- &etnaviv_domain->bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->bad_page_cpu)
+ int i;
+
+ etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+ etnaviv_domain->base.dev,
+ SZ_4K,
+ &etnaviv_domain->base.bad_page_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->base.bad_page_cpu)
return -ENOMEM;
- p = etnaviv_domain->bad_page_cpu;
+ p = etnaviv_domain->base.bad_page_cpu;
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
- ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
- if (ret < 0) {
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
- return ret;
+ etnaviv_domain->pgtable_cpu =
+ dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
+ &etnaviv_domain->pgtable_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->pgtable_cpu) {
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
+ return -ENOMEM;
}
for (i = 0; i < PT_ENTRIES; i++)
- etnaviv_domain->pgtable.pgtable[i] =
- etnaviv_domain->bad_page_dma;
-
- spin_lock_init(&etnaviv_domain->map_lock);
+ etnaviv_domain->pgtable_cpu[i] =
+ etnaviv_domain->base.bad_page_dma;
return 0;
}
-static void etnaviv_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
- pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
+ dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
+ etnaviv_domain->pgtable_cpu,
+ etnaviv_domain->pgtable_dma);
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
kfree(etnaviv_domain);
}
-static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- spin_lock(&etnaviv_domain->map_lock);
- pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
- spin_unlock(&etnaviv_domain->map_lock);
+ etnaviv_domain->pgtable_cpu[index] = paddr;
return 0;
}
-static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
unsigned long iova, size_t size)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- spin_lock(&etnaviv_domain->map_lock);
- pgtable_write(&etnaviv_domain->pgtable, iova,
- etnaviv_domain->bad_page_dma);
- spin_unlock(&etnaviv_domain->map_lock);
+ etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
return SZ_4K;
}
-static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
-{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
-
- return pgtable_read(&etnaviv_domain->pgtable, iova);
-}
-
-static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
{
return PT_SIZE;
}
-static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
- memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
+ memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
}
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
- .ops = {
- .domain_free = etnaviv_domain_free,
- .map = etnaviv_iommuv1_map,
- .unmap = etnaviv_iommuv1_unmap,
- .iova_to_phys = etnaviv_iommu_iova_to_phys,
- .pgsize_bitmap = SZ_4K,
- },
- .dump_size = etnaviv_iommuv1_dump_size,
- .dump = etnaviv_iommuv1_dump,
-};
-
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
- struct etnaviv_iommu_domain *etnaviv_domain =
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
u32 pgtable;
@@ -210,7 +150,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
/* set page table address in MC */
- pgtable = (u32)etnaviv_domain->pgtable.paddr;
+ pgtable = (u32)etnaviv_domain->pgtable_dma;
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -219,28 +159,37 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
+ .free = etnaviv_iommuv1_domain_free,
+ .map = etnaviv_iommuv1_map,
+ .unmap = etnaviv_iommuv1_unmap,
+ .dump_size = etnaviv_iommuv1_dump_size,
+ .dump = etnaviv_iommuv1_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
- struct etnaviv_iommu_domain *etnaviv_domain;
+ struct etnaviv_iommuv1_domain *etnaviv_domain;
+ struct etnaviv_iommu_domain *domain;
int ret;
etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
if (!etnaviv_domain)
return NULL;
- etnaviv_domain->dev = gpu->dev;
+ domain = &etnaviv_domain->base;
- etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
- etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
- etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
- etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
- etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
+ domain->dev = gpu->dev;
+ domain->base = GPU_MEM_START;
+ domain->size = PT_ENTRIES * SZ_4K;
+ domain->ops = &etnaviv_iommuv1_ops;
ret = __etnaviv_iommu_init(etnaviv_domain);
if (ret)
goto out_free;
- return &etnaviv_domain->domain;
+ return &etnaviv_domain->base;
out_free:
kfree(etnaviv_domain);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
index 8b51e7c16feb..01d59bf70d78 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -18,11 +18,14 @@
#define __ETNAVIV_IOMMU_H__
struct etnaviv_gpu;
+struct etnaviv_iommu_domain;
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index cbe447ac5974..fc60fc8ddbf0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -40,10 +39,7 @@
#define MMUv2_MAX_STLB_ENTRIES 1024
struct etnaviv_iommuv2_domain {
- struct iommu_domain domain;
- struct device *dev;
- void *bad_page_cpu;
- dma_addr_t bad_page_dma;
+ struct etnaviv_iommu_domain base;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
@@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
dma_addr_t stlb_dma[1024];
};
-static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv2_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
- return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+ return container_of(domain, struct etnaviv_iommuv2_domain, base);
}
-static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
if (size != SZ_4K)
return -EINVAL;
- if (prot & IOMMU_WRITE)
+ if (prot & ETNAVIV_PROT_WRITE)
entry |= MMUv2_PTE_WRITEABLE;
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
@@ -79,8 +77,8 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
return 0;
}
-static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -97,38 +95,26 @@ static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
return SZ_4K;
}
-static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
-{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
- int mtlb_entry, stlb_entry;
-
- mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
- stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
-
- return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
-}
-
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
u32 *p;
int ret, i, j;
/* allocate scratch page */
- etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
- SZ_4K,
- &etnaviv_domain->bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->bad_page_cpu) {
+ etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+ etnaviv_domain->base.dev,
+ SZ_4K,
+ &etnaviv_domain->base.bad_page_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->base.bad_page_cpu) {
ret = -ENOMEM;
goto fail_mem;
}
- p = etnaviv_domain->bad_page_cpu;
+ p = etnaviv_domain->base.bad_page_cpu;
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
- etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+ etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->mtlb_dma,
GFP_KERNEL);
@@ -140,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
/* pre-populate STLB pages (may want to switch to on-demand later) */
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
etnaviv_domain->stlb_cpu[i] =
- dma_alloc_coherent(etnaviv_domain->dev,
+ dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->stlb_dma[i],
GFP_KERNEL);
@@ -159,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
return 0;
fail_mem:
- if (etnaviv_domain->bad_page_cpu)
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
+ if (etnaviv_domain->base.bad_page_cpu)
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
if (etnaviv_domain->mtlb_cpu)
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
@@ -179,23 +165,23 @@ fail_mem:
return ret;
}
-static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int i;
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
@@ -203,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
vfree(etnaviv_domain);
}
-static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -217,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
return dump_size;
}
-static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -230,18 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
- .ops = {
- .domain_free = etnaviv_iommuv2_domain_free,
- .map = etnaviv_iommuv2_map,
- .unmap = etnaviv_iommuv2_unmap,
- .iova_to_phys = etnaviv_iommuv2_iova_to_phys,
- .pgsize_bitmap = SZ_4K,
- },
- .dump_size = etnaviv_iommuv2_dump_size,
- .dump = etnaviv_iommuv2_dump,
-};
-
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
@@ -254,35 +228,45 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
- (u32)etnaviv_domain->bad_page_dma);
+ (u32)etnaviv_domain->base.bad_page_dma);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
+ .free = etnaviv_iommuv2_domain_free,
+ .map = etnaviv_iommuv2_map,
+ .unmap = etnaviv_iommuv2_unmap,
+ .dump_size = etnaviv_iommuv2_dump_size,
+ .dump = etnaviv_iommuv2_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain;
+ struct etnaviv_iommu_domain *domain;
int ret;
etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
if (!etnaviv_domain)
return NULL;
- etnaviv_domain->dev = gpu->dev;
+ domain = &etnaviv_domain->base;
- etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
- etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
- etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
- etnaviv_domain->domain.geometry.aperture_start = 0;
- etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+ domain->dev = gpu->dev;
+ domain->base = 0;
+ domain->size = (u64)SZ_1G * 4;
+ domain->ops = &etnaviv_iommuv2_ops;
ret = etnaviv_iommuv2_init(etnaviv_domain);
if (ret)
goto out_free;
- return &etnaviv_domain->domain;
+ return &etnaviv_domain->base;
out_free:
vfree(etnaviv_domain);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index f103e787de94..35074b944778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -22,17 +22,64 @@
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
-static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
- unsigned long iova, int flags, void *arg)
+static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, size_t size)
{
- DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
+ size_t unmapped_page, unmapped = 0;
+ size_t pgsize = SZ_4K;
+
+ if (!IS_ALIGNED(iova | size, pgsize)) {
+ pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, pgsize);
+ return;
+ }
+
+ while (unmapped < size) {
+ unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
}
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
{
- struct iommu_domain *domain = iommu->domain;
+ unsigned long orig_iova = iova;
+ size_t pgsize = SZ_4K;
+ size_t orig_size = size;
+ int ret = 0;
+
+ if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
+ pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+ iova, &paddr, size, pgsize);
+ return -EINVAL;
+ }
+
+ while (size) {
+ ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
+
+ return ret;
+}
+
+static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
@@ -47,7 +94,7 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
- ret = iommu_map(domain, da, pa, bytes, prot);
+ ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
@@ -62,27 +109,24 @@ fail:
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- iommu_unmap(domain, da, bytes);
+ etnaviv_domain_unmap(domain, da, bytes);
da += bytes;
}
return ret;
}
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len)
+static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len)
{
- struct iommu_domain *domain = iommu->domain;
+ struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- size_t unmapped;
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
+ etnaviv_domain_unmap(domain, da, bytes);
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
@@ -90,8 +134,6 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
da += bytes;
}
-
- return 0;
}
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
@@ -237,7 +279,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
mmu->last_iova = node->start + etnaviv_obj->base.size;
mapping->iova = node->start;
ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
- IOMMU_READ | IOMMU_WRITE);
+ ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
drm_mm_remove_node(node);
@@ -271,7 +313,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
drm_mm_takedown(&mmu->mm);
- iommu_domain_free(mmu->domain);
+ mmu->domain->ops->free(mmu->domain);
kfree(mmu);
}
@@ -303,11 +345,7 @@ struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
mutex_init(&mmu->lock);
INIT_LIST_HEAD(&mmu->mappings);
- drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
- mmu->domain->geometry.aperture_end -
- mmu->domain->geometry.aperture_start + 1);
-
- iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
+ drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
return mmu;
}
@@ -338,8 +376,8 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
mutex_unlock(&mmu->lock);
return ret;
}
- ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
- IOMMU_READ);
+ ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
+ size, ETNAVIV_PROT_READ);
if (ret < 0) {
drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
@@ -362,25 +400,17 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
if (mmu->version == ETNAVIV_IOMMU_V2) {
mutex_lock(&mmu->lock);
- iommu_unmap(mmu->domain,iova, size);
+ etnaviv_domain_unmap(mmu->domain, iova, size);
drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
}
}
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
- struct etnaviv_iommu_ops *ops;
-
- ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
-
- return ops->dump_size(iommu->domain);
+ return iommu->domain->ops->dump_size(iommu->domain);
}
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
- struct etnaviv_iommu_ops *ops;
-
- ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
-
- ops->dump(iommu->domain, buf);
+ iommu->domain->ops->dump(iommu->domain, buf);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index 54be289e5981..ab603f5166b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -17,7 +17,8 @@
#ifndef __ETNAVIV_MMU_H__
#define __ETNAVIV_MMU_H__
-#include <linux/iommu.h>
+#define ETNAVIV_PROT_READ (1 << 0)
+#define ETNAVIV_PROT_WRITE (1 << 1)
enum etnaviv_iommu_version {
ETNAVIV_IOMMU_V1 = 0,
@@ -26,16 +27,31 @@ enum etnaviv_iommu_version {
struct etnaviv_gpu;
struct etnaviv_vram_mapping;
+struct etnaviv_iommu_domain;
-struct etnaviv_iommu_ops {
- struct iommu_ops ops;
- size_t (*dump_size)(struct iommu_domain *);
- void (*dump)(struct iommu_domain *, void *);
+struct etnaviv_iommu_domain_ops {
+ void (*free)(struct etnaviv_iommu_domain *);
+ int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+ size_t size);
+ size_t (*dump_size)(struct etnaviv_iommu_domain *);
+ void (*dump)(struct etnaviv_iommu_domain *, void *);
+};
+
+struct etnaviv_iommu_domain {
+ struct device *dev;
+ void *bad_page_cpu;
+ dma_addr_t bad_page_dma;
+ u64 base;
+ u64 size;
+
+ const struct etnaviv_iommu_domain_ops *ops;
};
struct etnaviv_iommu {
struct etnaviv_gpu *gpu;
- struct iommu_domain *domain;
+ struct etnaviv_iommu_domain *domain;
enum etnaviv_iommu_version version;
@@ -49,18 +65,11 @@ struct etnaviv_iommu {
struct etnaviv_gem_object;
-int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
- int cnt);
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len, int prot);
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len);
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
struct etnaviv_vram_mapping *mapping);
-void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
struct drm_mm_node *vram_node, size_t size,
@@ -73,6 +82,7 @@ size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
new file mode 100644
index 000000000000..768f5aafdd18
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_perfmon.h"
+#include "state_hi.xml.h"
+
+struct etnaviv_pm_domain;
+
+struct etnaviv_pm_signal {
+ char name[64];
+ u32 data;
+
+ u32 (*sample)(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal);
+};
+
+struct etnaviv_pm_domain {
+ char name[64];
+
+ /* profile register */
+ u32 profile_read;
+ u32 profile_config;
+
+ u8 nr_signals;
+ const struct etnaviv_pm_signal *signal;
+};
+
+struct etnaviv_pm_domain_meta {
+ const struct etnaviv_pm_domain *domains;
+ u32 nr_domains;
+};
+
+static u32 simple_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ return gpu_read(gpu, signal->data);
+}
+
+static u32 perf_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ gpu_write(gpu, domain->profile_config, signal->data);
+
+ return gpu_read(gpu, domain->profile_read);
+}
+
+static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ u32 value = 0;
+ unsigned i;
+
+ for (i = 0; i < gpu->identity.pixel_pipes; i++) {
+ clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
+ clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(i);
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+ gpu_write(gpu, domain->profile_config, signal->data);
+ value += gpu_read(gpu, domain->profile_read);
+ }
+
+ /* switch back to pixel pipe 0 to prevent GPU hang */
+ clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
+ clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(0);
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+
+ return value;
+}
+
+static const struct etnaviv_pm_domain doms_3d[] = {
+ {
+ .name = "HI",
+ .profile_read = VIVS_MC_PROFILE_HI_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG2,
+ .nr_signals = 5,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_CYCLES",
+ VIVS_HI_PROFILE_TOTAL_CYCLES,
+ &simple_reg_read
+ },
+ {
+ "IDLE_CYCLES",
+ VIVS_HI_PROFILE_IDLE_CYCLES,
+ &simple_reg_read
+ },
+ {
+ "AXI_CYCLES_READ_REQUEST_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED,
+ &perf_reg_read
+ },
+ {
+ "AXI_CYCLES_WRITE_REQUEST_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED,
+ &perf_reg_read
+ },
+ {
+ "AXI_CYCLES_WRITE_DATA_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "PE",
+ .profile_read = VIVS_MC_PROFILE_PE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 5,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE,
+ &pipe_reg_read
+ },
+ {
+ "PIXEL_COUNT_KILLED_BY_DEPTH_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE,
+ &pipe_reg_read
+ },
+ {
+ "PIXEL_COUNT_DRAWN_BY_COLOR_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE,
+ &pipe_reg_read
+ },
+ {
+ "PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE,
+ &pipe_reg_read
+ }
+ }
+ },
+ {
+ .name = "SH",
+ .profile_read = VIVS_MC_PROFILE_SH_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 9,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "SHADER_CYCLES",
+ VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES,
+ &perf_reg_read
+ },
+ {
+ "PS_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "RENDERED_PIXEL_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "VS_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "RENDERED_VERTICE_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "VTX_BRANCH_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "VTX_TEXLD_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "PXL_BRANCH_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "PXL_TEXLD_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER,
+ &pipe_reg_read
+ }
+ }
+ },
+ {
+ .name = "PA",
+ .profile_read = VIVS_MC_PROFILE_PA_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 6,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "INPUT_VTX_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "INPUT_PRIM_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "OUTPUT_PRIM_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "DEPTH_CLIPPED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "TRIVIAL_REJECTED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "CULLED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER,
+ &pipe_reg_read
+ }
+ }
+ },
+ {
+ .name = "SE",
+ .profile_read = VIVS_MC_PROFILE_SE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 2,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "CULLED_TRIANGLE_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CULLED_LINES_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "RA",
+ .profile_read = VIVS_MC_PROFILE_RA_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 7,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "VALID_PIXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_QUAD_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT,
+ &perf_reg_read
+ },
+ {
+ "VALID_QUAD_COUNT_AFTER_EARLY_Z",
+ VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_PRIMITIVE_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT,
+ &perf_reg_read
+ },
+ {
+ "PIPE_CACHE_MISS_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "PREFETCH_CACHE_MISS_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "CULLED_QUAD_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "TX",
+ .profile_read = VIVS_MC_PROFILE_TX_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 9,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_BILINEAR_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_TRILINEAR_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_DISCARDED_TEXTURE_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_TEXTURE_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "MEM_READ_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT,
+ &perf_reg_read
+ },
+ {
+ "MEM_READ_IN_8B_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_MISS_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_HIT_TEXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_MISS_TEXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "MC",
+ .profile_read = VIVS_MC_PROFILE_MC_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG2,
+ .nr_signals = 3,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_READ_REQ_8B_FROM_PIPELINE",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_READ_REQ_8B_FROM_IP",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_WRITE_REQ_8B_FROM_PIPELINE",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE,
+ &perf_reg_read
+ }
+ }
+ }
+};
+
+static const struct etnaviv_pm_domain doms_2d[] = {
+ {
+ .name = "PE",
+ .profile_read = VIVS_MC_PROFILE_PE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 1,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "PIXELS_RENDERED_2D",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D,
+ &pipe_reg_read
+ }
+ }
+ }
+};
+
+static const struct etnaviv_pm_domain doms_vg[] = {
+};
+
+static const struct etnaviv_pm_domain_meta doms_meta[] = {
+ {
+ .nr_domains = ARRAY_SIZE(doms_3d),
+ .domains = &doms_3d[0]
+ },
+ {
+ .nr_domains = ARRAY_SIZE(doms_2d),
+ .domains = &doms_2d[0]
+ },
+ {
+ .nr_domains = ARRAY_SIZE(doms_vg),
+ .domains = &doms_vg[0]
+ }
+};
+
+int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_domain *domain)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
+ const struct etnaviv_pm_domain *dom;
+
+ if (domain->iter >= meta->nr_domains)
+ return -EINVAL;
+
+ dom = meta->domains + domain->iter;
+
+ domain->id = domain->iter;
+ domain->nr_signals = dom->nr_signals;
+ strncpy(domain->name, dom->name, sizeof(domain->name));
+
+ domain->iter++;
+ if (domain->iter == meta->nr_domains)
+ domain->iter = 0xff;
+
+ return 0;
+}
+
+int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_signal *signal)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
+ const struct etnaviv_pm_domain *dom;
+ const struct etnaviv_pm_signal *sig;
+
+ if (signal->domain >= meta->nr_domains)
+ return -EINVAL;
+
+ dom = meta->domains + signal->domain;
+
+ if (signal->iter > dom->nr_signals)
+ return -EINVAL;
+
+ sig = &dom->signal[signal->iter];
+
+ signal->id = signal->iter;
+ strncpy(signal->name, sig->name, sizeof(signal->name));
+
+ signal->iter++;
+ if (signal->iter == dom->nr_signals)
+ signal->iter = 0xffff;
+
+ return 0;
+}
+
+int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
+ u32 exec_state)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
+ const struct etnaviv_pm_domain *dom;
+
+ if (r->domain >= meta->nr_domains)
+ return -EINVAL;
+
+ dom = meta->domains + r->domain;
+
+ if (r->signal > dom->nr_signals)
+ return -EINVAL;
+
+ return 0;
+}
+
+void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
+ const struct etnaviv_perfmon_request *pmr)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[gpu->exec_state];
+ const struct etnaviv_pm_domain *dom;
+ const struct etnaviv_pm_signal *sig;
+ u32 *bo = pmr->bo_vma;
+ u32 val;
+
+ dom = meta->domains + pmr->domain;
+ sig = &dom->signal[pmr->signal];
+ val = sig->sample(gpu, dom, sig);
+
+ *(bo + pmr->offset) = val;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
new file mode 100644
index 000000000000..35dce194cb00
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_PERFMON_H__
+#define __ETNAVIV_PERFMON_H__
+
+struct etnaviv_gpu;
+struct drm_etnaviv_pm_domain;
+struct drm_etnaviv_pm_signal;
+
+struct etnaviv_perfmon_request
+{
+ u32 flags;
+ u8 domain;
+ u8 signal;
+ u32 sequence;
+
+ /* bo to store a value */
+ u32 *bo_vma;
+ u32 offset;
+};
+
+int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_domain *domain);
+
+int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_signal *signal);
+
+int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
+ u32 exec_state);
+
+void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
+ const struct etnaviv_perfmon_request *pmr);
+
+#endif /* __ETNAVIV_PERFMON_H__ */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 305dc3d4ff77..5a7c9d8abd6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -3,6 +3,7 @@ config DRM_EXYNOS
depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
+ select SND_SOC_HDMI_CODEC if SND_SOC
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 6ce0821590df..dc01342e759a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -95,8 +95,23 @@ static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc,
return MODE_OK;
}
+static bool exynos_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (exynos_crtc->ops->mode_fixup)
+ return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
+ adjusted_mode);
+
+ return true;
+}
+
+
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.mode_valid = exynos_crtc_mode_valid,
+ .mode_fixup = exynos_crtc_mode_fixup,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 82b72425a42f..27e423b87266 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -37,8 +37,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static struct device *exynos_drm_get_dma_device(void);
-
int exynos_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -148,7 +146,7 @@ static struct drm_driver exynos_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
.gem_prime_vmap = exynos_drm_gem_prime_vmap,
@@ -301,6 +299,27 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
return match ?: ERR_PTR(-ENODEV);
}
+static struct device *exynos_drm_get_dma_device(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+ struct device *dev;
+
+ if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
+ continue;
+
+ while ((dev = bus_find_device(&platform_bus_type, NULL,
+ &info->driver->driver,
+ (void *)platform_bus_type.match))) {
+ put_device(dev);
+ return dev;
+ }
+ }
+ return NULL;
+}
+
static int exynos_drm_bind(struct device *dev)
{
struct exynos_drm_private *private;
@@ -469,27 +488,6 @@ static struct platform_driver exynos_drm_platform_driver = {
},
};
-static struct device *exynos_drm_get_dma_device(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
- struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- struct device *dev;
-
- if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
- continue;
-
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
- put_device(dev);
- return dev;
- }
- }
- return NULL;
-}
-
static void exynos_drm_unregister_devices(void)
{
int i;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index f8bae4cb4823..589d465a7f88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -136,6 +136,9 @@ struct exynos_drm_crtc_ops {
u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode);
+ bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -191,11 +194,6 @@ struct drm_exynos_file_private {
/*
* Exynos drm private structure.
*
- * @da_start: start address to device address space.
- * with iommu, device address space starts from this address
- * otherwise default one.
- * @da_space_size: size of device address space.
- * if 0 then default value is used for it.
* @pending: the crtcs that have pending updates to finish
* @lock: protect access to @pending
* @wait: wait an atomic commit to finish
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 077de014d610..11cc01b47bc0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
if (IS_ERR(exynos_gem))
return exynos_gem;
+ if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
+ /*
+ * when no IOMMU is available, all allocated buffers are
+ * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
+ */
+ flags &= ~EXYNOS_BO_NONCONTIG;
+ DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
+ }
+
/* set memory type and cache attribute from user side. */
exynos_gem->flags = flags;
@@ -506,6 +515,12 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}
/* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
+}
+
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e86d1a9518c3..5a4c7de80f65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -117,6 +117,8 @@ int exynos_drm_gem_fault(struct vm_fault *vmf);
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
/* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index ba4a32b132ba..2174814273e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -420,11 +420,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&mic->bridge);
- if (ret) {
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- return ret;
- }
+ drm_bridge_add(&mic->bridge);
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 53e03f8af3d5..e6b0940b1ac2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -161,9 +161,9 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.atomic_flush = exynos_crtc_handle_event,
};
-static void vidi_fake_vblank_timer(unsigned long arg)
+static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = (void *)arg;
+ struct vidi_context *ctx = from_timer(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
@@ -449,7 +449,7 @@ static int vidi_probe(struct platform_device *pdev)
ctx->pdev = pdev;
- setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
+ timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 0109ff40b1db..82d1b7e2febe 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -40,7 +40,7 @@
#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-
+#include <sound/hdmi-codec.h>
#include <drm/exynos_drm.h>
#include <media/cec-notifier.h>
@@ -111,15 +111,20 @@ struct hdmi_driver_data {
struct string_array_spec clk_muxes;
};
+struct hdmi_audio {
+ struct platform_device *pdev;
+ struct hdmi_audio_infoframe infoframe;
+ struct hdmi_codec_params params;
+ bool mute;
+};
+
struct hdmi_context {
struct drm_encoder encoder;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- bool powered;
bool dvi_mode;
struct delayed_work hotplug_work;
- struct drm_display_mode current_mode;
struct cec_notifier *notifier;
const struct hdmi_driver_data *drv_data;
@@ -137,6 +142,11 @@ struct hdmi_context {
struct regulator *reg_hdmi_en;
struct exynos_drm_clk phy_clk;
struct drm_bridge *bridge;
+
+ /* mutex protecting subsequent fields below */
+ struct mutex mutex;
+ struct hdmi_audio audio;
+ bool powered;
};
static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -298,6 +308,15 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
},
{
+ .pixel_clock = 85500000,
+ .conf = {
+ 0x01, 0xd1, 0x24, 0x11, 0x40, 0x40, 0xd0, 0x08,
+ 0x84, 0xa0, 0xd6, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x90, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
.pixel_clock = 106500000,
.conf = {
0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
@@ -768,8 +787,25 @@ static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
return ret;
}
+static int hdmi_audio_infoframe_apply(struct hdmi_context *hdata)
+{
+ struct hdmi_audio_infoframe *infoframe = &hdata->audio.infoframe;
+ u8 buf[HDMI_INFOFRAME_SIZE(AUDIO)];
+ int len;
+
+ len = hdmi_audio_infoframe_pack(infoframe, buf, sizeof(buf));
+ if (len < 0)
+ return len;
+
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC);
+ hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, len);
+
+ return 0;
+}
+
static void hdmi_reg_infoframes(struct hdmi_context *hdata)
{
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
union hdmi_infoframe frm;
u8 buf[25];
int ret;
@@ -783,8 +819,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
return;
}
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
- &hdata->current_mode, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, m, false);
if (!ret)
ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
if (ret > 0) {
@@ -794,8 +829,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
DRM_INFO("%s: invalid AVI infoframe (%d)\n", __func__, ret);
}
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi,
- &hdata->current_mode);
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi, m);
if (!ret)
ret = hdmi_vendor_infoframe_pack(&frm.vendor.hdmi, buf,
sizeof(buf));
@@ -805,15 +839,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3);
}
- ret = hdmi_audio_infoframe_init(&frm.audio);
- if (!ret) {
- frm.audio.channels = 2;
- ret = hdmi_audio_infoframe_pack(&frm.audio, buf, sizeof(buf));
- }
- if (ret > 0) {
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC);
- hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, ret);
- }
+ hdmi_audio_infoframe_apply(hdata);
}
static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
@@ -1003,23 +1029,18 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u32 freq)
hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
}
-static void hdmi_audio_init(struct hdmi_context *hdata)
+static void hdmi_audio_config(struct hdmi_context *hdata)
{
- u32 sample_rate, bits_per_sample;
- u32 data_num, bit_ch, sample_frq;
- u32 val;
-
- sample_rate = 44100;
- bits_per_sample = 16;
+ u32 bit_ch = 1;
+ u32 data_num, val;
+ int i;
- switch (bits_per_sample) {
+ switch (hdata->audio.params.sample_width) {
case 20:
data_num = 2;
- bit_ch = 1;
break;
case 24:
data_num = 3;
- bit_ch = 1;
break;
default:
data_num = 1;
@@ -1027,7 +1048,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
break;
}
- hdmi_reg_acr(hdata, sample_rate);
+ hdmi_reg_acr(hdata, hdata->audio.params.sample_rate);
hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
| HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
@@ -1037,12 +1058,6 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
| HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);
hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);
-
- sample_frq = (sample_rate == 44100) ? 0 :
- (sample_rate == 48000) ? 2 :
- (sample_rate == 32000) ? 3 :
- (sample_rate == 96000) ? 0xa : 0x0;
-
hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);
@@ -1066,39 +1081,33 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
| HDMI_I2S_SET_SDATA_BIT(data_num)
| HDMI_I2S_BASIC_FORMAT);
- /* Configure register related to CUV information */
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0
- | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH
- | HDMI_I2S_COPYRIGHT
- | HDMI_I2S_LINEAR_PCM
- | HDMI_I2S_CONSUMER_FORMAT);
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER);
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0));
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2
- | HDMI_I2S_SET_SMP_FREQ(sample_frq));
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4,
- HDMI_I2S_ORG_SMP_FREQ_44_1
- | HDMI_I2S_WORD_LEN_MAX24_24BITS
- | HDMI_I2S_WORD_LEN_MAX_24BITS);
+ /* Configuration of the audio channel status registers */
+ for (i = 0; i < HDMI_I2S_CH_ST_MAXNUM; i++)
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST(i),
+ hdata->audio.params.iec.status[i]);
hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
}
-static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
+static void hdmi_audio_control(struct hdmi_context *hdata)
{
+ bool enable = !hdata->audio.mute;
+
if (hdata->dvi_mode)
return;
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
- hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ?
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, enable ?
+ HDMI_AVI_CON_EVERY_VSYNC : HDMI_AUI_CON_NO_TRAN);
+ hdmi_reg_writemask(hdata, HDMI_CON_0, enable ?
HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
}
static void hdmi_start(struct hdmi_context *hdata, bool start)
{
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
u32 val = start ? HDMI_TG_EN : 0;
- if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
val |= HDMI_FIELD_EN;
hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN);
@@ -1168,7 +1177,7 @@ static void hdmiphy_wait_for_pll(struct hdmi_context *hdata)
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
{
- struct drm_display_mode *m = &hdata->current_mode;
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
unsigned int val;
hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
@@ -1247,7 +1256,19 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
{
- struct drm_display_mode *m = &hdata->current_mode;
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
+ struct drm_display_mode *am =
+ &hdata->encoder.crtc->state->adjusted_mode;
+ int hquirk = 0;
+
+ /*
+ * In case video mode coming from CRTC differs from requested one HDMI
+ * sometimes is able to almost properly perform conversion - only
+ * first line is distorted.
+ */
+ if ((m->vdisplay != am->vdisplay) &&
+ (m->hdisplay == 1280 || m->hdisplay == 1024 || m->hdisplay == 1366))
+ hquirk = 258;
hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
@@ -1341,8 +1362,9 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
- hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
- hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2,
+ m->htotal - m->hdisplay - hquirk);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay + hquirk);
hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
if (hdata->drv_data == &exynos5433_hdmi_driver_data)
hdmi_reg_writeb(hdata, HDMI_TG_DECON_EN, 1);
@@ -1380,10 +1402,11 @@ static void hdmiphy_enable_mode_set(struct hdmi_context *hdata, bool enable)
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
int ret;
const u8 *phy_conf;
- ret = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
+ ret = hdmi_find_phy_conf(hdata, m->clock * 1000);
if (ret < 0) {
DRM_ERROR("failed to find hdmiphy conf\n");
return;
@@ -1406,28 +1429,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
hdmiphy_wait_for_pll(hdata);
}
+/* Should be called with hdata->mutex mutex held */
static void hdmi_conf_apply(struct hdmi_context *hdata)
{
hdmi_start(hdata, false);
hdmi_conf_init(hdata);
- hdmi_audio_init(hdata);
+ hdmi_audio_config(hdata);
hdmi_mode_apply(hdata);
- hdmi_audio_control(hdata, true);
-}
-
-static void hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct hdmi_context *hdata = encoder_to_hdmi(encoder);
- struct drm_display_mode *m = adjusted_mode;
-
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
- m->hdisplay, m->vdisplay,
- m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
- "INTERLACED" : "PROGRESSIVE");
-
- drm_mode_copy(&hdata->current_mode, m);
+ hdmi_audio_control(hdata);
}
static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
@@ -1439,6 +1448,7 @@ static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
SYSREG_HDMI_REFCLK_INT_CLK, on ? ~0 : 0);
}
+/* Should be called with hdata->mutex mutex held. */
static void hdmiphy_enable(struct hdmi_context *hdata)
{
if (hdata->powered)
@@ -1461,6 +1471,7 @@ static void hdmiphy_enable(struct hdmi_context *hdata)
hdata->powered = true;
}
+/* Should be called with hdata->mutex mutex held. */
static void hdmiphy_disable(struct hdmi_context *hdata)
{
if (!hdata->powered)
@@ -1486,33 +1497,42 @@ static void hdmi_enable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+ mutex_lock(&hdata->mutex);
+
hdmiphy_enable(hdata);
hdmi_conf_apply(hdata);
+
+ mutex_unlock(&hdata->mutex);
}
static void hdmi_disable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
- if (!hdata->powered)
+ mutex_lock(&hdata->mutex);
+
+ if (hdata->powered) {
+ /*
+ * The SFRs of VP and Mixer are updated by Vertical Sync of
+ * Timing generator which is a part of HDMI so the sequence
+ * to disable TV Subsystem should be as following,
+ * VP -> Mixer -> HDMI
+ *
+ * To achieve such sequence HDMI is disabled together with
+ * HDMI PHY, via pipe clock callback.
+ */
+ mutex_unlock(&hdata->mutex);
+ cancel_delayed_work(&hdata->hotplug_work);
+ cec_notifier_set_phys_addr(hdata->notifier,
+ CEC_PHYS_ADDR_INVALID);
return;
+ }
- /*
- * The SFRs of VP and Mixer are updated by Vertical Sync of
- * Timing generator which is a part of HDMI so the sequence
- * to disable TV Subsystem should be as following,
- * VP -> Mixer -> HDMI
- *
- * To achieve such sequence HDMI is disabled together with HDMI PHY, via
- * pipe clock callback.
- */
- cancel_delayed_work(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
+ mutex_unlock(&hdata->mutex);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
.mode_fixup = hdmi_mode_fixup,
- .mode_set = hdmi_mode_set,
.enable = hdmi_enable,
.disable = hdmi_disable,
};
@@ -1521,6 +1541,99 @@ static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+
+ mutex_lock(&hdata->mutex);
+
+ hdata->audio.mute = true;
+
+ if (hdata->powered)
+ hdmi_audio_control(hdata);
+
+ mutex_unlock(&hdata->mutex);
+}
+
+static int hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+
+ if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv ||
+ daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+ daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdata->mutex);
+
+ hdata->audio.params = *params;
+
+ if (hdata->powered) {
+ hdmi_audio_config(hdata);
+ hdmi_audio_infoframe_apply(hdata);
+ }
+
+ mutex_unlock(&hdata->mutex);
+
+ return 0;
+}
+
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool mute)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+
+ mutex_lock(&hdata->mutex);
+
+ hdata->audio.mute = mute;
+
+ if (hdata->powered)
+ hdmi_audio_control(hdata);
+
+ mutex_unlock(&hdata->mutex);
+
+ return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ size_t len)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+ struct drm_connector *connector = &hdata->connector;
+
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = hdmi_audio_hw_params,
+ .audio_shutdown = hdmi_audio_shutdown,
+ .digital_mute = hdmi_audio_digital_mute,
+ .get_eld = hdmi_audio_get_eld,
+};
+
+static int hdmi_register_audio_device(struct hdmi_context *hdata)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 6,
+ .i2s = 1,
+ };
+
+ hdata->audio.pdev = platform_device_register_data(
+ hdata->dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(hdata->audio.pdev);
+}
+
static void hdmi_hotplug_work_func(struct work_struct *work)
{
struct hdmi_context *hdata;
@@ -1596,11 +1709,14 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
{
struct hdmi_context *hdata = container_of(clk, struct hdmi_context,
phy_clk);
+ mutex_lock(&hdata->mutex);
if (enable)
hdmiphy_enable(hdata);
else
hdmiphy_disable(hdata);
+
+ mutex_unlock(&hdata->mutex);
}
static int hdmi_bridge_init(struct hdmi_context *hdata)
@@ -1811,6 +1927,7 @@ out:
static int hdmi_probe(struct platform_device *pdev)
{
+ struct hdmi_audio_infoframe *audio_infoframe;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
struct resource *res;
@@ -1826,6 +1943,8 @@ static int hdmi_probe(struct platform_device *pdev)
hdata->dev = dev;
+ mutex_init(&hdata->mutex);
+
ret = hdmi_resources_init(hdata);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -1885,12 +2004,26 @@ static int hdmi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- ret = component_add(&pdev->dev, &hdmi_component_ops);
+ audio_infoframe = &hdata->audio.infoframe;
+ hdmi_audio_infoframe_init(audio_infoframe);
+ audio_infoframe->coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+ audio_infoframe->sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+ audio_infoframe->sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+ audio_infoframe->channels = 2;
+
+ ret = hdmi_register_audio_device(hdata);
if (ret)
goto err_notifier_put;
+ ret = component_add(&pdev->dev, &hdmi_component_ops);
+ if (ret)
+ goto err_unregister_audio;
+
return ret;
+err_unregister_audio:
+ platform_device_unregister(hdata->audio.pdev);
+
err_notifier_put:
cec_notifier_put(hdata->notifier);
pm_runtime_disable(dev);
@@ -1914,6 +2047,7 @@ static int hdmi_remove(struct platform_device *pdev)
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
+ platform_device_unregister(hdata->audio.pdev);
cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
@@ -1929,6 +2063,8 @@ static int hdmi_remove(struct platform_device *pdev)
put_device(&hdata->ddc_adpt->dev);
+ mutex_destroy(&hdata->mutex);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 002755415e00..dc5d79465f9b 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -67,19 +67,6 @@
#define MXR_FORMAT_ARGB4444 6
#define MXR_FORMAT_ARGB8888 7
-struct mixer_resources {
- int irq;
- void __iomem *mixer_regs;
- void __iomem *vp_regs;
- spinlock_t reg_slock;
- struct clk *mixer;
- struct clk *vp;
- struct clk *hdmi;
- struct clk *sclk_mixer;
- struct clk *sclk_hdmi;
- struct clk *mout_mixer;
-};
-
enum mixer_version_id {
MXR_VER_0_0_0_16,
MXR_VER_16_0_33_0,
@@ -117,8 +104,18 @@ struct mixer_context {
struct exynos_drm_plane planes[MIXER_WIN_NR];
unsigned long flags;
- struct mixer_resources mixer_res;
+ int irq;
+ void __iomem *mixer_regs;
+ void __iomem *vp_regs;
+ spinlock_t reg_slock;
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *hdmi;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *mout_mixer;
enum mixer_version_id mxr_ver;
+ int scan_value;
};
struct mixer_drv_data {
@@ -194,44 +191,44 @@ static inline bool is_alpha_format(unsigned int pixel_format)
}
}
-static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+static inline u32 vp_reg_read(struct mixer_context *ctx, u32 reg_id)
{
- return readl(res->vp_regs + reg_id);
+ return readl(ctx->vp_regs + reg_id);
}
-static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+static inline void vp_reg_write(struct mixer_context *ctx, u32 reg_id,
u32 val)
{
- writel(val, res->vp_regs + reg_id);
+ writel(val, ctx->vp_regs + reg_id);
}
-static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+static inline void vp_reg_writemask(struct mixer_context *ctx, u32 reg_id,
u32 val, u32 mask)
{
- u32 old = vp_reg_read(res, reg_id);
+ u32 old = vp_reg_read(ctx, reg_id);
val = (val & mask) | (old & ~mask);
- writel(val, res->vp_regs + reg_id);
+ writel(val, ctx->vp_regs + reg_id);
}
-static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+static inline u32 mixer_reg_read(struct mixer_context *ctx, u32 reg_id)
{
- return readl(res->mixer_regs + reg_id);
+ return readl(ctx->mixer_regs + reg_id);
}
-static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+static inline void mixer_reg_write(struct mixer_context *ctx, u32 reg_id,
u32 val)
{
- writel(val, res->mixer_regs + reg_id);
+ writel(val, ctx->mixer_regs + reg_id);
}
-static inline void mixer_reg_writemask(struct mixer_resources *res,
+static inline void mixer_reg_writemask(struct mixer_context *ctx,
u32 reg_id, u32 val, u32 mask)
{
- u32 old = mixer_reg_read(res, reg_id);
+ u32 old = mixer_reg_read(ctx, reg_id);
val = (val & mask) | (old & ~mask);
- writel(val, res->mixer_regs + reg_id);
+ writel(val, ctx->mixer_regs + reg_id);
}
static void mixer_regs_dump(struct mixer_context *ctx)
@@ -239,7 +236,7 @@ static void mixer_regs_dump(struct mixer_context *ctx)
#define DUMPREG(reg_id) \
do { \
DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+ (u32)readl(ctx->mixer_regs + reg_id)); \
} while (0)
DUMPREG(MXR_STATUS);
@@ -271,7 +268,7 @@ static void vp_regs_dump(struct mixer_context *ctx)
#define DUMPREG(reg_id) \
do { \
DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+ (u32) readl(ctx->vp_regs + reg_id)); \
} while (0)
DUMPREG(VP_ENABLE);
@@ -301,7 +298,7 @@ do { \
#undef DUMPREG
}
-static inline void vp_filter_set(struct mixer_resources *res,
+static inline void vp_filter_set(struct mixer_context *ctx,
int reg_id, const u8 *data, unsigned int size)
{
/* assure 4-byte align */
@@ -309,24 +306,23 @@ static inline void vp_filter_set(struct mixer_resources *res,
for (; size; size -= 4, reg_id += 4, data += 4) {
u32 val = (data[0] << 24) | (data[1] << 16) |
(data[2] << 8) | data[3];
- vp_reg_write(res, reg_id, val);
+ vp_reg_write(ctx, reg_id, val);
}
}
-static void vp_default_filter(struct mixer_resources *res)
+static void vp_default_filter(struct mixer_context *ctx)
{
- vp_filter_set(res, VP_POLY8_Y0_LL,
+ vp_filter_set(ctx, VP_POLY8_Y0_LL,
filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
- vp_filter_set(res, VP_POLY4_Y0_LL,
+ vp_filter_set(ctx, VP_POLY4_Y0_LL,
filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
- vp_filter_set(res, VP_POLY4_C0_LL,
+ vp_filter_set(ctx, VP_POLY4_C0_LL,
filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
bool alpha)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
@@ -335,13 +331,12 @@ static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
val |= MXR_GRP_CFG_BLEND_PRE_MUL;
val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
}
- mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
val, MXR_GRP_CFG_MISC_MASK);
}
static void mixer_cfg_vp_blend(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
/*
@@ -351,51 +346,39 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx)
* support blending of the video layer through this.
*/
val = 0;
- mixer_reg_write(res, MXR_VIDEO_CFG, val);
+ mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
}
static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
/* block update on vsync */
- mixer_reg_writemask(res, MXR_STATUS, enable ?
+ mixer_reg_writemask(ctx, MXR_STATUS, enable ?
MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
- vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+ vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
VP_SHADOW_UPDATE_ENABLE : 0);
}
-static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
/* choosing between interlace and progressive mode */
val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ?
MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE;
- if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
- /* choosing between proper HD and SD mode */
- if (height <= 480)
- val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
- else if (height <= 576)
- val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
- else if (height <= 720)
- val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- else if (height <= 1080)
- val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
- else
- val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- }
+ if (ctx->mxr_ver == MXR_VER_128_0_0_184)
+ mixer_reg_write(ctx, MXR_RESOLUTION,
+ MXR_MXR_RES_HEIGHT(height) | MXR_MXR_RES_WIDTH(width));
+ else
+ val |= ctx->scan_value;
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}
static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
switch (height) {
@@ -408,45 +391,44 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
default:
val = MXR_CFG_RGB709_16_235;
/* Configure the BT.709 CSC matrix for full range RGB. */
- mixer_reg_write(res, MXR_CM_COEFF_Y,
+ mixer_reg_write(ctx, MXR_CM_COEFF_Y,
MXR_CSC_CT( 0.184, 0.614, 0.063) |
MXR_CM_COEFF_RGB_FULL);
- mixer_reg_write(res, MXR_CM_COEFF_CB,
+ mixer_reg_write(ctx, MXR_CM_COEFF_CB,
MXR_CSC_CT(-0.102, -0.338, 0.440));
- mixer_reg_write(res, MXR_CM_COEFF_CR,
+ mixer_reg_write(ctx, MXR_CM_COEFF_CR,
MXR_CSC_CT( 0.440, -0.399, -0.040));
break;
}
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}
static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
unsigned int priority, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val = enable ? ~0 : 0;
switch (win) {
case 0:
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
- mixer_reg_writemask(res, MXR_LAYER_CFG,
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_GRP0_VAL(priority),
MXR_LAYER_CFG_GRP0_MASK);
break;
case 1:
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
- mixer_reg_writemask(res, MXR_LAYER_CFG,
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_GRP1_VAL(priority),
MXR_LAYER_CFG_GRP1_MASK);
break;
case VP_DEFAULT_WIN:
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
- vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
- mixer_reg_writemask(res, MXR_CFG, val,
+ vp_reg_writemask(ctx, VP_ENABLE, val, VP_ENABLE_ON);
+ mixer_reg_writemask(ctx, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
- mixer_reg_writemask(res, MXR_LAYER_CFG,
+ mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_VP_VAL(priority),
MXR_LAYER_CFG_VP_MASK);
}
@@ -456,30 +438,34 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
static void mixer_run(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
}
static void mixer_stop(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
int timeout = 20;
- mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+ mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
- while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+ while (!(mixer_reg_read(ctx, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
--timeout)
usleep_range(10000, 12000);
}
+static void mixer_commit(struct mixer_context *ctx)
+{
+ struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
+
+ mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_run(ctx);
+}
+
static void vp_video_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
- struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
- struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
@@ -493,8 +479,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
+ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
if (is_tiled) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
@@ -503,63 +488,59 @@ static void vp_video_buffer(struct mixer_context *ctx,
chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
}
} else {
- __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
luma_addr[1] = 0;
chroma_addr[1] = 0;
}
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&ctx->reg_slock, flags);
/* interlace or progressive scan mode */
val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
- vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+ vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12);
val |= (is_tiled ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
- vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+ vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+ vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
/* chroma plane for NV12/NV21 is half the height of the luma plane */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+ vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, state->src.w);
- vp_reg_write(res, VP_SRC_HEIGHT, state->src.h);
- vp_reg_write(res, VP_SRC_H_POSITION,
+ vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
+ vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
+ vp_reg_write(ctx, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(state->src.x));
- vp_reg_write(res, VP_SRC_V_POSITION, state->src.y);
+ vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
- vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
- vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
+ vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
+ vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
- vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
- vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
+ vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
+ vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h);
- vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y);
+ vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
+ vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
}
- vp_reg_write(res, VP_H_RATIO, state->h_ratio);
- vp_reg_write(res, VP_V_RATIO, state->v_ratio);
+ vp_reg_write(ctx, VP_H_RATIO, state->h_ratio);
+ vp_reg_write(ctx, VP_V_RATIO, state->v_ratio);
- vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+ vp_reg_write(ctx, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
/* set buffer address to vp */
- vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
- vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
- vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
- vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+ vp_reg_write(ctx, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_reg_write(ctx, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_reg_write(ctx, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_reg_write(ctx, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
mixer_cfg_layer(ctx, plane->index, priority, true);
mixer_cfg_vp_blend(ctx);
- mixer_run(ctx);
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
vp_regs_dump(ctx);
@@ -567,9 +548,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
static void mixer_layer_update(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+ mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
static void mixer_graph_buffer(struct mixer_context *ctx,
@@ -577,8 +556,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
- struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
- struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
@@ -623,45 +600,30 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
+ (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
- else
- __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
-
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&ctx->reg_slock, flags);
/* setup format */
- mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
- mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+ mixer_reg_write(ctx, MXR_GRAPHIC_SPAN(win),
fb->pitches[0] / fb->format->cpp[0]);
- /* setup display size */
- if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
- win == DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(mode->vdisplay);
- val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
- mixer_reg_write(res, MXR_RESOLUTION, val);
- }
-
val = MXR_GRP_WH_WIDTH(state->src.w);
val |= MXR_GRP_WH_HEIGHT(state->src.h);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
- mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+ mixer_reg_write(ctx, MXR_GRAPHIC_WH(win), val);
/* setup offsets in display image */
val = MXR_GRP_DXY_DX(dst_x_offset);
val |= MXR_GRP_DXY_DY(dst_y_offset);
- mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+ mixer_reg_write(ctx, MXR_GRAPHIC_DXY(win), val);
/* set buffer address to mixer */
- mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+ mixer_reg_write(ctx, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
mixer_cfg_layer(ctx, win, priority, true);
mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->format->format));
@@ -670,22 +632,19 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
ctx->mxr_ver == MXR_VER_128_0_0_184)
mixer_layer_update(ctx);
- mixer_run(ctx);
-
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
}
static void vp_win_reset(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
unsigned int tries = 100;
- vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+ vp_reg_write(ctx, VP_SRESET, VP_SRESET_PROCESSING);
while (--tries) {
/* waiting until VP_SRESET_PROCESSING is 0 */
- if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+ if (~vp_reg_read(ctx, VP_SRESET) & VP_SRESET_PROCESSING)
break;
mdelay(10);
}
@@ -694,57 +653,55 @@ static void vp_win_reset(struct mixer_context *ctx)
static void mixer_win_reset(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&ctx->reg_slock, flags);
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
/* set output in RGB888 mode */
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
/* 16 beat burst in DMA */
- mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ mixer_reg_writemask(ctx, MXR_STATUS, MXR_STATUS_16_BURST,
MXR_STATUS_BURST_MASK);
/* reset default layer priority */
- mixer_reg_write(res, MXR_LAYER_CFG, 0);
+ mixer_reg_write(ctx, MXR_LAYER_CFG, 0);
/* set all background colors to RGB (0,0,0) */
- mixer_reg_write(res, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128));
- mixer_reg_write(res, MXR_BG_COLOR1, MXR_YCBCR_VAL(0, 128, 128));
- mixer_reg_write(res, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(ctx, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(ctx, MXR_BG_COLOR1, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(ctx, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128));
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
- vp_default_filter(res);
+ vp_default_filter(ctx);
}
/* disable all layers */
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+ mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
/* set all source image offsets to zero */
- mixer_reg_write(res, MXR_GRAPHIC_SXY(0), 0);
- mixer_reg_write(res, MXR_GRAPHIC_SXY(1), 0);
+ mixer_reg_write(ctx, MXR_GRAPHIC_SXY(0), 0);
+ mixer_reg_write(ctx, MXR_GRAPHIC_SXY(1), 0);
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&ctx->reg_slock, flags);
}
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct mixer_context *ctx = arg;
- struct mixer_resources *res = &ctx->mixer_res;
u32 val, base, shadow;
- spin_lock(&res->reg_slock);
+ spin_lock(&ctx->reg_slock);
/* read interrupt status for handling and clearing flags for VSYNC */
- val = mixer_reg_read(res, MXR_INT_STATUS);
+ val = mixer_reg_read(ctx, MXR_INT_STATUS);
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
@@ -754,13 +711,13 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* interlace scan need to check shadow register */
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
- base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
- shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
if (base != shadow)
goto out;
- base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
- shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
if (base != shadow)
goto out;
}
@@ -770,9 +727,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
out:
/* clear interrupts */
- mixer_reg_write(res, MXR_INT_STATUS, val);
+ mixer_reg_write(ctx, MXR_INT_STATUS, val);
- spin_unlock(&res->reg_slock);
+ spin_unlock(&ctx->reg_slock);
return IRQ_HANDLED;
}
@@ -780,26 +737,25 @@ out:
static int mixer_resources_init(struct mixer_context *mixer_ctx)
{
struct device *dev = &mixer_ctx->pdev->dev;
- struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
int ret;
- spin_lock_init(&mixer_res->reg_slock);
+ spin_lock_init(&mixer_ctx->reg_slock);
- mixer_res->mixer = devm_clk_get(dev, "mixer");
- if (IS_ERR(mixer_res->mixer)) {
+ mixer_ctx->mixer = devm_clk_get(dev, "mixer");
+ if (IS_ERR(mixer_ctx->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
return -ENODEV;
}
- mixer_res->hdmi = devm_clk_get(dev, "hdmi");
- if (IS_ERR(mixer_res->hdmi)) {
+ mixer_ctx->hdmi = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(mixer_ctx->hdmi)) {
dev_err(dev, "failed to get clock 'hdmi'\n");
- return PTR_ERR(mixer_res->hdmi);
+ return PTR_ERR(mixer_ctx->hdmi);
}
- mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
- if (IS_ERR(mixer_res->sclk_hdmi)) {
+ mixer_ctx->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+ if (IS_ERR(mixer_ctx->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
return -ENODEV;
}
@@ -809,9 +765,9 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
return -ENXIO;
}
- mixer_res->mixer_regs = devm_ioremap(dev, res->start,
+ mixer_ctx->mixer_regs = devm_ioremap(dev, res->start,
resource_size(res));
- if (mixer_res->mixer_regs == NULL) {
+ if (mixer_ctx->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
return -ENXIO;
}
@@ -828,7 +784,7 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
dev_err(dev, "request interrupt failed.\n");
return ret;
}
- mixer_res->irq = res->start;
+ mixer_ctx->irq = res->start;
return 0;
}
@@ -836,30 +792,29 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
static int vp_resources_init(struct mixer_context *mixer_ctx)
{
struct device *dev = &mixer_ctx->pdev->dev;
- struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
- mixer_res->vp = devm_clk_get(dev, "vp");
- if (IS_ERR(mixer_res->vp)) {
+ mixer_ctx->vp = devm_clk_get(dev, "vp");
+ if (IS_ERR(mixer_ctx->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
return -ENODEV;
}
if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) {
- mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
- if (IS_ERR(mixer_res->sclk_mixer)) {
+ mixer_ctx->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ if (IS_ERR(mixer_ctx->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
return -ENODEV;
}
- mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
- if (IS_ERR(mixer_res->mout_mixer)) {
+ mixer_ctx->mout_mixer = devm_clk_get(dev, "mout_mixer");
+ if (IS_ERR(mixer_ctx->mout_mixer)) {
dev_err(dev, "failed to get clock 'mout_mixer'\n");
return -ENODEV;
}
- if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
- clk_set_parent(mixer_res->mout_mixer,
- mixer_res->sclk_hdmi);
+ if (mixer_ctx->sclk_hdmi && mixer_ctx->mout_mixer)
+ clk_set_parent(mixer_ctx->mout_mixer,
+ mixer_ctx->sclk_hdmi);
}
res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
@@ -868,9 +823,9 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
return -ENXIO;
}
- mixer_res->vp_regs = devm_ioremap(dev, res->start,
+ mixer_ctx->vp_regs = devm_ioremap(dev, res->start,
resource_size(res));
- if (mixer_res->vp_regs == NULL) {
+ if (mixer_ctx->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
return -ENXIO;
}
@@ -914,15 +869,14 @@ static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
__set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return 0;
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
- mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
@@ -930,7 +884,6 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
__clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
@@ -938,8 +891,8 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
return;
/* disable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
- mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
@@ -972,7 +925,6 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
DRM_DEBUG_KMS("win: %d\n", plane->index);
@@ -980,9 +932,9 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&mixer_ctx->reg_slock, flags);
mixer_cfg_layer(mixer_ctx, plane->index, 0, false);
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&mixer_ctx->reg_slock, flags);
}
static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -999,7 +951,6 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
static void mixer_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
- struct mixer_resources *res = &ctx->mixer_res;
if (test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
@@ -1010,14 +961,17 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
mixer_vsync_set_update(ctx, false);
- mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
- mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
- mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(ctx, MXR_INT_STATUS, ~0,
+ MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
}
mixer_win_reset(ctx);
+ mixer_commit(ctx);
+
mixer_vsync_set_update(ctx, true);
set_bit(MXR_BIT_POWERED, &ctx->flags);
@@ -1044,26 +998,75 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
clear_bit(MXR_BIT_POWERED, &ctx->flags);
}
-/* Only valid for Mixer version 16.0.33.0 */
-static int mixer_atomic_check(struct exynos_drm_crtc *crtc,
- struct drm_crtc_state *state)
+static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- struct drm_display_mode *mode = &state->adjusted_mode;
- u32 w, h;
+ struct mixer_context *ctx = crtc->ctx;
+ u32 w = mode->hdisplay, h = mode->vdisplay;
- w = mode->hdisplay;
- h = mode->vdisplay;
+ DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n", w, h,
+ mode->vrefresh, !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+ if (ctx->mxr_ver == MXR_VER_128_0_0_184)
+ return MODE_OK;
if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
- (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
- (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
- return 0;
+ (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+ (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+ return MODE_OK;
+
+ if ((w == 1024 && h == 768) ||
+ (w == 1366 && h == 768) ||
+ (w == 1280 && h == 1024))
+ return MODE_OK;
+
+ return MODE_BAD;
+}
+
+static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mixer_context *ctx = crtc->ctx;
+ int width = mode->hdisplay, height = mode->vdisplay, i;
+
+ struct {
+ int hdisplay, vdisplay, htotal, vtotal, scan_val;
+ } static const modes[] = {
+ { 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
+ { 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
+ { 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
+ { 1920, 1080, 2200, 1125, MXR_CFG_SCAN_HD_1080 |
+ MXR_CFG_SCAN_HD }
+ };
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
+ else
+ __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
+
+ if (ctx->mxr_ver == MXR_VER_128_0_0_184)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i)
+ if (width <= modes[i].hdisplay && height <= modes[i].vdisplay) {
+ ctx->scan_value = modes[i].scan_val;
+ if (width < modes[i].hdisplay ||
+ height < modes[i].vdisplay) {
+ adjusted_mode->hdisplay = modes[i].hdisplay;
+ adjusted_mode->hsync_start = modes[i].hdisplay;
+ adjusted_mode->hsync_end = modes[i].htotal;
+ adjusted_mode->htotal = modes[i].htotal;
+ adjusted_mode->vdisplay = modes[i].vdisplay;
+ adjusted_mode->vsync_start = modes[i].vdisplay;
+ adjusted_mode->vsync_end = modes[i].vtotal;
+ adjusted_mode->vtotal = modes[i].vtotal;
+ }
+
+ return true;
+ }
- return -EINVAL;
+ return false;
}
static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
@@ -1075,7 +1078,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.update_plane = mixer_update_plane,
.disable_plane = mixer_disable_plane,
.atomic_flush = mixer_atomic_flush,
- .atomic_check = mixer_atomic_check,
+ .mode_valid = mixer_mode_valid,
+ .mode_fixup = mixer_mode_fixup,
};
static const struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1217,14 +1221,13 @@ static int mixer_remove(struct platform_device *pdev)
static int __maybe_unused exynos_mixer_suspend(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
- struct mixer_resources *res = &ctx->mixer_res;
- clk_disable_unprepare(res->hdmi);
- clk_disable_unprepare(res->mixer);
+ clk_disable_unprepare(ctx->hdmi);
+ clk_disable_unprepare(ctx->mixer);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
- clk_disable_unprepare(res->vp);
+ clk_disable_unprepare(ctx->vp);
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags))
- clk_disable_unprepare(res->sclk_mixer);
+ clk_disable_unprepare(ctx->sclk_mixer);
}
return 0;
@@ -1233,28 +1236,27 @@ static int __maybe_unused exynos_mixer_suspend(struct device *dev)
static int __maybe_unused exynos_mixer_resume(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
- struct mixer_resources *res = &ctx->mixer_res;
int ret;
- ret = clk_prepare_enable(res->mixer);
+ ret = clk_prepare_enable(ctx->mixer);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
return ret;
}
- ret = clk_prepare_enable(res->hdmi);
+ ret = clk_prepare_enable(ctx->hdmi);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
return ret;
}
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
- ret = clk_prepare_enable(res->vp);
+ ret = clk_prepare_enable(ctx->vp);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
ret);
return ret;
}
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
- ret = clk_prepare_enable(res->sclk_mixer);
+ ret = clk_prepare_enable(ctx->sclk_mixer);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the " \
"sclk_mixer clk [%d]\n",
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index a0507dc18d9e..04be0f7e8193 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -419,11 +419,9 @@
#define HDMI_I2S_DSD_CON HDMI_I2S_BASE(0x01c)
#define HDMI_I2S_MUX_CON HDMI_I2S_BASE(0x020)
#define HDMI_I2S_CH_ST_CON HDMI_I2S_BASE(0x024)
-#define HDMI_I2S_CH_ST_0 HDMI_I2S_BASE(0x028)
-#define HDMI_I2S_CH_ST_1 HDMI_I2S_BASE(0x02c)
-#define HDMI_I2S_CH_ST_2 HDMI_I2S_BASE(0x030)
-#define HDMI_I2S_CH_ST_3 HDMI_I2S_BASE(0x034)
-#define HDMI_I2S_CH_ST_4 HDMI_I2S_BASE(0x038)
+/* n must be within range 0...(HDMI_I2S_CH_ST_MAXNUM - 1) */
+#define HDMI_I2S_CH_ST_MAXNUM 5
+#define HDMI_I2S_CH_ST(n) HDMI_I2S_BASE(0x028 + 4 * (n))
#define HDMI_I2S_CH_ST_SH_0 HDMI_I2S_BASE(0x03c)
#define HDMI_I2S_CH_ST_SH_1 HDMI_I2S_BASE(0x040)
#define HDMI_I2S_CH_ST_SH_2 HDMI_I2S_BASE(0x044)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 58e9e0601a61..faf17b83b910 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
return PTR_ERR(fsl_dev->state);
}
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_disable_unprepare(fsl_dev->clk);
return 0;
@@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
console_lock();
@@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm);
- enable_irq(fsl_dev->irq);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index d9d6cc1c8e39..ddc68e476a4d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
@@ -20,7 +21,7 @@
static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
};
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index edd7d8127d19..c54806d08dd7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
{
struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
- struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
int ret;
fsl_dev->connector.encoder = encoder;
@@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_sysfs;
- drm_object_property_set_value(&connector->base,
- mode_config->dpms_property,
- DRM_MODE_DPMS_OFF);
-
ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index c52f9adf5e04..a4bb89b7878f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1901,10 +1901,8 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
if (is_edp(gma_encoder)) {
/* cdv_intel_panel_destroy_backlight(connector->dev); */
- if (intel_dp->panel_fixed_mode) {
- kfree(intel_dp->panel_fixed_mode);
- intel_dp->panel_fixed_mode = NULL;
- }
+ kfree(intel_dp->panel_fixed_mode);
+ intel_dp->panel_fixed_mode = NULL;
}
i2c_del_adapter(&intel_dp->adapter);
drm_connector_unregister(connector);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 531e4450c000..5c066448be5b 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -99,7 +99,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
/* Wait for for the pipe enable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_PIPE_STATE) == 1)
+ if (temp & PIPEACONF_PIPE_STATE)
break;
}
}
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index d75ecb3bdee7..1fa163373a47 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -237,7 +237,7 @@ static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
if (!gct)
- return -1;
+ return -ENOMEM;
gct_virtual = ioremap(addr + sizeof(vbt),
sizeof(*gct) * vbt.panel_count);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index e787d376ba67..84507912be84 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -37,6 +37,7 @@
#include "psb_drv.h"
#include "psb_intel_sdvo_regs.h"
#include "psb_intel_reg.h"
+#include <linux/kernel.h>
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
@@ -62,8 +63,6 @@ static const char *tv_format_names[] = {
"SECAM_60"
};
-#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-
struct psb_intel_sdvo {
struct gma_encoder base;
@@ -148,7 +147,7 @@ struct psb_intel_sdvo_connector {
int force_audio;
/* This contains all current supported TV format */
- u8 tv_format_supported[TV_FORMAT_NUM];
+ u8 tv_format_supported[ARRAY_SIZE(tv_format_names)];
int format_supported_num;
struct drm_property *tv_format;
@@ -1709,7 +1708,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == psb_intel_sdvo_connector->tv_format) {
- if (val >= TV_FORMAT_NUM)
+ if (val >= ARRAY_SIZE(tv_format_names))
return -EINVAL;
if (psb_intel_sdvo->tv_format_index ==
@@ -2269,7 +2268,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
return false;
psb_intel_sdvo_connector->format_supported_num = 0;
- for (i = 0 ; i < TV_FORMAT_NUM; i++)
+ for (i = 0 ; i < ARRAY_SIZE(tv_format_names); i++)
if (format_map & (1 << i))
psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index ec4dd9df9150..f4eba87c96f3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -36,7 +36,7 @@ static int hibmc_connector_mode_valid(struct drm_connector *connector,
static struct drm_encoder *
hibmc_connector_best_encoder(struct drm_connector *connector)
{
- return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
static const struct drm_connector_helper_funcs
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 9823477b1855..2269be91f3e1 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
+ ade_ldi_set_mode(acrtc, mode, adj_mode);
}
static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index e27352ca26c4..ddb0403f1975 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -22,6 +22,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
@@ -56,7 +57,7 @@ static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
}
static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = kirin_fbdev_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -236,8 +237,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
}
remote = of_graph_get_remote_node(np, 0, 0);
- if (IS_ERR(remote))
- return PTR_ERR(remote);
+ if (!remote)
+ return -ENODEV;
drm_of_component_match_add(dev, &match, compare_of, remote);
of_node_put(remote);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index e9e8ae2ec06b..544a8a2d3562 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -485,7 +485,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
return 0;
}
-static struct i2c_device_id ch7006_ids[] = {
+static const struct i2c_device_id ch7006_ids[] = {
{ "ch7006", 0 },
{ }
};
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index db0b03fb0ff1..ecaa58757529 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -415,7 +415,7 @@ sil164_encoder_init(struct i2c_client *client,
return 0;
}
-static struct i2c_device_id sil164_ids[] = {
+static const struct i2c_device_id sil164_ids[] = {
{ "sil164", 0 },
{ }
};
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 54e3255dde13..127815253a84 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -601,9 +601,9 @@ tda998x_reset(struct tda998x_priv *priv)
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
-static void tda998x_edid_delay_done(unsigned long data)
+static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = (struct tda998x_priv *)data;
+ struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
@@ -1492,8 +1492,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
- setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
- (unsigned long)priv);
+ timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
/* wake up the device: */
@@ -1746,7 +1745,7 @@ static const struct of_device_id tda998x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
#endif
-static struct i2c_device_id tda998x_ids[] = {
+static const struct i2c_device_id tda998x_ids[] = {
{ "tda998x", 0 },
{ }
};
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index e9e64e8e9765..dfd95889f4b7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -12,6 +12,7 @@ config DRM_I915
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
+ select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2e034efc4d6d..2acf3b3c5f9d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -48,6 +48,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_tiling.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
+ i915_gemfs.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
@@ -60,9 +61,11 @@ i915-y += i915_cmd_parser.o \
# general-purpose microcontroller (GuC) support
i915-y += intel_uc.o \
+ intel_uc_fw.o \
+ intel_guc.o \
intel_guc_ct.o \
intel_guc_log.o \
- intel_guc_loader.o \
+ intel_guc_fw.o \
intel_huc.o \
i915_guc_submission.o
@@ -140,7 +143,8 @@ i915-y += i915_perf.o \
i915_oa_bxt.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
- i915_oa_glk.o
+ i915_oa_glk.o \
+ i915_oa_cflgt2.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
@@ -151,5 +155,3 @@ endif
i915-y += intel_lpe_audio.o
obj-$(CONFIG_DRM_I915) += i915.o
-
-CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index ca3d1925beda..7c9ec4f4f36c 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -173,8 +173,8 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
- list_add_tail(&reg->link,
- &dev_priv->mm.fence_list);
+ i915_unreserve_fence(reg);
+ vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -187,24 +187,19 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
int i;
- struct list_head *pos, *q;
intel_runtime_pm_get(dev_priv);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
- i = 0;
- list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
- reg = list_entry(pos, struct drm_i915_fence_reg, link);
- if (reg->pin_count || reg->vma)
- continue;
- list_del(pos);
+
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = i915_reserve_fence(dev_priv);
+ if (IS_ERR(reg))
+ goto out_free_fence;
+
vgpu->fence.regs[i] = reg;
- if (++i == vgpu_fence_sz(vgpu))
- break;
}
- if (i != vgpu_fence_sz(vgpu))
- goto out_free_fence;
_clear_vgpu_fence(vgpu);
@@ -212,13 +207,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_put(dev_priv);
return 0;
out_free_fence:
+ gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
if (!reg)
continue;
- list_add_tail(&reg->link,
- &dev_priv->mm.fence_list);
+ i915_unreserve_fence(reg);
+ vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ff3154fe6588..4ce2e6bd0680 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -101,7 +101,7 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -110,13 +110,25 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
- u64 first_gfn, first_mfn;
+ phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
+ unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
+ u64 first_gfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
+ if (map) {
+ vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
+ MEMREMAP_WC);
+ if (!vgpu->gm.aperture_va)
+ return -ENOMEM;
+ } else {
+ memunmap(vgpu->gm.aperture_va);
+ vgpu->gm.aperture_va = NULL;
+ }
+
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -124,14 +136,16 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
- first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
- first_mfn,
- vgpu_aperture_sz(vgpu) >>
- PAGE_SHIFT, map);
- if (ret)
+ aperture_pa >> PAGE_SHIFT,
+ aperture_sz >> PAGE_SHIFT,
+ map);
+ if (ret) {
+ memunmap(vgpu->gm.aperture_va);
+ vgpu->gm.aperture_va = NULL;
return ret;
+ }
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
@@ -194,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return 0;
}
+static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+ u32 new = *(u32 *)(p_data);
+
+ if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
+ /* We don't have rom, return size of 0. */
+ *pval = 0;
+ else
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -275,7 +303,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
@@ -286,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
switch (rounddown(offset, 4)) {
+ case PCI_ROM_ADDRESS:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
+
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
@@ -361,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
pci_resource_len(gvt->dev_priv->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+
+ memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d4726a3358a4..85d4c57870fb 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1576,11 +1576,11 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
-static uint32_t find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s)
{
unsigned long gma = 0;
struct cmd_info *info;
- uint32_t bb_size = 0;
+ int bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
- uint32_t bb_size;
+ int bb_size;
void *dst = NULL;
int ret = 0;
@@ -1637,6 +1637,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
/* get the size of the batch buffer */
bb_size = find_bb_size(s);
+ if (bb_size < 0)
+ return -EINVAL;
/* allocate shadow batch buffer */
entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
@@ -2603,7 +2605,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
- u32 *cs;
+ void *shadow_ring_buffer_va;
+ int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2616,34 +2619,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- /* allocate shadow ring buffer */
- cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
+ void *va = vgpu->reserve_ring_buffer_va[ring_id];
+ /* realloc the new ring buffer if needed */
+ vgpu->reserve_ring_buffer_va[ring_id] =
+ krealloc(va, workload->rb_len, GFP_KERNEL);
+ if (!vgpu->reserve_ring_buffer_va[ring_id]) {
+ gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+ return -ENOMEM;
+ }
+ vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+ }
+
+ shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
/* get shadow ring buffer va */
- workload->shadow_ring_buffer_va = cs;
+ workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_top, cs);
+ gma_head, gma_top, shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- cs += ret / sizeof(u32);
+ shadow_ring_buffer_va += ret;
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
- ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
+ shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- cs += ret / sizeof(u32);
- intel_ring_advance(workload->req, cs);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3c318439a659..355120865efd 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->type = type;
emulate_monitor_status_change(vgpu);
+ vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index e5320b4eb698..940cdaaa3f24 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct intel_shadow_bb_entry *entry_obj;
@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
- return;
+ return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (gmadr_bytes == 8)
entry_obj->bb_start_cmd_va[2] = 0;
}
+ return 0;
}
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -420,7 +421,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned char *per_ctx_va =
@@ -428,12 +429,12 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
- return;
+ return 0;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
- return;
+ return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
-}
-
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
-
- intel_vgpu_pin_mm(workload->shadow_mm);
- intel_vgpu_sync_oos_pages(workload->vgpu);
- intel_vgpu_flush_post_shadow(workload->vgpu);
- prepare_shadow_batch_buffer(workload);
- prepare_shadow_wa_ctx(&workload->wa_ctx);
- if (!workload->emulate_schedule_in)
- return 0;
-
- ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
- return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+ return 0;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -489,13 +471,68 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
}
-static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
- if (!wa_ctx->indirect_ctx.obj)
- return;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct execlist_ctx_descriptor_format ctx[2];
+ int ring_id = workload->ring_id;
+ int ret;
+
+ ret = intel_vgpu_pin_mm(workload->shadow_mm);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu pin mm\n");
+ goto out;
+ }
+
+ ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu sync oos pages\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to flush post shadow\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_batch_buffer(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+ goto err_shadow_batch;
+ }
+
+ if (!workload->emulate_schedule_in)
+ return 0;
- i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+ ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+ if (!ret)
+ goto out;
+ else
+ gvt_vgpu_err("fail to emulate execlist schedule in\n");
+
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+ release_shadow_batch_buffer(workload);
+err_unpin_mm:
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+out:
+ return ret;
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
@@ -511,8 +548,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- release_shadow_batch_buffer(workload);
- release_shadow_wa_ctx(&workload->wa_ctx);
+ if (!workload->status) {
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ }
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* if workload->status is not successful means HW GPU
@@ -819,10 +858,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
clean_workloads(vgpu, ALL_ENGINES);
kmem_cache_destroy(vgpu->workloads);
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ kfree(vgpu->reserve_ring_buffer_va[i]);
+ vgpu->reserve_ring_buffer_va[i] = NULL;
+ vgpu->reserve_ring_buffer_size[i] = 0;
+ }
+
}
+#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
@@ -842,7 +892,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
if (!vgpu->workloads)
return -ENOMEM;
+ /* each ring has a shadow ring buffer until vgpu destroyed */
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ vgpu->reserve_ring_buffer_va[i] =
+ kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
+ if (!vgpu->reserve_ring_buffer_va[i]) {
+ gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+ goto out;
+ }
+ vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+ }
return 0;
+out:
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (vgpu->reserve_ring_buffer_size[i]) {
+ kfree(vgpu->reserve_ring_buffer_va[i]);
+ vgpu->reserve_ring_buffer_va[i] = NULL;
+ vgpu->reserve_ring_buffer_size[i] = 0;
+ }
+ }
+ return -ENOMEM;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index e6dfc3331f4b..8e331142badb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
@@ -1647,14 +1647,13 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
return 0;
- atomic_inc(&mm->pincount);
-
if (!mm->shadowed) {
ret = shadow_mm(mm);
if (ret)
return ret;
}
+ atomic_inc(&mm->pincount);
list_del_init(&mm->lru_list);
list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
return 0;
@@ -1972,7 +1971,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
*/
se.val64 |= _PAGE_PRESENT | _PAGE_RW;
if (type == GTT_TYPE_PPGTT_PDE_PT)
- se.val64 |= PPAT_CACHED_INDEX;
+ se.val64 |= PPAT_CACHED;
for (i = 0; i < page_entry_num; i++)
ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index c27c6838eaca..aaa347f8620c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -111,7 +111,7 @@ static void init_device_info(struct intel_gvt *gvt)
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
|| IS_KABYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
- info->cfg_space_size = 256;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->gtt_start_offset = 8 * 1024 * 1024;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 44b719eda8c4..9c2e7c0aa38f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -80,6 +80,7 @@ struct intel_gvt_device_info {
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
+ void *aperture_va;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
@@ -99,7 +100,6 @@ struct intel_vgpu_mmio {
bool disable_warn_untrack;
};
-#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4
struct intel_vgpu_pci_bar {
@@ -108,7 +108,7 @@ struct intel_vgpu_pci_bar {
};
struct intel_vgpu_cfg_space {
- unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+ unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};
@@ -165,6 +165,9 @@ struct intel_vgpu {
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
+ /* 1/2K for each reserve ring buffer */
+ void *reserve_ring_buffer_va[I915_NUM_ENGINES];
+ int reserve_ring_buffer_size[I915_NUM_ENGINES];
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
@@ -474,6 +477,13 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
+{
+ /* We are 64bit bar. */
+ return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
+ PCI_BASE_ADDRESS_MEM_MASK;
+}
+
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a5bed2e71b92..1f840f6b81bb 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
return 0;
}
-static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+/**
+ * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * Ring ID on success, negative error code if failed.
+ */
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+ unsigned int offset)
{
enum intel_engine_id id;
struct intel_engine_cs *engine;
- reg &= ~GENMASK(11, 0);
+ offset &= ~GENMASK(11, 0);
for_each_engine(engine, gvt->dev_priv, id) {
- if (engine->mmio_base == reg)
+ if (engine->mmio_base == offset)
return id;
}
- return -1;
+ return -ENODEV;
}
#define offset_to_fence_num(offset) \
@@ -1381,40 +1390,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 v = *(u32 *)p_data;
-
- if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
- return intel_vgpu_default_mmio_write(vgpu,
- offset, p_data, bytes);
-
- switch (offset) {
- case 0x4ddc:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
- break;
- case 0x42080:
- /* bypass WaCompressedResourceDisplayNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
- break;
- case 0xe194:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
- break;
- case 0x7014:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -1432,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ring_id;
+ u32 ring_base;
+
+ ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+ /**
+ * Read HW reg in following case
+ * a. the offset isn't a ring mmio
+ * b. the offset's ring is running on hw.
+ * c. the offset is ring time stamp mmio
+ */
+ if (ring_id >= 0)
+ ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+ if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+ mmio_hw_access_pre(dev_priv);
+ vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+ mmio_hw_access_post(dev_priv);
+ }
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
@@ -1470,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data = *(u32 *)p_data;
- int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
write_vreg(vgpu, offset, p_data, bytes);
@@ -1671,8 +1664,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2557,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2607,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
MMIO_D(0x45504, D_SKL_PLUS);
MMIO_D(0x45520, D_SKL_PLUS);
MMIO_D(0x46000, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 83e88c70272a..96060920a6fe 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -609,21 +609,20 @@ static void intel_vgpu_release_work(struct work_struct *work)
__intel_vgpu_release(vgpu);
}
-static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
u32 start_lo, start_hi;
u32 mem_type;
- int pos = PCI_BASE_ADDRESS_0;
- start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
PCI_BASE_ADDRESS_MEM_MASK;
- mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
PCI_BASE_ADDRESS_MEM_TYPE_MASK;
switch (mem_type) {
case PCI_BASE_ADDRESS_MEM_TYPE_64:
start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
- + pos + 4));
+ + bar + 4));
break;
case PCI_BASE_ADDRESS_MEM_TYPE_32:
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
@@ -637,6 +636,21 @@ static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
return ((u64)start_hi << 32) | start_lo;
}
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+ void *buf, unsigned int count, bool is_write)
+{
+ uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+ int ret;
+
+ if (is_write)
+ ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ bar_start + off, buf, count);
+ else
+ ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ bar_start + off, buf, count);
+ return ret;
+}
+
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
@@ -661,20 +675,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
buf, count);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
- case VFIO_PCI_BAR1_REGION_INDEX:
- if (is_write) {
- uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
- ret = intel_gvt_ops->emulate_mmio_write(vgpu,
- bar0_start + pos, buf, count);
- } else {
- uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
- ret = intel_gvt_ops->emulate_mmio_read(vgpu,
- bar0_start + pos, buf, count);
- }
+ ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
+ buf, count, is_write);
break;
case VFIO_PCI_BAR2_REGION_INDEX:
+ ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
+ buf, count, is_write);
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
case VFIO_PCI_BAR3_REGION_INDEX:
case VFIO_PCI_BAR4_REGION_INDEX:
case VFIO_PCI_BAR5_REGION_INDEX:
@@ -970,7 +978,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
switch (info.index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
+ info.size = vgpu->gvt->device_info.cfg_space_size;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 980ec8906b1e..1e1310f50289 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -45,8 +45,7 @@
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
- u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
- ~GENMASK(3, 0);
+ u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
return gpa - gttmmio_gpa;
}
@@ -57,6 +56,38 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
+{
+ u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
+ u64 aperture_sz = vgpu_aperture_sz(vgpu);
+
+ return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
+}
+
+static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
+ void *pdata, unsigned int size, bool is_read)
+{
+ u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
+ u64 offset = gpa - aperture_gpa;
+
+ if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
+ gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
+ offset, size);
+ return -EINVAL;
+ }
+
+ if (!vgpu->gm.aperture_va) {
+ gvt_vgpu_err("BAR is not enabled\n");
+ return -ENXIO;
+ }
+
+ if (is_read)
+ memcpy(pdata, vgpu->gm.aperture_va + offset, size);
+ else
+ memcpy(vgpu->gm.aperture_va + offset, pdata, size);
+ return 0;
+}
+
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes, bool read)
{
@@ -133,6 +164,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
}
mutex_lock(&gvt->lock);
+ if (vgpu_gpa_is_aperture(vgpu, pa)) {
+ ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
@@ -224,6 +261,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_lock(&gvt->lock);
+ if (vgpu_gpa_is_aperture(vgpu, pa)) {
+ ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 32cd64ddad26..dbc04ad2c7a1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node;
};
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+ unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2ea542257f03..6d066cf35478 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -293,7 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
*/
if (mmio->in_context &&
((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
- i915.enable_execlists)
+ i915_modparams.enable_execlists)
continue;
if (mmio->mask)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 391800d2067b..69f8f0d155b9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -87,7 +87,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EINVAL;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
return i915_gem_context_force_single_submission(req->ctx);
}
+static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ i915_reg_t reg;
+
+ reg = RING_INSTDONE(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_ACTHD(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_ACTHD_UDW(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+}
+
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -140,9 +154,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;
+ unsigned long flags;
if (!is_gvt_request(req)) {
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
@@ -150,7 +165,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
return NOTIFY_OK;
}
@@ -161,7 +176,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,12 +185,16 @@ static int shadow_context_status_change(struct notifier_block *nb,
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
+ save_ring_hw_state(workload->vgpu, ring_id);
atomic_set(&workload->shadow_ctx_active, 0);
break;
+ case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+ save_ring_hw_state(workload->vgpu, ring_id);
+ break;
default:
WARN_ON(1);
return NOTIFY_OK;
@@ -201,6 +220,43 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
ce->lrc_desc = desc;
}
+static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ void *shadow_ring_buffer_va;
+ u32 *cs;
+
+ /* allocate shadow ring buffer */
+ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+ if (IS_ERR(cs)) {
+ gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
+ workload->rb_len);
+ return PTR_ERR(cs);
+ }
+
+ shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
+
+ /* get shadow ring buffer va */
+ workload->shadow_ring_buffer_va = cs;
+
+ memcpy(cs, shadow_ring_buffer_va,
+ workload->rb_len);
+
+ cs += workload->rb_len / sizeof(u32);
+ intel_ring_advance(workload->req, cs);
+
+ return 0;
+}
+
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ if (!wa_ctx->indirect_ctx.obj)
+ return;
+
+ i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -214,8 +270,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -231,35 +288,73 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
- rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto out;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_gem_request_get(rq);
-
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto out;
+ goto err_scan;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto out;
+ goto err_scan;
+ }
+
+ /* pin shadow context by gvt even the shadow context will be pinned
+ * when i915 alloc request. That is because gvt will update the guest
+ * context from shadow context when workload is completed, and at that
+ * moment, i915 may already unpined the shadow context to make the
+ * shadow_ctx pages invalid. So gvt need to pin itself. After update
+ * the guest context, gvt can unpin the shadow_ctx safely.
+ */
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ gvt_vgpu_err("fail to pin shadow context\n");
+ goto err_shadow;
}
ret = populate_shadow_context(workload);
if (ret)
- goto out;
-
+ goto err_unpin;
workload->shadowed = true;
+ return 0;
-out:
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+ return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct drm_i915_gem_request *rq;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ int ret;
+
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+ workload->req = i915_gem_request_get(rq);
+ ret = copy_workload_to_ring_buffer(workload);
+ if (ret)
+ goto err_unpin;
+ return 0;
+
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
+ release_shadow_wa_ctx(&workload->wa_ctx);
return ret;
}
@@ -269,8 +364,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_ring *ring;
int ret = 0;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -284,22 +377,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (workload->prepare) {
ret = workload->prepare(workload);
- if (ret)
+ if (ret) {
+ engine->context_unpin(engine, shadow_ctx);
goto out;
- }
-
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = engine->context_pin(engine, shadow_ctx);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
- goto out;
+ }
}
out:
@@ -408,7 +489,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
@@ -676,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx);
+ if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
+ vgpu->shadow_ctx->priority = INT_MAX;
+
vgpu->shadow_ctx->engine[RCS].initialised = true;
bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 93a49eb0209e..b9f872204d7e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,4 +141,8 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e4d4b6b41e26..c65e381b85f3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
+#include "i915_guc_submission.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -67,7 +68,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
#undef PRINT_FLAG
kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
+#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
kernel_param_unlock(THIS_MODULE);
@@ -82,7 +83,7 @@ static char get_active_flag(struct drm_i915_gem_object *obj)
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
- return obj->pin_display ? 'p' : ' ';
+ return obj->pin_global ? 'p' : ' ';
}
static char get_tiling_flag(struct drm_i915_gem_object *obj)
@@ -97,7 +98,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+ return obj->userfault_count ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -118,6 +119,36 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
return size;
}
+static const char *
+stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
+{
+ size_t x = 0;
+
+ switch (page_sizes) {
+ case 0:
+ return "";
+ case I915_GTT_PAGE_SIZE_4K:
+ return "4K";
+ case I915_GTT_PAGE_SIZE_64K:
+ return "64K";
+ case I915_GTT_PAGE_SIZE_2M:
+ return "2M";
+ default:
+ if (!buf)
+ return "M";
+
+ if (page_sizes & I915_GTT_PAGE_SIZE_2M)
+ x += snprintf(buf + x, len - x, "2M, ");
+ if (page_sizes & I915_GTT_PAGE_SIZE_64K)
+ x += snprintf(buf + x, len - x, "64K, ");
+ if (page_sizes & I915_GTT_PAGE_SIZE_4K)
+ x += snprintf(buf + x, len - x, "4K, ");
+ buf[x-2] = '\0';
+
+ return buf;
+ }
+}
+
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -149,15 +180,16 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
pin_count++;
}
seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->pin_display)
- seq_printf(m, " (display)");
+ if (obj->pin_global)
+ seq_printf(m, " (global)");
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
- seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+ seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
i915_vma_is_ggtt(vma) ? "g" : "pp",
- vma->node.start, vma->node.size);
+ vma->node.start, vma->node.size,
+ stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
if (i915_vma_is_ggtt(vma)) {
switch (vma->ggtt_view.type) {
case I915_GGTT_VIEW_NORMAL:
@@ -239,7 +271,9 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
goto out;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
if (count == total)
break;
@@ -251,7 +285,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
}
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
if (count == total)
break;
@@ -261,6 +295,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
objects[count++] = obj;
total_obj_size += obj->base.size;
}
+ spin_unlock(&dev_priv->mm.obj_lock);
sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
@@ -402,10 +437,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- u32 count, mapped_count, purgeable_count, dpy_count;
- u64 size, mapped_size, purgeable_size, dpy_size;
+ u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
+ u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
struct drm_i915_gem_object *obj;
+ unsigned int page_sizes = 0;
struct drm_file *file;
+ char buf[80];
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -419,7 +456,10 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ huge_size = huge_count = 0;
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
size += obj->base.size;
++count;
@@ -432,15 +472,21 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
mapped_count++;
mapped_size += obj->base.size;
}
+
+ if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ huge_count++;
+ huge_size += obj->base.size;
+ page_sizes |= obj->mm.page_sizes.sg;
+ }
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
size += obj->base.size;
++count;
- if (obj->pin_display) {
+ if (obj->pin_global) {
dpy_size += obj->base.size;
++dpy_count;
}
@@ -454,18 +500,33 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
mapped_count++;
mapped_size += obj->base.size;
}
+
+ if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ huge_count++;
+ huge_size += obj->base.size;
+ page_sizes |= obj->mm.page_sizes.sg;
+ }
}
+ spin_unlock(&dev_priv->mm.obj_lock);
+
seq_printf(m, "%u bound objects, %llu bytes\n",
count, size);
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
seq_printf(m, "%u mapped objects, %llu bytes\n",
mapped_count, mapped_size);
- seq_printf(m, "%u display objects (pinned), %llu bytes\n",
+ seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
+ huge_count,
+ stringify_page_sizes(page_sizes, buf, sizeof(buf)),
+ huge_size);
+ seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
dpy_count, dpy_size);
seq_printf(m, "%llu [%llu] gtt total\n",
ggtt->base.total, ggtt->mappable_end);
+ seq_printf(m, "Supported page sizes: %s\n",
+ stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
+ buf, sizeof(buf)));
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
@@ -514,32 +575,46 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_i915_private *dev_priv = node_to_i915(node);
struct drm_device *dev = &dev_priv->drm;
- bool show_pin_display_only = !!node->info_ent->data;
+ struct drm_i915_gem_object **objects;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
+ unsigned long nobject, n;
int count, ret;
+ nobject = READ_ONCE(dev_priv->mm.object_count);
+ objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
+ if (!objects)
+ return -ENOMEM;
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
- if (show_pin_display_only && !obj->pin_display)
- continue;
+ count = 0;
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+ objects[count++] = obj;
+ if (count == nobject)
+ break;
+ }
+ spin_unlock(&dev_priv->mm.obj_lock);
+
+ total_obj_size = total_gtt_size = 0;
+ for (n = 0; n < count; n++) {
+ obj = objects[n];
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
total_obj_size += obj->base.size;
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
- count++;
}
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
+ kvfree(objects);
return 0;
}
@@ -589,54 +664,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
-static void print_request(struct seq_file *m,
- struct drm_i915_gem_request *rq,
- const char *prefix)
-{
- seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
- rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
- rq->priotree.priority,
- jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- rq->timeline->common->name);
-}
-
-static int i915_gem_request_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_request *req;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int ret, any;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- any = 0;
- for_each_engine(engine, dev_priv, id) {
- int count;
-
- count = 0;
- list_for_each_entry(req, &engine->timeline->requests, link)
- count++;
- if (count == 0)
- continue;
-
- seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->timeline->requests, link)
- print_request(m, req, " ");
-
- any++;
- }
- mutex_unlock(&dev->struct_mutex);
-
- if (any == 0)
- seq_puts(m, "No requests\n");
-
- return 0;
-}
-
static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *engine)
{
@@ -1026,6 +1053,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int ret = 0;
intel_runtime_pm_get(dev_priv);
@@ -1041,9 +1069,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 freq_sts;
+ u32 rpmodectl, freq_sts;
+
+ mutex_lock(&dev_priv->pcu_lock);
+
+ rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
- mutex_lock(&dev_priv->rps.hw_lock);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
@@ -1052,21 +1090,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ intel_gpu_freq(dev_priv, rps->min_freq));
seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+ intel_gpu_freq(dev_priv, rps->idle_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_gpu_freq(dev_priv, rps->efficient_freq));
+ mutex_unlock(&dev_priv->pcu_lock);
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -1136,10 +1174,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_iir = I915_READ(GEN8_GT_IIR(2));
pm_mask = I915_READ(GEN6_PMINTRMSK);
}
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
- dev_priv->rps.pm_intrmsk_mbz);
+ rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
@@ -1159,8 +1204,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n",
- dev_priv->rps.up_threshold);
+ seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1168,8 +1212,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n",
- dev_priv->rps.down_threshold);
+ seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -1191,22 +1234,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+ intel_gpu_freq(dev_priv, rps->idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ intel_gpu_freq(dev_priv, rps->min_freq));
seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+ intel_gpu_freq(dev_priv, rps->boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ intel_gpu_freq(dev_priv, rps->efficient_freq));
} else {
seq_puts(m, "no P-state info available\n");
}
@@ -1267,7 +1310,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
seq_puts(m, "struct_mutex blocked for reset\n");
- if (!i915.enable_hangcheck) {
+ if (!i915_modparams.enable_hangcheck) {
seq_puts(m, "Hangcheck disabled\n");
return 0;
}
@@ -1422,6 +1465,9 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
+ seq_printf(m, "user.bypass_count = %u\n",
+ i915->uncore.user_forcewake.count);
+
for_each_fw_domain(fw_domain, i915, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
@@ -1444,21 +1490,11 @@ static void print_rc6_res(struct seq_file *m,
static int vlv_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rpmodectl1, rcctl1, pw_status;
+ u32 rcctl1, pw_status;
pw_status = I915_READ(VLV_GTLC_PW_STATUS);
- rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "Turbo enabled: %s\n",
- yesno(rpmodectl1 & GEN6_RP_ENABLE));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl1 & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
@@ -1476,7 +1512,7 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+ u32 gt_core_status, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
unsigned forcewake_count;
int count = 0;
@@ -1495,24 +1531,16 @@ static int gen6_drpc_info(struct seq_file *m)
gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
- rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
if (INTEL_GEN(dev_priv) >= 9) {
gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl1 & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
@@ -1699,7 +1727,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915.enable_ips));
+ yesno(i915_modparams.enable_ips));
if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
@@ -1775,6 +1803,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int ret = 0;
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
@@ -1786,19 +1815,17 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
goto out;
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
- min_gpu_freq =
- dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
- max_gpu_freq =
- dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+ min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
+ max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
} else {
- min_gpu_freq = dev_priv->rps.min_freq_softlimit;
- max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+ min_gpu_freq = rps->min_freq_softlimit;
+ max_gpu_freq = rps->max_freq_softlimit;
}
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -1817,7 +1844,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 8) & 0xff) * 100);
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
out:
intel_runtime_pm_put(dev_priv);
@@ -2014,7 +2041,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
enum intel_engine_id id;
int ret;
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
seq_printf(m, "Logical Ring Contexts are disabled\n");
return 0;
}
@@ -2251,25 +2278,26 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct drm_file *file;
- seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+ seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
- atomic_read(&dev_priv->rps.num_waiters));
+ atomic_read(&rps->num_waiters));
seq_printf(m, "Frequency requested %d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->min_freq),
+ intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
+ intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+ intel_gpu_freq(dev_priv, rps->idle_freq),
+ intel_gpu_freq(dev_priv, rps->efficient_freq),
+ intel_gpu_freq(dev_priv, rps->boost_freq));
mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
@@ -2281,15 +2309,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "%s [%d]: %d boosts\n",
task ? task->comm : "<unknown>",
task ? task->pid : -1,
- atomic_read(&file_priv->rps.boosts));
+ atomic_read(&file_priv->rps_client.boosts));
rcu_read_unlock();
}
seq_printf(m, "Kernel (anonymous) boosts: %d\n",
- atomic_read(&dev_priv->rps.boosts));
+ atomic_read(&rps->boosts));
mutex_unlock(&dev->filelist_mutex);
if (INTEL_GEN(dev_priv) >= 6 &&
- dev_priv->rps.enabled &&
+ rps->enabled &&
dev_priv->gt.active_requests) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2302,13 +2330,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(dev_priv->rps.power));
+ rps_power_to_str(rps->power));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
rpup && rpupei ? 100 * rpup / rpupei : 0,
- dev_priv->rps.up_threshold);
+ rps->up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- dev_priv->rps.down_threshold);
+ rps->down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
}
@@ -2331,27 +2359,13 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct drm_printer p;
if (!HAS_HUC_UCODE(dev_priv))
return 0;
- seq_puts(m, "HuC firmware status:\n");
- seq_printf(m, "\tpath: %s\n", huc_fw->path);
- seq_printf(m, "\tfetch: %s\n",
- intel_uc_fw_status_repr(huc_fw->fetch_status));
- seq_printf(m, "\tload: %s\n",
- intel_uc_fw_status_repr(huc_fw->load_status));
- seq_printf(m, "\tversion wanted: %d.%d\n",
- huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
- seq_printf(m, "\tversion found: %d.%d\n",
- huc_fw->major_ver_found, huc_fw->minor_ver_found);
- seq_printf(m, "\theader: offset is %d; size = %d\n",
- huc_fw->header_offset, huc_fw->header_size);
- seq_printf(m, "\tuCode: offset is %d; size = %d\n",
- huc_fw->ucode_offset, huc_fw->ucode_size);
- seq_printf(m, "\tRSA: offset is %d; size = %d\n",
- huc_fw->rsa_offset, huc_fw->rsa_size);
+ p = drm_seq_file_printer(m);
+ intel_uc_fw_dump(&dev_priv->huc.fw, &p);
intel_runtime_pm_get(dev_priv);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
@@ -2363,29 +2377,14 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct drm_printer p;
u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv))
return 0;
- seq_printf(m, "GuC firmware status:\n");
- seq_printf(m, "\tpath: %s\n",
- guc_fw->path);
- seq_printf(m, "\tfetch: %s\n",
- intel_uc_fw_status_repr(guc_fw->fetch_status));
- seq_printf(m, "\tload: %s\n",
- intel_uc_fw_status_repr(guc_fw->load_status));
- seq_printf(m, "\tversion wanted: %d.%d\n",
- guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
- seq_printf(m, "\tversion found: %d.%d\n",
- guc_fw->major_ver_found, guc_fw->minor_ver_found);
- seq_printf(m, "\theader: offset is %d; size = %d\n",
- guc_fw->header_offset, guc_fw->header_size);
- seq_printf(m, "\tuCode: offset is %d; size = %d\n",
- guc_fw->ucode_offset, guc_fw->ucode_size);
- seq_printf(m, "\tRSA: offset is %d; size = %d\n",
- guc_fw->rsa_offset, guc_fw->rsa_size);
+ p = drm_seq_file_printer(m);
+ intel_uc_fw_dump(&dev_priv->guc.fw, &p);
intel_runtime_pm_get(dev_priv);
@@ -2443,12 +2442,8 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
client->priority, client->stage_id, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
- client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
- seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
- client->wq_size, client->wq_offset, client->wq_tail);
-
- seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
+ client->doorbell_id, client->doorbell_offset);
for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
@@ -2594,7 +2589,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
if (!dev_priv->guc.log.vma)
return -EINVAL;
- *val = i915.guc_log_level;
+ *val = i915_modparams.guc_log_level;
return 0;
}
@@ -3239,9 +3234,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ struct drm_printer p;
intel_runtime_pm_get(dev_priv);
@@ -3250,146 +3245,21 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
- for_each_engine(engine, dev_priv, id) {
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct drm_i915_gem_request *rq;
- struct rb_node *rb;
- u64 addr;
-
- seq_printf(m, "%s\n", engine->name);
- seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
- engine->timeline->inflight_seqnos);
- seq_printf(m, "\tReset count: %d\n",
- i915_reset_engine_count(error, engine));
-
- rcu_read_lock();
-
- seq_printf(m, "\tRequests:\n");
-
- rq = list_first_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tfirst ");
-
- rq = list_last_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tlast ");
-
- rq = i915_gem_find_active_request(engine);
- if (rq) {
- print_request(m, rq, "\t\tactive ");
- seq_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
- }
-
- seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
- I915_READ(RING_START(engine->mmio_base)),
- rq ? i915_ggtt_offset(rq->ring->vma) : 0);
- seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
- rq ? rq->ring->head : 0);
- seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
- rq ? rq->ring->tail : 0);
- seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
- I915_READ(RING_CTL(engine->mmio_base)),
- I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
-
- rcu_read_unlock();
-
- addr = intel_engine_get_active_head(engine);
- seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
- upper_32_bits(addr), lower_32_bits(addr));
- addr = intel_engine_get_last_batch_head(engine);
- seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
- upper_32_bits(addr), lower_32_bits(addr));
-
- if (i915.enable_execlists) {
- u32 ptr, read, write;
- unsigned int idx;
-
- seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
- I915_READ(RING_EXECLIST_STATUS_LO(engine)),
- I915_READ(RING_EXECLIST_STATUS_HI(engine)));
-
- ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
- read = GEN8_CSB_READ_PTR(ptr);
- write = GEN8_CSB_WRITE_PTR(ptr);
- seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
- read, write,
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted)));
- if (read >= GEN8_CSB_ENTRIES)
- read = 0;
- if (write >= GEN8_CSB_ENTRIES)
- write = 0;
- if (read > write)
- write += GEN8_CSB_ENTRIES;
- while (read < write) {
- idx = ++read % GEN8_CSB_ENTRIES;
- seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
- idx,
- I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
- I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
- }
-
- rcu_read_lock();
- for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
- unsigned int count;
-
- rq = port_unpack(&engine->execlist_port[idx],
- &count);
- if (rq) {
- seq_printf(m, "\t\tELSP[%d] count=%d, ",
- idx, count);
- print_request(m, rq, "rq: ");
- } else {
- seq_printf(m, "\t\tELSP[%d] idle\n",
- idx);
- }
- }
- rcu_read_unlock();
-
- spin_lock_irq(&engine->timeline->lock);
- for (rb = engine->execlist_first; rb; rb = rb_next(rb)){
- struct i915_priolist *p =
- rb_entry(rb, typeof(*p), node);
-
- list_for_each_entry(rq, &p->requests,
- priotree.link)
- print_request(m, rq, "\t\tQ ");
- }
- spin_unlock_irq(&engine->timeline->lock);
- } else if (INTEL_GEN(dev_priv) > 6) {
- seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE(engine)));
- seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
- seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
- I915_READ(RING_PP_DIR_DCLV(engine)));
- }
+ p = drm_seq_file_printer(m);
+ for_each_engine(engine, dev_priv, id)
+ intel_engine_dump(engine, &p);
- spin_lock_irq(&b->rb_lock);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+ intel_runtime_pm_put(dev_priv);
- seq_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
- }
- spin_unlock_irq(&b->rb_lock);
+ return 0;
+}
- seq_puts(m, "\n");
- }
+static int i915_shrinker_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+ seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
return 0;
}
@@ -3403,7 +3273,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
enum intel_engine_id id;
int j, ret;
- if (!i915.semaphores) {
+ if (!i915_modparams.semaphores) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
@@ -3523,6 +3393,57 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ yesno(dev_priv->ipc_enabled));
+ return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (!HAS_IPC(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ int ret;
+ bool enable;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ intel_runtime_pm_get(dev_priv);
+ if (!dev_priv->ipc_enabled && enable)
+ DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ intel_runtime_pm_put(dev_priv);
+
+ return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_ipc_status_write
+};
+
static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4203,8 +4124,7 @@ fault_irq_set(struct drm_i915_private *i915,
mutex_unlock(&i915->drm.struct_mutex);
/* Flush idle worker to disarm irq */
- while (flush_delayed_work(&i915->gt.idle_work))
- ;
+ drain_delayed_work(&i915->gt.idle_work);
return 0;
@@ -4259,18 +4179,20 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
i915_ring_test_irq_get, i915_ring_test_irq_set,
"0x%08llx\n");
-#define DROP_UNBOUND 0x1
-#define DROP_BOUND 0x2
-#define DROP_RETIRE 0x4
-#define DROP_ACTIVE 0x8
-#define DROP_FREED 0x10
-#define DROP_SHRINK_ALL 0x20
+#define DROP_UNBOUND BIT(0)
+#define DROP_BOUND BIT(1)
+#define DROP_RETIRE BIT(2)
+#define DROP_ACTIVE BIT(3)
+#define DROP_FREED BIT(4)
+#define DROP_SHRINK_ALL BIT(5)
+#define DROP_IDLE BIT(6)
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE | \
DROP_FREED | \
- DROP_SHRINK_ALL)
+ DROP_SHRINK_ALL |\
+ DROP_IDLE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4286,7 +4208,8 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = &dev_priv->drm;
int ret = 0;
- DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
+ DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+ val, val & DROP_ALL);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
@@ -4317,6 +4240,9 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL);
+ if (val & DROP_IDLE)
+ drain_delayed_work(&dev_priv->gt.idle_work);
+
if (val & DROP_FREED) {
synchronize_rcu();
i915_gem_drain_freed_objects(dev_priv);
@@ -4337,7 +4263,7 @@ i915_max_freq_get(void *data, u64 *val)
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+ *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
return 0;
}
@@ -4345,6 +4271,7 @@ static int
i915_max_freq_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 hw_max, hw_min;
int ret;
@@ -4353,7 +4280,7 @@ i915_max_freq_set(void *data, u64 val)
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
return ret;
@@ -4362,20 +4289,20 @@ i915_max_freq_set(void *data, u64 val)
*/
val = intel_freq_opcode(dev_priv, val);
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
+ hw_max = rps->max_freq;
+ hw_min = rps->min_freq;
- if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
return -EINVAL;
}
- dev_priv->rps.max_freq_softlimit = val;
+ rps->max_freq_softlimit = val;
if (intel_set_rps(dev_priv, val))
DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return 0;
}
@@ -4392,7 +4319,7 @@ i915_min_freq_get(void *data, u64 *val)
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+ *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
return 0;
}
@@ -4400,6 +4327,7 @@ static int
i915_min_freq_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 hw_max, hw_min;
int ret;
@@ -4408,7 +4336,7 @@ i915_min_freq_set(void *data, u64 val)
DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
return ret;
@@ -4417,21 +4345,21 @@ i915_min_freq_set(void *data, u64 val)
*/
val = intel_freq_opcode(dev_priv, val);
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
+ hw_max = rps->max_freq;
+ hw_min = rps->min_freq;
if (val < hw_min ||
- val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ val > hw_max || val > rps->max_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
return -EINVAL;
}
- dev_priv->rps.min_freq_softlimit = val;
+ rps->min_freq_softlimit = val;
if (intel_set_rps(dev_priv, val))
DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return 0;
}
@@ -4674,26 +4602,26 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct drm_i915_private *i915 = inode->i_private;
- if (INTEL_GEN(dev_priv) < 6)
+ if (INTEL_GEN(i915) < 6)
return 0;
- intel_runtime_pm_get(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_get(i915);
+ intel_uncore_forcewake_user_get(i915);
return 0;
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct drm_i915_private *i915 = inode->i_private;
- if (INTEL_GEN(dev_priv) < 6)
+ if (INTEL_GEN(i915) < 6)
return 0;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_uncore_forcewake_user_put(i915);
+ intel_runtime_pm_put(i915);
return 0;
}
@@ -4783,9 +4711,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
- {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
{"i915_gem_stolen", i915_gem_stolen_list_info },
- {"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
@@ -4823,6 +4749,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
+ {"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -4859,7 +4786,8 @@ static const struct i915_debugfs_files {
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_control", &i915_guc_log_control_fops},
- {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
+ {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_ipc_status", &i915_ipc_status_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f124de3a0668..2cf10d17acfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,12 +58,12 @@ static unsigned int i915_load_fail_count;
bool __i915_inject_load_failure(const char *func, int line)
{
- if (i915_load_fail_count >= i915.inject_load_failure)
+ if (i915_load_fail_count >= i915_modparams.inject_load_failure)
return false;
- if (++i915_load_fail_count == i915.inject_load_failure) {
+ if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
- i915.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure, func, line);
return true;
}
@@ -106,8 +106,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
- return i915.inject_load_failure &&
- i915_load_fail_count == i915.inject_load_failure;
+ return i915_modparams.inject_load_failure &&
+ i915_load_fail_count == i915_modparams.inject_load_failure;
}
#define i915_load_error(dev_priv, fmt, ...) \
@@ -239,7 +239,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv));
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
@@ -320,7 +321,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = USES_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = i915.semaphores;
+ value = i915_modparams.semaphores;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -339,7 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
- value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ value = i915_modparams.enable_hangcheck &&
+ intel_has_gpu_reset(dev_priv);
if (value && intel_has_reset_engine(dev_priv))
value = 2;
break;
@@ -365,9 +367,18 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = i915_gem_mmap_gtt_version();
break;
case I915_PARAM_HAS_SCHEDULER:
- value = dev_priv->engine[RCS] &&
- dev_priv->engine[RCS]->schedule;
+ value = 0;
+ if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
+ value |= I915_SCHEDULER_CAP_ENABLED;
+ value |= I915_SCHEDULER_CAP_PRIORITY;
+
+ if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
+ i915_modparams.enable_execlists &&
+ !i915_modparams.enable_guc_submission)
+ value |= I915_SCHEDULER_CAP_PREEMPTION;
+ }
break;
+
case I915_PARAM_MMAP_VERSION:
/* Remember to bump this if the version changes! */
case I915_PARAM_HAS_GEM:
@@ -604,9 +615,10 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
- i915_gem_cleanup_userptr(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_cleanup_userptr(dev_priv);
+
i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->contexts.list));
@@ -868,6 +880,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
memcpy(device_info, match_info, sizeof(*device_info));
device_info->device_id = dev_priv->drm.pdev->device;
+ BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+ sizeof(device_info->platform_mask) * BITS_PER_BYTE);
+ device_info->platform_mask = BIT(device_info->platform);
+
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
device_info->gen_mask = BIT(device_info->gen - 1);
@@ -1001,6 +1017,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
intel_uncore_init(dev_priv);
+ intel_uc_init_mmio(dev_priv);
+
ret = intel_engines_init_mmio(dev_priv);
if (ret)
goto err_uncore;
@@ -1030,9 +1048,9 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- i915.enable_execlists =
+ i915_modparams.enable_execlists =
intel_sanitize_enable_execlists(dev_priv,
- i915.enable_execlists);
+ i915_modparams.enable_execlists);
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
@@ -1040,12 +1058,15 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
- i915.enable_ppgtt =
- intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+ i915_modparams.enable_ppgtt =
+ intel_sanitize_enable_ppgtt(dev_priv,
+ i915_modparams.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
- i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
- DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+ i915_modparams.semaphores =
+ intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
+ DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
+ yesno(i915_modparams.semaphores));
intel_uc_sanitize_options(dev_priv);
@@ -1276,7 +1297,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
/* Enable nuclear pageflip on ILK+ */
- if (!i915.nuclear_pageflip && match_info->gen < 5)
+ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
driver.driver_features &= ~DRIVER_ATOMIC;
ret = -ENOMEM;
@@ -1340,7 +1361,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
- dev_priv->ipc_enabled = false;
+ intel_init_ipc(dev_priv);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
DRM_INFO("DRM_I915_DEBUG enabled\n");
@@ -1571,7 +1592,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_set_init_power(dev_priv, false);
- fw_csr = !IS_GEN9_LP(dev_priv) &&
+ fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
@@ -1693,6 +1714,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_guc_resume(dev_priv);
intel_modeset_init_hw(dev);
+ intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
@@ -2061,11 +2083,14 @@ static int i915_pm_resume(struct device *kdev)
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
int ret;
- ret = i915_pm_suspend(kdev);
- if (ret)
- return ret;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend(dev);
+ if (ret)
+ return ret;
+ }
ret = i915_gem_freeze(kdev_to_i915(kdev));
if (ret)
@@ -2076,11 +2101,14 @@ static int i915_pm_freeze(struct device *kdev)
static int i915_pm_freeze_late(struct device *kdev)
{
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
int ret;
- ret = i915_pm_suspend_late(kdev);
- if (ret)
- return ret;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend_late(dev, true);
+ if (ret)
+ return ret;
+ }
ret = i915_gem_freeze_late(kdev_to_i915(kdev));
if (ret)
@@ -2476,7 +2504,7 @@ static int intel_runtime_suspend(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+ if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2518,12 +2546,12 @@ static int intel_runtime_suspend(struct device *kdev)
intel_uncore_suspend(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
- WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+ WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
- dev_priv->pm.suspended = true;
+ dev_priv->runtime_pm.suspended = true;
/*
* FIXME: We really should find a document that references the arguments
@@ -2569,11 +2597,11 @@ static int intel_runtime_resume(struct device *kdev)
DRM_DEBUG_KMS("Resuming device\n");
- WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+ WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
- dev_priv->pm.suspended = false;
+ dev_priv->runtime_pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -2591,6 +2619,8 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
+ intel_uncore_runtime_resume(dev_priv);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
@@ -2608,6 +2638,8 @@ static int intel_runtime_resume(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
+ intel_enable_ipc(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 18d9da53282b..54b5d4c582b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170818"
-#define DRIVER_TIMESTAMP 1503088845
+#define DRIVER_DATE "20171023"
+#define DRIVER_TIMESTAMP 1508748913
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -93,7 +93,7 @@
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- if (!WARN(i915.verbose_state_checks, format)) \
+ if (!WARN(i915_modparams.verbose_state_checks, format)) \
DRM_ERROR(format); \
unlikely(__ret_warn_on); \
})
@@ -126,7 +126,7 @@ static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
uint_fixed_16_16_t fp;
- WARN_ON(val >> 16);
+ WARN_ON(val > U16_MAX);
fp.val = val << 16;
return fp;
@@ -163,8 +163,8 @@ static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
uint_fixed_16_16_t fp;
- WARN_ON(val >> 32);
- fp.val = clamp_t(uint32_t, val, 0, ~0);
+ WARN_ON(val > U32_MAX);
+ fp.val = (uint32_t) val;
return fp;
}
@@ -181,8 +181,8 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
intermediate_val = (uint64_t) val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
- WARN_ON(intermediate_val >> 32);
- return clamp_t(uint32_t, intermediate_val, 0, ~0);
+ WARN_ON(intermediate_val > U32_MAX);
+ return (uint32_t) intermediate_val;
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
@@ -211,8 +211,8 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
- WARN_ON(interm_val >> 32);
- return clamp_t(uint32_t, interm_val, 0, ~0);
+ WARN_ON(interm_val > U32_MAX);
+ return (uint32_t) interm_val;
}
static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
@@ -569,6 +569,24 @@ struct i915_hotplug {
(__i)++) \
for_each_if (plane_state)
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if (crtc)
+
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if (plane)
+
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@@ -591,7 +609,7 @@ struct drm_i915_file_private {
struct intel_rps_client {
atomic_t boosts;
- } rps;
+ } rps_client;
unsigned int bsd_engine;
@@ -707,8 +725,7 @@ struct drm_i915_display_funcs {
struct drm_atomic_state *old_state);
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state);
- void (*update_crtcs)(struct drm_atomic_state *state,
- unsigned int *crtc_vblank_mask);
+ void (*update_crtcs)(struct drm_atomic_state *state);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
@@ -759,7 +776,6 @@ struct intel_csr {
func(has_fpga_dbg); \
func(has_full_ppgtt); \
func(has_full_48bit_ppgtt); \
- func(has_gmbus_irq); \
func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
@@ -767,8 +783,8 @@ struct intel_csr {
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
+ func(has_logical_ring_preemption); \
func(has_overlay); \
- func(has_pipe_cxsr); \
func(has_pooled_eu); \
func(has_psr); \
func(has_rc6); \
@@ -780,7 +796,8 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
- func(supports_tv);
+ func(supports_tv); \
+ func(has_ipc);
struct sseu_dev_info {
u8 slice_mask;
@@ -834,20 +851,30 @@ enum intel_platform {
};
struct intel_device_info {
- u32 display_mmio_offset;
u16 device_id;
+ u16 gen_mask;
+
+ u8 gen;
+ u8 gt; /* GT number, 0 if undefined */
+ u8 num_rings;
+ u8 ring_mask; /* Rings supported by the HW */
+
+ enum intel_platform platform;
+ u32 platform_mask;
+
+ u32 display_mmio_offset;
+
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- u8 gen;
- u16 gen_mask;
- enum intel_platform platform;
- u8 ring_mask; /* Rings supported by the HW */
- u8 num_rings;
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
+
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
@@ -956,6 +983,7 @@ struct i915_gpu_state {
pid_t pid;
u32 handle;
u32 hw_id;
+ int priority;
int ban_score;
int active;
int guilty;
@@ -978,11 +1006,13 @@ struct i915_gpu_state {
long jiffies;
pid_t pid;
u32 context;
+ int priority;
int ban_score;
u32 seqno;
u32 head;
u32 tail;
- } *requests, execlist[2];
+ } *requests, execlist[EXECLIST_MAX_PORTS];
+ unsigned int num_ports;
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@@ -1077,6 +1107,16 @@ struct intel_fbc {
int src_w;
int src_h;
bool visible;
+ /*
+ * Display surface base address adjustement for
+ * pageflips. Note that on gen4+ this only adjusts up
+ * to a tile, offsets within a tile are handled in
+ * the hw itself (with the TILEOFF register).
+ */
+ int adjusted_x;
+ int adjusted_y;
+
+ int y;
} plane;
struct {
@@ -1107,6 +1147,7 @@ struct intel_fbc {
} fb;
int cfb_size;
+ unsigned int gen9_wa_cfb_stride;
} params;
struct intel_fbc_work {
@@ -1159,6 +1200,14 @@ struct i915_psr {
bool y_cord_support;
bool colorimetry_support;
bool alpm;
+
+ void (*enable_source)(struct intel_dp *,
+ const struct intel_crtc_state *);
+ void (*disable_source)(struct intel_dp *,
+ const struct intel_crtc_state *);
+ void (*enable_sink)(struct intel_dp *);
+ void (*activate)(struct intel_dp *);
+ void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
enum intel_pch {
@@ -1277,7 +1326,7 @@ struct intel_rps_ei {
u32 media_c0;
};
-struct intel_gen6_power_mgmt {
+struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
@@ -1318,20 +1367,26 @@ struct intel_gen6_power_mgmt {
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
bool enabled;
- struct delayed_work autoenable_work;
atomic_t num_waiters;
atomic_t boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
+};
- /*
- * Protects RPS/RC6 register access and PCU communication.
- * Must be taken after struct_mutex if nested. Note that
- * this lock may be held for long periods of time when
- * talking to hw - so only take it when talking to hw!
- */
- struct mutex hw_lock;
+struct intel_rc6 {
+ bool enabled;
+};
+
+struct intel_llc_pstate {
+ bool enabled;
+};
+
+struct intel_gen6_power_mgmt {
+ struct intel_rps rps;
+ struct intel_rc6 rc6;
+ struct intel_llc_pstate llc_pstate;
+ struct delayed_work autoenable_work;
};
/* defined intel_pm.c */
@@ -1444,6 +1499,9 @@ struct i915_gem_mm {
* always the inner lock when overlapping with struct_mutex. */
struct mutex stolen_lock;
+ /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
+ spinlock_t obj_lock;
+
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -1464,10 +1522,21 @@ struct i915_gem_mm {
*/
struct llist_head free_list;
struct work_struct free_work;
+ spinlock_t free_lock;
+
+ /**
+ * Small stash of WC pages
+ */
+ struct pagevec wc_stash;
/** Usable portion of the GTT for GEM */
dma_addr_t stolen_base; /* limited to low memory (32-bit) */
+ /**
+ * tmpfs instance used for shmem backed objects
+ */
+ struct vfsmount *gemfs;
+
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -1709,6 +1778,8 @@ struct intel_vbt_data {
u16 panel_id;
struct mipi_config *config;
struct mipi_pps_data *pps;
+ u16 bl_ports;
+ u16 cabc_ports;
u8 seq_version;
u32 size;
u8 *data;
@@ -1718,7 +1789,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
int child_dev_num;
- union child_device_config *child_dev;
+ struct child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
@@ -1812,6 +1883,20 @@ struct skl_wm_level {
uint8_t plane_res_l;
};
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ uint32_t width;
+ uint8_t cpp;
+ uint32_t plane_pixel_rate;
+ uint32_t y_min_scanlines;
+ uint32_t plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ uint32_t linetime_us;
+};
+
/*
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
@@ -1890,13 +1975,7 @@ struct i915_wa_reg {
u32 mask;
};
-/*
- * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
- * allowing it for RCS as we don't foresee any requirement of having
- * a whitelist for other engines. When it is really required for
- * other engines then the limit need to be increased.
- */
-#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
+#define I915_MAX_WA_REGS 16
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
@@ -2197,8 +2276,11 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct i915_gem_context *kernel_context;
struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ /* Context used internally to idle the GPU and setup initial state */
+ struct i915_gem_context *kernel_context;
+ /* Context only to be used for injecting preemption commands */
+ struct i915_gem_context *preempt_context;
struct i915_vma *semaphore;
struct drm_dma_handle *status_page_dmah;
@@ -2307,6 +2389,8 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
+ struct intel_ppat ppat;
+
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -2329,7 +2413,8 @@ struct drm_i915_private {
struct mutex dpll_lock;
unsigned int active_crtcs;
- unsigned int min_pixclk[I915_MAX_PIPES];
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -2351,8 +2436,16 @@ struct drm_i915_private {
/* Cannot be determined by PCIID. You must always read a register. */
u32 edram_cap;
- /* gen6+ rps state */
- struct intel_gen6_power_mgmt rps;
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested. Note that
+ * this lock may be held for long periods of time when
+ * talking to hw - so only take it when talking to hw!
+ */
+ struct mutex pcu_lock;
+
+ /* gen6+ GT PM state */
+ struct intel_gen6_power_mgmt gt_pm;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
@@ -2463,7 +2556,7 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
- struct i915_runtime_pm pm;
+ struct i915_runtime_pm runtime_pm;
struct {
bool initialized;
@@ -2786,8 +2879,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
((__dmap) = (__iter).dma + (__iter).curr); \
- (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
- ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+ (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
* for_each_sgt_page - iterate over the pages of the given sg_table
@@ -2799,8 +2892,38 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
((__pp) = (__iter).pfn == 0 ? NULL : \
pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
- (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
- ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
+ (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+
+static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
+{
+ unsigned int page_sizes;
+
+ page_sizes = 0;
+ while (sg) {
+ GEM_BUG_ON(sg->offset);
+ GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
+ page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ return page_sizes;
+}
+
+static inline unsigned int i915_sg_segment_size(void)
+{
+ unsigned int size = swiotlb_max_segment();
+
+ if (size == 0)
+ return SCATTERLIST_MAX_SEGMENT;
+
+ size = rounddown(size, PAGE_SIZE);
+ /* swiotlb_max_segment_size can return 1 byte when it means one page. */
+ if (size < PAGE_SIZE)
+ size = PAGE_SIZE;
+
+ return size;
+}
static inline const struct intel_device_info *
intel_info(const struct drm_i915_private *dev_priv)
@@ -2817,23 +2940,21 @@ intel_info(const struct drm_i915_private *dev_priv)
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
+
+#define INTEL_GEN_MASK(s, e) ( \
+ BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
+ BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
+ GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
+ (s) != GEN_FOREVER ? (s) - 1 : 0) \
+)
+
/*
* Returns true if Gen is in inclusive range [Start, End].
*
* Use GEN_FOREVER for unbound start and or end.
*/
-#define IS_GEN(dev_priv, s, e) ({ \
- unsigned int __s = (s), __e = (e); \
- BUILD_BUG_ON(!__builtin_constant_p(s)); \
- BUILD_BUG_ON(!__builtin_constant_p(e)); \
- if ((__s) != GEN_FOREVER) \
- __s = (s) - 1; \
- if ((__e) == GEN_FOREVER) \
- __e = BITS_PER_LONG - 1; \
- else \
- __e = (e) - 1; \
- !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
-})
+#define IS_GEN(dev_priv, s, e) \
+ (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
/*
* Return true if revision is in range [since,until] inclusive.
@@ -2843,38 +2964,39 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830)
-#define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G)
-#define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X)
-#define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G)
-#define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G)
-#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM)
-#define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G)
-#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM)
-#define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G)
-#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM)
-#define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45)
-#define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45)
+#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+
+#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
+#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
+#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
+#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
+#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
+#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
+#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
+#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
+#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
+#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
+#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
+#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
-#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW)
-#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33)
+#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
+#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
-#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
-#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
- INTEL_DEVID(dev_priv) == 0x0152 || \
- INTEL_DEVID(dev_priv) == 0x015a)
-#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
-#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
-#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL)
-#define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL)
-#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE)
-#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
-#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
-#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
-#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE)
-#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE)
+#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
+#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
+ (dev_priv)->info.gt == 1)
+#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
+#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
+#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
+#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
+#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
+#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
+#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
+#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
+#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
+#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2886,11 +3008,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2911,17 +3033,19 @@ intel_info(const struct drm_i915_private *dev_priv)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
+ (dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
+ (dev_priv)->info.gt == 4)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
+ (dev_priv)->info.gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
+#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
+ (dev_priv)->info.gt == 2)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -2962,6 +3086,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define CNL_REVID_A0 0x0
#define CNL_REVID_B0 0x1
+#define CNL_REVID_C0 0x2
#define IS_CNL_REVID(p, since, until) \
(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
@@ -3012,9 +3137,13 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
-#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
+#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
+#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
+ GEM_BUG_ON((sizes) == 0); \
+ ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+})
#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
@@ -3032,9 +3161,12 @@ intel_info(const struct drm_i915_private *dev_priv)
* even when in MSI mode. This results in spurious interrupt warnings if the
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
+ *
+ * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
+ * interrupts.
*/
-#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
-#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
+#define HAS_AUX_IRQ(dev_priv) true
+#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -3046,7 +3178,6 @@ intel_info(const struct drm_i915_private *dev_priv)
#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
@@ -3065,6 +3196,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
@@ -3210,7 +3343,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
unsigned long delay;
- if (unlikely(!i915.enable_hangcheck))
+ if (unlikely(!i915_modparams.enable_hangcheck))
return;
/* Don't continually defer the hangcheck so that it is always run at
@@ -3243,6 +3376,8 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
+u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 status_mask);
@@ -3424,7 +3559,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n);
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
+ struct sg_table *pages,
+ unsigned int sg_page_sizes);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
@@ -3438,10 +3574,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
return __i915_gem_object_get_pages(obj);
}
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+ return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- GEM_BUG_ON(!obj->mm.pages);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
atomic_inc(&obj->mm.pages_pin_count);
}
@@ -3455,8 +3597,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(!obj->mm.pages);
atomic_dec(&obj->mm.pages_pin_count);
}
@@ -3646,8 +3788,9 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
}
/* i915_gem_fence_reg.c */
-int __must_check i915_vma_get_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
+struct drm_i915_fence_reg *
+i915_reserve_fence(struct drm_i915_private *dev_priv);
+void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
@@ -4333,11 +4476,12 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
-static inline bool
-intel_engine_can_store_dword(struct intel_engine_cs *engine)
+static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
- return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
- engine->class);
+ if (INTEL_GEN(i915) >= 10)
+ return CNL_HWS_CSB_WRITE_INDEX;
+ else
+ return I915_HWS_CSB_WRITE_INDEX;
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc1faa49687d..ad4050f7ab3b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
@@ -55,7 +56,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
- return obj->pin_display;
+ return obj->pin_global; /* currently in use by HW, keep flushed */
}
static int
@@ -161,8 +162,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static struct sg_table *
-i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
drm_dma_handle_t *phys;
@@ -170,19 +170,20 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
struct scatterlist *sg;
char *vaddr;
int i;
+ int err;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Always aligning to the object size, allows a single allocation
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
phys = drm_pci_alloc(obj->base.dev,
- obj->base.size,
+ roundup_pow_of_two(obj->base.size),
roundup_pow_of_two(obj->base.size));
if (!phys)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -191,7 +192,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
- st = ERR_CAST(page);
+ err = PTR_ERR(page);
goto err_phys;
}
@@ -208,13 +209,13 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
- st = ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
goto err_phys;
}
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- st = ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
goto err_phys;
}
@@ -226,11 +227,15 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
obj->phys_handle = phys;
- return st;
+
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
err_phys:
drm_pci_free(obj->base.dev, phys);
- return st;
+
+ return err;
}
static void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -353,7 +358,7 @@ static long
i915_gem_object_wait_fence(struct dma_fence *fence,
unsigned int flags,
long timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
struct drm_i915_gem_request *rq;
@@ -386,11 +391,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
- if (rps) {
+ if (rps_client) {
if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq, rps);
+ gen6_rps_boost(rq, rps_client);
else
- rps = NULL;
+ rps_client = NULL;
}
timeout = i915_wait_request(rq, flags, timeout);
@@ -406,7 +411,7 @@ static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int flags,
long timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
@@ -425,7 +430,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
for (i = 0; i < count; i++) {
timeout = i915_gem_object_wait_fence(shared[i],
flags, timeout,
- rps);
+ rps_client);
if (timeout < 0)
break;
@@ -442,7 +447,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
}
if (excl && timeout >= 0) {
- timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+ rps_client);
prune_fences = timeout >= 0;
}
@@ -538,7 +544,7 @@ int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
@@ -550,7 +556,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
timeout = i915_gem_object_wait_reservation(obj->resv,
flags, timeout,
- rps);
+ rps_client);
return timeout < 0 ? timeout : 0;
}
@@ -558,7 +564,7 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
struct drm_i915_file_private *fpriv = file->driver_priv;
- return &fpriv->rps;
+ return &fpriv->rps_client;
}
static int
@@ -694,10 +700,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
- if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+ if (!HAS_LLC(dev_priv)) {
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
spin_unlock_irq(&dev_priv->uncore.lock);
intel_runtime_pm_put(dev_priv);
}
@@ -1013,17 +1019,20 @@ gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
- void *vaddr;
+ void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
- unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ vaddr = io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_to_user_inatomic(user_data,
+ (void __force *)vaddr + offset,
+ length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
- vaddr = (void __force *)
- io_mapping_map_wc(mapping, base, PAGE_SIZE);
- unwritten = copy_to_user(user_data, vaddr + offset, length);
+ vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_to_user(user_data,
+ (void __force *)vaddr + offset,
+ length);
io_mapping_unmap(vaddr);
}
return unwritten;
@@ -1047,7 +1056,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE | PIN_NONBLOCK);
+ PIN_MAPPABLE |
+ PIN_NONFAULT |
+ PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -1189,18 +1200,18 @@ ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
- void *vaddr;
+ void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
- unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
+ vaddr = io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
- vaddr = (void __force *)
- io_mapping_map_wc(mapping, base, PAGE_SIZE);
- unwritten = copy_from_user(vaddr + offset, user_data, length);
+ vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_from_user((void __force *)vaddr + offset,
+ user_data, length);
io_mapping_unmap(vaddr);
}
@@ -1229,9 +1240,27 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- intel_runtime_pm_get(i915);
+ if (i915_gem_object_has_struct_page(obj)) {
+ /*
+ * Avoid waking the device up if we can fallback, as
+ * waking/resuming is very slow (worst-case 10-100 ms
+ * depending on PCI sleeps and our own resume time).
+ * This easily dwarfs any performance advantage from
+ * using the cache bypass of indirect GGTT access.
+ */
+ if (!intel_runtime_pm_get_if_in_use(i915)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ } else {
+ /* No backing pages, no fallback, we must force GGTT access */
+ intel_runtime_pm_get(i915);
+ }
+
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE | PIN_NONBLOCK);
+ PIN_MAPPABLE |
+ PIN_NONFAULT |
+ PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -1244,7 +1273,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out_unlock;
+ goto out_rpm;
GEM_BUG_ON(!node.allocated);
}
@@ -1307,8 +1336,9 @@ out_unpin:
} else {
i915_vma_unpin(vma);
}
-out_unlock:
+out_rpm:
intel_runtime_pm_put(i915);
+out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1524,6 +1554,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct list_head *list;
struct i915_vma *vma;
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!i915_vma_is_ggtt(vma))
break;
@@ -1538,8 +1570,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
}
i915 = to_i915(obj->base.dev);
+ spin_lock(&i915->mm.obj_lock);
list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
- list_move_tail(&obj->global_link, list);
+ list_move_tail(&obj->mm.link, list);
+ spin_unlock(&i915->mm.obj_lock);
}
/**
@@ -1902,22 +1936,27 @@ int i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_unpin;
- ret = i915_vma_get_fence(vma);
+ ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
- /* Mark as being mmapped into userspace for later revocation */
- assert_rpm_wakelock_held(dev_priv);
- if (list_empty(&obj->userfault_link))
- list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
+ if (ret)
+ goto err_fence;
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
+ if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+ GEM_BUG_ON(!obj->userfault_count);
+
+err_fence:
+ i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
@@ -1969,6 +2008,25 @@ err:
return ret;
}
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!obj->userfault_count);
+
+ obj->userfault_count = 0;
+ list_del(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ i915_vma_unset_userfault(vma);
+ }
+}
+
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@@ -1999,12 +2057,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
lockdep_assert_held(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (list_empty(&obj->userfault_link))
+ if (!obj->userfault_count)
goto out;
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
+ __i915_gem_object_release_mmap(obj);
/* Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
@@ -2032,11 +2088,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link) {
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
- }
+ &dev_priv->mm.userfault_list, userfault_link)
+ __i915_gem_object_release_mmap(obj);
/* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
@@ -2059,7 +2112,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
if (!reg->vma)
continue;
- GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
reg->dirty = true;
}
}
@@ -2164,7 +2217,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
struct address_space *mapping;
lockdep_assert_held(&obj->mm.lock);
- GEM_BUG_ON(obj->mm.pages);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
@@ -2223,13 +2276,14 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
if (i915_gem_object_has_pinned_pages(obj))
return;
GEM_BUG_ON(obj->bind_count);
- if (!READ_ONCE(obj->mm.pages))
+ if (!i915_gem_object_has_pages(obj))
return;
/* May be called by shrinker from within get_pages() (on another bo) */
@@ -2243,6 +2297,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
pages = fetch_and_zero(&obj->mm.pages);
GEM_BUG_ON(!pages);
+ spin_lock(&i915->mm.obj_lock);
+ list_del(&obj->mm.link);
+ spin_unlock(&i915->mm.obj_lock);
+
if (obj->mm.mapping) {
void *ptr;
@@ -2260,6 +2318,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
unlock:
mutex_unlock(&obj->mm.lock);
}
@@ -2290,8 +2350,7 @@ static bool i915_sg_trim(struct sg_table *orig_st)
return true;
}
-static struct sg_table *
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
const unsigned long page_count = obj->base.size / PAGE_SIZE;
@@ -2302,7 +2361,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
- unsigned int max_segment;
+ unsigned int max_segment = i915_sg_segment_size();
+ unsigned int sg_page_sizes;
gfp_t noreclaim;
int ret;
@@ -2313,18 +2373,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- max_segment = swiotlb_max_segment();
- if (!max_segment)
- max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
/* Get the list of pages out of our struct file. They'll be pinned
@@ -2338,6 +2394,7 @@ rebuild_st:
sg = st->sgl;
st->nents = 0;
+ sg_page_sizes = 0;
for (i = 0; i < page_count; i++) {
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2390,8 +2447,10 @@ rebuild_st:
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
- if (i)
+ if (i) {
+ sg_page_sizes |= sg->length;
sg = sg_next(sg);
+ }
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
} else {
@@ -2402,8 +2461,10 @@ rebuild_st:
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
- if (sg) /* loop terminated early; short sg table */
+ if (sg) { /* loop terminated early; short sg table */
+ sg_page_sizes |= sg->length;
sg_mark_end(sg);
+ }
/* Trim unused sg entries to avoid wasting memory. */
i915_sg_trim(st);
@@ -2432,7 +2493,9 @@ rebuild_st:
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
- return st;
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
err_sg:
sg_mark_end(sg);
@@ -2453,12 +2516,17 @@ err_pages:
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ERR_PTR(ret);
+ return ret;
}
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
+ struct sg_table *pages,
+ unsigned int sg_page_sizes)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ int i;
+
lockdep_assert_held(&obj->mm.lock);
obj->mm.get_page.sg_pos = pages->sgl;
@@ -2467,30 +2535,48 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.pages = pages;
if (i915_gem_object_is_tiled(obj) &&
- to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
+
+ GEM_BUG_ON(!sg_page_sizes);
+ obj->mm.page_sizes.phys = sg_page_sizes;
+
+ /*
+ * Calculate the supported page-sizes which fit into the given
+ * sg_page_sizes. This will give us the page-sizes which we may be able
+ * to use opportunistically when later inserting into the GTT. For
+ * example if phys=2G, then in theory we should be able to use 1G, 2M,
+ * 64K or 4K pages, although in practice this will depend on a number of
+ * other factors.
+ */
+ obj->mm.page_sizes.sg = 0;
+ for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ if (obj->mm.page_sizes.phys & ~0u << i)
+ obj->mm.page_sizes.sg |= BIT(i);
+ }
+ GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+ spin_lock(&i915->mm.obj_lock);
+ list_add(&obj->mm.link, &i915->mm.unbound_list);
+ spin_unlock(&i915->mm.obj_lock);
}
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- struct sg_table *pages;
-
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+ int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- pages = obj->ops->get_pages(obj);
- if (unlikely(IS_ERR(pages)))
- return PTR_ERR(pages);
+ err = obj->ops->get_pages(obj);
+ GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
- __i915_gem_object_set_pages(obj, pages);
- return 0;
+ return err;
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -2508,7 +2594,9 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (err)
return err;
- if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
@@ -2591,7 +2679,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
type &= ~I915_MAP_OVERRIDE;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
@@ -2601,7 +2691,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
atomic_inc(&obj->mm.pages_pin_count);
pinned = false;
}
- GEM_BUG_ON(!obj->mm.pages);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
@@ -2656,7 +2746,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
* allows it to avoid the cost of retrieving a page (either swapin
* or clearing-before-use) before it is overwritten.
*/
- if (READ_ONCE(obj->mm.pages))
+ if (i915_gem_object_has_pages(obj))
return -ENODEV;
if (obj->mm.madv != I915_MADV_WILLNEED)
@@ -2800,7 +2890,17 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request = NULL;
- /* Prevent the signaler thread from updating the request
+ /*
+ * During the reset sequence, we must prevent the engine from
+ * entering RC6. As the context state is undefined until we restart
+ * the engine, if it does enter RC6 during the reset, the state
+ * written to the powercontext is undefined and so we may lose
+ * GPU state upon resume, i.e. fail to restart after a reset.
+ */
+ intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+
+ /*
+ * Prevent the signaler thread from updating the request
* state (by calling dma_fence_signal) as we are processing
* the reset. The write from the GPU of the seqno is
* asynchronous and the signaler thread may see a different
@@ -2811,7 +2911,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
*/
kthread_park(engine->breadcrumbs.signaler);
- /* Prevent request submission to the hardware until we have
+ /*
+ * Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
* to a second via its engine->irq_tasklet *just* as we are
@@ -2819,8 +2920,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* Turning off the engine->irq_tasklet until the reset is over
* prevents the race.
*/
- tasklet_kill(&engine->irq_tasklet);
- tasklet_disable(&engine->irq_tasklet);
+ tasklet_kill(&engine->execlists.irq_tasklet);
+ tasklet_disable(&engine->execlists.irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2999,8 +3100,10 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->irq_tasklet);
+ tasklet_enable(&engine->execlists.irq_tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
+
+ intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
@@ -3018,9 +3121,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ dma_fence_set_error(&request->fence, -EIO);
+
+ i915_gem_request_submit(request);
+}
+
+static void nop_complete_submit_request(struct drm_i915_gem_request *request)
+{
unsigned long flags;
- GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ -3029,81 +3138,59 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
-static void engine_set_wedged(struct intel_engine_cs *engine)
+void i915_gem_set_wedged(struct drm_i915_private *i915)
{
- struct drm_i915_gem_request *request;
- unsigned long flags;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- /* We need to be sure that no thread is running the old callback as
- * we install the nop handler (otherwise we would submit a request
- * to hardware that will never complete). In order to prevent this
- * race, we wait until the machine is idle before making the swap
- * (using stop_machine()).
+ /*
+ * First, stop submission to hw, but do not yet complete requests by
+ * rolling the global seqno forward (since this would complete requests
+ * for which we haven't set the fence error to EIO yet).
*/
- engine->submit_request = nop_submit_request;
-
- /* Mark all executing requests as skipped */
- spin_lock_irqsave(&engine->timeline->lock, flags);
- list_for_each_entry(request, &engine->timeline->requests, link)
- if (!i915_gem_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ for_each_engine(engine, i915, id)
+ engine->submit_request = nop_submit_request;
/*
- * Clear the execlists queue up before freeing the requests, as those
- * are the ones that keep the context and ringbuffer backing objects
- * pinned in place.
+ * Make sure no one is running the old callback before we proceed with
+ * cancelling requests and resetting the completion tracking. Otherwise
+ * we might submit a request to the hardware which never completes.
*/
+ synchronize_rcu();
- if (i915.enable_execlists) {
- struct execlist_port *port = engine->execlist_port;
- unsigned long flags;
- unsigned int n;
-
- spin_lock_irqsave(&engine->timeline->lock, flags);
-
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
- i915_gem_request_put(port_request(&port[n]));
- memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- engine->execlist_queue = RB_ROOT;
- engine->execlist_first = NULL;
-
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ for_each_engine(engine, i915, id) {
+ /* Mark all executing requests as skipped */
+ engine->cancel_requests(engine);
- /* The port is checked prior to scheduling a tasklet, but
- * just in case we have suspended the tasklet to do the
- * wedging make sure that when it wakes, it decides there
- * is no work to do by clearing the irq_posted bit.
+ /*
+ * Only once we've force-cancelled all in-flight requests can we
+ * start to complete all requests.
*/
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ engine->submit_request = nop_complete_submit_request;
}
- /* Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
+ /*
+ * Make sure no request can slip through without getting completed by
+ * either this call here to intel_engine_init_global_seqno, or the one
+ * in nop_complete_submit_request.
*/
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
-}
+ synchronize_rcu();
-static int __i915_gem_set_wedged_BKL(void *data)
-{
- struct drm_i915_private *i915 = data;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ for_each_engine(engine, i915, id) {
+ unsigned long flags;
- for_each_engine(engine, i915, id)
- engine_set_wedged(engine);
+ /* Mark all pending requests as complete so that any concurrent
+ * (lockless) lookup doesn't try and wait upon the request as we
+ * reset it.
+ */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ intel_engine_init_global_seqno(engine,
+ intel_engine_last_submit(engine));
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ }
set_bit(I915_WEDGED, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
-
- return 0;
-}
-
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
-{
- stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
}
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
@@ -3267,11 +3354,11 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
+ GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
if (ctx->file_priv != fpriv)
continue;
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
-
GEM_BUG_ON(vma->obj != obj);
/* We allow the process to have multiple handles to the same
@@ -3385,24 +3472,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
return 0;
}
-static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
-{
- return wait_for(intel_engine_is_idle(engine), timeout_ms);
-}
-
static int wait_for_engines(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
- i915_gem_set_wedged(i915);
- return -EIO;
- }
-
- GEM_BUG_ON(intel_engine_get_seqno(engine) !=
- intel_engine_last_submit(engine));
+ if (wait_for(intel_engines_are_idle(i915), 50)) {
+ DRM_ERROR("Failed to idle engines, declaring wedged!\n");
+ i915_gem_set_wedged(i915);
+ return -EIO;
}
return 0;
@@ -3452,7 +3527,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
- if (!READ_ONCE(obj->pin_display))
+ if (!READ_ONCE(obj->pin_global))
return;
mutex_lock(&obj->base.dev->struct_mutex);
@@ -3819,10 +3894,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
- /* Mark the pin_display early so that we account for the
+ /* Mark the global pin early so that we account for the
* display coherency whilst setting up the cache domains.
*/
- obj->pin_display++;
+ obj->pin_global++;
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
@@ -3838,7 +3913,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
- goto err_unpin_display;
+ goto err_unpin_global;
}
/* As the user may map the buffer once pinned in the display plane
@@ -3869,7 +3944,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
}
if (IS_ERR(vma))
- goto err_unpin_display;
+ goto err_unpin_global;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
@@ -3884,8 +3959,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return vma;
-err_unpin_display:
- obj->pin_display--;
+err_unpin_global:
+ obj->pin_global--;
return vma;
}
@@ -3894,10 +3969,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(vma->obj->pin_display == 0))
+ if (WARN_ON(vma->obj->pin_global == 0))
return;
- if (--vma->obj->pin_display == 0)
+ if (--vma->obj->pin_global == 0)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
@@ -4016,42 +4091,47 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
+ if (!view && flags & PIN_MAPPABLE) {
+ /* If the required space is larger than the available
+ * aperture, we will not able to find a slot for the
+ * object and unbinding the object now will be in
+ * vain. Worse, doing so may cause us to ping-pong
+ * the object in and out of the Global GTT and
+ * waste a lot of cycles under the mutex.
+ */
+ if (obj->base.size > dev_priv->ggtt.mappable_end)
+ return ERR_PTR(-E2BIG);
+
+ /* If NONBLOCK is set the caller is optimistically
+ * trying to cache the full object within the mappable
+ * aperture, and *must* have a fallback in place for
+ * situations where we cannot bind the object. We
+ * can be a little more lax here and use the fallback
+ * more often to avoid costly migrations of ourselves
+ * and other objects within the aperture.
+ *
+ * Half-the-aperture is used as a simple heuristic.
+ * More interesting would to do search for a free
+ * block prior to making the commitment to unbind.
+ * That caters for the self-harm case, and with a
+ * little more heuristics (e.g. NOFAULT, NOEVICT)
+ * we could try to minimise harm to others.
+ */
+ if (flags & PIN_NONBLOCK &&
+ obj->base.size > dev_priv->ggtt.mappable_end / 2)
+ return ERR_PTR(-ENOSPC);
+ }
+
vma = i915_vma_instance(obj, vm, view);
if (unlikely(IS_ERR(vma)))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
- if (flags & PIN_NONBLOCK &&
- (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
- return ERR_PTR(-ENOSPC);
+ if (flags & PIN_NONBLOCK) {
+ if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
+ return ERR_PTR(-ENOSPC);
- if (flags & PIN_MAPPABLE) {
- /* If the required space is larger than the available
- * aperture, we will not able to find a slot for the
- * object and unbinding the object now will be in
- * vain. Worse, doing so may cause us to ping-pong
- * the object in and out of the Global GTT and
- * waste a lot of cycles under the mutex.
- */
- if (vma->fence_size > dev_priv->ggtt.mappable_end)
- return ERR_PTR(-E2BIG);
-
- /* If NONBLOCK is set the caller is optimistically
- * trying to cache the full object within the mappable
- * aperture, and *must* have a fallback in place for
- * situations where we cannot bind the object. We
- * can be a little more lax here and use the fallback
- * more often to avoid costly migrations of ourselves
- * and other objects within the aperture.
- *
- * Half-the-aperture is used as a simple heuristic.
- * More interesting would to do search for a free
- * block prior to making the commitment to unbind.
- * That caters for the self-harm case, and with a
- * little more heuristics (e.g. NOFAULT, NOEVICT)
- * we could try to minimise harm to others.
- */
- if (flags & PIN_NONBLOCK &&
+ if (flags & PIN_MAPPABLE &&
vma->fence_size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
@@ -4232,7 +4312,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
- if (obj->mm.pages &&
+ if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4251,7 +4331,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+ if (obj->mm.madv == I915_MADV_DONTNEED &&
+ !i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4277,8 +4358,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{
mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->global_link);
- INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4308,6 +4387,30 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.pwrite = i915_gem_object_pwrite_gtt,
};
+static int i915_gem_object_create_shmem(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ unsigned long flags = VM_NORESERVE;
+ struct file *filp;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ if (i915->mm.gemfs)
+ filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
+ flags);
+ else
+ filp = shmem_file_setup("i915", size, flags);
+
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ obj->filp = filp;
+
+ return 0;
+}
+
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
@@ -4332,7 +4435,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
if (obj == NULL)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
+ ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
if (ret)
goto fail;
@@ -4409,13 +4512,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
{
struct drm_i915_gem_object *obj, *on;
- mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- llist_for_each_entry(obj, freed, freed) {
+ llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
trace_i915_gem_object_destroy(obj);
+ mutex_lock(&i915->drm.struct_mutex);
+
GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn,
&obj->vma_list, obj_link) {
@@ -4426,16 +4530,24 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->vma_list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
- list_del(&obj->global_link);
- }
- intel_runtime_pm_put(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ /* This serializes freeing with the shrinker. Since the free
+ * is delayed, first by RCU then by the workqueue, we want the
+ * shrinker to be able to free pages of unreferenced objects,
+ * or else we may oom whilst there are plenty of deferred
+ * freed objects.
+ */
+ if (i915_gem_object_has_pages(obj)) {
+ spin_lock(&i915->mm.obj_lock);
+ list_del_init(&obj->mm.link);
+ spin_unlock(&i915->mm.obj_lock);
+ }
- cond_resched();
+ mutex_unlock(&i915->drm.struct_mutex);
- llist_for_each_entry_safe(obj, on, freed, freed) {
GEM_BUG_ON(obj->bind_count);
+ GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+ GEM_BUG_ON(!list_empty(&obj->lut_list));
if (obj->ops->release)
obj->ops->release(obj);
@@ -4443,7 +4555,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- GEM_BUG_ON(obj->mm.pages);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
@@ -4454,16 +4566,29 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
kfree(obj->bit_17);
i915_gem_object_free(obj);
+
+ if (on)
+ cond_resched();
}
+ intel_runtime_pm_put(i915);
}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
struct llist_node *freed;
- freed = llist_del_all(&i915->mm.free_list);
- if (unlikely(freed))
+ /* Free the oldest, most stale object to keep the free_list short */
+ freed = NULL;
+ if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+ /* Only one consumer of llist_del_first() allowed */
+ spin_lock(&i915->mm.free_lock);
+ freed = llist_del_first(&i915->mm.free_list);
+ spin_unlock(&i915->mm.free_lock);
+ }
+ if (unlikely(freed)) {
+ freed->next = NULL;
__i915_gem_free_objects(i915, freed);
+ }
}
static void __i915_gem_free_work(struct work_struct *work)
@@ -4480,11 +4605,17 @@ static void __i915_gem_free_work(struct work_struct *work)
* unbound now.
*/
+ spin_lock(&i915->mm.free_lock);
while ((freed = llist_del_all(&i915->mm.free_list))) {
+ spin_unlock(&i915->mm.free_lock);
+
__i915_gem_free_objects(i915, freed);
if (need_resched())
- break;
+ return;
+
+ spin_lock(&i915->mm.free_lock);
}
+ spin_unlock(&i915->mm.free_lock);
}
static void __i915_gem_free_object_rcu(struct rcu_head *head)
@@ -4543,6 +4674,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
+ if (i915_terminally_wedged(&i915->gpu_error)) {
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_unset_wedged(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
/*
* If we inherit context state from the BIOS or earlier occupants
* of the GPU, the GPU may be in an inconsistent state when we
@@ -4575,17 +4712,19 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- ret = i915_gem_switch_to_kernel_context(dev_priv);
- if (ret)
- goto err_unlock;
+ if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+ ret = i915_gem_switch_to_kernel_context(dev_priv);
+ if (ret)
+ goto err_unlock;
- ret = i915_gem_wait_for_idle(dev_priv,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
- if (ret)
- goto err_unlock;
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret && ret != -EIO)
+ goto err_unlock;
- assert_kernel_context_is_current(dev_priv);
+ assert_kernel_context_is_current(dev_priv);
+ }
i915_gem_contexts_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -4597,14 +4736,14 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
/* As the idle_work is rearming if it detects a race, play safe and
* repeat the flush until it is definitely idle.
*/
- while (flush_delayed_work(&dev_priv->gt.idle_work))
- ;
+ drain_delayed_work(&dev_priv->gt.idle_work);
/* Assert that we sucessfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
- WARN_ON(!intel_engines_are_idle(dev_priv));
+ if (WARN_ON(!intel_engines_are_idle(dev_priv)))
+ i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
/*
* Neither the BIOS, ourselves or any other kernel
@@ -4626,11 +4765,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machine in an unusable condition.
*/
i915_gem_sanitize(dev_priv);
- goto out_rpm_put;
+
+ intel_runtime_pm_put(dev_priv);
+ return 0;
err_unlock:
mutex_unlock(&dev->struct_mutex);
-out_rpm_put:
intel_runtime_pm_put(dev_priv);
return ret;
}
@@ -4643,6 +4783,7 @@ void i915_gem_resume(struct drm_i915_private *dev_priv)
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev_priv);
+ i915_gem_restore_fences(dev_priv);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
@@ -4756,6 +4897,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
+ if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ ret = -EIO;
+ goto out;
+ }
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
@@ -4786,7 +4931,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return false;
/* TODO: make semaphores and Execlists play nicely together */
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return false;
if (value >= 0)
@@ -4803,11 +4948,18 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ /*
+ * We need to fallback to 4K pages since gvt gtt handling doesn't
+ * support huge page entries - we will need to check either hypervisor
+ * mm can support huge guest page or just do emulation in gvt.
+ */
+ if (intel_vgpu_active(dev_priv))
+ mkwrite_device_info(dev_priv)->page_sizes =
+ I915_GTT_PAGE_SIZE_4K;
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
} else {
@@ -4815,18 +4967,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
}
+ ret = i915_gem_init_userptr(dev_priv);
+ if (ret)
+ return ret;
+
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If
* we hold the forcewake during initialisation these problems
* just magically go away.
*/
+ mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = i915_gem_init_userptr(dev_priv);
- if (ret)
- goto out_unlock;
-
ret = i915_gem_init_ggtt(dev_priv);
if (ret)
goto out_unlock;
@@ -4845,8 +4998,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
- i915_gem_set_wedged(dev_priv);
+ if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+ DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ i915_gem_set_wedged(dev_priv);
+ }
ret = 0;
}
@@ -4946,11 +5101,15 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
goto err_priorities;
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+
+ spin_lock_init(&dev_priv->mm.obj_lock);
+ spin_lock_init(&dev_priv->mm.free_lock);
init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4962,6 +5121,10 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->fb_tracking.lock);
+ err = i915_gemfs_init(dev_priv);
+ if (err)
+ DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
+
return 0;
err_priorities:
@@ -5000,6 +5163,8 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();
+
+ i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -5038,12 +5203,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
i915_gem_drain_freed_objects(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
+ spin_lock(&dev_priv->mm.obj_lock);
for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, global_link)
+ list_for_each_entry(obj, *p, mm.link)
__start_cpu_write(obj);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ spin_unlock(&dev_priv->mm.obj_lock);
return 0;
}
@@ -5362,7 +5527,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- pages = obj->mm.pages;
+ pages = fetch_and_zero(&obj->mm.pages);
+ if (pages) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ __i915_gem_object_reset_page_iter(obj);
+
+ spin_lock(&i915->mm.obj_lock);
+ list_del(&obj->mm.link);
+ spin_unlock(&i915->mm.obj_lock);
+ }
+
obj->ops = &i915_gem_phys_ops;
err = ____i915_gem_object_get_pages(obj);
@@ -5389,6 +5564,7 @@ err_unlock:
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
+#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 8a04d33055be..f663cd919795 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
intel_fb_obj_flush(obj, ORIGIN_CPU);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8afd2ce59b8d..f782cf2069c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -107,14 +107,9 @@ static void lut_close(struct i915_gem_context *ctx)
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
- struct drm_i915_gem_object *obj = vma->obj;
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
-
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
-
- __i915_gem_object_release_unless_active(obj);
+ __i915_gem_object_release_unless_active(vma->obj);
}
rcu_read_unlock();
}
@@ -200,6 +195,11 @@ static void context_close(struct i915_gem_context *ctx)
{
i915_gem_context_set_closed(ctx);
+ /*
+ * The LUT uses the VMA as a backpointer to unref the object,
+ * so we need to clear the LUT before we close all the VMA (inside
+ * the ppgtt).
+ */
lut_close(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base);
@@ -316,7 +316,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* present or not in use we still need a small bias as ring wraparound
* at offset 0 sometimes hangs. No idea why.
*/
- if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
+ if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -409,7 +409,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
- if (!i915.enable_guc_submission)
+ if (!i915_modparams.enable_guc_submission)
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
@@ -418,14 +418,43 @@ out:
return ctx;
}
+static struct i915_gem_context *
+create_kernel_context(struct drm_i915_private *i915, int prio)
+{
+ struct i915_gem_context *ctx;
+
+ ctx = i915_gem_create_context(i915, NULL);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ i915_gem_context_clear_bannable(ctx);
+ ctx->priority = prio;
+ ctx->ring_size = PAGE_SIZE;
+
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+ return ctx;
+}
+
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+ struct i915_gem_context *ctx;
+
+ /* Keep the context ref so that we can free it immediately ourselves */
+ ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+ context_close(ctx);
+ i915_gem_context_free(ctx);
+}
+
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
+ int err;
- /* Init should only be called once per module load. Eventually the
- * restriction on the context_disabled check can be loosened. */
- if (WARN_ON(dev_priv->kernel_context))
- return 0;
+ GEM_BUG_ON(dev_priv->kernel_context);
INIT_LIST_HEAD(&dev_priv->contexts.list);
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -433,7 +462,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
}
@@ -443,28 +472,38 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->contexts.hw_ida);
- ctx = i915_gem_create_context(dev_priv, NULL);
+ /* lowest priority; idle task */
+ ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
- DRM_ERROR("Failed to create default global context (error %ld)\n",
- PTR_ERR(ctx));
- return PTR_ERR(ctx);
+ DRM_ERROR("Failed to create default global context\n");
+ err = PTR_ERR(ctx);
+ goto err;
}
-
- /* For easy recognisablity, we want the kernel context to be 0 and then
+ /*
+ * For easy recognisablity, we want the kernel context to be 0 and then
* all user contexts will have non-zero hw_id.
*/
GEM_BUG_ON(ctx->hw_id);
-
- i915_gem_context_clear_bannable(ctx);
- ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+ /* highest priority; preempting task */
+ ctx = create_kernel_context(dev_priv, INT_MAX);
+ if (IS_ERR(ctx)) {
+ DRM_ERROR("Failed to create default preempt context\n");
+ err = PTR_ERR(ctx);
+ goto err_kernel_context;
+ }
+ dev_priv->preempt_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
dev_priv->engine[RCS]->context_size ? "logical" :
"fake");
return 0;
+
+err_kernel_context:
+ destroy_kernel_context(&dev_priv->kernel_context);
+err:
+ return err;
}
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -485,7 +524,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
}
/* Force the GPU state to be restored on enabling */
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
@@ -509,15 +548,10 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx;
-
lockdep_assert_held(&i915->drm.struct_mutex);
- /* Keep the context so that we can free it immediately ourselves */
- ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
- context_close(ctx);
- i915_gem_context_free(ctx);
+ destroy_kernel_context(&i915->preempt_context);
+ destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
ida_destroy(&i915->contexts.hw_ida);
@@ -570,7 +604,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags)
enum intel_engine_id id;
const int num_rings =
/* Use an extended w/a on gen7 if signalling from other rings */
- (i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
+ (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len;
@@ -839,7 +873,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return 0;
if (!req->ctx->engine[engine->id].state) {
@@ -1038,6 +1072,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_BANNABLE:
args->value = i915_gem_context_is_bannable(ctx);
break;
+ case I915_CONTEXT_PARAM_PRIORITY:
+ args->value = ctx->priority;
+ break;
default:
ret = -EINVAL;
break;
@@ -1093,6 +1130,26 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
i915_gem_context_clear_bannable(ctx);
break;
+
+ case I915_CONTEXT_PARAM_PRIORITY:
+ {
+ int priority = args->value;
+
+ if (args->size)
+ ret = -EINVAL;
+ else if (!to_i915(dev)->engine[RCS]->schedule)
+ ret = -ENODEV;
+ else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+ priority < I915_CONTEXT_MIN_USER_PRIORITY)
+ ret = -EINVAL;
+ else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+ !capable(CAP_SYS_NICE))
+ ret = -EPERM;
+ else
+ ctx->priority = priority;
+ }
+ break;
+
default:
ret = -EINVAL;
break;
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6176e589cf09..864439a214c8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,11 +256,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return drm_gem_dmabuf_export(dev, &exp_info);
}
-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- return dma_buf_map_attachment(obj->base.import_attach,
- DMA_BIDIRECTIONAL);
+ struct sg_table *pages;
+ unsigned int sg_page_sizes;
+
+ pages = dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+ return 0;
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e161d383b526..8daa8a78cdc0 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,6 +33,10 @@
#include "intel_drv.h"
#include "i915_trace.h"
+I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
+ bool fail_if_busy:1;
+} igt_evict_ctl;)
+
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -81,7 +85,7 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
- if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+ if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
return false;
list_add(&vma->evict_link, unwind);
@@ -205,6 +209,9 @@ search_again:
* the kernel's there is no more we can evict.
*/
if (!ggtt_is_idle(dev_priv)) {
+ if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
+ return -EBUSY;
+
ret = ggtt_flush(dev_priv);
if (ret)
return ret;
@@ -330,6 +337,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
+ if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
+ ret = -ENOSPC;
+ break;
+ }
+
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 83876a1c8d98..435ed95df144 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -58,6 +58,7 @@ enum {
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_VALIDATED BIT(30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 30)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
@@ -268,6 +269,11 @@ static inline u64 gen8_noncanonical_addr(u64 address)
return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
+static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
+{
+ return eb->engine->needs_cmd_parser && eb->batch_len;
+}
+
static int eb_create(struct i915_execbuffer *eb)
{
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
@@ -365,12 +371,12 @@ eb_pin_vma(struct i915_execbuffer *eb,
return false;
if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
- if (unlikely(i915_vma_get_fence(vma))) {
+ if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma);
return false;
}
- if (i915_vma_pin_fence(vma))
+ if (vma->fence)
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
@@ -383,7 +389,7 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
- i915_vma_unpin_fence(vma);
+ __i915_vma_unpin_fence(vma);
__i915_vma_unpin(vma);
}
@@ -561,13 +567,13 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
}
if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
- err = i915_vma_get_fence(vma);
+ err = i915_vma_pin_fence(vma);
if (unlikely(err)) {
i915_vma_unpin(vma);
return err;
}
- if (i915_vma_pin_fence(vma))
+ if (vma->fence)
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
@@ -678,7 +684,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
- struct drm_i915_gem_object *uninitialized_var(obj);
+ struct drm_i915_gem_object *obj;
unsigned int i;
int err;
@@ -724,19 +730,17 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_obj;
}
+ /* transfer ref to ctx */
vma->open_count++;
list_add(&lut->obj_link, &obj->lut_list);
list_add(&lut->ctx_link, &eb->ctx->handles_list);
lut->ctx = eb->ctx;
lut->handle = handle;
- /* transfer ref to ctx */
- obj = NULL;
-
add_vma:
err = eb_add_vma(eb, i, vma);
if (unlikely(err))
- goto err_obj;
+ goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
@@ -765,8 +769,7 @@ add_vma:
return eb_reserve(eb);
err_obj:
- if (obj)
- i915_gem_object_put(obj);
+ i915_gem_object_put(obj);
err_vma:
eb->vma[i] = NULL;
return err;
@@ -975,7 +978,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
return ERR_PTR(err);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE | PIN_NONBLOCK);
+ PIN_MAPPABLE |
+ PIN_NONBLOCK |
+ PIN_NONFAULT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
@@ -1163,6 +1168,13 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (unlikely(!cache->rq)) {
int err;
+ /* If we need to copy for the cmdparser, we will stall anyway */
+ if (eb_use_cmdparser(eb))
+ return ERR_PTR(-EWOULDBLOCK);
+
+ if (!intel_engine_can_store_dword(eb->engine))
+ return ERR_PTR(-ENODEV);
+
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@@ -1187,9 +1199,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !reservation_object_test_signaled_rcu(vma->resv, true)) &&
- __intel_engine_can_store_dword(eb->reloc_cache.gen,
- eb->engine->class)) {
+ !reservation_object_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -1581,7 +1591,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
const unsigned int count = eb->buffer_count;
unsigned int i;
- if (unlikely(i915.prefault_disable))
+ if (unlikely(i915_modparams.prefault_disable))
return 0;
for (i = 0; i < count; i++) {
@@ -2190,6 +2200,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
int out_fence_fd = -1;
int err;
+ BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
~__EXEC_OBJECT_UNKNOWN_FLAGS);
@@ -2303,7 +2314,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (eb.engine->needs_cmd_parser && eb.batch_len) {
+ if (eb_use_cmdparser(&eb)) {
struct i915_vma *vma;
vma = eb_parse(&eb, drm_is_current_master(file));
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 5fe2cd8c8f28..012250f25255 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
/* Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
- i915_gem_release_mmap(fence->vma->obj);
+ GEM_BUG_ON(fence->vma->fence != fence);
+ i915_vma_revoke_mmap(fence->vma);
fence->vma->fence = NULL;
fence->vma = NULL;
@@ -280,8 +281,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
*
* 0 on success, negative error code on failure.
*/
-int
-i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
@@ -299,6 +299,8 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
struct drm_i915_fence_reg *fence;
list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+ GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
if (fence->pin_count)
continue;
@@ -313,7 +315,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}
/**
- * i915_vma_get_fence - set up fencing for a vma
+ * i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
@@ -331,10 +333,11 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
* 0 on success, negative error code on failure.
*/
int
-i915_vma_get_fence(struct i915_vma *vma)
+i915_vma_pin_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ int err;
/* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
@@ -344,6 +347,8 @@ i915_vma_get_fence(struct i915_vma *vma)
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
+ GEM_BUG_ON(fence->vma != vma);
+ fence->pin_count++;
if (!fence->dirty) {
list_move_tail(&fence->link,
&fence->i915->mm.fence_list);
@@ -353,10 +358,76 @@ i915_vma_get_fence(struct i915_vma *vma)
fence = fence_find(vma->vm->i915);
if (IS_ERR(fence))
return PTR_ERR(fence);
+
+ GEM_BUG_ON(fence->pin_count);
+ fence->pin_count++;
} else
return 0;
- return fence_update(fence, set);
+ err = fence_update(fence, set);
+ if (err)
+ goto out_unpin;
+
+ GEM_BUG_ON(fence->vma != set);
+ GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+
+ if (set)
+ return 0;
+
+out_unpin:
+ fence->pin_count--;
+ return err;
+}
+
+/**
+ * i915_reserve_fence - Reserve a fence for vGPU
+ * @dev_priv: i915 device private
+ *
+ * This function walks the fence regs looking for a free one and remove
+ * it from the fence_list. It is used to reserve fence for vGPU to use.
+ */
+struct drm_i915_fence_reg *
+i915_reserve_fence(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_fence_reg *fence;
+ int count;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Keep at least one fence available for the display engine. */
+ count = 0;
+ list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
+ count += !fence->pin_count;
+ if (count <= 1)
+ return ERR_PTR(-ENOSPC);
+
+ fence = fence_find(dev_priv);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (fence->vma) {
+ /* Force-remove fence from VMA */
+ ret = fence_update(fence, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ list_del(&fence->link);
+ return fence;
+}
+
+/**
+ * i915_unreserve_fence - Reclaim a reserved fence
+ * @fence: the fence reg
+ *
+ * This function add a reserved fence register from vGPU to the fence_list.
+ */
+void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
+{
+ lockdep_assert_held(&fence->i915->drm.struct_mutex);
+
+ list_add(&fence->link, &fence->i915->mm.fence_list);
}
/**
@@ -378,8 +449,10 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+ GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
if (fence->vma)
- i915_gem_release_mmap(fence->vma->obj);
+ i915_vma_revoke_mmap(fence->vma);
}
}
@@ -399,13 +472,15 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
+ GEM_BUG_ON(vma && vma->fence != reg);
+
/*
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ad524cb0f6fc..2af65ecf2df8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -135,11 +135,12 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
{
- bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
+ if (!dev_priv->info.has_aliasing_ppgtt)
+ return 0;
+
has_full_ppgtt = dev_priv->info.has_full_ppgtt;
has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
@@ -149,9 +150,6 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
}
- if (!has_aliasing_ppgtt)
- return 0;
-
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
@@ -180,7 +178,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
+ if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
if (has_full_48bit_ppgtt)
return 3;
@@ -188,7 +186,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 2;
}
- return has_aliasing_ppgtt ? 1 : 0;
+ return 1;
}
static int ppgtt_bind_vma(struct i915_vma *vma,
@@ -205,8 +203,6 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
return ret;
}
- vma->pages = vma->obj->mm.pages;
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma->obj->gt_ro)
@@ -222,6 +218,30 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
+static int ppgtt_set_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pages);
+
+ vma->pages = vma->obj->mm.pages;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+static void clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
@@ -230,13 +250,13 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
switch (level) {
case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED_INDEX;
+ pte |= PPAT_UNCACHED;
break;
case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC_INDEX;
+ pte |= PPAT_DISPLAY_ELLC;
break;
default:
- pte |= PPAT_CACHED_INDEX;
+ pte |= PPAT_CACHED;
break;
}
@@ -249,9 +269,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
- pde |= PPAT_CACHED_PDE_INDEX;
+ pde |= PPAT_CACHED_PDE;
else
- pde |= PPAT_UNCACHED_INDEX;
+ pde |= PPAT_UNCACHED;
return pde;
}
@@ -356,39 +376,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct page *page;
+ struct pagevec *pvec = &vm->free_pages;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- if (vm->free_pages.nr)
- return vm->free_pages.pages[--vm->free_pages.nr];
+ if (likely(pvec->nr))
+ return pvec->pages[--pvec->nr];
+
+ if (!vm->pt_kmap_wc)
+ return alloc_page(gfp);
+
+ /* A placeholder for a specific mutex to guard the WC stash */
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+
+ /* Look in our global stash of WC pages... */
+ pvec = &vm->i915->mm.wc_stash;
+ if (likely(pvec->nr))
+ return pvec->pages[--pvec->nr];
+
+ /* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+ do {
+ struct page *page;
- page = alloc_page(gfp);
- if (!page)
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ break;
+
+ pvec->pages[pvec->nr++] = page;
+ } while (pagevec_space(pvec));
+
+ if (unlikely(!pvec->nr))
return NULL;
- if (vm->pt_kmap_wc)
- set_pages_array_wc(&page, 1);
+ set_pages_array_wc(pvec->pages, pvec->nr);
- return page;
+ return pvec->pages[--pvec->nr];
}
-static void vm_free_pages_release(struct i915_address_space *vm)
+static void vm_free_pages_release(struct i915_address_space *vm,
+ bool immediate)
{
- GEM_BUG_ON(!pagevec_count(&vm->free_pages));
+ struct pagevec *pvec = &vm->free_pages;
+
+ GEM_BUG_ON(!pagevec_count(pvec));
- if (vm->pt_kmap_wc)
- set_pages_array_wb(vm->free_pages.pages,
- pagevec_count(&vm->free_pages));
+ if (vm->pt_kmap_wc) {
+ struct pagevec *stash = &vm->i915->mm.wc_stash;
- __pagevec_release(&vm->free_pages);
+ /* When we use WC, first fill up the global stash and then
+ * only if full immediately free the overflow.
+ */
+
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ if (pagevec_space(stash)) {
+ do {
+ stash->pages[stash->nr++] =
+ pvec->pages[--pvec->nr];
+ if (!pvec->nr)
+ return;
+ } while (pagevec_space(stash));
+
+ /* As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (!immediate)
+ return;
+ }
+
+ set_pages_array_wb(pvec->pages, pvec->nr);
+ }
+
+ __pagevec_release(pvec);
}
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
if (!pagevec_add(&vm->free_pages, page))
- vm_free_pages_release(vm);
+ vm_free_pages_release(vm, false);
}
static int __setup_page_dma(struct i915_address_space *vm,
@@ -434,10 +501,8 @@ static void fill_page_dma(struct i915_address_space *vm,
const u64 val)
{
u64 * const vaddr = kmap_atomic(p->page);
- int i;
- for (i = 0; i < 512; i++)
- vaddr[i] = val;
+ memset64(vaddr, val, PAGE_SIZE / sizeof(val));
kunmap_atomic(vaddr);
}
@@ -452,12 +517,73 @@ static void fill_page_dma_32(struct i915_address_space *vm,
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
- return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
+ struct page *page = NULL;
+ dma_addr_t addr;
+ int order;
+
+ /*
+ * In order to utilize 64K pages for an object with a size < 2M, we will
+ * need to support a 64K scratch page, given that every 16th entry for a
+ * page-table operating in 64K mode must point to a properly aligned 64K
+ * region, including any PTEs which happen to point to scratch.
+ *
+ * This is only relevant for the 48b PPGTT where we support
+ * huge-gtt-pages, see also i915_vma_insert().
+ *
+ * TODO: we should really consider write-protecting the scratch-page and
+ * sharing between ppgtt
+ */
+ if (i915_vm_is_48bit(vm) &&
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+ order = get_order(I915_GTT_PAGE_SIZE_64K);
+ page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
+ if (page) {
+ addr = dma_map_page(vm->dma, page, 0,
+ I915_GTT_PAGE_SIZE_64K,
+ PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(vm->dma, addr))) {
+ __free_pages(page, order);
+ page = NULL;
+ }
+
+ if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
+ dma_unmap_page(vm->dma, addr,
+ I915_GTT_PAGE_SIZE_64K,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(page, order);
+ page = NULL;
+ }
+ }
+ }
+
+ if (!page) {
+ order = 0;
+ page = alloc_page(gfp | __GFP_ZERO);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(vm->dma, addr))) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+ }
+
+ vm->scratch_page.page = page;
+ vm->scratch_page.daddr = addr;
+ vm->scratch_page.order = order;
+
+ return 0;
}
static void cleanup_scratch_page(struct i915_address_space *vm)
{
- cleanup_page_dma(vm, &vm->scratch_page);
+ struct i915_page_dma *p = &vm->scratch_page;
+
+ dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(p->page, p->order);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -925,6 +1051,105 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
cache_level);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
+ struct i915_page_directory_pointer **pdps,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level)
+{
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ u64 start = vma->node.start;
+ dma_addr_t rem = iter->sg->length;
+
+ do {
+ struct gen8_insert_pte idx = gen8_insert_pte(start);
+ struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
+ struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+ unsigned int page_size;
+ bool maybe_64K = false;
+ gen8_pte_t encode = pte_encode;
+ gen8_pte_t *vaddr;
+ u16 index, max;
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
+ index = idx.pde;
+ max = I915_PDES;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ encode |= GEN8_PDE_PS_2M;
+
+ vaddr = kmap_atomic_px(pd);
+ } else {
+ struct i915_page_table *pt = pd->page_table[idx.pde];
+
+ index = idx.pte;
+ max = GEN8_PTES;
+ page_size = I915_GTT_PAGE_SIZE;
+
+ if (!index &&
+ vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (max - index) << PAGE_SHIFT))
+ maybe_64K = true;
+
+ vaddr = kmap_atomic_px(pt);
+ }
+
+ do {
+ GEM_BUG_ON(iter->sg->length < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = iter->sg->length;
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (maybe_64K && index < max &&
+ !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (max - index) << PAGE_SHIFT)))
+ maybe_64K = false;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < max);
+
+ kunmap_atomic(vaddr);
+
+ /*
+ * Is it safe to mark the 2M block as 64K? -- Either we have
+ * filled whole page-table with 64K entries, or filled part of
+ * it and have reached the end of the sg table and we have
+ * enough padding.
+ */
+ if (maybe_64K &&
+ (index == max ||
+ (i915_vm_has_scratch_64K(vma->vm) &&
+ !iter->sg && IS_ALIGNED(vma->node.start +
+ vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)))) {
+ vaddr = kmap_atomic_px(pd);
+ vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+ kunmap_atomic(vaddr);
+ page_size = I915_GTT_PAGE_SIZE_64K;
+ }
+
+ vma->page_sizes.gtt |= page_size;
+ } while (iter->sg);
}
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
@@ -935,11 +1160,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
- &idx, cache_level))
- GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+ if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+ } else {
+ struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+
+ while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+ &iter, &idx, cache_level))
+ GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ }
}
static void gen8_free_page_tables(struct i915_address_space *vm,
@@ -1098,19 +1330,22 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
unsigned int pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
+ int count = gen8_pte_count(start, length);
+
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind;
- gen8_initialize_pt(vm, pt);
+ if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
+ gen8_initialize_pt(vm, pt);
gen8_ppgtt_set_pde(vm, pd, pt, pde);
pd->used_pdes++;
GEM_BUG_ON(pd->used_pdes > I915_PDES);
}
- pt->used_ptes += gen8_pte_count(start, length);
+ pt->used_ptes += count;
}
return 0;
@@ -1333,18 +1568,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1ULL << 48 :
1ULL << 32;
- ret = gen8_init_scratch(&ppgtt->base);
- if (ret) {
- ppgtt->base.total = 0;
- return ret;
- }
-
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
ppgtt->base.pt_kmap_wc = true;
+ ret = gen8_init_scratch(&ppgtt->base);
+ if (ret) {
+ ppgtt->base.total = 0;
+ return ret;
+ }
+
if (use_4lvl(vm)) {
ret = setup_px(&ppgtt->base, &ppgtt->pml4);
if (ret)
@@ -1381,6 +1616,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->debug_dump = gen8_dump_ppgtt;
return 0;
@@ -1652,6 +1889,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
} while (1);
kunmap_atomic(vaddr);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
static int gen6_alloc_va_range(struct i915_address_space *vm,
@@ -1820,6 +2059,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->debug_dump = gen6_dump_ppgtt;
@@ -1859,13 +2100,13 @@ static void i915_address_space_init(struct i915_address_space *vm,
INIT_LIST_HEAD(&vm->unbound_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages, false);
+ pagevec_init(&vm->free_pages);
}
static void i915_address_space_fini(struct i915_address_space *vm)
{
if (pagevec_count(&vm->free_pages))
- vm_free_pages_release(vm);
+ vm_free_pages_release(vm, true);
i915_gem_timeline_fini(&vm->timeline);
drm_mm_takedown(&vm->mm);
@@ -1878,15 +2119,32 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_BC(dev_priv))
+ else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+
+ /*
+ * To support 64K PTEs we need to first enable the use of the
+ * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+ * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+ * shouldn't be needed after GEN10.
+ *
+ * 64K pages were first introduced from BDW+, although technically they
+ * only *work* from gen9+. For pre-BDW we instead have the option for
+ * 32K pages, but we don't currently have any support for it in our
+ * driver.
+ */
+ if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(dev_priv) <= 10)
+ I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
+ I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
}
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
@@ -1896,7 +2154,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return 0;
if (!USES_PPGTT(dev_priv))
@@ -2331,12 +2589,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- if (unlikely(!vma->pages)) {
- int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (obj->gt_ro)
@@ -2346,6 +2598,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
@@ -2373,12 +2627,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
u32 pte_flags;
int ret;
- if (unlikely(!vma->pages)) {
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma->obj->gt_ro)
@@ -2393,7 +2641,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
vma->node.start,
vma->size);
if (ret)
- goto err_pages;
+ return ret;
}
appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
@@ -2407,17 +2655,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
}
return 0;
-
-err_pages:
- if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
- if (vma->pages != vma->obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
- }
- return ret;
}
static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
@@ -2455,6 +2692,21 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
+static int ggtt_set_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ GEM_BUG_ON(vma->pages);
+
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
static void i915_gtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -2591,6 +2843,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma, *vn;
+ struct pagevec *pvec;
ggtt->base.closed = true;
@@ -2614,6 +2867,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
}
ggtt->base.cleanup(&ggtt->base);
+
+ pvec = &dev_priv->mm.wc_stash;
+ if (pvec->nr) {
+ set_pages_array_wb(pvec->pages, pvec->nr);
+ __pagevec_release(pvec);
+ }
+
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
@@ -2709,13 +2969,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
/*
- * On BXT writes larger than 64 bit to the GTT pagetable range will be
- * dropped. For WC mappings in general we have 64 byte burst writes
- * when the WC buffer is flushed, so we can't use it, but have to
+ * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -2735,41 +2995,209 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
+static struct intel_ppat_entry *
+__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
+{
+ struct intel_ppat_entry *entry = &ppat->entries[index];
+
+ GEM_BUG_ON(index >= ppat->max_entries);
+ GEM_BUG_ON(test_bit(index, ppat->used));
+
+ entry->ppat = ppat;
+ entry->value = value;
+ kref_init(&entry->ref);
+ set_bit(index, ppat->used);
+ set_bit(index, ppat->dirty);
+
+ return entry;
+}
+
+static void __free_ppat_entry(struct intel_ppat_entry *entry)
+{
+ struct intel_ppat *ppat = entry->ppat;
+ unsigned int index = entry - ppat->entries;
+
+ GEM_BUG_ON(index >= ppat->max_entries);
+ GEM_BUG_ON(!test_bit(index, ppat->used));
+
+ entry->value = ppat->clear_value;
+ clear_bit(index, ppat->used);
+ set_bit(index, ppat->dirty);
+}
+
+/**
+ * intel_ppat_get - get a usable PPAT entry
+ * @i915: i915 device instance
+ * @value: the PPAT value required by the caller
+ *
+ * The function tries to search if there is an existing PPAT entry which
+ * matches with the required value. If perfectly matched, the existing PPAT
+ * entry will be used. If only partially matched, it will try to check if
+ * there is any available PPAT index. If yes, it will allocate a new PPAT
+ * index for the required entry and update the HW. If not, the partially
+ * matched entry will be used.
+ */
+const struct intel_ppat_entry *
+intel_ppat_get(struct drm_i915_private *i915, u8 value)
+{
+ struct intel_ppat *ppat = &i915->ppat;
+ struct intel_ppat_entry *entry;
+ unsigned int scanned, best_score;
+ int i;
+
+ GEM_BUG_ON(!ppat->max_entries);
+
+ scanned = best_score = 0;
+ for_each_set_bit(i, ppat->used, ppat->max_entries) {
+ unsigned int score;
+
+ score = ppat->match(ppat->entries[i].value, value);
+ if (score > best_score) {
+ entry = &ppat->entries[i];
+ if (score == INTEL_PPAT_PERFECT_MATCH) {
+ kref_get(&entry->ref);
+ return entry;
+ }
+ best_score = score;
+ }
+ scanned++;
+ }
+
+ if (scanned == ppat->max_entries) {
+ if (!best_score)
+ return ERR_PTR(-ENOSPC);
+
+ kref_get(&entry->ref);
+ return entry;
+ }
+
+ i = find_first_zero_bit(ppat->used, ppat->max_entries);
+ entry = __alloc_ppat_entry(ppat, i, value);
+ ppat->update_hw(i915);
+ return entry;
+}
+
+static void release_ppat(struct kref *kref)
+{
+ struct intel_ppat_entry *entry =
+ container_of(kref, struct intel_ppat_entry, ref);
+ struct drm_i915_private *i915 = entry->ppat->i915;
+
+ __free_ppat_entry(entry);
+ entry->ppat->update_hw(i915);
+}
+
+/**
+ * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
+ * @entry: an intel PPAT entry
+ *
+ * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
+ * entry is dynamically allocated, its reference count will be decreased. Once
+ * the reference count becomes into zero, the PPAT index becomes free again.
+ */
+void intel_ppat_put(const struct intel_ppat_entry *entry)
+{
+ struct intel_ppat *ppat = entry->ppat;
+ unsigned int index = entry - ppat->entries;
+
+ GEM_BUG_ON(!ppat->max_entries);
+
+ kref_put(&ppat->entries[index].ref, release_ppat);
+}
+
+static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ int i;
+
+ for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
+ I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
+ clear_bit(i, ppat->dirty);
+ }
+}
+
+static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ u64 pat = 0;
+ int i;
+
+ for (i = 0; i < ppat->max_entries; i++)
+ pat |= GEN8_PPAT(i, ppat->entries[i].value);
+
+ bitmap_clear(ppat->dirty, 0, ppat->max_entries);
+
+ I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static unsigned int bdw_private_pat_match(u8 src, u8 dst)
+{
+ unsigned int score = 0;
+ enum {
+ AGE_MATCH = BIT(0),
+ TC_MATCH = BIT(1),
+ CA_MATCH = BIT(2),
+ };
+
+ /* Cache attribute has to be matched. */
+ if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
+ return 0;
+
+ score |= CA_MATCH;
+
+ if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
+ score |= TC_MATCH;
+
+ if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
+ score |= AGE_MATCH;
+
+ if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
+ return INTEL_PPAT_PERFECT_MATCH;
+
+ return score;
+}
+
+static unsigned int chv_private_pat_match(u8 src, u8 dst)
+{
+ return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
+ INTEL_PPAT_PERFECT_MATCH : 0;
+}
+
+static void cnl_setup_private_ppat(struct intel_ppat *ppat)
{
+ ppat->max_entries = 8;
+ ppat->update_hw = cnl_private_pat_update_hw;
+ ppat->match = bdw_private_pat_match;
+ ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
+
/* XXX: spec is unclear if this is still needed for CNL+ */
- if (!USES_PPGTT(dev_priv)) {
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
+ if (!USES_PPGTT(ppat->i915)) {
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
return;
}
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
- I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
+ __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct intel_ppat *ppat)
{
- u64 pat;
-
- pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
- GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
- GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
- GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
- GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
- GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
- GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
- GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ ppat->max_entries = 8;
+ ppat->update_hw = bdw_private_pat_update_hw;
+ ppat->match = bdw_private_pat_match;
+ ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- if (!USES_PPGTT(dev_priv))
+ if (!USES_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -2783,17 +3211,26 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
* So we can still hold onto all our assumptions wrt cpu
* clflushing on LLC machines.
*/
- pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
+ return;
+ }
- /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
- * write would work. */
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
+ __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
+ __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
+ __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
+ __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void chv_setup_private_ppat(struct intel_ppat *ppat)
{
- u64 pat;
+ ppat->max_entries = 8;
+ ppat->update_hw = bdw_private_pat_update_hw;
+ ppat->match = chv_private_pat_match;
+ ppat->clear_value = CHV_PPAT_SNOOP;
/*
* Map WB on BDW to snooped on CHV.
@@ -2813,17 +3250,15 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
- pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
- GEN8_PPAT(1, 0) |
- GEN8_PPAT(2, 0) |
- GEN8_PPAT(3, 0) |
- GEN8_PPAT(4, CHV_PPAT_SNOOP) |
- GEN8_PPAT(5, CHV_PPAT_SNOOP) |
- GEN8_PPAT(6, CHV_PPAT_SNOOP) |
- GEN8_PPAT(7, CHV_PPAT_SNOOP);
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+ __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 1, 0);
+ __alloc_ppat_entry(ppat, 2, 0);
+ __alloc_ppat_entry(ppat, 3, 0);
+ __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
}
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -2834,6 +3269,31 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
+static void setup_private_pat(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ int i;
+
+ ppat->i915 = dev_priv;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ cnl_setup_private_ppat(ppat);
+ else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
+ chv_setup_private_ppat(ppat);
+ else
+ bdw_setup_private_ppat(ppat);
+
+ GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
+
+ for_each_clear_bit(i, ppat->used, ppat->max_entries) {
+ ppat->entries[i].value = ppat->clear_value;
+ ppat->entries[i].ppat = ppat;
+ set_bit(i, ppat->dirty);
+ }
+
+ ppat->update_hw(dev_priv);
+}
+
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
@@ -2866,17 +3326,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
}
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
-
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
-
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.insert_page = gen8_ggtt_insert_page;
ggtt->base.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
@@ -2894,6 +3348,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->invalidate = gen6_ggtt_invalidate;
+ setup_private_pat(dev_priv);
+
return ggtt_probe_common(ggtt, size);
}
@@ -2933,6 +3389,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.insert_entries = gen6_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -2978,6 +3436,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.clear_range = i915_ggtt_clear_range;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
@@ -3014,7 +3474,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* currently don't have any bits spare to pass in this upper
* restriction!
*/
- if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
+ if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
}
@@ -3127,8 +3587,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_link) {
+ list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3151,13 +3610,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ bitmap_set(ppat->dirty, 0, ppat->max_entries);
+ dev_priv->ppat.update_hw(dev_priv);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b4e3aa7c0ce1..93211a96fdad 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,7 +42,13 @@
#include "i915_gem_request.h"
#include "i915_selftest.h"
-#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_PAGE_SIZE_4K BIT(12)
+#define I915_GTT_PAGE_SIZE_64K BIT(16)
+#define I915_GTT_PAGE_SIZE_2M BIT(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
#define I915_FENCE_REG_NONE -1
@@ -126,13 +132,13 @@ typedef u64 gen8_ppgtt_pml4e_t;
* tables */
#define GEN8_PDPE_MASK 0x1ff
-#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
-#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE 0 /* WB LLC */
+#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
#define CHV_PPAT_SNOOP (1<<6)
-#define GEN8_PPAT_AGE(x) (x<<4)
+#define GEN8_PPAT_AGE(x) ((x)<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
@@ -143,6 +149,14 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
+#define GEN8_PPAT_GET_CA(x) ((x) & 3)
+#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
+#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
+#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
+
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M BIT(7)
+
struct sg_table;
struct intel_rotation_info {
@@ -202,6 +216,7 @@ struct i915_vma;
struct i915_page_dma {
struct page *page;
+ int order;
union {
dma_addr_t daddr;
@@ -324,6 +339,8 @@ struct i915_address_space {
int (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
};
@@ -336,6 +353,12 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
return (vm->total - 1) >> 32;
}
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+ return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -536,6 +559,37 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
return container_of(vm, struct i915_ggtt, base);
}
+#define INTEL_MAX_PPAT_ENTRIES 8
+#define INTEL_PPAT_PERFECT_MATCH (~0U)
+
+struct intel_ppat;
+
+struct intel_ppat_entry {
+ struct intel_ppat *ppat;
+ struct kref ref;
+ u8 value;
+};
+
+struct intel_ppat {
+ struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
+ DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
+ DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
+ unsigned int max_entries;
+ u8 clear_value;
+ /*
+ * Return a score to show how two PPAT values match,
+ * a INTEL_PPAT_PERFECT_MATCH indicates a perfect match
+ */
+ unsigned int (*match)(u8 src, u8 dst);
+ void (*update_hw)(struct drm_i915_private *i915);
+
+ struct drm_i915_private *i915;
+};
+
+const struct intel_ppat_entry *
+intel_ppat_get(struct drm_i915_private *i915, u8 value);
+void intel_ppat_put(const struct intel_ppat_entry *entry);
+
int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index c1f64ddaf8aa..ee83ec838ee7 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -44,12 +44,12 @@ static void internal_free_pages(struct sg_table *st)
kfree(st);
}
-static struct sg_table *
-i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
+ unsigned int sg_page_sizes;
unsigned int npages;
int max_order;
gfp_t gfp;
@@ -78,16 +78,17 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
create_st:
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
npages = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
sg = st->sgl;
st->nents = 0;
+ sg_page_sizes = 0;
do {
int order = min(fls(npages) - 1, max_order);
@@ -105,6 +106,7 @@ create_st:
} while (1);
sg_set_page(sg, page, PAGE_SIZE << order, 0);
+ sg_page_sizes |= PAGE_SIZE << order;
st->nents++;
npages -= 1 << order;
@@ -132,13 +134,17 @@ create_st:
* object are only valid whilst active and pinned.
*/
obj->mm.madv = I915_MADV_DONTNEED;
- return st;
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
err:
sg_set_page(sg, NULL, 0, 0);
sg_mark_end(sg);
internal_free_pages(st);
- return ERR_PTR(-ENOMEM);
+
+ return -ENOMEM;
}
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index c30d8f808185..63ce38c1cce9 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -69,7 +69,7 @@ struct drm_i915_gem_object_ops {
* being released or under memory pressure (where we attempt to
* reap pages for the shrinker).
*/
- struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*pwrite)(struct drm_i915_gem_object *,
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
- struct list_head global_link;
union {
struct rcu_head rcu;
struct llist_node freed;
@@ -123,6 +122,7 @@ struct drm_i915_gem_object {
/**
* Whether the object is currently in the GGTT mmap.
*/
+ unsigned int userfault_count;
struct list_head userfault_link;
struct list_head batch_pool_link;
@@ -160,7 +160,8 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
unsigned int bind_count;
unsigned int active_count;
- unsigned int pin_display;
+ /** Count of how many global VMA are currently pinned for use by HW */
+ unsigned int pin_global;
struct {
struct mutex lock; /* protects the pages and their use */
@@ -169,6 +170,35 @@ struct drm_i915_gem_object {
struct sg_table *pages;
void *mapping;
+ /* TODO: whack some of this into the error state */
+ struct i915_page_sizes {
+ /**
+ * The sg mask of the pages sg_table. i.e the mask of
+ * of the lengths for each sg entry.
+ */
+ unsigned int phys;
+
+ /**
+ * The gtt page sizes we are allowed to use given the
+ * sg mask and the supported page sizes. This will
+ * express the smallest unit we can use for the whole
+ * object, as well as the larger sizes we may be able
+ * to use opportunistically.
+ */
+ unsigned int sg;
+
+ /**
+ * The actual gtt page size usage. Since we can have
+ * multiple vma associated with this object we need to
+ * prevent any trampling of state, hence a copy of this
+ * struct also lives in each vma, therefore the gtt
+ * value here should only be read/write through the vma.
+ */
+ unsigned int gtt;
+ } page_sizes;
+
+ I915_SELFTEST_DECLARE(unsigned int page_mask);
+
struct i915_gem_object_page_iter {
struct scatterlist *sg_pos;
unsigned int sg_idx; /* in pages, but 32bit eek! */
@@ -178,6 +208,12 @@ struct drm_i915_gem_object {
} get_page;
/**
+ * Element within i915->mm.unbound_list or i915->mm.bound_list,
+ * locked by i915->mm.obj_lock.
+ */
+ struct list_head link;
+
+ /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 4dd4c2159a92..3703dc91eeda 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
return 0;
/* Recreate the page after shrinking */
- if (!so->vma->obj->mm.pages)
+ if (!i915_gem_object_has_pages(so->vma->obj))
so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 813a3b546d6e..d140fcf5c6a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -186,7 +186,7 @@ i915_priotree_init(struct i915_priotree *pt)
INIT_LIST_HEAD(&pt->signalers_list);
INIT_LIST_HEAD(&pt->waiters_list);
INIT_LIST_HEAD(&pt->link);
- pt->priority = INT_MIN;
+ pt->priority = I915_PRIORITY_INVALID;
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -416,7 +416,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
spin_lock_irq(&request->lock);
if (request->waitboost)
- atomic_dec(&request->i915->rps.num_waiters);
+ atomic_dec(&request->i915->gt_pm.rps.num_waiters);
dma_fence_signal_locked(&request->fence);
spin_unlock_irq(&request->lock);
@@ -556,7 +556,16 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
trace_i915_gem_request_submit(request);
+ /*
+ * We need to serialize use of the submit_request() callback with its
+ * hotplugging performed during an emergency i915_gem_set_wedged().
+ * We use the RCU mechanism to mark the critical section in order to
+ * force i915_gem_set_wedged() to wait until the submit_request() is
+ * completed before proceeding.
+ */
+ rcu_read_lock();
request->engine->submit_request(request);
+ rcu_read_unlock();
break;
case FENCE_FREE:
@@ -587,6 +596,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ /*
+ * Preempt contexts are reserved for exclusive use to inject a
+ * preemption context switch. They are never to be used for any trivial
+ * request!
+ */
+ GEM_BUG_ON(ctx == dev_priv->preempt_context);
+
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
@@ -1021,12 +1037,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
- u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+ u32 seqno, int state, unsigned long timeout_us)
{
struct intel_engine_cs *engine = req->engine;
unsigned int irq, cpu;
+ GEM_BUG_ON(!seqno);
+
+ /*
+ * Only wait for the request if we know it is likely to complete.
+ *
+ * We don't track the timestamps around requests, nor the average
+ * request length, so we do not have a good indicator that this
+ * request will complete within the timeout. What we do know is the
+ * order in which requests are executed by the engine and so we can
+ * tell if the request has started. If the request hasn't started yet,
+ * it is a fair assumption that it will not complete within our
+ * relatively short timeout.
+ */
+ if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+ return false;
+
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
@@ -1040,12 +1072,8 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
irq = atomic_read(&engine->irq_count);
timeout_us += local_clock_us(&cpu);
do {
- if (seqno != i915_gem_request_global_seqno(req))
- break;
-
- if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
- seqno))
- return true;
+ if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
+ return seqno == i915_gem_request_global_seqno(req);
/* Seqno are meant to be ordered *before* the interrupt. If
* we see an interrupt without a corresponding seqno advance,
@@ -1156,7 +1184,7 @@ restart:
GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
/* Optimistic short spin before touching IRQs */
- if (i915_spin_request(req, state, 5))
+ if (__i915_spin_request(req, wait.seqno, state, 5))
goto complete;
set_current_state(state);
@@ -1213,7 +1241,7 @@ wakeup:
continue;
/* Only spin if we know the GPU is processing this request */
- if (i915_spin_request(req, state, 2))
+ if (__i915_spin_request(req, wait.seqno, state, 2))
break;
if (!intel_wait_check_request(&wait, req)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 49a4c8994ff0..26249f39de67 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -30,6 +30,8 @@
#include "i915_gem.h"
#include "i915_sw_fence.h"
+#include <uapi/drm/i915_drm.h>
+
struct drm_file;
struct drm_i915_gem_object;
struct drm_i915_gem_request;
@@ -69,9 +71,14 @@ struct i915_priotree {
struct list_head waiters_list; /* those after us, they depend upon us */
struct list_head link;
int priority;
-#define I915_PRIORITY_MAX 1024
-#define I915_PRIORITY_NORMAL 0
-#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
+enum {
+ I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+ I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+ I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ I915_PRIORITY_INVALID = INT_MIN
};
struct i915_gem_capture_list {
@@ -313,26 +320,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- seqno - 1);
-}
-
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
- u32 seqno;
-
- seqno = i915_gem_request_global_seqno(req);
- if (!seqno)
- return false;
-
- return __i915_gem_request_started(req, seqno);
-}
-
-static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
{
GEM_BUG_ON(!seqno);
@@ -352,21 +339,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
return __i915_gem_request_completed(req, seqno);
}
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
- u32 seqno, int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
- int state, unsigned long timeout_us)
-{
- u32 seqno;
-
- seqno = i915_gem_request_global_seqno(request);
- if (!seqno)
- return 0;
-
- return (__i915_gem_request_started(request, seqno) &&
- __i915_spin_request(request, seqno, state, timeout_us));
-}
-
/* We treat requests as fences. This is not be to confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
* We use the fences to synchronize access from the CPU with activity on the
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 74002b2d1b6f..3770e3323fc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
mutex_unlock(&dev_priv->drm.struct_mutex);
}
-static bool any_vma_pinned(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- /* Only GGTT vma may be permanently pinned, and are always
- * at the start of the list. We can stop hunting as soon
- * as we see a ppGTT vma.
- */
- if (!i915_vma_is_ggtt(vma))
- break;
-
- if (i915_vma_is_pinned(vma))
- return true;
- }
-
- return false;
-}
-
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@@ -97,9 +78,6 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- if (!obj->mm.pages)
- return false;
-
/* Consider only shrinkable ojects. */
if (!i915_gem_object_is_shrinkable(obj))
return false;
@@ -115,7 +93,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
- if (any_vma_pinned(obj))
+ /* If any vma are "permanently" pinned, it will prevent us from
+ * reclaiming the obj->mm.pages. We only allow scanout objects to claim
+ * a permanent pin, along with a few others like the context objects.
+ * To simplify the scan, and to avoid walking the list of vma under the
+ * object, we just check the count of its permanently pinned.
+ */
+ if (READ_ONCE(obj->pin_global))
return false;
/* We can only return physical pages to the system if we can either
@@ -129,7 +113,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
- return !READ_ONCE(obj->mm.pages);
+ return !i915_gem_object_has_pages(obj);
}
/**
@@ -178,6 +162,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!shrinker_lock(dev_priv, &unlock))
return 0;
+ /*
+ * When shrinking the active list, also consider active contexts.
+ * Active contexts are pinned until they are retired, and so can
+ * not be simply unbound to retire and unpin their pages. To shrink
+ * the contexts, we must wait until the gpu is idle.
+ *
+ * We don't care about errors here; if we cannot wait upon the GPU,
+ * we will free as much as we can and hope to get a second chance.
+ */
+ if (flags & I915_SHRINK_ACTIVE)
+ i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -217,15 +213,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
continue;
INIT_LIST_HEAD(&still_in_list);
+
+ /*
+ * We serialize our access to unreferenced objects through
+ * the use of the struct_mutex. While the objects are not
+ * yet freed (due to RCU then a workqueue) we still want
+ * to be able to shrink their pages, so they remain on
+ * the unbound/bound list until actually freed.
+ */
+ spin_lock(&dev_priv->mm.obj_lock);
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- global_link))) {
- list_move_tail(&obj->global_link, &still_in_list);
- if (!obj->mm.pages) {
- list_del_init(&obj->global_link);
- continue;
- }
+ mm.link))) {
+ list_move_tail(&obj->mm.link, &still_in_list);
if (flags & I915_SHRINK_PURGEABLE &&
obj->mm.madv != I915_MADV_DONTNEED)
@@ -243,20 +244,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj))
continue;
+ spin_unlock(&dev_priv->mm.obj_lock);
+
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
- if (!obj->mm.pages) {
+ if (!i915_gem_object_has_pages(obj)) {
__i915_gem_object_invalidate(obj);
- list_del_init(&obj->global_link);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
- scanned += obj->base.size >> PAGE_SHIFT;
}
+ scanned += obj->base.size >> PAGE_SHIFT;
+
+ spin_lock(&dev_priv->mm.obj_lock);
}
list_splice_tail(&still_in_list, phase->list);
+ spin_unlock(&dev_priv->mm.obj_lock);
}
if (flags & I915_SHRINK_BOUND)
@@ -302,28 +307,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_i915_gem_object *obj;
- unsigned long count;
- bool unlock;
-
- if (!shrinker_lock(dev_priv, &unlock))
- return 0;
-
- i915_gem_retire_requests(dev_priv);
+ unsigned long num_objects = 0;
+ unsigned long count = 0;
- count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
- if (can_release_pages(obj))
+ spin_lock(&i915->mm.obj_lock);
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+ if (can_release_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
+ num_objects++;
+ }
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
- if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+ if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
- }
+ num_objects++;
+ }
+ spin_unlock(&i915->mm.obj_lock);
- shrinker_unlock(dev_priv, unlock);
+ /* Update our preferred vmscan batch size for the next pass.
+ * Our rough guess for an effective batch size is roughly 2
+ * available GEM objects worth of pages. That is we don't want
+ * the shrinker to fire, until it is worth the cost of freeing an
+ * entire GEM object.
+ */
+ if (num_objects) {
+ unsigned long avg = 2 * count / num_objects;
+
+ i915->mm.shrinker.batch =
+ max((i915->mm.shrinker.batch + avg) >> 1,
+ 128ul /* default SHRINK_BATCH */);
+ }
return count;
}
@@ -400,10 +416,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
- bool unlock;
-
- if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
- return NOTIFY_DONE;
freed_pages = i915_gem_shrink_all(dev_priv);
@@ -412,26 +424,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
- if (!obj->mm.pages)
- continue;
-
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
- if (!obj->mm.pages)
- continue;
-
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
bound += obj->base.size >> PAGE_SHIFT;
}
-
- shrinker_unlock(dev_priv, unlock);
+ spin_unlock(&dev_priv->mm.obj_lock);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, "
@@ -498,6 +504,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ dev_priv->mm.shrinker.batch = 4096;
WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 507c9f0d8df1..03e7abc7e043 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -539,12 +539,18 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st;
}
-static struct sg_table *
-i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- return i915_pages_create_for_stolen(obj->base.dev,
- obj->stolen->start,
- obj->stolen->size);
+ struct sg_table *pages =
+ i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ __i915_gem_object_set_pages(obj, pages, obj->stolen->size);
+
+ return 0;
}
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
@@ -718,8 +724,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
- list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++;
+ spin_unlock(&dev_priv->mm.obj_lock);
return obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fb5231f98c0d..1294cf695df0 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* due to the change in swizzling.
*/
mutex_lock(&obj->mm.lock);
- if (obj->mm.pages &&
+ if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 709efe2357ea..382a77a1097e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -82,11 +82,11 @@ static void cancel_userptr(struct work_struct *work)
/* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- WARN_ONCE(obj->mm.pages,
- "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+ WARN_ONCE(i915_gem_object_has_pages(obj),
+ "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
obj->bind_count,
atomic_read(&obj->mm.pages_pin_count),
- obj->pin_display);
+ obj->pin_global);
mutex_unlock(&obj->base.dev->struct_mutex);
@@ -164,7 +164,6 @@ static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
struct i915_mmu_notifier *mn;
- int ret;
mn = kmalloc(sizeof(*mn), GFP_KERNEL);
if (mn == NULL)
@@ -173,20 +172,14 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT_CACHED;
- mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+ mn->wq = alloc_workqueue("i915-userptr-release",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 0);
if (mn->wq == NULL) {
kfree(mn);
return ERR_PTR(-ENOMEM);
}
- /* Protected by mmap_sem (write-lock) */
- ret = __mmu_notifier_register(&mn->mn, mm);
- if (ret) {
- destroy_workqueue(mn->wq);
- kfree(mn);
- return ERR_PTR(ret);
- }
-
return mn;
}
@@ -210,23 +203,42 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
- struct i915_mmu_notifier *mn = mm->mn;
+ struct i915_mmu_notifier *mn;
+ int err = 0;
mn = mm->mn;
if (mn)
return mn;
+ mn = i915_mmu_notifier_create(mm->mm);
+ if (IS_ERR(mn))
+ err = PTR_ERR(mn);
+
down_write(&mm->mm->mmap_sem);
mutex_lock(&mm->i915->mm_lock);
- if ((mn = mm->mn) == NULL) {
- mn = i915_mmu_notifier_create(mm->mm);
- if (!IS_ERR(mn))
- mm->mn = mn;
+ if (mm->mn == NULL && !err) {
+ /* Protected by mmap_sem (write-lock) */
+ err = __mmu_notifier_register(&mn->mn, mm->mm);
+ if (!err) {
+ /* Protected by mm_lock */
+ mm->mn = fetch_and_zero(&mn);
+ }
+ } else if (mm->mn) {
+ /*
+ * Someone else raced and successfully installed the mmu
+ * notifier, we can cancel our own errors.
+ */
+ err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
- return mn;
+ if (mn && !IS_ERR(mn)) {
+ destroy_workqueue(mn->wq);
+ kfree(mn);
+ }
+
+ return err ? ERR_PTR(err) : mm->mn;
}
static int
@@ -399,64 +411,47 @@ struct get_pages_work {
struct task_struct *task;
};
-#if IS_ENABLED(CONFIG_SWIOTLB)
-#define swiotlb_active() swiotlb_nr_tbl()
-#else
-#define swiotlb_active() 0
-#endif
-
-static int
-st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
-{
- struct scatterlist *sg;
- int ret, n;
-
- *st = kmalloc(sizeof(**st), GFP_KERNEL);
- if (*st == NULL)
- return -ENOMEM;
-
- if (swiotlb_active()) {
- ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
- if (ret)
- goto err;
-
- for_each_sg((*st)->sgl, sg, num_pages, n)
- sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
- } else {
- ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
- 0, num_pages << PAGE_SHIFT,
- GFP_KERNEL);
- if (ret)
- goto err;
- }
-
- return 0;
-
-err:
- kfree(*st);
- *st = NULL;
- return ret;
-}
-
static struct sg_table *
-__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
+ struct page **pvec, int num_pages)
{
- struct sg_table *pages;
+ unsigned int max_segment = i915_sg_segment_size();
+ struct sg_table *st;
+ unsigned int sg_page_sizes;
int ret;
- ret = st_set_pages(&pages, pvec, num_pages);
- if (ret)
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+alloc_table:
+ ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
+ 0, num_pages << PAGE_SHIFT,
+ max_segment,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(st);
return ERR_PTR(ret);
+ }
- ret = i915_gem_gtt_prepare_pages(obj, pages);
+ ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
- sg_free_table(pages);
- kfree(pages);
+ sg_free_table(st);
+
+ if (max_segment > PAGE_SIZE) {
+ max_segment = PAGE_SIZE;
+ goto alloc_table;
+ }
+
+ kfree(st);
return ERR_PTR(ret);
}
- return pages;
+ sg_page_sizes = i915_sg_page_sizes(st->sgl);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return st;
}
static int
@@ -540,9 +535,9 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct sg_table *pages = ERR_PTR(ret);
if (pinned == npages) {
- pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+ pages = __i915_gem_userptr_alloc_pages(obj, pvec,
+ npages);
if (!IS_ERR(pages)) {
- __i915_gem_object_set_pages(obj, pages);
pinned = 0;
pages = NULL;
}
@@ -554,7 +549,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -603,8 +598,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
return ERR_PTR(-EAGAIN);
}
-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
@@ -633,9 +627,9 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return ERR_CAST(obj->userptr.work);
+ return PTR_ERR(obj->userptr.work);
else
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
pvec = NULL;
@@ -661,17 +655,17 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pages = __i915_gem_userptr_get_pages_schedule(obj);
active = pages == ERR_PTR(-EAGAIN);
} else {
- pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+ pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
active = !IS_ERR(pages);
}
if (active)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
- return pages;
+ return PTR_ERR_OR_ZERO(pages);
}
static void
@@ -834,7 +828,9 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
hash_init(dev_priv->mm_structs);
dev_priv->mm.userptr_wq =
- alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
+ alloc_workqueue("i915-userptr-acquire",
+ WQ_HIGHPRI | WQ_UNBOUND,
+ 0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
new file mode 100644
index 000000000000..888b7d3f04c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gemfs.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+
+#include "i915_drv.h"
+#include "i915_gemfs.h"
+
+int i915_gemfs_init(struct drm_i915_private *i915)
+{
+ struct file_system_type *type;
+ struct vfsmount *gemfs;
+
+ type = get_fs_type("tmpfs");
+ if (!type)
+ return -ENODEV;
+
+ gemfs = kern_mount(type);
+ if (IS_ERR(gemfs))
+ return PTR_ERR(gemfs);
+
+ /*
+ * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
+ * likely 2M. Note that within_size may overallocate huge-pages, if say
+ * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
+ * memory pressure shmem should split any huge-pages which can be
+ * shrunk.
+ */
+
+ if (has_transparent_hugepage()) {
+ struct super_block *sb = gemfs->mnt_sb;
+ /* FIXME: Disabled until we get W/A for read BW issue. */
+ char options[] = "huge=never";
+ int flags = 0;
+ int err;
+
+ err = sb->s_op->remount_fs(sb, &flags, options);
+ if (err) {
+ kern_unmount(gemfs);
+ return err;
+ }
+ }
+
+ i915->mm.gemfs = gemfs;
+
+ return 0;
+}
+
+void i915_gemfs_fini(struct drm_i915_private *i915)
+{
+ kern_unmount(i915->mm.gemfs);
+}
diff --git a/drivers/gpu/drm/i915/i915_gemfs.h b/drivers/gpu/drm/i915/i915_gemfs.h
new file mode 100644
index 000000000000..cca8bdc5b93e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gemfs.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEMFS_H__
+#define __I915_GEMFS_H__
+
+struct drm_i915_private;
+
+int i915_gemfs_init(struct drm_i915_private *i915);
+
+void i915_gemfs_fini(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0c779671fe2d..653fb69e7ecb 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -377,9 +377,9 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
prefix, erq->pid, erq->ban_score,
- erq->context, erq->seqno,
+ erq->context, erq->seqno, erq->priority,
jiffies_to_msecs(jiffies - erq->jiffies),
erq->head, erq->tail);
}
@@ -388,14 +388,16 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
+ err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
- ctx->ban_score, ctx->guilty, ctx->active);
+ ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
+ int n;
+
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
@@ -465,8 +467,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
err_printf(m, " engine reset count: %u\n", ee->reset_count);
- error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
- error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
+ for (n = 0; n < ee->num_ports; n++) {
+ err_printf(m, " ELSP[%d]:", n);
+ error_print_request(m, " ", &ee->execlist[n]);
+ }
+
error_print_context(m, " Active context: ", &ee->context);
}
@@ -567,7 +572,7 @@ static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
static void err_print_params(struct drm_i915_error_state_buf *m,
const struct i915_params *p)
{
-#define PRINT(T, x) err_print_param(m, #x, #T, &p->x);
+#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
@@ -861,7 +866,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
-#define FREE(T, x) free_param(#T, &error->params.x);
+#define FREE(T, x, ...) free_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
@@ -1266,6 +1271,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
+ erq->priority = request->priotree.priority;
erq->ban_score = atomic_read(&request->ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
@@ -1327,17 +1333,19 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
- const struct execlist_port *port = engine->execlist_port;
+ const struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned int n;
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
- struct drm_i915_gem_request *rq = port_request(&port[n]);
+ for (n = 0; n < execlists_num_ports(execlists); n++) {
+ struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
if (!rq)
break;
record_request(rq, &ee->execlist[n]);
}
+
+ ee->num_ports = n;
}
static void record_context(struct drm_i915_error_context *e,
@@ -1357,6 +1365,7 @@ static void record_context(struct drm_i915_error_context *e,
e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
+ e->priority = ctx->priority;
e->ban_score = atomic_read(&ctx->ban_score);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
@@ -1554,7 +1563,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
{
/* Capturing log buf contents won't be useful if logging was disabled */
- if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+ if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))
return;
error->guc_log = i915_error_object_create(dev_priv,
@@ -1665,8 +1674,8 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
{
error->awake = dev_priv->gt.awake;
- error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
- error->suspended = dev_priv->pm.suspended;
+ error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count);
+ error->suspended = dev_priv->runtime_pm.suspended;
error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
@@ -1696,8 +1705,8 @@ static int capture(void *data)
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
- error->params = i915;
-#define DUP(T, x) dup_param(#T, &error->params.x);
+ error->params = i915_modparams;
+#define DUP(T, x, ...) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
@@ -1751,7 +1760,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error;
unsigned long flags;
- if (!i915.error_capture)
+ if (!i915_modparams.error_capture)
return;
if (READ_ONCE(dev_priv->gpu_error.first_error))
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 48a1e9349a2c..f84c267728fd 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -21,12 +21,13 @@
* IN THE SOFTWARE.
*
*/
-#include <linux/circ_buf.h>
-#include "i915_drv.h"
-#include "intel_uc.h"
+#include <linux/circ_buf.h>
#include <trace/events/dma_fence.h>
+#include "i915_guc_submission.h"
+#include "i915_drv.h"
+
/**
* DOC: GuC-based command submission
*
@@ -192,13 +193,12 @@ static int __create_doorbell(struct i915_guc_client *client)
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
- doorbell->cookie = client->doorbell_cookie;
+ doorbell->cookie = 0;
err = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (err) {
+ if (err)
doorbell->db_status = GUC_DOORBELL_DISABLED;
- doorbell->cookie = 0;
- }
+
return err;
}
@@ -306,7 +306,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->db_base_addr = 0;
desc->stage_id = client->stage_id;
- desc->wq_size_bytes = client->wq_size;
+ desc->wq_size_bytes = GUC_WQ_SIZE;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
}
@@ -338,7 +338,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = &ctx->engine[engine->id];
- uint32_t guc_engine_id = engine->guc_id;
+ u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
/* TODO: We have a design issue to be solved here. Only when we
@@ -388,13 +388,13 @@ static void guc_stage_desc_init(struct intel_guc *guc,
gfx_addr = guc_ggtt_offset(client->vma);
desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
+ desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
desc->process_desc = gfx_addr + client->proc_desc_offset;
- desc->wq_addr = gfx_addr + client->wq_offset;
- desc->wq_size = client->wq_size;
+ desc->wq_addr = gfx_addr + GUC_DB_SIZE;
+ desc->wq_size = GUC_WQ_SIZE;
- desc->desc_private = (uintptr_t)client;
+ desc->desc_private = ptr_to_u64(client);
}
static void guc_stage_desc_fini(struct intel_guc *guc,
@@ -406,82 +406,23 @@ static void guc_stage_desc_fini(struct intel_guc *guc,
memset(desc, 0, sizeof(*desc));
}
-/**
- * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
- * @request: request associated with the commands
- *
- * Return: 0 if space is available
- * -EAGAIN if space is not currently available
- *
- * This function must be called (and must return 0) before a request
- * is submitted to the GuC via i915_guc_submit() below. Once a result
- * of 0 has been returned, it must be balanced by a corresponding
- * call to submit().
- *
- * Reservation allows the caller to determine in advance that space
- * will be available for the next submission before committing resources
- * to it, and helps avoid late failures with complicated recovery paths.
- */
-int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
-{
- const size_t wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *client = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = __get_process_desc(client);
- u32 freespace;
- int ret;
-
- spin_lock_irq(&client->wq_lock);
- freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
- freespace -= client->wq_rsvd;
- if (likely(freespace >= wqi_size)) {
- client->wq_rsvd += wqi_size;
- ret = 0;
- } else {
- client->no_wq_space++;
- ret = -EAGAIN;
- }
- spin_unlock_irq(&client->wq_lock);
-
- return ret;
-}
-
-static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&client->wq_lock, flags);
- client->wq_rsvd += size;
- spin_unlock_irqrestore(&client->wq_lock, flags);
-}
-
-void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
-{
- const int wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *client = request->i915->guc.execbuf_client;
-
- GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
- guc_client_update_wq_rsvd(client, -wqi_size);
-}
-
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
- const u32 wqi_len = wqi_size/sizeof(u32) - 1;
+ const u32 wqi_len = wqi_size / sizeof(u32) - 1;
struct intel_engine_cs *engine = rq->engine;
+ struct i915_gem_context *ctx = rq->ctx;
struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
- u32 freespace, tail, wq_off;
+ u32 ring_tail, wq_off;
- /* Free space is guaranteed, see i915_guc_wq_reserve() above */
- freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
- GEM_BUG_ON(freespace < wqi_size);
+ lockdep_assert_held(&client->wq_lock);
- /* The GuC firmware wants the tail index in QWords, not bytes */
- tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
- GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
+ ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
@@ -491,29 +432,29 @@ static void guc_wq_item_append(struct i915_guc_client *client,
* workqueue buffer dw by dw.
*/
BUILD_BUG_ON(wqi_size != 16);
- GEM_BUG_ON(client->wq_rsvd < wqi_size);
- /* postincrement WQ tail for next time */
- wq_off = client->wq_tail;
+ /* Free space is guaranteed. */
+ wq_off = READ_ONCE(desc->tail);
+ GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
+ GUC_WQ_SIZE) < wqi_size);
GEM_BUG_ON(wq_off & (wqi_size - 1));
- client->wq_tail += wqi_size;
- client->wq_tail &= client->wq_size - 1;
- client->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
wqi = client->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (engine->guc_id << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
+ (wqi_len << WQ_LEN_SHIFT) |
+ (engine->guc_id << WQ_TARGET_SHIFT) |
+ WQ_NO_WCFLUSH_WAIT;
- /* The GuC wants only the low-order word of the context descriptor */
- wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
+ wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine));
- wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
+ wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = rq->global_seqno;
+
+ /* Postincrement WQ tail for next time. */
+ WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
static void guc_reset_wq(struct i915_guc_client *client)
@@ -522,106 +463,64 @@ static void guc_reset_wq(struct i915_guc_client *client)
desc->head = 0;
desc->tail = 0;
-
- client->wq_tail = 0;
}
-static int guc_ring_doorbell(struct i915_guc_client *client)
+static void guc_ring_doorbell(struct i915_guc_client *client)
{
- struct guc_process_desc *desc = __get_process_desc(client);
- union guc_doorbell_qw db_cmp, db_exc, db_ret;
- union guc_doorbell_qw *db;
- int attempt = 2, ret = -EAGAIN;
+ struct guc_doorbell_info *db;
+ u32 cookie;
- /* Update the tail so it is visible to GuC */
- desc->tail = client->wq_tail;
-
- /* current cookie */
- db_cmp.db_status = GUC_DOORBELL_ENABLED;
- db_cmp.cookie = client->doorbell_cookie;
-
- /* cookie to be updated */
- db_exc.db_status = GUC_DOORBELL_ENABLED;
- db_exc.cookie = client->doorbell_cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
+ lockdep_assert_held(&client->wq_lock);
/* pointer of current doorbell cacheline */
- db = (union guc_doorbell_qw *)__get_doorbell(client);
-
- while (attempt--) {
- /* lets ring the doorbell */
- db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
- db_cmp.value_qw, db_exc.value_qw);
-
- /* if the exchange was successfully executed */
- if (db_ret.value_qw == db_cmp.value_qw) {
- /* db was successfully rung */
- client->doorbell_cookie = db_exc.cookie;
- ret = 0;
- break;
- }
-
- /* XXX: doorbell was lost and need to acquire it again */
- if (db_ret.db_status == GUC_DOORBELL_DISABLED)
- break;
+ db = __get_doorbell(client);
- DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
- db_cmp.cookie, db_ret.cookie);
+ /* we're not expecting the doorbell cookie to change behind our back */
+ cookie = READ_ONCE(db->cookie);
+ WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie);
- /* update the cookie to newly read cookie from GuC */
- db_cmp.cookie = db_ret.cookie;
- db_exc.cookie = db_ret.cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
- }
-
- return ret;
+ /* XXX: doorbell was lost and need to acquire it again */
+ GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
/**
- * __i915_guc_submit() - Submit commands through GuC
- * @rq: request associated with the commands
- *
- * The caller must have already called i915_guc_wq_reserve() above with
- * a result of 0 (success), guaranteeing that there is space in the work
- * queue for the new request, so enqueuing the item cannot fail.
- *
- * Bad Things Will Happen if the caller violates this protocol e.g. calls
- * submit() when _reserve() says there's no space, or calls _submit()
- * a different number of times from (successful) calls to _reserve().
+ * i915_guc_submit() - Submit commands through GuC
+ * @engine: engine associated with the commands
*
* The only error here arises if the doorbell hardware isn't functioning
* as expected, which really shouln't happen.
*/
-static void __i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = rq->i915;
- struct intel_engine_cs *engine = rq->engine;
- unsigned int engine_id = engine->id;
- struct intel_guc *guc = &rq->i915->guc;
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client = guc->execbuf_client;
- unsigned long flags;
- int b_ret;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ const unsigned int engine_id = engine->id;
+ unsigned int n;
- /* WA to flush out the pending GMADR writes to ring buffer. */
- if (i915_vma_is_map_and_fenceable(rq->ring->vma))
- POSTING_READ_FW(GUC_STATUS);
+ for (n = 0; n < execlists_num_ports(execlists); n++) {
+ struct drm_i915_gem_request *rq;
+ unsigned int count;
- spin_lock_irqsave(&client->wq_lock, flags);
+ rq = port_unpack(&port[n], &count);
+ if (rq && count == 0) {
+ port_set(&port[n], port_pack(rq, ++count));
- guc_wq_item_append(client, rq);
- b_ret = guc_ring_doorbell(client);
+ if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+ POSTING_READ_FW(GUC_STATUS);
- client->submissions[engine_id] += 1;
+ spin_lock(&client->wq_lock);
- spin_unlock_irqrestore(&client->wq_lock, flags);
-}
+ guc_wq_item_append(client, rq);
+ guc_ring_doorbell(client);
-static void i915_guc_submit(struct drm_i915_gem_request *rq)
-{
- __i915_gem_request_submit(rq);
- __i915_guc_submit(rq);
+ client->submissions[engine_id] += 1;
+
+ spin_unlock(&client->wq_lock);
+ }
+ }
}
static void nested_enable_signaling(struct drm_i915_gem_request *rq)
@@ -655,27 +554,33 @@ static void port_assign(struct execlist_port *port,
if (port_isset(port))
i915_gem_request_put(port_request(port));
- port_set(port, i915_gem_request_get(rq));
+ port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
nested_enable_signaling(rq);
}
-static bool i915_guc_dequeue(struct intel_engine_cs *engine)
+static void i915_guc_dequeue(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlist_port;
- struct drm_i915_gem_request *last = port_request(port);
- struct rb_node *rb;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ struct drm_i915_gem_request *last = NULL;
+ const struct execlist_port * const last_port =
+ &execlists->port[execlists->port_mask];
bool submit = false;
+ struct rb_node *rb;
+
+ if (port_isset(port))
+ port++;
spin_lock_irq(&engine->timeline->lock);
- rb = engine->execlist_first;
- GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+ rb = execlists->first;
+ GEM_BUG_ON(rb_first(&execlists->queue) != rb);
while (rb) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
- if (port != engine->execlist_port) {
+ if (port == last_port) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -689,50 +594,51 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&rq->priotree.link);
rq->priotree.priority = INT_MAX;
- i915_guc_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, engine));
+ __i915_gem_request_submit(rq);
+ trace_i915_gem_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
rb = rb_next(rb);
- rb_erase(&p->node, &engine->execlist_queue);
+ rb_erase(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
done:
- engine->execlist_first = rb;
- if (submit)
+ execlists->first = rb;
+ if (submit) {
port_assign(port, last);
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
+ i915_guc_submit(engine);
+ }
spin_unlock_irq(&engine->timeline->lock);
-
- return submit;
}
static void i915_guc_irq_handler(unsigned long data)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ const struct execlist_port * const last_port =
+ &execlists->port[execlists->port_mask];
struct drm_i915_gem_request *rq;
- bool submit;
- do {
- rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ rq = port_request(&port[0]);
+ while (rq && i915_gem_request_completed(rq)) {
+ trace_i915_gem_request_out(rq);
+ i915_gem_request_put(rq);
- port[0] = port[1];
- memset(&port[1], 0, sizeof(port[1]));
+ execlists_port_complete(execlists, port);
- rq = port_request(&port[0]);
- }
+ rq = port_request(&port[0]);
+ }
+ if (!rq)
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
- submit = false;
- if (!port_count(&port[1]))
- submit = i915_guc_dequeue(engine);
- } while (submit);
+ if (!port_isset(last_port))
+ i915_guc_dequeue(engine);
}
/*
@@ -741,48 +647,6 @@ static void i915_guc_irq_handler(unsigned long data)
* path of i915_guc_submit() above.
*/
-/**
- * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
- * @guc: the guc
- * @size: size of area to allocate (both virtual space and memory)
- *
- * This is a wrapper to create an object for use with the GuC. In order to
- * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
- * both some backing storage and a range inside the Global GTT. We must pin
- * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
- * range is reserved inside GuC.
- *
- * Return: A i915_vma if successful, otherwise an ERR_PTR.
- */
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- obj = i915_gem_object_create(dev_priv, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
- if (IS_ERR(vma))
- goto err;
-
- ret = i915_vma_pin(vma, 0, PAGE_SIZE,
- PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return vma;
-}
-
/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
@@ -894,8 +758,8 @@ static int guc_init_doorbell_hw(struct intel_guc *guc)
*/
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
- uint32_t engines,
- uint32_t priority,
+ u32 engines,
+ u32 priority,
struct i915_gem_context *ctx)
{
struct i915_guc_client *client;
@@ -913,8 +777,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
client->engines = engines;
client->priority = priority;
client->doorbell_id = GUC_DOORBELL_INVALID;
- client->wq_offset = GUC_DB_SIZE;
- client->wq_size = GUC_WQ_SIZE;
spin_lock_init(&client->wq_lock);
ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
@@ -996,28 +858,39 @@ static void guc_client_free(struct i915_guc_client *client)
kfree(client);
}
+static void guc_policy_init(struct guc_policy *policy)
+{
+ policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
+ policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
+ policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
+ policy->policy_flags = 0;
+}
+
static void guc_policies_init(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
- policies->dpc_promote_time = 500000;
+ policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
- policy->execution_quantum = 1000000;
- policy->preemption_time = 500000;
- policy->fault_time = 250000;
- policy->policy_flags = 0;
+ guc_policy_init(policy);
}
}
policies->is_valid = 1;
}
+/*
+ * The first 80 dwords of the register state context, containing the
+ * execlists and ppgtt registers.
+ */
+#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
+
static int guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1032,6 +905,8 @@ static int guc_ads_create(struct intel_guc *guc)
} __packed *blob;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
+ const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
GEM_BUG_ON(guc->ads_vma);
@@ -1062,13 +937,20 @@ static int guc_ads_create(struct intel_guc *guc)
* engines after a reset. Here we use the Render ring default
* context, which must already exist and be pinned in the GGTT,
* so its address won't change after we've told the GuC where
- * to find it.
+ * to find it. Note that we have to skip our header (1 page),
+ * because our GuC shared data is there.
*/
blob->ads.golden_context_lrca =
- dev_priv->engine[RCS]->status_page.ggtt_offset;
+ guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
+ /*
+ * The GuC expects us to exclude the portion of the context image that
+ * it skips from the size it is to read. It starts reading from after
+ * the execlist context (so skipping the first page [PPHWSP] and 80
+ * dwords). Weird guc is weird.
+ */
for_each_engine(engine, dev_priv, id)
- blob->ads.eng_state_size[engine->guc_id] = engine->context_size;
+ blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
base = guc_ggtt_offset(vma);
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
@@ -1149,6 +1031,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1185,12 +1068,13 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
* Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
* result in the register bit being left SET!
*/
- dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
- dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+ rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1209,8 +1093,8 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_VCS2_VCS1_IER, 0);
I915_WRITE(GUC_WD_VECS_IER, 0);
- dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
@@ -1221,6 +1105,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
int err;
+ /*
+ * We're using GuC work items for submitting work through GuC. Since
+ * we're coalescing multiple requests from a single context into a
+ * single work item prior to assigning it to execlist_port, we can
+ * never have more work items than the total number of ports (for all
+ * engines). The GuC firmware is controlling the HEAD of work queue,
+ * and it is guaranteed that it will remove the work item from the
+ * queue before our request is completed.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
+ sizeof(struct guc_wq_item) *
+ I915_NUM_ENGINES > GUC_WQ_SIZE);
+
if (!client) {
client = guc_client_alloc(dev_priv,
INTEL_INFO(dev_priv)->ring_mask,
@@ -1248,24 +1145,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
- const int wqi_size = sizeof(struct guc_wq_item);
- struct drm_i915_gem_request *rq;
-
+ struct intel_engine_execlists * const execlists = &engine->execlists;
/* The tasklet was initialised by execlists, and may be in
* a state of flux (across a reset) and so we just want to
* take over the callback without changing any other state
* in the tasklet.
*/
- engine->irq_tasklet.func = i915_guc_irq_handler;
+ execlists->irq_tasklet.func = i915_guc_irq_handler;
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-
- /* Replay the current set of previously submitted requests */
- spin_lock_irq(&engine->timeline->lock);
- list_for_each_entry(rq, &engine->timeline->requests, link) {
- guc_client_update_wq_rsvd(client, wqi_size);
- __i915_guc_submit(rq);
- }
- spin_unlock_irq(&engine->timeline->lock);
+ tasklet_schedule(&execlists->irq_tasklet);
}
return 0;
@@ -1288,55 +1176,3 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
guc_client_free(guc->execbuf_client);
guc->execbuf_client = NULL;
}
-
-/**
- * intel_guc_suspend() - notify GuC entering suspend state
- * @dev_priv: i915 device private
- */
-int intel_guc_suspend(struct drm_i915_private *dev_priv)
-{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
- u32 data[3];
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- gen9_disable_guc_interrupts(dev_priv);
-
- ctx = dev_priv->kernel_context;
-
- data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
- /* any value greater than GUC_POWER_D0 */
- data[1] = GUC_POWER_D1;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
-
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
-}
-
-/**
- * intel_guc_resume() - notify GuC resuming from suspend state
- * @dev_priv: i915 device private
- */
-int intel_guc_resume(struct drm_i915_private *dev_priv)
-{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
- u32 data[3];
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- if (i915.guc_log_level >= 0)
- gen9_enable_guc_interrupts(dev_priv);
-
- ctx = dev_priv->kernel_context;
-
- data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
- data[1] = GUC_POWER_D0;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
-
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
-}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.h b/drivers/gpu/drm/i915/i915_guc_submission.h
new file mode 100644
index 000000000000..cb4353b59059
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_submission.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_GUC_SUBMISSION_H_
+#define _I915_GUC_SUBMISSION_H_
+
+#include <linux/spinlock.h>
+
+#include "i915_gem.h"
+
+struct drm_i915_private;
+
+/*
+ * This structure primarily describes the GEM object shared with the GuC.
+ * The specs sometimes refer to this object as a "GuC context", but we use
+ * the term "client" to avoid confusion with hardware contexts. This
+ * GEM object is held for the entire lifetime of our interaction with
+ * the GuC, being allocated before the GuC is loaded with its firmware.
+ * Because there's no way to update the address used by the GuC after
+ * initialisation, the shared object must stay pinned into the GGTT as
+ * long as the GuC is in use. We also keep the first page (only) mapped
+ * into kernel address space, as it includes shared data that must be
+ * updated on every request submission.
+ *
+ * The single GEM object described here is actually made up of several
+ * separate areas, as far as the GuC is concerned. The first page (kept
+ * kmap'd) includes the "process descriptor" which holds sequence data for
+ * the doorbell, and one cacheline which actually *is* the doorbell; a
+ * write to this will "ring the doorbell" (i.e. send an interrupt to the
+ * GuC). The subsequent pages of the client object constitute the work
+ * queue (a circular array of work items), again described in the process
+ * descriptor. Work queue pages are mapped momentarily as required.
+ */
+struct i915_guc_client {
+ struct i915_vma *vma;
+ void *vaddr;
+ struct i915_gem_context *owner;
+ struct intel_guc *guc;
+
+ /* bitmap of (host) engine ids */
+ u32 engines;
+ u32 priority;
+ u32 stage_id;
+ u32 proc_desc_offset;
+
+ u16 doorbell_id;
+ unsigned long doorbell_offset;
+
+ spinlock_t wq_lock;
+ /* Per-engine counts of GuC submissions */
+ u64 submissions[I915_NUM_ENGINES];
+};
+
+int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b63893eeca73..f8205841868b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -126,7 +126,7 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
-#define GEN5_IRQ_RESET(type) do { \
+#define GEN3_IRQ_RESET(type) do { \
I915_WRITE(type##IMR, 0xffffffff); \
POSTING_READ(type##IMR); \
I915_WRITE(type##IER, 0); \
@@ -136,10 +136,20 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
POSTING_READ(type##IIR); \
} while (0)
+#define GEN2_IRQ_RESET(type) do { \
+ I915_WRITE16(type##IMR, 0xffff); \
+ POSTING_READ16(type##IMR); \
+ I915_WRITE16(type##IER, 0); \
+ I915_WRITE16(type##IIR, 0xffff); \
+ POSTING_READ16(type##IIR); \
+ I915_WRITE16(type##IIR, 0xffff); \
+ POSTING_READ16(type##IIR); \
+} while (0)
+
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
+static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
u32 val = I915_READ(reg);
@@ -155,20 +165,43 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
POSTING_READ(reg);
}
+static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ u16 val = I915_READ16(reg);
+
+ if (val == 0)
+ return;
+
+ WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(reg), val);
+ I915_WRITE16(reg, 0xffff);
+ POSTING_READ16(reg);
+ I915_WRITE16(reg, 0xffff);
+ POSTING_READ16(reg);
+}
+
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
- gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
+ gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
-#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
- gen5_assert_iir_is_zero(dev_priv, type##IIR); \
+#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
+ gen3_assert_iir_is_zero(dev_priv, type##IIR); \
I915_WRITE(type##IER, (ier_val)); \
I915_WRITE(type##IMR, (imr_val)); \
POSTING_READ(type##IMR); \
} while (0)
+#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
+ gen2_assert_iir_is_zero(dev_priv, type##IIR); \
+ I915_WRITE16(type##IER, (ier_val)); \
+ I915_WRITE16(type##IMR, (imr_val)); \
+ POSTING_READ16(type##IMR); \
+} while (0)
+
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
@@ -336,7 +369,7 @@ void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
__gen6_mask_pm_irq(dev_priv, mask);
}
-void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
+static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
@@ -347,7 +380,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
POSTING_READ(reg);
}
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@@ -357,7 +390,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@@ -371,19 +404,21 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
- dev_priv->rps.pm_iir = 0;
+ dev_priv->gt_pm.rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ if (READ_ONCE(rps->interrupts_enabled))
return;
spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON_ONCE(dev_priv->rps.pm_iir);
+ WARN_ON_ONCE(rps->pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
- dev_priv->rps.interrupts_enabled = true;
+ rps->interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -391,11 +426,13 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ if (!READ_ONCE(rps->interrupts_enabled))
return;
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.interrupts_enabled = false;
+ rps->interrupts_enabled = false;
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
@@ -405,11 +442,11 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
synchronize_irq(dev_priv->drm.irq);
/* Now that we will not be generating any more work, flush any
- * outsanding tasks. As we are called on the RPS idle path,
+ * outstanding tasks. As we are called on the RPS idle path,
* we will reset the GPU to minimum frequencies, so the current
* state of the worker can be discarded.
*/
- cancel_work_sync(&dev_priv->rps.work);
+ cancel_work_sync(&rps->work);
gen6_reset_rps_interrupts(dev_priv);
}
@@ -534,62 +571,16 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
POSTING_READ(SDEIMR);
}
-static void
-__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 enable_mask, u32 status_mask)
-{
- i915_reg_t reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
-
- lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
-
- if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
- pipe_name(pipe), enable_mask, status_mask))
- return;
-
- if ((pipestat & enable_mask) == enable_mask)
- return;
-
- dev_priv->pipestat_irq_mask[pipe] |= status_mask;
-
- /* Enable the interrupt, clear any pending status */
- pipestat |= enable_mask | status_mask;
- I915_WRITE(reg, pipestat);
- POSTING_READ(reg);
-}
-
-static void
-__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 enable_mask, u32 status_mask)
+u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- i915_reg_t reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
+ u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
+ u32 enable_mask = status_mask << 16;
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
-
- if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
- pipe_name(pipe), enable_mask, status_mask))
- return;
-
- if ((pipestat & enable_mask) == 0)
- return;
-
- dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
- pipestat &= ~enable_mask;
- I915_WRITE(reg, pipestat);
- POSTING_READ(reg);
-}
-
-static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
-{
- u32 enable_mask = status_mask << 16;
+ if (INTEL_GEN(dev_priv) < 5)
+ goto out;
/*
* On pipe A we don't support the PSR interrupt yet,
@@ -612,35 +603,59 @@ static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
+out:
+ WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask);
+
return enable_mask;
}
-void
-i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask)
+void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 status_mask)
{
+ i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
- status_mask);
- else
- enable_mask = status_mask << 16;
- __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+ WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
+
+ if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ return;
+
+ dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+ enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+ I915_WRITE(reg, enable_mask | status_mask);
+ POSTING_READ(reg);
}
-void
-i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask)
+void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 status_mask)
{
+ i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
- status_mask);
- else
- enable_mask = status_mask << 16;
- __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+ WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
+
+ if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
+ return;
+
+ dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+ enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+ I915_WRITE(reg, enable_mask | status_mask);
+ POSTING_READ(reg);
}
/**
@@ -772,6 +787,57 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
+/*
+ * On certain encoders on certain platforms, pipe
+ * scanline register will not work to get the scanline,
+ * since the timings are driven from the PORT or issues
+ * with scanline register updates.
+ * This function will use Framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 htotal = mode->crtc_htotal;
+ u32 clock = mode->crtc_clock;
+ u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
+
+ /*
+ * To avoid the race condition where we might cross into the
+ * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * during the same frame.
+ */
+ do {
+ /*
+ * This field provides read back of the display
+ * pipe frame time stamp. The time stamp value
+ * is sampled at every start of vertical blank.
+ */
+ scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+
+ /*
+ * The TIMESTAMP_CTR register has the current
+ * time stamp value.
+ */
+ scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
+
+ scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ } while (scan_post_time != scan_prev_time);
+
+ scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+ scanline = min(scanline, vtotal - 1);
+ scanline = (scanline + vblank_start) % vtotal;
+
+ return scanline;
+}
+
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
@@ -788,6 +854,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
mode = &vblank->hwmode;
+ if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
+ return __intel_get_crtc_scanline_from_timestamp(crtc);
+
vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
@@ -1005,6 +1074,8 @@ static void notify_ring(struct intel_engine_cs *engine)
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
+ bool wakeup = engine->irq_seqno_barrier;
+
/* We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
@@ -1017,12 +1088,18 @@ static void notify_ring(struct intel_engine_cs *engine)
* and many waiters.
*/
if (i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &wait->request->fence.flags))
- rq = i915_gem_request_get(wait->request);
+ wait->seqno)) {
+ struct drm_i915_gem_request *waiter = wait->request;
+
+ wakeup = true;
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &waiter->fence.flags) &&
+ intel_wait_check_request(wait, waiter))
+ rq = i915_gem_request_get(waiter);
+ }
- wake_up_process(wait->tsk);
+ if (wakeup)
+ wake_up_process(wait->tsk);
} else {
__intel_engine_disarm_breadcrumbs(engine);
}
@@ -1046,12 +1123,13 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+ memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ const struct intel_rps_ei *prev = &rps->ei;
struct intel_rps_ei now;
u32 events = 0;
@@ -1078,28 +1156,29 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
c0 = max(render, media);
c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
- if (c0 > time * dev_priv->rps.up_threshold)
+ if (c0 > time * rps->up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * dev_priv->rps.down_threshold)
+ else if (c0 < time * rps->down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
}
- dev_priv->rps.ei = now;
+ rps->ei = now;
return events;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, rps.work);
+ container_of(work, struct drm_i915_private, gt_pm.rps.work);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
bool client_boost = false;
int new_delay, adj, min, max;
u32 pm_iir = 0;
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->rps.interrupts_enabled) {
- pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
- client_boost = atomic_read(&dev_priv->rps.num_waiters);
+ if (rps->interrupts_enabled) {
+ pm_iir = fetch_and_zero(&rps->pm_iir);
+ client_boost = atomic_read(&rps->num_waiters);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1108,18 +1187,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
goto out;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
- adj = dev_priv->rps.last_adj;
- new_delay = dev_priv->rps.cur_freq;
- min = dev_priv->rps.min_freq_softlimit;
- max = dev_priv->rps.max_freq_softlimit;
+ adj = rps->last_adj;
+ new_delay = rps->cur_freq;
+ min = rps->min_freq_softlimit;
+ max = rps->max_freq_softlimit;
if (client_boost)
- max = dev_priv->rps.max_freq;
- if (client_boost && new_delay < dev_priv->rps.boost_freq) {
- new_delay = dev_priv->rps.boost_freq;
+ max = rps->max_freq;
+ if (client_boost && new_delay < rps->boost_freq) {
+ new_delay = rps->boost_freq;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
@@ -1127,15 +1206,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
- if (new_delay >= dev_priv->rps.max_freq_softlimit)
+ if (new_delay >= rps->max_freq_softlimit)
adj = 0;
} else if (client_boost) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
- if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
- new_delay = dev_priv->rps.efficient_freq;
- else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
- new_delay = dev_priv->rps.min_freq_softlimit;
+ if (rps->cur_freq > rps->efficient_freq)
+ new_delay = rps->efficient_freq;
+ else if (rps->cur_freq > rps->min_freq_softlimit)
+ new_delay = rps->min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
@@ -1143,13 +1222,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
- if (new_delay <= dev_priv->rps.min_freq_softlimit)
+ if (new_delay <= rps->min_freq_softlimit)
adj = 0;
} else { /* unknown event */
adj = 0;
}
- dev_priv->rps.last_adj = adj;
+ rps->last_adj = adj;
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
@@ -1159,15 +1238,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (intel_set_rps(dev_priv, new_delay)) {
DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- dev_priv->rps.last_adj = 0;
+ rps->last_adj = 0;
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->rps.interrupts_enabled)
+ if (rps->interrupts_enabled)
gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1305,10 +1384,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
- if (port_count(&engine->execlist_port[0])) {
+ if (READ_ONCE(engine->execlists.active)) {
__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
tasklet = true;
}
@@ -1316,11 +1396,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
notify_ring(engine);
- tasklet |= i915.enable_guc_submission;
+ tasklet |= i915_modparams.enable_guc_submission;
}
if (tasklet)
- tasklet_hi_schedule(&engine->irq_tasklet);
+ tasklet_hi_schedule(&execlists->irq_tasklet);
}
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@ -1573,11 +1653,11 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* bonkers. So let's just wait for the next vblank and read
* out the buggy result.
*
- * On CHV sometimes the second CRC is bonkers as well, so
+ * On GEN8+ sometimes the second CRC is bonkers as well, so
* don't trust that one either.
*/
if (pipe_crc->skipped == 0 ||
- (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
+ (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -1649,12 +1729,14 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
- if (dev_priv->rps.interrupts_enabled) {
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- schedule_work(&dev_priv->rps.work);
+ if (rps->interrupts_enabled) {
+ rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ schedule_work(&rps->work);
}
spin_unlock(&dev_priv->irq_lock);
}
@@ -1706,8 +1788,21 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
}
}
-static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
- u32 iir, u32 pipe_stats[I915_MAX_PIPES])
+static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ I915_WRITE(PIPESTAT(pipe),
+ PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS);
+
+ dev_priv->pipestat_irq_mask[pipe] = 0;
+ }
+}
+
+static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
int pipe;
@@ -1720,7 +1815,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
i915_reg_t reg;
- u32 mask, iir_bit = 0;
+ u32 status_mask, enable_mask, iir_bit = 0;
/*
* PIPESTAT bits get signalled even when the interrupt is
@@ -1731,7 +1826,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
*/
/* fifo underruns are filterered in the underrun handler. */
- mask = PIPE_FIFO_UNDERRUN_STATUS;
+ status_mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
case PIPE_A:
@@ -1745,25 +1840,92 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- mask |= dev_priv->pipestat_irq_mask[pipe];
+ status_mask |= dev_priv->pipestat_irq_mask[pipe];
- if (!mask)
+ if (!status_mask)
continue;
reg = PIPESTAT(pipe);
- mask |= PIPESTAT_INT_ENABLE_MASK;
- pipe_stats[pipe] = I915_READ(reg) & mask;
+ pipe_stats[pipe] = I915_READ(reg) & status_mask;
+ enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
/*
* Clear the PIPE*STAT regs before the IIR
*/
- if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
- PIPESTAT_INT_STATUS_MASK))
- I915_WRITE(reg, pipe_stats[pipe]);
+ if (pipe_stats[pipe])
+ I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
}
spin_unlock(&dev_priv->irq_lock);
}
+static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+ u16 iir, u32 pipe_stats[I915_MAX_PIPES])
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ }
+}
+
+static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
+{
+ bool blc_event = false;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev_priv);
+}
+
+static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
+{
+ bool blc_event = false;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev_priv);
+
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev_priv);
+}
+
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 pipe_stats[I915_MAX_PIPES])
{
@@ -1879,7 +2041,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
@@ -1963,7 +2125,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
@@ -2100,18 +2262,14 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
u32 serr_int = I915_READ(SERR_INT);
+ enum pipe pipe;
if (serr_int & SERR_INT_POISON)
DRM_ERROR("PCH poison interrupt\n");
- if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
-
- if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
-
- if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);
+ for_each_pipe(dev_priv, pipe)
+ if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
+ intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
I915_WRITE(SERR_INT, serr_int);
}
@@ -2860,7 +3018,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_NOP(dev_priv))
return;
- GEN5_IRQ_RESET(SDE);
+ GEN3_IRQ_RESET(SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
@@ -2888,15 +3046,13 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN5_IRQ_RESET(GT);
+ GEN3_IRQ_RESET(GT);
if (INTEL_GEN(dev_priv) >= 6)
- GEN5_IRQ_RESET(GEN6_PM);
+ GEN3_IRQ_RESET(GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
- enum pipe pipe;
-
if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -2905,14 +3061,9 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- for_each_pipe(dev_priv, pipe) {
- I915_WRITE(PIPESTAT(pipe),
- PIPE_FIFO_UNDERRUN_STATUS |
- PIPESTAT_INT_STATUS_MASK);
- dev_priv->pipestat_irq_mask[pipe] = 0;
- }
+ i9xx_pipestat_irq_reset(dev_priv);
- GEN5_IRQ_RESET(VLV_);
+ GEN3_IRQ_RESET(VLV_);
dev_priv->irq_mask = ~0;
}
@@ -2922,8 +3073,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 enable_mask;
enum pipe pipe;
- pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
+ pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
@@ -2943,7 +3093,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
@@ -2952,9 +3102,10 @@ static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(HWSTAM, 0xffffffff);
+ if (IS_GEN5(dev_priv))
+ I915_WRITE(HWSTAM, 0xffffffff);
- GEN5_IRQ_RESET(DE);
+ GEN3_IRQ_RESET(DE);
if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
@@ -2963,7 +3114,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
-static void valleyview_irq_preinstall(struct drm_device *dev)
+static void valleyview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3001,9 +3152,9 @@ static void gen8_irq_reset(struct drm_device *dev)
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
- GEN5_IRQ_RESET(GEN8_DE_PORT_);
- GEN5_IRQ_RESET(GEN8_DE_MISC_);
- GEN5_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -3016,10 +3167,17 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
+
+ if (!intel_irqs_enabled(dev_priv)) {
+ spin_unlock_irq(&dev_priv->irq_lock);
+ return;
+ }
+
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
+
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3029,15 +3187,22 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
+
+ if (!intel_irqs_enabled(dev_priv)) {
+ spin_unlock_irq(&dev_priv->irq_lock);
+ return;
+ }
+
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
synchronize_irq(dev_priv->drm.irq);
}
-static void cherryview_irq_preinstall(struct drm_device *dev)
+static void cherryview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3046,7 +3211,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
- GEN5_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3111,7 +3276,15 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
+ u32 val, hotplug;
+
+ /* Display WA #1179 WaHardHangonHotPlug: cnp */
+ if (HAS_PCH_CNP(dev_priv)) {
+ val = I915_READ(SOUTH_CHICKEN1);
+ val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
+ val |= CHASSIS_CLK_REQ_DURATION(0xf);
+ I915_WRITE(SOUTH_CHICKEN1, val);
+ }
/* Enable digital hotplug on the PCH */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
@@ -3238,10 +3411,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- else
+ else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
+ else
+ mask = SDE_GMBUS_CPT;
- gen5_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
@@ -3272,7 +3447,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+ GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_GEN(dev_priv) >= 6) {
/*
@@ -3285,7 +3460,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
}
dev_priv->pm_imr = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@@ -3296,18 +3471,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
+ DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
DE_DP_A_HOTPLUG_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
- DE_AUX_CHANNEL_A |
- DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
- DE_POISON);
+ DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
+ DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_DP_A_HOTPLUG);
@@ -3315,11 +3486,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask = ~display_mask;
- I915_WRITE(HWSTAM, 0xeffe);
-
ibx_irq_pre_postinstall(dev);
- GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+ GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3429,15 +3598,13 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 9) {
- de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
- GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
if (IS_GEN9_LP(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
- de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
- GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+ de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3449,19 +3616,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
- dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
- dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+ for_each_pipe(dev_priv, pipe) {
+ dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
- for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
+ }
- GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
- GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+ GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
if (IS_GEN9_LP(dev_priv))
bxt_hpd_detection_setup(dev_priv);
@@ -3505,98 +3671,36 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void gen8_irq_uninstall(struct drm_device *dev)
+static void i8xx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!dev_priv)
- return;
-
- gen8_irq_reset(dev);
-}
-
-static void valleyview_irq_uninstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!dev_priv)
- return;
-
- I915_WRITE(VLV_MASTER_IER, 0);
- POSTING_READ(VLV_MASTER_IER);
-
- gen5_gt_irq_reset(dev_priv);
-
- I915_WRITE(HWSTAM, 0xffffffff);
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void cherryview_irq_uninstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!dev_priv)
- return;
-
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ i9xx_pipestat_irq_reset(dev_priv);
- gen8_gt_irq_reset(dev_priv);
-
- GEN5_IRQ_RESET(GEN8_PCU_);
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void ironlake_irq_uninstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!dev_priv)
- return;
-
- ironlake_irq_reset(dev);
-}
-
-static void i8xx_irq_preinstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
+ I915_WRITE16(HWSTAM, 0xffff);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE16(IMR, 0xffff);
- I915_WRITE16(IER, 0x0);
- POSTING_READ16(IER);
+ GEN2_IRQ_RESET();
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ u16 enable_mask;
- I915_WRITE16(EMR,
- ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+ I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
- I915_WRITE16(IER,
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_USER_INTERRUPT);
- POSTING_READ16(IER);
+ enable_mask =
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -3608,17 +3712,11 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-/*
- * Returns true when a page flip has completed.
- */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = to_i915(dev);
- u16 iir, new_iir;
- u32 pipe_stats[2];
- int pipe;
- irqreturn_t ret;
+ irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -3626,96 +3724,50 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- ret = IRQ_NONE;
- iir = I915_READ16(IIR);
- if (iir == 0)
- goto out;
+ do {
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u16 iir;
- while (iir) {
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock(&dev_priv->irq_lock);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ break;
- for_each_pipe(dev_priv, pipe) {
- i915_reg_t reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
+ ret = IRQ_HANDLED;
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff)
- I915_WRITE(reg, pipe_stats[pipe]);
- }
- spin_unlock(&dev_priv->irq_lock);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
I915_WRITE16(IIR, iir);
- new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- for_each_pipe(dev_priv, pipe) {
- int plane = pipe;
- if (HAS_FBC(dev_priv))
- plane = !plane;
-
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv,
- pipe);
- }
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
- iir = new_iir;
- }
- ret = IRQ_HANDLED;
+ i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ } while (0);
-out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
-static void i8xx_irq_uninstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
-
- for_each_pipe(dev_priv, pipe) {
- /* Clear enable bits; then clear status bits */
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
- }
- I915_WRITE16(IMR, 0xffff);
- I915_WRITE16(IER, 0x0);
- I915_WRITE16(IIR, I915_READ16(IIR));
-}
-
-static void i915_irq_preinstall(struct drm_device * dev)
+static void i915_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
- I915_WRITE16(HWSTAM, 0xeffe);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
- POSTING_READ(IER);
+ i9xx_pipestat_irq_reset(dev_priv);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ GEN3_IRQ_RESET();
}
static int i915_irq_postinstall(struct drm_device *dev)
@@ -3723,15 +3775,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 enable_mask;
- I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
@@ -3740,20 +3791,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev_priv)) {
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- POSTING_READ(PORT_HOTPLUG_EN);
-
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
- I915_WRITE(IMR, dev_priv->irq_mask);
- I915_WRITE(IER, enable_mask);
- POSTING_READ(IER);
-
- i915_enable_asle_pipestat(dev_priv);
+ GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -3762,6 +3806,8 @@ static int i915_irq_postinstall(struct drm_device *dev)
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
+ i915_enable_asle_pipestat(dev_priv);
+
return 0;
}
@@ -3769,8 +3815,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
- int pipe, ret = IRQ_NONE;
+ irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -3778,131 +3823,56 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- iir = I915_READ(IIR);
do {
- bool irq_received = (iir) != 0;
- bool blc_event = false;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock(&dev_priv->irq_lock);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
-
- for_each_pipe(dev_priv, pipe) {
- i915_reg_t reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /* Clear the PIPE*STAT regs before the IIR */
- if (pipe_stats[pipe] & 0x8000ffff) {
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = true;
- }
- }
- spin_unlock(&dev_priv->irq_lock);
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 iir;
- if (!irq_received)
+ iir = I915_READ(IIR);
+ if (iir == 0)
break;
- /* Consume port. Then clear IIR or we'll miss events */
+ ret = IRQ_HANDLED;
+
if (I915_HAS_HOTPLUG(dev_priv) &&
- iir & I915_DISPLAY_PORT_INTERRUPT) {
- u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
- if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- }
+ iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- for_each_pipe(dev_priv, pipe) {
- int plane = pipe;
- if (HAS_FBC(dev_priv))
- plane = !plane;
-
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv,
- pipe);
- }
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- ret = IRQ_HANDLED;
- iir = new_iir;
- } while (iir);
+ i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
-static void i915_irq_uninstall(struct drm_device * dev)
+static void i965_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
-
- if (I915_HAS_HOTPLUG(dev_priv)) {
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
-
- I915_WRITE16(HWSTAM, 0xffff);
- for_each_pipe(dev_priv, pipe) {
- /* Clear enable bits; then clear status bits */
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
- }
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
-
- I915_WRITE(IIR, I915_READ(IIR));
-}
-
-static void i965_irq_preinstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- I915_WRITE(HWSTAM, 0xeffe);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
- POSTING_READ(IER);
+ i9xx_pipestat_irq_reset(dev_priv);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ GEN3_IRQ_RESET();
}
static int i965_irq_postinstall(struct drm_device *dev)
@@ -3911,31 +3881,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 error_mask;
- /* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
-
- enable_mask = ~dev_priv->irq_mask;
- enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
- enable_mask |= I915_USER_INTERRUPT;
-
- if (IS_G4X(dev_priv))
- enable_mask |= I915_BSD_USER_INTERRUPT;
-
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
@@ -3951,12 +3896,34 @@ static int i965_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- I915_WRITE(IMR, dev_priv->irq_mask);
- I915_WRITE(IER, enable_mask);
- POSTING_READ(IER);
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- POSTING_READ(PORT_HOTPLUG_EN);
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev_priv))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
+
+ GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irq(&dev_priv->irq_lock);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
@@ -3992,9 +3959,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 iir, new_iir;
- u32 pipe_stats[I915_MAX_PIPES];
- int ret = IRQ_NONE, pipe;
+ irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -4002,121 +3967,46 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- iir = I915_READ(IIR);
-
- for (;;) {
- bool irq_received = (iir) != 0;
- bool blc_event = false;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock(&dev_priv->irq_lock);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
-
- for_each_pipe(dev_priv, pipe) {
- i915_reg_t reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = true;
- }
- }
- spin_unlock(&dev_priv->irq_lock);
+ do {
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 iir;
- if (!irq_received)
+ iir = I915_READ(IIR);
+ if (iir == 0)
break;
ret = IRQ_HANDLED;
- /* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT) {
- u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
- if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- }
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
+
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev_priv->engine[VCS]);
- for_each_pipe(dev_priv, pipe) {
- if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- }
-
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
- if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- iir = new_iir;
- }
+ i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
-static void i965_irq_uninstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
-
- if (!dev_priv)
- return;
-
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- I915_WRITE(HWSTAM, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe),
- I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
- I915_WRITE(IIR, I915_READ(IIR));
-}
-
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
@@ -4127,11 +4017,12 @@ static void i965_irq_uninstall(struct drm_device * dev)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
intel_hpd_init_work(dev_priv);
- INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+ INIT_WORK(&rps->work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
@@ -4147,7 +4038,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
- dev_priv->rps.pm_intrmsk_mbz = 0;
+ rps->pm_intrmsk_mbz = 0;
/*
* SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
@@ -4156,10 +4047,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
* TODO: verify if this can be reproduced on VLV,CHV.
*/
if (INTEL_GEN(dev_priv) <= 7)
- dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_GEN(dev_priv) >= 8)
- dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
@@ -4197,17 +4088,17 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
- dev->driver->irq_preinstall = cherryview_irq_preinstall;
+ dev->driver->irq_preinstall = cherryview_irq_reset;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
- dev->driver->irq_uninstall = cherryview_irq_uninstall;
+ dev->driver->irq_uninstall = cherryview_irq_reset;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
- dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_preinstall = valleyview_irq_reset;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
- dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->irq_uninstall = valleyview_irq_reset;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
@@ -4215,7 +4106,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
- dev->driver->irq_uninstall = gen8_irq_uninstall;
+ dev->driver->irq_uninstall = gen8_irq_reset;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
@@ -4229,29 +4120,29 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->irq_uninstall = ironlake_irq_reset;
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
if (IS_GEN2(dev_priv)) {
- dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_preinstall = i8xx_irq_reset;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
- dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
- dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_uninstall = i915_irq_reset;
dev->driver->irq_handler = i915_irq_handler;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
- dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_preinstall = i965_irq_reset;
dev->driver->irq_postinstall = i965_irq_postinstall;
- dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_uninstall = i965_irq_reset;
dev->driver->irq_handler = i965_irq_handler;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
@@ -4293,7 +4184,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
* interrupts as enabled _before_ actually enabling them to avoid
* special cases in our ordering checks.
*/
- dev_priv->pm.irqs_enabled = true;
+ dev_priv->runtime_pm.irqs_enabled = true;
return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
@@ -4309,7 +4200,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
drm_irq_uninstall(&dev_priv->drm);
intel_hpd_cancel_work(dev_priv);
- dev_priv->pm.irqs_enabled = false;
+ dev_priv->runtime_pm.irqs_enabled = false;
}
/**
@@ -4322,7 +4213,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
- dev_priv->pm.irqs_enabled = false;
+ dev_priv->runtime_pm.irqs_enabled = false;
synchronize_irq(dev_priv->drm.irq);
}
@@ -4335,7 +4226,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
- dev_priv->pm.irqs_enabled = true;
+ dev_priv->runtime_pm.irqs_enabled = true;
dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
new file mode 100644
index 000000000000..368c87d7ee9a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -0,0 +1,109 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_cflgt2.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00800000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2770), 0x00000004 },
+ { _MMIO(0x2774), 0x00000000 },
+ { _MMIO(0x2778), 0x00000003 },
+ { _MMIO(0x277c), 0x00000000 },
+ { _MMIO(0x2780), 0x00000007 },
+ { _MMIO(0x2784), 0x00000000 },
+ { _MMIO(0x2788), 0x00100002 },
+ { _MMIO(0x278c), 0x0000fff7 },
+ { _MMIO(0x2790), 0x00100002 },
+ { _MMIO(0x2794), 0x0000ffcf },
+ { _MMIO(0x2798), 0x00100082 },
+ { _MMIO(0x279c), 0x0000ffef },
+ { _MMIO(0x27a0), 0x001000c2 },
+ { _MMIO(0x27a4), 0x0000ffe7 },
+ { _MMIO(0x27a8), 0x00100001 },
+ { _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
+ { _MMIO(0x9888), 0x11810000 },
+ { _MMIO(0x9888), 0x07810013 },
+ { _MMIO(0x9888), 0x1f810000 },
+ { _MMIO(0x9888), 0x1d810000 },
+ { _MMIO(0x9888), 0x1b930040 },
+ { _MMIO(0x9888), 0x07e54000 },
+ { _MMIO(0x9888), 0x1f908000 },
+ { _MMIO(0x9888), 0x11900000 },
+ { _MMIO(0x9888), 0x37900000 },
+ { _MMIO(0x9888), 0x53900000 },
+ { _MMIO(0x9888), 0x45900000 },
+ { _MMIO(0x9888), 0x33900000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
+{
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "74fb4902-d3d3-4237-9e90-cbdc68d0a446",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
+
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
new file mode 100644
index 000000000000..1f3268ef2ea2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_CFLGT2_H__
+#define __I915_OA_CFLGT2_H__
+
+extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ab003dca113..b4faeb6aa2bd 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -25,235 +25,168 @@
#include "i915_params.h"
#include "i915_drv.h"
-struct i915_params i915 __read_mostly = {
- .modeset = -1,
- .panel_ignore_lid = 1,
- .semaphores = -1,
- .lvds_channel_mode = 0,
- .panel_use_ssc = -1,
- .vbt_sdvo_panel_type = -1,
- .enable_rc6 = -1,
- .enable_dc = -1,
- .enable_fbc = -1,
- .enable_execlists = -1,
- .enable_hangcheck = true,
- .enable_ppgtt = -1,
- .enable_psr = -1,
- .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
- .disable_power_well = -1,
- .enable_ips = 1,
- .fastboot = 0,
- .prefault_disable = 0,
- .load_detect_test = 0,
- .force_reset_modeset_test = 0,
- .reset = 2,
- .error_capture = true,
- .invert_brightness = 0,
- .disable_display = 0,
- .enable_cmd_parser = true,
- .use_mmio_flip = 0,
- .mmio_debug = 0,
- .verbose_state_checks = 1,
- .nuclear_pageflip = 0,
- .edp_vswing = 0,
- .enable_guc_loading = 0,
- .enable_guc_submission = 0,
- .guc_log_level = -1,
- .guc_firmware_path = NULL,
- .huc_firmware_path = NULL,
- .enable_dp_mst = true,
- .inject_load_failure = 0,
- .enable_dpcd_backlight = false,
- .enable_gvt = false,
+#define i915_param_named(name, T, perm, desc) \
+ module_param_named(name, i915_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+#define i915_param_named_unsafe(name, T, perm, desc) \
+ module_param_named_unsafe(name, i915_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+
+struct i915_params i915_modparams __read_mostly = {
+#define MEMBER(T, member, value) .member = (value),
+ I915_PARAMS_FOR_EACH(MEMBER)
+#undef MEMBER
};
-module_param_named(modeset, i915.modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
+i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
+i915_param_named_unsafe(panel_ignore_lid, int, 0600,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
+i915_param_named_unsafe(semaphores, int, 0400,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
-module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
-MODULE_PARM_DESC(enable_rc6,
+i915_param_named_unsafe(enable_rc6, int, 0400,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
-module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
-MODULE_PARM_DESC(enable_dc,
+i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
-module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
-MODULE_PARM_DESC(enable_fbc,
+i915_param_named_unsafe(enable_fbc, int, 0600,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
-module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
-MODULE_PARM_DESC(lvds_channel_mode,
+i915_param_named_unsafe(lvds_channel_mode, int, 0400,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
+i915_param_named_unsafe(panel_use_ssc, int, 0600,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
-module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
+i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-module_param_named_unsafe(reset, i915.reset, int, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
+i915_param_named_unsafe(reset, int, 0600,
+ "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
-module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400);
-MODULE_PARM_DESC(vbt_firmware,
- "Load VBT from specified file under /lib/firmware");
+i915_param_named_unsafe(vbt_firmware, charp, 0400,
+ "Load VBT from specified file under /lib/firmware");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-module_param_named(error_capture, i915.error_capture, bool, 0600);
-MODULE_PARM_DESC(error_capture,
+i915_param_named(error_capture, bool, 0600,
"Record the GPU state following a hang. "
"This information in /sys/class/drm/card<N>/error is vital for "
"triaging and debugging hangs.");
#endif
-module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
+i915_param_named_unsafe(enable_hangcheck, bool, 0644,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(enable_ppgtt,
+i915_param_named_unsafe(enable_ppgtt, int, 0400,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
-MODULE_PARM_DESC(enable_execlists,
+i915_param_named_unsafe(enable_execlists, int, 0400,
"Override execlists usage. "
"(-1=auto [default], 0=disabled, 1=enabled)");
-module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR "
- "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
- "Default: -1 (use per-chip default)");
+i915_param_named_unsafe(enable_psr, int, 0600,
+ "Enable PSR "
+ "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "Default: -1 (use per-chip default)");
-module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400);
-MODULE_PARM_DESC(alpha_support,
+i915_param_named_unsafe(alpha_support, bool, 0400,
"Enable alpha quality driver support for latest hardware. "
"See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
-module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
-MODULE_PARM_DESC(disable_power_well,
+i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
-module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
-module_param_named(fastboot, i915.fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot,
+i915_param_named(fastboot, bool, 0600,
"Try to skip unnecessary mode sets at boot time (default: false)");
-module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
+i915_param_named_unsafe(prefault_disable, bool, 0600,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
-module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
-MODULE_PARM_DESC(load_detect_test,
+i915_param_named_unsafe(load_detect_test, bool, 0600,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
-module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600);
-MODULE_PARM_DESC(force_reset_modeset_test,
+i915_param_named_unsafe(force_reset_modeset_test, bool, 0600,
"Force a modeset during gpu reset for testing (default:false). "
"For developers only.");
-module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
-MODULE_PARM_DESC(invert_brightness,
+i915_param_named_unsafe(invert_brightness, int, 0600,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
-module_param_named(disable_display, i915.disable_display, bool, 0400);
-MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+i915_param_named(disable_display, bool, 0400,
+ "Disable display (default: false)");
-module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
-MODULE_PARM_DESC(enable_cmd_parser,
- "Enable command parsing (true=enabled [default], false=disabled)");
+i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
+ "Enable command parsing (true=enabled [default], false=disabled)");
-module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
-MODULE_PARM_DESC(use_mmio_flip,
- "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
-
-module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
-MODULE_PARM_DESC(mmio_debug,
+i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
-module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
-MODULE_PARM_DESC(verbose_state_checks,
+i915_param_named(verbose_state_checks, bool, 0600,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400);
-MODULE_PARM_DESC(nuclear_pageflip,
- "Force enable atomic functionality on platforms that don't have full support yet.");
+i915_param_named_unsafe(nuclear_pageflip, bool, 0400,
+ "Force enable atomic functionality on platforms that don't have full support yet.");
/* WA to get away with the default setting in VBT for early platforms.Will be removed */
-module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
-MODULE_PARM_DESC(edp_vswing,
- "Ignore/Override vswing pre-emph table selection from VBT "
- "(0=use value from vbt [default], 1=low power swing(200mV),"
- "2=default swing(400mV))");
-
-module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
-MODULE_PARM_DESC(enable_guc_loading,
- "Enable GuC firmware loading "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
-MODULE_PARM_DESC(enable_guc_submission,
- "Enable GuC submission "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
-MODULE_PARM_DESC(guc_log_level,
+i915_param_named_unsafe(edp_vswing, int, 0400,
+ "Ignore/Override vswing pre-emph table selection from VBT "
+ "(0=use value from vbt [default], 1=low power swing(200mV),"
+ "2=default swing(400mV))");
+
+i915_param_named_unsafe(enable_guc_loading, int, 0400,
+ "Enable GuC firmware loading "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+i915_param_named_unsafe(enable_guc_submission, int, 0400,
+ "Enable GuC submission "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+i915_param_named(guc_log_level, int, 0400,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
-module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400);
-MODULE_PARM_DESC(guc_firmware_path,
+i915_param_named_unsafe(guc_firmware_path, charp, 0400,
"GuC firmware path to use instead of the default one");
-module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400);
-MODULE_PARM_DESC(huc_firmware_path,
+i915_param_named_unsafe(huc_firmware_path, charp, 0400,
"HuC firmware path to use instead of the default one");
-module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
-MODULE_PARM_DESC(enable_dp_mst,
+i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
-module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
-MODULE_PARM_DESC(inject_load_failure,
+
+i915_param_named_unsafe(inject_load_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
-module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
-MODULE_PARM_DESC(enable_dpcd_backlight,
+
+i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
-module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
-MODULE_PARM_DESC(enable_gvt,
+i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index ac844709c97e..c7292268ed43 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -27,56 +27,55 @@
#include <linux/cache.h> /* for __read_mostly */
-#define I915_PARAMS_FOR_EACH(func) \
- func(char *, vbt_firmware); \
- func(int, modeset); \
- func(int, panel_ignore_lid); \
- func(int, semaphores); \
- func(int, lvds_channel_mode); \
- func(int, panel_use_ssc); \
- func(int, vbt_sdvo_panel_type); \
- func(int, enable_rc6); \
- func(int, enable_dc); \
- func(int, enable_fbc); \
- func(int, enable_ppgtt); \
- func(int, enable_execlists); \
- func(int, enable_psr); \
- func(int, disable_power_well); \
- func(int, enable_ips); \
- func(int, invert_brightness); \
- func(int, enable_guc_loading); \
- func(int, enable_guc_submission); \
- func(int, guc_log_level); \
- func(char *, guc_firmware_path); \
- func(char *, huc_firmware_path); \
- func(int, use_mmio_flip); \
- func(int, mmio_debug); \
- func(int, edp_vswing); \
- func(int, reset); \
- func(unsigned int, inject_load_failure); \
+#define I915_PARAMS_FOR_EACH(param) \
+ param(char *, vbt_firmware, NULL) \
+ param(int, modeset, -1) \
+ param(int, panel_ignore_lid, 1) \
+ param(int, semaphores, -1) \
+ param(int, lvds_channel_mode, 0) \
+ param(int, panel_use_ssc, -1) \
+ param(int, vbt_sdvo_panel_type, -1) \
+ param(int, enable_rc6, -1) \
+ param(int, enable_dc, -1) \
+ param(int, enable_fbc, -1) \
+ param(int, enable_ppgtt, -1) \
+ param(int, enable_execlists, -1) \
+ param(int, enable_psr, -1) \
+ param(int, disable_power_well, -1) \
+ param(int, enable_ips, 1) \
+ param(int, invert_brightness, 0) \
+ param(int, enable_guc_loading, 0) \
+ param(int, enable_guc_submission, 0) \
+ param(int, guc_log_level, -1) \
+ param(char *, guc_firmware_path, NULL) \
+ param(char *, huc_firmware_path, NULL) \
+ param(int, mmio_debug, 0) \
+ param(int, edp_vswing, 0) \
+ param(int, reset, 2) \
+ param(unsigned int, inject_load_failure, 0) \
/* leave bools at the end to not create holes */ \
- func(bool, alpha_support); \
- func(bool, enable_cmd_parser); \
- func(bool, enable_hangcheck); \
- func(bool, fastboot); \
- func(bool, prefault_disable); \
- func(bool, load_detect_test); \
- func(bool, force_reset_modeset_test); \
- func(bool, error_capture); \
- func(bool, disable_display); \
- func(bool, verbose_state_checks); \
- func(bool, nuclear_pageflip); \
- func(bool, enable_dp_mst); \
- func(bool, enable_dpcd_backlight); \
- func(bool, enable_gvt)
+ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
+ param(bool, enable_cmd_parser, true) \
+ param(bool, enable_hangcheck, true) \
+ param(bool, fastboot, false) \
+ param(bool, prefault_disable, false) \
+ param(bool, load_detect_test, false) \
+ param(bool, force_reset_modeset_test, false) \
+ param(bool, error_capture, true) \
+ param(bool, disable_display, false) \
+ param(bool, verbose_state_checks, true) \
+ param(bool, nuclear_pageflip, false) \
+ param(bool, enable_dp_mst, true) \
+ param(bool, enable_dpcd_backlight, false) \
+ param(bool, enable_gvt, false)
-#define MEMBER(T, member) T member
+#define MEMBER(T, member, ...) T member;
struct i915_params {
I915_PARAMS_FOR_EACH(MEMBER);
};
#undef MEMBER
-extern struct i915_params i915 __read_mostly;
+extern struct i915_params i915_modparams __read_mostly;
#endif
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 09d97e0990b7..6458c309c039 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -54,8 +54,14 @@
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+#define GLK_COLORS \
+ .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
/* Keep in gen based order, and chronological order within a gen */
+
+#define GEN_DEFAULT_PAGE_SIZES \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K
+
#define GEN2_FEATURES \
.gen = 2, .num_pipes = 1, \
.has_overlay = 1, .overlay_needs_physical = 1, \
@@ -63,22 +69,24 @@
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I830,
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
-static const struct intel_device_info intel_i845g_info = {
+static const struct intel_device_info intel_i845g_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I845G,
};
-static const struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I85X, .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
@@ -86,7 +94,7 @@ static const struct intel_device_info intel_i85x_info = {
.has_fbc = 1,
};
-static const struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I865G,
};
@@ -95,10 +103,12 @@ static const struct intel_device_info intel_i865g_info = {
.gen = 3, .num_pipes = 2, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -106,7 +116,7 @@ static const struct intel_device_info intel_i915g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I915GM,
.is_mobile = 1,
@@ -118,7 +128,7 @@ static const struct intel_device_info intel_i915gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I945G,
.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -127,7 +137,7 @@ static const struct intel_device_info intel_i945g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I945GM, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -138,14 +148,14 @@ static const struct intel_device_info intel_i945gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_G33,
.has_hotplug = 1,
.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_PINEVIEW, .is_mobile = 1,
.has_hotplug = 1,
@@ -157,37 +167,39 @@ static const struct intel_device_info intel_pineview_info = {
.has_hotplug = 1, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_I965G,
.has_overlay = 1,
.hws_needs_physical = 1,
+ .has_snoop = false,
};
-static const struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_I965GM,
.is_mobile = 1, .has_fbc = 1,
.has_overlay = 1,
.supports_tv = 1,
.hws_needs_physical = 1,
+ .has_snoop = false,
};
-static const struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_G45,
- .has_pipe_cxsr = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
-static const struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_GM45,
.is_mobile = 1, .has_fbc = 1,
- .has_pipe_cxsr = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
@@ -195,17 +207,18 @@ static const struct intel_device_info intel_gm45_info = {
#define GEN5_FEATURES \
.gen = 5, .num_pipes = 2, \
.has_hotplug = 1, \
- .has_gmbus_irq = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info __initconst = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
};
-static const struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info __initconst = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
.is_mobile = 1, .has_fbc = 1,
@@ -219,20 +232,39 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_gmbus_irq = 1, \
.has_aliasing_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_sandybridge_d_info = {
- GEN6_FEATURES,
- .platform = INTEL_SANDYBRIDGE,
+#define SNB_D_PLATFORM \
+ GEN6_FEATURES, \
+ .platform = INTEL_SANDYBRIDGE
+
+static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
+ SNB_D_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_sandybridge_m_info = {
- GEN6_FEATURES,
- .platform = INTEL_SANDYBRIDGE,
- .is_mobile = 1,
+static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
+ SNB_D_PLATFORM,
+ .gt = 2,
+};
+
+#define SNB_M_PLATFORM \
+ GEN6_FEATURES, \
+ .platform = INTEL_SANDYBRIDGE, \
+ .is_mobile = 1
+
+
+static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
+ SNB_M_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
+ SNB_M_PLATFORM,
+ .gt = 2,
};
#define GEN7_FEATURES \
@@ -243,33 +275,52 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_gmbus_irq = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS
-static const struct intel_device_info intel_ivybridge_d_info = {
- GEN7_FEATURES,
- .platform = INTEL_IVYBRIDGE,
- .has_l3_dpf = 1,
+#define IVB_D_PLATFORM \
+ GEN7_FEATURES, \
+ .platform = INTEL_IVYBRIDGE, \
+ .has_l3_dpf = 1
+
+static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
+ IVB_D_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_ivybridge_m_info = {
- GEN7_FEATURES,
- .platform = INTEL_IVYBRIDGE,
- .is_mobile = 1,
- .has_l3_dpf = 1,
+static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
+ IVB_D_PLATFORM,
+ .gt = 2,
};
-static const struct intel_device_info intel_ivybridge_q_info = {
+#define IVB_M_PLATFORM \
+ GEN7_FEATURES, \
+ .platform = INTEL_IVYBRIDGE, \
+ .is_mobile = 1, \
+ .has_l3_dpf = 1
+
+static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
+ IVB_M_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
+ IVB_M_PLATFORM,
+ .gt = 2,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info __initconst = {
GEN7_FEATURES,
.platform = INTEL_IVYBRIDGE,
+ .gt = 2,
.num_pipes = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
-static const struct intel_device_info intel_valleyview_info = {
+static const struct intel_device_info intel_valleyview_info __initconst = {
.platform = INTEL_VALLEYVIEW,
.gen = 7,
.is_lp = 1,
@@ -277,18 +328,19 @@ static const struct intel_device_info intel_valleyview_info = {
.has_psr = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .has_gmbus_irq = 1,
.has_gmch_display = 1,
.has_hotplug = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
+ .has_snoop = true,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS
};
-#define HSW_FEATURES \
+#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.has_ddi = 1, \
@@ -299,35 +351,66 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6p = 0 /* RC6p removed-by HSW */, \
.has_runtime_pm = 1
-static const struct intel_device_info intel_haswell_info = {
- HSW_FEATURES,
- .platform = INTEL_HASWELL,
- .has_l3_dpf = 1,
+#define HSW_PLATFORM \
+ G75_FEATURES, \
+ .platform = INTEL_HASWELL, \
+ .has_l3_dpf = 1
+
+static const struct intel_device_info intel_haswell_gt1_info __initconst = {
+ HSW_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_haswell_gt2_info __initconst = {
+ HSW_PLATFORM,
+ .gt = 2,
+};
+
+static const struct intel_device_info intel_haswell_gt3_info __initconst = {
+ HSW_PLATFORM,
+ .gt = 3,
};
-#define BDW_FEATURES \
- HSW_FEATURES, \
+#define GEN8_FEATURES \
+ G75_FEATURES, \
BDW_COLORS, \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
.has_full_48bit_ppgtt = 1, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
#define BDW_PLATFORM \
- BDW_FEATURES, \
+ GEN8_FEATURES, \
.gen = 8, \
.platform = INTEL_BROADWELL
-static const struct intel_device_info intel_broadwell_info = {
+static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
+ BDW_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
BDW_PLATFORM,
+ .gt = 2,
};
-static const struct intel_device_info intel_broadwell_gt3_info = {
+static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
BDW_PLATFORM,
+ .gt = 3,
+ /* According to the device ID those devices are GT3, they were
+ * previously treated as not GT3, keep it like that.
+ */
+};
+
+static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
+ BDW_PLATFORM,
+ .gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
-static const struct intel_device_info intel_cherryview_info = {
+static const struct intel_device_info intel_cherryview_info __initconst = {
.gen = 8, .num_pipes = 3,
.has_hotplug = 1,
.is_lp = 1,
@@ -338,33 +421,61 @@ static const struct intel_device_info intel_cherryview_info = {
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
- .has_gmbus_irq = 1,
.has_logical_ring_contexts = 1,
.has_gmch_display = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
.has_reset_engine = 1,
+ .has_snoop = true,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_DEFAULT_PAGE_SIZES,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
CHV_COLORS,
};
-#define SKL_PLATFORM \
- BDW_FEATURES, \
- .gen = 9, \
- .platform = INTEL_SKYLAKE, \
+#define GEN9_DEFAULT_PAGE_SIZES \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
+
+#define GEN9_FEATURES \
+ GEN8_FEATURES, \
+ GEN9_DEFAULT_PAGE_SIZES, \
+ .has_logical_ring_preemption = 1, \
.has_csr = 1, \
.has_guc = 1, \
+ .has_ipc = 1, \
.ddb_size = 896
-static const struct intel_device_info intel_skylake_info = {
+#define SKL_PLATFORM \
+ GEN9_FEATURES, \
+ .gen = 9, \
+ .platform = INTEL_SKYLAKE
+
+static const struct intel_device_info intel_skylake_gt1_info __initconst = {
SKL_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_skylake_gt3_info = {
+static const struct intel_device_info intel_skylake_gt2_info __initconst = {
SKL_PLATFORM,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .gt = 2,
+};
+
+#define SKL_GT3_PLUS_PLATFORM \
+ SKL_PLATFORM, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
+
+
+static const struct intel_device_info intel_skylake_gt3_info __initconst = {
+ SKL_GT3_PLUS_PLATFORM,
+ .gt = 3,
+};
+
+static const struct intel_device_info intel_skylake_gt4_info __initconst = {
+ SKL_GT3_PLUS_PLATFORM,
+ .gt = 4,
};
#define GEN9_LP_FEATURES \
@@ -377,80 +488,93 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_ddi = 1, \
.has_fpga_dbg = 1, \
.has_fbc = 1, \
+ .has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
.has_csr = 1, \
.has_resource_streamer = 1, \
.has_rc6 = 1, \
.has_dp_mst = 1, \
- .has_gmbus_irq = 1, \
.has_logical_ring_contexts = 1, \
+ .has_logical_ring_preemption = 1, \
.has_guc = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
.has_reset_engine = 1, \
+ .has_snoop = true, \
+ .has_ipc = 1, \
+ GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS
-static const struct intel_device_info intel_broxton_info = {
+static const struct intel_device_info intel_broxton_info __initconst = {
GEN9_LP_FEATURES,
.platform = INTEL_BROXTON,
.ddb_size = 512,
- .has_reset_engine = false,
};
-static const struct intel_device_info intel_geminilake_info = {
+static const struct intel_device_info intel_geminilake_info __initconst = {
GEN9_LP_FEATURES,
.platform = INTEL_GEMINILAKE,
.ddb_size = 1024,
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
+ GLK_COLORS,
};
#define KBL_PLATFORM \
- BDW_FEATURES, \
+ GEN9_FEATURES, \
.gen = 9, \
- .platform = INTEL_KABYLAKE, \
- .has_csr = 1, \
- .has_guc = 1, \
- .ddb_size = 896
+ .platform = INTEL_KABYLAKE
-static const struct intel_device_info intel_kabylake_info = {
+static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
KBL_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_kabylake_gt3_info = {
+static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
KBL_PLATFORM,
+ .gt = 2,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
+ KBL_PLATFORM,
+ .gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
#define CFL_PLATFORM \
- .is_alpha_support = 1, \
- BDW_FEATURES, \
+ GEN9_FEATURES, \
.gen = 9, \
- .platform = INTEL_COFFEELAKE, \
- .has_csr = 1, \
- .has_guc = 1, \
- .ddb_size = 896
+ .platform = INTEL_COFFEELAKE
+
+static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
+ CFL_PLATFORM,
+ .gt = 1,
+};
-static const struct intel_device_info intel_coffeelake_info = {
+static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
CFL_PLATFORM,
+ .gt = 2,
};
-static const struct intel_device_info intel_coffeelake_gt3_info = {
+static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
CFL_PLATFORM,
+ .gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
-static const struct intel_device_info intel_cannonlake_info = {
- BDW_FEATURES,
+#define GEN10_FEATURES \
+ GEN9_FEATURES, \
+ .ddb_size = 1024, \
+ GLK_COLORS
+
+static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
+ GEN10_FEATURES,
.is_alpha_support = 1,
.platform = INTEL_CANNONLAKE,
.gen = 10,
- .ddb_size = 1024,
- .has_csr = 1,
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
+ .gt = 2,
};
/*
@@ -476,31 +600,40 @@ static const struct pci_device_id pciidlist[] = {
INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
- INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
- INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+ INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
+ INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
+ INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
+ INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
- INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
- INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
- INTEL_HSW_IDS(&intel_haswell_info),
+ INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
+ INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
+ INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
+ INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
+ INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
+ INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
+ INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
INTEL_VLV_IDS(&intel_valleyview_info),
- INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+ INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
+ INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
- INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
+ INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
INTEL_CHV_IDS(&intel_cherryview_info),
- INTEL_SKL_GT1_IDS(&intel_skylake_info),
- INTEL_SKL_GT2_IDS(&intel_skylake_info),
+ INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
+ INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
- INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+ INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
INTEL_BXT_IDS(&intel_broxton_info),
INTEL_GLK_IDS(&intel_geminilake_info),
- INTEL_KBL_GT1_IDS(&intel_kabylake_info),
- INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+ INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
+ INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_CFL_S_IDS(&intel_coffeelake_info),
- INTEL_CFL_H_IDS(&intel_coffeelake_info),
- INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info),
- INTEL_CNL_IDS(&intel_cannonlake_info),
+ INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
+ INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -510,7 +643,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
i915_driver_unload(dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -519,7 +652,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *) ent->driver_data;
int err;
- if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+ if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) {
DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
"See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
"to enable support in this kernel version, or check for kernel updates.\n");
@@ -577,10 +710,10 @@ static int __init i915_init(void)
* vga_text_mode_force boot option.
*/
- if (i915.modeset == 0)
+ if (i915_modparams.modeset == 0)
use_kms = false;
- if (vgacon_text_force() && i915.modeset == -1)
+ if (vgacon_text_force() && i915_modparams.modeset == -1)
use_kms = false;
if (!use_kms) {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 370b9d248fed..59ee808f8fd9 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -206,6 +206,7 @@
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
+#include "i915_oa_cflgt2.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -1213,7 +1214,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1259,7 +1260,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915.enable_execlists) {
+ if (i915_modparams.enable_execlists) {
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
} else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1850,8 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEN9(dev_priv)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2931,6 +2931,9 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_kblgt3(dev_priv);
} else if (IS_GEMINILAKE(dev_priv)) {
i915_perf_load_test_config_glk(dev_priv);
+ } else if (IS_COFFEELAKE(dev_priv)) {
+ if (IS_CFL_GT2(dev_priv))
+ i915_perf_load_test_config_cflgt2(dev_priv);
}
if (dev_priv->perf.oa.test_config.id == 0)
@@ -3409,7 +3412,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.timestamp_frequency = 12500000;
dev_priv->perf.oa.oa_formats = hsw_oa_formats;
- } else if (i915.enable_execlists) {
+ } else if (i915_modparams.enable_execlists) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
@@ -3457,6 +3460,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
break;
case INTEL_SKYLAKE:
case INTEL_KABYLAKE:
+ case INTEL_COFFEELAKE:
dev_priv->perf.oa.timestamp_frequency = 12000000;
break;
default:
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 0679a58cdbae..195203f298df 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -53,6 +53,7 @@ enum vgt_g2v_type {
* VGT capabilities type
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
+#define VGT_CAPS_HWSP_EMULATION BIT(3)
struct vgt_if {
u64 magic; /* VGT_MAGIC */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c9bcc6c45012..3866c49bc390 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2336,7 +2336,7 @@ enum i915_power_well_id {
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + index*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
@@ -2371,8 +2371,12 @@ enum i915_power_well_id {
#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
+#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@@ -2491,6 +2495,7 @@ enum i915_power_well_id {
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
@@ -2728,6 +2733,11 @@ enum i915_power_well_id {
#define GEN9_F2_SS_DIS_SHIFT 20
#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+#define GEN10_F2_S_ENA_SHIFT 22
+#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
+#define GEN10_F2_SS_DIS_SHIFT 18
+#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -2743,6 +2753,9 @@ enum i915_power_well_id {
#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
+#define GEN10_EU_DISABLE3 _MMIO(0x9140)
+#define GEN10_EU_DIS_SS_MASK 0xff
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3806,6 +3819,22 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define _CLKGATE_DIS_PSL_A 0x46520
+#define _CLKGATE_DIS_PSL_B 0x46524
+#define _CLKGATE_DIS_PSL_C 0x46528
+#define DPF_GATING_DIS (1 << 10)
+#define DPF_RAM_GATING_DIS (1 << 9)
+#define DPFR_GATING_DIS (1 << 8)
+
+#define CLKGATE_DIS_PSL(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+
+/*
+ * GEN10 clock gating regs
+ */
+#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
+#define SARBUNIT_CLKGATE_DIS (1 << 5)
+
/*
* Display engine regs
*/
@@ -4036,7 +4065,7 @@ enum {
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
#define EDP_PSR2_IDLE_MASK 0xf
-#define EDP_FRAMES_BEFORE_SU_ENTRY (1<<4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
@@ -5210,7 +5239,7 @@ enum {
#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
-#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MAX (3 << 26) /* Varies per platform */
#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
@@ -5652,8 +5681,7 @@ enum {
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE_C (1<<29)
-#define CBR_DPLLBMD_PIPE_B (1<<18)
+#define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -6902,7 +6930,7 @@ enum {
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
#define CHICKEN_PAR1_1 _MMIO(0x42080)
-#define SKL_RC_HASH_OUTSIDE (1 << 15)
+#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
@@ -6916,6 +6944,10 @@ enum {
#define GLK_CL1_PWR_DOWN (1 << 11)
#define GLK_CL0_PWR_DOWN (1 << 10)
+#define CHICKEN_MISC_4 _MMIO(0x4208c)
+#define FBC_STRIDE_OVERRIDE (1 << 13)
+#define FBC_STRIDE_MASK 0x1FFF
+
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@@ -6934,6 +6966,7 @@ enum {
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define DISP_DATA_PARTITION_5_6 (1<<6)
+#define DISP_IPC_ENABLE (1<<3)
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_POWER_REQUEST (1<<31)
#define DBUF_POWER_STATE (1<<30)
@@ -6969,12 +7002,19 @@ enum {
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0)
+#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
+#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
+#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
+#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
@@ -7018,6 +7058,7 @@ enum {
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
+#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
@@ -7139,9 +7180,6 @@ enum {
#define SERR_INT _MMIO(0xc4040)
#define SERR_INT_POISON (1<<31)
-#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
-#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
-#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
/* digital port hotplug */
@@ -7454,6 +7492,8 @@ enum {
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
+#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
+#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
#define SPT_PWM_GRANULARITY (1<<0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
@@ -7471,6 +7511,7 @@ enum {
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
@@ -7937,8 +7978,8 @@ enum {
#define GEN7_PCODE_TIMEOUT 0x2
#define GEN7_PCODE_ILLEGAL_DATA 0x3
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
-#define GEN6_PCODE_WRITE_RC6VIDS 0x4
-#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
@@ -7957,7 +7998,9 @@ enum {
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
-#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+ /* See also IPS_CTL */
+#define IPS_PCODE_CONTROL (1 << 30)
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN9_PCODE_SAGV_CONTROL 0x21
#define GEN9_SAGV_DISABLE 0x0
#define GEN9_SAGV_IS_DISABLED 0x1
@@ -8045,10 +8088,12 @@ enum {
#define FLOW_CONTROL_ENABLE (1<<15)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
+#define THROTTLE_12_5 (7<<2)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1<<0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -8060,9 +8105,11 @@ enum {
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
+#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8)
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
@@ -8575,7 +8622,7 @@ enum skl_power_gate {
#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
-#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10)
+#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
@@ -8782,6 +8829,15 @@ enum skl_power_gate {
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+/* Gen4+ Timestamp and Pipe Frame time stamp registers */
+#define GEN4_TIMESTAMP _MMIO(0x2358)
+#define ILK_TIMESTAMP_HI _MMIO(0x70070)
+#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+
+#define _PIPE_FRMTMSTMP_A 0x70048
+#define PIPE_FRMTMSTMP(pipe) \
+ _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
+
/* BXT MIPI clock controls */
#define BXT_MAX_VAR_OUTPUT_KHZ 39500
@@ -9363,4 +9419,8 @@ enum skl_power_gate {
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
+#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
+#define MMCD_PCLA (1 << 31)
+#define MMCD_HOTSPOT_EN (1 << 27)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5c86925a0294..8f3aa4dc0c98 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -108,8 +108,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_restore_fences(dev_priv);
-
if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index f29540f922af..e8ca67a129d2 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
#include <linux/reservation.h>
#include "i915_sw_fence.h"
@@ -40,6 +41,11 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
debug_object_init(fence, &i915_sw_fence_debug_descr);
}
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+ debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
+}
+
static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
debug_object_activate(fence, &i915_sw_fence_debug_descr);
@@ -78,6 +84,10 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+}
+
static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}
@@ -356,31 +366,44 @@ struct i915_sw_dma_fence_cb {
struct i915_sw_fence *fence;
struct dma_fence *dma;
struct timer_list timer;
+ struct irq_work work;
};
-static void timer_i915_sw_fence_wake(unsigned long data)
+static void timer_i915_sw_fence_wake(struct timer_list *t)
{
- struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
+ struct i915_sw_dma_fence_cb *cb = from_timer(cb, t, timer);
+ struct i915_sw_fence *fence;
+
+ fence = xchg(&cb->fence, NULL);
+ if (!fence)
+ return;
pr_warn("asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
- dma_fence_put(cb->dma);
- cb->dma = NULL;
- i915_sw_fence_complete(cb->fence);
- cb->timer.function = NULL;
+ i915_sw_fence_complete(fence);
}
static void dma_i915_sw_fence_wake(struct dma_fence *dma,
struct dma_fence_cb *data)
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_fence *fence;
+
+ fence = xchg(&cb->fence, NULL);
+ if (fence)
+ i915_sw_fence_complete(fence);
+
+ irq_work_queue(&cb->work);
+}
+
+static void irq_i915_sw_fence_work(struct irq_work *wrk)
+{
+ struct i915_sw_dma_fence_cb *cb = container_of(wrk, typeof(*cb), work);
del_timer_sync(&cb->timer);
- if (cb->timer.function)
- i915_sw_fence_complete(cb->fence);
dma_fence_put(cb->dma);
kfree(cb);
@@ -411,9 +434,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
i915_sw_fence_await(fence);
cb->dma = NULL;
- __setup_timer(&cb->timer,
- timer_i915_sw_fence_wake, (unsigned long)cb,
- TIMER_IRQSAFE);
+ timer_setup(&cb->timer, timer_i915_sw_fence_wake, TIMER_IRQSAFE);
+ init_irq_work(&cb->work, irq_i915_sw_fence_work);
if (timeout) {
cb->dma = dma_fence_get(dma);
mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
@@ -492,5 +514,6 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/lib_sw_fence.c"
#include "selftests/i915_sw_fence.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d61c8727f756..791759f632e1 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -49,7 +49,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_rc6_enabled());
}
static ssize_t
@@ -246,7 +246,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -261,7 +261,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
ret = intel_gpu_freq(dev_priv, ret);
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
@@ -275,7 +275,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.cur_freq));
+ dev_priv->gt_pm.rps.cur_freq));
}
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -284,7 +284,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.boost_freq));
+ dev_priv->gt_pm.rps.boost_freq));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -292,6 +292,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
ssize_t ret;
@@ -301,12 +302,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
/* Validate against (static) hardware limits */
val = intel_freq_opcode(dev_priv, val);
- if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+ if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
- mutex_lock(&dev_priv->rps.hw_lock);
- dev_priv->rps.boost_freq = val;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
+ rps->boost_freq = val;
+ mutex_unlock(&dev_priv->pcu_lock);
return count;
}
@@ -318,7 +319,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.efficient_freq));
+ dev_priv->gt_pm.rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -327,7 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.max_freq_softlimit));
+ dev_priv->gt_pm.rps.max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -335,6 +336,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
ssize_t ret;
@@ -344,34 +346,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = intel_freq_opcode(dev_priv, val);
- if (val < dev_priv->rps.min_freq ||
- val > dev_priv->rps.max_freq ||
- val < dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ if (val < rps->min_freq ||
+ val > rps->max_freq ||
+ val < rps->min_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
- if (val > dev_priv->rps.rp0_freq)
+ if (val > rps->rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
intel_gpu_freq(dev_priv, val));
- dev_priv->rps.max_freq_softlimit = val;
+ rps->max_freq_softlimit = val;
- val = clamp_t(int, dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
+ val = clamp_t(int, rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
@@ -384,7 +386,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.min_freq_softlimit));
+ dev_priv->gt_pm.rps.min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -392,6 +394,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
ssize_t ret;
@@ -401,30 +404,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = intel_freq_opcode(dev_priv, val);
- if (val < dev_priv->rps.min_freq ||
- val > dev_priv->rps.max_freq ||
- val > dev_priv->rps.max_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ if (val < rps->min_freq ||
+ val > rps->max_freq ||
+ val > rps->max_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
- dev_priv->rps.min_freq_softlimit = val;
+ rps->min_freq_softlimit = val;
- val = clamp_t(int, dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
+ val = clamp_t(int, rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
@@ -448,14 +451,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ val = intel_gpu_freq(dev_priv, rps->rp0_freq);
else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ val = intel_gpu_freq(dev_priv, rps->rp1_freq);
else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ val = intel_gpu_freq(dev_priv, rps->min_freq);
else
BUG();
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index ef72da74b87f..4e76768ffa95 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -346,7 +346,7 @@ TRACE_EVENT(i915_gem_object_create,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, size)
+ __field(u64, size)
),
TP_fast_assign(
@@ -354,7 +354,7 @@ TRACE_EVENT(i915_gem_object_create,
__entry->size = obj->base.size;
),
- TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+ TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
);
TRACE_EVENT(i915_gem_shrink,
@@ -385,7 +385,7 @@ TRACE_EVENT(i915_vma_bind,
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u64, offset)
- __field(u32, size)
+ __field(u64, size)
__field(unsigned, flags)
),
@@ -397,7 +397,7 @@ TRACE_EVENT(i915_vma_bind,
__entry->flags = flags;
),
- TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
+ TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
@@ -411,7 +411,7 @@ TRACE_EVENT(i915_vma_unbind,
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u64, offset)
- __field(u32, size)
+ __field(u64, size)
),
TP_fast_assign(
@@ -421,18 +421,18 @@ TRACE_EVENT(i915_vma_unbind,
__entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
+ TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_pwrite,
- TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, offset)
- __field(u32, len)
+ __field(u64, offset)
+ __field(u64, len)
),
TP_fast_assign(
@@ -441,18 +441,18 @@ TRACE_EVENT(i915_gem_object_pwrite,
__entry->len = len;
),
- TP_printk("obj=%p, offset=%u, len=%u",
+ TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_pread,
- TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, offset)
- __field(u32, len)
+ __field(u64, offset)
+ __field(u64, len)
),
TP_fast_assign(
@@ -461,17 +461,17 @@ TRACE_EVENT(i915_gem_object_pread,
__entry->len = len;
),
- TP_printk("obj=%p, offset=%u, len=%u",
+ TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_fault,
- TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+ TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
TP_ARGS(obj, index, gtt, write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, index)
+ __field(u64, index)
__field(bool, gtt)
__field(bool, write)
),
@@ -483,7 +483,7 @@ TRACE_EVENT(i915_gem_object_fault,
__entry->write = write;
),
- TP_printk("obj=%p, %s index=%u %s",
+ TP_printk("obj=%p, %s index=%llu %s",
__entry->obj,
__entry->gtt ? "GTT" : "CPU",
__entry->index,
@@ -516,14 +516,14 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
);
TRACE_EVENT(i915_gem_evict,
- TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+ TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
TP_ARGS(vm, size, align, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_address_space *, vm)
- __field(u32, size)
- __field(u32, align)
+ __field(u64, size)
+ __field(u64, align)
__field(unsigned int, flags)
),
@@ -535,43 +535,11 @@ TRACE_EVENT(i915_gem_evict,
__entry->flags = flags;
),
- TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+ TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
__entry->dev, __entry->vm, __entry->size, __entry->align,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);
-TRACE_EVENT(i915_gem_evict_everything,
- TP_PROTO(struct drm_device *dev),
- TP_ARGS(dev),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- ),
-
- TP_printk("dev=%d", __entry->dev)
-);
-
-TRACE_EVENT(i915_gem_evict_vm,
- TP_PROTO(struct i915_address_space *vm),
- TP_ARGS(vm),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(struct i915_address_space *, vm)
- ),
-
- TP_fast_assign(
- __entry->dev = vm->i915->drm.primary->index;
- __entry->vm = vm;
- ),
-
- TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
-);
-
TRACE_EVENT(i915_gem_evict_node,
TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
TP_ARGS(vm, node, flags),
@@ -594,12 +562,29 @@ TRACE_EVENT(i915_gem_evict_node,
__entry->flags = flags;
),
- TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
+ TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
__entry->dev, __entry->vm,
__entry->start, __entry->size,
__entry->color, __entry->flags)
);
+TRACE_EVENT(i915_gem_evict_vm,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = vm->i915->drm.primary->index;
+ __entry->vm = vm;
+ ),
+
+ TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
+);
+
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from),
@@ -650,29 +635,6 @@ TRACE_EVENT(i915_gem_request_queue,
__entry->flags)
);
-TRACE_EVENT(i915_gem_ring_flush,
- TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
- TP_ARGS(req, invalidate, flush),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, ring)
- __field(u32, invalidate)
- __field(u32, flush)
- ),
-
- TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->ring = req->engine->id;
- __entry->invalidate = invalidate;
- __entry->flush = flush;
- ),
-
- TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
- __entry->dev, __entry->ring,
- __entry->invalidate, __entry->flush)
-);
-
DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req),
@@ -1032,5 +994,5 @@ TRACE_EVENT(switch_mm,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 12fc250b47b9..af3d7cc53fa1 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -99,6 +99,11 @@
__T; \
})
+static inline u64 ptr_to_u64(const void *ptr)
+{
+ return (uintptr_t)ptr;
+}
+
#define u64_to_ptr(T, x) ({ \
typecheck(u64, x); \
(T *)(uintptr_t)(x); \
@@ -119,4 +124,17 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first);
}
+/*
+ * Wait until the work is finally complete, even if it tries to postpone
+ * by requeueing itself. Note, that if the worker never cancels itself,
+ * we will spin forever.
+ */
+static inline void drain_delayed_work(struct delayed_work *dw)
+{
+ do {
+ while (flush_delayed_work(dw))
+ ;
+ } while (delayed_work_pending(dw));
+}
+
#endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index b72bd2956b70..bb8338450dc1 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -30,6 +30,12 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+static inline bool
+intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 02d1a5eacb00..fbfab2f33023 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -54,12 +54,21 @@ i915_vma_retire(struct i915_gem_active *active,
if (--obj->active_count)
return;
+ /* Prune the shared fence arrays iff completely idle (inc. external) */
+ if (reservation_object_trylock(obj->resv)) {
+ if (reservation_object_test_signaled_rcu(obj->resv, true))
+ reservation_object_add_excl_fence(obj->resv, NULL);
+ reservation_object_unlock(obj->resv);
+ }
+
/* Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
+ spin_lock(&rq->i915->mm.obj_lock);
if (obj->bind_count)
- list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+ list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
+ spin_unlock(&rq->i915->mm.obj_lock);
obj->mm.dirty = true; /* be paranoid */
@@ -266,6 +275,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (bind_flags == 0)
return 0;
+ GEM_BUG_ON(!vma->pages);
+
trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
@@ -278,13 +289,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
void __iomem *ptr;
+ int err;
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(vma->vm->i915);
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
- return IO_ERR_PTR(-ENODEV);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ err = -ENODEV;
+ goto err;
+ }
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
@@ -294,14 +308,36 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
vma->node.start,
vma->node.size);
- if (ptr == NULL)
- return IO_ERR_PTR(-ENOMEM);
+ if (ptr == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
vma->iomap = ptr;
}
__i915_vma_pin(vma);
+
+ err = i915_vma_pin_fence(vma);
+ if (err)
+ goto err_unpin;
+
return ptr;
+
+err_unpin:
+ __i915_vma_unpin(vma);
+err:
+ return IO_ERR_PTR(err);
+}
+
+void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+
+ GEM_BUG_ON(vma->iomap == NULL);
+
+ i915_vma_unpin_fence(vma);
+ i915_vma_unpin(vma);
}
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
@@ -471,25 +507,64 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (ret)
return ret;
+ GEM_BUG_ON(vma->pages);
+
+ ret = vma->vm->set_pages(vma);
+ if (ret)
+ goto err_unpin;
+
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end)) {
ret = -EINVAL;
- goto err_unpin;
+ goto err_clear;
}
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
size, offset, obj->cache_level,
flags);
if (ret)
- goto err_unpin;
+ goto err_clear;
} else {
+ /*
+ * We only support huge gtt pages through the 48b PPGTT,
+ * however we also don't want to force any alignment for
+ * objects which need to be tightly packed into the low 32bits.
+ *
+ * Note that we assume that GGTT are limited to 4GiB for the
+ * forseeable future. See also i915_ggtt_offset().
+ */
+ if (upper_32_bits(end - 1) &&
+ vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ /*
+ * We can't mix 64K and 4K PTEs in the same page-table
+ * (2M block), and so to avoid the ugliness and
+ * complexity of coloring we opt for just aligning 64K
+ * objects to 2M.
+ */
+ u64 page_alignment =
+ rounddown_pow_of_two(vma->page_sizes.sg |
+ I915_GTT_PAGE_SIZE_2M);
+
+ /*
+ * Check we don't expand for the limited Global GTT
+ * (mappable aperture is even more precious!). This
+ * also checks that we exclude the aliasing-ppgtt.
+ */
+ GEM_BUG_ON(i915_vma_is_ggtt(vma));
+
+ alignment = max(alignment, page_alignment);
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ }
+
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
size, alignment, obj->cache_level,
start, end, flags);
if (ret)
- goto err_unpin;
+ goto err_clear;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -497,13 +572,19 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
- list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++;
+ spin_unlock(&dev_priv->mm.obj_lock);
+
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
return 0;
+err_clear:
+ vma->vm->clear_pages(vma);
err_unpin:
i915_gem_object_unpin_pages(obj);
return ret;
@@ -512,20 +593,24 @@ err_unpin:
static void
i915_vma_remove(struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ vma->vm->clear_pages(vma);
+
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
/* Since the unbound list is global, only move to that list if
* no more VMAs exist.
*/
+ spin_lock(&i915->mm.obj_lock);
if (--obj->bind_count == 0)
- list_move_tail(&obj->global_link,
- &to_i915(obj->base.dev)->mm.unbound_list);
+ list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+ spin_unlock(&i915->mm.obj_lock);
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -569,8 +654,8 @@ int __i915_vma_do_pin(struct i915_vma *vma,
err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) {
- GEM_BUG_ON(vma->pages);
i915_vma_remove(vma);
+ GEM_BUG_ON(vma->pages);
}
err_unpin:
__i915_vma_unpin(vma);
@@ -620,6 +705,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
vma->iomap = NULL;
}
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+ struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ u64 vma_offset;
+
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+ if (!i915_vma_has_userfault(vma))
+ return;
+
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(!vma->obj->userfault_count);
+
+ vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+ drm_vma_node_offset_addr(node) + vma_offset,
+ vma->size,
+ 1);
+
+ i915_vma_unset_userfault(vma);
+ if (!--vma->obj->userfault_count)
+ list_del(&vma->obj->userfault_link);
+}
+
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -683,11 +792,13 @@ int i915_vma_unbind(struct i915_vma *vma)
return ret;
/* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
+ i915_vma_revoke_mmap(vma);
__i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE;
}
+ GEM_BUG_ON(vma->fence);
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma);
@@ -695,13 +806,6 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- if (vma->pages != obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
i915_vma_remove(vma);
destroy:
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e811067c7724..1e2bc9b3c3ac 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,6 +55,7 @@ struct i915_vma {
void __iomem *iomap;
u64 size;
u64 display_alignment;
+ struct i915_page_sizes page_sizes;
u32 fence_size;
u32 fence_alignment;
@@ -65,7 +66,7 @@ struct i915_vma {
* that exist in the ctx->handle_vmas LUT for this vma.
*/
unsigned int open_count;
- unsigned int flags;
+ unsigned long flags;
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, execbuffer
@@ -87,6 +88,8 @@ struct i915_vma {
#define I915_VMA_GGTT BIT(8)
#define I915_VMA_CAN_FENCE BIT(9)
#define I915_VMA_CLOSED BIT(10)
+#define I915_VMA_USERFAULT_BIT 11
+#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -145,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
return vma->flags & I915_VMA_CLOSED;
}
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
+{
+ return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
return vma->active;
@@ -243,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
@@ -321,12 +341,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
* Callers must hold the struct_mutex. This function is only valid to be
* called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
*/
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
+void i915_vma_unpin_iomap(struct i915_vma *vma);
static inline struct page *i915_vma_first_page(struct i915_vma *vma)
{
@@ -349,15 +364,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
*
* True if the vma has a fence, false otherwise.
*/
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
+int i915_vma_pin_fence(struct i915_vma *vma);
+int __must_check i915_vma_put_fence(struct i915_vma *vma);
+
+static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
}
/**
@@ -372,10 +385,8 @@ static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
+ if (vma->fence)
+ __i915_vma_unpin_fence(vma);
}
#endif
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index ee76fab7bb6f..8e6dc159f64d 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -107,7 +107,9 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
-int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state)
{
struct drm_plane *plane = intel_state->base.plane;
@@ -124,7 +126,7 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
* anything driver-specific we need to test in that case, so
* just return success.
*/
- if (!intel_state->base.crtc && !plane->state->crtc)
+ if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
@@ -194,16 +196,21 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
else
crtc_state->active_planes &= ~BIT(intel_plane->id);
- return intel_plane_atomic_calc_changes(&crtc_state->base, state);
+ return intel_plane_atomic_calc_changes(old_crtc_state,
+ &crtc_state->base,
+ old_plane_state,
+ state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_plane_state *new_plane_state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_crtc_state *drm_crtc_state;
-
- crtc = crtc ? crtc : plane->state->crtc;
+ struct drm_atomic_state *state = new_plane_state->state;
+ const struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
+ const struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
/*
* Both crtc and plane->crtc could be NULL if we're updating a
@@ -214,29 +221,33 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
- if (WARN_ON(!drm_crtc_state))
- return -EINVAL;
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state),
- to_intel_plane_state(state));
+ return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
+ to_intel_crtc_state(new_crtc_state),
+ to_intel_plane_state(old_plane_state),
+ to_intel_plane_state(new_plane_state));
}
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_plane_state *intel_state =
- to_intel_plane_state(plane->state);
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ const struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, intel_plane);
+ struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
+
+ if (new_plane_state->base.visible) {
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
- if (intel_state->base.visible) {
trace_intel_update_plane(plane,
to_intel_crtc(crtc));
intel_plane->update_plane(intel_plane,
- to_intel_crtc_state(crtc->state),
- intel_state);
+ new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(plane,
to_intel_crtc(crtc));
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 27743be5b768..0ddba16fde1b 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -754,7 +754,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (WARN_ON(pipe >= I915_MAX_PIPES))
+ if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
return NULL;
/* MST */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 5d4cd3d00564..fd23023df7c1 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -356,7 +356,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915.vbt_sdvo_panel_type;
+ index = i915_modparams.vbt_sdvo_panel_type;
if (index == -2) {
DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
@@ -431,70 +431,31 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->vbt.fdi_rx_polarity_inverted);
}
-static void
-parse_general_definitions(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+static const struct child_device_config *
+child_device_ptr(const struct bdb_general_definitions *defs, int i)
{
- const struct bdb_general_definitions *general;
-
- general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- if (general) {
- u16 block_size = get_blocksize(general);
- if (block_size >= sizeof(*general)) {
- int bus_pin = general->crt_ddc_gmbus_pin;
- DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
- dev_priv->vbt.crt_ddc_pin = bus_pin;
- } else {
- DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- block_size);
- }
- }
-}
-
-static const union child_device_config *
-child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
-{
- return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
+ return (const void *) &defs->devices[i * defs->child_dev_size];
}
static void
-parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- struct sdvo_device_mapping *p_mapping;
- const struct bdb_general_definitions *p_defs;
- const struct old_child_dev_config *child; /* legacy */
- int i, child_device_num, count;
- u16 block_size;
-
- p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
- return;
- }
+ struct sdvo_device_mapping *mapping;
+ const struct child_device_config *child;
+ int i, count = 0;
/*
- * Only parse SDVO mappings when the general definitions block child
- * device size matches that of the *legacy* child device config
- * struct. Thus, SDVO mapping will be skipped for newer VBT.
+ * Only parse SDVO mappings on gens that could have SDVO. This isn't
+ * accurate and doesn't have to be, as long as it's not too strict.
*/
- if (p_defs->child_dev_size != sizeof(*child)) {
- DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
+ if (!IS_GEN(dev_priv, 3, 7)) {
+ DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
return;
}
- /* get the block size of general definitions */
- block_size = get_blocksize(p_defs);
- /* get the number of child device */
- child_device_num = (block_size - sizeof(*p_defs)) /
- p_defs->child_dev_size;
- count = 0;
- for (i = 0; i < child_device_num; i++) {
- child = &child_device_ptr(p_defs, i)->old;
- if (!child->device_type) {
- /* skip the device block if device type is invalid */
- continue;
- }
+
+ for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ child = dev_priv->vbt.child_dev + i;
+
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
/*
@@ -514,20 +475,20 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
- if (!p_mapping->initialized) {
- p_mapping->dvo_port = child->dvo_port;
- p_mapping->slave_addr = child->slave_addr;
- p_mapping->dvo_wiring = child->dvo_wiring;
- p_mapping->ddc_pin = child->ddc_pin;
- p_mapping->i2c_pin = child->i2c_pin;
- p_mapping->initialized = 1;
+ mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
+ if (!mapping->initialized) {
+ mapping->dvo_port = child->dvo_port;
+ mapping->slave_addr = child->slave_addr;
+ mapping->dvo_wiring = child->dvo_wiring;
+ mapping->ddc_pin = child->ddc_pin;
+ mapping->i2c_pin = child->i2c_pin;
+ mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
- p_mapping->dvo_port,
- p_mapping->slave_addr,
- p_mapping->dvo_wiring,
- p_mapping->ddc_pin,
- p_mapping->i2c_pin);
+ mapping->dvo_port,
+ mapping->slave_addr,
+ mapping->dvo_wiring,
+ mapping->ddc_pin,
+ mapping->i2c_pin);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@@ -545,7 +506,6 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
/* No SDVO device info is found */
DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
}
- return;
}
static void
@@ -577,7 +537,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
- const struct edp_link_params *edp_link_params;
+ const struct edp_fast_link_params *edp_link_params;
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
@@ -601,7 +561,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* Get the eDP sequencing and link info */
edp_pps = &edp->power_seqs[panel_type];
- edp_link_params = &edp->link_params[panel_type];
+ edp_link_params = &edp->fast_link_params[panel_type];
dev_priv->vbt.edp.pps = *edp_pps;
@@ -676,8 +636,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
uint8_t vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (i915.edp_vswing) {
- dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
+ if (i915_modparams.edp_vswing) {
+ dev_priv->vbt.edp.low_vswing =
+ i915_modparams.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
dev_priv->vbt.edp.low_vswing = vswing == 0;
@@ -730,6 +691,48 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
+static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
+ u16 version, enum port port)
+{
+ if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
+ dev_priv->vbt.dsi.bl_ports = BIT(port);
+ if (dev_priv->vbt.dsi.config->cabc_supported)
+ dev_priv->vbt.dsi.cabc_ports = BIT(port);
+
+ return;
+ }
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+ case DL_DCS_PORT_A:
+ dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+
+ if (!dev_priv->vbt.dsi.config->cabc_supported)
+ return;
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+ case DL_DCS_PORT_A:
+ dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ dev_priv->vbt.dsi.cabc_ports =
+ BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+}
+
static void
parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
@@ -738,9 +741,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
const struct mipi_config *config;
const struct mipi_pps_data *pps;
int panel_type = dev_priv->vbt.panel_type;
+ enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
- if (!intel_bios_is_dsi_present(dev_priv, NULL))
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
/* Initialize this to undefined indicating no generic MIPI support */
@@ -781,15 +785,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return;
}
- /*
- * These fields are introduced from the VBT version 197 onwards,
- * so making sure that these bits are set zero in the previous
- * versions.
- */
- if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
- dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
- dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
- }
+ parse_dsi_backlight_ports(dev_priv, bdb->version, port);
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
@@ -1110,10 +1106,26 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
}
}
+static const u8 cnp_ddc_pin_map[] = {
+ [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+ [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+{
+ if (HAS_PCH_CNP(dev_priv) &&
+ vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
+ return cnp_ddc_pin_map[vbt_pin];
+
+ return vbt_pin;
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
- const struct bdb_header *bdb)
+ u8 bdb_version)
{
- union child_device_config *it, *child = NULL;
+ struct child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
uint8_t hdmi_level_shift;
int i, j;
@@ -1141,7 +1153,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (dvo_ports[port][j] == -1)
break;
- if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (it->dvo_port == dvo_ports[port][j]) {
if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@@ -1154,14 +1166,21 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->common.aux_channel;
- ddc_pin = child->common.ddc_pin;
+ aux_channel = child->aux_channel;
+ ddc_pin = child->ddc_pin;
+
+ is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+ is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+ is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
- is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
- is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
- is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
- is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
if (port == PORT_A && is_dvi) {
DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
@@ -1195,16 +1214,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- info->alternate_ddc_pin = ddc_pin;
-
- /*
- * All VBTs that we got so far for B Stepping has this
- * information wrong for Port D. So, let's just ignore for now.
- */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
- port == PORT_D) {
- info->alternate_ddc_pin = 0;
- }
+ info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
sanitize_ddc_pin(dev_priv, port);
}
@@ -1215,9 +1225,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
sanitize_aux_ch(dev_priv, port);
}
- if (bdb->version >= 158) {
+ if (bdb_version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
- hdmi_level_shift = child->raw[7] & 0xF;
+ hdmi_level_shift = child->hdmi_level_shifter_value;
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
port_name(port),
hdmi_level_shift);
@@ -1225,18 +1235,17 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
/* Parse the I_boost config for SKL and above */
- if (bdb->version >= 196 && child->common.iboost) {
- info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+ if (bdb_version >= 196 && child->iboost) {
+ info->dp_boost_level = translate_iboost(child->dp_iboost_level);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
- info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+ info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
port_name(port), info->hdmi_boost_level);
}
}
-static void parse_ddi_ports(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
enum port port;
@@ -1246,79 +1255,86 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
if (!dev_priv->vbt.child_dev_num)
return;
- if (bdb->version < 155)
+ if (bdb_version < 155)
return;
for (port = PORT_A; port < I915_MAX_PORTS; port++)
- parse_ddi_port(dev_priv, port, bdb);
+ parse_ddi_port(dev_priv, port, bdb_version);
}
static void
-parse_device_mapping(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+parse_general_definitions(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
- const struct bdb_general_definitions *p_defs;
- const union child_device_config *p_child;
- union child_device_config *child_dev_ptr;
+ const struct bdb_general_definitions *defs;
+ const struct child_device_config *child;
int i, child_device_num, count;
u8 expected_size;
u16 block_size;
+ int bus_pin;
- p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- if (!p_defs) {
+ defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
+
+ block_size = get_blocksize(defs);
+ if (block_size < sizeof(*defs)) {
+ DRM_DEBUG_KMS("General definitions block too small (%u)\n",
+ block_size);
+ return;
+ }
+
+ bus_pin = defs->crt_ddc_gmbus_pin;
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
+ dev_priv->vbt.crt_ddc_pin = bus_pin;
+
if (bdb->version < 106) {
expected_size = 22;
} else if (bdb->version < 111) {
expected_size = 27;
} else if (bdb->version < 195) {
- BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
- expected_size = sizeof(struct old_child_dev_config);
+ expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
} else if (bdb->version == 195) {
expected_size = 37;
} else if (bdb->version <= 197) {
expected_size = 38;
} else {
expected_size = 38;
- BUILD_BUG_ON(sizeof(*p_child) < 38);
+ BUILD_BUG_ON(sizeof(*child) < 38);
DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
- if (p_defs->child_dev_size != expected_size)
+ if (defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
- p_defs->child_dev_size, expected_size, bdb->version);
+ defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
- if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+ if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
DRM_DEBUG_KMS("Child device config size %u is too small.\n",
- p_defs->child_dev_size);
+ defs->child_dev_size);
return;
}
- /* get the block size of general definitions */
- block_size = get_blocksize(p_defs);
/* get the number of child device */
- child_device_num = (block_size - sizeof(*p_defs)) /
- p_defs->child_dev_size;
+ child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
count = 0;
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
- p_child = child_device_ptr(p_defs, i);
- if (!p_child->common.device_type) {
- /* skip the device block if device type is invalid */
+ child = child_device_ptr(defs, i);
+ if (!child->device_type)
continue;
- }
count++;
}
if (!count) {
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
- dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
if (!dev_priv->vbt.child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
@@ -1327,37 +1343,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
- p_child = child_device_ptr(p_defs, i);
- if (!p_child->common.device_type) {
- /* skip the device block if device type is invalid */
+ child = child_device_ptr(defs, i);
+ if (!child->device_type)
continue;
- }
-
- child_dev_ptr = dev_priv->vbt.child_dev + count;
- count++;
/*
* Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device. Accessing the data must
* depend on VBT version.
*/
- memcpy(child_dev_ptr, p_child,
- min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
-
- /*
- * copied full block, now init values when they are not
- * available in current version
- */
- if (bdb->version < 196) {
- /* Set default values for bits added from v196 */
- child_dev_ptr->common.iboost = 0;
- child_dev_ptr->common.hpd_invert = 0;
- }
-
- if (bdb->version < 192)
- child_dev_ptr->common.lspcon = 0;
+ memcpy(dev_priv->vbt.child_dev + count, child,
+ min_t(size_t, defs->child_dev_size, sizeof(*child)));
+ count++;
}
- return;
}
/* Common defaults which may be overridden by VBT. */
@@ -1538,14 +1536,15 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
parse_lfp_panel_data(dev_priv, bdb);
parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
- parse_sdvo_device_mapping(dev_priv, bdb);
- parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
- parse_ddi_ports(dev_priv, bdb);
+
+ /* Further processing on pre-parsed data */
+ parse_sdvo_device_mapping(dev_priv, bdb->version);
+ parse_ddi_ports(dev_priv, bdb->version);
out:
if (!vbt) {
@@ -1566,7 +1565,7 @@ out:
*/
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
int i;
if (!dev_priv->vbt.int_tv_support)
@@ -1576,11 +1575,11 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
/*
* If the device type is not TV, continue.
*/
- switch (p_child->old.device_type) {
+ switch (child->device_type) {
case DEVICE_TYPE_INT_TV:
case DEVICE_TYPE_TV:
case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
@@ -1591,7 +1590,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
- if (p_child->old.addin_offset)
+ if (child->addin_offset)
return true;
}
@@ -1608,14 +1607,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
{
+ const struct child_device_config *child;
int i;
if (!dev_priv->vbt.child_dev_num)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- union child_device_config *uchild = dev_priv->vbt.child_dev + i;
- struct old_child_dev_config *child = &uchild->old;
+ child = dev_priv->vbt.child_dev + i;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -1657,6 +1656,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
} port_mapping[] = {
@@ -1675,12 +1675,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- const union child_device_config *p_child =
- &dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
- DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ child = dev_priv->vbt.child_dev + i;
+
+ if ((child->dvo_port == port_mapping[port].dp ||
+ child->dvo_port == port_mapping[port].hdmi) &&
+ (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
return true;
}
@@ -1696,7 +1696,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
*/
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
[PORT_C] = DVO_PORT_DPC,
@@ -1712,10 +1712,10 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
- if (p_child->common.dvo_port == port_mapping[port] &&
- (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+ if (child->dvo_port == port_mapping[port] &&
+ (child->device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
}
@@ -1723,7 +1723,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
enum port port)
{
static const struct {
@@ -1742,16 +1742,16 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
(DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
- if (p_child->common.dvo_port == port_mapping[port].dp)
+ if (child->dvo_port == port_mapping[port].dp)
return true;
/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
- if (p_child->common.dvo_port == port_mapping[port].hdmi &&
- p_child->common.aux_channel != 0)
+ if (child->dvo_port == port_mapping[port].hdmi &&
+ child->aux_channel != 0)
return true;
return false;
@@ -1760,13 +1760,13 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- const union child_device_config *p_child =
- &dev_priv->vbt.child_dev[i];
+ child = dev_priv->vbt.child_dev + i;
- if (child_dev_is_dp_dual_mode(p_child, port))
+ if (child_dev_is_dp_dual_mode(child, port))
return true;
}
@@ -1783,17 +1783,17 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
enum port *port)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
u8 dvo_port;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
- if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- dvo_port = p_child->common.dvo_port;
+ dvo_port = child->dvo_port;
switch (dvo_port) {
case DVO_PORT_MIPIA:
@@ -1823,16 +1823,19 @@ bool
intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
+ child = dev_priv->vbt.child_dev + i;
+
+ if (!child->hpd_invert)
continue;
- switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ switch (child->dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
@@ -1867,16 +1870,19 @@ bool
intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
if (!HAS_LSPCON(dev_priv))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- if (!dev_priv->vbt.child_dev[i].common.lspcon)
+ child = dev_priv->vbt.child_dev + i;
+
+ if (!child->lspcon)
continue;
- switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ switch (child->dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 4e00e5cb9fa1..5f8b9f1f40f1 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -64,7 +64,7 @@ static unsigned long wait_timeout(void)
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
- DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s, current seqno=%x, last=%x\n",
+ DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n",
engine->name, __builtin_return_address(0),
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
&engine->irq_posted)),
@@ -74,9 +74,10 @@ static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
-static void intel_breadcrumbs_hangcheck(unsigned long data)
+static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs *engine = from_timer(engine, t,
+ breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
if (!b->irq_armed)
@@ -108,9 +109,10 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
}
}
-static void intel_breadcrumbs_fake_irq(unsigned long data)
+static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs *engine = from_timer(engine, t,
+ breadcrumbs.fake_irq);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The timer persists in case we cannot enable interrupts,
@@ -515,6 +517,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
rb_erase(&wait->node, &b->waiters);
+ RB_CLEAR_NODE(&wait->node);
out:
GEM_BUG_ON(b->irq_wait == wait);
@@ -787,12 +790,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
spin_lock_init(&b->rb_lock);
spin_lock_init(&b->irq_lock);
- setup_timer(&b->fake_irq,
- intel_breadcrumbs_fake_irq,
- (unsigned long)engine);
- setup_timer(&b->hangcheck,
- intel_breadcrumbs_hangcheck,
- (unsigned long)engine);
+ timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
+ timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
/* Spawn a thread to provide a common bottom-half for all signals.
* As this is an asynchronous interface we cannot steal the current
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 1241e5891b29..b2a6d62b71c0 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -417,24 +417,21 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = 540000;
}
-static int vlv_calc_cdclk(struct drm_i915_private *dev_priv,
- int max_pixclk)
+static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
333333 : 320000;
- int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
/*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (!IS_CHERRYVIEW(dev_priv) &&
- max_pixclk > freq_320*limit/100)
+ if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
return 400000;
- else if (max_pixclk > 266667*limit/100)
+ else if (min_cdclk > 266667)
return freq_320;
- else if (max_pixclk > 0)
+ else if (min_cdclk > 0)
return 266667;
else
return 200000;
@@ -506,7 +503,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
else
cmd = 0;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -516,7 +513,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
mutex_lock(&dev_priv->sb_lock);
@@ -593,7 +590,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
*/
cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
@@ -603,7 +600,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -612,13 +609,13 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
-static int bdw_calc_cdclk(int max_pixclk)
+static int bdw_calc_cdclk(int min_cdclk)
{
- if (max_pixclk > 540000)
+ if (min_cdclk > 540000)
return 675000;
- else if (max_pixclk > 450000)
+ else if (min_cdclk > 450000)
return 540000;
- else if (max_pixclk > 337500)
+ else if (min_cdclk > 337500)
return 450000;
else
return 337500;
@@ -659,10 +656,10 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("failed to inform pcode about cdclk change\n");
return;
@@ -672,8 +669,12 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
+ /*
+ * According to the spec, it should be enough to poll for this 1 us.
+ * However, extensive testing shows that this can take longer.
+ */
if (wait_for_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ LCPLL_CD_SOURCE_FCLK_DONE, 100))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
@@ -711,9 +712,9 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -724,23 +725,23 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
cdclk, dev_priv->cdclk.hw.cdclk);
}
-static int skl_calc_cdclk(int max_pixclk, int vco)
+static int skl_calc_cdclk(int min_cdclk, int vco)
{
if (vco == 8640000) {
- if (max_pixclk > 540000)
+ if (min_cdclk > 540000)
return 617143;
- else if (max_pixclk > 432000)
+ else if (min_cdclk > 432000)
return 540000;
- else if (max_pixclk > 308571)
+ else if (min_cdclk > 308571)
return 432000;
else
return 308571;
} else {
- if (max_pixclk > 540000)
+ if (min_cdclk > 540000)
return 675000;
- else if (max_pixclk > 450000)
+ else if (min_cdclk > 450000)
return 540000;
- else if (max_pixclk > 337500)
+ else if (min_cdclk > 337500)
return 450000;
else
return 337500;
@@ -927,12 +928,12 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
WARN_ON((cdclk == 24000) != (vco == 0));
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -974,9 +975,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
}
@@ -1075,31 +1076,25 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_state);
}
-static int bxt_calc_cdclk(int max_pixclk)
+static int bxt_calc_cdclk(int min_cdclk)
{
- if (max_pixclk > 576000)
+ if (min_cdclk > 576000)
return 624000;
- else if (max_pixclk > 384000)
+ else if (min_cdclk > 384000)
return 576000;
- else if (max_pixclk > 288000)
+ else if (min_cdclk > 288000)
return 384000;
- else if (max_pixclk > 144000)
+ else if (min_cdclk > 144000)
return 288000;
else
return 144000;
}
-static int glk_calc_cdclk(int max_pixclk)
+static int glk_calc_cdclk(int min_cdclk)
{
- /*
- * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
- * as a temporary workaround. Use a higher cdclk instead. (Note that
- * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
- * cdclk.)
- */
- if (max_pixclk > DIV_ROUND_UP(2 * 158400 * 99, 100))
+ if (min_cdclk > 158400)
return 316800;
- else if (max_pixclk > DIV_ROUND_UP(2 * 79200 * 99, 100))
+ else if (min_cdclk > 79200)
return 158400;
else
return 79200;
@@ -1273,10 +1268,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
}
/* Inform power controller of upcoming frequency change */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
@@ -1305,10 +1300,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
I915_WRITE(CDCLK_CTL, val);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
DIV_ROUND_UP(cdclk, 25000));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
@@ -1420,11 +1415,11 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
bxt_set_cdclk(dev_priv, &cdclk_state);
}
-static int cnl_calc_cdclk(int max_pixclk)
+static int cnl_calc_cdclk(int min_cdclk)
{
- if (max_pixclk > 336000)
+ if (min_cdclk > 336000)
return 528000;
- else if (max_pixclk > 168000)
+ else if (min_cdclk > 168000)
return 336000;
else
return 168000;
@@ -1523,12 +1518,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider, pcu_ack;
int ret;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1580,9 +1575,9 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
I915_WRITE(CDCLK_CTL, val);
/* inform PCU of the change */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
}
@@ -1732,104 +1727,119 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->display.set_cdclk(dev_priv, cdclk_state);
}
-static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
- int pixel_rate)
+static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
+ int pixel_rate)
+{
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * FIXME: Switch to DIV_ROUND_UP(pixel_rate, 2)
+ * once DDI clock voltage requirements are
+ * handled correctly.
+ */
+ return pixel_rate;
+ else if (IS_GEMINILAKE(dev_priv))
+ /*
+ * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
+ * as a temporary workaround. Use a higher cdclk instead. (Note that
+ * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
+ * cdclk.)
+ */
+ return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
+ else if (IS_GEN9(dev_priv) ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return pixel_rate;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return DIV_ROUND_UP(pixel_rate * 100, 95);
+ else
+ return DIV_ROUND_UP(pixel_rate * 100, 90);
+}
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
+ int min_cdclk;
+
+ if (!crtc_state->base.enable)
+ return 0;
+
+ min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+ min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
* audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
* there may be audio corruption or screen corruption." This cdclk
- * restriction for GLK is 316.8 MHz and since GLK can output two
- * pixels per clock, the pixel rate becomes 2 * 316.8 MHz.
+ * restriction for GLK is 316.8 MHz.
*/
if (intel_crtc_has_dp_encoder(crtc_state) &&
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
crtc_state->lane_count == 4) {
- if (IS_CANNONLAKE(dev_priv))
- pixel_rate = max(316800, pixel_rate);
- else if (IS_GEMINILAKE(dev_priv))
- pixel_rate = max(2 * 316800, pixel_rate);
- else
- pixel_rate = max(432000, pixel_rate);
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+ /* Display WA #1145: glk,cnl */
+ min_cdclk = max(316800, min_cdclk);
+ } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+ /* Display WA #1144: skl,bxt */
+ min_cdclk = max(432000, min_cdclk);
+ }
}
/* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- * The check for GLK has to be adjusted as the platform can output
- * two pixels per clock.
*/
- if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) {
- if (IS_GEMINILAKE(dev_priv))
- pixel_rate = max(2 * 2 * 96000, pixel_rate);
- else
- pixel_rate = max(2 * 96000, pixel_rate);
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+ min_cdclk = max(2 * 96000, min_cdclk);
+
+ if (min_cdclk > dev_priv->max_cdclk_freq) {
+ DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
+ min_cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
}
- return pixel_rate;
+ return min_cdclk;
}
-/* compute the max rate for new configuration */
-static int intel_max_pixel_rate(struct drm_atomic_state *state)
+static int intel_compute_min_cdclk(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
+ struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- unsigned int max_pixel_rate = 0, i;
+ int min_cdclk, i;
enum pipe pipe;
- memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
- sizeof(intel_state->min_pixclk));
-
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- int pixel_rate;
-
- crtc_state = to_intel_crtc_state(cstate);
- if (!crtc_state->base.enable) {
- intel_state->min_pixclk[i] = 0;
- continue;
- }
-
- pixel_rate = crtc_state->pixel_rate;
+ memcpy(intel_state->min_cdclk, dev_priv->min_cdclk,
+ sizeof(intel_state->min_cdclk));
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
- pixel_rate =
- bdw_adjust_min_pipe_pixel_rate(crtc_state,
- pixel_rate);
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
- intel_state->min_pixclk[i] = pixel_rate;
+ intel_state->min_cdclk[i] = min_cdclk;
}
+ min_cdclk = 0;
for_each_pipe(dev_priv, pipe)
- max_pixel_rate = max(intel_state->min_pixclk[pipe],
- max_pixel_rate);
+ min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
- return max_pixel_rate;
+ return min_cdclk;
}
static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- int max_pixclk = intel_max_pixel_rate(state);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- int cdclk;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk;
- cdclk = vlv_calc_cdclk(dev_priv, max_pixclk);
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1847,22 +1857,18 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- int max_pixclk = intel_max_pixel_rate(state);
- int cdclk;
+ int min_cdclk, cdclk;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
/*
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
- cdclk = bdw_calc_cdclk(max_pixclk);
-
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = bdw_calc_cdclk(min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1880,10 +1886,13 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(state->dev);
- const int max_pixclk = intel_max_pixel_rate(state);
- int cdclk, vco;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
vco = intel_state->cdclk.logical.vco;
if (!vco)
@@ -1893,13 +1902,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
- cdclk = skl_calc_cdclk(max_pixclk, vco);
-
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = skl_calc_cdclk(min_cdclk, vco);
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1920,25 +1923,21 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- int max_pixclk = intel_max_pixel_rate(state);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- int cdclk, vco;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(max_pixclk);
+ cdclk = glk_calc_cdclk(min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
- cdclk = bxt_calc_cdclk(max_pixclk);
+ cdclk = bxt_calc_cdclk(min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
-
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1964,19 +1963,15 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- int max_pixclk = intel_max_pixel_rate(state);
- int cdclk, vco;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk, vco;
- cdclk = cnl_calc_cdclk(max_pixclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = cnl_calc_cdclk(min_cdclk);
+ vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1999,14 +1994,21 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * FIXME: Allow '2 * max_cdclk_freq'
+ * once DDI clock voltage requirements are
+ * handled correctly.
+ */
+ return max_cdclk_freq;
+ else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Limiting to 99% as a temporary workaround. See
- * glk_calc_cdclk() for details.
+ * intel_min_cdclk() for details.
*/
return 2 * max_cdclk_freq * 99 / 100;
- else if (INTEL_INFO(dev_priv)->gen >= 9 ||
- IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ else if (IS_GEN9(dev_priv) ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 70e0ff41070c..437339f5d098 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -143,7 +143,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
+ const struct intel_crtc_state *crtc_state,
int mode)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -194,28 +194,41 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
}
static void intel_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
static void pch_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
}
static void pch_post_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_crt(encoder, old_crtc_state, old_conn_state);
}
+static void hsw_disable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_crtc *crtc = old_crtc_state->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
static void hsw_post_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -225,11 +238,63 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
lpt_disable_iclkip(dev_priv);
intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+
+ WARN_ON(!old_crtc_state->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+}
+
+static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
+static void hsw_pre_enable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
+}
+
+static void hsw_enable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
+
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
static void intel_enable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
}
@@ -279,10 +344,25 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ return true;
+}
+
+static bool pch_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ pipe_config->has_pch_encoder = true;
+
+ return true;
+}
+
+static bool hsw_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(dev_priv))
- pipe_config->has_pch_encoder = true;
+ pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
@@ -295,8 +375,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev_priv))
- pipe_config->port_clock = 135000 * 2;
+ pipe_config->port_clock = 135000 * 2;
return true;
}
@@ -712,7 +791,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -730,7 +809,7 @@ intel_crt_detect(struct drm_connector *connector,
else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (i915.load_detect_test)
+ else if (i915_modparams.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
@@ -890,26 +969,33 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- crt->base.compute_config = intel_crt_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
- crt->base.disable = pch_disable_crt;
- crt->base.post_disable = pch_post_disable_crt;
- } else {
- crt->base.disable = intel_disable_crt;
- }
- crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev_priv) &&
!dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
+
if (HAS_DDI(dev_priv)) {
crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.compute_config = hsw_crt_compute_config;
+ crt->base.pre_pll_enable = hsw_pre_pll_enable_crt;
+ crt->base.pre_enable = hsw_pre_enable_crt;
+ crt->base.enable = hsw_enable_crt;
+ crt->base.disable = hsw_disable_crt;
crt->base.post_disable = hsw_post_disable_crt;
} else {
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ crt->base.compute_config = pch_crt_compute_config;
+ crt->base.disable = pch_disable_crt;
+ crt->base.post_disable = pch_post_disable_crt;
+ } else {
+ crt->base.compute_config = intel_crt_compute_config;
+ crt->base.disable = intel_disable_crt;
+ }
crt->base.port = PORT_NONE;
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
+ crt->base.enable = intel_enable_crt;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 92c1f8e166dc..da9de47562b8 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -52,10 +52,6 @@ MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
-
-
-
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -252,8 +248,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
}
fw_size = dev_priv->csr.dmc_fw_size;
+ assert_rpm_wakelock_held(dev_priv);
+
+ preempt_disable();
+
for (i = 0; i < fw_size; i++)
- I915_WRITE(CSR_PROGRAM(i), payload[i]);
+ I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);
+
+ preempt_enable();
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -285,7 +287,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) {
- DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
+ DRM_ERROR("DMC firmware has wrong CSS header length "
+ "(%u bytes)\n",
(css_header->header_len * 4));
return NULL;
}
@@ -309,7 +312,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
if (csr->version != required_version) {
DRM_INFO("Refusing to load DMC firmware v%u.%u,"
- " please use v%u.%u [" FIRMWARE_URL "].\n",
+ " please use v%u.%u\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(required_version),
@@ -324,7 +327,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
&fw->data[readcount];
if (sizeof(struct intel_package_header) !=
(package_header->header_len * 4)) {
- DRM_ERROR("Firmware has wrong package header length %u bytes\n",
+ DRM_ERROR("DMC firmware has wrong package header length "
+ "(%u bytes)\n",
(package_header->header_len * 4));
return NULL;
}
@@ -345,7 +349,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
dmc_offset = package_header->fw_info[i].offset;
}
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("Firmware not supported for %c stepping\n",
+ DRM_ERROR("DMC firmware not supported for %c stepping\n",
si->stepping);
return NULL;
}
@@ -354,7 +358,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
- DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
+ DRM_ERROR("DMC firmware has wrong dmc header length "
+ "(%u bytes)\n",
(dmc_header->header_len));
return NULL;
}
@@ -362,7 +367,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Cache the dmc header info. */
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
- DRM_ERROR("Firmware has wrong mmio count %u\n",
+ DRM_ERROR("DMC firmware has wrong mmio count %u\n",
dmc_header->mmio_count);
return NULL;
}
@@ -370,7 +375,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
for (i = 0; i < dmc_header->mmio_count; i++) {
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
- DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
+ DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
dmc_header->mmioaddr[i]);
return NULL;
}
@@ -381,7 +386,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
if (nbytes > CSR_MAX_FW_SIZE) {
- DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+ DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes);
return NULL;
}
csr->dmc_fw_size = dmc_header->fw_size;
@@ -419,9 +424,11 @@ static void csr_load_work_fn(struct work_struct *work)
CSR_VERSION_MINOR(csr->version));
} else {
dev_notice(dev_priv->drm.dev,
- "Failed to load DMC firmware"
- " [" FIRMWARE_URL "],"
- " disabling runtime power management.\n");
+ "Failed to load DMC firmware %s."
+ " Disabling runtime power management.\n",
+ csr->fw_path);
+ dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+ INTEL_UC_FIRMWARE_URL);
}
release_firmware(fw);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5e5fe03b638c..e0843bb99169 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,39 +301,38 @@ static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
};
struct bxt_ddi_buf_trans {
- u32 margin; /* swing value */
- u32 scale; /* scale value */
- u32 enable; /* scale enable */
- u32 deemphasis;
- bool default_index; /* true if the entry represents default value */
+ u8 margin; /* swing value */
+ u8 scale; /* scale value */
+ u8 enable; /* scale enable */
+ u8 deemphasis;
};
static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
/* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */
- { 78, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
- { 104, 0x9A, 0, 64, false }, /* 2: 400 6 */
- { 154, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
- { 116, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
- { 154, 0x9A, 0, 64, false }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
- { 154, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 104, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 154, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
};
static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
/* Idx NT mV diff db */
- { 26, 0, 0, 128, false }, /* 0: 200 0 */
- { 38, 0, 0, 112, false }, /* 1: 200 1.5 */
- { 48, 0, 0, 96, false }, /* 2: 200 4 */
- { 54, 0, 0, 69, false }, /* 3: 200 6 */
- { 32, 0, 0, 128, false }, /* 4: 250 0 */
- { 48, 0, 0, 104, false }, /* 5: 250 1.5 */
- { 54, 0, 0, 85, false }, /* 6: 250 4 */
- { 43, 0, 0, 128, false }, /* 7: 300 0 */
- { 54, 0, 0, 101, false }, /* 8: 300 1.5 */
- { 48, 0, 0, 128, false }, /* 9: 300 0 */
+ { 26, 0, 0, 128, }, /* 0: 200 0 */
+ { 38, 0, 0, 112, }, /* 1: 200 1.5 */
+ { 48, 0, 0, 96, }, /* 2: 200 4 */
+ { 54, 0, 0, 69, }, /* 3: 200 6 */
+ { 32, 0, 0, 128, }, /* 4: 250 0 */
+ { 48, 0, 0, 104, }, /* 5: 250 1.5 */
+ { 54, 0, 0, 85, }, /* 6: 250 4 */
+ { 43, 0, 0, 128, }, /* 7: 300 0 */
+ { 54, 0, 0, 101, }, /* 8: 300 1.5 */
+ { 48, 0, 0, 128, }, /* 9: 300 0 */
};
/* BSpec has 2 recommended values - entries 0 and 8.
@@ -341,24 +340,24 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
/* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, false }, /* 0: 400 0 */
- { 52, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
- { 52, 0x9A, 0, 64, false }, /* 2: 400 6 */
- { 42, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
- { 77, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
- { 77, 0x9A, 0, 64, false }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
- { 102, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 52, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 77, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
};
struct cnl_ddi_buf_trans {
- u32 dw2_swing_sel;
- u32 dw7_n_scalar;
- u32 dw4_cursor_coeff;
- u32 dw4_post_cursor_2;
- u32 dw4_post_cursor_1;
+ u8 dw2_swing_sel;
+ u8 dw7_n_scalar;
+ u8 dw4_cursor_coeff;
+ u8 dw4_post_cursor_2;
+ u8 dw4_post_cursor_1;
};
/* Voltage Swing Programming for VccIO 0.85V for DP */
@@ -588,48 +587,29 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
}
}
-static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+static int skl_buf_trans_num_entries(enum port port, int n_entries)
{
- int n_hdmi_entries;
- int hdmi_level;
- int hdmi_default_entry;
-
- hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
-
- if (IS_GEN9_LP(dev_priv))
- return hdmi_level;
-
- if (IS_GEN9_BC(dev_priv)) {
- skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
- hdmi_default_entry = 8;
- } else if (IS_BROADWELL(dev_priv)) {
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
- } else if (IS_HASWELL(dev_priv)) {
- n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_default_entry = 6;
- } else {
- WARN(1, "ddi translation table missing\n");
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
- }
-
- /* Choose a good default if VBT is badly populated */
- if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
- hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_default_entry;
-
- return hdmi_level;
+ /* Only DDIA and DDIE can select the 10th register with DP */
+ if (port == PORT_A || port == PORT_E)
+ return min(n_entries, 10);
+ else
+ return min(n_entries, 9);
}
static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
- int *n_entries)
+ enum port port, int *n_entries)
{
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ const struct ddi_buf_trans *ddi_translations =
+ kbl_get_buf_trans_dp(dev_priv, n_entries);
+ *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ return ddi_translations;
} else if (IS_SKYLAKE(dev_priv)) {
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_dp(dev_priv, n_entries);
+ *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
return bdw_ddi_translations_dp;
@@ -644,10 +624,13 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
- int *n_entries)
+ enum port port, int *n_entries)
{
if (IS_GEN9_BC(dev_priv)) {
- return skl_get_buf_trans_edp(dev_priv, n_entries);
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_edp(dev_priv, n_entries);
+ *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
return bdw_get_buf_trans_edp(dev_priv, n_entries);
} else if (IS_HASWELL(dev_priv)) {
@@ -675,6 +658,154 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
return NULL;
}
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+ int *n_entries)
+{
+ if (IS_GEN9_BC(dev_priv)) {
+ return skl_get_buf_trans_hdmi(dev_priv, n_entries);
+ } else if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ return bdw_ddi_translations_hdmi;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ return hsw_ddi_translations_hdmi;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
+ return bxt_ddi_translations_dp;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
+ return bxt_ddi_translations_edp;
+ }
+
+ return bxt_get_buf_trans_dp(dev_priv, n_entries);
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
+ return bxt_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
+ return cnl_ddi_translations_hdmi_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
+ return cnl_ddi_translations_hdmi_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
+ return cnl_ddi_translations_hdmi_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
+ return cnl_ddi_translations_dp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
+ return cnl_ddi_translations_dp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
+ return cnl_ddi_translations_dp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
+ return cnl_ddi_translations_edp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
+ return cnl_ddi_translations_edp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
+ return cnl_ddi_translations_edp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+ } else {
+ return cnl_get_buf_trans_dp(dev_priv, n_entries);
+ }
+}
+
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+ int n_entries, level, default_entry;
+
+ level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+ if (IS_CANNONLAKE(dev_priv)) {
+ cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = n_entries - 1;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = n_entries - 1;
+ } else if (IS_GEN9_BC(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = 8;
+ } else if (IS_BROADWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = 7;
+ } else if (IS_HASWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = 6;
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ return 0;
+ }
+
+ /* Choose a good default if VBT is badly populated */
+ if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries)
+ level = default_entry;
+
+ if (WARN_ON_ONCE(n_entries == 0))
+ return 0;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
+
+ return level;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. This function programs the correct values for
@@ -688,16 +819,13 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations;
- if (IS_GEN9_LP(dev_priv))
- return;
-
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
- ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv,
+ ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
&n_entries);
break;
case INTEL_OUTPUT_DP:
- ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv,
+ ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
&n_entries);
break;
case INTEL_OUTPUT_ANALOG:
@@ -709,16 +837,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
return;
}
- if (IS_GEN9_BC(dev_priv)) {
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
- if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
- port != PORT_A && port != PORT_E &&
- n_entries > 9))
- n_entries = 9;
- }
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (IS_GEN9_BC(dev_priv) &&
+ dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
@@ -733,42 +855,32 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
* values in advance. This function programs the correct values for
* HDMI/DVI use cases.
*/
-static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
+ int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
- int n_hdmi_entries, hdmi_level;
+ int n_entries;
enum port port = intel_ddi_get_encoder_port(encoder);
- const struct ddi_buf_trans *ddi_translations_hdmi;
-
- if (IS_GEN9_LP(dev_priv))
- return;
+ const struct ddi_buf_trans *ddi_translations;
- hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
- if (IS_GEN9_BC(dev_priv)) {
- ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+ if (WARN_ON_ONCE(!ddi_translations))
+ return;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
- } else if (IS_BROADWELL(dev_priv)) {
- ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- } else if (IS_HASWELL(dev_priv)) {
- ddi_translations_hdmi = hsw_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- } else {
- WARN(1, "ddi translation table missing\n");
- ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- }
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (IS_GEN9_BC(dev_priv) &&
+ dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
- ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
+ ddi_translations[level].trans1 | iboost_bit);
I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
- ddi_translations_hdmi[hdmi_level].trans2);
+ ddi_translations[level].trans2);
}
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -785,7 +897,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
-static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
{
switch (pll->id) {
case DPLL_ID_WRPLL1:
@@ -1044,14 +1156,14 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- uint32_t dpll)
+ enum intel_dpll_id pll_id)
{
i915_reg_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;
- cfgcr1_reg = DPLL_CFGCR1(dpll);
- cfgcr2_reg = DPLL_CFGCR2(dpll);
+ cfgcr1_reg = DPLL_CFGCR1(pll_id);
+ cfgcr2_reg = DPLL_CFGCR2(pll_id);
cfgcr1_val = I915_READ(cfgcr1_reg);
cfgcr2_val = I915_READ(cfgcr2_reg);
@@ -1104,7 +1216,7 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- uint32_t pll_id)
+ enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -1154,7 +1266,10 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000;
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
return dco_freq / (p0 * p1 * p2 * 5);
}
@@ -1188,7 +1303,8 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t cfgcr0, pll_id;
+ uint32_t cfgcr0;
+ enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1241,17 +1357,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t dpll_ctl1, dpll;
+ uint32_t dpll_ctl1;
+ enum intel_dpll_id pll_id;
- dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
dpll_ctl1 = I915_READ(DPLL_CTRL1);
- if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
- link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+ if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
+ link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
} else {
- link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
+ link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
+ link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
switch (link_clock) {
case DPLL_CTRL1_LINK_RATE_810:
@@ -1332,17 +1449,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
}
static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id dpll)
+ enum intel_dpll_id pll_id)
{
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
struct dpll clock;
/* For DDI ports we always use a shared PLL. */
- if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+ if (WARN_ON(pll_id == DPLL_ID_PRIVATE))
return 0;
- pll = &dev_priv->shared_dplls[dpll];
+ pll = &dev_priv->shared_dplls[pll_id];
state = &pll->state.hw_state;
clock.m1 = 2;
@@ -1361,9 +1478,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
- uint32_t dpll = port;
+ enum intel_dpll_id pll_id = port;
- pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
+ pipe_config->port_clock = bxt_calc_pll_link(dev_priv, pll_id);
ddi_dotclock_get(pipe_config);
}
@@ -1715,54 +1832,36 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
}
-static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+static void skl_ddi_set_iboost(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->port;
- int type = encoder->type;
- const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
- uint8_t dp_iboost, hdmi_iboost;
- int n_entries;
- /* VBT may override standard boost values */
- dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
- hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+ if (type == INTEL_OUTPUT_HDMI)
+ iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+ else
+ iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+
+ if (iboost == 0) {
+ const struct ddi_buf_trans *ddi_translations;
+ int n_entries;
- if (type == INTEL_OUTPUT_DP) {
- if (dp_iboost) {
- iboost = dp_iboost;
- } else {
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- ddi_translations = kbl_get_buf_trans_dp(dev_priv,
- &n_entries);
- else
- ddi_translations = skl_get_buf_trans_dp(dev_priv,
- &n_entries);
- iboost = ddi_translations[level].i_boost;
- }
- } else if (type == INTEL_OUTPUT_EDP) {
- if (dp_iboost) {
- iboost = dp_iboost;
- } else {
- ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+ if (type == INTEL_OUTPUT_HDMI)
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ else if (type == INTEL_OUTPUT_EDP)
+ ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ else
+ ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
- if (WARN_ON(port != PORT_A &&
- port != PORT_E && n_entries > 9))
- n_entries = 9;
+ if (WARN_ON_ONCE(!ddi_translations))
+ return;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
- iboost = ddi_translations[level].i_boost;
- }
- } else if (type == INTEL_OUTPUT_HDMI) {
- if (hdmi_iboost) {
- iboost = hdmi_iboost;
- } else {
- ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
- iboost = ddi_translations[level].i_boost;
- }
- } else {
- return;
+ iboost = ddi_translations[level].i_boost;
}
/* Make sure that the requested I_boost is valid */
@@ -1777,38 +1876,25 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
-static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct bxt_ddi_buf_trans *ddi_translations;
- u32 n_entries, i;
-
- if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
- n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
- ddi_translations = bxt_ddi_translations_edp;
- } else if (type == INTEL_OUTPUT_DP
- || type == INTEL_OUTPUT_EDP) {
- n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
- ddi_translations = bxt_ddi_translations_dp;
- } else if (type == INTEL_OUTPUT_HDMI) {
- n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
- ddi_translations = bxt_ddi_translations_hdmi;
- } else {
- DRM_DEBUG_KMS("Vswing programming not done for encoder %d\n",
- type);
- return;
- }
+ enum port port = encoder->port;
+ int n_entries;
- /* Check if default value has to be used */
- if (level >= n_entries ||
- (type == INTEL_OUTPUT_HDMI && level == HDMI_LEVEL_SHIFT_UNKNOWN)) {
- for (i = 0; i < n_entries; i++) {
- if (ddi_translations[i].default_index) {
- level = i;
- break;
- }
- }
- }
+ if (type == INTEL_OUTPUT_HDMI)
+ ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ else if (type == INTEL_OUTPUT_EDP)
+ ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+
+ if (WARN_ON_ONCE(!ddi_translations))
+ return;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
bxt_ddi_phy_set_signal_level(dev_priv, port,
ddi_translations[level].margin,
@@ -1820,12 +1906,25 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
int n_entries;
- if (encoder->type == INTEL_OUTPUT_EDP)
- intel_ddi_get_buf_trans_edp(dev_priv, &n_entries);
- else
- intel_ddi_get_buf_trans_dp(dev_priv, &n_entries);
+ if (IS_CANNONLAKE(dev_priv)) {
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ cnl_get_buf_trans_dp(dev_priv, &n_entries);
+ } else if (IS_GEN9_LP(dev_priv)) {
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ bxt_get_buf_trans_dp(dev_priv, &n_entries);
+ } else {
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ else
+ intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+ }
if (WARN_ON(n_entries < 1))
n_entries = 1;
@@ -1836,95 +1935,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
- u32 voltage, int *n_entries)
-{
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
- return cnl_ddi_translations_hdmi_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
- return cnl_ddi_translations_hdmi_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
- return cnl_ddi_translations_hdmi_1_05V;
- }
- return NULL;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv,
- u32 voltage, int *n_entries)
+static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
- return cnl_ddi_translations_dp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
- return cnl_ddi_translations_dp_1_05V;
- }
- return NULL;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
- u32 voltage, int *n_entries)
-{
- if (dev_priv->vbt.edp.low_vswing) {
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_edp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
- return cnl_ddi_translations_edp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
- return cnl_ddi_translations_edp_1_05V;
- }
- return NULL;
- } else {
- return cnl_get_buf_trans_dp(dev_priv, voltage, n_entries);
- }
-}
-
-static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
-{
- const struct cnl_ddi_buf_trans *ddi_translations = NULL;
- u32 n_entries, val, voltage;
- int ln;
-
- /*
- * Values for each port type are listed in
- * voltage swing programming tables.
- * Vccio voltage found in PORT_COMP_DW3.
- */
- voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ const struct cnl_ddi_buf_trans *ddi_translations;
+ int n_entries, ln;
+ u32 val;
- if (type == INTEL_OUTPUT_HDMI) {
- ddi_translations = cnl_get_buf_trans_hdmi(dev_priv,
- voltage, &n_entries);
- } else if (type == INTEL_OUTPUT_DP) {
- ddi_translations = cnl_get_buf_trans_dp(dev_priv,
- voltage, &n_entries);
- } else if (type == INTEL_OUTPUT_EDP) {
- ddi_translations = cnl_get_buf_trans_edp(dev_priv,
- voltage, &n_entries);
- }
+ if (type == INTEL_OUTPUT_HDMI)
+ ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ else if (type == INTEL_OUTPUT_EDP)
+ ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
- if (ddi_translations == NULL) {
- MISSING_CASE(voltage);
+ if (WARN_ON_ONCE(!ddi_translations))
return;
- }
-
- if (level >= n_entries) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+ if (WARN_ON_ONCE(level >= n_entries))
level = n_entries - 1;
- }
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
@@ -1942,7 +1972,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
val |= RCOMP_SCALAR(0x98);
I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
- /* Program PORT_TX_DW4 */
+ /* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
@@ -1954,7 +1984,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
}
- /* Program PORT_TX_DW5 */
+ /* Program PORT_TX_DW5 */
/* All DW5 values are fixed for every table entry */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
val &= ~RTERM_SELECT_MASK;
@@ -1962,33 +1992,29 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
val |= TAP3_DISABLE;
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
- /* Program PORT_TX_DW7 */
+ /* Program PORT_TX_DW7 */
val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
}
-static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
+static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = intel_ddi_get_encoder_port(encoder);
- int type = encoder->type;
- int width = 0;
- int rate = 0;
+ int width, rate, ln;
u32 val;
- int ln = 0;
- if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
- width = intel_dp->lane_count;
- rate = intel_dp->link_rate;
- } else if (type == INTEL_OUTPUT_HDMI) {
+ if (type == INTEL_OUTPUT_HDMI) {
width = 4;
- /* Rate is always < than 6GHz for HDMI */
+ rate = 0; /* Rate is always < than 6GHz for HDMI */
} else {
- MISSING_CASE(type);
- return;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ width = intel_dp->lane_count;
+ rate = intel_dp->link_rate;
}
/*
@@ -1997,7 +2023,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
* else clear to 0b.
*/
val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
- if (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)
+ if (type != INTEL_OUTPUT_HDMI)
val |= COMMON_KEEPER_EN;
else
val &= ~COMMON_KEEPER_EN;
@@ -2032,7 +2058,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
- cnl_ddi_vswing_program(dev_priv, level, port, type);
+ cnl_ddi_vswing_program(encoder, level, type);
/* 6. Set training enable to trigger update */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
@@ -2055,33 +2081,45 @@ static uint32_t translate_signal_level(int signal_levels)
return 0;
}
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
- enum port port = dport->port;
- uint32_t level;
- level = translate_signal_level(signal_levels);
+ return translate_signal_level(signal_levels);
+}
+
+u32 bxt_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ struct intel_encoder *encoder = &dport->base;
+ int level = intel_ddi_dp_level(intel_dp);
+
+ if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+ else
+ bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+
+ return 0;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ struct intel_encoder *encoder = &dport->base;
+ int level = intel_ddi_dp_level(intel_dp);
if (IS_GEN9_BC(dev_priv))
- skl_ddi_set_iboost(encoder, level);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
- else if (IS_CANNONLAKE(dev_priv)) {
- cnl_ddi_vswing_sequence(encoder, level);
- /* DDI_BUF_CTL bits 27:24 are reserved on CNL */
- return 0;
- }
+ skl_ddi_set_iboost(encoder, level, encoder->type);
+
return DDI_BUF_TRANS_SELECT(level);
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll)
+ const struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
@@ -2093,6 +2131,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
I915_WRITE(DPCLKA_CFGCR0, val);
@@ -2120,108 +2159,113 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
}
}
+static void intel_ddi_clk_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ if (IS_CANNONLAKE(dev_priv))
+ I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+ else if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
+ DPLL_CTRL2_DDI_CLK_OFF(port));
+ else if (INTEL_GEN(dev_priv) < 9)
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
- int link_rate, uint32_t lane_count,
- struct intel_shared_dpll *pll,
- bool link_mst)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int level = intel_ddi_dp_level(intel_dp);
+
+ WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
- WARN_ON(link_mst && (port == PORT_A || port == PORT_E));
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+ crtc_state->lane_count, is_mst);
- intel_dp_set_link_params(intel_dp, link_rate, lane_count,
- link_mst);
- if (encoder->type == INTEL_OUTPUT_EDP)
- intel_edp_panel_on(intel_dp);
+ intel_edp_panel_on(intel_dp);
- intel_ddi_clk_select(encoder, pll);
+ intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- intel_prepare_dp_ddi_buffers(encoder);
+ if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+ else
+ intel_prepare_dp_ddi_buffers(encoder);
+
intel_ddi_init_dp_buf_reg(encoder);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
- bool has_hdmi_sink,
const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state,
- struct intel_shared_dpll *pll)
+ const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_encoder *drm_encoder = &encoder->base;
enum port port = intel_ddi_get_encoder_port(encoder);
int level = intel_ddi_hdmi_level(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- intel_ddi_clk_select(encoder, pll);
+ intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- intel_prepare_hdmi_ddi_buffers(encoder);
- if (IS_GEN9_BC(dev_priv))
- skl_ddi_set_iboost(encoder, level);
+ if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_vswing_sequence(dev_priv, level, port,
- INTEL_OUTPUT_HDMI);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_vswing_sequence(encoder, level);
+ bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
+ else
+ intel_prepare_hdmi_ddi_buffers(encoder, level);
+
+ if (IS_GEN9_BC(dev_priv))
+ skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
- intel_hdmi->set_infoframes(drm_encoder,
- has_hdmi_sink,
- crtc_state, conn_state);
+ intel_dig_port->set_infoframes(&encoder->base,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- int type = encoder->type;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
- intel_ddi_pre_enable_dp(encoder,
- pipe_config->port_clock,
- pipe_config->lane_count,
- pipe_config->shared_dpll,
- intel_crtc_has_type(pipe_config,
- INTEL_OUTPUT_DP_MST));
- }
- if (type == INTEL_OUTPUT_HDMI) {
- intel_ddi_pre_enable_hdmi(encoder,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state,
- pipe_config->shared_dpll);
- }
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+ else
+ intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
}
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_dp *intel_dp = NULL;
- int type = intel_encoder->type;
- uint32_t val;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
bool wait = false;
-
- /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
-
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
- intel_dp = enc_to_intel_dp(encoder);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- }
+ u32 val;
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
@@ -2237,34 +2281,80 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
+}
- if (intel_dp) {
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_off(intel_dp);
- }
+static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ /*
+ * old_crtc_state and old_conn_state are NULL when called from
+ * DP_MST. The main connector associated with this port is never
+ * bound to a crtc for MST.
+ */
+ bool is_mst = !old_crtc_state;
+
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- if (dig_port)
- intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+ intel_disable_ddi_buf(encoder);
- if (IS_CANNONLAKE(dev_priv))
- I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
- DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- else if (IS_GEN9_BC(dev_priv))
- I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
- DPLL_CTRL2_DDI_CLK_OFF(port)));
- else if (INTEL_GEN(dev_priv) < 9)
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_off(intel_dp);
- if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
- intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
- }
+ intel_ddi_clk_disable(encoder);
+}
+
+static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+
+ intel_disable_ddi_buf(encoder);
+
+ dig_port->set_infoframes(&encoder->base, false,
+ old_crtc_state, old_conn_state);
+
+ intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+
+ intel_ddi_clk_disable(encoder);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ /*
+ * old_crtc_state and old_conn_state are NULL when called from
+ * DP_MST. The main connector associated with this port is never
+ * bound to a crtc for MST.
+ */
+ if (old_crtc_state &&
+ intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_post_disable_hdmi(encoder,
+ old_crtc_state, old_conn_state);
+ else
+ intel_ddi_post_disable_dp(encoder,
+ old_crtc_state, old_conn_state);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t val;
@@ -2279,7 +2369,8 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_ddi_post_disable(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_buf(encoder);
+ intel_ddi_clk_disable(encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -2295,75 +2386,98 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
}
-static void intel_enable_ddi(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = intel_ddi_get_encoder_port(encoder);
- if (type == INTEL_OUTPUT_HDMI) {
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio;
- bool scrambling = pipe_config->hdmi_scrambling;
-
- intel_hdmi_handle_sink_scrambling(intel_encoder,
- conn_state->connector,
- clock_ratio, scrambling);
-
- /* In HDMI/DVI mode, the port width, and swing/emphasis values
- * are ignored so nothing special needs to be done besides
- * enabling the port.
- */
- I915_WRITE(DDI_BUF_CTL(port),
- intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE);
- } else if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
+ intel_dp_stop_link_train(intel_dp);
- if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
- intel_dp_stop_link_train(intel_dp);
+ intel_edp_backlight_on(crtc_state, conn_state);
+ intel_psr_enable(intel_dp, crtc_state);
+ intel_edp_drrs_enable(intel_dp, crtc_state);
- intel_edp_backlight_on(pipe_config, conn_state);
- intel_psr_enable(intel_dp);
- intel_edp_drrs_enable(intel_dp, pipe_config);
- }
+ if (crtc_state->has_audio)
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
- if (pipe_config->has_audio)
- intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
+static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ intel_hdmi_handle_sink_scrambling(encoder,
+ conn_state->connector,
+ crtc_state->hdmi_high_tmds_clock_ratio,
+ crtc_state->hdmi_scrambling);
+
+ /* In HDMI/DVI mode, the port width, and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+
+ if (crtc_state->has_audio)
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
-static void intel_disable_ddi(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+static void intel_enable_ddi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- int type = intel_encoder->type;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+ else
+ intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+}
+
+static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(intel_encoder);
+ intel_audio_codec_disable(encoder);
- if (type == INTEL_OUTPUT_HDMI) {
- intel_hdmi_handle_sink_scrambling(intel_encoder,
- old_conn_state->connector,
- false, false);
- }
+ intel_edp_drrs_disable(intel_dp, old_crtc_state);
+ intel_psr_disable(intel_dp, old_crtc_state);
+ intel_edp_backlight_off(old_conn_state);
+}
- if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder);
- intel_edp_drrs_disable(intel_dp, old_crtc_state);
- intel_psr_disable(intel_dp);
- intel_edp_backlight_off(old_conn_state);
- }
+ intel_hdmi_handle_sink_scrambling(encoder,
+ old_conn_state->connector,
+ false, false);
+}
+
+static void intel_disable_ddi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+ intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+ else
+ intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
uint8_t mask = pipe_config->lane_lat_optim_mask;
@@ -2435,7 +2549,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_hdmi *intel_hdmi;
+ struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
@@ -2474,9 +2588,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
- intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -2729,6 +2843,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
+ intel_infoframe_init(intel_dig_port);
+
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 5f91ddc78c7a..875d428ea75f 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -82,6 +82,39 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
#undef PRINT_FLAG
}
+static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ const u32 fuse2 = I915_READ(GEN8_FUSE2);
+
+ sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
+ GEN10_F2_S_ENA_SHIFT;
+ sseu->subslice_mask = (1 << 4) - 1;
+ sseu->subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
+ sseu->eu_total = hweight32(~I915_READ(GEN8_EU_DISABLE0));
+ sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE1));
+ sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE2));
+ sseu->eu_total += hweight8(~(I915_READ(GEN10_EU_DISABLE3) &
+ GEN10_EU_DIS_SS_MASK));
+
+ /*
+ * CNL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery.
+ */
+ sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total,
+ sseu_subslice_total(sseu)) : 0;
+
+ /* No restrictions on Power Gating */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -343,7 +376,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->num_sprites[pipe] = 1;
}
- if (i915.disable_display) {
+ if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
} else if (info->num_pipes > 0 &&
@@ -409,10 +442,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
- else if (INTEL_INFO(dev_priv)->gen >= 9)
+ else if (INTEL_GEN(dev_priv) == 9)
gen9_sseu_info_init(dev_priv);
-
- info->has_snoop = !info->has_llc;
+ else if (INTEL_GEN(dev_priv) >= 10)
+ gen10_sseu_info_init(dev_priv);
DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5ebdb63330dd..e8ccf89cb17b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return crtc->config->cpu_transcoder;
}
-static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
msleep(5);
line2 = I915_READ(reg) & line_mask;
- return line1 == line2;
+ return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* Wait for the display line to settle/start moving */
+ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+ DRM_ERROR("pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), onoff(state));
+}
+
+static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, false);
+}
+
+static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, true);
}
/*
@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- enum pipe pipe = crtc->pipe;
if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
100))
WARN(1, "pipe_off wait timed out\n");
} else {
- /* Wait for the display line to settle */
- if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
- WARN(1, "pipe_off wait timed out\n");
+ intel_wait_for_pipe_scanline_stopped(crtc);
}
}
@@ -1539,7 +1558,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
+ I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
I915_WRITE(CBR4_VLV, 0);
dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
@@ -1568,11 +1587,12 @@ static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
return count;
}
-static void i9xx_enable_pll(struct intel_crtc *crtc)
+static void i9xx_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config->dpll_hw_state.dpll;
+ u32 dpll = crtc_state->dpll_hw_state.dpll;
int i;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1609,7 +1629,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
- crtc->config->dpll_hw_state.dpll_md);
+ crtc_state->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
@@ -1627,15 +1647,6 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
}
}
-/**
- * i9xx_disable_pll - disable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to disable
- *
- * Disable the PLL for @pipe, making sure the pipe is off first.
- *
- * Note! This is for pre-ILK only.
- */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1944,15 +1955,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
POSTING_READ(reg);
/*
- * Until the pipe starts DSL will read as 0, which would cause
- * an apparent vblank timestamp jump, which messes up also the
- * frame count when it's derived from the timestamps. So let's
- * wait for the pipe to start properly before we call
- * drm_crtc_vblank_on()
+ * Until the pipe starts PIPEDSL reads will return a stale value,
+ * which causes an apparent vblank timestamp jump when PIPEDSL
+ * resets to its proper value. That also messes up the frame count
+ * when it's derived from the timestamps. So let's wait for the
+ * pipe to start properly before we call drm_crtc_vblank_on()
*/
- if (dev->max_vblank_count == 0 &&
- wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
- DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
+ if (dev->max_vblank_count == 0)
+ intel_wait_for_pipe_scanline_moving(crtc);
}
/**
@@ -2219,8 +2229,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
* something and try to run the system in a "less than optimal"
* mode that matches the user configuration.
*/
- if (i915_vma_get_fence(vma) == 0)
- i915_vma_pin_fence(vma);
+ i915_vma_pin_fence(vma);
}
i915_vma_get(vma);
@@ -2856,7 +2865,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = c->primary->fb;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
goto valid_fb;
}
}
@@ -2887,7 +2896,7 @@ valid_fb:
intel_crtc->pipe, PTR_ERR(intel_state->vma));
intel_state->vma = NULL;
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
return;
}
@@ -2908,7 +2917,7 @@ valid_fb:
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base;
@@ -3298,7 +3307,6 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum plane plane = primary->plane;
u32 linear_offset;
@@ -3307,16 +3315,14 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
int x = plane_state->main.x;
int y = plane_state->main.y;
unsigned long irqflags;
+ u32 dspaddr_offset;
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- crtc->dspaddr_offset = plane_state->main.offset;
+ dspaddr_offset = plane_state->main.offset;
else
- crtc->dspaddr_offset = linear_offset;
-
- crtc->adjusted_x = x;
- crtc->adjusted_y = y;
+ dspaddr_offset = linear_offset;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -3342,18 +3348,18 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
- crtc->dspaddr_offset);
+ dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
- crtc->dspaddr_offset);
+ dspaddr_offset);
I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
} else {
I915_WRITE_FW(DSPADDR(plane),
intel_plane_ggtt_offset(plane_state) +
- crtc->dspaddr_offset);
+ dspaddr_offset);
}
POSTING_READ_FW(reg);
@@ -3553,100 +3559,6 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl;
}
-static void skylake_update_primary_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl;
- unsigned int rotation = plane_state->base.rotation;
- u32 stride = skl_plane_stride(fb, 0, rotation);
- u32 aux_stride = skl_plane_stride(fb, 1, rotation);
- u32 surf_addr = plane_state->main.offset;
- int scaler_id = plane_state->scaler_id;
- int src_x = plane_state->main.x;
- int src_y = plane_state->main.y;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
- int src_h = drm_rect_height(&plane_state->base.src) >> 16;
- int dst_x = plane_state->base.dst.x1;
- int dst_y = plane_state->base.dst.y1;
- int dst_w = drm_rect_width(&plane_state->base.dst);
- int dst_h = drm_rect_height(&plane_state->base.dst);
- unsigned long irqflags;
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
- dst_w--;
- dst_h--;
-
- crtc->dspaddr_offset = surf_addr;
-
- crtc->adjusted_x = src_x;
- crtc->adjusted_y = src_y;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- PLANE_COLOR_PIPE_GAMMA_ENABLE |
- PLANE_COLOR_PIPE_CSC_ENABLE |
- PLANE_COLOR_PLANE_GAMMA_DISABLE);
- }
-
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
- I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
- I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
- (plane_state->aux.offset - surf_addr) | aux_stride);
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->aux.y << 16) | plane_state->aux.x);
-
- if (scaler_id >= 0) {
- uint32_t ps_ctrl = 0;
-
- WARN_ON(!dst_w || !dst_h);
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
- crtc_state->scaler_state.scalers[scaler_id].mode;
- I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
- I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
- I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
- } else {
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
- }
-
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
-
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void skylake_disable_primary_plane(struct intel_plane *primary,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
- enum plane_id plane_id = primary->id;
- enum pipe pipe = primary->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -3701,7 +3613,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
/* reset doesn't touch the display */
- if (!i915.force_reset_modeset_test &&
+ if (!i915_modparams.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
@@ -3757,7 +3669,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
- if (!i915.force_reset_modeset_test &&
+ if (!i915_modparams.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
@@ -3770,8 +3682,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
if (!gpu_reset_clobbers_display(dev_priv)) {
/* for testing only restore the display */
ret = __intel_display_resume(dev, state, ctx);
- if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
@@ -3782,6 +3694,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev);
+ intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
@@ -3804,15 +3717,14 @@ unlock:
clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
-static void intel_update_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state)
+static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->base.state);
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
- crtc->base.mode = crtc->base.state->mode;
+ crtc->base.mode = new_crtc_state->base.mode;
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@@ -3824,17 +3736,17 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
*/
I915_WRITE(PIPESRC(crtc->pipe),
- ((pipe_config->pipe_src_w - 1) << 16) |
- (pipe_config->pipe_src_h - 1));
+ ((new_crtc_state->pipe_src_w - 1) << 16) |
+ (new_crtc_state->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
- if (pipe_config->pch_pfit.enabled)
+ if (new_crtc_state->pch_pfit.enabled)
skylake_pfit_enable(crtc);
} else if (HAS_PCH_SPLIT(dev_priv)) {
- if (pipe_config->pch_pfit.enabled)
+ if (new_crtc_state->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
ironlake_pfit_disable(crtc, true);
@@ -4956,9 +4868,10 @@ void hsw_enable_ips(struct intel_crtc *crtc)
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
+ mutex_unlock(&dev_priv->pcu_lock);
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
@@ -4988,9 +4901,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
/* wait for pcode to finish disabling IPS, which may take up to 42ms */
if (intel_wait_for_register(dev_priv,
IPS_CTL, IPS_ENABLE, 0,
@@ -5118,7 +5031,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->base.state);
+ intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
+ crtc);
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
@@ -5130,7 +5044,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_pri_state) {
struct intel_plane_state *primary_state =
- to_intel_plane_state(primary->state);
+ intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
+ to_intel_plane(primary));
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
@@ -5159,7 +5074,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (old_pri_state) {
struct intel_plane_state *primary_state =
- to_intel_plane_state(primary->state);
+ intel_atomic_get_new_plane_state(old_intel_state,
+ to_intel_plane(primary));
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
@@ -5456,6 +5372,20 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
+static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool apply)
+{
+ u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+ u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+ if (apply)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+}
+
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5466,13 +5396,11 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
+ bool psl_clkgate_wa;
if (WARN_ON(intel_crtc->active))
return;
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->shared_dpll)
@@ -5506,19 +5434,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (intel_crtc->config->has_pch_encoder)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- else
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->has_pch_encoder)
- dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
-
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(pipe_config);
+ /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
+ psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+ intel_crtc->config->pch_pfit.enabled;
+ if (psl_clkgate_wa)
+ glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
else
@@ -5552,11 +5478,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->has_pch_encoder) {
+ if (psl_clkgate_wa) {
intel_wait_for_vblank(dev_priv, pipe);
- intel_wait_for_vblank(dev_priv, pipe);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
}
/* If we change the relative order between pipe/planes enabling, we need
@@ -5652,9 +5576,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
intel_encoders_disable(crtc, old_crtc_state, old_state);
drm_crtc_vblank_off(crtc);
@@ -5679,9 +5600,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_ddi_disable_pipe_clock(intel_crtc->config);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
-
- if (old_crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5891,7 +5809,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- i9xx_enable_pll(intel_crtc);
+ i9xx_enable_pll(intel_crtc, pipe_config);
i9xx_pfit_enable(intel_crtc);
@@ -6038,7 +5956,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_crtc->enabled_power_domains = 0;
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
- dev_priv->min_pixclk[intel_crtc->pipe] = 0;
+ dev_priv->min_cdclk[intel_crtc->pipe] = 0;
}
/*
@@ -6143,6 +6061,19 @@ struct intel_connector *intel_connector_alloc(void)
return connector;
}
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+ kfree(to_intel_digital_connector_state(connector->base.state));
+ kfree(connector);
+}
+
/* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state
* of the connector. */
@@ -6283,6 +6214,9 @@ retry:
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
+ if (pipe_config->ips_force_disable)
+ return false;
+
if (pipe_config->pipe_bpp > 24)
return false;
@@ -6307,7 +6241,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- pipe_config->ips_enabled = i915.enable_ips &&
+ pipe_config->ips_enabled = i915_modparams.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config_supports_ips(dev_priv, pipe_config);
}
@@ -6488,8 +6422,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- if (i915.panel_use_ssc >= 0)
- return i915.panel_use_ssc != 0;
+ if (i915_modparams.panel_use_ssc >= 0)
+ return i915_modparams.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@@ -6523,11 +6457,9 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
- crtc->lowfreq_avail = false;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
- crtc->lowfreq_avail = true;
} else {
crtc_state->dpll_hw_state.fp1 = fp;
}
@@ -7222,15 +7154,6 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (HAS_PIPE_CXSR(dev_priv)) {
- if (intel_crtc->lowfreq_avail) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- } else {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- }
- }
-
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
@@ -8366,8 +8289,6 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- crtc->lowfreq_avail = false;
-
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
return 0;
@@ -8840,11 +8761,11 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
if (IS_HASWELL(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
DRM_DEBUG_KMS("Failed to write to D_COMP\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
} else {
I915_WRITE(D_COMP_BDW, val);
POSTING_READ(D_COMP_BDW);
@@ -9026,8 +8947,6 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
}
}
- crtc->lowfreq_avail = false;
-
return 0;
}
@@ -9039,7 +8958,7 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
u32 temp;
temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- id = temp >> (port * 2);
+ id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
return;
@@ -9304,11 +9223,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (IS_BROADWELL(dev_priv) || dev_priv->info.gen >= 9) {
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
- if (IS_GEMINILAKE(dev_priv) || dev_priv->info.gen >= 10) {
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
bool blend_mode_420 = tmp &
PIPEMISC_YUV420_MODE_FULL_BLEND;
@@ -9753,7 +9672,7 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
/* VESA 640x480x72Hz mode to set on the pipe */
-static struct drm_display_mode load_detect_mode = {
+static const struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
@@ -9788,7 +9707,7 @@ intel_framebuffer_pitch_for_width(int width, int bpp)
}
static u32
-intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
return PAGE_ALIGN(pitch * mode->vdisplay);
@@ -9796,7 +9715,7 @@ intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
int depth, int bpp)
{
struct drm_framebuffer *fb;
@@ -9823,7 +9742,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9847,7 +9766,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
return fb;
#else
return NULL;
@@ -9856,7 +9775,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_framebuffer *fb,
int x, int y)
{
@@ -9890,7 +9809,7 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
}
int intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -10028,7 +9947,7 @@ found:
if (ret)
goto fail;
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
@@ -10218,7 +10137,7 @@ int intel_dotclock_calculate(int link_freq,
if (!m_n->link_n)
return 0;
- return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+ return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -10239,62 +10158,44 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
&pipe_config->fdi_m_n);
}
-/** Returns the currently programmed mode of the given pipe. */
-struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc)
+/* Returns the currently programmed mode of the given encoder. */
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc_state *crtc_state;
struct drm_display_mode *mode;
- struct intel_crtc_state *pipe_config;
- u32 htot, hsync, vtot, vsync;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ if (!encoder->get_hw_state(encoder, &pipe))
+ return NULL;
+
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return NULL;
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
- if (!pipe_config) {
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state) {
kfree(mode);
return NULL;
}
- /*
- * Construct a pipe_config sufficient for getting the clock info
- * back out of crtc_clock_get.
- *
- * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
- * to use a real value here instead.
- */
- pipe_config->cpu_transcoder = (enum transcoder) pipe;
- pipe_config->pixel_multiplier = 1;
- pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
- pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
- pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
- i9xx_crtc_clock_get(intel_crtc, pipe_config);
-
- mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
-
- cpu_transcoder = pipe_config->cpu_transcoder;
- htot = I915_READ(HTOTAL(cpu_transcoder));
- hsync = I915_READ(HSYNC(cpu_transcoder));
- vtot = I915_READ(VTOTAL(cpu_transcoder));
- vsync = I915_READ(VSYNC(cpu_transcoder));
-
- mode->hdisplay = (htot & 0xffff) + 1;
- mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
- mode->hsync_start = (hsync & 0xffff) + 1;
- mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
- mode->vdisplay = (vtot & 0xffff) + 1;
- mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
- mode->vsync_start = (vsync & 0xffff) + 1;
- mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+ crtc_state->base.crtc = &crtc->base;
- drm_mode_set_name(mode);
+ if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
+ kfree(crtc_state);
+ kfree(mode);
+ return NULL;
+ }
- kfree(pipe_config);
+ encoder->get_config(encoder, crtc_state);
+
+ intel_mode_from_pipe_config(mode, crtc_state);
+
+ kfree(crtc_state);
return mode;
}
@@ -10341,7 +10242,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
return false;
}
-static bool needs_scaling(struct intel_plane_state *state)
+static bool needs_scaling(const struct intel_plane_state *state)
{
int src_w = drm_rect_width(&state->base.src) >> 16;
int src_h = drm_rect_height(&state->base.src) >> 16;
@@ -10351,7 +10252,9 @@ static bool needs_scaling(struct intel_plane_state *state)
return (src_w != dst_w || src_h != dst_h);
}
-int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct drm_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct drm_plane_state *plane_state)
{
struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
@@ -10360,10 +10263,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->plane);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
bool mode_changed = needs_modeset(crtc_state);
- bool was_crtc_enabled = crtc->state->active;
+ bool was_crtc_enabled = old_crtc_state->base.active;
bool is_crtc_enabled = crtc_state->active;
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
@@ -10681,6 +10582,52 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
m_n->link_m, m_n->link_n, m_n->tu);
}
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+ OUTPUT_TYPE(UNUSED),
+ OUTPUT_TYPE(ANALOG),
+ OUTPUT_TYPE(DVO),
+ OUTPUT_TYPE(SDVO),
+ OUTPUT_TYPE(LVDS),
+ OUTPUT_TYPE(TVOUT),
+ OUTPUT_TYPE(HDMI),
+ OUTPUT_TYPE(DP),
+ OUTPUT_TYPE(EDP),
+ OUTPUT_TYPE(DSI),
+ OUTPUT_TYPE(UNKNOWN),
+ OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
+static void snprintf_output_types(char *buf, size_t len,
+ unsigned int output_types)
+{
+ char *str = buf;
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+ int r;
+
+ if ((output_types & BIT(i)) == 0)
+ continue;
+
+ r = snprintf(str, len, "%s%s",
+ str != buf ? "," : "", output_type_str[i]);
+ if (r >= len)
+ break;
+ str += r;
+ len -= r;
+
+ output_types &= ~BIT(i);
+ }
+
+ WARN_ON_ONCE(output_types != 0);
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
@@ -10691,10 +10638,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane *intel_plane;
struct intel_plane_state *state;
struct drm_framebuffer *fb;
+ char buf[64];
DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
crtc->base.base.id, crtc->base.name, context);
+ snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+ DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
+ buf, pipe_config->output_types);
+
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
@@ -10854,7 +10806,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct intel_dpll_hw_state dpll_hw_state;
struct intel_shared_dpll *shared_dpll;
struct intel_crtc_wm_state wm_state;
- bool force_thru;
+ bool force_thru, ips_force_disable;
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
@@ -10865,6 +10817,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
force_thru = crtc_state->pch_pfit.force_thru;
+ ips_force_disable = crtc_state->ips_force_disable;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
wm_state = crtc_state->wm;
@@ -10878,6 +10831,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->pch_pfit.force_thru = force_thru;
+ crtc_state->ips_force_disable = ips_force_disable;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
crtc_state->wm = wm_state;
@@ -11332,6 +11286,18 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
@@ -12080,7 +12046,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
- if (i915.fastboot &&
+ if (i915_modparams.fastboot &&
intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(old_crtc_state),
pipe_config, true)) {
@@ -12133,73 +12099,10 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
return dev->driver->get_vblank_counter(dev, crtc->pipe);
}
-static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
- unsigned crtc_mask)
-{
- unsigned last_vblank_count[I915_MAX_PIPES];
- enum pipe pipe;
- int ret;
-
- if (!crtc_mask)
- return;
-
- for_each_pipe(dev_priv, pipe) {
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
-
- if (!((1 << pipe) & crtc_mask))
- continue;
-
- ret = drm_crtc_vblank_get(&crtc->base);
- if (WARN_ON(ret != 0)) {
- crtc_mask &= ~(1 << pipe);
- continue;
- }
-
- last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
- }
-
- for_each_pipe(dev_priv, pipe) {
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
- long lret;
-
- if (!((1 << pipe) & crtc_mask))
- continue;
-
- lret = wait_event_timeout(dev->vblank[pipe].queue,
- last_vblank_count[pipe] !=
- drm_crtc_vblank_count(&crtc->base),
- msecs_to_jiffies(50));
-
- WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
-
- drm_crtc_vblank_put(&crtc->base);
- }
-}
-
-static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
-{
- /* fb updated, need to unpin old fb */
- if (crtc_state->fb_changed)
- return true;
-
- /* wm changes, need vblank before final wm's */
- if (crtc_state->update_wm_post)
- return true;
-
- if (crtc_state->wm.need_postvbl_update)
- return true;
-
- return false;
-}
-
static void intel_update_crtc(struct drm_crtc *crtc,
struct drm_atomic_state *state,
struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state,
- unsigned int *crtc_vblank_mask)
+ struct drm_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -12222,13 +12125,9 @@ static void intel_update_crtc(struct drm_crtc *crtc,
}
drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
- if (needs_vblank_wait(pipe_config))
- *crtc_vblank_mask |= drm_crtc_mask(crtc);
}
-static void intel_update_crtcs(struct drm_atomic_state *state,
- unsigned int *crtc_vblank_mask)
+static void intel_update_crtcs(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -12239,12 +12138,11 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
continue;
intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state, crtc_vblank_mask);
+ new_crtc_state);
}
}
-static void skl_update_crtcs(struct drm_atomic_state *state,
- unsigned int *crtc_vblank_mask)
+static void skl_update_crtcs(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
@@ -12278,13 +12176,16 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int cmask = drm_crtc_mask(crtc);
intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(crtc->state);
+ cstate = to_intel_crtc_state(new_crtc_state);
pipe = intel_crtc->pipe;
if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
+ if (skl_ddb_allocation_overlaps(dev_priv,
+ entries,
+ &cstate->wm.skl.ddb,
+ i))
continue;
updated |= cmask;
@@ -12303,7 +12204,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
vbl_wait = true;
intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state, crtc_vblank_mask);
+ new_crtc_state);
if (vbl_wait)
intel_wait_for_vblank(dev_priv, pipe);
@@ -12364,7 +12265,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
u64 put_domains[I915_MAX_PIPES] = {};
- unsigned crtc_vblank_mask = 0;
int i;
intel_atomic_commit_fence_wait(intel_state);
@@ -12405,7 +12305,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!crtc->state->active) {
+ if (!new_crtc_state->active) {
/*
* Make sure we don't call initial_watermarks
* for ILK-style watermark updates.
@@ -12414,7 +12314,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(crtc->state));
+ to_intel_crtc_state(new_crtc_state));
}
}
}
@@ -12453,7 +12353,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+ dev_priv->display.update_crtcs(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
@@ -12464,8 +12364,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need out special handling any more.
*/
- if (!state->legacy_cursor_update)
- intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
+ drm_atomic_helper_wait_for_flip_done(dev, state);
/*
* Now that the vblank has passed, we can go ahead and program the
@@ -12581,21 +12480,10 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- ret = drm_atomic_helper_setup_commit(state, nonblock);
- if (ret)
- return ret;
-
drm_atomic_state_get(state);
i915_sw_fence_init(&intel_state->commit_ready,
intel_atomic_commit_ready);
- ret = intel_atomic_prepare_commit(dev, state);
- if (ret) {
- DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&intel_state->commit_ready);
- return ret;
- }
-
/*
* The intel_legacy_cursor_update() fast path takes care
* of avoiding the vblank waits for simple cursor
@@ -12604,19 +12492,37 @@ static int intel_atomic_commit(struct drm_device *dev,
* updates happen during the correct frames. Gen9+ have
* double buffered watermarks and so shouldn't need this.
*
- * Do this after drm_atomic_helper_setup_commit() and
- * intel_atomic_prepare_commit() because we still want
- * to skip the flip and fb cleanup waits. Although that
- * does risk yanking the mapping from under the display
- * engine.
+ * Unset state->legacy_cursor_update before the call to
+ * drm_atomic_helper_setup_commit() because otherwise
+ * drm_atomic_helper_wait_for_flip_done() is a noop and
+ * we get FIFO underruns because we didn't wait
+ * for vblank.
*
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (INTEL_GEN(dev_priv) < 9)
- state->legacy_cursor_update = false;
+ if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
+ if (new_crtc_state->wm.need_postvbl_update ||
+ new_crtc_state->update_wm_post)
+ state->legacy_cursor_update = false;
+ }
+
+ ret = intel_atomic_prepare_commit(dev, state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ i915_sw_fence_commit(&intel_state->commit_ready);
+ return ret;
+ }
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (!ret)
+ ret = drm_atomic_helper_swap_state(state, true);
- ret = drm_atomic_helper_swap_state(state, true);
if (ret) {
i915_sw_fence_commit(&intel_state->commit_ready);
@@ -12628,8 +12534,8 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_track_fbs(state);
if (intel_state->modeset) {
- memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
- sizeof(intel_state->min_pixclk));
+ memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
+ sizeof(intel_state->min_cdclk));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
@@ -12658,6 +12564,58 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.set_crc_source = intel_crtc_set_crc_source,
};
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct drm_i915_gem_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct drm_i915_gem_request *rq = wait->request;
+
+ gen6_rps_boost(rq, NULL);
+ i915_gem_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
@@ -12755,12 +12713,22 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
if (!new_state->fence) { /* implicit fencing */
+ struct dma_fence *fence;
+
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
obj->resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
+
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ if (fence) {
+ add_rps_boost_after_vblank(new_state->crtc, fence);
+ dma_fence_put(fence);
+ }
+ } else {
+ add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
}
return 0;
@@ -12877,29 +12845,29 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *intel_cstate =
- to_intel_crtc_state(crtc->state);
struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
- bool modeset = needs_modeset(crtc->state);
+ struct intel_crtc_state *intel_cstate =
+ intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
+ bool modeset = needs_modeset(&intel_cstate->base);
if (!modeset &&
(intel_cstate->base.color_mgmt_changed ||
intel_cstate->update_pipe)) {
- intel_color_set_csc(crtc->state);
- intel_color_load_luts(crtc->state);
+ intel_color_set_csc(&intel_cstate->base);
+ intel_color_load_luts(&intel_cstate->base);
}
/* Perform vblank evasion around commit operation */
- intel_pipe_update_start(intel_crtc);
+ intel_pipe_update_start(intel_cstate);
if (modeset)
goto out;
if (intel_cstate->update_pipe)
- intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ intel_update_pipe_config(old_intel_cstate, intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
@@ -12913,8 +12881,12 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
- intel_pipe_update_end(intel_crtc);
+ intel_pipe_update_end(new_crtc_state);
}
/**
@@ -13063,6 +13035,14 @@ intel_legacy_cursor_update(struct drm_plane *plane,
goto slow;
old_plane_state = plane->state;
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ if (old_plane_state->commit &&
+ !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ goto slow;
/*
* If any parameters change that may affect watermarks,
@@ -13093,6 +13073,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
new_plane_state->crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
+ to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
+ to_intel_plane_state(plane->state),
to_intel_plane_state(new_plane_state));
if (ret)
goto out_free;
@@ -13122,17 +13104,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
}
old_fb = old_plane_state->fb;
- old_vma = to_intel_plane_state(old_plane_state)->vma;
i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
intel_plane->frontbuffer_bit);
/* Swap plane state */
- new_plane_state->fence = old_plane_state->fence;
- *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
- new_plane_state->fence = NULL;
- new_plane_state->fb = old_fb;
- to_intel_plane_state(new_plane_state)->vma = NULL;
+ plane->state = new_plane_state;
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
@@ -13144,13 +13121,17 @@ intel_legacy_cursor_update(struct drm_plane *plane,
intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
+ old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma);
if (old_vma)
intel_unpin_fb_vma(old_vma);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
- intel_plane_destroy_state(plane, new_plane_state);
+ if (ret)
+ intel_plane_destroy_state(plane, new_plane_state);
+ else
+ intel_plane_destroy_state(plane, old_plane_state);
return ret;
slow:
@@ -13219,8 +13200,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats);
modifiers = skl_format_modifiers_ccs;
- primary->update_plane = skylake_update_primary_plane;
- primary->disable_plane = skylake_disable_primary_plane;
+ primary->update_plane = skl_update_plane;
+ primary->disable_plane = skl_disable_plane;
} else if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13229,8 +13210,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
else
modifiers = skl_format_modifiers_noccs;
- primary->update_plane = skylake_update_primary_plane;
- primary->disable_plane = skylake_disable_primary_plane;
+ primary->update_plane = skl_update_plane;
+ primary->disable_plane = skl_disable_plane;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13501,7 +13482,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
- drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
+ drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
if (!drmmode_crtc)
return -ENOENT;
@@ -13665,7 +13646,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
+ dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
@@ -13708,14 +13689,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev_priv, PORT_B);
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev_priv, PORT_C);
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
@@ -14208,7 +14189,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
}
- if (dev_priv->info.gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.update_crtcs = skl_update_crtcs;
else
dev_priv->display.update_crtcs = intel_update_crtcs;
@@ -14388,8 +14369,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_update_cdclk(dev_priv);
dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
-
- intel_init_clock_gating(dev_priv);
}
/*
@@ -14682,6 +14661,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
pipe_name(pipe));
@@ -14691,8 +14672,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
- if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
- DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
+ intel_wait_for_pipe_scanline_stopped(crtc);
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
@@ -14739,10 +14719,10 @@ static struct intel_connector *intel_encoder_find_connector(struct intel_encoder
}
static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
- enum transcoder pch_transcoder)
+ enum pipe pch_transcoder)
{
return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
- (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
+ (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}
static void intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -14825,7 +14805,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
+ if (has_pch_trancoder(dev_priv, crtc->pipe))
crtc->pch_fifo_underrun_disabled = true;
}
}
@@ -15032,7 +15012,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- int pixclk = 0;
+ int min_cdclk = 0;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc_state->base.active) {
@@ -15053,22 +15033,18 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_crtc_compute_pixel_rate(crtc_state);
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc_state->pixel_rate;
- else
- WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+ if (dev_priv->display.modeset_calc_cdclk) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (WARN_ON(min_cdclk < 0))
+ min_cdclk = 0;
+ }
drm_calc_timestamping_constants(&crtc->base,
&crtc_state->base.adjusted_mode);
update_scanline_offset(crtc);
}
- dev_priv->min_pixclk[crtc->pipe] = pixclk;
+ dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
@@ -15105,6 +15081,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
int i;
+ if (IS_HASWELL(dev_priv)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
@@ -15186,6 +15171,7 @@ void intel_display_resume(struct drm_device *dev)
if (!ret)
ret = __intel_display_resume(dev, state, &ctx);
+ intel_enable_ipc(dev_priv);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -15201,6 +15187,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev_priv);
+ intel_init_clock_gating(dev_priv);
+
intel_setup_overlay(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09f274419eea..158438bb0389 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,6 +42,7 @@
#include "i915_drv.h"
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+#define DP_DPRX_ESI_LEN 14
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -103,13 +104,13 @@ static const int cnl_rates[] = { 162000, 216000, 270000,
static const int default_rates[] = { 162000, 270000, 540000 };
/**
- * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
*/
-static bool is_edp(struct intel_dp *intel_dp)
+bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -136,32 +137,20 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static int intel_dp_num_rates(u8 link_bw_code)
-{
- switch (link_bw_code) {
- default:
- WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
- link_bw_code);
- case DP_LINK_BW_1_62:
- return 1;
- case DP_LINK_BW_2_7:
- return 2;
- case DP_LINK_BW_5_4:
- return 3;
- }
-}
-
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
- int i, num_rates;
+ int i, max_rate;
- num_rates = intel_dp_num_rates(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
- for (i = 0; i < num_rates; i++)
+ for (i = 0; i < ARRAY_SIZE(default_rates); i++) {
+ if (default_rates[i] > max_rate)
+ break;
intel_dp->sink_rates[i] = default_rates[i];
+ }
- intel_dp->num_sink_rates = num_rates;
+ intel_dp->num_sink_rates = i;
}
/* Theoretical max between source and sink */
@@ -253,15 +242,15 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
} else if (IS_GEN9_BC(dev_priv)) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
- } else {
+ } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ IS_BROADWELL(dev_priv)) {
source_rates = default_rates;
size = ARRAY_SIZE(default_rates);
+ } else {
+ source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates) - 1;
}
- /* This depends on the fact that 5.4 is last value in the array */
- if (!intel_dp_source_supports_hbr2(intel_dp))
- size--;
-
intel_dp->source_rates = source_rates;
intel_dp->num_source_rates = size;
}
@@ -388,7 +377,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
- if (is_edp(intel_dp) && fixed_mode) {
+ if (intel_dp_is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -597,7 +586,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!is_edp(intel_dp));
+ WARN_ON(!intel_dp_is_edp(intel_dp));
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
intel_dp->active_pipe != intel_dp->pps_pipe);
@@ -644,7 +633,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!is_edp(intel_dp));
+ WARN_ON(!intel_dp_is_edp(intel_dp));
/*
* TODO: BXT has 2 PPS instances. The correct port->PPS instance
@@ -847,7 +836,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!is_edp(intel_dp) || code != SYS_RESTART)
+ if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
pps_lock(intel_dp);
@@ -907,7 +896,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
@@ -1018,7 +1007,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
else
precharge = 5;
- if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
+ if (IS_BROADWELL(dev_priv))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -1043,7 +1032,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_TIME_OUT_1600us |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
@@ -1481,14 +1470,9 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
- if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
- IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
- return true;
- else
- return false;
+ return max_rate >= 540000;
}
static void
@@ -1681,7 +1665,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
else
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
- if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
struct drm_display_mode *panel_mode =
intel_connector->panel.alt_fixed_mode;
struct drm_display_mode *req_mode = &pipe_config->base.mode;
@@ -1736,7 +1720,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that dont have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
@@ -1829,7 +1813,7 @@ found:
* DPLL0 VCO may need to be adjusted to get the correct
* clock for eDP. This will affect cdclk as well.
*/
- if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
+ if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
int vco;
switch (pipe_config->port_clock / 2) {
@@ -1848,6 +1832,8 @@ found:
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
+ intel_psr_compute_config(intel_dp, pipe_config);
+
return true;
}
@@ -1861,7 +1847,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
}
static void intel_dp_prepare(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2069,7 +2055,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return false;
cancel_delayed_work(&intel_dp->panel_vdd_work);
@@ -2119,7 +2105,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
bool vdd;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2203,7 +2189,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
@@ -2226,7 +2212,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
@@ -2267,7 +2253,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2285,7 +2271,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
@@ -2316,7 +2302,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2360,7 +2346,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@@ -2377,7 +2363,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2401,7 +2387,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@@ -2461,7 +2447,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2666,7 +2652,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+ if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
@@ -2688,33 +2674,55 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
static void intel_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
- if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
- intel_psr_disable(intel_dp);
-
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(old_conn_state);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_off(intel_dp);
+}
+
+static void g4x_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
/* disable the port before the pipe on g4x */
- if (INTEL_GEN(dev_priv) < 5)
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(intel_dp);
+}
+
+static void ilk_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+}
+
+static void vlv_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_psr_disable(intel_dp, old_crtc_state);
+
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
static void ilk_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -2727,8 +2735,8 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder,
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2736,8 +2744,8 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder,
}
static void chv_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2842,7 +2850,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
- struct intel_crtc_state *old_crtc_state)
+ const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2866,8 +2874,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2914,26 +2922,26 @@ static void intel_enable_dp(struct intel_encoder *encoder,
}
static void g4x_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(pipe_config, conn_state);
- intel_psr_enable(intel_dp);
+ intel_psr_enable(intel_dp, pipe_config);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -3040,7 +3048,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->active_pipe = crtc->pipe;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
/* now it's all ours */
@@ -3055,8 +3063,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
vlv_phy_pre_encoder_enable(encoder);
@@ -3064,8 +3072,8 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_dp_prepare(encoder, pipe_config);
@@ -3073,8 +3081,8 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
chv_phy_pre_encoder_enable(encoder);
@@ -3085,8 +3093,8 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_dp_prepare(encoder, pipe_config);
@@ -3094,8 +3102,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
chv_phy_post_pll_disable(encoder);
}
@@ -3147,9 +3155,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_GEN9_LP(dev_priv))
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
return intel_ddi_dp_voltage_max(encoder);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -3506,13 +3512,11 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (HAS_DDI(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ signal_levels = bxt_signal_levels(intel_dp);
+ } else if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
-
- if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv))
- signal_levels = 0;
- else
- mask = DDI_BUF_EMP_MASK;
+ mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
@@ -3791,7 +3795,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
/* Don't clobber cached eDP rates. */
- if (!is_edp(intel_dp)) {
+ if (!intel_dp_is_edp(intel_dp)) {
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@@ -3813,7 +3817,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* downstream port information. So, an early return here saves
* time from performing other operations which are not required.
*/
- if (!is_edp(intel_dp) && !intel_dp->sink_count)
+ if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
return false;
if (!drm_dp_is_branch(intel_dp->dpcd))
@@ -3835,7 +3839,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
{
u8 mstm_cap;
- if (!i915.enable_dp_mst)
+ if (!i915_modparams.enable_dp_mst)
return false;
if (!intel_dp->can_mst)
@@ -3853,7 +3857,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
- if (!i915.enable_dp_mst)
+ if (!i915_modparams.enable_dp_mst)
return;
if (!intel_dp->can_mst)
@@ -4000,15 +4004,9 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
- int ret;
-
- ret = drm_dp_dpcd_read(&intel_dp->aux,
- DP_SINK_COUNT_ESI,
- sink_irq_vector, 14);
- if (ret != 14)
- return false;
-
- return true;
+ return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
+ sink_irq_vector, DP_DPRX_ESI_LEN) ==
+ DP_DPRX_ESI_LEN;
}
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
@@ -4208,7 +4206,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool bret;
if (intel_dp->is_mst) {
- u8 esi[16] = { 0 };
+ u8 esi[DP_DPRX_ESI_LEN] = { 0 };
int ret = 0;
int retry;
bool handled;
@@ -4403,7 +4401,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
- if (is_edp(intel_dp))
+ if (intel_dp_is_edp(intel_dp))
return connector_status_connected;
/* if there's no downstream port, we're done */
@@ -4719,7 +4717,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
- if (is_edp(intel_dp))
+ if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (intel_digital_port_connected(to_i915(dev),
dp_to_dig_port(intel_dp)))
@@ -4745,10 +4743,6 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DP;
- DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
- yesno(intel_dp_source_supports_hbr2(intel_dp)),
- yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
-
if (intel_dp->reset_link_params) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@@ -4799,7 +4793,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (is_edp(intel_dp) || intel_connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid)
status = connector_status_connected;
intel_dp->detect_done = true;
@@ -4883,7 +4877,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
- if (is_edp(intel_attached_dp(connector)) &&
+ if (intel_dp_is_edp(intel_attached_dp(connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -4934,8 +4928,10 @@ intel_dp_connector_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
- /* Can't call is_edp() since the encoder may have been destroyed
- * already. */
+ /*
+ * Can't call intel_dp_is_edp() since the encoder may have been
+ * destroyed already.
+ */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
@@ -4949,7 +4945,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled do to the delayed vdd off.
@@ -4975,7 +4971,7 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
/*
@@ -5043,7 +5039,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
/* Reinit the power sequencer, in case BIOS did something with it. */
intel_dp_pps_init(encoder->dev, intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
@@ -5144,7 +5140,7 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
/*
* eDP not supported on g4x. so bail out early just
@@ -5167,7 +5163,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
@@ -5455,7 +5451,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
* The caller of this function needs to take a lock on dev_priv->drrs.
*/
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state,
+ const struct intel_crtc_state *crtc_state,
int refresh_rate)
{
struct intel_encoder *encoder;
@@ -5474,11 +5470,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- /*
- * FIXME: This needs proper synchronization with psr state for some
- * platforms that cannot have PSR and DRRS enabled at the same time.
- */
-
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -5552,7 +5543,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
* Initializes frontbuffer_bits and drrs.dp
*/
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5562,6 +5553,11 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
return;
}
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+ return;
+ }
+
mutex_lock(&dev_priv->drrs.mutex);
if (WARN_ON(dev_priv->drrs.dp)) {
DRM_ERROR("DRRS already enabled\n");
@@ -5583,7 +5579,7 @@ unlock:
*
*/
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- struct intel_crtc_state *old_crtc_state)
+ const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5833,7 +5829,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return true;
/*
@@ -6049,7 +6045,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev_priv, port))
+ if (intel_dp_is_port_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -6067,7 +6063,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- is_edp(intel_dp) && port != PORT_B && port != PORT_C))
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
return false;
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
@@ -6095,7 +6092,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -6151,7 +6148,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
goto err_encoder_init;
intel_encoder->compute_config = intel_dp_compute_config;
- intel_encoder->disable = intel_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
@@ -6159,18 +6155,24 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
} else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
+ intel_encoder->disable = ilk_disable_dp;
+ intel_encoder->post_disable = ilk_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
- if (INTEL_GEN(dev_priv) >= 5)
- intel_encoder->post_disable = ilk_post_disable_dp;
+ intel_encoder->disable = g4x_disable_dp;
}
intel_dig_port->port = port;
@@ -6193,6 +6195,9 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
+ if (port != PORT_A)
+ intel_infoframe_init(intel_dig_port);
+
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index d2830ba3162e..2bb2ceb9d463 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -264,7 +264,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
- if (!i915.enable_dpcd_backlight)
+ if (!i915_modparams.enable_dpcd_backlight)
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 93fc8ab9bb31..772521440a9f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -123,8 +123,8 @@ static int intel_dp_mst_atomic_check(struct drm_connector *connector,
}
static void intel_mst_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -133,7 +133,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
to_intel_connector(old_conn_state->connector);
int ret;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
@@ -146,8 +146,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -155,8 +155,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
-
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
@@ -164,20 +162,26 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
+ /*
+ * Power down mst path before disabling the port, otherwise we end
+ * up getting interrupts from the sink upon detecting link loss.
+ */
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+ false);
+
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
intel_dig_port->base.post_disable(&intel_dig_port->base,
NULL, NULL);
-
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -195,8 +199,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->encoder = encoder;
intel_mst->connector = connector;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL);
@@ -219,8 +224,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -229,7 +234,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
enum port port = intel_dig_port->port;
int ret;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_wait_for_register(dev_priv,
DP_TP_STATUS(port),
@@ -449,32 +454,52 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
- int i;
+ enum pipe pipe;
+ int ret;
intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
connector = &intel_connector->base;
- drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ intel_connector_free(intel_connector);
+ return NULL;
+ }
+
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
- for (i = PIPE_A; i <= PIPE_C; i++) {
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_dp->mst_encoders[i]->base.base);
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_encoder *enc =
+ &intel_dp->mst_encoders[pipe]->base.base;
+
+ ret = drm_mode_connector_attach_encoder(&intel_connector->base,
+ enc);
+ if (ret)
+ goto err;
}
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ ret = drm_mode_connector_set_path_property(connector, pathprop);
+ if (ret)
+ goto err;
+
return connector;
+
+err:
+ drm_connector_cleanup(connector);
+ return NULL;
}
static void intel_dp_register_mst_connector(struct drm_connector *connector)
@@ -494,6 +519,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
drm_connector_unregister(connector);
if (dev_priv->fbdev)
@@ -505,7 +531,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
drm_connector_unreference(connector);
- DRM_DEBUG_KMS("\n");
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -564,11 +589,12 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
{
- int i;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum pipe pipe;
- for (i = PIPE_A; i <= PIPE_C; i++)
- intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+ for_each_pipe(dev_priv, pipe)
+ intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 79fbaf78f604..6c7f8bca574e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -220,23 +220,23 @@ struct intel_encoder {
struct intel_crtc_state *,
struct drm_connector_state *);
void (*pre_pll_enable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*pre_enable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*enable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*disable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*post_disable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*post_pll_disable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
@@ -384,7 +384,8 @@ struct intel_atomic_state {
unsigned int active_pipe_changes;
unsigned int active_crtcs;
- unsigned int min_pixclk[I915_MAX_PIPES];
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -493,6 +494,8 @@ struct intel_crtc_scaler_state {
/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED 1
+/* Flag to get scanline using frame time stamps */
+#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
struct intel_pipe_wm {
struct intel_wm_level wm[5];
@@ -714,6 +717,9 @@ struct intel_crtc_state {
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ bool has_psr;
+ bool has_psr2;
+
/*
* Frequence the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -752,6 +758,7 @@ struct intel_crtc_state {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
+ bool ips_force_disable;
bool enable_fbc;
@@ -795,18 +802,10 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
- bool lowfreq_avail;
u8 plane_ids_mask;
unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
- /* Display surface base address adjustement for pageflips. Note that on
- * gen4+ this only adjusts up to a tile, offsets within a tile are
- * handled in the hw itself (with the TILEOFF register). */
- u32 dspaddr_offset;
- int adjusted_x;
- int adjusted_y;
-
struct intel_crtc_state *config;
/* global reset count when the last flip was submitted */
@@ -908,16 +907,6 @@ struct intel_hdmi {
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
- void (*write_infoframe)(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
- const void *frame, ssize_t len);
- void (*set_infoframes)(struct drm_encoder *encoder,
- bool enable,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct drm_encoder *encoder,
- const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder;
@@ -1068,6 +1057,17 @@ struct intel_digital_port {
bool release_cl2_override;
uint8_t max_lanes;
enum intel_display_power_domain ddi_io_power_domain;
+
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ bool (*infoframe_enabled)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder {
@@ -1188,6 +1188,30 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+static inline struct intel_plane_state *
+intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ return to_intel_plane_state(drm_atomic_get_new_plane_state(&state->base,
+ &plane->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_old_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return to_intel_crtc_state(drm_atomic_get_old_crtc_state(&state->base,
+ &crtc->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return to_intel_crtc_state(drm_atomic_get_new_crtc_state(&state->base,
+ &crtc->base));
+}
+
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@@ -1204,11 +1228,8 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1216,7 +1237,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
u32 mask)
{
- return mask & ~i915->rps.pm_intrmsk_mbz;
+ return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
}
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
@@ -1227,7 +1248,7 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
* We only use drm_irq_uninstall() at unload and VT switch, so
* this is the only thing we need to check.
*/
- return dev_priv->pm.irqs_enabled;
+ return dev_priv->runtime_pm.irqs_enabled;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
@@ -1245,8 +1266,8 @@ void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state);
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
@@ -1271,6 +1292,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
+u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
@@ -1289,6 +1311,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
/* intel_cdclk.c */
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void cnl_init_cdclk(struct drm_i915_private *dev_priv);
@@ -1331,11 +1354,13 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
-struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc);
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder);
+
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1376,7 +1401,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -1400,7 +1425,9 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
-int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct drm_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct drm_plane_state *plane_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
@@ -1498,7 +1525,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@@ -1517,9 +1545,9 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
@@ -1647,6 +1675,7 @@ void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
bool high_tmds_clock_ratio,
bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
+void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
@@ -1707,7 +1736,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
{
return 0;
}
@@ -1718,8 +1747,10 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
/* intel_psr.c */
-void intel_psr_enable(struct intel_dp *intel_dp);
-void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_i915_private *dev_priv,
@@ -1728,6 +1759,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -1755,7 +1788,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(dev_priv->pm.suspended,
+ WARN_ONCE(dev_priv->runtime_pm.suspended,
"Device suspended during HW access\n");
}
@@ -1763,7 +1796,7 @@ static inline void
assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
{
assert_rpm_device_not_suspended(dev_priv);
- WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
+ WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
"RPM wakelock ref not held during HW access");
}
@@ -1788,7 +1821,7 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
static inline void
disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
}
/**
@@ -1805,7 +1838,7 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
static inline void
enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
- atomic_dec(&dev_priv->pm.wakeref_count);
+ atomic_dec(&dev_priv->runtime_pm.wakeref_count);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
@@ -1843,7 +1876,6 @@ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_gem_request *rq,
struct intel_rps_client *rps);
-void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1859,16 +1891,19 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry **entries,
const struct skl_ddb_entry *ddb,
int ignore);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
-static inline int intel_enable_rc6(void)
+void intel_init_ipc(struct drm_i915_private *dev_priv);
+void intel_enable_ipc(struct drm_i915_private *dev_priv);
+static inline int intel_rc6_enabled(void)
{
- return i915.enable_rc6;
+ return i915_modparams.enable_rc6;
}
/* intel_sdvo.c */
@@ -1883,8 +1918,12 @@ struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+void skl_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -1956,7 +1995,9 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
/* intel_color.c */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 7442891762be..83f15848098a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -263,7 +263,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
/* XXX: old code skips write if control unchanged */
if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
- DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+ DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd);
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
@@ -330,6 +330,10 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
adjusted_mode->flags = 0;
if (IS_GEN9_LP(dev_priv)) {
+ /* Enable Frame time stamp based scanline reporting */
+ adjusted_mode->private_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+
/* Dual link goes to DSI transcoder A. */
if (intel_dsi->ports == BIT(PORT_C))
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
@@ -731,7 +735,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config);
+ const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
@@ -783,17 +787,22 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
*/
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;
DRM_DEBUG_KMS("\n");
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
/*
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
@@ -878,8 +887,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* the pre_enable hook.
*/
static void intel_dsi_enable_nop(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
DRM_DEBUG_KMS("\n");
}
@@ -889,8 +898,8 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder,
* the post_disable hook.
*/
static void intel_dsi_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -925,8 +934,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -1066,7 +1075,7 @@ out_put_power:
}
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1102,6 +1111,10 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
pixel_format_from_register_bits(fmt));
bpp = pipe_config->pipe_bpp;
+ /* Enable Frame time stamo based scanline reporting */
+ adjusted_mode->private_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
@@ -1370,7 +1383,7 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
@@ -1738,42 +1751,13 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
else
intel_encoder->crtc_mask = BIT(PIPE_B);
- if (dev_priv->vbt.dsi.config->dual_link) {
+ if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
-
- switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
- case DL_DCS_PORT_A:
- intel_dsi->dcs_backlight_ports = BIT(PORT_A);
- break;
- case DL_DCS_PORT_C:
- intel_dsi->dcs_backlight_ports = BIT(PORT_C);
- break;
- default:
- case DL_DCS_PORT_A_AND_C:
- intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
- break;
- }
-
- switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
- case DL_DCS_PORT_A:
- intel_dsi->dcs_cabc_ports = BIT(PORT_A);
- break;
- case DL_DCS_PORT_C:
- intel_dsi->dcs_cabc_ports = BIT(PORT_C);
- break;
- default:
- case DL_DCS_PORT_A_AND_C:
- intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
- break;
- }
- } else {
+ else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = BIT(port);
- intel_dsi->dcs_cabc_ports = BIT(port);
- }
- if (!dev_priv->vbt.dsi.config->cabc_supported)
- intel_dsi->dcs_cabc_ports = 0;
+ intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index c0a027274c06..53c9b763f4ce 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -175,8 +175,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
}
static void intel_disable_dvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -189,8 +189,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
}
static void intel_enable_dvo(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -258,8 +258,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
}
static void intel_dvo_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
@@ -379,32 +379,15 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
-intel_dvo_get_current_mode(struct drm_connector *connector)
+intel_dvo_get_current_mode(struct intel_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
- struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *mode;
- /* If the DVO port is active, that'll be the LVDS, so we can pull out
- * its timings to get how the BIOS set up the panel.
- */
- if (dvo_val & DVO_ENABLE) {
- struct intel_crtc *crtc;
- int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
-
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- if (crtc) {
- mode = intel_crtc_mode_get(dev, &crtc->base);
- if (mode) {
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
- }
- }
+ mode = intel_encoder_current_mode(encoder);
+ if (mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(mode);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
}
return mode;
@@ -551,7 +534,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
* mode being output through DVO.
*/
intel_panel_init(&intel_connector->panel,
- intel_dvo_get_current_mode(connector),
+ intel_dvo_get_current_mode(intel_encoder),
NULL, NULL);
intel_dvo->panel_wants_dither = true;
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 3c2d9cf22ed5..ab5bf4e2e28e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -22,7 +22,10 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@@ -39,6 +42,7 @@
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
+#define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
@@ -150,10 +154,11 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
default:
MISSING_CASE(INTEL_GEN(dev_priv));
case 10:
+ return GEN10_LR_CONTEXT_RENDER_SIZE;
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
- return i915.enable_execlists ?
+ return i915_modparams.enable_execlists ?
GEN8_LR_CONTEXT_RENDER_SIZE :
GEN8_CXT_TOTAL_SIZE;
case 7:
@@ -301,7 +306,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
&intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
init = class_info->init_execlists;
else
init = class_info->init_legacy;
@@ -380,6 +385,37 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
+static bool csb_force_mmio(struct drm_i915_private *i915)
+{
+ /*
+ * IOMMU adds unpredictable latency causing the CSB write (from the
+ * GPU into the HWSP) to only be visible some time after the interrupt
+ * (missed breadcrumb syndrome).
+ */
+ if (intel_vtd_active())
+ return true;
+
+ /* Older GVT emulation depends upon intercepting CSB mmio */
+ if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
+ return true;
+
+ return false;
+}
+
+static void intel_engine_init_execlist(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ execlists->csb_use_mmio = csb_force_mmio(engine->i915);
+
+ execlists->port_mask = 1;
+ BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+ GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+
+ execlists->queue = RB_ROOT;
+ execlists->first = NULL;
+}
+
/**
* intel_engines_setup_common - setup engine state not requiring hw access
* @engine: Engine to setup.
@@ -391,8 +427,7 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- engine->execlist_queue = RB_ROOT;
- engine->execlist_first = NULL;
+ intel_engine_init_execlist(engine);
intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
@@ -442,6 +477,116 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&engine->scratch);
}
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ if (!dev_priv->status_page_dmah)
+ return;
+
+ drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
+ engine->status_page.page_addr = NULL;
+}
+
+static void cleanup_status_page(struct intel_engine_cs *engine)
+{
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+
+ vma = fetch_and_zero(&engine->status_page.vma);
+ if (!vma)
+ return;
+
+ obj = vma->obj;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_unless_active(obj);
+}
+
+static int init_status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags;
+ void *vaddr;
+ int ret;
+
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return PTR_ERR(obj);
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ if (ret)
+ goto err;
+
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ flags = PIN_GLOBAL;
+ if (!HAS_LLC(engine->i915))
+ /* On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
+ ret = i915_vma_pin(vma, 0, 4096, flags);
+ if (ret)
+ goto err;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_unpin;
+ }
+
+ engine->status_page.vma = vma;
+ engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
+ engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
+
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ engine->name, i915_ggtt_offset(vma));
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static int init_phys_status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ GEM_BUG_ON(engine->id != RCS);
+
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+
+ engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
/**
* intel_engines_init_common - initialize cengine state which might require hw access
* @engine: Engine to initialize.
@@ -471,17 +616,44 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (IS_ERR(ring))
return PTR_ERR(ring);
+ /*
+ * Similarly the preempt context must always be available so that
+ * we can interrupt the engine at any time.
+ */
+ if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+ ring = engine->context_pin(engine,
+ engine->i915->preempt_context);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ goto err_unpin_kernel;
+ }
+ }
+
ret = intel_engine_init_breadcrumbs(engine);
if (ret)
- goto err_unpin;
+ goto err_unpin_preempt;
ret = i915_gem_render_state_init(engine);
if (ret)
- goto err_unpin;
+ goto err_breadcrumbs;
+
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ ret = init_phys_status_page(engine);
+ else
+ ret = init_status_page(engine);
+ if (ret)
+ goto err_rs_fini;
return 0;
-err_unpin:
+err_rs_fini:
+ i915_gem_render_state_fini(engine);
+err_breadcrumbs:
+ intel_engine_fini_breadcrumbs(engine);
+err_unpin_preempt:
+ if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ engine->context_unpin(engine, engine->i915->preempt_context);
+err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context);
return ret;
}
@@ -497,11 +669,18 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
intel_engine_cleanup_scratch(engine);
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ cleanup_phys_status_page(engine);
+ else
+ cleanup_status_page(engine);
+
i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context);
}
@@ -672,11 +851,6 @@ static int wa_add(struct drm_i915_private *dev_priv,
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, mask, _MASKED_FIELD(mask, value))
-#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
-#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
-
-#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
@@ -687,8 +861,8 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
- WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
- i915_mmio_reg_offset(reg));
+ I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+ i915_mmio_reg_offset(reg));
wa->hw_whitelist_count[engine->id]++;
return 0;
@@ -812,6 +986,23 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
+ if (HAS_LLC(dev_priv)) {
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+ *
+ * Must match Display Engine. See
+ * WaCompressedResourceDisplayNewHashMode.
+ */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+
+ I915_WRITE(MMCD_MISC_CTRL,
+ I915_READ(MMCD_MISC_CTRL) |
+ MMCD_PCLA |
+ MMCD_HOTSPOT_EN);
+ }
+
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -900,13 +1091,33 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
+ /*
+ * Supporting preemption with fine-granularity requires changes in the
+ * batch buffer programming. Since we can't break old userspace, we
+ * need to set our default preemption level to safe value. Userspace is
+ * still able to use more fine-grained preemption levels, since in
+ * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
+ * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
+ * not real HW workarounds, but merely a way to start using preemption
+ * while maintaining old contract with userspace.
+ */
+
+ /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+ WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+ /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
- ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
@@ -968,25 +1179,19 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
- /*
- * Actual WA is to disable percontext preemption granularity control
- * until D0 which is the default case so this is equivalent to
- * !WaDisablePerCtxtPreemptionGranularityControl:skl
- */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-
/* WaEnableGapsTsvCreditFix:skl */
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
/* WaInPlaceDecompressionHang:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
@@ -1022,8 +1227,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaDisablePooledEuLoadBalancingFix:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
- GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
}
/* WaDisableSbeCacheDispatchPortSharing:bxt */
@@ -1062,8 +1267,65 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaInPlaceDecompressionHang:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+
+ return 0;
+}
+
+static int cnl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ (I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));
+
+ /* WaForceContextSaveRestoreNonCoherent:cnl */
+ WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+
+ /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
+
+ /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
+
+ /* WaInPlaceDecompressionHang:cnl */
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+
+ /* WaPushConstantDereferenceHoldDisable:cnl */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+
+ /* FtrEnableFastAnisoL1BankingFix: cnl */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+
+ /* WaDisable3DMidCmdPreemption:cnl */
+ WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+ /* WaDisableGPGPUMidCmdPreemption:cnl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+ /* WaEnablePreemptionGranularityControlByUMD:cnl */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ if (ret)
+ return ret;
return 0;
}
@@ -1083,8 +1345,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- WA_SET_BIT(GAMT_CHKN_BIT_REG,
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ (I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));
/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
@@ -1097,7 +1360,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableGafsUnitClkGating:kbl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(
@@ -1105,8 +1369,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
/* WaDisableLSQCROPERFforOCL:kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
@@ -1150,7 +1415,8 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableGafsUnitClkGating:cfl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(
@@ -1158,8 +1424,9 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
return 0;
}
@@ -1188,6 +1455,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
err = glk_init_workarounds(engine);
else if (IS_COFFEELAKE(dev_priv))
err = cfl_init_workarounds(engine);
+ else if (IS_CANNONLAKE(dev_priv))
+ err = cnl_init_workarounds(engine);
else
err = 0;
if (err)
@@ -1279,12 +1548,12 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
return false;
- /* Both ports drained, no more ELSP submission? */
- if (port_request(&engine->execlist_port[0]))
+ /* Waiting to drain ELSP? */
+ if (READ_ONCE(engine->execlists.active))
return false;
/* ELSP is empty, but there are ready requests? */
- if (READ_ONCE(engine->execlist_first))
+ if (READ_ONCE(engine->execlists.first))
return false;
/* Ring stopped? */
@@ -1333,11 +1602,188 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
intel_engine_disarm_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- tasklet_kill(&engine->irq_tasklet);
- engine->no_priolist = false;
+ tasklet_kill(&engine->execlists.irq_tasklet);
+ engine->execlists.no_priolist = false;
+ }
+}
+
+bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 2:
+ return false; /* uses physical not virtual addresses */
+ case 3:
+ /* maybe only uses physical not virtual addresses */
+ return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
+ case 6:
+ return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
+ default:
+ return true;
}
}
+static void print_request(struct drm_printer *m,
+ struct drm_i915_gem_request *rq,
+ const char *prefix)
+{
+ drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+ rq->global_seqno,
+ i915_gem_request_completed(rq) ? "!" : "",
+ rq->ctx->hw_id, rq->fence.seqno,
+ rq->priotree.priority,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ rq->timeline->common->name);
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+{
+ struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+ const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_gem_request *rq;
+ struct rb_node *rb;
+ u64 addr;
+
+ drm_printf(m, "%s\n", engine->name);
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ engine->timeline->inflight_seqnos);
+ drm_printf(m, "\tReset count: %d\n",
+ i915_reset_engine_count(error, engine));
+
+ rcu_read_lock();
+
+ drm_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ drm_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ }
+
+ drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+ I915_READ(RING_START(engine->mmio_base)),
+ rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+ drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+ rq ? rq->ring->head : 0);
+ drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+ rq ? rq->ring->tail : 0);
+ drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+ I915_READ(RING_CTL(engine->mmio_base)),
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+ rcu_read_unlock();
+
+ addr = intel_engine_get_active_head(engine);
+ drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ addr = intel_engine_get_last_batch_head(engine);
+ drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+
+ if (i915_modparams.enable_execlists) {
+ const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ u32 ptr, read, write;
+ unsigned int idx;
+
+ drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
+ I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+ I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+ ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+ read = GEN8_CSB_READ_PTR(ptr);
+ write = GEN8_CSB_WRITE_PTR(ptr);
+ drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+ read, execlists->csb_head,
+ write,
+ intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
+ yesno(test_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted)));
+ if (read >= GEN8_CSB_ENTRIES)
+ read = 0;
+ if (write >= GEN8_CSB_ENTRIES)
+ write = 0;
+ if (read > write)
+ write += GEN8_CSB_ENTRIES;
+ while (read < write) {
+ idx = ++read % GEN8_CSB_ENTRIES;
+ drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
+ idx,
+ I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ hws[idx * 2],
+ I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
+ hws[idx * 2 + 1]);
+ }
+
+ rcu_read_lock();
+ for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+ unsigned int count;
+
+ rq = port_unpack(&execlists->port[idx], &count);
+ if (rq) {
+ drm_printf(m, "\t\tELSP[%d] count=%d, ",
+ idx, count);
+ print_request(m, rq, "rq: ");
+ } else {
+ drm_printf(m, "\t\tELSP[%d] idle\n",
+ idx);
+ }
+ }
+ drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
+ rcu_read_unlock();
+ } else if (INTEL_GEN(dev_priv) > 6) {
+ drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
+ }
+
+ spin_lock_irq(&engine->timeline->lock);
+ list_for_each_entry(rq, &engine->timeline->requests, link)
+ print_request(m, rq, "\t\tE ");
+ for (rb = execlists->first; rb; rb = rb_next(rb)) {
+ struct i915_priolist *p =
+ rb_entry(rb, typeof(*p), node);
+
+ list_for_each_entry(rq, &p->requests, priotree.link)
+ print_request(m, rq, "\t\tQ ");
+ }
+ spin_unlock_irq(&engine->timeline->lock);
+
+ spin_lock_irq(&b->rb_lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+
+ drm_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->rb_lock);
+
+ drm_printf(m, "\n");
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 8c8ead2276e0..1a0f5e0c8d10 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -69,9 +69,9 @@ static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
* address we program because it starts at the real start of the buffer, so we
* have to take this into consideration here.
*/
-static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
+static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
- return crtc->base.y - crtc->adjusted_y;
+ return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}
/*
@@ -291,6 +291,19 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
+ /* Display WA #0529: skl, kbl, bxt. */
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ u32 val = I915_READ(CHICKEN_MISC_4);
+
+ val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
+
+ if (i915_gem_object_get_tiling(params->vma->obj) !=
+ I915_TILING_X)
+ val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
+
+ I915_WRITE(CHICKEN_MISC_4, val);
+ }
+
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
@@ -714,8 +727,8 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
&effective_h);
- effective_w += crtc->adjusted_x;
- effective_h += crtc->adjusted_y;
+ effective_w += fbc->state_cache.plane.adjusted_x;
+ effective_h += fbc->state_cache.plane.adjusted_y;
return effective_w <= max_w && effective_h <= max_h;
}
@@ -744,6 +757,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
cache->plane.visible = plane_state->base.visible;
+ cache->plane.adjusted_x = plane_state->main.x;
+ cache->plane.adjusted_y = plane_state->main.y;
+ cache->plane.y = plane_state->base.src.y1 >> 16;
if (!cache->plane.visible)
return;
@@ -846,7 +862,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
return false;
}
- if (!i915.enable_fbc) {
+ if (!i915_modparams.enable_fbc) {
fbc->no_fbc_reason = "disabled per module param or by default";
return false;
}
@@ -875,12 +891,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane;
- params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
+ params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
+
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
+ 32 * fbc->threshold) * 8;
}
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
@@ -1293,8 +1313,8 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
- if (i915.enable_fbc >= 0)
- return !!i915.enable_fbc;
+ if (i915_modparams.enable_fbc >= 0)
+ return !!i915_modparams.enable_fbc;
if (!HAS_FBC(dev_priv))
return 0;
@@ -1338,8 +1358,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->has_fbc = false;
- i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
- DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
+ i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+ DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
+ i915_modparams.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 262e75c00dd2..ea96682568e8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -189,7 +189,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
" releasing it\n",
intel_fb->base.width, intel_fb->base.height,
sizes->fb_width, sizes->fb_height);
- drm_framebuffer_unreference(&intel_fb->base);
+ drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
@@ -206,6 +206,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
+ intel_runtime_pm_get(dev_priv);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -269,6 +270,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->vma = vma;
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -276,6 +278,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma);
out_unlock:
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -624,7 +627,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
ifbdev->fb = fb;
- drm_framebuffer_reference(&ifbdev->fb->base);
+ drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
for_each_crtc(dev, crtc) {
@@ -694,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
/* Due to peculiar init order wrt to hpd handling this is separate. */
if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp)) {
+ ifbdev->preferred_bpp))
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
- intel_fbdev_fini(to_i915(ifbdev->helper.dev));
- }
}
void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -797,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
- if (ifbdev)
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_sync(ifbdev);
+ if (ifbdev->vma)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04689600e337..77c123cc8817 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -88,14 +88,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(crtc->pipe);
- u32 pipestat = I915_READ(reg) & 0xffff0000;
+ u32 enable_mask;
lockdep_assert_held(&dev_priv->irq_lock);
- if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
+ I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
@@ -108,15 +109,16 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0xffff0000;
lockdep_assert_held(&dev_priv->irq_lock);
if (enable) {
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+ I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
} else {
- if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
}
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
new file mode 100644
index 000000000000..10037c0fdf95
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_guc.h"
+#include "i915_drv.h"
+
+static void gen8_guc_raise_irq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+}
+
+static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
+{
+ GEM_BUG_ON(!guc->send_regs.base);
+ GEM_BUG_ON(!guc->send_regs.count);
+ GEM_BUG_ON(i >= guc->send_regs.count);
+
+ return _MMIO(guc->send_regs.base + 4 * i);
+}
+
+void intel_guc_init_send_regs(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ enum forcewake_domains fw_domains = 0;
+ unsigned int i;
+
+ guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+ guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+
+ for (i = 0; i < guc->send_regs.count; i++) {
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ guc_send_reg(guc, i),
+ FW_REG_READ | FW_REG_WRITE);
+ }
+ guc->send_regs.fw_domains = fw_domains;
+}
+
+void intel_guc_init_early(struct intel_guc *guc)
+{
+ intel_guc_ct_init_early(&guc->ct);
+
+ mutex_init(&guc->send_mutex);
+ guc->send = intel_guc_send_nop;
+ guc->notify = gen8_guc_raise_irq;
+}
+
+static u32 get_gt_type(struct drm_i915_private *dev_priv)
+{
+ /* XXX: GT type based on PCI device ID? field seems unused by fw */
+ return 0;
+}
+
+static u32 get_core_family(struct drm_i915_private *dev_priv)
+{
+ u32 gen = INTEL_GEN(dev_priv);
+
+ switch (gen) {
+ case 9:
+ return GUC_CORE_FAMILY_GEN9;
+
+ default:
+ MISSING_CASE(gen);
+ return GUC_CORE_FAMILY_UNKNOWN;
+ }
+}
+
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+void intel_guc_init_params(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 params[GUC_CTL_MAX_DWORDS];
+ int i;
+
+ memset(params, 0, sizeof(params));
+
+ params[GUC_CTL_DEVICE_INFO] |=
+ (get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
+ (get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);
+
+ /*
+ * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
+ * second. This ARAR is calculated by:
+ * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
+ */
+ params[GUC_CTL_ARAT_HIGH] = 0;
+ params[GUC_CTL_ARAT_LOW] = 100000000;
+
+ params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
+
+ params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
+ GUC_CTL_VCS2_ENABLED;
+
+ params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
+ if (i915_modparams.guc_log_level >= 0) {
+ params[GUC_CTL_DEBUG] =
+ i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
+ } else {
+ params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
+ }
+
+ /* If GuC submission is enabled, set up additional parameters here */
+ if (i915_modparams.enable_guc_submission) {
+ u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+ u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+ u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+ params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+ params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+
+ pgs >>= PAGE_SHIFT;
+ params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
+ (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+
+ params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+
+ /* Unmask this bit to enable the GuC's internal scheduler */
+ params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+ }
+
+ /*
+ * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
+ * they are power context saved so it's ok to release forcewake
+ * when we are done here and take it again at xfer time.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+
+ I915_WRITE(SOFT_SCRATCH(0), 0);
+
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+}
+
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ WARN(1, "Unexpected send: action=%#x\n", *action);
+ return -ENODEV;
+}
+
+/*
+ * This function implements the MMIO based host to GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 status;
+ int i;
+ int ret;
+
+ GEM_BUG_ON(!len);
+ GEM_BUG_ON(len > guc->send_regs.count);
+
+ /* If CT is available, we expect to use MMIO only during init/fini */
+ GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
+ *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+ *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+
+ mutex_lock(&guc->send_mutex);
+ intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+
+ for (i = 0; i < len; i++)
+ I915_WRITE(guc_send_reg(guc, i), action[i]);
+
+ POSTING_READ(guc_send_reg(guc, i - 1));
+
+ intel_guc_notify(guc);
+
+ /*
+ * No GuC command should ever take longer than 10ms.
+ * Fast commands should still complete in 10us.
+ */
+ ret = __intel_wait_for_register_fw(dev_priv,
+ guc_send_reg(guc, 0),
+ INTEL_GUC_RECV_MASK,
+ INTEL_GUC_RECV_MASK,
+ 10, 10, &status);
+ if (status != INTEL_GUC_STATUS_SUCCESS) {
+ /*
+ * Either the GuC explicitly returned an error (which
+ * we convert to -EIO here) or no response at all was
+ * received within the timeout limit (-ETIMEDOUT)
+ */
+ if (ret != -ETIMEDOUT)
+ ret = -EIO;
+
+ DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
+ " ret=%d status=0x%08X response=0x%08X\n",
+ action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+ }
+
+ intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+ mutex_unlock(&guc->send_mutex);
+
+ return ret;
+}
+
+int intel_guc_sample_forcewake(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 action[2];
+
+ action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
+ /* WaRsDisableCoarsePowerGating:skl,bxt */
+ if (!intel_rc6_enabled() ||
+ NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+ action[1] = 0;
+ else
+ /* bit 0 and 1 are for Render and Media domain separately */
+ action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/**
+ * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
+ * @guc: intel_guc structure
+ * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
+ *
+ * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
+ * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
+ * intel_huc_auth().
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_AUTHENTICATE_HUC,
+ rsa_offset
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/**
+ * intel_guc_suspend() - notify GuC entering suspend state
+ * @dev_priv: i915 device private
+ */
+int intel_guc_suspend(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct i915_gem_context *ctx;
+ u32 data[3];
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ gen9_disable_guc_interrupts(dev_priv);
+
+ ctx = dev_priv->kernel_context;
+
+ data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
+ /* any value greater than GUC_POWER_D0 */
+ data[1] = GUC_POWER_D1;
+ /* first page is shared data with GuC */
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
+ LRC_GUCSHR_PN * PAGE_SIZE;
+
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_resume() - notify GuC resuming from suspend state
+ * @dev_priv: i915 device private
+ */
+int intel_guc_resume(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct i915_gem_context *ctx;
+ u32 data[3];
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ if (i915_modparams.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
+ ctx = dev_priv->kernel_context;
+
+ data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
+ data[1] = GUC_POWER_D0;
+ /* first page is shared data with GuC */
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
+ LRC_GUCSHR_PN * PAGE_SIZE;
+
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
+ *
+ * This is a wrapper to create an object for use with the GuC. In order to
+ * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
+ * both some backing storage and a range inside the Global GTT. We must pin
+ * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
+ * range is reserved inside GuC.
+ *
+ * Return: A i915_vma if successful, otherwise an ERR_PTR.
+ */
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create(dev_priv, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ ret = i915_vma_pin(vma, 0, PAGE_SIZE,
+ PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+ u32 wopcm_size = GUC_WOPCM_TOP;
+
+ /* On BXT, the top of WOPCM is reserved for RC6 context */
+ if (IS_GEN9_LP(dev_priv))
+ wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+ return wopcm_size;
+}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
new file mode 100644
index 000000000000..418450b1ae27
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_H_
+#define _INTEL_GUC_H_
+
+#include "intel_uncore.h"
+#include "intel_guc_fw.h"
+#include "intel_guc_fwif.h"
+#include "intel_guc_ct.h"
+#include "intel_guc_log.h"
+#include "intel_uc_fw.h"
+#include "i915_guc_reg.h"
+#include "i915_vma.h"
+
+/*
+ * Top level structure of GuC. It handles firmware loading and manages client
+ * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
+ * ExecList submission.
+ */
+struct intel_guc {
+ struct intel_uc_fw fw;
+ struct intel_guc_log log;
+ struct intel_guc_ct ct;
+
+ /* Log snapshot if GuC errors during load */
+ struct drm_i915_gem_object *load_err_log;
+
+ /* intel_guc_recv interrupt related state */
+ bool interrupts_enabled;
+
+ struct i915_vma *ads_vma;
+ struct i915_vma *stage_desc_pool;
+ void *stage_desc_pool_vaddr;
+ struct ida stage_ids;
+
+ struct i915_guc_client *execbuf_client;
+
+ DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
+ /* Cyclic counter mod pagesize */
+ u32 db_cacheline;
+
+ /* GuC's FW specific registers used in MMIO send */
+ struct {
+ u32 base;
+ unsigned int count;
+ enum forcewake_domains fw_domains;
+ } send_regs;
+
+ /* To serialize the intel_guc_send actions */
+ struct mutex send_mutex;
+
+ /* GuC's FW specific send function */
+ int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
+
+ /* GuC's FW specific notify function */
+ void (*notify)(struct intel_guc *guc);
+};
+
+static
+inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ return guc->send(guc, action, len);
+}
+
+static inline void intel_guc_notify(struct intel_guc *guc)
+{
+ guc->notify(guc);
+}
+
+/*
+ * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
+ * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
+ * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
+ * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
+ */
+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+ u32 offset = i915_ggtt_offset(vma);
+
+ GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+ GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
+
+ return offset;
+}
+
+void intel_guc_init_early(struct intel_guc *guc);
+void intel_guc_init_send_regs(struct intel_guc *guc);
+void intel_guc_init_params(struct intel_guc *guc);
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
+int intel_guc_suspend(struct drm_i915_private *dev_priv);
+int intel_guc_resume(struct drm_i915_private *dev_priv);
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 8b0ae7fce7f2..ef67a36354c5 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -26,31 +26,9 @@
* Dave Gordon <david.s.gordon@intel.com>
* Alex Dai <yu.dai@intel.com>
*/
-#include "i915_drv.h"
-#include "intel_uc.h"
-/**
- * DOC: GuC-specific firmware loader
- *
- * intel_guc:
- * Top level structure of guc. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
- * ExecList submission.
- *
- * Firmware versioning:
- * The firmware build process will generate a version header file with major and
- * minor version defined. The versions are built into CSS header of firmware.
- * i915 kernel driver set the minimal firmware version required per platform.
- * The firmware installation package will install (symbolic link) proper version
- * of firmware.
- *
- * GuC address space:
- * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
- * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
- * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
- * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
- *
- */
+#include "intel_guc_fw.h"
+#include "i915_drv.h"
#define SKL_FW_MAJOR 6
#define SKL_FW_MINOR 1
@@ -78,88 +56,45 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-
-static u32 get_gttype(struct drm_i915_private *dev_priv)
-{
- /* XXX: GT type based on PCI device ID? field seems unused by fw */
- return 0;
-}
-
-static u32 get_core_family(struct drm_i915_private *dev_priv)
-{
- u32 gen = INTEL_GEN(dev_priv);
-
- switch (gen) {
- case 9:
- return GUC_CORE_FAMILY_GEN9;
-
- default:
- MISSING_CASE(gen);
- return GUC_CORE_FAMILY_UNKNOWN;
- }
-}
-
-/*
- * Initialise the GuC parameter block before starting the firmware
- * transfer. These parameters are read by the firmware on startup
- * and cannot be changed thereafter.
+/**
+ * intel_guc_fw_select() - selects GuC firmware for uploading
+ *
+ * @guc: intel_guc struct
+ *
+ * Return: zero when we know firmware, non-zero in other case
*/
-static void guc_params_init(struct drm_i915_private *dev_priv)
+int intel_guc_fw_select(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- u32 params[GUC_CTL_MAX_DWORDS];
- int i;
-
- memset(&params, 0, sizeof(params));
-
- params[GUC_CTL_DEVICE_INFO] |=
- (get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
- (get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
-
- /*
- * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
- * second. This ARAR is calculated by:
- * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
- */
- params[GUC_CTL_ARAT_HIGH] = 0;
- params[GUC_CTL_ARAT_LOW] = 100000000;
-
- params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
-
- params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
- GUC_CTL_VCS2_ENABLED;
-
- params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
- if (i915.guc_log_level >= 0) {
- params[GUC_CTL_DEBUG] =
- i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
- } else
- params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
-
- /* If GuC submission is enabled, set up additional parameters here */
- if (i915.enable_guc_submission) {
- u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
- u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
- u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
- params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
- params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
- pgs >>= PAGE_SHIFT;
- params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
- (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
- params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+ intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
- /* Unmask this bit to enable the GuC's internal scheduler */
- params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+ if (i915_modparams.guc_firmware_path) {
+ guc->fw.path = i915_modparams.guc_firmware_path;
+ guc->fw.major_ver_wanted = 0;
+ guc->fw.minor_ver_wanted = 0;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ guc->fw.path = I915_SKL_GUC_UCODE;
+ guc->fw.major_ver_wanted = SKL_FW_MAJOR;
+ guc->fw.minor_ver_wanted = SKL_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ guc->fw.path = I915_BXT_GUC_UCODE;
+ guc->fw.major_ver_wanted = BXT_FW_MAJOR;
+ guc->fw.minor_ver_wanted = BXT_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ guc->fw.path = I915_KBL_GUC_UCODE;
+ guc->fw.major_ver_wanted = KBL_FW_MAJOR;
+ guc->fw.minor_ver_wanted = KBL_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ guc->fw.path = I915_GLK_GUC_UCODE;
+ guc->fw.major_ver_wanted = GLK_FW_MAJOR;
+ guc->fw.minor_ver_wanted = GLK_FW_MINOR;
+ } else {
+ DRM_ERROR("No GuC firmware known for platform with GuC!\n");
+ return -ENOENT;
}
- I915_WRITE(SOFT_SCRATCH(0), 0);
-
- for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+ return 0;
}
/*
@@ -250,38 +185,16 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
return ret;
}
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
-{
- u32 wopcm_size = GUC_WOPCM_TOP;
-
- /* On BXT, the top of WOPCM is reserved for RC6 context */
- if (IS_GEN9_LP(dev_priv))
- wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
-
- return wopcm_size;
-}
-
/*
* Load the GuC firmware blob into the MinuteIA.
*/
-static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
+static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct i915_vma *vma;
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
- ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
- if (ret) {
- DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
- return ret;
- }
-
- vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
- if (IS_ERR(vma)) {
- DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
- return PTR_ERR(vma);
- }
+ GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -312,23 +225,15 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
}
- guc_params_init(dev_priv);
-
ret = guc_ucode_xfer_dma(dev_priv, vma);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- /*
- * We keep the object pages for reuse during resume. But we can unpin it
- * now that DMA has completed, so it doesn't continue to take up space.
- */
- i915_vma_unpin(vma);
-
return ret;
}
/**
- * intel_guc_init_hw() - finish preparing the GuC for activity
+ * intel_guc_fw_upload() - finish preparing the GuC for activity
* @guc: intel_guc structure
*
* Called during driver loading and also after a GPU reset.
@@ -340,80 +245,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
*
* Return: non-zero code on error
*/
-int intel_guc_init_hw(struct intel_guc *guc)
+int intel_guc_fw_upload(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- const char *fw_path = guc->fw.path;
- int ret;
-
- DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
- fw_path,
- intel_uc_fw_status_repr(guc->fw.fetch_status),
- intel_uc_fw_status_repr(guc->fw.load_status));
-
- if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -EIO;
-
- guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_uc_fw_status_repr(guc->fw.fetch_status),
- intel_uc_fw_status_repr(guc->fw.load_status));
-
- ret = guc_ucode_xfer(dev_priv);
-
- if (ret)
- return -EAGAIN;
-
- guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
-
- DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
- i915.enable_guc_submission ? "submission enabled" : "loaded",
- guc->fw.path,
- guc->fw.major_ver_found, guc->fw.minor_ver_found);
-
- return 0;
-}
-
-/**
- * intel_guc_select_fw() - selects GuC firmware for loading
- * @guc: intel_guc struct
- *
- * Return: zero when we know firmware, non-zero in other case
- */
-int intel_guc_select_fw(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- guc->fw.path = NULL;
- guc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
- guc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
- guc->fw.type = INTEL_UC_FW_TYPE_GUC;
-
- if (i915.guc_firmware_path) {
- guc->fw.path = i915.guc_firmware_path;
- guc->fw.major_ver_wanted = 0;
- guc->fw.minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- guc->fw.path = I915_SKL_GUC_UCODE;
- guc->fw.major_ver_wanted = SKL_FW_MAJOR;
- guc->fw.minor_ver_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- guc->fw.path = I915_BXT_GUC_UCODE;
- guc->fw.major_ver_wanted = BXT_FW_MAJOR;
- guc->fw.minor_ver_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- guc->fw.path = I915_KBL_GUC_UCODE;
- guc->fw.major_ver_wanted = KBL_FW_MAJOR;
- guc->fw.minor_ver_wanted = KBL_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- guc->fw.path = I915_GLK_GUC_UCODE;
- guc->fw.major_ver_wanted = GLK_FW_MAJOR;
- guc->fw.minor_ver_wanted = GLK_FW_MINOR;
- } else {
- DRM_ERROR("No GuC firmware known for platform with GuC!\n");
- return -ENOENT;
- }
-
- return 0;
+ return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h
new file mode 100644
index 000000000000..023f5baa9dd6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fw.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_FW_H_
+#define _INTEL_GUC_FW_H_
+
+struct intel_guc;
+
+int intel_guc_fw_select(struct intel_guc *guc);
+int intel_guc_fw_upload(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 5fa286074811..80c507435458 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -56,10 +56,6 @@
#define WQ_LEN_SHIFT 16
#define WQ_NO_WCFLUSH_WAIT (1 << 27)
#define WQ_PRESENT_WORKLOAD (1 << 28)
-#define WQ_WORKLOAD_SHIFT 29
-#define WQ_WORKLOAD_GENERAL (0 << WQ_WORKLOAD_SHIFT)
-#define WQ_WORKLOAD_GPGPU (1 << WQ_WORKLOAD_SHIFT)
-#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
#define WQ_RING_TAIL_SHIFT 20
#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
@@ -86,8 +82,8 @@
#define GUC_CTL_ARAT_LOW 2
#define GUC_CTL_DEVICE_INFO 3
-#define GUC_CTL_GTTYPE_SHIFT 0
-#define GUC_CTL_COREFAMILY_SHIFT 7
+#define GUC_CTL_GT_TYPE_SHIFT 0
+#define GUC_CTL_CORE_FAMILY_SHIFT 7
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
@@ -182,49 +178,49 @@
*/
struct uc_css_header {
- uint32_t module_type;
+ u32 module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
- uint32_t header_size_dw;
- uint32_t header_version;
- uint32_t module_id;
- uint32_t module_vendor;
+ u32 header_size_dw;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
union {
struct {
- uint8_t day;
- uint8_t month;
- uint16_t year;
+ u8 day;
+ u8 month;
+ u16 year;
};
- uint32_t date;
+ u32 date;
};
- uint32_t size_dw; /* uCode plus header_size_dw */
- uint32_t key_size_dw;
- uint32_t modulus_size_dw;
- uint32_t exponent_size_dw;
+ u32 size_dw; /* uCode plus header_size_dw */
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
union {
struct {
- uint8_t hour;
- uint8_t min;
- uint16_t sec;
+ u8 hour;
+ u8 min;
+ u16 sec;
};
- uint32_t time;
+ u32 time;
};
char username[8];
char buildnumber[12];
union {
struct {
- uint32_t branch_client_version;
- uint32_t sw_version;
+ u32 branch_client_version;
+ u32 sw_version;
} guc;
struct {
- uint32_t sw_version;
- uint32_t reserved;
+ u32 sw_version;
+ u32 reserved;
} huc;
};
- uint32_t prod_preprod_fw;
- uint32_t reserved[12];
- uint32_t header_info;
+ u32 prod_preprod_fw;
+ u32 reserved[12];
+ u32 header_info;
} __packed;
struct guc_doorbell_info {
@@ -388,7 +384,11 @@ struct guc_ct_buffer_desc {
/* Preempt to idle on quantum expiry */
#define POLICY_PREEMPT_TO_IDLE (1<<1)
-#define POLICY_MAX_NUM_WI 15
+#define POLICY_MAX_NUM_WI 15
+#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
+#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
+#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000
+#define POLICY_DEFAULT_FAULT_TIME_US 250000
struct guc_policy {
/* Time for one workload to execute. (in micro seconds) */
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 16d3b8719cab..76d3eb1e4614 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -21,8 +21,11 @@
* IN THE SOFTWARE.
*
*/
+
#include <linux/debugfs.h>
#include <linux/relay.h>
+
+#include "intel_guc_log.h"
#include "i915_drv.h"
static void guc_log_capture_logs(struct intel_guc *guc);
@@ -144,7 +147,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
struct dentry *log_dir;
int ret;
- if (i915.guc_log_level < 0)
+ if (i915_modparams.guc_log_level < 0)
return 0;
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
@@ -480,7 +483,7 @@ err_runtime:
guc_log_runtime_destroy(guc);
err:
/* logging will remain off */
- i915.guc_log_level = -1;
+ i915_modparams.guc_log_level = -1;
return ret;
}
@@ -502,7 +505,8 @@ static void guc_flush_logs(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ if (!i915_modparams.enable_guc_submission ||
+ (i915_modparams.guc_log_level < 0))
return;
/* First disable the interrupts, will be renabled afterwards */
@@ -524,13 +528,14 @@ int intel_guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
- uint32_t size, flags;
+ u32 flags;
+ u32 size;
int ret;
GEM_BUG_ON(guc->log.vma);
- if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+ if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+ i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX;
/* The first page is to save log buffer state. Allocate one
* extra page for others in case for overlap */
@@ -555,7 +560,7 @@ int intel_guc_log_create(struct intel_guc *guc)
guc->log.vma = vma;
- if (i915.guc_log_level >= 0) {
+ if (i915_modparams.guc_log_level >= 0) {
ret = guc_log_runtime_create(guc);
if (ret < 0)
goto err_vma;
@@ -576,7 +581,7 @@ err_vma:
i915_vma_unpin_and_release(&guc->log.vma);
err:
/* logging will be off */
- i915.guc_log_level = -1;
+ i915_modparams.guc_log_level = -1;
return ret;
}
@@ -600,7 +605,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
return -EINVAL;
/* This combination doesn't make sense & won't have any effect */
- if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0))
return 0;
ret = guc_log_control(guc, log_param.value);
@@ -610,7 +615,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
}
if (log_param.logging_enabled) {
- i915.guc_log_level = log_param.verbosity;
+ i915_modparams.guc_log_level = log_param.verbosity;
/* If log_level was set as -1 at boot time, then the relay channel file
* wouldn't have been created by now and interrupts also would not have
@@ -633,7 +638,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
guc_flush_logs(guc);
/* As logging is disabled, update log level to reflect that */
- i915.guc_log_level = -1;
+ i915_modparams.guc_log_level = -1;
}
return ret;
@@ -641,7 +646,8 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_guc_submission || i915.guc_log_level < 0)
+ if (!i915_modparams.enable_guc_submission ||
+ (i915_modparams.guc_log_level < 0))
return;
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -651,7 +657,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_guc_submission)
+ if (!i915_modparams.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
new file mode 100644
index 000000000000..f512cf79339b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_LOG_H_
+#define _INTEL_GUC_LOG_H_
+
+#include <linux/workqueue.h>
+
+#include "intel_guc_fwif.h"
+
+struct drm_i915_private;
+struct intel_guc;
+
+struct intel_guc_log {
+ u32 flags;
+ struct i915_vma *vma;
+ /* The runtime stuff gets created only when GuC logging gets enabled */
+ struct {
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+ } runtime;
+ /* logging related stats */
+ u32 capture_miss_count;
+ u32 flush_interrupt_count;
+ u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
+int intel_guc_log_create(struct intel_guc *guc);
+void intel_guc_log_destroy(struct intel_guc *guc);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+void i915_guc_log_register(struct drm_i915_private *dev_priv);
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index c17ed0e62b67..b4a7f31f0214 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -58,7 +58,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
*/
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_gvt)
+ if (!i915_modparams.enable_gvt)
return;
if (intel_vgpu_active(dev_priv)) {
@@ -73,7 +73,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
return;
bail:
- i915.enable_gvt = 0;
+ i915_modparams.enable_gvt = 0;
}
/**
@@ -90,17 +90,17 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (!i915.enable_gvt) {
+ if (!i915_modparams.enable_gvt) {
DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
return 0;
}
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
return -EIO;
}
- if (i915.enable_guc_submission) {
+ if (i915_modparams.enable_guc_submission) {
DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
@@ -123,7 +123,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
bail:
- i915.enable_gvt = 0;
+ i915_modparams.enable_gvt = 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index d9d87d96fb69..12ac270a5f93 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -428,7 +428,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
- if (!i915.enable_hangcheck)
+ if (!i915_modparams.enable_hangcheck)
return;
if (!READ_ONCE(dev_priv->gt.awake))
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e8abea7594ec..4dea833f9d1b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -70,7 +70,7 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
-static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
+static u32 g4x_infoframe_index(unsigned int type)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -85,7 +85,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
}
}
-static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
+static u32 g4x_infoframe_enable(unsigned int type)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -100,9 +100,11 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
}
}
-static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
+static u32 hsw_infoframe_enable(unsigned int type)
{
switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_ENABLE_VSC_HSW;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -118,10 +120,12 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
- enum hdmi_infoframe_type type,
+ unsigned int type,
int i)
{
switch (type) {
+ case DP_SDP_VSC:
+ return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,7 +140,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
static void g4x_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -191,7 +195,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
static void ibx_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -251,7 +255,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
static void cpt_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -309,7 +313,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
static void vlv_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -368,7 +372,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
static void hsw_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -377,6 +381,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t data_reg;
+ int data_size = type == DP_SDP_VSC ?
+ VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
int i;
u32 val = I915_READ(ctl_reg);
@@ -392,7 +398,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
- for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ for (; i < data_size; i += 4)
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
mmiowb();
@@ -434,7 +440,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
uint8_t buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -450,7 +456,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
buffer[3] = 0;
len++;
- intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@@ -481,7 +487,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- intel_hdmi->rgb_quant_range_selectable);
+ intel_hdmi->rgb_quant_range_selectable,
+ is_hdmi2_sink);
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
@@ -945,6 +952,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -965,7 +973,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -991,8 +999,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
@@ -1003,8 +1011,8 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1025,8 +1033,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1075,8 +1083,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1130,18 +1138,20 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
}
static void intel_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
@@ -1184,14 +1194,15 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state);
+ intel_dig_port->set_infoframes(&encoder->base, false,
+ old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
static void g4x_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
@@ -1200,16 +1211,16 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
}
static void pch_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
@@ -1314,7 +1325,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return status;
}
-static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
+static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
@@ -1642,24 +1653,24 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
}
static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
intel_hdmi_prepare(encoder, pipe_config);
- intel_hdmi->set_infoframes(&encoder->base,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state);
+ intel_dig_port->set_infoframes(&encoder->base,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
}
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1669,9 +1680,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
- intel_hdmi->set_infoframes(&encoder->base,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state);
+ dport->set_infoframes(&encoder->base,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1679,8 +1690,8 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
}
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder, pipe_config);
@@ -1688,8 +1699,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder, pipe_config);
@@ -1697,23 +1708,23 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
vlv_phy_reset_lanes(encoder);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1727,11 +1738,10 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
}
static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1741,9 +1751,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
- intel_hdmi->set_infoframes(&encoder->base,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state);
+ dport->set_infoframes(&encoder->base,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1958,6 +1968,34 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
+void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_dig_port->write_infoframe = vlv_write_infoframe;
+ intel_dig_port->set_infoframes = vlv_set_infoframes;
+ intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
+ } else if (IS_G4X(dev_priv)) {
+ intel_dig_port->write_infoframe = g4x_write_infoframe;
+ intel_dig_port->set_infoframes = g4x_set_infoframes;
+ intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
+ } else if (HAS_DDI(dev_priv)) {
+ intel_dig_port->write_infoframe = hsw_write_infoframe;
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
+ } else if (HAS_PCH_IBX(dev_priv)) {
+ intel_dig_port->write_infoframe = ibx_write_infoframe;
+ intel_dig_port->set_infoframes = ibx_set_infoframes;
+ intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
+ } else {
+ intel_dig_port->write_infoframe = cpt_write_infoframe;
+ intel_dig_port->set_infoframes = cpt_set_infoframes;
+ intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
+ }
+}
+
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@@ -1993,28 +2031,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
return;
intel_encoder->hpd_pin = intel_hpd_pin(port);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_hdmi->write_infoframe = vlv_write_infoframe;
- intel_hdmi->set_infoframes = vlv_set_infoframes;
- intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
- } else if (IS_G4X(dev_priv)) {
- intel_hdmi->write_infoframe = g4x_write_infoframe;
- intel_hdmi->set_infoframes = g4x_set_infoframes;
- intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
- } else if (HAS_DDI(dev_priv)) {
- intel_hdmi->write_infoframe = hsw_write_infoframe;
- intel_hdmi->set_infoframes = hsw_set_infoframes;
- intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
- } else if (HAS_PCH_IBX(dev_priv)) {
- intel_hdmi->write_infoframe = ibx_write_infoframe;
- intel_hdmi->set_infoframes = ibx_set_infoframes;
- intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
- } else {
- intel_hdmi->write_infoframe = cpt_write_infoframe;
- intel_hdmi->set_infoframes = cpt_set_infoframes;
- intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
- }
-
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
@@ -2113,5 +2129,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
+ intel_infoframe_init(intel_dig_port);
+
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 6145fa0d6773..c8a48cbc2b7d 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -21,9 +21,11 @@
* IN THE SOFTWARE.
*
*/
-#include <linux/firmware.h>
+
+#include <linux/types.h>
+
+#include "intel_huc.h"
#include "i915_drv.h"
-#include "intel_uc.h"
/**
* DOC: HuC Firmware
@@ -76,6 +78,42 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
GLK_HUC_FW_MINOR, GLK_BLD_NUM)
/**
+ * intel_huc_select_fw() - selects HuC firmware for loading
+ * @huc: intel_huc struct
+ */
+void intel_huc_select_fw(struct intel_huc *huc)
+{
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+
+ intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+
+ if (i915_modparams.huc_firmware_path) {
+ huc->fw.path = i915_modparams.huc_firmware_path;
+ huc->fw.major_ver_wanted = 0;
+ huc->fw.minor_ver_wanted = 0;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ huc->fw.path = I915_SKL_HUC_UCODE;
+ huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ huc->fw.path = I915_BXT_HUC_UCODE;
+ huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ huc->fw.path = I915_KBL_HUC_UCODE;
+ huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ huc->fw.path = I915_GLK_HUC_UCODE;
+ huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
+ } else {
+ DRM_ERROR("No HuC firmware known for platform with HuC!\n");
+ return;
+ }
+}
+
+/**
* huc_ucode_xfer() - DMA's the firmware
* @dev_priv: the drm_i915_private device
*
@@ -83,26 +121,15 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
*
* Return: 0 on success, non-zero on failure
*/
-static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
+static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- struct i915_vma *vma;
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
unsigned long offset = 0;
u32 size;
int ret;
- ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
- if (ret) {
- DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
- return ret;
- }
-
- vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
- if (IS_ERR(vma)) {
- DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
- return PTR_ERR(vma);
- }
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -133,55 +160,10 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- /*
- * We keep the object pages for reuse during resume. But we can unpin it
- * now that DMA has completed, so it doesn't continue to take up space.
- */
- i915_vma_unpin(vma);
-
return ret;
}
/**
- * intel_huc_select_fw() - selects HuC firmware for loading
- * @huc: intel_huc struct
- */
-void intel_huc_select_fw(struct intel_huc *huc)
-{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
- huc->fw.path = NULL;
- huc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
- huc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
- huc->fw.type = INTEL_UC_FW_TYPE_HUC;
-
- if (i915.huc_firmware_path) {
- huc->fw.path = i915.huc_firmware_path;
- huc->fw.major_ver_wanted = 0;
- huc->fw.minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- huc->fw.path = I915_SKL_HUC_UCODE;
- huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- huc->fw.path = I915_BXT_HUC_UCODE;
- huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc->fw.path = I915_KBL_HUC_UCODE;
- huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- huc->fw.path = I915_GLK_HUC_UCODE;
- huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
- } else {
- DRM_ERROR("No HuC firmware known for platform with HuC!\n");
- return;
- }
-}
-
-/**
* intel_huc_init_hw() - load HuC uCode to device
* @huc: intel_huc structure
*
@@ -195,49 +177,26 @@ void intel_huc_select_fw(struct intel_huc *huc)
*/
void intel_huc_init_hw(struct intel_huc *huc)
{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- int err;
-
- DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
- huc->fw.path,
- intel_uc_fw_status_repr(huc->fw.fetch_status),
- intel_uc_fw_status_repr(huc->fw.load_status));
-
- if (huc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
-
- huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
-
- err = huc_ucode_xfer(dev_priv);
-
- huc->fw.load_status = err ?
- INTEL_UC_FIRMWARE_FAIL : INTEL_UC_FIRMWARE_SUCCESS;
-
- DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
- huc->fw.path,
- intel_uc_fw_status_repr(huc->fw.fetch_status),
- intel_uc_fw_status_repr(huc->fw.load_status));
-
- if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
-
- return;
+ intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
}
/**
- * intel_guc_auth_huc() - authenticate ucode
- * @dev_priv: the drm_i915_device
+ * intel_huc_auth() - Authenticate HuC uCode
+ * @huc: intel_huc structure
+ *
+ * Called after HuC and GuC firmware loading during intel_uc_init_hw().
*
- * Triggers a HuC fw authentication request to the GuC via intel_guc_action_
- * authenticate_huc interface.
+ * This function pins HuC firmware image object into GGTT.
+ * Then it invokes GuC action to authenticate passing the offset to RSA
+ * signature through intel_guc_auth_huc(). It then waits for 50ms for
+ * firmware verification ACK and unpins the object.
*/
-void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
+void intel_huc_auth(struct intel_huc *huc)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct intel_huc *huc = &dev_priv->huc;
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+ struct intel_guc *guc = &i915->guc;
struct i915_vma *vma;
int ret;
- u32 data[2];
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return;
@@ -250,23 +209,19 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
return;
}
- /* Specify auth action and where public signature is. */
- data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
- data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
-
- ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
+ ret = intel_guc_auth_huc(guc,
+ guc_ggtt_offset(vma) + huc->fw.rsa_offset);
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto out;
}
/* Check authentication status, it should be done by now */
- ret = intel_wait_for_register(dev_priv,
- HUC_STATUS2,
- HUC_FW_VERIFIED,
- HUC_FW_VERIFIED,
- 50);
-
+ ret = intel_wait_for_register(i915,
+ HUC_STATUS2,
+ HUC_FW_VERIFIED,
+ HUC_FW_VERIFIED,
+ 50);
if (ret) {
DRM_ERROR("HuC: Authentication failed %d\n", ret);
goto out;
@@ -275,4 +230,3 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
out:
i915_vma_unpin(vma);
}
-
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
new file mode 100644
index 000000000000..aaa38b9e5817
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_HUC_H_
+#define _INTEL_HUC_H_
+
+#include "intel_uc_fw.h"
+
+struct intel_huc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* HuC-specific additions */
+};
+
+void intel_huc_select_fw(struct intel_huc *huc);
+void intel_huc_init_hw(struct intel_huc *huc);
+void intel_huc_auth(struct intel_huc *huc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index eb5827110d8f..49fdf09f9919 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -438,7 +438,9 @@ static bool
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
- !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ msgs[i].addr == msgs[i + 1].addr &&
+ !(msgs[i].flags & I2C_M_RD) &&
+ (msgs[i].len == 1 || msgs[i].len == 2) &&
(msgs[i + 1].flags & I2C_M_RD));
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6f972e6ec663..d36e25607435 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -208,8 +208,9 @@
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-
#define WA_TAIL_DWORDS 2
+#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
+#define PREEMPT_ID 0x1
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
@@ -243,8 +244,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
return 0;
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
- USES_PPGTT(dev_priv) &&
- i915.use_mmio_flip >= 0)
+ USES_PPGTT(dev_priv))
return 1;
return 0;
@@ -279,17 +279,110 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
desc = ctx->desc_template; /* bits 0-11 */
- desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
+ desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
/* bits 12-31 */
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
ce->lrc_desc = desc;
}
-uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static struct i915_priolist *
+lookup_priolist(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_priolist *p;
+ struct rb_node **parent, *rb;
+ bool first = true;
+
+ if (unlikely(execlists->no_priolist))
+ prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+ /* most positive priority is scheduled first, equal priorities fifo */
+ rb = NULL;
+ parent = &execlists->queue.rb_node;
+ while (*parent) {
+ rb = *parent;
+ p = rb_entry(rb, typeof(*p), node);
+ if (prio > p->priority) {
+ parent = &rb->rb_left;
+ } else if (prio < p->priority) {
+ parent = &rb->rb_right;
+ first = false;
+ } else {
+ return p;
+ }
+ }
+
+ if (prio == I915_PRIORITY_NORMAL) {
+ p = &execlists->default_priolist;
+ } else {
+ p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ /* Convert an allocation failure to a priority bump */
+ if (unlikely(!p)) {
+ prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+ /* To maintain ordering with all rendering, after an
+ * allocation failure we have to disable all scheduling.
+ * Requests will then be executed in fifo, and schedule
+ * will ensure that dependencies are emitted in fifo.
+ * There will be still some reordering with existing
+ * requests, so if userspace lied about their
+ * dependencies that reordering may be visible.
+ */
+ execlists->no_priolist = true;
+ goto find_priolist;
+ }
+ }
+
+ p->priority = prio;
+ INIT_LIST_HEAD(&p->requests);
+ rb_link_node(&p->node, rb, parent);
+ rb_insert_color(&p->node, &execlists->queue);
+
+ if (first)
+ execlists->first = &p->node;
+
+ return ptr_pack_bits(p, first, 1);
+}
+
+static void unwind_wa_tail(struct drm_i915_gem_request *rq)
{
- return ctx->engine[engine->id].lrc_desc;
+ rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+}
+
+static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *rq, *rn;
+ struct i915_priolist *uninitialized_var(p);
+ int last_prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->timeline->lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->timeline->requests,
+ link) {
+ if (i915_gem_request_completed(rq))
+ return;
+
+ __i915_gem_request_unsubmit(rq);
+ unwind_wa_tail(rq);
+
+ GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
+ if (rq->priotree.priority != last_prio) {
+ p = lookup_priolist(engine,
+ &rq->priotree,
+ rq->priotree.priority);
+ p = ptr_mask_bits(p, 1);
+
+ last_prio = rq->priotree.priority;
+ }
+
+ list_add(&rq->priotree.link, &p->requests);
+ }
}
static inline void
@@ -336,14 +429,20 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
return ce->lrc_desc;
}
+static inline void elsp_write(u64 desc, u32 __iomem *elsp)
+{
+ writel(upper_32_bits(desc), elsp);
+ writel(lower_32_bits(desc), elsp);
+}
+
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlist_port;
+ struct execlist_port *port = engine->execlists.port;
u32 __iomem *elsp =
engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
- for (n = ARRAY_SIZE(engine->execlist_port); n--; ) {
+ for (n = execlists_num_ports(&engine->execlists); n--; ) {
struct drm_i915_gem_request *rq;
unsigned int count;
u64 desc;
@@ -361,8 +460,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = 0;
}
- writel(upper_32_bits(desc), elsp);
- writel(lower_32_bits(desc), elsp);
+ elsp_write(desc, elsp);
}
}
@@ -395,25 +493,43 @@ static void port_assign(struct execlist_port *port,
port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
}
+static void inject_preempt_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce =
+ &engine->i915->preempt_context->engine[engine->id];
+ u32 __iomem *elsp =
+ engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+ unsigned int n;
+
+ GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
+ GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
+
+ memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
+ ce->ring->tail += WA_TAIL_BYTES;
+ ce->ring->tail &= (ce->ring->size - 1);
+ ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
+
+ for (n = execlists_num_ports(&engine->execlists); --n; )
+ elsp_write(0, elsp);
+
+ elsp_write(ce->lrc_desc, elsp);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *last;
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ const struct execlist_port * const last_port =
+ &execlists->port[execlists->port_mask];
+ struct drm_i915_gem_request *last = port_request(port);
struct rb_node *rb;
bool submit = false;
- last = port_request(port);
- if (last)
- /* WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_breadcrumb()
- * for where we prepare the padding after the end of the
- * request.
- */
- last->tail = last->wa_tail;
-
- GEM_BUG_ON(port_isset(&port[1]));
-
/* Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
* static for a context, and unique to each, so we only execute
@@ -436,9 +552,68 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
spin_lock_irq(&engine->timeline->lock);
- rb = engine->execlist_first;
- GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
- while (rb) {
+ rb = execlists->first;
+ GEM_BUG_ON(rb_first(&execlists->queue) != rb);
+ if (!rb)
+ goto unlock;
+
+ if (last) {
+ /*
+ * Don't resubmit or switch until all outstanding
+ * preemptions (lite-restore) are seen. Then we
+ * know the next preemption status we see corresponds
+ * to this ELSP update.
+ */
+ if (port_count(&port[0]) > 1)
+ goto unlock;
+
+ if (can_preempt(engine) &&
+ rb_entry(rb, struct i915_priolist, node)->priority >
+ max(last->priotree.priority, 0)) {
+ /*
+ * Switch to our empty preempt context so
+ * the state of the GPU is known (idle).
+ */
+ inject_preempt_context(engine);
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ goto unlock;
+ } else {
+ /*
+ * In theory, we could coalesce more requests onto
+ * the second port (the first port is active, with
+ * no preemptions pending). However, that means we
+ * then have to deal with the possible lite-restore
+ * of the second port (as we submit the ELSP, there
+ * may be a context-switch) but also we may complete
+ * the resubmission before the context-switch. Ergo,
+ * coalescing onto the second port will cause a
+ * preemption event, but we cannot predict whether
+ * that will affect port[0] or port[1].
+ *
+ * If the second port is already active, we can wait
+ * until the next context-switch before contemplating
+ * new requests. The GPU will be busy and we should be
+ * able to resubmit the new ELSP before it idles,
+ * avoiding pipeline bubbles (momentary pauses where
+ * the driver is unable to keep up the supply of new
+ * work).
+ */
+ if (port_count(&port[1]))
+ goto unlock;
+
+ /* WaIdleLiteRestore:bdw,skl
+ * Apply the wa NOOPs to prevent
+ * ring:HEAD == req:TAIL as we resubmit the
+ * request. See gen8_emit_breadcrumb() for
+ * where we prepare the padding after the
+ * end of the request.
+ */
+ last->tail = last->wa_tail;
+ }
+ }
+
+ do {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
@@ -460,7 +635,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port != engine->execlist_port) {
+ if (port == last_port) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -485,38 +660,108 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
port++;
+
+ GEM_BUG_ON(port_isset(port));
}
INIT_LIST_HEAD(&rq->priotree.link);
- rq->priotree.priority = INT_MAX;
-
__i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, engine));
+ trace_i915_gem_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
rb = rb_next(rb);
- rb_erase(&p->node, &engine->execlist_queue);
+ rb_erase(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
- }
+ } while (rb);
done:
- engine->execlist_first = rb;
+ execlists->first = rb;
if (submit)
port_assign(port, last);
+unlock:
spin_unlock_irq(&engine->timeline->lock);
- if (submit)
+ if (submit) {
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
execlists_submit_ports(engine);
+ }
+}
+
+static void
+execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+{
+ struct execlist_port *port = execlists->port;
+ unsigned int num_ports = execlists_num_ports(execlists);
+
+ while (num_ports-- && port_isset(port)) {
+ struct drm_i915_gem_request *rq = port_request(port);
+
+ GEM_BUG_ON(!execlists->active);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
+ i915_gem_request_put(rq);
+
+ memset(port, 0, sizeof(*port));
+ port++;
+ }
}
-static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
+static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
- const struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_gem_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
- return port_count(&port[0]) + port_count(&port[1]) < 2;
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ /* Cancel the requests on the HW and clear the ELSP tracker. */
+ execlist_cancel_port_requests(execlists);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline->requests, link) {
+ GEM_BUG_ON(!rq->global_seqno);
+ if (!i915_gem_request_completed(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ rb = execlists->first;
+ while (rb) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+
+ list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+ INIT_LIST_HEAD(&rq->priotree.link);
+
+ dma_fence_set_error(&rq->fence, -EIO);
+ __i915_gem_request_submit(rq);
+ }
+
+ rb = rb_next(rb);
+ rb_erase(&p->node, &execlists->queue);
+ INIT_LIST_HEAD(&p->requests);
+ if (p->priority != I915_PRIORITY_NORMAL)
+ kmem_cache_free(engine->i915->priorities, p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+
+ execlists->queue = RB_ROOT;
+ execlists->first = NULL;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ /*
+ * The port is checked prior to scheduling a tasklet, but
+ * just in case we have suspended the tasklet to do the
+ * wedging make sure that when it wakes, it decides there
+ * is no work to do by clearing the irq_posted bit.
+ */
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
/*
@@ -525,8 +770,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
*/
static void intel_lrc_irq_handler(unsigned long data)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port * const port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
/* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -538,19 +784,24 @@ static void intel_lrc_irq_handler(unsigned long data)
*/
GEM_BUG_ON(!dev_priv->gt.awake);
- intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
+ intel_uncore_forcewake_get(dev_priv, execlists->fw_domains);
/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
* imposing the cost of a locked atomic transaction when submitting a
* new request (outside of the context-switch interrupt).
*/
while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
- u32 __iomem *csb_mmio =
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
- u32 __iomem *buf =
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
+ /* The HWSP contains a (cacheable) mirror of the CSB */
+ const u32 *buf =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
unsigned int head, tail;
+ if (unlikely(execlists->csb_use_mmio)) {
+ buf = (u32 * __force)
+ (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_head = -1; /* force mmio read of CSB ptrs */
+ }
+
/* The write will be ordered by the uncached read (itself
* a memory barrier), so we do not need another in the form
* of a locked instruction. The race between the interrupt
@@ -562,9 +813,20 @@ static void intel_lrc_irq_handler(unsigned long data)
* is set and we do a new loop.
*/
__clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- head = readl(csb_mmio);
- tail = GEN8_CSB_WRITE_PTR(head);
- head = GEN8_CSB_READ_PTR(head);
+ if (unlikely(execlists->csb_head == -1)) { /* following a reset */
+ head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ tail = GEN8_CSB_WRITE_PTR(head);
+ head = GEN8_CSB_READ_PTR(head);
+ execlists->csb_head = head;
+ } else {
+ const int write_idx =
+ intel_hws_csb_write_index(dev_priv) -
+ I915_HWS_CSB_BUF0_INDEX;
+
+ head = execlists->csb_head;
+ tail = READ_ONCE(buf[write_idx]);
+ }
+
while (head != tail) {
struct drm_i915_gem_request *rq;
unsigned int status;
@@ -590,13 +852,35 @@ static void intel_lrc_irq_handler(unsigned long data)
* status notifier.
*/
- status = readl(buf + 2 * head);
+ status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
+ buf[2*head + 1] == PREEMPT_ID) {
+ execlist_cancel_port_requests(execlists);
+
+ spin_lock_irq(&engine->timeline->lock);
+ unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->timeline->lock);
+
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT));
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ continue;
+ }
+
+ if (status & GEN8_CTX_STATUS_PREEMPTED &&
+ execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT))
+ continue;
+
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_USER));
+
/* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) !=
- port->context_id);
+ GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
rq = port_unpack(port, &count);
GEM_BUG_ON(count == 0);
@@ -608,8 +892,7 @@ static void intel_lrc_irq_handler(unsigned long data)
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- port[0] = port[1];
- memset(&port[1], 0, sizeof(port[1]));
+ execlists_port_complete(execlists, port);
} else {
port_set(port, port_pack(rq, count));
}
@@ -617,80 +900,33 @@ static void intel_lrc_irq_handler(unsigned long data)
/* After the final element, the hw should be idle */
GEM_BUG_ON(port_count(port) == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+ if (port_count(port) == 0)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_USER);
}
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
- csb_mmio);
+ if (head != execlists->csb_head) {
+ execlists->csb_head = head;
+ writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
+ dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ }
}
- if (execlists_elsp_ready(engine))
+ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
execlists_dequeue(engine);
- intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+ intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
}
-static bool
-insert_request(struct intel_engine_cs *engine,
- struct i915_priotree *pt,
- int prio)
+static void insert_request(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
{
- struct i915_priolist *p;
- struct rb_node **parent, *rb;
- bool first = true;
-
- if (unlikely(engine->no_priolist))
- prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
- /* most positive priority is scheduled first, equal priorities fifo */
- rb = NULL;
- parent = &engine->execlist_queue.rb_node;
- while (*parent) {
- rb = *parent;
- p = rb_entry(rb, typeof(*p), node);
- if (prio > p->priority) {
- parent = &rb->rb_left;
- } else if (prio < p->priority) {
- parent = &rb->rb_right;
- first = false;
- } else {
- list_add_tail(&pt->link, &p->requests);
- return false;
- }
- }
-
- if (prio == I915_PRIORITY_NORMAL) {
- p = &engine->default_priolist;
- } else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
- /* Convert an allocation failure to a priority bump */
- if (unlikely(!p)) {
- prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
- /* To maintain ordering with all rendering, after an
- * allocation failure we have to disable all scheduling.
- * Requests will then be executed in fifo, and schedule
- * will ensure that dependencies are emitted in fifo.
- * There will be still some reordering with existing
- * requests, so if userspace lied about their
- * dependencies that reordering may be visible.
- */
- engine->no_priolist = true;
- goto find_priolist;
- }
- }
+ struct i915_priolist *p = lookup_priolist(engine, pt, prio);
- p->priority = prio;
- rb_link_node(&p->node, rb, parent);
- rb_insert_color(&p->node, &engine->execlist_queue);
-
- INIT_LIST_HEAD(&p->requests);
- list_add_tail(&pt->link, &p->requests);
-
- if (first)
- engine->execlist_first = &p->node;
-
- return first;
+ list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
+ if (ptr_unmask_bits(p, 1))
+ tasklet_hi_schedule(&engine->execlists.irq_tasklet);
}
static void execlists_submit_request(struct drm_i915_gem_request *request)
@@ -701,24 +937,23 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- if (insert_request(engine,
- &request->priotree,
- request->priotree.priority)) {
- if (execlists_elsp_ready(engine))
- tasklet_hi_schedule(&engine->irq_tasklet);
- }
+ insert_request(engine, &request->priotree, request->priotree.priority);
- GEM_BUG_ON(!engine->execlist_first);
+ GEM_BUG_ON(!engine->execlists.first);
GEM_BUG_ON(list_empty(&request->priotree.link));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
+static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+{
+ return container_of(pt, struct drm_i915_gem_request, priotree);
+}
+
static struct intel_engine_cs *
pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
{
- struct intel_engine_cs *engine =
- container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+ struct intel_engine_cs *engine = pt_to_request(pt)->engine;
GEM_BUG_ON(!locked);
@@ -737,6 +972,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
struct i915_dependency stack;
LIST_HEAD(dfs);
+ GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
if (prio <= READ_ONCE(request->priotree.priority))
return;
@@ -772,6 +1009,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
* engines.
*/
list_for_each_entry(p, &pt->signalers_list, signal_link) {
+ if (i915_gem_request_completed(pt_to_request(p->signaler)))
+ continue;
+
GEM_BUG_ON(p->signaler->priority < pt->priority);
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
@@ -785,7 +1025,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
* execlists_submit_request()), we can set our own priority and skip
* acquiring the engine locks.
*/
- if (request->priotree.priority == INT_MIN) {
+ if (request->priotree.priority == I915_PRIORITY_INVALID) {
GEM_BUG_ON(!list_empty(&request->priotree.link));
request->priotree.priority = prio;
if (stack.dfs_link.next == stack.dfs_link.prev)
@@ -815,8 +1055,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
}
spin_unlock_irq(&engine->timeline->lock);
-
- /* XXX Do we need to preempt to make room for us and our deps? */
}
static struct intel_ring *
@@ -866,6 +1104,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
i915_ggtt_offset(ce->ring->vma);
ce->state->obj->mm.dirty = true;
+ ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
out:
@@ -893,6 +1132,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
intel_ring_unpin(ce->ring);
+ ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj);
i915_vma_unpin(ce->state);
@@ -914,27 +1154,14 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- if (i915.enable_guc_submission) {
- /*
- * Check that the GuC has space for the request before
- * going any further, as the i915_add_request() call
- * later on mustn't fail ...
- */
- ret = i915_guc_wq_reserve(request);
- if (ret)
- goto err;
- }
-
cs = intel_ring_begin(request, 0);
- if (IS_ERR(cs)) {
- ret = PTR_ERR(cs);
- goto err_unreserve;
- }
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
if (!ce->initialised) {
ret = engine->init_context(request);
if (ret)
- goto err_unreserve;
+ return ret;
ce->initialised = true;
}
@@ -948,12 +1175,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
request->reserved_space -= EXECLISTS_REQUEST_SIZE;
return 0;
-
-err_unreserve:
- if (i915.enable_guc_submission)
- i915_guc_wq_unreserve(request);
-err:
- return ret;
}
/*
@@ -1031,6 +1252,8 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
i915_ggtt_offset(engine->scratch) +
2 * CACHELINE_BYTES);
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
/* Pad to end of cacheline */
while ((unsigned long)batch % CACHELINE_BYTES)
*batch++ = MI_NOOP;
@@ -1044,26 +1267,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-/*
- * This batch is started immediately after indirect_ctx batch. Since we ensure
- * that indirect_ctx ends on a cacheline this batch is aligned automatically.
- *
- * The number of DWORDS written are returned using this field.
- *
- * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
- * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
- */
-static u32 *gen8_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
- /* WaDisableCtxRestoreArbitration:bdw,chv */
- *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- *batch++ = MI_BATCH_BUFFER_END;
-
- return batch;
-}
-
static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
@@ -1109,6 +1316,8 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
*batch++ = 0;
}
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
/* Pad to end of cacheline */
while ((unsigned long)batch % CACHELINE_BYTES)
*batch++ = MI_NOOP;
@@ -1116,13 +1325,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 *gen9_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
- *batch++ = MI_BATCH_BUFFER_END;
-
- return batch;
-}
-
#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
@@ -1175,13 +1377,15 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
+ case 10:
+ return 0;
case 9:
wa_bb_fn[0] = gen9_init_indirectctx_bb;
- wa_bb_fn[1] = gen9_init_perctx_bb;
+ wa_bb_fn[1] = NULL;
break;
case 8:
wa_bb_fn[0] = gen8_init_indirectctx_bb;
- wa_bb_fn[1] = gen8_init_perctx_bb;
+ wa_bb_fn[1] = NULL;
break;
default:
MISSING_CASE(INTEL_GEN(engine->i915));
@@ -1208,7 +1412,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
ret = -EINVAL;
break;
}
- batch_ptr = wa_bb_fn[i](engine, batch_ptr);
+ if (wa_bb_fn[i])
+ batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
@@ -1232,9 +1437,7 @@ static u8 gtiir[] = {
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct execlist_port *port = engine->execlist_port;
- unsigned int n;
- bool submit;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_mocs_init_engine(engine);
@@ -1267,24 +1470,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ execlists->csb_head = -1;
+ execlists->active = 0;
/* After a GPU reset, we may have requests to replay */
- submit = false;
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
- if (!port_isset(&port[n]))
- break;
-
- DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n",
- engine->name, n,
- port_request(&port[n])->global_seqno);
-
- /* Discard the current inflight count */
- port_set(&port[n], port_request(&port[n]));
- submit = true;
- }
-
- if (submit && !i915.enable_guc_submission)
- execlists_submit_ports(engine);
+ if (!i915_modparams.enable_guc_submission && execlists->first)
+ tasklet_schedule(&execlists->irq_tasklet);
return 0;
}
@@ -1325,9 +1516,11 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
static void reset_common_ring(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
- unsigned int n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
/*
* Catch up with any missed context-switch interrupts.
@@ -1338,20 +1531,12 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* guessing the missed context-switch events by looking at what
* requests were completed.
*/
- if (!request) {
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
- i915_gem_request_put(port_request(&port[n]));
- memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- return;
- }
+ execlist_cancel_port_requests(execlists);
- if (request->ctx != port_request(port)->ctx) {
- i915_gem_request_put(port_request(port));
- port[0] = port[1];
- memset(&port[1], 0, sizeof(port[1]));
- }
+ /* Push back any incomplete requests for replay after the reset. */
+ unwind_incomplete_requests(engine);
- GEM_BUG_ON(request->ctx != port_request(port)->ctx);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
/* If the request was innocent, we leave the request in the ELSP
* and will try to replay it on restarting. The context image may
@@ -1363,7 +1548,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
- if (request->fence.error != -EIO)
+ if (!request || request->fence.error != -EIO)
return;
/* We want a simple context + ring to execute the breadcrumb update.
@@ -1386,10 +1571,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
intel_ring_update_space(request->ring);
/* Reset WaIdleLiteRestore:bdw,skl as well */
- request->tail =
- intel_ring_wrap(request->ring,
- request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
- assert_ring_tail_valid(request->ring, request->tail);
+ unwind_wa_tail(request);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1448,13 +1630,31 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
if (IS_ERR(cs))
return PTR_ERR(cs);
+ /*
+ * WaDisableCtxRestoreArbitration:bdw,chv
+ *
+ * We don't need to perform MI_ARB_ENABLE as often as we do (in
+ * particular all the gen that do not need the w/a at all!), if we
+ * took care to make sure that on every switch into this context
+ * (both ordinary and for preemption) that arbitrartion was enabled
+ * we would be fine. However, there doesn't seem to be a downside to
+ * being paranoid and making sure it is set before each batch and
+ * every context-switch.
+ *
+ * Note that if we fail to enable arbitration before the request
+ * is complete, then we do not see the context-switch interrupt and
+ * the engine hangs (with RING_HEAD == RING_TAIL).
+ *
+ * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
+ */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
/* FIXME(BDW): Address space and security selectors. */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- *cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
@@ -1583,7 +1783,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
*/
static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
{
- *cs++ = MI_NOOP;
+ /* Ensure there's always at least one preemption point per-request. */
+ *cs++ = MI_ARB_CHECK;
*cs++ = MI_NOOP;
request->wa_tail = intel_ring_offset(request, cs);
}
@@ -1604,7 +1805,6 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
gen8_emit_wa_tail(request, cs);
}
-
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
@@ -1632,7 +1832,6 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
gen8_emit_wa_tail(request, cs);
}
-
static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
@@ -1666,8 +1865,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
* Tasklet cannot be active at this point due intel_mark_active/idle
* so this is just for documentation.
*/
- if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
- tasklet_kill(&engine->irq_tasklet);
+ if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
+ tasklet_kill(&engine->execlists.irq_tasklet);
dev_priv = engine->i915;
@@ -1678,11 +1877,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- if (engine->status_page.vma) {
- i915_gem_object_unpin_map(engine->status_page.vma->obj);
- engine->status_page.vma = NULL;
- }
-
intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx(engine);
@@ -1694,8 +1888,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
+ engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
- engine->irq_tasklet.func = intel_lrc_irq_handler;
+ engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
}
static void
@@ -1729,24 +1924,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-static int
-lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
-{
- const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
- void *hws;
-
- /* The HWSP is part of the default context object in LRC mode. */
- hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(hws))
- return PTR_ERR(hws);
-
- engine->status_page.page_addr = hws + hws_offset;
- engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
- engine->status_page.vma = vma;
-
- return 0;
-}
-
static void
logical_ring_setup(struct intel_engine_cs *engine)
{
@@ -1770,32 +1947,23 @@ logical_ring_setup(struct intel_engine_cs *engine)
RING_CONTEXT_STATUS_BUF_BASE(engine),
FW_REG_READ);
- engine->fw_domains = fw_domains;
+ engine->execlists.fw_domains = fw_domains;
- tasklet_init(&engine->irq_tasklet,
+ tasklet_init(&engine->execlists.irq_tasklet,
intel_lrc_irq_handler, (unsigned long)engine);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
}
-static int
-logical_ring_init(struct intel_engine_cs *engine)
+static int logical_ring_init(struct intel_engine_cs *engine)
{
- struct i915_gem_context *dctx = engine->i915->kernel_context;
int ret;
ret = intel_engine_init_common(engine);
if (ret)
goto error;
- /* And setup the hardware status page. */
- ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
- if (ret) {
- DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
- goto error;
- }
-
return 0;
error:
@@ -1953,13 +2121,12 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
if (rcs) {
- CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+
CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
RING_INDIRECT_CTX_OFFSET(base), 0);
-
- if (engine->wa_ctx.vma) {
- struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+ if (wa_ctx->indirect_ctx.size) {
u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
regs[CTX_RCS_INDIRECT_CTX + 1] =
@@ -1968,6 +2135,11 @@ static void execlists_init_reg_state(u32 *regs,
regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
intel_lr_indirect_ctx_offset(engine) << 6;
+ }
+
+ CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
+ if (wa_ctx->per_ctx.size) {
+ u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
regs[CTX_BB_PER_CTX_PTR + 1] =
(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
@@ -2052,8 +2224,11 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
- /* One extra page as the sharing data between driver and GuC */
- context_size += PAGE_SIZE * LRC_PPHWSP_PN;
+ /*
+ * Before the actual start of the context image, we insert a few pages
+ * for our own use and for sharing with the GuC.
+ */
+ context_size += LRC_HEADER_PAGES * PAGE_SIZE;
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
if (IS_ERR(ctx_obj)) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 57ef5833c427..689fde1a63a9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,7 @@
#define _INTEL_LRC_H_
#include "intel_ringbuffer.h"
+#include "i915_gem_context.h"
#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
@@ -60,6 +61,7 @@
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
INTEL_CONTEXT_SCHEDULE_OUT,
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED,
};
/* Logical Rings */
@@ -69,17 +71,42 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
-/* One extra page is added before LRC for GuC as shared data */
+/*
+ * We allocate a header at the start of the context image for our own
+ * use, therefore the actual location of the logical state is offset
+ * from the start of the VMA. The layout is
+ *
+ * | [guc] | [hwsp] [logical state] |
+ * |<- our header ->|<- context image ->|
+ *
+ */
+/* The first page is used for sharing data with the GuC */
#define LRC_GUCSHR_PN (0)
-#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
-#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
+#define LRC_GUCSHR_SZ (1)
+/* At the start of the context image is its per-process HWS page */
+#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
+#define LRC_PPHWSP_SZ (1)
+/* Finally we have the logical state for the context */
+#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
+
+/*
+ * Currently we include the PPHWSP in __intel_engine_context_size() so
+ * the size of the header is synonymous with the start of the PPHWSP.
+ */
+#define LRC_HEADER_PAGES LRC_PPHWSP_PN
struct drm_i915_private;
struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+
+static inline uint64_t
+intel_lr_context_descriptor(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ return ctx->engine[engine->id].lrc_desc;
+}
+
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index beb9baaf2f2e..dcbc786479f9 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -56,7 +56,7 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
if (drm_lspcon_get_mode(adapter, &current_mode)) {
- DRM_ERROR("Error reading LSPCON mode\n");
+ DRM_DEBUG_KMS("Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID;
}
return current_mode;
@@ -68,16 +68,15 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode current_mode;
current_mode = lspcon_get_current_mode(lspcon);
- if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID)
+ if (current_mode == mode)
goto out;
DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
- current_mode == DRM_LSPCON_MODE_INVALID, 100);
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
if (current_mode != mode)
- DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
+ DRM_ERROR("LSPCON mode hasn't settled\n");
out:
DRM_DEBUG_KMS("Current LSPCON mode %s\n",
@@ -133,6 +132,7 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
+ int retry;
enum drm_dp_dual_mode_type adaptor_type;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
enum drm_lspcon_mode expected_mode;
@@ -141,10 +141,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
/* Lets probe the adaptor and check its type */
- adaptor_type = drm_dp_dual_mode_detect(adapter);
+ for (retry = 0; retry < 6; retry++) {
+ if (retry)
+ usleep_range(500, 1000);
+
+ adaptor_type = drm_dp_dual_mode_detect(adapter);
+ if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
+ break;
+ }
+
if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
- drm_dp_get_dual_mode_type_name(adaptor_type));
+ drm_dp_get_dual_mode_type_name(adaptor_type));
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8e215777c7f4..38572d65e46e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -229,8 +229,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -306,8 +306,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
* Sets the power state for the panel.
*/
static void intel_enable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -324,8 +324,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
}
static void intel_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -339,8 +339,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
}
static void gmch_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
@@ -349,15 +349,15 @@ static void gmch_disable_lvds(struct intel_encoder *encoder,
}
static void pch_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}
static void pch_post_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
}
@@ -880,8 +880,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
- if (i915.lvds_channel_mode > 0)
- return i915.lvds_channel_mode == 2;
+ if (i915_modparams.lvds_channel_mode > 0)
+ return i915_modparams.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
@@ -939,10 +939,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
- struct intel_crtc *crtc;
i915_reg_t lvds_reg;
u32 lvds;
- int pipe;
u8 pin;
u32 allowed_scalers;
@@ -1113,22 +1111,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* on. If so, assume that whatever is currently programmed is the
* correct mode.
*/
-
- /* Ironlake: FIXME if still fail, not try pipe mode now */
- if (HAS_PCH_SPLIT(dev_priv))
- goto failed;
-
- pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
- if (crtc && (lvds & LVDS_PORT_EN)) {
- fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
- if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
- drm_mode_debug_printmodeline(fixed_mode);
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- goto out;
- }
+ fixed_mode = intel_encoder_current_mode(intel_encoder);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
/* If we still don't have a mode after all that, give up. */
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 98154efcb2f4..1d946240e55f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -921,7 +921,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
const struct firmware *fw = NULL;
- const char *name = i915.vbt_firmware;
+ const char *name = i915_modparams.vbt_firmware;
int ret;
if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index aace22e7ccac..1b397b41cb4f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1134,7 +1134,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (!params)
return -ENOMEM;
- drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+ drmmode_crtc = drm_crtc_find(dev, file_priv, put_image_rec->crtc_id);
if (!drmmode_crtc) {
ret = -ENOENT;
goto out_free;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 3b1c5d783ee7..adc51e452e3e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -379,13 +379,13 @@ enum drm_connector_status
intel_panel_detect(struct drm_i915_private *dev_priv)
{
/* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+ if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
return *dev_priv->opregion.lid_state & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}
- switch (i915.panel_ignore_lid) {
+ switch (i915_modparams.panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
@@ -465,10 +465,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
WARN_ON(panel->backlight.max == 0);
- if (i915.invert_brightness < 0)
+ if (i915_modparams.invert_brightness < 0)
return val;
- if (i915.invert_brightness > 0 ||
+ if (i915_modparams.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val + panel->backlight.min;
}
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 8fbd2bd0877f..899839f2f7c6 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -206,11 +206,11 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- int i;
+ enum pipe pipe;
- for (i = 0; i < I915_MAX_PIPES; i++)
- seq_printf(m, "%c %s\n", pipe_name(i),
- pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+ for_each_pipe(dev_priv, pipe)
+ seq_printf(m, "%c %s\n", pipe_name(pipe),
+ pipe_crc_source_name(dev_priv->pipe_crc[pipe].source));
return 0;
}
@@ -506,8 +506,8 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
- bool enable)
+static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+ bool enable)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
@@ -533,10 +533,24 @@ retry:
goto put_state;
}
- pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
- pipe_config->base.connectors_changed = true;
+ if (HAS_IPS(dev_priv)) {
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ pipe_config->ips_force_disable = enable;
+ if (pipe_config->ips_enabled == enable)
+ pipe_config->base.connectors_changed = true;
+ }
+
+ if (IS_HASWELL(dev_priv)) {
+ pipe_config->pch_pfit.force_thru = enable;
+ if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+ pipe_config->pch_pfit.enabled != enable)
+ pipe_config->base.connectors_changed = true;
+ }
ret = drm_atomic_commit(state);
@@ -570,8 +584,9 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
- if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
+ if ((IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
+ hsw_pipe_A_crc_wa(dev_priv, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
@@ -606,7 +621,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source source)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -643,14 +657,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
goto out;
}
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- hsw_disable_ips(crtc);
-
spin_lock_irq(&pipe_crc->lock);
kfree(pipe_crc->entries);
pipe_crc->entries = entries;
@@ -691,10 +697,9 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
g4x_undo_pipe_scramble_reset(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
-
- hsw_enable_ips(crtc);
+ else if ((IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
+ hsw_pipe_A_crc_wa(dev_priv, false);
}
ret = 0;
@@ -770,11 +775,12 @@ display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
return -EINVAL;
}
-static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
+ const char *buf, enum pipe *pipe)
{
const char name = buf[0];
- if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+ if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes))
return -EINVAL;
*pipe = name - 'A';
@@ -823,7 +829,7 @@ static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+ if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) {
DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
return -EINVAL;
}
@@ -914,7 +920,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
u32 val = 0; /* shut up gcc */
@@ -935,16 +940,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
if (ret != 0)
goto out;
- if (source) {
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- hsw_disable_ips(intel_crtc);
- }
-
I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
@@ -953,10 +948,9 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
-
- hsw_enable_ips(intel_crtc);
+ else if ((IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
+ hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index cb950752c346..f0d0dbab4150 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -58,24 +58,23 @@
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ if (HAS_LLC(dev_priv)) {
+ /*
+ * WaCompressedResourceDisplayNewHashMode:skl,kbl
+ * Display WA#0390: skl,kbl
+ *
+ * Must match Sampler, Pixel Back End, and Media. See
+ * WaCompressedResourceSamplerPbeMediaNewHashMode.
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) |
+ SKL_DE_COMPRESSED_HASH_MODE);
+ }
+
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
- /*
- * Display WA#0390: skl,bxt,kbl,glk
- *
- * Must match Sampler, Pixel Back End, and Media
- * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31).
- *
- * Including bits outside the page in the hash would
- * require 2 (or 4?) MiB alignment of resources. Just
- * assume the defaul hashing mode which only uses bits
- * within the page.
- */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE);
-
I915_WRITE(GEN8_CONFIG0,
I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
@@ -317,7 +316,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if (enable)
@@ -332,14 +331,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
if (enable)
@@ -348,7 +347,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
val &= ~DSP_MAXFIFO_PM5_ENABLE;
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
#define FW_WM(value, plane) \
@@ -1322,21 +1321,21 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int num_active_planes = hweight32(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
const struct g4x_pipe_wm *raw;
- struct intel_plane_state *plane_state;
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
enum plane_id plane_id;
int i, level;
unsigned int dirty = 0;
- for_each_intel_plane_in_state(state, plane, plane_state, i) {
- const struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.crtc != &crtc->base &&
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->base.crtc != &crtc->base &&
old_plane_state->base.crtc != &crtc->base)
continue;
- if (g4x_raw_plane_wm_compute(crtc_state, plane_state))
+ if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
dirty |= BIT(plane->id);
}
@@ -1831,21 +1830,21 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int num_active_planes = hweight32(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
- struct intel_plane_state *plane_state;
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
enum plane_id plane_id;
int level, ret, i;
unsigned int dirty = 0;
- for_each_intel_plane_in_state(state, plane, plane_state, i) {
- const struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.crtc != &crtc->base &&
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->base.crtc != &crtc->base &&
old_plane_state->base.crtc != &crtc->base)
continue;
- if (vlv_raw_plane_wm_compute(crtc_state, plane_state))
+ if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
dirty |= BIT(plane->id);
}
@@ -1864,7 +1863,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
/* cursor changes don't warrant a FIFO recompute */
if (dirty & ~BIT(PLANE_CURSOR)) {
const struct intel_crtc_state *old_crtc_state =
- to_intel_crtc_state(crtc->base.state);
+ intel_atomic_get_old_crtc_state(state, crtc);
const struct vlv_fifo_state *old_fifo_state =
&old_crtc_state->wm.vlv.fifo_state;
@@ -2785,11 +2784,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
@@ -2806,11 +2805,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
return;
@@ -3119,7 +3118,11 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc_state *newstate)
{
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
- struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(newstate->base.state);
+ const struct intel_crtc_state *oldstate =
+ intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
+ const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
int level, max_level = ilk_wm_max_level(to_i915(dev));
/*
@@ -3128,6 +3131,9 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
+ if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+ return 0;
+
a->pipe_enabled |= b->pipe_enabled;
a->sprites_enabled |= b->sprites_enabled;
a->sprites_scaled |= b->sprites_scaled;
@@ -3594,13 +3600,13 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Enabling the SAGV\n");
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
/* We don't need to wait for the SAGV when enabling */
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
@@ -3631,14 +3637,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Disabling the SAGV\n");
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
@@ -4361,134 +4367,147 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
downscale_amount);
}
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- uint16_t ddb_allocation,
- int level,
- uint16_t *out_blocks, /* out */
- uint8_t *out_lines, /* out */
- bool *enabled /* out */)
+static int
+skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ struct skl_wm_params *wp)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
- uint32_t latency = dev_priv->wm.skl_latency[level];
- uint_fixed_16_16_t method1, method2;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t selected_result;
uint32_t interm_pbpl;
- uint32_t plane_bytes_per_line;
- uint32_t res_blocks, res_lines;
- uint8_t cpp;
- uint32_t width = 0;
- uint32_t plane_pixel_rate;
- uint_fixed_16_16_t y_tile_minimum;
- uint32_t y_min_scanlines;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- bool y_tiled, x_tiled;
- if (latency == 0 ||
- !intel_wm_plane_visible(cstate, intel_pstate)) {
- *enabled = false;
+ if (!intel_wm_plane_visible(cstate, intel_pstate))
return 0;
- }
-
- y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
-
- /* Display WA #1141: kbl,cfl */
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
- dev_priv->ipc_enabled)
- latency += 4;
- if (apply_memory_bw_wa && x_tiled)
- latency += 15;
+ wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
if (plane->id == PLANE_CURSOR) {
- width = intel_pstate->base.crtc_w;
+ wp->width = intel_pstate->base.crtc_w;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
+ wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
- fb->format->cpp[0];
- plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+ wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
+ fb->format->cpp[0];
+ wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
+ intel_pstate);
if (drm_rotation_90_or_270(pstate->rotation)) {
- switch (cpp) {
+ switch (wp->cpp) {
case 1:
- y_min_scanlines = 16;
+ wp->y_min_scanlines = 16;
break;
case 2:
- y_min_scanlines = 8;
+ wp->y_min_scanlines = 8;
break;
case 4:
- y_min_scanlines = 4;
+ wp->y_min_scanlines = 4;
break;
default:
- MISSING_CASE(cpp);
+ MISSING_CASE(wp->cpp);
return -EINVAL;
}
} else {
- y_min_scanlines = 4;
+ wp->y_min_scanlines = 4;
}
if (apply_memory_bw_wa)
- y_min_scanlines *= 2;
+ wp->y_min_scanlines *= 2;
- plane_bytes_per_line = width * cpp;
- if (y_tiled) {
- interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
- y_min_scanlines, 512);
+ wp->plane_bytes_per_line = wp->width * wp->cpp;
+ if (wp->y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
+ wp->y_min_scanlines, 512);
if (INTEL_GEN(dev_priv) >= 10)
interm_pbpl++;
- plane_blocks_per_line = div_fixed16(interm_pbpl,
- y_min_scanlines);
- } else if (x_tiled && INTEL_GEN(dev_priv) == 9) {
- interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
- plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
+ wp->y_min_scanlines);
+ } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512);
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else {
- interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
- plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512) + 1;
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
}
- method1 = skl_wm_method1(dev_priv, plane_pixel_rate, cpp, latency);
- method2 = skl_wm_method2(plane_pixel_rate,
+ wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
+ wp->plane_blocks_per_line);
+ wp->linetime_us = fixed16_to_u32_round_up(
+ intel_get_linetime_us(cstate));
+
+ return 0;
+}
+
+static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ uint16_t ddb_allocation,
+ int level,
+ const struct skl_wm_params *wp,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines, /* out */
+ bool *enabled /* out */)
+{
+ const struct drm_plane_state *pstate = &intel_pstate->base;
+ uint32_t latency = dev_priv->wm.skl_latency[level];
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t selected_result;
+ uint32_t res_blocks, res_lines;
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(cstate->base.state);
+ bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+
+ if (latency == 0 ||
+ !intel_wm_plane_visible(cstate, intel_pstate)) {
+ *enabled = false;
+ return 0;
+ }
+
+ /* Display WA #1141: kbl,cfl */
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
+ IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
+ dev_priv->ipc_enabled)
+ latency += 4;
+
+ if (apply_memory_bw_wa && wp->x_tiled)
+ latency += 15;
+
+ method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
+ wp->cpp, latency);
+ method2 = skl_wm_method2(wp->plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
latency,
- plane_blocks_per_line);
-
- y_tile_minimum = mul_u32_fixed16(y_min_scanlines,
- plane_blocks_per_line);
+ wp->plane_blocks_per_line);
- if (y_tiled) {
- selected_result = max_fixed16(method2, y_tile_minimum);
+ if (wp->y_tiled) {
+ selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
- uint32_t linetime_us;
-
- linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(cstate));
- if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
- (plane_bytes_per_line / 512 < 1))
+ if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
+ 512 < 1) && (wp->plane_bytes_per_line / 512 < 1))
selected_result = method2;
else if (ddb_allocation >=
- fixed16_to_u32_round_up(plane_blocks_per_line))
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line))
selected_result = min_fixed16(method1, method2);
- else if (latency >= linetime_us)
+ else if (latency >= wp->linetime_us)
selected_result = min_fixed16(method1, method2);
else
selected_result = method1;
@@ -4496,19 +4515,18 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
res_lines = div_round_up_fixed16(selected_result,
- plane_blocks_per_line);
+ wp->plane_blocks_per_line);
/* Display WA #1125: skl,bxt,kbl,glk */
- if (level == 0 &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
- res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
+ if (level == 0 && wp->rc_surface)
+ res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
/* Display WA #1126: skl,bxt,kbl,glk */
if (level >= 1 && level <= 7) {
- if (y_tiled) {
- res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
- res_lines += y_min_scanlines;
+ if (wp->y_tiled) {
+ res_blocks += fixed16_to_u32_round_up(
+ wp->y_tile_minimum);
+ res_lines += wp->y_min_scanlines;
} else {
res_blocks++;
}
@@ -4546,6 +4564,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
+ const struct skl_wm_params *wm_params,
struct skl_plane_wm *wm)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4569,6 +4588,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
intel_pstate,
ddb_blocks,
level,
+ wm_params,
&result->plane_res_b,
&result->plane_res_l,
&result->plane_en);
@@ -4594,20 +4614,65 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
- /* Display WA #1135: bxt. */
- if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
- linetime_wm = DIV_ROUND_UP(linetime_wm, 2);
+ /* Display WA #1135: bxt:ALL GLK:ALL */
+ if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
+ dev_priv->ipc_enabled)
+ linetime_wm /= 2;
return linetime_wm;
}
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
+ struct skl_wm_params *wp,
+ struct skl_wm_level *wm_l0,
+ uint16_t ddb_allocation,
struct skl_wm_level *trans_wm /* out */)
{
+ struct drm_device *dev = cstate->base.crtc->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+ uint16_t trans_min, trans_y_tile_min;
+ const uint16_t trans_amount = 10; /* This is configurable amount */
+ uint16_t trans_offset_b, res_blocks;
+
if (!cstate->base.active)
+ goto exit;
+
+ /* Transition WM are not recommended by HW team for GEN9 */
+ if (INTEL_GEN(dev_priv) <= 9)
+ goto exit;
+
+ /* Transition WM don't make any sense if ipc is disabled */
+ if (!dev_priv->ipc_enabled)
+ goto exit;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ trans_min = 4;
+
+ trans_offset_b = trans_min + trans_amount;
+
+ if (wp->y_tiled) {
+ trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
+ wp->y_tile_minimum);
+ res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+ trans_offset_b;
+ } else {
+ res_blocks = wm_l0->plane_res_b + trans_offset_b;
+
+ /* WA BUG:1938466 add one block for non y-tile planes */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
+ res_blocks += 1;
+
+ }
+
+ res_blocks += 1;
+
+ if (res_blocks < ddb_allocation) {
+ trans_wm->plane_res_b = res_blocks;
+ trans_wm->plane_en = true;
return;
+ }
- /* Until we know more, just disable transition WMs */
+exit:
trans_wm->plane_en = false;
}
@@ -4633,14 +4698,25 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
enum plane_id plane_id = to_intel_plane(plane)->id;
+ struct skl_wm_params wm_params;
+ enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
+ uint16_t ddb_blocks;
wm = &pipe_wm->planes[plane_id];
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+ memset(&wm_params, 0, sizeof(struct skl_wm_params));
+
+ ret = skl_compute_plane_wm_params(dev_priv, cstate,
+ intel_pstate, &wm_params);
+ if (ret)
+ return ret;
ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, wm);
+ intel_pstate, &wm_params, wm);
if (ret)
return ret;
- skl_compute_transition_wm(cstate, &wm->trans_wm);
+ skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
+ ddb_blocks, &wm->trans_wm);
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -4736,16 +4812,18 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry **entries,
const struct skl_ddb_entry *ddb,
int ignore)
{
- int i;
+ enum pipe pipe;
- for (i = 0; i < I915_MAX_PIPES; i++)
- if (i != ignore && entries[i] &&
- skl_ddb_entries_overlap(ddb, entries[i]))
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe != ignore && entries[pipe] &&
+ skl_ddb_entries_overlap(ddb, entries[pipe]))
return true;
+ }
return false;
}
@@ -5535,7 +5613,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
if (val & DSP_MAXFIFO_PM5_ENABLE)
@@ -5565,7 +5643,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
for_each_intel_crtc(dev, crtc) {
@@ -5669,12 +5747,30 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
+/*
+ * FIXME should probably kill this and improve
+ * the real watermark readout/sanitation instead
+ */
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+}
+
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
+ ilk_init_lp_watermarks(dev_priv);
+
for_each_crtc(dev, crtc)
ilk_pipe_wm_get_hw_state(crtc);
@@ -5739,6 +5835,36 @@ void intel_update_watermarks(struct intel_crtc *crtc)
dev_priv->display.update_wm(crtc);
}
+void intel_enable_ipc(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(dev_priv)) {
+ dev_priv->ipc_enabled = false;
+ return;
+ }
+
+ val = I915_READ(DISP_ARB_CTL2);
+
+ if (dev_priv->ipc_enabled)
+ val |= DISP_IPC_ENABLE;
+ else
+ val &= ~DISP_IPC_ENABLE;
+
+ I915_WRITE(DISP_ARB_CTL2, val);
+}
+
+void intel_init_ipc(struct drm_i915_private *dev_priv)
+{
+ dev_priv->ipc_enabled = false;
+ if (!HAS_IPC(dev_priv))
+ return;
+
+ dev_priv->ipc_enabled = true;
+ intel_enable_ipc(dev_priv);
+}
+
/*
* Lock protecting IPS related data structures
*/
@@ -5872,6 +5998,7 @@ static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
*/
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 limits;
/* Only set the down limit when we've reached the lowest level to avoid
@@ -5881,13 +6008,13 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
if (INTEL_GEN(dev_priv) >= 9) {
- limits = (dev_priv->rps.max_freq_softlimit) << 23;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+ limits = (rps->max_freq_softlimit) << 23;
+ if (val <= rps->min_freq_softlimit)
+ limits |= (rps->min_freq_softlimit) << 14;
} else {
- limits = dev_priv->rps.max_freq_softlimit << 24;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= dev_priv->rps.min_freq_softlimit << 16;
+ limits = rps->max_freq_softlimit << 24;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 16;
}
return limits;
@@ -5895,39 +6022,40 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int new_power;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
- new_power = dev_priv->rps.power;
- switch (dev_priv->rps.power) {
+ new_power = rps->power;
+ switch (rps->power) {
case LOW_POWER:
- if (val > dev_priv->rps.efficient_freq + 1 &&
- val > dev_priv->rps.cur_freq)
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
new_power = BETWEEN;
break;
case BETWEEN:
- if (val <= dev_priv->rps.efficient_freq &&
- val < dev_priv->rps.cur_freq)
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
new_power = LOW_POWER;
- else if (val >= dev_priv->rps.rp0_freq &&
- val > dev_priv->rps.cur_freq)
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
new_power = HIGH_POWER;
break;
case HIGH_POWER:
- if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
- val < dev_priv->rps.cur_freq)
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
new_power = BETWEEN;
break;
}
/* Max/min bins are special */
- if (val <= dev_priv->rps.min_freq_softlimit)
+ if (val <= rps->min_freq_softlimit)
new_power = LOW_POWER;
- if (val >= dev_priv->rps.max_freq_softlimit)
+ if (val >= rps->max_freq_softlimit)
new_power = HIGH_POWER;
- if (new_power == dev_priv->rps.power)
+ if (new_power == rps->power)
return;
/* Note the units here are not exactly 1us, but 1280ns. */
@@ -5990,20 +6118,21 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
- dev_priv->rps.power = new_power;
- dev_priv->rps.up_threshold = threshold_up;
- dev_priv->rps.down_threshold = threshold_down;
- dev_priv->rps.last_adj = 0;
+ rps->power = new_power;
+ rps->up_threshold = threshold_up;
+ rps->down_threshold = threshold_down;
+ rps->last_adj = 0;
}
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 mask = 0;
/* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
- if (val > dev_priv->rps.min_freq_softlimit)
+ if (val > rps->min_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
- if (val < dev_priv->rps.max_freq_softlimit)
+ if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
mask &= dev_priv->pm_rps_events;
@@ -6016,10 +6145,12 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* min/max delay may still have been modified so be sure to
* write the limits value.
*/
- if (val != dev_priv->rps.cur_freq) {
+ if (val != rps->cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
if (INTEL_GEN(dev_priv) >= 9)
@@ -6041,7 +6172,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
- dev_priv->rps.cur_freq = val;
+ rps->cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
return 0;
@@ -6057,7 +6188,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
- if (val != dev_priv->rps.cur_freq) {
+ if (val != dev_priv->gt_pm.rps.cur_freq) {
err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (err)
return err;
@@ -6065,7 +6196,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
gen6_set_rps_thresholds(dev_priv, val);
}
- dev_priv->rps.cur_freq = val;
+ dev_priv->gt_pm.rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
return 0;
@@ -6080,10 +6211,11 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
- u32 val = dev_priv->rps.idle_freq;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u32 val = rps->idle_freq;
int err;
- if (dev_priv->rps.cur_freq <= val)
+ if (rps->cur_freq <= val)
return;
/* The punit delays the write of the frequency and voltage until it
@@ -6108,34 +6240,38 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ mutex_lock(&dev_priv->pcu_lock);
+ if (rps->enabled) {
u8 freq;
if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ gen6_rps_pm_mask(dev_priv, rps->cur_freq));
gen6_enable_rps_interrupts(dev_priv);
/* Use the user's desired frequency as a guide, but for better
* performance, jump directly to RPe as our starting frequency.
*/
- freq = max(dev_priv->rps.cur_freq,
- dev_priv->rps.efficient_freq);
+ freq = max(rps->cur_freq,
+ rps->efficient_freq);
if (intel_set_rps(dev_priv,
clamp(freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit)))
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit)))
DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* Flush our bottom-half so that it does not race with us
* setting the idle frequency and so that it is bounded by
* our rpm wakeref. And then disable the interrupts to stop any
@@ -6143,58 +6279,60 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
*/
gen6_disable_rps_interrupts(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
+ mutex_lock(&dev_priv->pcu_lock);
+ if (rps->enabled) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
- dev_priv->rps.last_adj = 0;
+ gen6_set_rps(dev_priv, rps->idle_freq);
+ rps->last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
void gen6_rps_boost(struct drm_i915_gem_request *rq,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
- struct drm_i915_private *i915 = rq->i915;
+ struct intel_rps *rps = &rq->i915->gt_pm.rps;
+ unsigned long flags;
bool boost;
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
*/
- if (!i915->rps.enabled)
+ if (!rps->enabled)
return;
boost = false;
- spin_lock_irq(&rq->lock);
+ spin_lock_irqsave(&rq->lock, flags);
if (!rq->waitboost && !i915_gem_request_completed(rq)) {
- atomic_inc(&i915->rps.num_waiters);
+ atomic_inc(&rps->num_waiters);
rq->waitboost = true;
boost = true;
}
- spin_unlock_irq(&rq->lock);
+ spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)
return;
- if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
- schedule_work(&i915->rps.work);
+ if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
- atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
+ atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int err;
- lockdep_assert_held(&dev_priv->rps.hw_lock);
- GEM_BUG_ON(val > dev_priv->rps.max_freq);
- GEM_BUG_ON(val < dev_priv->rps.min_freq);
+ lockdep_assert_held(&dev_priv->pcu_lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
- if (!dev_priv->rps.enabled) {
- dev_priv->rps.cur_freq = val;
+ if (!rps->enabled) {
+ rps->cur_freq = val;
return 0;
}
@@ -6217,21 +6355,30 @@ static void gen9_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+{
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN6_RC_CONTROL, 0);
}
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
+static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
- /* we're doing forcewake before Disabling RC6,
+ /* We're doing forcewake before Disabling RC6,
* This what the BIOS expects when going into suspend */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -6240,6 +6387,11 @@ static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -6362,24 +6514,26 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_GEN9_LP(dev_priv)) {
u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 16) & 0xff;
}
/* hw_max = RP0 until we check for overclocking */
- dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+ rps->max_freq = rps->rp0_freq;
- dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ rps->efficient_freq = rps->rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
u32 ddcc_status = 0;
@@ -6387,33 +6541,34 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
if (sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status) == 0)
- dev_priv->rps.efficient_freq =
+ rps->efficient_freq =
clamp_t(u8,
((ddcc_status >> 8) & 0xff),
- dev_priv->rps.min_freq,
- dev_priv->rps.max_freq);
+ rps->min_freq,
+ rps->max_freq);
}
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/
- dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+ rps->rp0_freq *= GEN9_FREQ_SCALER;
+ rps->rp1_freq *= GEN9_FREQ_SCALER;
+ rps->min_freq *= GEN9_FREQ_SCALER;
+ rps->max_freq *= GEN9_FREQ_SCALER;
+ rps->efficient_freq *= GEN9_FREQ_SCALER;
}
}
static void reset_rps(struct drm_i915_private *dev_priv,
int (*set)(struct drm_i915_private *, u8))
{
- u8 freq = dev_priv->rps.cur_freq;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u8 freq = rps->cur_freq;
/* force a reset */
- dev_priv->rps.power = -1;
- dev_priv->rps.cur_freq = -1;
+ rps->power = -1;
+ rps->cur_freq = -1;
if (set(dev_priv, freq))
DRM_ERROR("Failed to reset RPS to initial values\n");
@@ -6426,7 +6581,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
/* Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+ GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
/* 1 second timeout*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
@@ -6446,7 +6601,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- uint32_t rc6_mask = 0;
+ u32 rc6_mode, rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6480,12 +6635,19 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* WaRsUseTimeoutMode:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+ else
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
+ GEN6_RC_CTL_HW_ENABLE | rc6_mode | rc6_mask);
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
@@ -6500,7 +6662,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
+static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -6509,7 +6671,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
- /* 1c & 1d: Get forcewake during program sequence. Although the driver
+ /* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -6523,36 +6685,38 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
- else
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev_priv, rc6_mask);
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- rc6_mask);
- else
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
- /* 4 Program defaults and thresholds for RPS*/
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ rc6_mask);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 1 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+ HSW_FREQUENCY(rps->rp1_freq));
I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+ HSW_FREQUENCY(rps->rp1_freq));
/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
/* Docs recommend 900MHz, and 300 MHz respectively */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->rps.max_freq_softlimit << 24 |
- dev_priv->rps.min_freq_softlimit << 16);
+ rps->max_freq_softlimit << 24 |
+ rps->min_freq_softlimit << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -6561,7 +6725,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* 5: Enable RPS */
+ /* 2: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
@@ -6570,14 +6734,12 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
- /* 6: Ring frequency + overclocking (our driver does this later */
-
reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
+static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -6586,14 +6748,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
int rc6_mode;
int ret;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
I915_WRITE(GEN6_RC_STATE, 0);
/* Clear the DBG now so we don't confuse earlier errors */
@@ -6627,7 +6781,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
- rc6_mode = intel_enable_rc6();
+ rc6_mode = intel_rc6_enabled();
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
@@ -6647,12 +6801,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
- /* Power down if completely idle for over 50ms */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- reset_rps(dev_priv, gen6_set_rps);
-
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev_priv) && ret) {
@@ -6670,8 +6818,28 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* Power down if completely idle for over 50ms */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ reset_rps(dev_priv, gen6_set_rps);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
@@ -6679,7 +6847,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
int scaling_factor = 180;
struct cpufreq_policy *policy;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
policy = cpufreq_cpu_get(0);
if (policy) {
@@ -6702,11 +6870,11 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
- min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
- max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+ min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER;
+ max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER;
} else {
- min_gpu_freq = dev_priv->rps.min_freq;
- max_gpu_freq = dev_priv->rps.max_freq;
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
}
/*
@@ -6957,17 +7125,18 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
- dev_priv->rps.gpll_ref_freq =
+ dev_priv->gt_pm.rps.gpll_ref_freq =
vlv_get_cck_clock(dev_priv, "GPLL ref",
CCK_GPLL_CLOCK_CONTROL,
dev_priv->czclk_freq);
DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
- dev_priv->rps.gpll_ref_freq);
+ dev_priv->gt_pm.rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
valleyview_setup_pctx(dev_priv);
@@ -6989,30 +7158,31 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
- dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
- dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ rps->max_freq = valleyview_rps_max_freq(dev_priv);
+ rps->rp0_freq = rps->max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
- dev_priv->rps.max_freq);
+ intel_gpu_freq(dev_priv, rps->max_freq),
+ rps->max_freq);
- dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+ rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, rps->efficient_freq),
+ rps->efficient_freq);
- dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+ rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
- dev_priv->rps.rp1_freq);
+ intel_gpu_freq(dev_priv, rps->rp1_freq),
+ rps->rp1_freq);
- dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+ rps->min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
- dev_priv->rps.min_freq);
+ intel_gpu_freq(dev_priv, rps->min_freq),
+ rps->min_freq);
}
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
cherryview_setup_pctx(dev_priv);
@@ -7033,31 +7203,29 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
- dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
- dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ rps->max_freq = cherryview_rps_max_freq(dev_priv);
+ rps->rp0_freq = rps->max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
- dev_priv->rps.max_freq);
+ intel_gpu_freq(dev_priv, rps->max_freq),
+ rps->max_freq);
- dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+ rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, rps->efficient_freq),
+ rps->efficient_freq);
- dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+ rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
- dev_priv->rps.rp1_freq);
+ intel_gpu_freq(dev_priv, rps->rp1_freq),
+ rps->rp1_freq);
- dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+ rps->min_freq = cherryview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
- dev_priv->rps.min_freq);
+ intel_gpu_freq(dev_priv, rps->min_freq),
+ rps->min_freq);
- WARN_ONCE((dev_priv->rps.max_freq |
- dev_priv->rps.efficient_freq |
- dev_priv->rps.rp1_freq |
- dev_priv->rps.min_freq) & 1,
+ WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+ rps->min_freq) & 1,
"Odd GPU freq values\n");
}
@@ -7066,13 +7234,11 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
valleyview_cleanup_pctx(dev_priv);
}
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, val, rc6_mode = 0, pcbr;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ u32 gtfifodbg, rc6_mode = 0, pcbr;
gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
GT_FIFO_FREE_ENTRIES_CHV);
@@ -7103,7 +7269,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
- /* allows RC6 residency counter to work */
+ /* Allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC6_COUNT_EN |
@@ -7113,13 +7279,22 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
pcbr = I915_READ(VLV_PCBR);
/* 3: Enable RC6 */
- if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
+ if ((intel_rc6_enabled() & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
- /* 4 Program defaults and thresholds for RPS*/
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 1: Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -7128,7 +7303,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* 5: Enable RPS */
+ /* 2: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
@@ -7155,13 +7330,11 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
+static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, val, rc6_mode = 0;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ u32 gtfifodbg, rc6_mode = 0;
valleyview_check_pctx(dev_priv);
@@ -7172,28 +7345,11 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- /* If VLV, Forcewake all wells, else re-direct to regular path */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
@@ -7203,7 +7359,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
- /* allows RC6 residency counter to work */
+ /* Allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC0_COUNT_EN |
@@ -7211,13 +7367,38 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
intel_print_rc6_info(dev_priv, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
VLV_SOC_TDP_EN |
@@ -7425,7 +7606,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
lockdep_assert_held(&mchdev_lock);
- pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
+ pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -7712,17 +7893,19 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
*/
- if (!i915.enable_rc6) {
+ if (!i915_modparams.enable_rc6) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
intel_runtime_pm_get(dev_priv);
}
mutex_lock(&dev_priv->drm.struct_mutex);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
/* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
@@ -7733,16 +7916,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
gen6_init_rps_frequencies(dev_priv);
/* Derive initial user preferences/limits from the hardware limits */
- dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
- dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
- dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+ rps->max_freq_softlimit = rps->max_freq;
+ rps->min_freq_softlimit = rps->min_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- dev_priv->rps.min_freq_softlimit =
+ rps->min_freq_softlimit =
max_t(int,
- dev_priv->rps.efficient_freq,
+ rps->efficient_freq,
intel_freq_opcode(dev_priv, 450));
/* After setting max-softlimit, find the overclock max freq */
@@ -7753,16 +7936,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
if (params & BIT(31)) { /* OC supported */
DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (dev_priv->rps.max_freq & 0xff) * 50,
+ (rps->max_freq & 0xff) * 50,
(params & 0xff) * 50);
- dev_priv->rps.max_freq = params & 0xff;
+ rps->max_freq = params & 0xff;
}
}
/* Finally allow us to boost to max by default */
- dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
+ rps->boost_freq = rps->max_freq;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_autoenable_gt_powersave(dev_priv);
@@ -7773,7 +7956,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
- if (!i915.enable_rc6)
+ if (!i915_modparams.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
@@ -7790,7 +7973,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
- if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
+ if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
intel_runtime_pm_put(dev_priv);
/* gen6_rps_idle() will be called later to disable interrupts */
@@ -7798,90 +7981,168 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
- dev_priv->rps.enabled = true; /* force disabling */
+ dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
+ dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
intel_disable_gt_powersave(dev_priv);
gen6_reset_rps_interrupts(dev_priv);
}
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
{
- if (!READ_ONCE(dev_priv->rps.enabled))
+ lockdep_assert_held(&i915->pcu_lock);
+
+ if (!i915->gt_pm.llc_pstate.enabled)
return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ /* Currently there is no HW configuration to be done to disable. */
- if (INTEL_GEN(dev_priv) >= 9) {
+ i915->gt_pm.llc_pstate.enabled = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (!dev_priv->gt_pm.rc6.enabled)
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 9)
gen9_disable_rc6(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_disable_rc6(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_disable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_disable_rc6(dev_priv);
+
+ dev_priv->gt_pm.rc6.enabled = false;
+}
+
+static void intel_disable_rps(struct drm_i915_private *dev_priv)
+{
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (!dev_priv->gt_pm.rps.enabled)
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 9)
gen9_disable_rps(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ else if (IS_CHERRYVIEW(dev_priv))
cherryview_disable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ else if (IS_VALLEYVIEW(dev_priv))
valleyview_disable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ else if (INTEL_GEN(dev_priv) >= 6)
gen6_disable_rps(dev_priv);
- } else if (IS_IRONLAKE_M(dev_priv)) {
+ else if (IS_IRONLAKE_M(dev_priv))
ironlake_disable_drps(dev_priv);
- }
- dev_priv->rps.enabled = false;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ dev_priv->gt_pm.rps.enabled = false;
}
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- /* We shouldn't be disabling as we submit, so this should be less
- * racy than it appears!
- */
- if (READ_ONCE(dev_priv->rps.enabled))
+ mutex_lock(&dev_priv->pcu_lock);
+
+ intel_disable_rc6(dev_priv);
+ intel_disable_rps(dev_priv);
+ if (HAS_LLC(dev_priv))
+ intel_disable_llc_pstate(dev_priv);
+
+ mutex_unlock(&dev_priv->pcu_lock);
+}
+
+static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->pcu_lock);
+
+ if (i915->gt_pm.llc_pstate.enabled)
return;
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev_priv))
+ gen6_update_ring_freq(i915);
+
+ i915->gt_pm.llc_pstate.enabled = true;
+}
+
+static void intel_enable_rc6(struct drm_i915_private *dev_priv)
+{
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (dev_priv->gt_pm.rc6.enabled)
return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_enable_rc6(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_enable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 9)
+ gen9_enable_rc6(dev_priv);
+ else if (IS_BROADWELL(dev_priv))
+ gen8_enable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_enable_rc6(dev_priv);
+
+ dev_priv->gt_pm.rc6.enabled = true;
+}
+
+static void intel_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (rps->enabled)
+ return;
if (IS_CHERRYVIEW(dev_priv)) {
cherryview_enable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_enable_rps(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 9) {
- gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv);
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
- gen6_update_ring_freq(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 6) {
gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
} else if (IS_IRONLAKE_M(dev_priv)) {
ironlake_enable_drps(dev_priv);
intel_init_emon(dev_priv);
}
- WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
- WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+ WARN_ON(rps->max_freq < rps->min_freq);
+ WARN_ON(rps->idle_freq > rps->max_freq);
+
+ WARN_ON(rps->efficient_freq < rps->min_freq);
+ WARN_ON(rps->efficient_freq > rps->max_freq);
+
+ rps->enabled = true;
+}
+
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+{
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(dev_priv))
+ return;
- WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
- WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+ mutex_lock(&dev_priv->pcu_lock);
- dev_priv->rps.enabled = true;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_enable_rc6(dev_priv);
+ intel_enable_rps(dev_priv);
+ if (HAS_LLC(dev_priv))
+ intel_enable_llc_pstate(dev_priv);
+
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
+ container_of(work,
+ typeof(*dev_priv),
+ gt_pm.autoenable_work.work);
struct intel_engine_cs *rcs;
struct drm_i915_gem_request *req;
- if (READ_ONCE(dev_priv->rps.enabled))
- goto out;
-
rcs = dev_priv->engine[RCS];
if (rcs->last_retired_context)
goto out;
@@ -7895,7 +8156,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
if (IS_ERR(req))
goto unlock;
- if (!i915.enable_execlists && i915_switch_context(req) == 0)
+ if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
rcs->init_context(req);
/* Mark the device busy, calling intel_enable_gt_powersave() */
@@ -7909,9 +8170,6 @@ out:
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
- if (READ_ONCE(dev_priv->rps.enabled))
- return;
-
if (IS_IRONLAKE_M(dev_priv)) {
ironlake_enable_drps(dev_priv);
intel_init_emon(dev_priv);
@@ -7929,7 +8187,7 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
* runtime resume it's necessary).
*/
if (queue_delayed_work(dev_priv->wq,
- &dev_priv->rps.autoenable_work,
+ &dev_priv->gt_pm.autoenable_work,
round_jiffies_up_relative(HZ)))
intel_runtime_pm_get_noresume(dev_priv);
}
@@ -7959,19 +8217,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
}
}
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
-
- /*
- * Don't touch WM1S_LP_EN here.
- * Doing so could cause underruns.
- */
-}
-
-static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
+static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
@@ -8004,8 +8250,6 @@ static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- ilk_init_lp_watermarks(dev_priv);
-
/*
* Based on the document from hardware guys the following bits
* should be set unconditionally in order to enable FBC.
@@ -8118,8 +8362,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
- ilk_init_lp_watermarks(dev_priv);
-
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -8257,7 +8499,50 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
-static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
+static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_PCH_CNP(dev_priv))
+ return;
+
+ /* Wa #1181 */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
+ CNP_PWM_CGE_GATING_DISABLE);
+}
+
+static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ cnp_init_clock_gating(dev_priv);
+
+ /* This is not an Wa. Enable for better image quality */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+
+ /* WaEnableChickenDCPR:cnl */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1,
+ I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+ /* WaFbcWakeMemOn:cnl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_MEMORY_WAKE);
+
+ /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
+ I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
+ I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
+ SARBUNIT_CLKGATE_DIS);
+}
+
+static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ cnp_init_clock_gating(dev_priv);
+ gen9_init_clock_gating(dev_priv);
+
+ /* WaFbcNukeOnHostModify:cfl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
gen9_init_clock_gating(dev_priv);
@@ -8271,12 +8556,12 @@ static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
- /* WaFbcNukeOnHostModify:kbl,cfl */
+ /* WaFbcNukeOnHostModify:kbl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
+static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
gen9_init_clock_gating(dev_priv);
@@ -8289,12 +8574,13 @@ static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
+static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ /* The GTT cache must be disabled if the system is using 2M pages. */
+ bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
+ I915_GTT_PAGE_SIZE_2M);
enum pipe pipe;
- ilk_init_lp_watermarks(dev_priv);
-
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -8325,12 +8611,8 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
- /*
- * WaGttCachingOffByDefault:bdw
- * GTT cache may not work with big pages, so if those
- * are ever enabled GTT cache may need to be disabled.
- */
- I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+ /* WaGttCachingOffByDefault:bdw */
+ I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
/* WaKVMNotificationOnConfigChange:bdw */
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
@@ -8347,10 +8629,8 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
-static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
+static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- ilk_init_lp_watermarks(dev_priv);
-
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
I915_WRITE(HSW_ROW_CHICKEN3,
@@ -8394,19 +8674,13 @@ static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
- /* WaRsPkgCStateDisplayPMReq:hsw */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
-
lpt_init_clock_gating(dev_priv);
}
-static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
+static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t snpcr;
- ilk_init_lp_watermarks(dev_priv);
-
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:ivb */
@@ -8498,7 +8772,7 @@ static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
gen6_check_mch_setup(dev_priv);
}
-static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
+static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -8578,7 +8852,7 @@ static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
-static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
+static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -8638,7 +8912,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
g4x_disable_trickle_feed(dev_priv);
}
-static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
@@ -8652,7 +8926,7 @@ static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
@@ -8737,34 +9011,38 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = skylake_init_clock_gating;
- else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
+ if (IS_CANNONLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = cnl_init_clock_gating;
+ else if (IS_COFFEELAKE(dev_priv))
+ dev_priv->display.init_clock_gating = cfl_init_clock_gating;
+ else if (IS_SKYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = skl_init_clock_gating;
+ else if (IS_KABYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = kbl_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_GEMINILAKE(dev_priv))
dev_priv->display.init_clock_gating = glk_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
- dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
+ dev_priv->display.init_clock_gating = bdw_init_clock_gating;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
+ dev_priv->display.init_clock_gating = chv_init_clock_gating;
else if (IS_HASWELL(dev_priv))
- dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ dev_priv->display.init_clock_gating = hsw_init_clock_gating;
else if (IS_IVYBRIDGE(dev_priv))
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
+ dev_priv->display.init_clock_gating = vlv_init_clock_gating;
else if (IS_GEN6(dev_priv))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
else if (IS_GEN5(dev_priv))
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
else if (IS_I965GM(dev_priv))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ dev_priv->display.init_clock_gating = i965g_init_clock_gating;
else if (IS_GEN3(dev_priv))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
@@ -8907,7 +9185,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
{
int status;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
* use te fw I915_READ variants to reduce the amount of work
@@ -8954,7 +9232,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
{
int status;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
* use te fw I915_READ variants to reduce the amount of work
@@ -9031,7 +9309,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
u32 status;
int ret;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
&status)
@@ -9073,31 +9351,39 @@ out:
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/*
* N = val - 0xb7
* Slow = Fast = GPLL ref * N
*/
- return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/*
* N = val / 2
* CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
*/
- return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* CHV needs even values */
- return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -9126,53 +9412,16 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
-struct request_boost {
- struct work_struct work;
- struct drm_i915_gem_request *req;
-};
-
-static void __intel_rps_boost_work(struct work_struct *work)
-{
- struct request_boost *boost = container_of(work, struct request_boost, work);
- struct drm_i915_gem_request *req = boost->req;
-
- if (!i915_gem_request_completed(req))
- gen6_rps_boost(req, NULL);
-
- i915_gem_request_put(req);
- kfree(boost);
-}
-
-void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
-{
- struct request_boost *boost;
-
- if (req == NULL || INTEL_GEN(req->i915) < 6)
- return;
-
- if (i915_gem_request_completed(req))
- return;
-
- boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
- if (boost == NULL)
- return;
-
- boost->req = i915_gem_request_get(req);
-
- INIT_WORK(&boost->work, __intel_rps_boost_work);
- queue_work(req->i915->wq, &boost->work);
-}
-
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->pcu_lock);
- INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
+ INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
__intel_autoenable_gt_powersave);
- atomic_set(&dev_priv->rps.num_waiters, 0);
+ atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
- dev_priv->pm.suspended = false;
- atomic_set(&dev_priv->pm.wakeref_count, 0);
+ dev_priv->runtime_pm.suspended = false;
+ atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
@@ -9225,7 +9474,7 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
{
u64 time_hw, units, div;
- if (!intel_enable_rc6())
+ if (!intel_rc6_enabled())
return 0;
intel_runtime_pm_get(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 1b31ab002dae..6e3b430fccdc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -58,6 +58,9 @@
static bool is_edp_psr(struct intel_dp *intel_dp)
{
+ if (!intel_dp_is_edp(intel_dp))
+ return false;
+
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
@@ -72,90 +75,54 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
(val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
-static void intel_psr_write_vsc(struct intel_dp *intel_dp,
- const struct edp_vsc_psr *vsc_psr)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- uint32_t *data = (uint32_t *) vsc_psr;
- unsigned int i;
-
- /* As per BSPec (Pipe Video Data Island Packet), we need to disable
- the video DIP being updated before program video DIP data buffer
- registers for DIP being updated. */
- I915_WRITE(ctl_reg, 0);
- POSTING_READ(ctl_reg);
-
- for (i = 0; i < sizeof(*vsc_psr); i += 4) {
- I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
- i >> 2), *data);
- data++;
- }
- for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
- I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
- i >> 2), 0);
-
- I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
- POSTING_READ(ctl_reg);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
+static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t val;
/* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
- val = I915_READ(VLV_VSCSDP(pipe));
+ val = I915_READ(VLV_VSCSDP(crtc->pipe));
val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
- I915_WRITE(VLV_VSCSDP(pipe), val);
+ I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}
-static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
+static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
- struct edp_vsc_psr psr_vsc;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct edp_vsc_psr psr_vsc;
- /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- if (dev_priv->psr.colorimetry_support &&
- dev_priv->psr.y_cord_support) {
- psr_vsc.sdp_header.HB2 = 0x5;
- psr_vsc.sdp_header.HB3 = 0x13;
- } else if (dev_priv->psr.y_cord_support) {
- psr_vsc.sdp_header.HB2 = 0x4;
- psr_vsc.sdp_header.HB3 = 0xe;
+ if (dev_priv->psr.psr2_support) {
+ /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ if (dev_priv->psr.colorimetry_support &&
+ dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x5;
+ psr_vsc.sdp_header.HB3 = 0x13;
+ } else if (dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x4;
+ psr_vsc.sdp_header.HB3 = 0xe;
+ } else {
+ psr_vsc.sdp_header.HB2 = 0x3;
+ psr_vsc.sdp_header.HB3 = 0xc;
+ }
} else {
- psr_vsc.sdp_header.HB2 = 0x3;
- psr_vsc.sdp_header.HB3 = 0xc;
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
}
- intel_psr_write_vsc(intel_dp, &psr_vsc);
-}
-
-static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
-{
- struct edp_vsc_psr psr_vsc;
-
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- intel_psr_write_vsc(intel_dp, &psr_vsc);
+ intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+ DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
@@ -233,16 +200,15 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
I915_WRITE(aux_ctl_reg, aux_ctl);
}
-static void vlv_psr_enable_source(struct intel_dp *intel_dp)
+static void vlv_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
- I915_WRITE(VLV_PSRCTL(pipe),
+ /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
+ I915_WRITE(VLV_PSRCTL(crtc->pipe),
VLV_EDP_PSR_MODE_SW_TIMER |
VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
VLV_EDP_PSR_ENABLE);
@@ -256,16 +222,17 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
- /* Let's do the transition from PSR_state 1 to PSR_state 2
- * that is PSR transition to active - static frame transmission.
- * Then Hardware is responsible for the transition to PSR_state 3
- * that is PSR active - no Remote Frame Buffer (RFB) update.
+ /*
+ * Let's do the transition from PSR_state 1 (inactive) to
+ * PSR_state 2 (transition to active - static frame transmission).
+ * Then Hardware is responsible for the transition to
+ * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
*/
I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
VLV_EDP_PSR_ACTIVE_ENTRY);
}
-static void intel_enable_source_psr1(struct intel_dp *intel_dp)
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -319,7 +286,7 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR_CTL, val);
}
-static void intel_enable_source_psr2(struct intel_dp *intel_dp)
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -333,6 +300,7 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
*/
uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
uint32_t val;
+ uint8_t sink_latency;
val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
@@ -340,8 +308,16 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
val |= EDP_PSR2_ENABLE |
- EDP_SU_TRACK_ENABLE |
- EDP_FRAMES_BEFORE_SU_ENTRY;
+ EDP_SU_TRACK_ENABLE;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SYNCHRONIZATION_LATENCY_IN_SINK,
+ &sink_latency) == 1) {
+ sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+ } else {
+ sink_latency = 0;
+ }
+ val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
@@ -355,35 +331,43 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR2_CTL, val);
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+static void hsw_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ /* On HSW+ after we enable PSR on source it will activate it
+ * as soon as it match configure idle_frame count. So
+ * we just actually enable it here on activation time.
+ */
+
/* psr1 and psr2 are mutually exclusive.*/
if (dev_priv->psr.psr2_support)
- intel_enable_source_psr2(intel_dp);
+ hsw_activate_psr2(intel_dp);
else
- intel_enable_source_psr1(intel_dp);
+ hsw_activate_psr1(intel_dp);
}
-static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
const struct drm_display_mode *adjusted_mode =
- &intel_crtc->config->base.adjusted_mode;
+ &crtc_state->base.adjusted_mode;
int psr_setup_time;
- lockdep_assert_held(&dev_priv->psr.lock);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ if (!HAS_PSR(dev_priv))
+ return;
+
+ if (!is_edp_psr(intel_dp))
+ return;
- dev_priv->psr.source_ok = false;
+ if (!i915_modparams.enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ return;
+ }
/*
* HSW spec explicitly says PSR is tied to port A.
@@ -394,66 +378,70 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
*/
if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
- return false;
- }
-
- if (!i915.enable_psr) {
- DRM_DEBUG_KMS("PSR disable by flag\n");
- return false;
+ return;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
- return false;
+ return;
}
if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
+ I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- return false;
+ return;
}
if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- return false;
+ return;
}
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
if (psr_setup_time < 0) {
DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]);
- return false;
+ return;
}
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
psr_setup_time);
- return false;
+ return;
+ }
+
+ /*
+ * FIXME psr2_support is messed up. It's both computed
+ * dynamically during PSR enable, and extracted from sink
+ * caps during eDP detection.
+ */
+ if (!dev_priv->psr.psr2_support) {
+ crtc_state->has_psr = true;
+ return;
}
/* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (dev_priv->psr.psr2_support &&
- (intel_crtc->config->pipe_src_w > 3200 ||
- intel_crtc->config->pipe_src_h > 2000)) {
- dev_priv->psr.psr2_support = false;
- return false;
+ if (adjusted_mode->crtc_hdisplay > 3200 ||
+ adjusted_mode->crtc_vdisplay > 2000) {
+ DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
+ return;
}
/*
* FIXME:enable psr2 only for y-cordinate psr2 panels
* After gtc implementation , remove this restriction.
*/
- if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
+ if (!dev_priv->psr.y_cord_support) {
DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
- return false;
+ return;
}
- dev_priv->psr.source_ok = true;
- return true;
+ crtc_state->has_psr = true;
+ crtc_state->has_psr2 = true;
}
static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -469,153 +457,133 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
- /* Enable/Re-enable PSR on the host */
- if (HAS_DDI(dev_priv))
- /* On HSW+ after we enable PSR on source it will activate it
- * as soon as it match configure idle_frame count. So
- * we just actually enable it here on activation time.
- */
- hsw_psr_enable_source(intel_dp);
- else
- vlv_psr_activate(intel_dp);
-
+ dev_priv->psr.activate(intel_dp);
dev_priv->psr.active = true;
}
+static void hsw_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 chicken;
+
+ if (dev_priv->psr.psr2_support) {
+ chicken = PSR2_VSC_ENABLE_PROG_HEADER;
+ if (dev_priv->psr.y_cord_support)
+ chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+ I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
+
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP |
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ } else {
+ /*
+ * Per Spec: Avoid continuous PSR exit by masking MEMUP
+ * and HPD. also mask LPSP to avoid dependency on other
+ * drivers that might block runtime_pm besides
+ * preventing other hw tracking issues now we can rely
+ * on frontbuffer tracking.
+ */
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP);
+ }
+}
+
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
*
* This function can only be called after the pipe is fully trained and enabled.
*/
-void intel_psr_enable(struct intel_dp *intel_dp)
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- u32 chicken;
- if (!HAS_PSR(dev_priv)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ if (!crtc_state->has_psr)
return;
- }
-
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
- return;
- }
+ WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}
- if (!intel_psr_match_conditions(intel_dp))
- goto unlock;
+ dev_priv->psr.psr2_support = crtc_state->has_psr2;
+ dev_priv->psr.source_ok = true;
dev_priv->psr.busy_frontbuffer_bits = 0;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_support) {
- skl_psr_setup_su_vsc(intel_dp);
- chicken = PSR2_VSC_ENABLE_PROG_HEADER;
- if (dev_priv->psr.y_cord_support)
- chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
- I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
- I915_WRITE(EDP_PSR_DEBUG_CTL,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
- } else {
- /* set up vsc header for psr1 */
- hsw_psr_setup_vsc(intel_dp);
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP
- * and HPD. also mask LPSP to avoid dependency on other
- * drivers that might block runtime_pm besides
- * preventing other hw tracking issues now we can rely
- * on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG_CTL,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP);
- }
-
- /* Enable PSR on the panel */
- hsw_psr_enable_sink(intel_dp);
+ dev_priv->psr.setup_vsc(intel_dp, crtc_state);
+ dev_priv->psr.enable_sink(intel_dp);
+ dev_priv->psr.enable_source(intel_dp, crtc_state);
+ dev_priv->psr.enabled = intel_dp;
- if (INTEL_GEN(dev_priv) >= 9)
- intel_psr_activate(intel_dp);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ intel_psr_activate(intel_dp);
} else {
- vlv_psr_setup_vsc(intel_dp);
-
- /* Enable PSR on the panel */
- vlv_psr_enable_sink(intel_dp);
-
- /* On HSW+ enable_source also means go to PSR entry/active
- * state as soon as idle_frame achieved and here would be
- * to soon. However on VLV enable_source just enable PSR
- * but let it on inactive state. So we might do this prior
- * to active transition, i.e. here.
+ /*
+ * FIXME: Activation should happen immediately since this
+ * function is just called after pipe is fully trained and
+ * enabled.
+ * However on some platforms we face issues when first
+ * activation follows a modeset so quickly.
+ * - On VLV/CHV we get bank screen on first activation
+ * - On HSW/BDW we get a recoverable frozen screen until
+ * next exit-activate sequence.
*/
- vlv_psr_enable_source(intel_dp);
- }
-
- /*
- * FIXME: Activation should happen immediately since this function
- * is just called after pipe is fully trained and enabled.
- * However on every platform we face issues when first activation
- * follows a modeset so quickly.
- * - On VLV/CHV we get bank screen on first activation
- * - On HSW/BDW we get a recoverable frozen screen until next
- * exit-activate sequence.
- */
- if (INTEL_GEN(dev_priv) < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+ }
- dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void vlv_psr_disable(struct intel_dp *intel_dp)
+static void vlv_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
uint32_t val;
if (dev_priv->psr.active) {
- /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
+ /* Put VLV PSR back to PSR_state 0 (disabled). */
if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(intel_crtc->pipe),
+ VLV_PSRSTAT(crtc->pipe),
VLV_EDP_PSR_IN_TRANS,
0,
1))
WARN(1, "PSR transition took longer than expected\n");
- val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
+ val = I915_READ(VLV_PSRCTL(crtc->pipe));
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
val &= ~VLV_EDP_PSR_ENABLE;
val &= ~VLV_EDP_PSR_MODE_MASK;
- I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+ I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
dev_priv->psr.active = false;
} else {
- WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
+ WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
}
}
-static void hsw_psr_disable(struct intel_dp *intel_dp)
+static void hsw_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -664,26 +632,27 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
+ * @old_crtc_state: old CRTC state
*
* This function needs to be called before disabling pipe.
*/
-void intel_psr_disable(struct intel_dp *intel_dp)
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ if (!old_crtc_state->has_psr)
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
- /* Disable PSR on Source */
- if (HAS_DDI(dev_priv))
- hsw_psr_disable(intel_dp);
- else
- vlv_psr_disable(intel_dp);
+ dev_priv->psr.disable_source(intel_dp, old_crtc_state);
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -783,17 +752,20 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
} else {
val = I915_READ(VLV_PSRCTL(pipe));
- /* Here we do the transition from PSR_state 3 to PSR_state 5
- * directly once PSR State 4 that is active with single frame
- * update can be skipped. PSR_state 5 that is PSR exit then
- * Hardware is responsible to transition back to PSR_state 1
- * that is PSR inactive. Same state after
- * vlv_edp_psr_enable_source.
+ /*
+ * Here we do the transition drirectly from
+ * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
+ * PSR_state 5 (exit).
+ * PSR State 4 (active with single frame update) can be skipped.
+ * On PSR_state 5 (exit) Hardware is responsible to transition
+ * back to PSR_state 1 (inactive).
+ * Now we are at Same state after vlv_psr_enable_source.
*/
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
I915_WRITE(VLV_PSRCTL(pipe), val);
- /* Send AUX wake up - Spec says after transitioning to PSR
+ /*
+ * Send AUX wake up - Spec says after transitioning to PSR
* active we have to send AUX wake up by writing 01h in DPCD
* 600h of sink device.
* XXX: This might slow down the transition, but without this
@@ -824,6 +796,9 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
enum pipe pipe;
u32 val;
+ if (!HAS_PSR(dev_priv))
+ return;
+
/*
* Single frame update is already supported on BDW+ but it requires
* many W/A and it isn't really needed.
@@ -870,6 +845,9 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
+ if (!HAS_PSR(dev_priv))
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
@@ -907,6 +885,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
+ if (!HAS_PSR(dev_priv))
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
@@ -939,12 +920,15 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PSR(dev_priv))
+ return;
+
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
/* Per platform default: all disabled. */
- if (i915.enable_psr == -1)
- i915.enable_psr = 0;
+ if (i915_modparams.enable_psr == -1)
+ i915_modparams.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -958,15 +942,29 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
/* Override link_standby x link_off defaults */
- if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
+ if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing link standby\n");
dev_priv->psr.link_standby = true;
}
- if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
+ if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing main link off\n");
dev_priv->psr.link_standby = false;
}
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->psr.enable_source = vlv_psr_enable_source;
+ dev_priv->psr.disable_source = vlv_psr_disable;
+ dev_priv->psr.enable_sink = vlv_psr_enable_sink;
+ dev_priv->psr.activate = vlv_psr_activate;
+ dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
+ } else {
+ dev_priv->psr.enable_source = hsw_psr_enable_source;
+ dev_priv->psr.disable_source = hsw_psr_disable;
+ dev_priv->psr.enable_sink = hsw_psr_enable_sink;
+ dev_priv->psr.activate = hsw_psr_activate;
+ dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cdf084ef5aae..8da1bde442dd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -402,17 +402,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
*/
if (IS_GEN7(dev_priv)) {
switch (engine->id) {
+ /*
+ * No more rings exist on Gen7. Default case is only to shut up
+ * gcc switch check warning.
+ */
+ default:
+ GEM_BUG_ON(engine->id);
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
- /*
- * VCS2 actually doesn't exist on Gen7. Only shut up
- * gcc switch check warning
- */
- case VCS2:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
@@ -427,6 +428,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = RING_HWS_PGA(engine->mmio_base);
}
+ if (INTEL_GEN(dev_priv) >= 6)
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
@@ -480,11 +484,6 @@ static bool stop_ring(struct intel_engine_cs *engine)
I915_WRITE_HEAD(engine, 0);
I915_WRITE_TAIL(engine, 0);
- if (INTEL_GEN(dev_priv) > 2) {
- (void)I915_READ_CTL(engine);
- I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
- }
-
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
@@ -566,6 +565,9 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
+ if (INTEL_GEN(dev_priv) > 2)
+ I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -575,7 +577,16 @@ out:
static void reset_ring_common(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- /* Try to restore the logical GPU state to match the continuation
+ /*
+ * RC6 must be prevented until the reset is complete and the engine
+ * reinitialised. If it occurs in the middle of this sequence, the
+ * state written to/loaded from the power context is ill-defined (e.g.
+ * the PP_BASE_DIR may be lost).
+ */
+ assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+
+ /*
+ * Try to restore the logical GPU state to match the continuation
* of the request queue. If we skip the context/PD restore, then
* the next request may try to execute assuming that its context
* is valid and loaded on the GPU and so may try to access invalid
@@ -778,6 +789,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
return cs;
}
+static void cancel_requests(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(request, &engine->timeline->requests, link) {
+ GEM_BUG_ON(!request->global_seqno);
+ if (!i915_gem_request_completed(request))
+ dma_fence_set_error(&request->fence, -EIO);
+ }
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
@@ -1174,113 +1203,7 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static void cleanup_phys_status_page(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (!dev_priv->status_page_dmah)
- return;
-
- drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
- engine->status_page.page_addr = NULL;
-}
-
-static void cleanup_status_page(struct intel_engine_cs *engine)
-{
- struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
-
- vma = fetch_and_zero(&engine->status_page.vma);
- if (!vma)
- return;
-
- obj = vma->obj;
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- i915_gem_object_unpin_map(obj);
- __i915_gem_object_release_unless_active(obj);
-}
-
-static int init_status_page(struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int flags;
- void *vaddr;
- int ret;
-
- obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate status page\n");
- return PTR_ERR(obj);
- }
-
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- if (ret)
- goto err;
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err;
- }
-
- flags = PIN_GLOBAL;
- if (!HAS_LLC(engine->i915))
- /* On g33, we cannot place HWS above 256MiB, so
- * restrict its pinning to the low mappable arena.
- * Though this restriction is not documented for
- * gen4, gen5, or byt, they also behave similarly
- * and hang if the HWS is placed at the top of the
- * GTT. To generalise, it appears that all !llc
- * platforms have issues with us placing the HWS
- * above the mappable region (even though we never
- * actualy map it).
- */
- flags |= PIN_MAPPABLE;
- ret = i915_vma_pin(vma, 0, 4096, flags);
- if (ret)
- goto err;
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_unpin;
- }
-
- engine->status_page.vma = vma;
- engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
-
- DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
- engine->name, i915_ggtt_offset(vma));
- return 0;
-
-err_unpin:
- i915_vma_unpin(vma);
-err:
- i915_gem_object_put(obj);
- return ret;
-}
-
-static int init_phys_status_page(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- GEM_BUG_ON(engine->id != RCS);
-
- dev_priv->status_page_dmah =
- drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
- if (!dev_priv->status_page_dmah)
- return -ENOMEM;
-
- engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(engine->status_page.page_addr, 0, PAGE_SIZE);
- return 0;
-}
int intel_ring_pin(struct intel_ring *ring,
struct drm_i915_private *i915,
@@ -1321,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
if (IS_ERR(addr))
goto err;
+ vma->obj->pin_global++;
+
ring->vaddr = addr;
return 0;
@@ -1352,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
i915_gem_object_unpin_map(ring->vma->obj);
ring->vaddr = NULL;
+ ring->vma->obj->pin_global--;
i915_vma_unpin(ring->vma);
}
@@ -1516,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
goto err;
ce->state->obj->mm.dirty = true;
+ ce->state->obj->pin_global++;
}
/* The kernel context is only used as a placeholder for flushing the
@@ -1550,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
if (--ce->pin_count)
return;
- if (ce->state)
+ if (ce->state) {
+ ce->state->obj->pin_global--;
i915_vma_unpin(ce->state);
+ }
i915_gem_context_put(ctx);
}
@@ -1567,17 +1496,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
goto err;
- if (HWS_NEEDS_PHYSICAL(engine->i915))
- err = init_phys_status_page(engine);
- else
- err = init_status_page(engine);
- if (err)
- goto err;
-
ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err_hws;
+ goto err;
}
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1592,11 +1514,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
err_ring:
intel_ring_free(ring);
-err_hws:
- if (HWS_NEEDS_PHYSICAL(engine->i915))
- cleanup_phys_status_page(engine);
- else
- cleanup_status_page(engine);
err:
intel_engine_cleanup_common(engine);
return err;
@@ -1615,11 +1532,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- if (HWS_NEEDS_PHYSICAL(dev_priv))
- cleanup_phys_status_page(engine);
- else
- cleanup_status_page(engine);
-
intel_engine_cleanup_common(engine);
dev_priv->engine[engine->id] = NULL;
@@ -1983,7 +1895,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
int ret, i;
- if (!i915.semaphores)
+ if (!i915_modparams.semaphores)
return;
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
@@ -2083,7 +1995,7 @@ err_obj:
i915_gem_object_put(obj);
err:
DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
- i915.semaphores = 0;
+ i915_modparams.semaphores = 0;
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
@@ -2115,11 +2027,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
+ engine->cancel_requests = cancel_requests;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = gen6_bsd_submit_request;
+ engine->cancel_requests = cancel_requests;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
@@ -2138,7 +2052,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
- if (i915.semaphores) {
+ if (i915_modparams.semaphores) {
int num_rings;
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
@@ -2182,7 +2096,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
- if (i915.semaphores) {
+ if (i915_modparams.semaphores) {
int num_rings;
engine->semaphore.signal = gen8_rcs_signal;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6b2067f10824..2863d5a65187 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -8,6 +8,8 @@
#include "i915_gem_timeline.h"
#include "i915_selftest.h"
+struct drm_printer;
+
#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
@@ -185,6 +187,104 @@ struct i915_priolist {
int priority;
};
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * driver and the hardware state for execlist mode of submission.
+ */
+struct intel_engine_execlists {
+ /**
+ * @irq_tasklet: softirq tasklet for bottom handler
+ */
+ struct tasklet_struct irq_tasklet;
+
+ /**
+ * @default_priolist: priority list for I915_PRIORITY_NORMAL
+ */
+ struct i915_priolist default_priolist;
+
+ /**
+ * @no_priolist: priority lists disabled
+ */
+ bool no_priolist;
+
+ /**
+ * @port: execlist port states
+ *
+ * For each hardware ELSP (ExecList Submission Port) we keep
+ * track of the last request and the number of times we submitted
+ * that port to hw. We then count the number of times the hw reports
+ * a context completion or preemption. As only one context can
+ * be active on hw, we limit resubmission of context to port[0]. This
+ * is called Lite Restore, of the context.
+ */
+ struct execlist_port {
+ /**
+ * @request_count: combined request and submission count
+ */
+ struct drm_i915_gem_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, execlists) ((p) - (execlists)->port)
+
+ /**
+ * @context_id: context ID for port
+ */
+ GEM_DEBUG_DECL(u32 context_id);
+
+#define EXECLIST_MAX_PORTS 2
+ } port[EXECLIST_MAX_PORTS];
+
+ /**
+ * @active: is the HW active? We consider the HW as active after
+ * submitting any context for execution and until we have seen the
+ * last context completion event. After that, we do not expect any
+ * more events until we submit, and so can park the HW.
+ *
+ * As we have a small number of different sources from which we feed
+ * the HW, we track the state of each inside a single bitfield.
+ */
+ unsigned int active;
+#define EXECLISTS_ACTIVE_USER 0
+#define EXECLISTS_ACTIVE_PREEMPT 1
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
+
+ /**
+ * @queue: queue of requests, in priority lists
+ */
+ struct rb_root queue;
+
+ /**
+ * @first: leftmost level in priority @queue
+ */
+ struct rb_node *first;
+
+ /**
+ * @fw_domains: forcewake domains for irq tasklet
+ */
+ unsigned int fw_domains;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ unsigned int csb_head;
+
+ /**
+ * @csb_use_mmio: access csb through mmio, instead of hwsp
+ */
+ bool csb_use_mmio;
+};
+
#define INTEL_ENGINE_CS_MAX_NAME 8
struct intel_engine_cs {
@@ -307,6 +407,14 @@ struct intel_engine_cs {
void (*schedule)(struct drm_i915_gem_request *request,
int priority);
+ /*
+ * Cancel all requests on the hardware, or queued for execution.
+ * This should only cancel the ready requests that have been
+ * submitted to the engine (via the engine->submit_request callback).
+ * This is called when marking the device as wedged.
+ */
+ void (*cancel_requests)(struct intel_engine_cs *engine);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -373,25 +481,7 @@ struct intel_engine_cs {
u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
} semaphore;
- /* Execlists */
- struct tasklet_struct irq_tasklet;
- struct i915_priolist default_priolist;
- bool no_priolist;
- struct execlist_port {
- struct drm_i915_gem_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, e) ((p) - (e)->execlist_port)
- GEM_DEBUG_DECL(u32 context_id);
- } execlist_port[2];
- struct rb_root execlist_queue;
- struct rb_node *execlist_first;
- unsigned int fw_domains;
+ struct intel_engine_execlists execlists;
/* Contexts are pinned whilst they are active on the GPU. The last
* context executed remains active whilst the GPU is idle - the
@@ -444,6 +534,46 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
+static inline void
+execlists_set_active(struct intel_engine_execlists *execlists,
+ unsigned int bit)
+{
+ __set_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline void
+execlists_clear_active(struct intel_engine_execlists *execlists,
+ unsigned int bit)
+{
+ __clear_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline bool
+execlists_is_active(const struct intel_engine_execlists *execlists,
+ unsigned int bit)
+{
+ return test_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline unsigned int
+execlists_num_ports(const struct intel_engine_execlists * const execlists)
+{
+ return execlists->port_mask + 1;
+}
+
+static inline void
+execlists_port_complete(struct intel_engine_execlists * const execlists,
+ struct execlist_port * const port)
+{
+ const unsigned int m = execlists->port_mask;
+
+ GEM_BUG_ON(port_index(port, execlists) != 0);
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+
+ memmove(port, port + 1, m * sizeof(struct execlist_port));
+ memset(port + m, 0, sizeof(struct execlist_port));
+}
+
static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
@@ -497,6 +627,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_HWS_CSB_BUF0_INDEX 0x10
+#define I915_HWS_CSB_WRITE_INDEX 0x1f
+#define CNL_HWS_CSB_WRITE_INDEX 0x2f
+
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
@@ -736,16 +870,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
-static inline bool
-__intel_engine_can_store_dword(unsigned int gen, unsigned int class)
-{
- if (gen <= 2)
- return false; /* uses physical not virtual addresses */
+bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
- if (gen == 6 && class == VIDEO_DECODE_CLASS)
- return false; /* b0rked */
-
- return true;
-}
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 49577eba8e7e..8af286c63d3b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -187,7 +187,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well;
bool is_enabled;
- if (dev_priv->pm.suspended)
+ if (dev_priv->runtime_pm.suspended)
return false;
is_enabled = true;
@@ -785,7 +785,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
PUNIT_PWRGT_PWR_GATE(power_well_id);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
@@ -806,7 +806,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
#undef COND
out:
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
@@ -833,7 +833,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
mask = PUNIT_PWRGT_MASK(power_well_id);
ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
/*
@@ -852,7 +852,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
WARN_ON(ctrl != state);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return enabled;
}
@@ -1364,7 +1364,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
bool enabled;
u32 state, ctrl;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
/*
@@ -1381,7 +1381,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
WARN_ON(ctrl << 16 != state);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return enabled;
}
@@ -1396,7 +1396,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
@@ -1417,7 +1417,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
#undef COND
out:
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -2413,7 +2413,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
mask = 0;
}
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -2471,10 +2471,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
- i915.disable_power_well);
- dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
- i915.enable_dc);
+ i915_modparams.disable_power_well =
+ sanitize_disable_power_well_option(dev_priv,
+ i915_modparams.disable_power_well);
+ dev_priv->csr.allowed_dc_mask =
+ get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
@@ -2535,7 +2536,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
intel_display_set_init_power(dev_priv, true);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
/*
@@ -2707,30 +2708,67 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-#define CNL_PROCMON_IDX(val) \
- (((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
-#define NUM_CNL_PROCMON \
- (CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)
+enum {
+ PROCMON_0_85V_DOT_0,
+ PROCMON_0_95V_DOT_0,
+ PROCMON_0_95V_DOT_1,
+ PROCMON_1_05V_DOT_0,
+ PROCMON_1_05V_DOT_1,
+};
static const struct cnl_procmon {
u32 dw1, dw9, dw10;
-} cnl_procmon_values[NUM_CNL_PROCMON] = {
- [CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
- { .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+} cnl_procmon_values[] = {
+ [PROCMON_0_85V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+ [PROCMON_0_95V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+ [PROCMON_0_95V_DOT_1] =
+ { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+ [PROCMON_1_05V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+ [PROCMON_1_05V_DOT_1] =
+ { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ val = I915_READ(CNL_PORT_COMP_DW3);
+ switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+ default:
+ MISSING_CASE(val);
+ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+ break;
+ }
+
+ val = I915_READ(CNL_PORT_COMP_DW1);
+ val &= ~((0xff << 16) | 0xff);
+ val |= procmon->dw1;
+ I915_WRITE(CNL_PORT_COMP_DW1, val);
+
+ I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
+ I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
+}
+
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- const struct cnl_procmon *procmon;
struct i915_power_well *well;
u32 val;
@@ -2746,18 +2784,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val &= ~CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
- val = I915_READ(CNL_PORT_COMP_DW3);
- procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];
-
- WARN_ON(procmon->dw10 == 0);
-
- val = I915_READ(CNL_PORT_COMP_DW1);
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- I915_WRITE(CNL_PORT_COMP_DW1, val);
-
- I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
- I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
+ cnl_set_procmon_ref_values(dev_priv);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -2787,9 +2814,6 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
intel_csr_load_program(dev_priv);
}
-#undef CNL_PROCMON_IDX
-#undef NUM_CNL_PROCMON
-
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
@@ -2975,7 +2999,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
/* Disable power support if the user asked so. */
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(dev_priv);
power_domains->initializing = false;
@@ -2994,7 +3018,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
* Even if power well support was disabled we still want to disable
* power wells while we are system suspended.
*/
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
if (IS_CANNONLAKE(dev_priv))
@@ -3104,7 +3128,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
ret = pm_runtime_get_sync(kdev);
WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
}
@@ -3138,7 +3162,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
return false;
}
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
return true;
@@ -3169,7 +3193,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
assert_rpm_wakelock_held(dev_priv);
pm_runtime_get_noresume(kdev);
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
}
/**
@@ -3186,7 +3210,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- atomic_dec(&dev_priv->pm.wakeref_count);
+ atomic_dec(&dev_priv->runtime_pm.wakeref_count);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 29a3b0f5bec7..7437944b388f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -201,11 +201,8 @@ to_intel_sdvo_connector(struct drm_connector *connector)
return container_of(connector, struct intel_sdvo_connector, base.base);
}
-static struct intel_sdvo_connector_state *
-to_intel_sdvo_connector_state(struct drm_connector_state *conn_state)
-{
- return container_of(conn_state, struct intel_sdvo_connector_state, base.base);
-}
+#define to_intel_sdvo_connector_state(conn_state) \
+ container_of((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
@@ -998,7 +995,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe frame;
@@ -1032,7 +1029,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
- struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state)
{
struct intel_sdvo_tv_format format;
uint32_t format_map;
@@ -1202,9 +1199,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
} while (0)
static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector_state *sdvo_state)
+ const struct intel_sdvo_connector_state *sdvo_state)
{
- struct drm_connector_state *conn_state = &sdvo_state->base.base;
+ const struct drm_connector_state *conn_state = &sdvo_state->base.base;
struct intel_sdvo_connector *intel_sdvo_conn =
to_intel_sdvo_connector(conn_state->connector);
uint16_t val;
@@ -1258,14 +1255,15 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
}
static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
- struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state);
- struct drm_display_mode *mode = &crtc_state->base.mode;
+ const struct intel_sdvo_connector_state *sdvo_state =
+ to_intel_sdvo_connector_state(conn_state);
+ const struct drm_display_mode *mode = &crtc_state->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1507,8 +1505,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
static void intel_disable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@@ -1552,21 +1550,21 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
}
static void pch_disable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
}
static void pch_post_disable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
}
static void intel_enable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 7d971cb56116..75c872bb8cc9 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -81,7 +81,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -95,7 +95,7 @@ int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
int err;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
mutex_lock(&dev_priv->sb_lock);
err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -125,7 +125,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 524933b01483..4fcf80ca91dd 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -66,12 +66,17 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
+/* FIXME: We should instead only take spinlocks once for the entire update
+ * instead of once per mmio. */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
#define VBLANK_EVASION_TIME_US 100
+#endif
/**
* intel_pipe_update_start() - start update of a set of display registers
- * @crtc: the crtc of which the registers are going to be updated
- * @start_vbl_count: vblank counter return pointer used for error checking
+ * @new_crtc_state: the new crtc state
*
* Mark the start of an update to pipe registers that should be updated
* atomically regarding vblank. If the next vblank will happens within
@@ -79,18 +84,18 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*
* After a successful call to this function, interrupts will be disabled
* until a subsequent call to intel_pipe_update_end(). That is done to
- * avoid random delays. The value written to @start_vbl_count should be
- * supplied to intel_pipe_update_end() for error checking.
+ * avoid random delays.
*/
-void intel_pipe_update_start(struct intel_crtc *crtc)
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
vblank_start = adjusted_mode->crtc_vblank_start;
@@ -170,15 +175,15 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
/**
* intel_pipe_update_end() - end update of a set of display registers
- * @crtc: the crtc of which the registers were updated
- * @start_vbl_count: start vblank counter (used for error checking)
+ * @new_crtc_state: the new crtc state
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
- * before a vblank using the value of @start_vbl_count.
+ * before a vblank.
*/
-void intel_pipe_update_end(struct intel_crtc *crtc)
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
@@ -191,14 +196,14 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
- if (crtc->base.state->event) {
+ if (new_crtc_state->base.event) {
WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
+ drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
spin_unlock(&crtc->base.dev->event_lock);
- crtc->base.state->event = NULL;
+ new_crtc_state->base.event = NULL;
}
local_irq_enable();
@@ -225,7 +230,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
#endif
}
-static void
+void
skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -306,7 +311,7 @@ skl_update_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
+void
skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -995,7 +1000,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
- plane = drm_plane_find(dev, set->plane_id);
+ plane = drm_plane_find(dev, file_priv, set->plane_id);
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 906893c006d8..a79a7591b2cf 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -814,8 +814,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
static void
intel_enable_tv(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -829,8 +829,8 @@ intel_enable_tv(struct intel_encoder *encoder,
static void
intel_disable_tv(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -838,7 +838,7 @@ intel_disable_tv(struct intel_encoder *encoder,
I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
-static const struct tv_mode *intel_tv_mode_find(struct drm_connector_state *conn_state)
+static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
int format = conn_state->tv.mode;
@@ -976,8 +976,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
}
static void intel_tv_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1385,7 +1385,7 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
mode_ptr->vtotal = vactive_s + 33;
- tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+ tmp = mul_u32_u32(tv_mode->refresh, mode_ptr->vtotal);
tmp *= mode_ptr->htotal;
tmp = div_u64(tmp, 1000000);
mode_ptr->clock = (int) tmp;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 0178ba42a0e5..25bd162f38d2 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -22,22 +22,9 @@
*
*/
-#include "i915_drv.h"
#include "intel_uc.h"
-#include <linux/firmware.h>
-
-/* Cleans up uC firmware by releasing the firmware GEM obj.
- */
-static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&uc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
+#include "i915_drv.h"
+#include "i915_guc_submission.h"
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
@@ -63,234 +50,70 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
{
if (!HAS_GUC(dev_priv)) {
- if (i915.enable_guc_loading > 0 ||
- i915.enable_guc_submission > 0)
+ if (i915_modparams.enable_guc_loading > 0 ||
+ i915_modparams.enable_guc_submission > 0)
DRM_INFO("Ignoring GuC options, no hardware\n");
- i915.enable_guc_loading = 0;
- i915.enable_guc_submission = 0;
+ i915_modparams.enable_guc_loading = 0;
+ i915_modparams.enable_guc_submission = 0;
return;
}
/* A negative value means "use platform default" */
- if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ if (i915_modparams.enable_guc_loading < 0)
+ i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
/* Verify firmware version */
- if (i915.enable_guc_loading) {
+ if (i915_modparams.enable_guc_loading) {
if (HAS_HUC_UCODE(dev_priv))
intel_huc_select_fw(&dev_priv->huc);
- if (intel_guc_select_fw(&dev_priv->guc))
- i915.enable_guc_loading = 0;
+ if (intel_guc_fw_select(&dev_priv->guc))
+ i915_modparams.enable_guc_loading = 0;
}
/* Can't enable guc submission without guc loaded */
- if (!i915.enable_guc_loading)
- i915.enable_guc_submission = 0;
+ if (!i915_modparams.enable_guc_loading)
+ i915_modparams.enable_guc_submission = 0;
/* A negative value means "use platform default" */
- if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
-}
-
-static void gen8_guc_raise_irq(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+ if (i915_modparams.enable_guc_submission < 0)
+ i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
- struct intel_guc *guc = &dev_priv->guc;
-
- intel_guc_ct_init_early(&guc->ct);
-
- mutex_init(&guc->send_mutex);
- guc->send = intel_guc_send_nop;
- guc->notify = gen8_guc_raise_irq;
-}
-
-static void fetch_uc_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- if (!uc_fw->path)
- return;
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err)
- goto fail;
- if (!fw)
- goto fail;
-
- DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
- uc_fw->path, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_NOTE("Firmware header is missing\n");
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_NOTE("CSS header definition mismatch\n");
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
- DRM_NOTE("RSA key size is bad\n");
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* At least, it should have header, uCode and RSA. Size of all three. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_NOTE("Missing firmware components\n");
- goto fail;
- }
-
- /*
- * The GuC firmware image has the version number embedded at a
- * well-known offset within the firmware blob; note that major / minor
- * version are TWO bytes each (i.e. u16), although all pointers and
- * offsets are defined in terms of bytes (u8).
- */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = uc_fw->header_size + uc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > intel_guc_wopcm_size(dev_priv)) {
- DRM_ERROR("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
- break;
-
- default:
- DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
- err = -ENOEXEC;
- goto fail;
- }
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("Skipping %s firmware version check\n",
- intel_uc_fw_type_repr(uc_fw->type));
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
-
- DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
- uc_fw->obj);
-
- release_firmware(fw);
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- return;
-
-fail:
- DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
- uc_fw->path, err);
- DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, uc_fw->obj);
-
- release_firmware(fw); /* OK even if fw is NULL */
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+ intel_guc_init_early(&dev_priv->guc);
}
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
- fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
+ intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+ intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}
void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
- __intel_uc_fw_fini(&dev_priv->guc.fw);
- __intel_uc_fw_fini(&dev_priv->huc.fw);
+ intel_uc_fw_fini(&dev_priv->guc.fw);
+ intel_uc_fw_fini(&dev_priv->huc.fw);
}
-static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
-{
- GEM_BUG_ON(!guc->send_regs.base);
- GEM_BUG_ON(!guc->send_regs.count);
- GEM_BUG_ON(i >= guc->send_regs.count);
-
- return _MMIO(guc->send_regs.base + 4 * i);
-}
-
-static void guc_init_send_regs(struct intel_guc *guc)
+/**
+ * intel_uc_init_mmio - setup uC MMIO access
+ *
+ * @dev_priv: device private
+ *
+ * Setup minimal state necessary for MMIO accesses later in the
+ * initialization sequence.
+ */
+void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- enum forcewake_domains fw_domains = 0;
- unsigned int i;
-
- guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
-
- for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- guc_send_reg(guc, i),
- FW_REG_READ | FW_REG_WRITE);
- }
- guc->send_regs.fw_domains = fw_domains;
+ intel_guc_init_send_regs(&dev_priv->guc);
}
static void guc_capture_load_err_log(struct intel_guc *guc)
{
- if (!guc->log.vma || i915.guc_log_level < 0)
+ if (!guc->log.vma || i915_modparams.guc_log_level < 0)
return;
if (!guc->load_err_log)
@@ -309,8 +132,6 @@ static int guc_enable_communication(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- guc_init_send_regs(guc);
-
if (HAS_GUC_CT(dev_priv))
return intel_guc_enable_ct(guc);
@@ -333,7 +154,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
int ret, attempts;
- if (!i915.enable_guc_loading)
+ if (!i915_modparams.enable_guc_loading)
return 0;
guc_disable_communication(guc);
@@ -342,7 +163,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
- if (i915.enable_guc_submission) {
+ if (i915_modparams.enable_guc_submission) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -374,7 +195,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_submission;
intel_huc_init_hw(&dev_priv->huc);
- ret = intel_guc_init_hw(&dev_priv->guc);
+ intel_guc_init_params(guc);
+ ret = intel_guc_fw_upload(guc);
if (ret == 0 || ret != -EAGAIN)
break;
@@ -390,9 +212,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- intel_guc_auth_huc(dev_priv);
- if (i915.enable_guc_submission) {
- if (i915.guc_log_level >= 0)
+ intel_huc_auth(&dev_priv->huc);
+ if (i915_modparams.enable_guc_submission) {
+ if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
ret = i915_guc_submission_enable(dev_priv);
@@ -400,6 +222,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_interrupts;
}
+ dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
+ i915_modparams.enable_guc_submission ? "submission enabled" :
+ "loaded",
+ guc->fw.path,
+ guc->fw.major_ver_found, guc->fw.minor_ver_found);
+
return 0;
/*
@@ -417,24 +245,26 @@ err_interrupts:
err_log_capture:
guc_capture_load_err_log(guc);
err_submission:
- if (i915.enable_guc_submission)
+ if (i915_modparams.enable_guc_submission)
i915_guc_submission_fini(dev_priv);
err_guc:
i915_ggtt_disable_guc(dev_priv);
- DRM_ERROR("GuC init failed\n");
- if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
+ if (i915_modparams.enable_guc_loading > 1 ||
+ i915_modparams.enable_guc_submission > 1) {
+ DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
ret = -EIO;
- else
+ } else {
+ DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
ret = 0;
+ }
- if (i915.enable_guc_submission) {
- i915.enable_guc_submission = 0;
+ if (i915_modparams.enable_guc_submission) {
+ i915_modparams.enable_guc_submission = 0;
DRM_NOTE("Falling back from GuC submission to execlist mode\n");
}
- i915.enable_guc_loading = 0;
- DRM_NOTE("GuC firmware loading disabled\n");
+ i915_modparams.enable_guc_loading = 0;
return ret;
}
@@ -443,97 +273,18 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
{
guc_free_load_err_log(&dev_priv->guc);
- if (!i915.enable_guc_loading)
+ if (!i915_modparams.enable_guc_loading)
return;
- if (i915.enable_guc_submission)
+ if (i915_modparams.enable_guc_submission)
i915_guc_submission_disable(dev_priv);
guc_disable_communication(&dev_priv->guc);
- if (i915.enable_guc_submission) {
+ if (i915_modparams.enable_guc_submission) {
gen9_disable_guc_interrupts(dev_priv);
i915_guc_submission_fini(dev_priv);
}
i915_ggtt_disable_guc(dev_priv);
}
-
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
-{
- WARN(1, "Unexpected send: action=%#x\n", *action);
- return -ENODEV;
-}
-
-/*
- * This function implements the MMIO based host to GuC interface.
- */
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 status;
- int i;
- int ret;
-
- GEM_BUG_ON(!len);
- GEM_BUG_ON(len > guc->send_regs.count);
-
- /* If CT is available, we expect to use MMIO only during init/fini */
- GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
- *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
- *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
-
- mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
-
- for (i = 0; i < len; i++)
- I915_WRITE(guc_send_reg(guc, i), action[i]);
-
- POSTING_READ(guc_send_reg(guc, i - 1));
-
- intel_guc_notify(guc);
-
- /*
- * No GuC command should ever take longer than 10ms.
- * Fast commands should still complete in 10us.
- */
- ret = __intel_wait_for_register_fw(dev_priv,
- guc_send_reg(guc, 0),
- INTEL_GUC_RECV_MASK,
- INTEL_GUC_RECV_MASK,
- 10, 10, &status);
- if (status != INTEL_GUC_STATUS_SUCCESS) {
- /*
- * Either the GuC explicitly returned an error (which
- * we convert to -EIO here) or no response at all was
- * received within the timeout limit (-ETIMEDOUT)
- */
- if (ret != -ETIMEDOUT)
- ret = -EIO;
-
- DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
- " ret=%d status=0x%08X response=0x%08X\n",
- action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
- }
-
- intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
- mutex_unlock(&guc->send_mutex);
-
- return ret;
-}
-
-int intel_guc_sample_forcewake(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 action[2];
-
- action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
- /* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- action[1] = 0;
- else
- /* bit 0 and 1 are for Render and Media domain separately */
- action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 22ae52b17b0f..e18d3bb02088 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -24,256 +24,15 @@
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_
-#include "intel_guc_fwif.h"
-#include "i915_guc_reg.h"
-#include "intel_ringbuffer.h"
-#include "intel_guc_ct.h"
-#include "i915_vma.h"
+#include "intel_guc.h"
+#include "intel_huc.h"
-struct drm_i915_gem_request;
-
-/*
- * This structure primarily describes the GEM object shared with the GuC.
- * The specs sometimes refer to this object as a "GuC context", but we use
- * the term "client" to avoid confusion with hardware contexts. This
- * GEM object is held for the entire lifetime of our interaction with
- * the GuC, being allocated before the GuC is loaded with its firmware.
- * Because there's no way to update the address used by the GuC after
- * initialisation, the shared object must stay pinned into the GGTT as
- * long as the GuC is in use. We also keep the first page (only) mapped
- * into kernel address space, as it includes shared data that must be
- * updated on every request submission.
- *
- * The single GEM object described here is actually made up of several
- * separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process descriptor" which holds sequence data for
- * the doorbell, and one cacheline which actually *is* the doorbell; a
- * write to this will "ring the doorbell" (i.e. send an interrupt to the
- * GuC). The subsequent pages of the client object constitute the work
- * queue (a circular array of work items), again described in the process
- * descriptor. Work queue pages are mapped momentarily as required.
- *
- * We also keep a few statistics on failures. Ideally, these should all
- * be zero!
- * no_wq_space: times that the submission pre-check found no space was
- * available in the work queue (note, the queue is shared,
- * not per-engine). It is OK for this to be nonzero, but
- * it should not be huge!
- * b_fail: failed to ring the doorbell. This should never happen, unless
- * somehow the hardware misbehaves, or maybe if the GuC firmware
- * crashes? We probably need to reset the GPU to recover.
- * retcode: errno from last guc_submit()
- */
-struct i915_guc_client {
- struct i915_vma *vma;
- void *vaddr;
- struct i915_gem_context *owner;
- struct intel_guc *guc;
-
- uint32_t engines; /* bitmap of (host) engine ids */
- uint32_t priority;
- u32 stage_id;
- uint32_t proc_desc_offset;
-
- u16 doorbell_id;
- unsigned long doorbell_offset;
- u32 doorbell_cookie;
-
- spinlock_t wq_lock;
- uint32_t wq_offset;
- uint32_t wq_size;
- uint32_t wq_tail;
- uint32_t wq_rsvd;
- uint32_t no_wq_space;
-
- /* Per-engine counts of GuC submissions */
- uint64_t submissions[I915_NUM_ENGINES];
-};
-
-enum intel_uc_fw_status {
- INTEL_UC_FIRMWARE_FAIL = -1,
- INTEL_UC_FIRMWARE_NONE = 0,
- INTEL_UC_FIRMWARE_PENDING,
- INTEL_UC_FIRMWARE_SUCCESS
-};
-
-/* User-friendly representation of an enum */
-static inline
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
- switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
- }
- return "<invalid>";
-}
-
-enum intel_uc_fw_type {
- INTEL_UC_FW_TYPE_GUC,
- INTEL_UC_FW_TYPE_HUC
-};
-
-/* User-friendly representation of an enum */
-static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
-{
- switch (type) {
- case INTEL_UC_FW_TYPE_GUC:
- return "GuC";
- case INTEL_UC_FW_TYPE_HUC:
- return "HuC";
- }
- return "uC";
-}
-
-/*
- * This structure encapsulates all the data needed during the process
- * of fetching, caching, and loading the firmware image into the GuC.
- */
-struct intel_uc_fw {
- const char *path;
- size_t size;
- struct drm_i915_gem_object *obj;
- enum intel_uc_fw_status fetch_status;
- enum intel_uc_fw_status load_status;
-
- uint16_t major_ver_wanted;
- uint16_t minor_ver_wanted;
- uint16_t major_ver_found;
- uint16_t minor_ver_found;
-
- enum intel_uc_fw_type type;
- uint32_t header_size;
- uint32_t header_offset;
- uint32_t rsa_size;
- uint32_t rsa_offset;
- uint32_t ucode_size;
- uint32_t ucode_offset;
-};
-
-struct intel_guc_log {
- uint32_t flags;
- struct i915_vma *vma;
- /* The runtime stuff gets created only when GuC logging gets enabled */
- struct {
- void *buf_addr;
- struct workqueue_struct *flush_wq;
- struct work_struct flush_work;
- struct rchan *relay_chan;
- } runtime;
- /* logging related stats */
- u32 capture_miss_count;
- u32 flush_interrupt_count;
- u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
- u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
- u32 flush_count[GUC_MAX_LOG_BUFFER];
-};
-
-struct intel_guc {
- struct intel_uc_fw fw;
- struct intel_guc_log log;
- struct intel_guc_ct ct;
-
- /* Log snapshot if GuC errors during load */
- struct drm_i915_gem_object *load_err_log;
-
- /* intel_guc_recv interrupt related state */
- bool interrupts_enabled;
-
- struct i915_vma *ads_vma;
- struct i915_vma *stage_desc_pool;
- void *stage_desc_pool_vaddr;
- struct ida stage_ids;
-
- struct i915_guc_client *execbuf_client;
-
- DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
- uint32_t db_cacheline; /* Cyclic counter mod pagesize */
-
- /* GuC's FW specific registers used in MMIO send */
- struct {
- u32 base;
- unsigned int count;
- enum forcewake_domains fw_domains;
- } send_regs;
-
- /* To serialize the intel_guc_send actions */
- struct mutex send_mutex;
-
- /* GuC's FW specific send function */
- int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
-
- /* GuC's FW specific notify function */
- void (*notify)(struct intel_guc *guc);
-};
-
-struct intel_huc {
- /* Generic uC firmware management */
- struct intel_uc_fw fw;
-
- /* HuC-specific additions */
-};
-
-/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
+void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
-int intel_guc_sample_forcewake(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
-
-static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
-{
- return guc->send(guc, action, len);
-}
-
-static inline void intel_guc_notify(struct intel_guc *guc)
-{
- guc->notify(guc);
-}
-
-/* intel_guc_loader.c */
-int intel_guc_select_fw(struct intel_guc *guc);
-int intel_guc_init_hw(struct intel_guc *guc);
-int intel_guc_suspend(struct drm_i915_private *dev_priv);
-int intel_guc_resume(struct drm_i915_private *dev_priv);
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
-
-/* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_i915_private *dev_priv);
-int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
-void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
-void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-
-/* intel_guc_log.c */
-int intel_guc_log_create(struct intel_guc *guc);
-void intel_guc_log_destroy(struct intel_guc *guc);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
-void i915_guc_log_register(struct drm_i915_private *dev_priv);
-void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
-
-static inline u32 guc_ggtt_offset(struct i915_vma *vma)
-{
- u32 offset = i915_ggtt_offset(vma);
- GEM_BUG_ON(offset < GUC_WOPCM_TOP);
- GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
- return offset;
-}
-
-/* intel_huc.c */
-void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_init_hw(struct intel_huc *huc);
-void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
new file mode 100644
index 000000000000..973888e94cba
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <drm/drm_print.h>
+
+#include "intel_uc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ *
+ * @dev_priv: device private
+ * @uc_fw: uC firmware
+ *
+ * Fetch uC firmware into GEM obj.
+ */
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
+ if (!uc_fw->path)
+ return;
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
+ intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
+
+ /* Check the size of the blob before examining buffer contents */
+ if (fw->size < sizeof(struct uc_css_header)) {
+ DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ fw->size, sizeof(struct uc_css_header));
+ err = -ENODATA;
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Firmware bits always start from header */
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ css->key_size_dw - css->exponent_size_dw) *
+ sizeof(u32);
+
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+ DRM_WARN("%s: Mismatched firmware header definition\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /* then, uCode */
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* Header and uCode will be loaded to WOPCM */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_WARN("%s: Firmware is too large to fit in WOPCM\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ err = -E2BIG;
+ goto fail;
+ }
+
+ /* now RSA */
+ if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
+ err = -ENOEXEC;
+ goto fail;
+ }
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At least, it should have header, uCode and RSA. Size of all three. */
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (fw->size < size) {
+ DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
+ intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /*
+ * The GuC firmware image has the version number embedded at a
+ * well-known offset within the firmware blob; note that major / minor
+ * version are TWO bytes each (i.e. u16), although all pointers and
+ * offsets are defined in terms of bytes (u8).
+ */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ MISSING_CASE(uc_fw->type);
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+ if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+ DRM_NOTE("%s: Skipping firmware version check\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ release_firmware(fw);
+ return;
+
+fail:
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ DRM_INFO("%s: Firmware can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+}
+
+/**
+ * intel_uc_fw_upload - load uC firmware using custom loader
+ *
+ * @uc_fw: uC firmware
+ * @loader: custom uC firmware loader function
+ *
+ * Loads uC firmware using custom loader and updates internal flags.
+ */
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
+ int (*xfer)(struct intel_uc_fw *uc_fw,
+ struct i915_vma *vma))
+{
+ struct i915_vma *vma;
+ int err;
+
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
+ if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return -EIO;
+
+ uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+
+ /* Pin object with firmware */
+ err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ /* Call custom loader */
+ err = xfer(uc_fw, vma);
+
+ /*
+ * We keep the object pages for reuse during resume. But we can unpin it
+ * now that DMA has completed, so it doesn't continue to take up space.
+ */
+ i915_vma_unpin(vma);
+
+ if (err)
+ goto fail;
+
+ uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+
+ DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->path,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+
+ return 0;
+
+fail:
+ uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+
+ DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+
+ return err;
+}
+
+/**
+ * intel_uc_fw_fini - cleanup uC firmware
+ *
+ * @uc_fw: uC firmware
+ *
+ * Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = fetch_and_zero(&uc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
+/**
+ * intel_uc_fw_dump - dump information about uC firmware
+ * @uc_fw: uC firmware
+ * @p: the &drm_printer
+ *
+ * Pretty printer for uC firmware.
+ */
+void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p)
+{
+ drm_printf(p, "%s firmware: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ drm_printf(p, "\tstatus: fetch %s, load %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+ drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ drm_printf(p, "\theader: offset %u, size %u\n",
+ uc_fw->header_offset, uc_fw->header_size);
+ drm_printf(p, "\tuCode: offset %u, size %u\n",
+ uc_fw->ucode_offset, uc_fw->ucode_size);
+ drm_printf(p, "\tRSA: offset %u, size %u\n",
+ uc_fw->rsa_offset, uc_fw->rsa_size);
+}
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
new file mode 100644
index 000000000000..132903669391
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_UC_FW_H_
+#define _INTEL_UC_FW_H_
+
+struct drm_printer;
+struct drm_i915_private;
+struct i915_vma;
+
+/* Home of GuC, HuC and DMC firmwares */
+#define INTEL_UC_FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
+
+enum intel_uc_fw_status {
+ INTEL_UC_FIRMWARE_FAIL = -1,
+ INTEL_UC_FIRMWARE_NONE = 0,
+ INTEL_UC_FIRMWARE_PENDING,
+ INTEL_UC_FIRMWARE_SUCCESS
+};
+
+enum intel_uc_fw_type {
+ INTEL_UC_FW_TYPE_GUC,
+ INTEL_UC_FW_TYPE_HUC
+};
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the uC.
+ */
+struct intel_uc_fw {
+ const char *path;
+ size_t size;
+ struct drm_i915_gem_object *obj;
+ enum intel_uc_fw_status fetch_status;
+ enum intel_uc_fw_status load_status;
+
+ /*
+ * The firmware build process will generate a version header file with major and
+ * minor version defined. The versions are built into CSS header of firmware.
+ * i915 kernel driver set the minimal firmware version required per platform.
+ */
+ u16 major_ver_wanted;
+ u16 minor_ver_wanted;
+ u16 major_ver_found;
+ u16 minor_ver_found;
+
+ enum intel_uc_fw_type type;
+ u32 header_size;
+ u32 header_offset;
+ u32 rsa_size;
+ u32 rsa_offset;
+ u32 ucode_size;
+ u32 ucode_offset;
+};
+
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
+ case INTEL_UC_FIRMWARE_NONE:
+ return "NONE";
+ case INTEL_UC_FIRMWARE_PENDING:
+ return "PENDING";
+ case INTEL_UC_FIRMWARE_SUCCESS:
+ return "SUCCESS";
+ }
+ return "<invalid>";
+}
+
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
+
+static inline
+void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
+{
+ uc_fw->path = NULL;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+ uc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
+ uc_fw->type = type;
+}
+
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
+ int (*xfer)(struct intel_uc_fw *uc_fw,
+ struct i915_vma *vma));
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1d7b879cc68c..8c2ce81f01c2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -434,9 +434,16 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
}
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+}
+
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
- i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
+ i915_modparams.enable_rc6 =
+ sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_sanitize_gt_powersave(dev_priv);
@@ -490,6 +497,57 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
}
/**
+ * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
+ * @dev_priv: i915 device instance
+ *
+ * This function is a wrapper around intel_uncore_forcewake_get() to acquire
+ * the GT powerwell and in the process disable our debugging for the
+ * duration of userspace's bypass.
+ */
+void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->uncore.lock);
+ if (!dev_priv->uncore.user_forcewake.count++) {
+ intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+
+ /* Save and disable mmio debugging for the user bypass */
+ dev_priv->uncore.user_forcewake.saved_mmio_check =
+ dev_priv->uncore.unclaimed_mmio_check;
+ dev_priv->uncore.user_forcewake.saved_mmio_debug =
+ i915_modparams.mmio_debug;
+
+ dev_priv->uncore.unclaimed_mmio_check = 0;
+ i915_modparams.mmio_debug = 0;
+ }
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+/**
+ * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
+ * @dev_priv: i915 device instance
+ *
+ * This function complements intel_uncore_forcewake_user_get() and releases
+ * the GT powerwell taken on behalf of the userspace bypass.
+ */
+void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->uncore.lock);
+ if (!--dev_priv->uncore.user_forcewake.count) {
+ if (intel_uncore_unclaimed_mmio(dev_priv))
+ dev_info(dev_priv->drm.dev,
+ "Invalid mmio detected during user access\n");
+
+ dev_priv->uncore.unclaimed_mmio_check =
+ dev_priv->uncore.user_forcewake.saved_mmio_check;
+ i915_modparams.mmio_debug =
+ dev_priv->uncore.user_forcewake.saved_mmio_debug;
+
+ intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ }
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+/**
* intel_uncore_forcewake_get__locked - grab forcewake domain references
* @dev_priv: i915 device instance
* @fw_domains: forcewake domains to get reference on
@@ -574,7 +632,23 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- WARN_ON(dev_priv->uncore.fw_domains_active);
+ WARN(dev_priv->uncore.fw_domains_active,
+ "Expected all fw_domains to be inactive, but %08x are still on\n",
+ dev_priv->uncore.fw_domains_active);
+}
+
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ assert_rpm_wakelock_held(dev_priv);
+
+ fw_domains &= dev_priv->uncore.fw_domains;
+ WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+ "Expected %08x fw_domains to be active, but %08x are off\n",
+ fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -790,7 +864,8 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
"Unclaimed %s register 0x%x\n",
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
- i915.mmio_debug--; /* Only report the first N failures */
+ /* Only report the first N failures */
+ i915_modparams.mmio_debug--;
}
static inline void
@@ -799,7 +874,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const bool read,
const bool before)
{
- if (likely(!i915.mmio_debug))
+ if (likely(!i915_modparams.mmio_debug))
return;
__unclaimed_reg_debug(dev_priv, reg, read, before);
@@ -1171,8 +1246,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* bus, which will be busy after this notification, leading to:
* "render: timed out waiting for forcewake ack request."
* errors.
+ *
+ * The notifier is unregistered during intel_runtime_suspend(),
+ * so it's ok to access the HW here without holding a RPM
+ * wake reference -> disable wakeref asserts for the time of
+ * the access.
*/
+ disable_rpm_wakeref_asserts(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ enable_rpm_wakeref_asserts(dev_priv);
break;
case MBI_PMIC_BUS_ACCESS_END:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -1241,102 +1323,104 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_reset(dev_priv, false);
}
-#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
-
-static const struct register_whitelist {
- i915_reg_t offset_ldw, offset_udw;
- uint32_t size;
- /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
- uint32_t gen_bitmask;
-} whitelist[] = {
- { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
- .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
- .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
-};
+static const struct reg_whitelist {
+ i915_reg_t offset_ldw;
+ i915_reg_t offset_udw;
+ u16 gen_mask;
+ u8 size;
+} reg_read_whitelist[] = { {
+ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
+ .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
+ .gen_mask = INTEL_GEN_MASK(4, 10),
+ .size = 8
+} };
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reg_read *reg = data;
- struct register_whitelist const *entry = whitelist;
- unsigned size;
- i915_reg_t offset_ldw, offset_udw;
- int i, ret = 0;
-
- for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
+ struct reg_whitelist const *entry;
+ unsigned int flags;
+ int remain;
+ int ret = 0;
+
+ entry = reg_read_whitelist;
+ remain = ARRAY_SIZE(reg_read_whitelist);
+ while (remain) {
+ u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
+
+ GEM_BUG_ON(!is_power_of_2(entry->size));
+ GEM_BUG_ON(entry->size > 8);
+ GEM_BUG_ON(entry_offset & (entry->size - 1));
+
+ if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
+ entry_offset == (reg->offset & -entry->size))
break;
+ entry++;
+ remain--;
}
- if (i == ARRAY_SIZE(whitelist))
+ if (!remain)
return -EINVAL;
- /* We use the low bits to encode extra flags as the register should
- * be naturally aligned (and those that are not so aligned merely
- * limit the available flags for that register).
- */
- offset_ldw = entry->offset_ldw;
- offset_udw = entry->offset_udw;
- size = entry->size;
- size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
+ flags = reg->offset & (entry->size - 1);
intel_runtime_pm_get(dev_priv);
-
- switch (size) {
- case 8 | 1:
- reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
- break;
- case 8:
- reg->val = I915_READ64(offset_ldw);
- break;
- case 4:
- reg->val = I915_READ(offset_ldw);
- break;
- case 2:
- reg->val = I915_READ16(offset_ldw);
- break;
- case 1:
- reg->val = I915_READ8(offset_ldw);
- break;
- default:
+ if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
+ reg->val = I915_READ64_2x32(entry->offset_ldw,
+ entry->offset_udw);
+ else if (entry->size == 8 && flags == 0)
+ reg->val = I915_READ64(entry->offset_ldw);
+ else if (entry->size == 4 && flags == 0)
+ reg->val = I915_READ(entry->offset_ldw);
+ else if (entry->size == 2 && flags == 0)
+ reg->val = I915_READ16(entry->offset_ldw);
+ else if (entry->size == 1 && flags == 0)
+ reg->val = I915_READ8(entry->offset_ldw);
+ else
ret = -EINVAL;
- goto out;
- }
-
-out:
intel_runtime_pm_put(dev_priv);
+
return ret;
}
-static void gen3_stop_rings(struct drm_i915_private *dev_priv)
+static void gen3_stop_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+ const i915_reg_t mode = RING_MI_MODE(base);
+
+ I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+ if (intel_wait_for_register_fw(dev_priv,
+ mode,
+ MODE_IDLE,
+ MODE_IDLE,
+ 500))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
+ engine->name);
+
+ I915_WRITE_FW(RING_CTL(base), 0);
+ I915_WRITE_FW(RING_HEAD(base), 0);
+ I915_WRITE_FW(RING_TAIL(base), 0);
+
+ /* Check acts as a post */
+ if (I915_READ_FW(RING_HEAD(base)) != 0)
+ DRM_DEBUG_DRIVER("%s: ring head not parked\n",
+ engine->name);
+}
+
+static void i915_stop_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
-
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register_fw(dev_priv,
- mode,
- MODE_IDLE,
- MODE_IDLE,
- 500))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
- engine->name);
-
- I915_WRITE_FW(RING_CTL(base), 0);
- I915_WRITE_FW(RING_HEAD(base), 0);
- I915_WRITE_FW(RING_TAIL(base), 0);
-
- /* Check acts as a post */
- if (I915_READ_FW(RING_HEAD(base)) != 0)
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
- engine->name);
- }
+ if (INTEL_GEN(dev_priv) < 3)
+ return;
+
+ for_each_engine_masked(engine, dev_priv, engine_mask, id)
+ gen3_stop_engine(engine);
}
static bool i915_reset_complete(struct pci_dev *pdev)
@@ -1371,9 +1455,6 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- /* Stop engines before we reset; see g4x_do_reset() below for why. */
- gen3_stop_rings(dev_priv);
-
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for(g4x_reset_complete(pdev), 500);
}
@@ -1388,12 +1469,6 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- /* We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk).
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
- */
- gen3_stop_rings(dev_priv);
-
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for(g4x_reset_complete(pdev), 500);
@@ -1662,7 +1737,7 @@ typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
- if (!i915.reset)
+ if (!i915_modparams.reset)
return NULL;
if (INTEL_INFO(dev_priv)->gen >= 8)
@@ -1683,22 +1758,34 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- reset_func reset;
+ reset_func reset = intel_get_gpu_reset(dev_priv);
int retry;
int ret;
might_sleep();
- reset = intel_get_gpu_reset(dev_priv);
- if (reset == NULL)
- return -ENODEV;
-
/* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
for (retry = 0; retry < 3; retry++) {
- ret = reset(dev_priv, engine_mask);
+
+ /* We stop engines, otherwise we might get failed reset and a
+ * dead gpu (on elk). Also as modern gpu as kbl can suffer
+ * from system hang if batchbuffer is progressing when
+ * the reset is issued, regardless of READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ i915_stop_engines(dev_priv, engine_mask);
+
+ ret = -ENODEV;
+ if (reset)
+ ret = reset(dev_priv, engine_mask);
if (ret != -ETIMEDOUT)
break;
@@ -1722,7 +1809,7 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
return (dev_priv->info.has_reset_engine &&
!dev_priv->guc.execbuf_client &&
- i915.reset >= 2);
+ i915_modparams.reset >= 2);
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1747,7 +1834,7 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
- if (unlikely(i915.mmio_debug ||
+ if (unlikely(i915_modparams.mmio_debug ||
dev_priv->uncore.unclaimed_mmio_check <= 0))
return false;
@@ -1755,7 +1842,7 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
- i915.mmio_debug++;
+ i915_modparams.mmio_debug++;
dev_priv->uncore.unclaimed_mmio_check--;
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 5f90278da461..9ce079b5dd0d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -25,6 +25,12 @@
#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/hrtimer.h>
+
+#include "i915_reg.h"
+
struct drm_i915_private;
enum forcewake_domain_id {
@@ -102,6 +108,13 @@ struct intel_uncore {
i915_reg_t reg_ack;
} fw_domain[FW_DOMAIN_ID_COUNT];
+ struct {
+ unsigned int count;
+
+ int saved_mmio_check;
+ int saved_mmio_debug;
+ } user_forcewake;
+
int unclaimed_mmio_check;
};
@@ -121,9 +134,12 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
void intel_uncore_fini(struct drm_i915_private *dev_priv);
void intel_uncore_suspend(struct drm_i915_private *dev_priv);
void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
enum forcewake_domains
@@ -144,6 +160,9 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
+void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
+void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+
int intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index a92e7762f596..f225c288a121 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -149,16 +149,19 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
- u8 rsvd7:1;
+ u8 underscan_vga_timings:1;
u8 display_clock_mode:1;
- u8 rsvd8:1; /* finish byte */
+ u8 vbios_hotplug_support:1;
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:1;
+ u8 rotate_180:1; /* 181 */
u8 fdi_rx_polarity_inverted:1;
- u8 rsvd10:4; /* finish byte */
+ u8 vbios_extended_mode:1; /* 160 */
+ u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */
+ u8 panel_best_fit_timing:1; /* 160 */
+ u8 ignore_strap_state:1; /* 160 */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -167,9 +170,10 @@ struct bdb_general_features {
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 int_efp_support:1;
- u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_enable:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
- u8 rsvd11:3; /* finish byte */
+ u8 dp_ssc_dongle_supported:1;
+ u8 rsvd11:2; /* finish byte */
} __packed;
/* pre-915 */
@@ -206,6 +210,56 @@ struct bdb_general_features {
#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
+#define DEVICE_TYPE_eDP 0x78C6
+
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the
+ * system, the other bits may or may not be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_DUAL_CHANNEL | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
#define DEVICE_CFG_NONE 0x00
#define DEVICE_CFG_12BIT_DVOB 0x01
#define DEVICE_CFG_12BIT_DVOC 0x02
@@ -226,77 +280,134 @@ struct bdb_general_features {
#define DEVICE_WIRE_DVOB_MASTER 0x0d
#define DEVICE_WIRE_DVOC_MASTER 0x0e
+/* dvo_port pre BDB 155 */
#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
+/* dvo_port BDB 155+ */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+#define DVO_PORT_DPE 11 /* 193 */
+#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_MIPIA 21 /* 171 */
+#define DVO_PORT_MIPIB 22 /* 171 */
+#define DVO_PORT_MIPIC 23 /* 171 */
+#define DVO_PORT_MIPID 24 /* 171 */
+
+#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
+
+/* DDC Bus DDI Type 155+ */
+enum vbt_gmbus_ddi {
+ DDC_BUS_DDI_B = 0x1,
+ DDC_BUS_DDI_C,
+ DDC_BUS_DDI_D,
+ DDC_BUS_DDI_F,
+};
+
/*
- * We used to keep this struct but without any version control. We should avoid
- * using it in the future, but it should be safe to keep using it in the old
- * code. Do not change; we rely on its size.
+ * The child device config, aka the display device data structure, provides a
+ * description of a port and its configuration on the platform.
+ *
+ * The child device config size has been increased, and fields have been added
+ * and their meaning has changed over time. Care must be taken when accessing
+ * basically any of the fields to ensure the correct interpretation for the BDB
+ * version in question.
+ *
+ * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
+ * space for the full structure below, and initialize the tail not actually
+ * present in VBT to zeros. Accessing those fields is fine, as long as the
+ * default zero is taken into account, again according to the BDB version.
+ *
+ * BDB versions 155 and below are considered legacy, and version 155 seems to be
+ * a baseline for some of the VBT documentation. When adding new fields, please
+ * include the BDB version when the field was added, if it's above that.
*/
-struct old_child_dev_config {
+struct child_device_config {
u16 handle;
- u16 device_type;
- u8 device_id[10]; /* ascii string */
- u16 addin_offset;
- u8 dvo_port; /* See Device_PORT_* above */
- u8 i2c_pin;
- u8 slave_addr;
- u8 ddc_pin;
- u16 edid_ptr;
- u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 dvo2_port;
- u8 i2c2_pin;
- u8 slave2_addr;
- u8 ddc2_pin;
- u8 capabilities;
- u8 dvo_wiring;/* See DEVICE_WIRE_* above */
- u8 dvo2_wiring;
- u16 extended_type;
- u8 dvo_function;
-} __packed;
+ u16 device_type; /* See DEVICE_TYPE_* above */
+
+ union {
+ u8 device_id[10]; /* ascii string */
+ struct {
+ u8 i2c_speed;
+ u8 dp_onboard_redriver; /* 158 */
+ u8 dp_ondock_redriver; /* 158 */
+ u8 hdmi_level_shifter_value:4; /* 169 */
+ u8 hdmi_max_data_rate:4; /* 204 */
+ u16 dtd_buf_ptr; /* 161 */
+ u8 edidless_efp:1; /* 161 */
+ u8 compression_enable:1; /* 198 */
+ u8 compression_method:1; /* 198 */
+ u8 ganged_edp:1; /* 202 */
+ u8 reserved0:4;
+ u8 compression_structure_index:4; /* 198 */
+ u8 reserved1:4;
+ u8 slave_port; /* 202 */
+ u8 reserved2;
+ } __packed;
+ } __packed;
-/* This one contains field offsets that are known to be common for all BDB
- * versions. Notice that the meaning of the contents contents may still change,
- * but at least the offsets are consistent. */
-
-struct common_child_dev_config {
- u16 handle;
- u16 device_type;
- u8 not_common1[12];
- u8 dvo_port;
- u8 not_common2[2];
+ u16 addin_offset;
+ u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 efp_routed:1;
- u8 lane_reversal:1;
- u8 lspcon:1;
- u8 iboost:1;
- u8 hpd_invert:1;
- u8 flag_reserved:3;
- u8 hdmi_support:1;
- u8 dp_support:1;
- u8 tmds_support:1;
- u8 support_reserved:5;
- u8 aux_channel;
- u8 not_common3[11];
- u8 iboost_level;
-} __packed;
+ union {
+ struct {
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ } __packed;
+ struct {
+ u8 efp_routed:1; /* 158 */
+ u8 lane_reversal:1; /* 184 */
+ u8 lspcon:1; /* 192 */
+ u8 iboost:1; /* 196 */
+ u8 hpd_invert:1; /* 196 */
+ u8 flag_reserved:3;
+ u8 hdmi_support:1; /* 158 */
+ u8 dp_support:1; /* 158 */
+ u8 tmds_support:1; /* 158 */
+ u8 support_reserved:5;
+ u8 aux_channel;
+ u8 dongle_detect;
+ } __packed;
+ } __packed;
+
+ u8 pipe_cap:2;
+ u8 sdvo_stall:1; /* 158 */
+ u8 hpd_status:2;
+ u8 integrated_encoder:1;
+ u8 capabilities_reserved:2;
+ u8 dvo_wiring; /* See DEVICE_WIRE_* above */
+
+ union {
+ u8 dvo2_wiring;
+ u8 mipi_bridge_type; /* 171 */
+ } __packed;
-/* This field changes depending on the BDB version, so the most reliable way to
- * read it is by checking the BDB version and reading the raw pointer. */
-union child_device_config {
- /* This one is safe to be used anywhere, but the code should still check
- * the BDB version. */
- u8 raw[33];
- /* This one should only be kept for legacy code. */
- struct old_child_dev_config old;
- /* This one should also be safe to use anywhere, even without version
- * checks. */
- struct common_child_dev_config common;
+ u16 extended_type;
+ u8 dvo_function;
+ u8 dp_usb_type_c:1; /* 195 */
+ u8 flags2_reserved:7; /* 195 */
+ u8 dp_gpio_index; /* 195 */
+ u16 dp_gpio_pin_num; /* 195 */
+ u8 dp_iboost_level:4; /* 196 */
+ u8 hdmi_iboost_level:4; /* 196 */
} __packed;
struct bdb_general_definitions {
@@ -585,23 +696,38 @@ struct bdb_driver_features {
#define EDP_VSWING_1_2V 3
-struct edp_link_params {
+struct edp_fast_link_params {
u8 rate:4;
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
} __packed;
+struct edp_pwm_delays {
+ u16 pwm_on_to_backlight_enable;
+ u16 backlight_disable_to_pwm_off;
+} __packed;
+
+struct edp_full_link_params {
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
- struct edp_link_params link_params[16];
+ struct edp_fast_link_params fast_link_params[16];
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature;
- u16 edp_t3_optimization;
- u64 edp_vswing_preemph; /* v173 */
+ u16 edp_s3d_feature; /* 162 */
+ u16 edp_t3_optimization; /* 165 */
+ u64 edp_vswing_preemph; /* 173 */
+ u16 fast_link_training; /* 182 */
+ u16 dpcd_600h_write_required; /* 185 */
+ struct edp_pwm_delays pwm_delays[16]; /* 186 */
+ u16 full_link_params_provided; /* 199 */
+ struct edp_full_link_params full_link_params[16]; /* 199 */
} __packed;
struct psr_table {
@@ -745,81 +871,6 @@ struct bdb_psr {
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
-/* Add the device class for LFP, TV, HDMI */
-#define DEVICE_TYPE_INT_LFP 0x1022
-#define DEVICE_TYPE_INT_TV 0x1009
-#define DEVICE_TYPE_HDMI 0x60D2
-#define DEVICE_TYPE_DP 0x68C6
-#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
-#define DEVICE_TYPE_eDP 0x78C6
-
-#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
-#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
-#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
-#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
-#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
-#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
-#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
-#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
-#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
-#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
-#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
-#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
-#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
-#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
-#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
-
-/*
- * Bits we care about when checking for DEVICE_TYPE_eDP
- * Depending on the system, the other bits may or may not
- * be set for eDP outputs.
- */
-#define DEVICE_TYPE_eDP_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_DUAL_CHANNEL | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_DIGITAL_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-/* define the DVO port for HDMI output type */
-#define DVO_B 1
-#define DVO_C 2
-#define DVO_D 3
-
-/* Possible values for the "DVO Port" field for versions >= 155: */
-#define DVO_PORT_HDMIA 0
-#define DVO_PORT_HDMIB 1
-#define DVO_PORT_HDMIC 2
-#define DVO_PORT_HDMID 3
-#define DVO_PORT_LVDS 4
-#define DVO_PORT_TV 5
-#define DVO_PORT_CRT 6
-#define DVO_PORT_DPB 7
-#define DVO_PORT_DPC 8
-#define DVO_PORT_DPD 9
-#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11
-#define DVO_PORT_HDMIE 12
-#define DVO_PORT_MIPIA 21
-#define DVO_PORT_MIPIB 22
-#define DVO_PORT_MIPIC 23
-#define DVO_PORT_MIPID 24
-
/* Block 52 contains MIPI configuration block
* 6 * bdb_mipi_config, followed by 6 pps data block
* block below
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index c5c7e8efbdd3..a2632df39173 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -37,8 +37,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
kfree(pages);
}
-static struct sg_table *
-huge_get_pages(struct drm_i915_gem_object *obj)
+static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
@@ -49,11 +48,11 @@ huge_get_pages(struct drm_i915_gem_object *obj)
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
if (sg_alloc_table(pages, npages, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
sg = pages->sgl;
@@ -81,11 +80,14 @@ huge_get_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, pages))
goto err;
- return pages;
+ __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
+
+ return 0;
err:
huge_free_pages(obj, pages);
- return ERR_PTR(-ENOMEM);
+
+ return -ENOMEM;
#undef GFP
}
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
new file mode 100644
index 000000000000..5cc8101bb2b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -0,0 +1,1734 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include <linux/prime_numbers.h>
+
+#include "mock_drm.h"
+
+static const unsigned int page_sizes[] = {
+ I915_GTT_PAGE_SIZE_2M,
+ I915_GTT_PAGE_SIZE_64K,
+ I915_GTT_PAGE_SIZE_4K,
+};
+
+static unsigned int get_largest_page_size(struct drm_i915_private *i915,
+ u64 rem)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
+ return page_size;
+ }
+
+ return 0;
+}
+
+static void huge_pages_free_pages(struct sg_table *st)
+{
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+ if (sg_page(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+ }
+
+ sg_free_table(st);
+ kfree(st);
+}
+
+static int get_huge_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+ unsigned int page_mask = obj->mm.page_mask;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ u64 rem;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ rem = obj->base.size;
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+
+ /*
+ * Our goal here is simple, we want to greedily fill the object from
+ * largest to smallest page-size, while ensuring that we use *every*
+ * page-size as per the given page-mask.
+ */
+ do {
+ unsigned int bit = ilog2(page_mask);
+ unsigned int page_size = BIT(bit);
+ int order = get_order(page_size);
+
+ do {
+ struct page *page;
+
+ GEM_BUG_ON(order >= MAX_ORDER);
+ page = alloc_pages(GFP | __GFP_ZERO, order);
+ if (!page)
+ goto err;
+
+ sg_set_page(sg, page, page_size, 0);
+ sg_page_sizes |= page_size;
+ st->nents++;
+
+ rem -= page_size;
+ if (!rem) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while ((rem - ((page_size-1) & page_mask)) >= page_size);
+
+ page_mask &= (page_size-1);
+ } while (page_mask);
+
+ if (i915_gem_gtt_prepare_pages(obj, st))
+ goto err;
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err:
+ sg_set_page(sg, NULL, 0, 0);
+ sg_mark_end(sg);
+ huge_pages_free_pages(st);
+
+ return -ENOMEM;
+}
+
+static void put_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ huge_pages_free_pages(pages);
+
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops huge_page_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = get_huge_pages,
+ .put_pages = put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+huge_pages_object(struct drm_i915_private *i915,
+ u64 size,
+ unsigned int page_mask)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &huge_page_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ obj->mm.page_mask = page_mask;
+
+ return obj;
+}
+
+static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const u64 max_len = rounddown_pow_of_two(UINT_MAX);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ u64 rem;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /* Use optimal page sized chunks to fill in the sg table */
+ rem = obj->base.size;
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ do {
+ unsigned int page_size = get_largest_page_size(i915, rem);
+ unsigned int len = min(page_size * div_u64(rem, page_size),
+ max_len);
+
+ GEM_BUG_ON(!page_size);
+
+ sg->offset = 0;
+ sg->length = len;
+ sg_dma_len(sg) = len;
+ sg_dma_address(sg) = page_size;
+
+ sg_page_sizes |= len;
+
+ st->nents++;
+
+ rem -= len;
+ if (!rem) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = sg_next(sg);
+ } while (1);
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+}
+
+static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int page_size;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, 1, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ sg = st->sgl;
+ st->nents = 1;
+
+ page_size = get_largest_page_size(i915, obj->base.size);
+ GEM_BUG_ON(!page_size);
+
+ sg->offset = 0;
+ sg->length = obj->base.size;
+ sg_dma_len(sg) = obj->base.size;
+ sg_dma_address(sg) = page_size;
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
+#undef GFP
+}
+
+static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ fake_free_huge_pages(obj, pages);
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_huge_pages,
+ .put_pages = fake_put_huge_pages,
+};
+
+static const struct drm_i915_gem_object_ops fake_ops_single = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_huge_pages_single,
+ .put_pages = fake_put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+ if (size >> PAGE_SHIFT > UINT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+
+ if (single)
+ i915_gem_object_init(obj, &fake_ops_single);
+ else
+ i915_gem_object_init(obj, &fake_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+}
+
+static int igt_check_page_sizes(struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err = 0;
+
+ if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
+ pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
+ vma->page_sizes.sg & ~supported, supported);
+ err = -EINVAL;
+ }
+
+ if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
+ pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
+ vma->page_sizes.gtt & ~supported, supported);
+ err = -EINVAL;
+ }
+
+ if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
+ pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
+ vma->page_sizes.phys, obj->mm.page_sizes.phys);
+ err = -EINVAL;
+ }
+
+ if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
+ pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
+ vma->page_sizes.sg, obj->mm.page_sizes.sg);
+ err = -EINVAL;
+ }
+
+ if (obj->mm.page_sizes.gtt) {
+ pr_err("obj->page_sizes.gtt(%u) should never be set\n",
+ obj->mm.page_sizes.gtt);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int igt_mock_exhaust_device_supported_pages(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int i, j, single;
+ int err;
+
+ /*
+ * Sanity check creating objects with every valid page support
+ * combination for our mock device.
+ */
+
+ for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
+ unsigned int combination = 0;
+
+ for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
+ if (i & BIT(j))
+ combination |= page_sizes[j];
+ }
+
+ mkwrite_device_info(i915)->page_sizes = combination;
+
+ for (single = 0; single <= 1; ++single) {
+ obj = fake_huge_pages_object(i915, combination, !!single);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_device;
+ }
+
+ if (obj->base.size != combination) {
+ pr_err("obj->base.size=%zu, expected=%u\n",
+ obj->base.size, combination);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.sg != combination) {
+ pr_err("page_sizes.sg=%u, expected=%u\n",
+ vma->page_sizes.sg, combination);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_put(obj);
+
+ if (err)
+ goto out_device;
+ }
+ }
+
+ goto out_device;
+
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_device:
+ mkwrite_device_info(i915)->page_sizes = saved_mask;
+
+ return err;
+}
+
+static int igt_mock_ppgtt_misaligned_dma(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj;
+ int bit;
+ int err;
+
+ /*
+ * Sanity check dma misalignment for huge pages -- the dma addresses we
+ * insert into the paging structures need to always respect the page
+ * size alignment.
+ */
+
+ bit = ilog2(I915_GTT_PAGE_SIZE_64K);
+
+ for_each_set_bit_from(bit, &supported,
+ ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ IGT_TIMEOUT(end_time);
+ unsigned int page_size = BIT(bit);
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int offset;
+ unsigned int size =
+ round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
+ struct i915_vma *vma;
+
+ obj = fake_huge_pages_object(i915, size, true);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != size) {
+ pr_err("obj->base.size=%zu, expected=%u\n",
+ obj->base.size, size);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ /* Force the page size for this object */
+ obj->mm.page_sizes.sg = page_size;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("page_sizes.gtt=%u, expected %u\n",
+ vma->page_sizes.gtt, page_size);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ /*
+ * Try all the other valid offsets until the next
+ * boundary -- should always fall back to using 4K
+ * pages.
+ */
+ for (offset = 4096; offset < page_size; offset += 4096) {
+ err = i915_vma_unbind(vma);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags | offset);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
+ pr_err("page_sizes.gtt=%u, expected %lu\n",
+ vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out at offset %x with page-size %x\n",
+ __func__, offset, page_size))
+ break;
+ }
+
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static void close_object_list(struct list_head *objects,
+ struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (!IS_ERR(vma))
+ i915_vma_close(vma);
+
+ list_del(&obj->st_link);
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static int igt_mock_ppgtt_huge_fill(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ unsigned long page_num;
+ bool single = false;
+ LIST_HEAD(objects);
+ IGT_TIMEOUT(end_time);
+ int err = -ENODEV;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ struct drm_i915_gem_object *obj;
+ u64 size = page_num << PAGE_SHIFT;
+ struct i915_vma *vma;
+ unsigned int expected_gtt = 0;
+ int i;
+
+ obj = fake_huge_pages_object(i915, size, single);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ if (obj->base.size != size) {
+ pr_err("obj->base.size=%zd, expected=%llu\n",
+ obj->base.size, size);
+ i915_gem_object_put(obj);
+ err = -EINVAL;
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ break;
+
+ err = igt_check_page_sizes(vma);
+ if (err) {
+ i915_vma_unpin(vma);
+ break;
+ }
+
+ /*
+ * Figure out the expected gtt page size knowing that we go from
+ * largest to smallest page size sg chunks, and that we align to
+ * the largest page size.
+ */
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZES(i915, page_size) &&
+ size >= page_size) {
+ expected_gtt |= page_size;
+ size &= page_size-1;
+ }
+ }
+
+ GEM_BUG_ON(!expected_gtt);
+ GEM_BUG_ON(size);
+
+ if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
+ expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
+
+ i915_vma_unpin(vma);
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.start(%llx) not aligned to 2M\n",
+ vma->node.start);
+ err = -EINVAL;
+ break;
+ }
+
+ if (!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.size(%llx) not aligned to 2M\n",
+ vma->node.size);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (vma->page_sizes.gtt != expected_gtt) {
+ pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
+ vma->page_sizes.gtt, expected_gtt,
+ obj->base.size, yesno(!!single));
+ err = -EINVAL;
+ break;
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out at size %zd\n",
+ __func__, obj->base.size))
+ break;
+
+ single = !single;
+ }
+
+ close_object_list(&objects, ppgtt);
+
+ if (err == -ENOMEM || err == -ENOSPC)
+ err = 0;
+
+ return err;
+}
+
+static int igt_mock_ppgtt_64K(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_gem_object *obj;
+ const struct object_info {
+ unsigned int size;
+ unsigned int gtt;
+ unsigned int offset;
+ } objects[] = {
+ /* Cases with forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_64K + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_64K - SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M - SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M - SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ /* Try without any forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .offset = SZ_2M,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ {
+ .size = SZ_128K,
+ .offset = SZ_2M - SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ };
+ struct i915_vma *vma;
+ int i, single;
+ int err;
+
+ /*
+ * Sanity check some of the trickiness with 64K pages -- either we can
+ * safely mark the whole page-table(2M block) as 64K, or we have to
+ * always fallback to 4K.
+ */
+
+ if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(objects); ++i) {
+ unsigned int size = objects[i].size;
+ unsigned int expected_gtt = objects[i].gtt;
+ unsigned int offset = objects[i].offset;
+ unsigned int flags = PIN_USER;
+
+ for (single = 0; single <= 1; single++) {
+ obj = fake_huge_pages_object(i915, size, !!single);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_object_put;
+
+ /*
+ * Disable 2M pages -- We only want to use 64K/4K pages
+ * for this test.
+ */
+ obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object_unpin;
+ }
+
+ if (offset)
+ flags |= PIN_OFFSET_FIXED | offset;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_vma_close;
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.start(%llx) not aligned to 2M\n",
+ vma->node.start);
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+
+ if (!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.size(%llx) not aligned to 2M\n",
+ vma->node.size);
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+ }
+
+ if (vma->page_sizes.gtt != expected_gtt) {
+ pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
+ vma->page_sizes.gtt, expected_gtt, i,
+ yesno(!!single));
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ return 0;
+
+out_vma_unpin:
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+out_object_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_object_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static struct i915_vma *
+gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
+{
+ struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned int count = vma->size >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *batch;
+ unsigned int size;
+ u32 *cmd;
+ int n;
+ int err;
+
+ size = (1 + 4 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = val;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? 1 << 22 : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = val;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = offset;
+ *cmd++ = val;
+ }
+
+ offset += PAGE_SIZE;
+ }
+
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ batch = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err;
+ }
+
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return batch;
+
+err:
+ i915_gem_object_put(obj);
+
+ return ERR_PTR(err);
+}
+
+static int gpu_write(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 dword,
+ u32 value)
+{
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *batch;
+ int flags = 0;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
+
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ batch = gpu_write_dw(vma, dword * sizeof(u32), value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_request;
+ }
+
+ i915_vma_move_to_active(batch, rq, 0);
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto err_request;
+
+ err = i915_switch_context(rq);
+ if (err)
+ goto err_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
+ reservation_object_lock(vma->resv, NULL);
+ reservation_object_add_excl_fence(vma->resv, &rq->fence);
+ reservation_object_unlock(vma->resv);
+
+err_request:
+ __i915_add_request(rq, err == 0);
+
+ return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned int needs_flush;
+ unsigned long n;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+
+ if (needs_flush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(ptr, PAGE_SIZE);
+
+ if (ptr[dword] != val) {
+ pr_err("n=%lu ptr[%u]=%u, val=%u\n",
+ n, dword, ptr[dword], val);
+ kunmap_atomic(ptr);
+ err = -EINVAL;
+ break;
+ }
+
+ kunmap_atomic(ptr);
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+
+ return err;
+}
+
+static int igt_write_huge(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct intel_engine_cs *engine;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int max_page_size;
+ unsigned int id;
+ u64 max;
+ u64 num;
+ u64 size;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ size = obj->base.size;
+ if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+
+ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max = div_u64((vm->total - size), max_page_size);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+
+ if (!intel_engine_can_store_dword(engine)) {
+ pr_info("store-dword-imm not supported on engine=%u\n",
+ id);
+ continue;
+ }
+
+ /*
+ * Try various offsets until we timeout -- we want to avoid
+ * issues hidden by effectively always using offset = 0.
+ */
+ for_each_prime_number_from(num, 0, max) {
+ u64 offset = num * max_page_size;
+ u32 dword;
+
+ err = i915_vma_unbind(vma);
+ if (err)
+ goto out_vma_close;
+
+ err = i915_vma_pin(vma, size, max_page_size, flags | offset);
+ if (err) {
+ /*
+ * The ggtt may have some pages reserved so
+ * refrain from erroring out.
+ */
+ if (err == -ENOSPC && i915_is_ggtt(vm)) {
+ err = 0;
+ continue;
+ }
+
+ goto out_vma_close;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ dword = offset_in_page(num) / 4;
+
+ err = gpu_write(vma, ctx, engine, dword, num + 1);
+ if (err) {
+ pr_err("gpu-write failed at offset=%llx", offset);
+ goto out_vma_unpin;
+ }
+
+ err = cpu_check(obj, dword, num + 1);
+ if (err) {
+ pr_err("cpu-check failed at offset=%llx", offset);
+ goto out_vma_unpin;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (num > 0 &&
+ igt_timeout(end_time,
+ "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+ __func__, id, offset, max_page_size))
+ break;
+ }
+ }
+
+out_vma_unpin:
+ if (i915_vma_is_pinned(vma))
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+
+ return err;
+}
+
+static int igt_ppgtt_exhaust_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ static unsigned int pages[ARRAY_SIZE(page_sizes)];
+ struct drm_i915_gem_object *obj;
+ unsigned int size_mask;
+ unsigned int page_mask;
+ int n, i;
+ int err = -ENODEV;
+
+ /*
+ * Sanity check creating objects with a varying mix of page sizes --
+ * ensuring that our writes lands in the right place.
+ */
+
+ n = 0;
+ for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
+ pages[n++] = BIT(i);
+
+ for (size_mask = 2; size_mask < BIT(n); size_mask++) {
+ unsigned int size = 0;
+
+ for (i = 0; i < n; i++) {
+ if (size_mask & BIT(i))
+ size |= pages[i];
+ }
+
+ /*
+ * For our page mask we want to enumerate all the page-size
+ * combinations which will fit into our chosen object size.
+ */
+ for (page_mask = 2; page_mask <= size_mask; page_mask++) {
+ unsigned int page_sizes = 0;
+
+ for (i = 0; i < n; i++) {
+ if (page_mask & BIT(i))
+ page_sizes |= pages[i];
+ }
+
+ /*
+ * Ensure that we can actually fill the given object
+ * with our chosen page mask.
+ */
+ if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
+ continue;
+
+ obj = huge_pages_object(i915, size, page_sizes);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_device;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+
+ if (err == -ENOMEM) {
+ pr_info("unable to get pages, size=%u, pages=%u\n",
+ size, page_sizes);
+ err = 0;
+ break;
+ }
+
+ pr_err("pin_pages failed, size=%u, pages=%u\n",
+ size_mask, page_mask);
+
+ goto out_device;
+ }
+
+ /* Force the page-size for the gtt insertion */
+ obj->mm.page_sizes.sg = page_sizes;
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("exhaust write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ goto out_device;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+out_device:
+ mkwrite_device_info(i915)->page_sizes = supported;
+
+ return err;
+}
+
+static int igt_ppgtt_internal_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ static const unsigned int sizes[] = {
+ SZ_64K,
+ SZ_128K,
+ SZ_256K,
+ SZ_512K,
+ SZ_1M,
+ SZ_2M,
+ };
+ int i;
+ int err;
+
+ /*
+ * Sanity check that the HW uses huge pages correctly through internal
+ * -- ensure that our writes land in the right place.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int size = sizes[i];
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+ pr_info("internal unable to allocate huge-page(s) with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("internal write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+ return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static int igt_ppgtt_gemfs_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ static const unsigned int sizes[] = {
+ SZ_2M,
+ SZ_4M,
+ SZ_8M,
+ SZ_16M,
+ SZ_32M,
+ };
+ int i;
+ int err;
+
+ /*
+ * Sanity check that the HW uses huge pages correctly through gemfs --
+ * ensure that our writes land in the right place.
+ */
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int size = sizes[i];
+
+ obj = i915_gem_object_create(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+ pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("gemfs write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_ppgtt_pin_update(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *dev_priv = ctx->i915;
+ unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ int first, last;
+ int err;
+
+ /*
+ * Make sure there's no funny business when doing a PIN_UPDATE -- in the
+ * past we had a subtle issue with being able to incorrectly do multiple
+ * alloc va ranges on the same object when doing a PIN_UPDATE, which
+ * resulted in some pretty nasty bugs, though only when using
+ * huge-gtt-pages.
+ */
+
+ if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+ pr_info("48b PPGTT not supported, skipping\n");
+ return 0;
+ }
+
+ first = ilog2(I915_GTT_PAGE_SIZE_64K);
+ last = ilog2(I915_GTT_PAGE_SIZE_2M);
+
+ for_each_set_bit_from(first, &supported, last + 1) {
+ unsigned int page_size = BIT(first);
+
+ obj = i915_gem_object_create_internal(dev_priv, page_size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, SZ_2M, 0, flags);
+ if (err)
+ goto out_close;
+
+ if (vma->page_sizes.sg < page_size) {
+ pr_info("Unable to allocate page-size %x, finishing test early\n",
+ page_size);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ if (vma->page_sizes.gtt != page_size) {
+ dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
+
+ /*
+ * The only valid reason for this to ever fail would be
+ * if the dma-mapper screwed us over when we did the
+ * dma_map_sg(), since it has the final say over the dma
+ * address.
+ */
+ if (IS_ALIGNED(addr, page_size)) {
+ pr_err("page_sizes.gtt=%u, expected=%u\n",
+ vma->page_sizes.gtt, page_size);
+ err = -EINVAL;
+ } else {
+ pr_info("dma address misaligned, finishing test early\n");
+ }
+
+ goto out_unpin;
+ }
+
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ if (err)
+ goto out_unpin;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_put(obj);
+ }
+
+ obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ /*
+ * Make sure we don't end up with something like where the pde is still
+ * pointing to the 2M page, and the pt we just filled-in is dangling --
+ * we can check this by writing to the first page where it would then
+ * land in the now stale 2M page.
+ */
+
+ err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_tmpfs_fallback(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct vfsmount *gemfs = i915->mm.gemfs;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that we don't burst into a ball of flames upon falling back
+ * to tmpfs, which we rely on if on the off-chance we encouter a failure
+ * when setting up gemfs.
+ */
+
+ i915->mm.gemfs = NULL;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_restore;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+ *vaddr = 0xdeadbeaf;
+
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_restore:
+ i915->mm.gemfs = gemfs;
+
+ return err;
+}
+
+static int igt_shrink_thp(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER;
+ int err;
+
+ /*
+ * Sanity check shrinking huge-paged object -- make sure nothing blows
+ * up.
+ */
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ obj = i915_gem_object_create(i915, SZ_2M);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+ pr_info("failed to allocate THP, finishing test early\n");
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ i915_vma_unpin(vma);
+
+ /*
+ * Now that the pages are *unpinned* shrink-all should invoke
+ * shmem to truncate our pages.
+ */
+ i915_gem_shrink_all(i915);
+ if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+ pr_err("shrink-all didn't truncate the pages\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
+ pr_err("residual page-size bits left\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int i915_gem_huge_page_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_ppgtt_misaligned_dma),
+ SUBTEST(igt_mock_ppgtt_huge_fill),
+ SUBTEST(igt_mock_ppgtt_64K),
+ };
+ int saved_ppgtt = i915_modparams.enable_ppgtt;
+ struct drm_i915_private *dev_priv;
+ struct pci_dev *pdev;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ dev_priv = mock_gem_device();
+ if (!dev_priv)
+ return -ENOMEM;
+
+ /* Pretend to be a device which supports the 48b PPGTT */
+ i915_modparams.enable_ppgtt = 3;
+
+ pdev = dev_priv->drm.pdev;
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_unlock;
+ }
+
+ if (!i915_vm_is_48bit(&ppgtt->base)) {
+ pr_err("failed to create 48b PPGTT\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ /* If we were ever hit this then it's time to mock the 64K scratch */
+ if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+ pr_err("PPGTT missing 64K scratch page\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ err = i915_subtests(tests, ppgtt);
+
+out_close:
+ i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_put(ppgtt);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ i915_modparams.enable_ppgtt = saved_ppgtt;
+
+ drm_dev_unref(&dev_priv->drm);
+
+ return err;
+}
+
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_shrink_thp),
+ SUBTEST(igt_ppgtt_pin_update),
+ SUBTEST(igt_tmpfs_fallback),
+ SUBTEST(igt_ppgtt_exhaust_huge),
+ SUBTEST(igt_ppgtt_gemfs_huge),
+ SUBTEST(igt_ppgtt_internal_huge),
+ };
+ struct drm_file *file;
+ struct i915_gem_context *ctx;
+ int err;
+
+ if (!USES_PPGTT(dev_priv)) {
+ pr_info("PPGTT not supported, skipping live-selftests\n");
+ return 0;
+ }
+
+ file = mock_file(dev_priv);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ctx = live_context(dev_priv, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ err = i915_subtests(tests, ctx);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ mock_file_free(dev_priv, file);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index fb0a58fc8348..def5052862ae 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
if (err)
return err;
- list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 5ea373221f49..f463105ff48d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -24,6 +24,9 @@
#include "../i915_selftest.h"
+#include "lib_sw_fence.h"
+#include "mock_context.h"
+#include "mock_drm.h"
#include "mock_gem_device.h"
static int populate_ggtt(struct drm_i915_private *i915)
@@ -47,7 +50,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
if (!list_empty(&i915->mm.unbound_list)) {
size = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
size++;
pr_err("Found %lld objects unbound!\n", size);
@@ -74,10 +77,10 @@ static void cleanup_objects(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj, *on;
- list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
+ list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
i915_gem_object_put(obj);
- list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
+ list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
i915_gem_object_put(obj);
mutex_unlock(&i915->drm.struct_mutex);
@@ -149,8 +152,6 @@ static int igt_overcommit(void *arg)
goto cleanup;
}
- list_move(&obj->global_link, &i915->mm.unbound_list);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
@@ -325,6 +326,148 @@ cleanup:
return err;
}
+static int igt_evict_contexts(void *arg)
+{
+ const u64 PRETEND_GGTT_SIZE = 16ull << 20;
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct reserved {
+ struct drm_mm_node node;
+ struct reserved *next;
+ } *reserved = NULL;
+ struct drm_mm_node hole;
+ unsigned long count;
+ int err;
+
+ /*
+ * The purpose of this test is to verify that we will trigger an
+ * eviction in the GGTT when constructing a request that requires
+ * additional space in the GGTT for pinning the context. This space
+ * is not directly tied to the request so reclaiming it requires
+ * extra work.
+ *
+ * As such this test is only meaningful for full-ppgtt environments
+ * where the GTT space of the request is separate from the GGTT
+ * allocation required to build the request.
+ */
+ if (!USES_FULL_PPGTT(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /* Reserve a block so that we know we have enough to fit a few rq */
+ memset(&hole, 0, sizeof(hole));
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+ PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, i915->ggtt.base.total,
+ PIN_NOEVICT);
+ if (err)
+ goto out_locked;
+
+ /* Make the GGTT appear small by filling it with unevictable nodes */
+ count = 0;
+ do {
+ struct reserved *r;
+
+ r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ err = -ENOMEM;
+ goto out_locked;
+ }
+
+ if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+ 1ul << 20, 0, I915_COLOR_UNEVICTABLE,
+ 0, i915->ggtt.base.total,
+ PIN_NOEVICT)) {
+ kfree(r);
+ break;
+ }
+
+ r->next = reserved;
+ reserved = r;
+
+ count++;
+ } while (1);
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&i915->drm.struct_mutex);
+ pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
+
+ /* Overfill the GGTT with context objects and so try to evict one. */
+ for_each_engine(engine, i915, id) {
+ struct i915_sw_fence fence;
+ struct drm_file *file;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ count = 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ onstack_fence_init(&fence);
+ do {
+ struct drm_i915_gem_request *rq;
+ struct i915_gem_context *ctx;
+
+ ctx = live_context(i915, file);
+ if (!ctx)
+ break;
+
+ /* We will need some GGTT space for the rq's context */
+ igt_evict_ctl.fail_if_busy = true;
+ rq = i915_gem_request_alloc(engine, ctx);
+ igt_evict_ctl.fail_if_busy = false;
+
+ if (IS_ERR(rq)) {
+ /* When full, fail_if_busy will trigger EBUSY */
+ if (PTR_ERR(rq) != -EBUSY) {
+ pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
+ ctx->hw_id, engine->name,
+ (int)PTR_ERR(rq));
+ err = PTR_ERR(rq);
+ }
+ break;
+ }
+
+ /* Keep every request/ctx pinned until we are full */
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &fence,
+ GFP_KERNEL);
+ if (err < 0)
+ break;
+
+ i915_add_request(rq);
+ count++;
+ err = 0;
+ } while(1);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ onstack_fence_fini(&fence);
+ pr_info("Submitted %lu contexts/requests on %s\n",
+ count, engine->name);
+
+ mock_file_free(i915, file);
+ if (err)
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+ while (reserved) {
+ struct reserved *next = reserved->next;
+
+ drm_mm_remove_node(&reserved->node);
+ kfree(reserved);
+
+ reserved = next;
+ }
+ if (drm_mm_node_allocated(&hole))
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
int i915_gem_evict_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -348,3 +491,12 @@ int i915_gem_evict_mock_selftests(void)
drm_dev_unref(&i915->drm);
return err;
}
+
+int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_evict_contexts),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 6b132caffa18..9da0c9f99916 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -39,25 +39,26 @@ static void fake_free_pages(struct drm_i915_gem_object *obj,
kfree(pages);
}
-static struct sg_table *
-fake_get_pages(struct drm_i915_gem_object *obj)
+static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
struct sg_table *pages;
struct scatterlist *sg;
+ unsigned int sg_page_sizes;
typeof(obj->base.size) rem;
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
rem = round_up(obj->base.size, BIT(31)) >> 31;
if (sg_alloc_table(pages, rem, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
+ sg_page_sizes = 0;
rem = obj->base.size;
for (sg = pages->sgl; sg; sg = sg_next(sg)) {
unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -66,13 +67,17 @@ fake_get_pages(struct drm_i915_gem_object *obj)
sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
sg_dma_address(sg) = page_to_phys(sg_page(sg));
sg_dma_len(sg) = len;
+ sg_page_sizes |= len;
rem -= len;
}
GEM_BUG_ON(rem);
obj->mm.madv = I915_MADV_DONTNEED;
- return pages;
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+ return 0;
#undef GFP
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 8f011c447e41..1b8774a42e48 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -251,14 +251,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return PTR_ERR(io);
}
- err = i915_vma_get_fence(vma);
- if (err) {
- pr_err("Failed to get fence for partial view: offset=%lu\n",
- page);
- i915_vma_unpin_iomap(vma);
- return err;
- }
-
iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
i915_vma_unpin_iomap(vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 6664cb2eb0b8..a999161e8db1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -215,7 +215,9 @@ static int igt_request_rewind(void *arg)
}
i915_gem_request_get(vip);
i915_add_request(vip);
+ rcu_read_lock();
request->engine->submit_request(request);
+ rcu_read_unlock();
mutex_unlock(&i915->drm.struct_mutex);
@@ -418,7 +420,10 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
err = PTR_ERR(cmd);
goto err;
}
+
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(i915);
+
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -605,8 +610,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+ i915_gem_chipset_flush(i915);
- wmb();
i915_gem_object_unpin_map(obj);
return vma;
@@ -625,7 +630,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(batch->vm->i915);
i915_gem_object_unpin_map(batch->obj);
@@ -858,7 +863,8 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(i915);
+
i915_gem_object_unpin_map(request[id]->batch->obj);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
index 7a44dab631b8..4795877abe56 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -121,7 +121,7 @@ out:
static unsigned int random_engine(struct rnd_state *rnd)
{
- return ((u64)prandom_u32_state(rnd) * I915_NUM_ENGINES) >> 32;
+ return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}
static int bench_sync(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 1519f1b7841b..d7dd98a6acad 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,5 +16,7 @@ selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(evict, i915_gem_evict_live_selftests)
+selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index e5a9e5dcf2f3..19c6fce837df 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -22,3 +22,4 @@ selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
+selftest(hugepages, i915_gem_huge_page_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 222c511bea49..b85872cc7fbe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -41,11 +41,6 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd)
return x;
}
-static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
-{
- return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
-}
-
void i915_random_reorder(unsigned int *order, unsigned int count,
struct rnd_state *state)
{
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 6c9379871384..7dffedc501ca 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -43,6 +43,11 @@
u64 i915_prandom_u64_state(struct rnd_state *rnd);
+static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+{
+ return upper_32_bits(mul_u32_u32(prandom_u32_state(state), ep_ro));
+}
+
unsigned int *i915_random_order(unsigned int count,
struct rnd_state *state);
void i915_random_reorder(unsigned int *order,
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index 19d145d6bf52..ea01d0fe3ace 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -24,6 +24,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
@@ -565,6 +566,46 @@ err_in:
return ret;
}
+static int test_timer(void *arg)
+{
+ unsigned long target, delay;
+ struct timed_fence tf;
+
+ timed_fence_init(&tf, target = jiffies);
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with immediate expiration not signaled\n");
+ goto err;
+ }
+ timed_fence_fini(&tf);
+
+ for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ timed_fence_init(&tf, target = jiffies + delay);
+ if (i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
+ goto err;
+ }
+
+ i915_sw_fence_wait(&tf.fence);
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence not signaled after wait\n");
+ goto err;
+ }
+ if (time_before(jiffies, target)) {
+ pr_err("Fence signaled too early, target=%lu, now=%lu\n",
+ target, jiffies);
+ goto err;
+ }
+
+ timed_fence_fini(&tf);
+ }
+
+ return 0;
+
+err:
+ timed_fence_fini(&tf);
+ return -EINVAL;
+}
+
int i915_sw_fence_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -576,6 +617,7 @@ int i915_sw_fence_mock_selftests(void)
SUBTEST(test_C_AB),
SUBTEST(test_chain),
SUBTEST(test_ipc),
+ SUBTEST(test_timer),
};
return i915_subtests(tests, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 828904b7d468..54fc571b1102 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -271,13 +271,7 @@ struct igt_wakeup {
u32 seqno;
};
-static int wait_atomic(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
-static int wait_atomic_timeout(atomic_t *p)
+static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
{
return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
}
@@ -348,7 +342,7 @@ static void igt_wake_all_sync(atomic_t *ready,
atomic_set(ready, 0);
wake_up_all(wq);
- wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
+ wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
atomic_set(ready, count);
atomic_set(done, count);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 02e52a146ed8..71ce06680d66 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -165,6 +165,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+ i915_gem_chipset_flush(h->i915);
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
@@ -231,7 +232,7 @@ static u32 hws_seqno(const struct hang *h,
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(h->i915);
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
@@ -275,6 +276,8 @@ static int igt_hang_sanitycheck(void *arg)
i915_gem_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(i915);
+
__i915_add_request(rq, true);
timeout = i915_wait_request(rq,
@@ -621,7 +624,15 @@ static int igt_wait_reset(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, rq)) {
- pr_err("Failed to start request %x\n", rq->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto out_rq;
}
@@ -708,10 +719,18 @@ static int igt_reset_queue(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, prev)) {
- pr_err("Failed to start request %x\n",
- prev->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(rq->engine, &p);
+
i915_gem_request_put(rq);
i915_gem_request_put(prev);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto fini;
}
@@ -756,7 +775,7 @@ static int igt_reset_queue(void *arg)
pr_info("%s: Completed %d resets\n", engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(i915);
i915_gem_request_put(prev);
}
@@ -806,7 +825,15 @@ static int igt_handle_error(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, rq)) {
- pr_err("Failed to start request %x\n", rq->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto err_request;
}
@@ -843,17 +870,24 @@ err_unlock:
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_hang_sanitycheck),
- SUBTEST(igt_global_reset),
SUBTEST(igt_reset_engine),
SUBTEST(igt_reset_active_engines),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
};
+ int err;
if (!intel_has_gpu_reset(i915))
return 0;
- return i915_subtests(tests, i915);
+ intel_runtime_pm_get(i915);
+
+ err = i915_subtests(tests, i915);
+
+ intel_runtime_pm_put(i915);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
new file mode 100644
index 000000000000..b26f07b55d86
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "lib_sw_fence.h"
+
+/* Small library of different fence types useful for writing tests */
+
+static int __i915_sw_fence_call
+nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key)
+{
+ debug_fence_init_onstack(fence);
+
+ __init_waitqueue_head(&fence->wait, name, key);
+ atomic_set(&fence->pending, 1);
+ fence->flags = (unsigned long)nop_fence_notify;
+}
+
+void onstack_fence_fini(struct i915_sw_fence *fence)
+{
+ i915_sw_fence_commit(fence);
+ i915_sw_fence_fini(fence);
+}
+
+static void timed_fence_wake(struct timer_list *t)
+{
+ struct timed_fence *tf = from_timer(tf, t, timer);
+
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires)
+{
+ onstack_fence_init(&tf->fence);
+
+ timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
+
+ if (time_after(expires, jiffies))
+ mod_timer(&tf->timer, expires);
+ else
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_fini(struct timed_fence *tf)
+{
+ if (del_timer_sync(&tf->timer))
+ i915_sw_fence_commit(&tf->fence);
+
+ destroy_timer_on_stack(&tf->timer);
+ i915_sw_fence_fini(&tf->fence);
+}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
new file mode 100644
index 000000000000..474aafb92ae1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
@@ -0,0 +1,42 @@
+/*
+ * lib_sw_fence.h - library routines for testing N:M synchronisation points
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _LIB_SW_FENCE_H_
+#define _LIB_SW_FENCE_H_
+
+#include <linux/timer.h>
+
+#include "../i915_sw_fence.h"
+
+#ifdef CONFIG_LOCKDEP
+#define onstack_fence_init(fence) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __onstack_fence_init((fence), #fence, &__key); \
+} while (0)
+#else
+#define onstack_fence_init(fence) \
+ __onstack_fence_init((fence), NULL, NULL)
+#endif
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key);
+void onstack_fence_fini(struct i915_sw_fence *fence);
+
+struct timed_fence {
+ struct i915_sw_fence fence;
+ struct timer_list timer;
+};
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires);
+void timed_fence_fini(struct timed_fence *tf);
+
+#endif /* _LIB_SW_FENCE_H_ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 098ce643ad07..bbf80d42e793 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -73,11 +73,7 @@ err_put:
void mock_context_close(struct i915_gem_context *ctx)
{
- i915_gem_context_set_closed(ctx);
-
- i915_ppgtt_close(&ctx->ppgtt->base);
-
- i915_gem_context_put(ctx);
+ context_close(ctx);
}
void mock_init_contexts(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index fc0fd7498689..331c2b09869e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -32,9 +32,9 @@ static struct mock_request *first_request(struct mock_engine *engine)
link);
}
-static void hw_delay_complete(unsigned long data)
+static void hw_delay_complete(struct timer_list *t)
{
- struct mock_engine *engine = (typeof(engine))data;
+ struct mock_engine *engine = from_timer(engine, t, hw_delay);
struct mock_request *request;
spin_lock(&engine->hw_lock);
@@ -161,9 +161,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
- setup_timer(&engine->hw_delay,
- hw_delay_complete,
- (unsigned long)engine);
+ timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
return &engine->base;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 678723430d78..04eb9362f4f8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -83,6 +83,8 @@ static void mock_device_release(struct drm_device *dev)
kmem_cache_destroy(i915->vmas);
kmem_cache_destroy(i915->objects);
+ i915_gemfs_fini(i915);
+
drm_dev_fini(&i915->drm);
put_device(&i915->drm.pdev->dev);
}
@@ -146,6 +148,11 @@ struct drm_i915_private *mock_gem_device(void)
dev_set_name(&pdev->dev, "mock");
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ /* hack to disable iommu for the fake device; force identity mapping */
+ pdev->dev.archdata.iommu = (void *)-1;
+#endif
+
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -167,6 +174,11 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->gen = -1;
+ mkwrite_device_info(i915)->page_sizes =
+ I915_GTT_PAGE_SIZE_4K |
+ I915_GTT_PAGE_SIZE_64K |
+ I915_GTT_PAGE_SIZE_2M;
+
spin_lock_init(&i915->mm.object_stat_lock);
mock_uncore_init(i915);
@@ -234,8 +246,16 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->kernel_context)
goto err_engine;
+ i915->preempt_context = mock_context(i915, NULL);
+ if (!i915->preempt_context)
+ goto err_kernel_context;
+
+ WARN_ON(i915_gemfs_init(i915));
+
return i915;
+err_kernel_context:
+ i915_gem_context_put(i915->kernel_context);
err_engine:
for_each_engine(engine, i915, id)
mock_engine_free(engine);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f2118cf535a0..336e1afb250f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,6 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- vma->pages = vma->obj->mm.pages;
vma->flags |= I915_VMA_LOCAL_BIND;
return 0;
}
@@ -84,6 +83,8 @@ mock_ppgtt(struct drm_i915_private *i915,
ppgtt->base.insert_entries = mock_insert_entries;
ppgtt->base.bind_vma = mock_bind_ppgtt;
ppgtt->base.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->base.cleanup = mock_cleanup;
return ppgtt;
@@ -93,12 +94,6 @@ static int mock_bind_ggtt(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- int err;
-
- err = i915_get_ggtt_vma_pages(vma);
- if (err)
- return err;
-
vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
return 0;
}
@@ -124,6 +119,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->base.insert_entries = mock_insert_entries;
ggtt->base.bind_vma = mock_bind_ggtt;
ggtt->base.unbind_vma = mock_unbind_ggtt;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = mock_cleanup;
i915_address_space_init(&ggtt->base, i915, "global");
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index 1cc5d2931753..cd6d2a16071f 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -189,6 +189,20 @@ static unsigned int random(unsigned long n,
return 1 + (prandom_u32_state(rnd) % 1024);
}
+static unsigned int random_page_size_pages(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ /* 4K, 64K, 2M */
+ static unsigned int page_count[] = {
+ BIT(12) >> PAGE_SHIFT,
+ BIT(16) >> PAGE_SHIFT,
+ BIT(21) >> PAGE_SHIFT,
+ };
+
+ return page_count[(prandom_u32_state(rnd) % 3)];
+}
+
static inline bool page_contiguous(struct page *first,
struct page *last,
unsigned long npages)
@@ -252,6 +266,7 @@ static const npages_fn_t npages_funcs[] = {
grow,
shrink,
random,
+ random_page_size_pages,
NULL,
};
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index f91cb72d0830..17d2f3a1c562 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_of.h>
@@ -105,7 +106,7 @@ static int imx_drm_atomic_check(struct drm_device *dev,
}
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = imx_drm_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -132,9 +133,16 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
plane_disabling = true;
}
- if (plane_disabling) {
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ /*
+ * The flip done wait is only strictly required by imx-drm if a deferred
+ * plane disable is in-flight. As the core requires blocking commits
+ * to wait for the flip it is done here unconditionally. This keeps the
+ * workitem around a bit longer than required for the majority of
+ * non-blocking commits, but we accept that for the sake of simplicity.
+ */
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+ if (plane_disabling) {
for_each_old_plane_in_state(state, plane, old_plane_state, i)
ipu_plane_disable_deferred(plane);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 53e0b24beda6..9a9961802f5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -115,7 +115,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
if (crtc->state) {
if (crtc->state->mode_blob)
- drm_property_unreference_blob(crtc->state->mode_blob);
+ drm_property_blob_put(crtc->state->mode_blob);
state = to_imx_crtc_state(crtc->state);
memset(state, 0, sizeof(*state));
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index cf98596c7ce1..247c60e6bed2 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -18,6 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "video/imx-ipu-v3.h"
@@ -690,7 +691,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
- .prepare_fb = drm_fb_cma_prepare_fb,
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = ipu_plane_atomic_check,
.atomic_disable = ipu_plane_atomic_disable,
.atomic_update = ipu_plane_atomic_update,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8def97d75030..aedecda9728a 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -183,7 +183,7 @@ static int imx_pd_register(struct drm_device *drm,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
&imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
}
if (imxpd->panel)
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 690c67507cbc..3ff502771ba2 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1696,11 +1696,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
- ret = drm_bridge_add(&hdmi->bridge);
- if (ret) {
- dev_err(dev, "failed to add bridge, ret = %d\n", ret);
- return ret;
- }
+ drm_bridge_add(&hdmi->bridge);
ret = mtk_hdmi_clk_enable_audio(hdmi);
if (ret) {
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 7742c7d81ed8..3b804fdaf7a0 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_fb_helper.h>
@@ -78,7 +79,7 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
.output_poll_changed = meson_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
};
static irqreturn_t meson_irq(int irq, void *arg)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5e9cd4c0e8b6..68e5d9c94475 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1670,7 +1670,7 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index ced70783b44e..92b3844202d2 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -9,6 +9,7 @@ msm-y := \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
+ adreno/a5xx_preempt.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -58,7 +59,8 @@ msm-y := \
msm_iommu.o \
msm_perf.o \
msm_rd.o \
- msm_ringbuffer.o
+ msm_ringbuffer.o \
+ msm_submitqueue.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 7791313405b5..4baef2738178 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -44,7 +44,7 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static bool a3xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -65,7 +65,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
return a3xx_idle(gpu);
}
@@ -339,7 +339,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
@@ -444,9 +444,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
- .last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
+ .active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -492,7 +492,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 58341ef6f15b..8199a4b9f2fa 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -116,7 +116,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
static bool a4xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -137,7 +137,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
return a4xx_idle(gpu);
}
@@ -337,7 +337,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
@@ -532,9 +532,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
- .last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
+ .active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -574,7 +574,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 17c59d839e6f..a1f4eeeb73e2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -26,8 +26,9 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
{
+ struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
struct device_node *np;
struct resource r;
@@ -55,10 +56,10 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
mem_size = resource_size(&r);
/* Request the MDT file for the firmware */
- ret = request_firmware(&fw, fwname, dev);
- if (ret) {
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
- return ret;
+ return PTR_ERR(fw);
}
/* Figure out how much memory we need */
@@ -75,9 +76,26 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
goto out;
}
- /* Load the rest of the MDT */
- ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,
- mem_size);
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdir..
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten thru adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
+ mem_region, mem_phys, mem_size);
+ } else {
+ char newname[strlen("qcom/") + strlen(fwname) + 1];
+
+ sprintf(newname, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
+ mem_region, mem_phys, mem_size);
+ }
if (ret)
goto out;
@@ -95,14 +113,65 @@ out:
return ret;
}
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Enable local preemption for finegrain preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
@@ -120,16 +189,54 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
}
+ /*
+ * Write the render mode to NULL (0) to indicate to the CP that the IBs
+ * are done rendering - otherwise a lucky preemption would start
+ * replaying from the last checkpoint
+ */
+ OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Turn off IB level preemptions */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, submit->seqno);
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
- gpu->funcs->flush(gpu);
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ /*
+ * If dword[2:1] are non zero, they specify an address for the CP to
+ * write the value of dword[3] to on preemption complete. Write 0 to
+ * skip the write
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* Set bit 0 to trigger an interrupt on preempt complete */
+ OUT_RING(ring, 0x01);
+
+ a5xx_flush(gpu, ring);
+
+ /* Check to see if we need to start preemption */
+ a5xx_preempt_trigger(gpu);
}
static const struct {
@@ -245,7 +352,7 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
static int a5xx_me_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT7(ring, CP_ME_INIT, 8);
@@ -276,11 +383,54 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings == 1)
+ return 0;
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
- return a5xx_idle(gpu) ? 0 : -EINVAL;
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ gpu->funcs->flush(gpu, ring);
+
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
@@ -381,7 +531,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
- ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
+ ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);
loaded = !ret;
@@ -396,6 +546,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
@@ -536,13 +687,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
- /* Load the GPMU firmware before starting the HW init */
- a5xx_gpmu_ucode_init(gpu);
-
ret = adreno_hw_init(gpu);
if (ret)
return ret;
+ a5xx_preempt_hw_init(gpu);
+
+ a5xx_gpmu_ucode_init(gpu);
+
ret = a5xx_ucode_init(gpu);
if (ret)
return ret;
@@ -565,11 +717,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
* ticking correctly
*/
if (adreno_is_a530(adreno_gpu)) {
- OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
- OUT_RING(gpu->rb, 0x0F);
+ OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb[0], 0x0F);
- gpu->funcs->flush(gpu);
- if (!a5xx_idle(gpu))
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
}
@@ -582,11 +734,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
*/
ret = a5xx_zap_shader_init(gpu);
if (!ret) {
- OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
- OUT_RING(gpu->rb, 0x00000000);
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
- gpu->funcs->flush(gpu);
- if (!a5xx_idle(gpu))
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else {
/* Print a warning so if we die, we know why */
@@ -595,6 +747,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
}
+ /* Last step - yield the ringbuffer */
+ a5xx_preempt_start(gpu);
+
return 0;
}
@@ -625,6 +780,8 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
+ a5xx_preempt_fini(gpu);
+
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -660,18 +817,27 @@ static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}
-bool a5xx_idle(struct msm_gpu *gpu)
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (ring != a5xx_gpu->cur_ring) {
+ WARN(1, "Tried to idle a non-current ringbuffer\n");
+ return false;
+ }
+
/* wait for CP to drain ringbuffer: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, ring))
return false;
if (spin_until(_a5xx_check_idle(gpu))) {
- DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
gpu->name, __builtin_return_address(0),
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
- gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
-
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
return false;
}
@@ -802,9 +968,10 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
- dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- gpu->funcs->last_fence(gpu),
+ dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
@@ -854,8 +1021,13 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
a5xx_gpmu_err_irq(gpu);
- if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+ a5xx_preempt_trigger(gpu);
msm_gpu_retire(gpu);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+ a5xx_preempt_irq(gpu);
return IRQ_HANDLED;
}
@@ -985,6 +1157,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
}
#endif
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ return a5xx_gpu->cur_ring;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -992,9 +1172,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a5xx_pm_suspend,
.pm_resume = a5xx_pm_resume,
.recover = a5xx_recover,
- .last_fence = adreno_last_fence,
.submit = a5xx_submit,
- .flush = adreno_flush,
+ .flush = a5xx_flush,
+ .active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -1030,7 +1210,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
a5xx_gpu->lm_leakage = 0x4E001A;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
@@ -1039,5 +1219,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a5xx_preempt_init(gpu);
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index e94451685bf8..6fb8c2f9b9e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,10 +35,100 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
+
+ struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ struct timer_list preempt_timer;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state START.
+ * PREEMPT_START - The trigger is evaulating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
+ * state: NONE.
+ * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
+ * recovery. Next state: N/A
+ * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next state: FAULTED, NONE.
+ */
+
+enum preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_ABORT,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+ uint32_t magic;
+ uint32_t info;
+ uint32_t data;
+ uint32_t cntl;
+ uint32_t rptr;
+ uint32_t wptr;
+ uint64_t rptr_addr;
+ uint64_t rbase;
+ uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+
int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
@@ -55,7 +145,22 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
return -ETIMEDOUT;
}
-bool a5xx_idle(struct msm_gpu *gpu);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+ int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_ABORT);
+}
+
#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 04aab1dcae2b..e5700bbf09dd 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -173,7 +173,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
if (!a5xx_gpu->gpmu_dwords)
return 0;
@@ -192,9 +192,9 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
- if (!a5xx_idle(gpu)) {
+ if (!a5xx_idle(gpu, ring)) {
DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
gpu->name);
return -EINVAL;
@@ -264,7 +264,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
return;
/* Get the firmware */
- if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) {
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
+ if (IS_ERR(fw)) {
DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
gpu->name);
return;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 000000000000..970c7963ae29
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,304 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+ enum preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+ /* ... and after*/
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ if (!ring)
+ return;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->lock, flags);
+ empty = (get_wptr(ring) == ring->memptrs->rptr);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a5xx_preempt_timer(struct timer_list *t)
+{
+ struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
+ struct msm_gpu *gpu = &a5xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ return;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated or the highest priority ring is the current
+ * one do nothing except to update the wptr to the latest and greatest
+ */
+ if (!ring || (a5xx_gpu->cur_ring == ring)) {
+ /*
+ * Its possible that while a preemption request is in progress
+ * from an irq context, a user context trying to submit might
+ * fail to update the write pointer, because it determines
+ * that the preempt state is not PREEMPT_NONE.
+ *
+ * Close the race by introducing an intermediate
+ * state PREEMPT_ABORT to let the submit path
+ * know that the ringbuffer is not going to change
+ * and can safely update the write pointer.
+ */
+
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ return;
+ }
+
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Make sure everything is written before hitting the button */
+ wmb();
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a5xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+ * firing the interrupt, but there is a non zero chance of a hardware
+ * condition or a software race that could set it again before we have a
+ * chance to finish. If that happens, log and go for recovery
+ */
+ status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status)) {
+ set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+ dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+ return;
+ }
+
+ a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+ a5xx_gpu->next_ring = NULL;
+
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ a5xx_gpu->preempt[i]->wptr = 0;
+ a5xx_gpu->preempt[i]->rptr = 0;
+ a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+ }
+
+ /* Write a 0 to signal that we aren't switching pagetables */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+}
+
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a5xx_preempt_record *ptr;
+ struct drm_gem_object *bo = NULL;
+ u64 iova = 0;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_iova[ring->id] = iova;
+ a5xx_gpu->preempt[ring->id] = ptr;
+
+ /* Set up the defaults on the preemption record */
+
+ ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ ptr->info = 0;
+ ptr->data = 0;
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+ ptr->rptr_addr = rbmemptr(ring, rptr);
+ ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+
+ return 0;
+}
+
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (!a5xx_gpu->preempt_bo[i])
+ continue;
+
+ msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
+
+ if (a5xx_gpu->preempt_iova[i])
+ msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
+
+ drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
+ a5xx_gpu->preempt_bo[i] = NULL;
+ }
+}
+
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+
+ return;
+ }
+ }
+
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index c75c4df4bc39..05022ea2a007 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -125,51 +125,24 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
- struct adreno_platform_config *config;
- struct adreno_rev rev;
- const struct adreno_info *info;
- struct msm_gpu *gpu = NULL;
+ struct msm_gpu *gpu = platform_get_drvdata(priv->gpu_pdev);
+ int ret;
- if (!pdev) {
+ if (!gpu) {
dev_err(dev->dev, "no adreno device\n");
return NULL;
}
- config = pdev->dev.platform_data;
- rev = config->rev;
- info = adreno_info(config->rev);
-
- if (!info) {
- dev_warn(dev->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
- rev.core, rev.major, rev.minor, rev.patchid);
+ pm_runtime_get_sync(&pdev->dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = msm_gpu_hw_init(gpu);
+ mutex_unlock(&dev->struct_mutex);
+ pm_runtime_put_sync(&pdev->dev);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
- DBG("Found GPU: %u.%u.%u.%u", rev.core, rev.major,
- rev.minor, rev.patchid);
-
- gpu = info->init(dev);
- if (IS_ERR(gpu)) {
- dev_warn(dev->dev, "failed to load adreno gpu\n");
- gpu = NULL;
- /* not fatal */
- }
-
- if (gpu) {
- int ret;
-
- pm_runtime_get_sync(&pdev->dev);
- mutex_lock(&dev->struct_mutex);
- ret = msm_gpu_hw_init(gpu);
- mutex_unlock(&dev->struct_mutex);
- pm_runtime_put_sync(&pdev->dev);
- if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
- gpu->funcs->destroy(gpu);
- gpu = NULL;
- }
- }
-
return gpu;
}
@@ -282,6 +255,9 @@ static int adreno_get_pwrlevels(struct device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
+ const struct adreno_info *info;
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_gpu *gpu;
u32 val;
int ret;
@@ -302,13 +278,39 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return ret;
dev->platform_data = &config;
- set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ set_gpu_pdev(drm, to_platform_device(dev));
+
+ info = adreno_info(config.rev);
+
+ if (!info) {
+ dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
+ config.rev.core, config.rev.major,
+ config.rev.minor, config.rev.patchid);
+ return -ENXIO;
+ }
+
+ DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
+ config.rev.minor, config.rev.patchid);
+
+ gpu = info->init(drm);
+ if (IS_ERR(gpu)) {
+ dev_warn(drm->dev, "failed to load adreno gpu\n");
+ return PTR_ERR(gpu);
+ }
+
+ dev_set_drvdata(dev, gpu);
+
return 0;
}
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
+ struct msm_gpu *gpu = dev_get_drvdata(dev);
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->destroy(gpu);
+
set_gpu_pdev(dev_get_drvdata(master), NULL);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c8b4ac254bb5..e2ffecce59a3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -21,8 +21,6 @@
#include "msm_gem.h"
#include "msm_mmu.h"
-#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -58,72 +56,181 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
return ret;
}
return -EINVAL;
+ case MSM_PARAM_NR_RINGS:
+ *value = gpu->nr_rings;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
+const struct firmware *
+adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
+{
+ struct drm_device *drm = adreno_gpu->base.dev;
+ const struct firmware *fw = NULL;
+ char newname[strlen("qcom/") + strlen(fwname) + 1];
+ int ret;
+
+ sprintf(newname, "qcom/%s", fwname);
+
+ /*
+ * Try first to load from qcom/$fwfile using a direct load (to avoid
+ * a potential timeout waiting for usermode helper)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
+
+ ret = request_firmware_direct(&fw, newname, drm->dev);
+ if (!ret) {
+ dev_info(drm->dev, "loaded %s from new location\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_NEW;
+ return fw;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ dev_err(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ /*
+ * Then try the legacy location without qcom/ prefix
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
+
+ ret = request_firmware_direct(&fw, fwname, drm->dev);
+ if (!ret) {
+ dev_info(drm->dev, "loaded %s from legacy location\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_LEGACY;
+ return fw;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ dev_err(drm->dev, "failed to load %s: %d\n",
+ fwname, ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ /*
+ * Finally fall back to request_firmware() for cases where the
+ * usermode helper is needed (I think mainly android)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
+
+ ret = request_firmware(&fw, newname, drm->dev);
+ if (!ret) {
+ dev_info(drm->dev, "loaded %s with helper\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_HELPER;
+ return fw;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ dev_err(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ dev_err(drm->dev, "failed to load %s\n", fwname);
+ return ERR_PTR(-ENOENT);
+}
+
+static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+{
+ const struct firmware *fw;
+
+ if (adreno_gpu->pm4)
+ return 0;
+
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
+ adreno_gpu->pm4 = fw;
+
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw);
+ if (IS_ERR(fw)) {
+ release_firmware(adreno_gpu->pm4);
+ adreno_gpu->pm4 = NULL;
+ return PTR_ERR(fw);
+ }
+ adreno_gpu->pfp = fw;
+
+ return 0;
+}
+
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int ret;
+ int ret, i;
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
return ret;
- }
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ if (!ring)
+ continue;
+
+ ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
+ if (ret) {
+ ring->iova = 0;
+ dev_err(gpu->dev->dev,
+ "could not map ringbuffer %d: %d\n", i, ret);
+ return ret;
+ }
+
+ ring->cur = ring->start;
+ ring->next = ring->start;
- /* reset completed fence seqno: */
- adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
- adreno_gpu->memptrs->rptr = 0;
+ /* reset completed fence seqno: */
+ ring->memptrs->fence = ring->seqno;
+ ring->memptrs->rptr = 0;
+ }
- /* Setup REG_CP_RB_CNTL: */
+ /*
+ * Setup REG_CP_RB_CNTL. The same value is used across targets (with
+ * the excpetion of A430 that disables the RPTR shadow) - the cacluation
+ * for the ringbuffer size and block size is moved to msm_gpu.h for the
+ * pre-processor to deal with and the A430 variant is ORed in here
+ */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- /* size is log2(quad-words): */
- AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
- (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
+ MSM_GPU_RB_CNTL_DEFAULT |
+ (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
- /* Setup ringbuffer address: */
+ /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
- REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
if (!adreno_is_a430(adreno_gpu)) {
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- rbmemptr(adreno_gpu, rptr));
+ rbmemptr(gpu->rb[0], rptr));
}
return 0;
}
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
-{
- return ring->cur - ring->start;
-}
-
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
-static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
{
if (adreno_is_a430(adreno_gpu))
- return adreno_gpu->memptrs->rptr = adreno_gpu_read(
+ return ring->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
else
- return adreno_gpu->memptrs->rptr;
+ return ring->memptrs->rptr;
}
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- return adreno_gpu->memptrs->fence;
+ return gpu->rb[0];
}
void adreno_recover(struct msm_gpu *gpu)
@@ -149,7 +256,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = submit->ring;
unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
@@ -164,7 +271,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
- OUT_RING(ring, submit->cmd[i].iova);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
@@ -172,7 +279,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, submit->seqno);
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
@@ -188,8 +295,8 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, fence));
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
OUT_PKT3(ring, CP_INTERRUPT, 1);
@@ -215,20 +322,23 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
#endif
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
}
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr;
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
/*
* Mask wptr value that we calculate to fit in the HW range. This is
* to account for the possibility that the last command fit exactly into
* the ringbuffer and rb->next hasn't wrapped to zero yet
*/
- wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
+ wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -236,17 +346,19 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-bool adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
- if (!spin_until(get_rptr(adreno_gpu) == wptr))
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
return false;
}
@@ -261,10 +373,16 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->fctx->last_fence);
- seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
- seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ seq_printf(m, "rb %d: fence: %d/%d\n", i,
+ ring->memptrs->fence, ring->seqno);
+
+ seq_printf(m, " rptr: %d\n",
+ get_rptr(adreno_gpu, ring));
+ seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ }
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
@@ -290,16 +408,23 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
void adreno_dump_info(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
printk("revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->fctx->last_fence);
- printk("rptr: %d\n", get_rptr(adreno_gpu));
- printk("rb wptr: %d\n", get_wptr(gpu->rb));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ printk("rb %d: fence: %d/%d\n", i,
+ ring->memptrs->fence,
+ ring->seqno);
+
+ printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
+ printk("rb wptr: %d\n", get_wptr(ring));
+ }
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
@@ -322,28 +447,31 @@ void adreno_dump(struct msm_gpu *gpu)
}
}
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t size = gpu->rb->size / 4;
- uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = get_rptr(adreno_gpu);
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
return (rptr + (size - 1) - wptr) % size;
}
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
- if (spin_until(ring_freewords(gpu) >= ndwords))
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_DEV_ERROR(ring->gpu->dev->dev,
+ "timeout waiting for space in ringubffer %d\n",
+ ring->id);
}
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs, int nr_rings)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
- int ret;
adreno_gpu->funcs = funcs;
adreno_gpu->info = adreno_info(config->rev);
@@ -366,59 +494,20 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
- adreno_gpu_config.ringsz = RB_SIZE;
+ adreno_gpu_config.nr_rings = nr_rings;
pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+ return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
- if (ret)
- return ret;
-
- ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
- if (ret) {
- dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
- adreno_gpu->info->pm4fw, ret);
- return ret;
- }
-
- ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
- if (ret) {
- dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
- adreno_gpu->info->pfpfw, ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_kernel_new(drm,
- sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
- &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
-
- if (IS_ERR(adreno_gpu->memptrs)) {
- ret = PTR_ERR(adreno_gpu->memptrs);
- adreno_gpu->memptrs = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- }
-
- return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if (adreno_gpu->memptrs_bo) {
- if (adreno_gpu->memptrs)
- msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
-
- if (adreno_gpu->memptrs_iova)
- msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
-
- drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
- }
release_firmware(adreno_gpu->pm4);
release_firmware(adreno_gpu->pfp);
- msm_gpu_cleanup(gpu);
+ msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 4d9165f29f43..28e3de6e5f94 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -82,14 +82,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
-struct adreno_rbmemptrs {
- volatile uint32_t rptr;
- volatile uint32_t fence;
-};
-
struct adreno_gpu {
struct msm_gpu base;
struct adreno_rev rev;
@@ -101,16 +93,30 @@ struct adreno_gpu {
/* interesting register offsets to dump: */
const unsigned int *registers;
+ /*
+ * Are we loading fw from legacy path? Prior to addition
+ * of gpu firmware to linux-firmware, the fw files were
+ * placed in toplevel firmware directory, following qcom's
+ * android kernel. But linux-firmware preferred they be
+ * placed in a 'qcom' subdirectory.
+ *
+ * For backwards compatibility, we try first to load from
+ * the new path, using request_firmware_direct() to avoid
+ * any potential timeout waiting for usermode helper, then
+ * fall back to the old path (with direct load). And
+ * finally fall back to request_firmware() with the new
+ * path to allow the usermode helper.
+ */
+ enum {
+ FW_LOCATION_UNKNOWN = 0,
+ FW_LOCATION_NEW, /* /lib/firmware/qcom/$fwfile */
+ FW_LOCATION_LEGACY, /* /lib/firmware/$fwfile */
+ FW_LOCATION_HELPER,
+ } fwloc;
+
/* firmware: */
const struct firmware *pm4, *pfp;
- /* ringbuffer rptr/wptr: */
- // TODO should this be in msm_ringbuffer? I think it would be
- // different for z180..
- struct adreno_rbmemptrs *memptrs;
- struct drm_gem_object *memptrs_bo;
- uint64_t memptrs_iova;
-
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
@@ -196,22 +202,25 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
+ const char *fwname);
int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-bool adreno_idle(struct msm_gpu *gpu);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
@@ -220,7 +229,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu);
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
@@ -228,14 +237,14 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
- adreno_wait_ring(ring->gpu, 1);
+ adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
@@ -257,14 +266,14 @@ static inline u32 PM4_PARITY(u32 val)
static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt + 1);
+ adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, PKT4(regindx, cnt));
}
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt + 1);
+ adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
@@ -323,6 +332,11 @@ static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
adreno_gpu_write(gpu, hi, upper_32_bits(data));
}
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
/*
* Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index a5d75c9b3a73..65c1dfbbe019 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -14,7 +14,7 @@
#include "dsi_cfg.h"
static const char * const dsi_v2_bus_clk_names[] = {
- "core_mmss_clk", "iface_clk", "bus_clk",
+ "core_mmss", "iface", "bus",
};
static const struct msm_dsi_config apq8064_dsi_cfg = {
@@ -34,7 +34,7 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
};
static const char * const dsi_6g_bus_clk_names[] = {
- "mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk",
+ "mdp_core", "iface", "bus", "core_mmss",
};
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
@@ -55,7 +55,7 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
};
static const char * const dsi_8916_bus_clk_names[] = {
- "mdp_core_clk", "iface_clk", "bus_clk",
+ "mdp_core", "iface", "bus",
};
static const struct msm_dsi_config msm8916_dsi_cfg = {
@@ -99,7 +99,7 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
* without it too. Figure out why it doesn't enable and uncomment below
*/
static const char * const dsi_8996_bus_clk_names[] = {
- "mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
+ "mdp_core", "iface", "bus", /* "core_mmss", */
};
static const struct msm_dsi_config msm8996_dsi_cfg = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index deaf869374ea..0f7324a686ca 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -334,46 +334,46 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
- struct device *dev = &msm_host->pdev->dev;
+ struct platform_device *pdev = msm_host->pdev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
const struct msm_dsi_config *cfg = cfg_hnd->cfg;
int i, ret = 0;
/* get bus clocks */
for (i = 0; i < cfg->num_bus_clks; i++) {
- msm_host->bus_clks[i] = devm_clk_get(dev,
+ msm_host->bus_clks[i] = msm_clk_get(pdev,
cfg->bus_clk_names[i]);
if (IS_ERR(msm_host->bus_clks[i])) {
ret = PTR_ERR(msm_host->bus_clks[i]);
- pr_err("%s: Unable to get %s, ret = %d\n",
+ pr_err("%s: Unable to get %s clock, ret = %d\n",
__func__, cfg->bus_clk_names[i], ret);
goto exit;
}
}
/* get link and source clocks */
- msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
+ msm_host->byte_clk = msm_clk_get(pdev, "byte");
if (IS_ERR(msm_host->byte_clk)) {
ret = PTR_ERR(msm_host->byte_clk);
- pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
+ pr_err("%s: can't find dsi_byte clock. ret=%d\n",
__func__, ret);
msm_host->byte_clk = NULL;
goto exit;
}
- msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(msm_host->pixel_clk)) {
ret = PTR_ERR(msm_host->pixel_clk);
- pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
+ pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
__func__, ret);
msm_host->pixel_clk = NULL;
goto exit;
}
- msm_host->esc_clk = devm_clk_get(dev, "core_clk");
+ msm_host->esc_clk = msm_clk_get(pdev, "core");
if (IS_ERR(msm_host->esc_clk)) {
ret = PTR_ERR(msm_host->esc_clk);
- pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
+ pr_err("%s: can't find dsi_esc clock. ret=%d\n",
__func__, ret);
msm_host->esc_clk = NULL;
goto exit;
@@ -382,22 +382,22 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
+ pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
if (!msm_host->pixel_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
+ pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- msm_host->src_clk = devm_clk_get(dev, "src_clk");
+ msm_host->src_clk = msm_clk_get(pdev, "src");
if (IS_ERR(msm_host->src_clk)) {
ret = PTR_ERR(msm_host->src_clk);
- pr_err("%s: can't find dsi_src_clk. ret=%d\n",
+ pr_err("%s: can't find src clock. ret=%d\n",
__func__, ret);
msm_host->src_clk = NULL;
goto exit;
@@ -406,7 +406,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
if (!msm_host->esc_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't get esc_clk_src. ret=%d\n",
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
__func__, ret);
goto exit;
}
@@ -414,7 +414,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
if (!msm_host->dsi_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't get dsi_clk_src. ret=%d\n",
+ pr_err("%s: can't get src clock parent. ret=%d\n",
__func__, ret);
}
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 7c9bf91bc22b..790ca280cbfd 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -482,7 +482,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
- phy->ahb_clk = devm_clk_get(dev, "iface_clk");
+ phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index e32a4a4f3797..7c72264101ff 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -150,46 +150,46 @@ static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
static int edp_clk_init(struct edp_ctrl *ctrl)
{
- struct device *dev = &ctrl->pdev->dev;
+ struct platform_device *pdev = ctrl->pdev;
int ret;
- ctrl->aux_clk = devm_clk_get(dev, "core_clk");
+ ctrl->aux_clk = msm_clk_get(pdev, "core");
if (IS_ERR(ctrl->aux_clk)) {
ret = PTR_ERR(ctrl->aux_clk);
- pr_err("%s: Can't find aux_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find core clock, %d\n", __func__, ret);
ctrl->aux_clk = NULL;
return ret;
}
- ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(ctrl->pixel_clk)) {
ret = PTR_ERR(ctrl->pixel_clk);
- pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
ctrl->pixel_clk = NULL;
return ret;
}
- ctrl->ahb_clk = devm_clk_get(dev, "iface_clk");
+ ctrl->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(ctrl->ahb_clk)) {
ret = PTR_ERR(ctrl->ahb_clk);
- pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
ctrl->ahb_clk = NULL;
return ret;
}
- ctrl->link_clk = devm_clk_get(dev, "link_clk");
+ ctrl->link_clk = msm_clk_get(pdev, "link");
if (IS_ERR(ctrl->link_clk)) {
ret = PTR_ERR(ctrl->link_clk);
- pr_err("%s: Can't find link_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find link clock, %d\n", __func__, ret);
ctrl->link_clk = NULL;
return ret;
}
/* need mdp core clock to receive irq */
- ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+ ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
if (IS_ERR(ctrl->mdp_core_clk)) {
ret = PTR_ERR(ctrl->mdp_core_clk);
- pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
ctrl->mdp_core_clk = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 17e069a133a4..e63dc0fb55f8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -208,7 +208,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->hpd_clk_cnt; i++) {
struct clk *clk;
- clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
+ clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
@@ -228,7 +228,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->pwr_clk_cnt; i++) {
struct clk *clk;
- clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
+ clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
@@ -361,7 +361,7 @@ static const char *hpd_reg_names_none[] = {};
static struct hdmi_platform_config hdmi_tx_8660_config;
static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
-static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
+static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static struct hdmi_platform_config hdmi_tx_8960_config = {
HDMI_CFG(hpd_reg, 8960),
@@ -370,8 +370,8 @@ static struct hdmi_platform_config hdmi_tx_8960_config = {
static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"};
-static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
-static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
+static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
static struct hdmi_platform_config hdmi_tx_8974_config = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 534ce5b49781..5e631392dc85 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -48,7 +48,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
for (i = 0; i < cfg->num_clks; i++) {
struct clk *clk;
- clk = devm_clk_get(dev, cfg->clk_names[i]);
+ clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev, "failed to get phy clock: %s (%d)\n",
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index e6ee6b745ab7..0980da8ec966 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -48,7 +48,7 @@ static const char * const hdmi_phy_8960_reg_names[] = {
};
static const char * const hdmi_phy_8960_clk_names[] = {
- "slave_iface_clk",
+ "slave_iface",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 1fb7645cc721..0df504c61833 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -758,9 +758,7 @@ static const char * const hdmi_phy_8996_reg_names[] = {
};
static const char * const hdmi_phy_8996_clk_names[] = {
- "mmagic_iface_clk",
- "iface_clk",
- "ref_clk",
+ "iface", "ref",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index c4a61e537851..4a8b8468586a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -41,8 +41,7 @@ static const char * const hdmi_phy_8x74_reg_names[] = {
};
static const char * const hdmi_phy_8x74_clk_names[] = {
- "iface_clk",
- "alt_iface_clk"
+ "iface", "alt_iface"
};
const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 47fa2aba1983..14bd3bd3e040 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -290,6 +290,9 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp4_crtc->enabled))
return;
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
@@ -308,6 +311,10 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
return;
mdp4_enable(mdp4_kms);
+
+ /* Restore vblank irq handling after power is enabled */
+ drm_crtc_vblank_on(crtc);
+
mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
crtc_flush(crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 60790df91bfa..1abc7f5c345c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -224,7 +224,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 440977677001..e414850dbbda 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -55,18 +55,23 @@ struct mdp5_crtc {
struct completion pp_completion;
+ bool lm_cursor_enabled;
+
struct {
/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
spinlock_t lock;
/* current cursor being scanned out: */
struct drm_gem_object *scanout_bo;
+ uint64_t iova;
uint32_t width, height;
uint32_t x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
+
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -114,6 +119,8 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
return 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (!plane->state->visible)
+ continue;
flush_mask |= mdp5_plane_get_flush(plane);
}
@@ -242,6 +249,9 @@ static void blend_setup(struct drm_crtc *crtc)
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp5_pipe right_pipe;
+ if (!plane->state->visible)
+ continue;
+
pstate = to_mdp5_plane_state(plane->state);
pstates[pstate->stage] = pstate;
stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
@@ -422,11 +432,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp5_crtc->enabled))
return;
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
if (mdp5_cstate->cmd_mode)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
mdp5_crtc->enabled = false;
}
@@ -446,6 +459,29 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
pm_runtime_get_sync(dev);
+ if (mdp5_crtc->lm_cursor_enabled) {
+ /*
+ * Restore LM cursor state, as it might have been lost
+ * with suspend:
+ */
+ if (mdp5_crtc->cursor.iova) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ mdp5_crtc_restore_cursor(crtc);
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, true);
+ } else {
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, false);
+ }
+ }
+
+ /* Restore vblank irq handling after power is enabled */
+ drm_crtc_vblank_on(crtc);
+
mdp5_crtc_mode_set_nofb(crtc);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
@@ -580,6 +616,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", crtc->name);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ if (!pstate->visible)
+ continue;
+
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -723,6 +762,50 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
mdp5_crtc->cursor.y);
}
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+ uint32_t blendcfg, stride;
+ uint32_t x, y, width, height;
+ uint32_t roi_w, roi_h;
+ int lm;
+
+ assert_spin_locked(&mdp5_crtc->cursor.lock);
+
+ lm = mdp5_cstate->pipeline.mixer->lm;
+
+ x = mdp5_crtc->cursor.x;
+ y = mdp5_crtc->cursor.y;
+ width = mdp5_crtc->cursor.width;
+ height = mdp5_crtc->cursor.height;
+
+ stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
+
+ get_roi(crtc, &roi_w, &roi_h);
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
+ MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
+ MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+ MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
+ MDP5_LM_CURSOR_START_XY_Y_START(y) |
+ MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
+ mdp5_crtc->cursor.iova);
+
+ blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
+ blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+}
+
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle,
uint32_t width, uint32_t height)
@@ -735,16 +818,18 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, stride;
- uint64_t cursor_addr;
struct mdp5_ctl *ctl;
- int ret, lm;
- enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+ int ret;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
- uint32_t roi_w, roi_h;
bool cursor_enable = true;
unsigned long flags;
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_set is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
@@ -761,6 +846,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+ mdp5_crtc->cursor.iova = 0;
pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
@@ -769,13 +855,11 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+ &mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
- lm = mdp5_cstate->pipeline.mixer->lm;
- stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
-
pm_runtime_get_sync(&pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -785,22 +869,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
mdp5_crtc->cursor.width = width;
mdp5_crtc->cursor.height = height;
- get_roi(crtc, &roi_w, &roi_h);
-
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
- MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
- MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
- MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
- MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
- MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
-
- blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
- blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+ mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -815,7 +884,7 @@ set_cursor:
crtc_flush(crtc, flush_mask);
end:
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -829,12 +898,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
- uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ struct drm_device *dev = crtc->dev;
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_move is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
/* don't support LM cursors when we we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
@@ -851,17 +926,12 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
pm_runtime_get_sync(&mdp5_kms->pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
- MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
- MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
- MDP5_LM_CURSOR_START_XY_Y_START(y) |
- MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
crtc_flush(crtc, flush_mask);
- pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
return 0;
}
@@ -941,16 +1011,6 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.atomic_print_state = mdp5_crtc_atomic_print_state,
};
-static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
- .set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = mdp5_crtc_reset,
- .atomic_duplicate_state = mdp5_crtc_duplicate_state,
- .atomic_destroy_state = mdp5_crtc_destroy_state,
- .atomic_print_state = mdp5_crtc_atomic_print_state,
-};
-
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.atomic_check = mdp5_crtc_atomic_check,
@@ -1119,12 +1179,10 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
- if (cursor_plane)
- drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
- &mdp5_crtc_no_lm_cursor_funcs, NULL);
- else
- drm_crtc_init_with_planes(dev, crtc, plane, NULL,
- &mdp5_crtc_funcs, NULL);
+ mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
+
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+ &mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 5b851380d3f2..36ad3cbe5f79 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -384,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index bb5deb00c899..280e368bc9bb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -54,7 +54,7 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -72,7 +72,7 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -84,7 +84,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
@@ -119,7 +119,7 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -132,5 +132,5 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f7c0698fec40..3e9bba4d6624 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -125,7 +125,7 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
@@ -496,12 +496,12 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
- DBG("MDP5 version v%d.%d", *major, *minor);
+ dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -599,7 +599,7 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_crtc *crtc;
struct drm_encoder *encoder;
- if (pipe < 0 || pipe >= priv->num_crtcs)
+ if (pipe >= priv->num_crtcs)
return 0;
crtc = priv->crtcs[pipe];
@@ -683,7 +683,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;;
}
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
ret = modeset_init(mdp5_kms);
if (ret) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
index 2bfac3712685..ff52c49095f9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -17,19 +17,20 @@
#include "mdp5_kms.h"
-struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
- struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_state *state;
struct mdp5_hw_pipe_state *old_state, *new_state;
- struct mdp5_hw_pipe *hwpipe = NULL;
- int i;
+ int i, j;
state = mdp5_get_state(s);
if (IS_ERR(state))
- return ERR_CAST(state);
+ return PTR_ERR(state);
/* grab old_state after mdp5_get_state(), since now we hold lock: */
old_state = &mdp5_kms->state->hwpipe;
@@ -64,31 +65,67 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
/* possible candidate, take the one with the
* fewest unneeded caps bits set:
*/
- if (!hwpipe || (hweight_long(cur->caps & ~caps) <
- hweight_long(hwpipe->caps & ~caps)))
- hwpipe = cur;
+ if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
+ hweight_long((*hwpipe)->caps & ~caps))) {
+ bool r_found = false;
+
+ if (r_hwpipe) {
+ for (j = i + 1; j < mdp5_kms->num_hwpipes;
+ j++) {
+ struct mdp5_hw_pipe *r_cur =
+ mdp5_kms->hwpipes[j];
+
+ /* reject different types of hwpipes */
+ if (r_cur->caps != cur->caps)
+ continue;
+
+ /* respect priority, eg. VIG0 > VIG1 */
+ if (cur->pipe > r_cur->pipe)
+ continue;
+
+ *r_hwpipe = r_cur;
+ r_found = true;
+ break;
+ }
+ }
+
+ if (!r_hwpipe || r_found)
+ *hwpipe = cur;
+ }
}
- if (!hwpipe)
- return ERR_PTR(-ENOMEM);
+ if (!(*hwpipe))
+ return -ENOMEM;
+
+ if (r_hwpipe && !(*r_hwpipe))
+ return -ENOMEM;
if (mdp5_kms->smp) {
int ret;
- DBG("%s: alloc SMP blocks", hwpipe->name);
+ /* We don't support SMP and 2 hwpipes/plane together */
+ WARN_ON(r_hwpipe);
+
+ DBG("%s: alloc SMP blocks", (*hwpipe)->name);
ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
- hwpipe->pipe, blkcfg);
+ (*hwpipe)->pipe, blkcfg);
if (ret)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- hwpipe->blkcfg = blkcfg;
+ (*hwpipe)->blkcfg = blkcfg;
}
DBG("%s: assign to plane %s for caps %x",
- hwpipe->name, plane->name, caps);
- new_state->hwpipe_to_plane[hwpipe->idx] = plane;
+ (*hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane;
- return hwpipe;
+ if (r_hwpipe) {
+ DBG("%s: assign to right of plane %s for caps %x",
+ (*r_hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane;
+ }
+
+ return 0;
}
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 924c3e6f9517..bb2b0ac7aa2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -44,9 +44,10 @@ struct mdp5_hw_pipe_state {
struct drm_plane *hwpipe_to_plane[SSPP_MAX];
};
-struct mdp5_hw_pipe *__must_check
-mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
- uint32_t caps, uint32_t blkcfg);
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe);
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 4b22ac3413a1..be50445f9901 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,15 +31,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest);
-static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
@@ -254,18 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.atomic_print_state = mdp5_plane_atomic_print_state,
};
-static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
- .update_plane = mdp5_update_cursor_plane_legacy,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = mdp5_plane_destroy,
- .atomic_set_property = mdp5_plane_atomic_set_property,
- .atomic_get_property = mdp5_plane_atomic_get_property,
- .reset = mdp5_plane_reset,
- .atomic_duplicate_state = mdp5_plane_duplicate_state,
- .atomic_destroy_state = mdp5_plane_destroy_state,
- .atomic_print_state = mdp5_plane_atomic_print_state,
-};
-
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -414,31 +393,30 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
struct mdp5_hw_pipe *old_right_hwpipe =
mdp5_state->r_hwpipe;
-
- mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
- plane, caps, blkcfg);
- if (IS_ERR(mdp5_state->hwpipe)) {
- DBG("%s: failed to assign hwpipe!", plane->name);
- return PTR_ERR(mdp5_state->hwpipe);
+ struct mdp5_hw_pipe *new_hwpipe = NULL;
+ struct mdp5_hw_pipe *new_right_hwpipe = NULL;
+
+ ret = mdp5_pipe_assign(state->state, plane, caps,
+ blkcfg, &new_hwpipe,
+ need_right_hwpipe ?
+ &new_right_hwpipe : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hwpipe(s)!",
+ plane->name);
+ return ret;
}
- if (need_right_hwpipe) {
- mdp5_state->r_hwpipe =
- mdp5_pipe_assign(state->state, plane,
- caps, blkcfg);
- if (IS_ERR(mdp5_state->r_hwpipe)) {
- DBG("%s: failed to assign right hwpipe",
- plane->name);
- return PTR_ERR(mdp5_state->r_hwpipe);
- }
- } else {
+ mdp5_state->hwpipe = new_hwpipe;
+ if (need_right_hwpipe)
+ mdp5_state->r_hwpipe = new_right_hwpipe;
+ else
/*
* set it to NULL so that the driver knows we
* don't have a right hwpipe when committing a
* new state
*/
mdp5_state->r_hwpipe = NULL;
- }
+
mdp5_pipe_release(state->state, old_hwpipe);
mdp5_pipe_release(state->state, old_right_hwpipe);
@@ -487,11 +465,98 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
}
}
+static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip;
+ int min_scale, max_scale;
+ int ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ mdp5_state = to_mdp5_plane_state(state);
+
+ /* don't use fast path if we don't have a hwpipe allocated yet */
+ if (!mdp5_state->hwpipe)
+ return -EINVAL;
+
+ /* only allow changing of position(crtc x/y or src x/y) in fast path */
+ if (plane->state->crtc != state->crtc ||
+ plane->state->src_w != state->src_w ||
+ plane->state->src_h != state->src_h ||
+ plane->state->crtc_w != state->crtc_w ||
+ plane->state->crtc_h != state->crtc_h ||
+ !plane->state->fb ||
+ plane->state->fb != state->fb)
+ return -EINVAL;
+
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_plane_helper_check_state(state, &clip, min_scale,
+ max_scale, true, true);
+ if (ret)
+ return ret;
+
+ /*
+ * if the visibility of the plane changes (i.e, if the cursor is
+ * clipped out completely, we can't take the async path because
+ * we need to stage/unstage the plane from the Layer Mixer(s). We
+ * also assign/unassign the hwpipe(s) tied to the plane. We avoid
+ * taking the fast path for both these reasons.
+ */
+ if (state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (plane_enabled(new_state)) {
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline *pipeline =
+ mdp5_crtc_get_pipeline(plane->crtc);
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ WARN_ON(ret < 0);
+
+ ctl = mdp5_crtc_get_ctl(new_state->crtc);
+
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
+ }
+
+ *to_mdp5_plane_state(plane->state) =
+ *to_mdp5_plane_state(new_state);
+}
+
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.prepare_fb = mdp5_plane_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update,
+ .atomic_async_check = mdp5_plane_atomic_async_check,
+ .atomic_async_update = mdp5_plane_atomic_async_update,
};
static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
@@ -996,84 +1061,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
return ret;
}
-static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_plane_state *plane_state, *new_plane_state;
- struct mdp5_plane_state *mdp5_pstate;
- struct drm_crtc_state *crtc_state = crtc->state;
- int ret;
-
- if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state))
- goto slow;
-
- plane_state = plane->state;
- mdp5_pstate = to_mdp5_plane_state(plane_state);
-
- /* don't use fast path if we don't have a hwpipe allocated yet */
- if (!mdp5_pstate->hwpipe)
- goto slow;
-
- /* only allow changing of position(crtc x/y or src x/y) in fast path */
- if (plane_state->crtc != crtc ||
- plane_state->src_w != src_w ||
- plane_state->src_h != src_h ||
- plane_state->crtc_w != crtc_w ||
- plane_state->crtc_h != crtc_h ||
- !plane_state->fb ||
- plane_state->fb != fb)
- goto slow;
-
- new_plane_state = mdp5_plane_duplicate_state(plane);
- if (!new_plane_state)
- return -ENOMEM;
-
- new_plane_state->src_x = src_x;
- new_plane_state->src_y = src_y;
- new_plane_state->src_w = src_w;
- new_plane_state->src_h = src_h;
- new_plane_state->crtc_x = crtc_x;
- new_plane_state->crtc_y = crtc_y;
- new_plane_state->crtc_w = crtc_w;
- new_plane_state->crtc_h = crtc_h;
-
- ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
- if (ret)
- goto slow_free;
-
- if (new_plane_state->visible) {
- struct mdp5_ctl *ctl;
- struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
-
- ret = mdp5_plane_mode_set(plane, crtc, fb,
- &new_plane_state->src,
- &new_plane_state->dst);
- WARN_ON(ret < 0);
-
- ctl = mdp5_crtc_get_ctl(crtc);
-
- mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
- }
-
- *to_mdp5_plane_state(plane_state) =
- *to_mdp5_plane_state(new_plane_state);
-
- mdp5_plane_destroy_state(plane, new_plane_state);
-
- return 0;
-slow_free:
- mdp5_plane_destroy_state(plane, new_plane_state);
-slow:
- return drm_atomic_helper_update_plane(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, ctx);
-}
-
/*
* Use this func and the one below only after the atomic state has been
* successfully swapped
@@ -1133,16 +1120,9 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
- if (type == DRM_PLANE_TYPE_CURSOR)
- ret = drm_universal_plane_init(dev, plane, 0xff,
- &mdp5_cursor_plane_funcs,
- mdp5_plane->formats, mdp5_plane->nformats,
- NULL, type, NULL);
- else
- ret = drm_universal_plane_init(dev, plane, 0xff,
- &mdp5_plane_funcs,
- mdp5_plane->formats, mdp5_plane->nformats,
- NULL, type, NULL);
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ NULL, type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 025d454163b0..bf5f8c39f34d 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -146,35 +146,6 @@ static void commit_worker(struct work_struct *work)
complete_commit(container_of(work, struct msm_commit, work), true);
}
-/*
- * this func is identical to the drm_atomic_helper_check, but we keep this
- * because we might eventually need to have a more finegrained check
- * sequence without using the atomic helpers.
- *
- * In the past, we first called drm_atomic_helper_check_planes, and then
- * drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's
- * ->atomic_check could update ->mode_changed for pixel format changes.
- * This, however isn't needed now because if there is a pixel format change,
- * we just assign a new hwpipe for it with a new SMP allocation. We might
- * eventually hit a condition where we would need to do a full modeset if
- * we run out of planes. There, we'd probably need to set mode_changed.
- */
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
-{
- int ret;
-
- ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
- return ret;
-
- return ret;
-}
-
/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
@@ -202,6 +173,18 @@ int msm_atomic_commit(struct drm_device *dev,
if (ret)
return ret;
+ /*
+ * Note that plane->atomic_async_check() should fail if we need
+ * to re-assign hwpipe or anything that touches global atomic
+ * state, so we'll never go down the async update path in those
+ * cases.
+ */
+ if (state->async_update) {
+ drm_atomic_helper_async_commit(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return 0;
+ }
+
c = commit_init(state);
if (!c) {
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 606df7bea97b..0a3ea3034e39 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,9 +29,12 @@
* - 1.0.0 - initial interface
* - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
* - 1.2.0 - adds explicit fence support for submit ioctl
+ * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
+ * SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
+ * MSM_GEM_INFO ioctl.
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 2
+#define MSM_VERSION_MINOR 3
#define MSM_VERSION_PATCHLEVEL 0
static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -44,7 +47,7 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
- .atomic_check = msm_atomic_check,
+ .atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
.atomic_state_alloc = msm_atomic_state_alloc,
.atomic_state_clear = msm_atomic_state_clear,
@@ -211,7 +214,6 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
- struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
@@ -253,15 +255,6 @@ static int msm_drm_uninit(struct device *dev)
if (kms && kms->funcs)
kms->funcs->destroy(kms);
- if (gpu) {
- mutex_lock(&ddev->struct_mutex);
- // XXX what do we do here?
- //pm_runtime_enable(&pdev->dev);
- gpu->funcs->pm_suspend(gpu);
- mutex_unlock(&ddev->struct_mutex);
- gpu->funcs->destroy(gpu);
- }
-
if (priv->vram.paddr) {
unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
drm_mm_takedown(&priv->vram.mm);
@@ -514,24 +507,37 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static int context_init(struct drm_device *dev, struct drm_file *file)
{
struct msm_file_private *ctx;
- /* For now, load gpu on open.. to avoid the requirement of having
- * firmware in the initrd.
- */
- load_gpu(dev);
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ msm_submitqueue_init(dev, ctx);
+
file->driver_priv = ctx;
return 0;
}
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ /* For now, load gpu on open.. to avoid the requirement of having
+ * firmware in the initrd.
+ */
+ load_gpu(dev);
+
+ return context_init(dev, file);
+}
+
+static void context_close(struct msm_file_private *ctx)
+{
+ msm_submitqueue_close(ctx);
+ kfree(ctx);
+}
+
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -542,7 +548,7 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
priv->lastctx = NULL;
mutex_unlock(&dev->struct_mutex);
- kfree(ctx);
+ context_close(ctx);
}
static void msm_lastclose(struct drm_device *dev)
@@ -737,16 +743,27 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout);
+ struct msm_gpu_submitqueue *queue;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
- if (!priv->gpu)
+ if (!gpu)
return 0;
- return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
+ queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
+ true);
+
+ msm_submitqueue_put(queue);
+ return ret;
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
@@ -787,6 +804,28 @@ unlock:
return ret;
}
+
+static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_submitqueue *args = data;
+
+ if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
+ return -EINVAL;
+
+ return msm_submitqueue_create(dev, file->driver_priv, args->prio,
+ args->flags, &args->id);
+}
+
+
+static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ u32 id = *(u32 *) data;
+
+ return msm_submitqueue_remove(file->driver_priv, id);
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -796,6 +835,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 5e8109c07560..c646843d8822 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -56,11 +56,9 @@ struct msm_gem_address_space;
struct msm_gem_vma;
struct msm_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
- int dummy;
+ rwlock_t queuelock;
+ struct list_head submitqueues;
+ int queueid;
};
enum msm_mdp_plane_property {
@@ -76,6 +74,8 @@ struct msm_vblank_ctrl {
spinlock_t lock;
};
+#define MSM_GPU_MAX_RINGS 4
+
struct msm_drm_private {
struct drm_device *dev;
@@ -108,7 +108,8 @@ struct msm_drm_private {
struct drm_fb_helper *fbdev;
- struct msm_rd_state *rd;
+ struct msm_rd_state *rd; /* debugfs to dump all submits */
+ struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
/* list of GEM objects: */
@@ -154,20 +155,12 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
-
- /* task holding struct_mutex.. currently only used in submit path
- * to detect and reject faults from copy_from_user() for submit
- * ioctl.
- */
- struct task_struct *struct_mutex_task;
};
struct msm_format {
uint32_t pixel_format;
};
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
@@ -219,6 +212,7 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj,
@@ -303,7 +297,8 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
-void msm_rd_dump_submit(struct msm_gem_submit *submit);
+void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
@@ -319,6 +314,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
+struct msm_gpu_submitqueue;
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id);
+int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a2f89bac9c16..349c12f670eb 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -31,7 +31,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
- fctx->name = name;
+ strncpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 56061aa1959d..1aa6a4c6530c 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -22,7 +22,7 @@
struct msm_fence_context {
struct drm_device *dev;
- const char *name;
+ char name[32];
unsigned context;
/* last_fence == completed_fence --> no pending work */
uint32_t last_fence; /* last assigned fence */
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ea5bb0e1632c..81fe6d6740ce 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -470,14 +470,16 @@ fail:
return ret;
}
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
mutex_lock(&msm_obj->lock);
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ if (WARN_ON(msm_obj->madv > madv)) {
+ dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
}
@@ -513,6 +515,22 @@ fail:
return ERR_PTR(ret);
}
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+{
+ return get_vaddr(obj, MSM_MADV_WILLNEED);
+}
+
+/*
+ * Don't use this! It is for the very special case of dumping
+ * submits from GPU hangs or faults, were the bo may already
+ * be MSM_MADV_DONTNEED, but we know the buffer is still on the
+ * active list.
+ */
+void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
+{
+ return get_vaddr(obj, __MSM_MADV_PURGED);
+}
+
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 91c210d2359c..9320e184b48d 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -138,12 +138,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
struct msm_gem_submit {
struct drm_device *dev;
struct msm_gpu *gpu;
- struct list_head node; /* node in gpu submit_list */
+ struct list_head node; /* node in ring submit list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
+ uint32_t seqno; /* Sequence number of the submit on the ring */
struct dma_fence *fence;
+ struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
+ struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 93535cac0676..b8dc8f96caf2 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -31,7 +31,8 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
+ struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
+ uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
@@ -49,6 +50,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
+ submit->queue = queue;
+ submit->ring = gpu->rb[queue->prio];
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
@@ -66,6 +69,8 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
+ msm_submitqueue_put(submit->queue);
+
kfree(submit);
}
@@ -156,7 +161,8 @@ out:
return ret;
}
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
+ int i, bool backoff)
{
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -166,7 +172,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
- if (!(submit->bos[i].flags & BO_VALID))
+ if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
@@ -201,10 +207,10 @@ retry:
fail:
for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(submit, i, true);
if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked);
+ submit_unlock_unpin_bo(submit, slow_locked, true);
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -243,7 +249,8 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
if (no_implicit)
continue;
- ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
+ ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
+ write);
if (ret)
break;
}
@@ -387,7 +394,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -405,6 +412,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_gpu *gpu = priv->gpu;
struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
+ struct msm_gpu_submitqueue *queue;
+ struct msm_ringbuffer *ring;
int out_fence_fd = -1;
unsigned i;
int ret;
@@ -421,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
+ queue = msm_submitqueue_get(ctx, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ring = gpu->rb[queue->prio];
+
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);
@@ -431,7 +446,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
- if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
+ if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
ret = dma_fence_wait(in_fence, true);
if (ret)
return ret;
@@ -449,9 +464,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
}
- priv->struct_mutex_task = current;
- submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out_unlock;
@@ -534,7 +548,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- submit->fence = msm_fence_alloc(gpu->fctx);
+ submit->fence = msm_fence_alloc(ring->fctx);
if (IS_ERR(submit->fence)) {
ret = PTR_ERR(submit->fence);
submit->fence = NULL;
@@ -567,7 +581,6 @@ out:
out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
- priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6a887032c66a..232201403439 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,6 +20,8 @@
#include "msm_mmu.h"
#include "msm_fence.h"
+#include <linux/string_helpers.h>
+
/*
* Power Management:
@@ -221,33 +223,102 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
* Hangcheck detection for locked gpu:
*/
+static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ uint32_t fence)
+{
+ struct msm_gem_submit *submit;
+
+ list_for_each_entry(submit, &ring->submits, node) {
+ if (submit->seqno > fence)
+ break;
+
+ msm_update_fence(submit->ring->fctx,
+ submit->fence->seqno);
+ }
+}
+
+static struct msm_gem_submit *
+find_submit(struct msm_ringbuffer *ring, uint32_t fence)
+{
+ struct msm_gem_submit *submit;
+
+ WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
+
+ list_for_each_entry(submit, &ring->submits, node)
+ if (submit->seqno == fence)
+ return submit;
+
+ return NULL;
+}
+
static void retire_submits(struct msm_gpu *gpu);
static void recover_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
-
- msm_update_fence(gpu->fctx, fence + 1);
+ struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ int i;
mutex_lock(&dev->struct_mutex);
dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
- list_for_each_entry(submit, &gpu->submit_list, node) {
- if (submit->fence->seqno == (fence + 1)) {
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- dev_err(dev->dev, "%s: offending task: %s\n",
- gpu->name, task->comm);
- }
- rcu_read_unlock();
- break;
+
+ submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
+ if (submit) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ char *cmd;
+
+ /*
+ * So slightly annoying, in other paths like
+ * mmap'ing gem buffers, mmap_sem is acquired
+ * before struct_mutex, which means we can't
+ * hold struct_mutex across the call to
+ * get_cmdline(). But submits are retired
+ * from the same in-order workqueue, so we can
+ * safely drop the lock here without worrying
+ * about the submit going away.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ mutex_lock(&dev->struct_mutex);
+
+ dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+ gpu->name, task->comm, cmd);
+
+ msm_rd_dump_submit(priv->hangrd, submit,
+ "offending task: %s (%s)", task->comm, cmd);
+ } else {
+ msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
+ rcu_read_unlock();
+ }
+
+
+ /*
+ * Update all the rings with the latest and greatest fence.. this
+ * needs to happen after msm_rd_dump_submit() to ensure that the
+ * bo's referenced by the offending submit are still around.
+ */
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ uint32_t fence = ring->memptrs->fence;
+
+ /*
+ * For the current (faulting?) ring/submit advance the fence by
+ * one more to clear the faulting submit
+ */
+ if (ring == cur_ring)
+ fence++;
+
+ update_fences(gpu, ring, fence);
}
if (msm_gpu_active(gpu)) {
@@ -258,9 +329,15 @@ static void recover_worker(struct work_struct *work)
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- /* replay the remaining submits after the one that hung: */
- list_for_each_entry(submit, &gpu->submit_list, node) {
- gpu->funcs->submit(gpu, submit, NULL);
+ /*
+ * Replay all remaining submits starting with highest priority
+ * ring
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ list_for_each_entry(submit, &ring->submits, node)
+ gpu->funcs->submit(gpu, submit, NULL);
}
}
@@ -276,30 +353,32 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ uint32_t fence = ring->memptrs->fence;
- if (fence != gpu->hangcheck_fence) {
+ if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
- gpu->hangcheck_fence = fence;
- } else if (fence < gpu->fctx->last_fence) {
+ ring->hangcheck_fence = fence;
+ } else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
- gpu->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
- gpu->name);
+ ring->hangcheck_fence = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->fctx->last_fence);
+ gpu->name, ring->seqno);
+
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->fctx->last_fence > gpu->hangcheck_fence)
+ if (ring->seqno > ring->hangcheck_fence)
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -428,19 +507,18 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
static void retire_submits(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit, *tmp;
+ int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- while (!list_empty(&gpu->submit_list)) {
- struct msm_gem_submit *submit;
-
- submit = list_first_entry(&gpu->submit_list,
- struct msm_gem_submit, node);
+ /* Retire the commits starting with highest priority */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
- if (dma_fence_is_signaled(submit->fence)) {
- retire_submit(gpu, submit);
- } else {
- break;
+ list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
+ if (dma_fence_is_signaled(submit->fence))
+ retire_submit(gpu, submit);
}
}
}
@@ -449,9 +527,10 @@ static void retire_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
struct drm_device *dev = gpu->dev;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ int i;
- msm_update_fence(gpu->fctx, fence);
+ for (i = 0; i < gpu->nr_rings; i++)
+ update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
@@ -472,6 +551,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -480,9 +560,11 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gpu_hw_init(gpu);
- list_add_tail(&submit->node, &gpu->submit_list);
+ submit->seqno = ++ring->seqno;
+
+ list_add_tail(&submit->node, &ring->submits);
- msm_rd_dump_submit(submit);
+ msm_rd_dump_submit(priv->rd, submit, NULL);
update_sw_cntrs(gpu);
@@ -605,7 +687,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
- int ret;
+ int i, ret, nr_rings = config->nr_rings;
+ void *memptrs;
+ uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -613,21 +697,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- gpu->fctx = msm_fence_context_alloc(drm, name);
- if (IS_ERR(gpu->fctx)) {
- ret = PTR_ERR(gpu->fctx);
- gpu->fctx = NULL;
- goto fail;
- }
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
- INIT_LIST_HEAD(&gpu->submit_list);
- setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
spin_lock_init(&gpu->perf_lock);
@@ -689,34 +765,76 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- /* Create ringbuffer: */
- gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
- if (IS_ERR(gpu->rb)) {
- ret = PTR_ERR(gpu->rb);
- gpu->rb = NULL;
- dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
+ memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+ MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+ &memptrs_iova);
+
+ if (IS_ERR(memptrs)) {
+ ret = PTR_ERR(memptrs);
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
+ if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+ DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
+ ARRAY_SIZE(gpu->rb));
+ nr_rings = ARRAY_SIZE(gpu->rb);
+ }
+
+ /* Create ringbuffer(s): */
+ for (i = 0; i < nr_rings; i++) {
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
+
+ if (IS_ERR(gpu->rb[i])) {
+ ret = PTR_ERR(gpu->rb[i]);
+ dev_err(drm->dev,
+ "could not create ringbuffer %d: %d\n", i, ret);
+ goto fail;
+ }
+
+ memptrs += sizeof(struct msm_rbmemptrs);
+ memptrs_iova += sizeof(struct msm_rbmemptrs);
+ }
+
+ gpu->nr_rings = nr_rings;
+
return 0;
fail:
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ }
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ }
+
platform_set_drvdata(pdev, NULL);
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
+ int i;
+
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
bs_fini(gpu);
- if (gpu->rb) {
- if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
- msm_ringbuffer_destroy(gpu->rb);
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ }
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
if (!IS_ERR_OR_NULL(gpu->aspace)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index df4e2771fb85..e113d64574d3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -33,7 +33,7 @@ struct msm_gpu_config {
const char *irqname;
uint64_t va_start;
uint64_t va_end;
- unsigned int ringsz;
+ unsigned int nr_rings;
};
/* So far, with hardware that I've seen to date, we can have:
@@ -57,9 +57,9 @@ struct msm_gpu_funcs {
int (*pm_resume)(struct msm_gpu *gpu);
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
- void (*flush)(struct msm_gpu *gpu);
+ void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
- uint32_t (*last_fence)(struct msm_gpu *gpu);
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
@@ -86,16 +86,12 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
- /* ringbuffer: */
- struct msm_ringbuffer *rb;
- uint64_t rb_iova;
+ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+ int nr_rings;
/* list of GEM active objects: */
struct list_head active_list;
- /* fencing: */
- struct msm_fence_context *fctx;
-
/* does gpu need hw_init? */
bool needs_hw_init;
@@ -126,15 +122,31 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
- uint32_t hangcheck_fence;
struct work_struct recover_work;
- struct list_head submit_list;
+ struct drm_gem_object *memptrs_bo;
};
+/* It turns out that all targets use the same ringbuffer size */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+ (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ if (ring->seqno > ring->memptrs->fence)
+ return true;
+ }
+
+ return false;
}
/* Perf-Counters:
@@ -150,6 +162,15 @@ struct msm_gpu_perfcntr {
const char *name;
};
+struct msm_gpu_submitqueue {
+ int id;
+ u32 flags;
+ u32 prio;
+ int faults;
+ struct list_head node;
+ struct kref ref;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -223,4 +244,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);
+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
+{
+ if (queue)
+ kref_put(&queue->ref, msm_submitqueue_destroy);
+}
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index ec56794ad039..3aa8a8576abe 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -19,11 +19,17 @@
*
* tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
*
- * To log the cmdstream in a format that is understood by freedreno/cffdump
+ * to log the cmdstream in a format that is understood by freedreno/cffdump
* utility. By comparing the last successfully completed fence #, to the
* cmdstream for the next fence, you can narrow down which process and submit
* caused the gpu crash/lockup.
*
+ * Additionally:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
+ *
+ * will capture just the cmdstream from submits which triggered a GPU hang.
+ *
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
@@ -220,53 +226,89 @@ static const struct file_operations rd_debugfs_fops = {
.release = rd_release,
};
-int msm_rd_debugfs_init(struct drm_minor *minor)
+
+static void rd_cleanup(struct msm_rd_state *rd)
+{
+ if (!rd)
+ return;
+
+ mutex_destroy(&rd->read_lock);
+ kfree(rd);
+}
+
+static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
- struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd;
struct dentry *ent;
-
- /* only create on first minor: */
- if (priv->rd)
- return 0;
+ int ret = 0;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
rd->dev = minor->dev;
rd->fifo.buf = rd->buf;
mutex_init(&rd->read_lock);
- priv->rd = rd;
init_waitqueue_head(&rd->fifo_event);
- ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
+ ent = debugfs_create_file(name, S_IFREG | S_IRUGO,
minor->debugfs_root, rd, &rd_debugfs_fops);
if (!ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/rd\n",
- minor->debugfs_root);
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
+ minor->debugfs_root, name);
+ ret = -ENOMEM;
goto fail;
}
+ return rd;
+
+fail:
+ rd_cleanup(rd);
+ return ERR_PTR(ret);
+}
+
+int msm_rd_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_rd_state *rd;
+ int ret;
+
+ /* only create on first minor: */
+ if (priv->rd)
+ return 0;
+
+ rd = rd_init(minor, "rd");
+ if (IS_ERR(rd)) {
+ ret = PTR_ERR(rd);
+ goto fail;
+ }
+
+ priv->rd = rd;
+
+ rd = rd_init(minor, "hangrd");
+ if (IS_ERR(rd)) {
+ ret = PTR_ERR(rd);
+ goto fail;
+ }
+
+ priv->hangrd = rd;
+
return 0;
fail:
msm_rd_debugfs_cleanup(priv);
- return -1;
+ return ret;
}
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
- struct msm_rd_state *rd = priv->rd;
-
- if (!rd)
- return;
-
+ rd_cleanup(priv->rd);
priv->rd = NULL;
- mutex_destroy(&rd->read_lock);
- kfree(rd);
+
+ rd_cleanup(priv->hangrd);
+ priv->hangrd = NULL;
}
static void snapshot_buf(struct msm_rd_state *rd,
@@ -276,10 +318,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf;
- buf = msm_gem_get_vaddr(&obj->base);
- if (IS_ERR(buf))
- return;
-
if (iova) {
buf += iova - submit->bos[idx].iova;
} else {
@@ -287,20 +325,33 @@ static void snapshot_buf(struct msm_rd_state *rd,
size = obj->base.size;
}
+ /*
+ * Always write the GPUADDR header so can get a complete list of all the
+ * buffers in the cmd
+ */
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
+
+ /* But only dump the contents of buffers marked READ */
+ if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
+ return;
+
+ buf = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(buf))
+ return;
+
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base);
}
/* called under struct_mutex */
-void msm_rd_dump_submit(struct msm_gem_submit *submit)
+void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...)
{
struct drm_device *dev = submit->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_rd_state *rd = priv->rd;
- char msg[128];
+ struct task_struct *task;
+ char msg[256];
int i, n;
if (!rd->open)
@@ -311,23 +362,32 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
*/
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
- TASK_COMM_LEN, current->comm, task_pid_nr(current),
- submit->fence->seqno);
+ if (fmt) {
+ va_list args;
- rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+ va_start(args, fmt);
+ n = vsnprintf(msg, sizeof(msg), fmt, args);
+ va_end(args);
- if (rd_full) {
- for (i = 0; i < submit->nr_bos; i++) {
- /* buffers that are written to probably don't start out
- * with anything interesting:
- */
- if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- continue;
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+ }
- snapshot_buf(rd, submit, i, 0, 0);
- }
+ rcu_read_lock();
+ task = pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ TASK_COMM_LEN, task->comm,
+ pid_nr(submit->pid), submit->seqno);
+ } else {
+ n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
+ pid_nr(submit->pid), submit->seqno);
}
+ rcu_read_unlock();
+
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+
+ for (i = 0; rd_full && i < submit->nr_bos; i++)
+ snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index bf065a540130..6ca98da35f63 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,13 +18,15 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ void *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
+ char name[32];
int ret;
- if (WARN_ON(!is_power_of_2(size)))
- return ERR_PTR(-EINVAL);
+ /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
@@ -33,32 +35,46 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
-
+ ring->id = id;
/* Pass NULL for the iova pointer - we will map it later */
- ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
- gpu->aspace, &ring->bo, NULL);
+ ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
+ MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = 0;
goto fail;
}
- ring->end = ring->start + (size / 4);
+ ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+ ring->next = ring->start;
ring->cur = ring->start;
- ring->size = size;
+ ring->memptrs = memptrs;
+ ring->memptrs_iova = memptrs_iova;
+
+ INIT_LIST_HEAD(&ring->submits);
+ spin_lock_init(&ring->lock);
+
+ snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
+
+ ring->fctx = msm_fence_context_alloc(gpu->dev, name);
return ring;
fail:
- if (ring)
- msm_ringbuffer_destroy(ring);
+ msm_ringbuffer_destroy(ring);
return ERR_PTR(ret);
}
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
+ if (IS_ERR_OR_NULL(ring))
+ return;
+
+ msm_fence_context_free(ring->fctx);
+
if (ring->bo) {
+ msm_gem_put_iova(ring->bo, ring->gpu->aspace);
msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 6e0e1049fa4f..cffce094aecb 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -20,14 +20,31 @@
#include "msm_drv.h"
+#define rbmemptr(ring, member) \
+ ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+
+struct msm_rbmemptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t fence;
+};
+
struct msm_ringbuffer {
struct msm_gpu *gpu;
- int size;
+ int id;
struct drm_gem_object *bo;
- uint32_t *start, *end, *cur;
+ uint32_t *start, *end, *cur, *next;
+ struct list_head submits;
+ uint64_t iova;
+ uint32_t seqno;
+ uint32_t hangcheck_fence;
+ struct msm_rbmemptrs *memptrs;
+ uint64_t memptrs_iova;
+ struct msm_fence_context *fctx;
+ spinlock_t lock;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +52,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
- if (ring->cur == ring->end)
- ring->cur = ring->start;
- *(ring->cur++) = data;
+ /*
+ * ring->next points to the current command being written - it won't be
+ * committed as ring->cur until the flush
+ */
+ if (ring->next == ring->end)
+ ring->next = ring->start;
+ *(ring->next++) = data;
}
#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
new file mode 100644
index 000000000000..5115f75b5b7f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -0,0 +1,152 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kref.h>
+#include "msm_gpu.h"
+
+void msm_submitqueue_destroy(struct kref *kref)
+{
+ struct msm_gpu_submitqueue *queue = container_of(kref,
+ struct msm_gpu_submitqueue, ref);
+
+ kfree(queue);
+}
+
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ if (!ctx)
+ return NULL;
+
+ read_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ kref_get(&entry->ref);
+ read_unlock(&ctx->queuelock);
+
+ return entry;
+ }
+ }
+
+ read_unlock(&ctx->queuelock);
+ return NULL;
+}
+
+void msm_submitqueue_close(struct msm_file_private *ctx)
+{
+ struct msm_gpu_submitqueue *entry, *tmp;
+
+ if (!ctx)
+ return;
+
+ /*
+ * No lock needed in close and there won't
+ * be any more user ioctls coming our way
+ */
+ list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
+ msm_submitqueue_put(entry);
+}
+
+int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id)
+{
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_gpu_submitqueue *queue;
+
+ if (!ctx)
+ return -ENODEV;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+
+ if (!queue)
+ return -ENOMEM;
+
+ kref_init(&queue->ref);
+ queue->flags = flags;
+
+ if (priv->gpu) {
+ if (prio >= priv->gpu->nr_rings)
+ return -EINVAL;
+
+ queue->prio = prio;
+ }
+
+ write_lock(&ctx->queuelock);
+
+ queue->id = ctx->queueid++;
+
+ if (id)
+ *id = queue->id;
+
+ list_add_tail(&queue->node, &ctx->submitqueues);
+
+ write_unlock(&ctx->queuelock);
+
+ return 0;
+}
+
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = drm->dev_private;
+ int default_prio;
+
+ if (!ctx)
+ return 0;
+
+ /*
+ * Select priority 2 as the "default priority" unless nr_rings is less
+ * than 2 and then pick the lowest pirority
+ */
+ default_prio = priv->gpu ?
+ clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;
+
+ INIT_LIST_HEAD(&ctx->submitqueues);
+
+ rwlock_init(&ctx->queuelock);
+
+ return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
+}
+
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ if (!ctx)
+ return 0;
+
+ /*
+ * id 0 is the "default" queue and can't be destroyed
+ * by the user
+ */
+ if (!id)
+ return -ENOENT;
+
+ write_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ list_del(&entry->node);
+ write_unlock(&ctx->queuelock);
+
+ msm_submitqueue_put(entry);
+ return 0;
+ }
+ }
+
+ write_unlock(&ctx->queuelock);
+ return -ENOENT;
+}
+
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 7fbad9cb656e..1207ffe36250 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -35,6 +35,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
@@ -92,7 +93,7 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
}
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -127,7 +128,7 @@ static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
- return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2e9ce53ae3a8..9c0c650655e9 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -30,9 +30,11 @@ nouveau-y += nouveau_vga.o
# DRM - memory management
nouveau-y += nouveau_bo.o
nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
nouveau-y += nouveau_ttm.o
+nouveau-y += nouveau_vmm.o
# DRM - modesetting
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index c02a13406a81..4b75ad40dd80 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -56,6 +56,13 @@ config NOUVEAU_DEBUG_DEFAULT
help
Selects the default debug level
+config NOUVEAU_DEBUG_MMU
+ bool "Enable additional MMU debugging"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to enable verbose MMU debug output.
+
config DRM_NOUVEAU_BACKLIGHT
bool "Support for backlight control"
depends on DRM_NOUVEAU
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 5b9d549aa791..501d2d290e9c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
- nvif_object_map(&drm->client.device.object);
+ nvif_object_map(&drm->client.device.object, NULL, 0);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
index b4cd58093300..989690fe3cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
@@ -6,7 +6,7 @@ struct nv50_channel_dma_v0 {
__u8 version;
__u8 chid;
__u8 pad02[6];
- __u64 vm;
+ __u64 vmm;
__u64 pushbuf;
__u64 offset;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
index 14d20c813cdb..5137b6879abd 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
@@ -9,6 +9,6 @@ struct nv50_channel_gpfifo_v0 {
__u32 ilength;
__u64 ioffset;
__u64 pushbuf;
- __u64 vm;
+ __u64 vmm;
};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 36944ff09e3c..1a875090b251 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -6,7 +6,7 @@ struct g82_channel_dma_v0 {
__u8 version;
__u8 chid;
__u8 pad02[6];
- __u64 vm;
+ __u64 vmm;
__u64 pushbuf;
__u64 offset;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index df09a50817eb..e4e50cfe88f1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -9,7 +9,7 @@ struct g82_channel_gpfifo_v0 {
__u32 ilength;
__u64 ioffset;
__u64 pushbuf;
- __u64 vm;
+ __u64 vmm;
};
#define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index 6d16a3a2ec02..ab0fa8adb756 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -8,7 +8,7 @@ struct fermi_channel_gpfifo_v0 {
__u8 pad02[2];
__u32 ilength;
__u64 ioffset;
- __u64 vm;
+ __u64 vmm;
};
#define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 597ebb52d5f9..56f5bd81e480 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -23,7 +23,7 @@ struct kepler_channel_gpfifo_a_v0 {
__u32 engines;
__u32 ilength;
__u64 ioffset;
- __u64 vm;
+ __u64 vmm;
};
#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e3a2ea8bde70..a7c5bf572788 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -15,6 +15,23 @@
#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
#define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007
+#define NVIF_CLASS_MMU /* if0008.h */ 0x80000008
+#define NVIF_CLASS_MMU_NV04 /* if0008.h */ 0x80000009
+#define NVIF_CLASS_MMU_NV50 /* if0008.h */ 0x80005009
+#define NVIF_CLASS_MMU_GF100 /* if0008.h */ 0x80009009
+
+#define NVIF_CLASS_MEM /* if000a.h */ 0x8000000a
+#define NVIF_CLASS_MEM_NV04 /* if000b.h */ 0x8000000b
+#define NVIF_CLASS_MEM_NV50 /* if500b.h */ 0x8000500b
+#define NVIF_CLASS_MEM_GF100 /* if900b.h */ 0x8000900b
+
+#define NVIF_CLASS_VMM /* if000c.h */ 0x8000000c
+#define NVIF_CLASS_VMM_NV04 /* if000d.h */ 0x8000000d
+#define NVIF_CLASS_VMM_NV50 /* if500d.h */ 0x8000500d
+#define NVIF_CLASS_VMM_GF100 /* if900d.h */ 0x8000900d
+#define NVIF_CLASS_VMM_GM200 /* ifb00d.h */ 0x8000b00d
+#define NVIF_CLASS_VMM_GP100 /* ifc00d.h */ 0x8000c00d
+
/* the below match nvidia-assigned (either in hw, or sw) class numbers */
#define NV_NULL_CLASS 0x00000030
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 09439b037870..6edb6266857e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -39,7 +39,6 @@ u64 nvif_device_time(struct nvif_device *);
/*XXX*/
#include <subdev/bios.h>
#include <subdev/fb.h>
-#include <subdev/mmu.h>
#include <subdev/bar.h>
#include <subdev/gpio.h>
#include <subdev/clk.h>
@@ -58,8 +57,6 @@ u64 nvif_device_time(struct nvif_device *);
})
#define nvxx_bios(a) nvxx_device(a)->bios
#define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_mmu(a) nvxx_device(a)->mmu
-#define nvxx_bar(a) nvxx_device(a)->bar
#define nvxx_gpio(a) nvxx_device(a)->gpio
#define nvxx_clk(a) nvxx_device(a)->clk
#define nvxx_i2c(a) nvxx_device(a)->i2c
@@ -67,10 +64,8 @@ u64 nvif_device_time(struct nvif_device *);
#define nvxx_therm(a) nvxx_device(a)->therm
#define nvxx_volt(a) nvxx_device(a)->volt
-#include <core/device.h>
#include <engine/fifo.h>
#include <engine/gr.h>
-#include <engine/sw.h>
#define nvxx_fifo(a) nvxx_device(a)->fifo
#define nvxx_gr(a) nvxx_device(a)->gr
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
new file mode 100644
index 000000000000..8450127420f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
@@ -0,0 +1,42 @@
+#ifndef __NVIF_IF0008_H__
+#define __NVIF_IF0008_H__
+struct nvif_mmu_v0 {
+ __u8 version;
+ __u8 dmabits;
+ __u8 heap_nr;
+ __u8 type_nr;
+ __u16 kind_nr;
+};
+
+#define NVIF_MMU_V0_HEAP 0x00
+#define NVIF_MMU_V0_TYPE 0x01
+#define NVIF_MMU_V0_KIND 0x02
+
+struct nvif_mmu_heap_v0 {
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+ __u64 size;
+};
+
+struct nvif_mmu_type_v0 {
+ __u8 version;
+ __u8 index;
+ __u8 heap;
+ __u8 vram;
+ __u8 host;
+ __u8 comp;
+ __u8 disp;
+ __u8 kind;
+ __u8 mappable;
+ __u8 coherent;
+ __u8 uncached;
+};
+
+struct nvif_mmu_kind_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u16 count;
+ __u8 data[];
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000a.h b/drivers/gpu/drm/nouveau/include/nvif/if000a.h
new file mode 100644
index 000000000000..88d0938fbd5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000a.h
@@ -0,0 +1,22 @@
+#ifndef __NVIF_IF000A_H__
+#define __NVIF_IF000A_H__
+struct nvif_mem_v0 {
+ __u8 version;
+ __u8 type;
+ __u8 page;
+ __u8 pad03[5];
+ __u64 size;
+ __u64 addr;
+ __u8 data[];
+};
+
+struct nvif_mem_ram_vn {
+};
+
+struct nvif_mem_ram_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ dma_addr_t *dma;
+ struct scatterlist *sgl;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000b.h b/drivers/gpu/drm/nouveau/include/nvif/if000b.h
new file mode 100644
index 000000000000..c677fb0293da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000b.h
@@ -0,0 +1,11 @@
+#ifndef __NVIF_IF000B_H__
+#define __NVIF_IF000B_H__
+#include "if000a.h"
+
+struct nv04_mem_vn {
+ /* nvkm_mem_vX ... */
+};
+
+struct nv04_mem_map_vn {
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
new file mode 100644
index 000000000000..2928ecd989ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -0,0 +1,64 @@
+#ifndef __NVIF_IF000C_H__
+#define __NVIF_IF000C_H__
+struct nvif_vmm_v0 {
+ __u8 version;
+ __u8 page_nr;
+ __u8 pad02[6];
+ __u64 addr;
+ __u64 size;
+ __u8 data[];
+};
+
+#define NVIF_VMM_V0_PAGE 0x00
+#define NVIF_VMM_V0_GET 0x01
+#define NVIF_VMM_V0_PUT 0x02
+#define NVIF_VMM_V0_MAP 0x03
+#define NVIF_VMM_V0_UNMAP 0x04
+
+struct nvif_vmm_page_v0 {
+ __u8 version;
+ __u8 index;
+ __u8 shift;
+ __u8 sparse;
+ __u8 vram;
+ __u8 host;
+ __u8 comp;
+ __u8 pad07[1];
+};
+
+struct nvif_vmm_get_v0 {
+ __u8 version;
+#define NVIF_VMM_GET_V0_ADDR 0x00
+#define NVIF_VMM_GET_V0_PTES 0x01
+#define NVIF_VMM_GET_V0_LAZY 0x02
+ __u8 type;
+ __u8 sparse;
+ __u8 page;
+ __u8 align;
+ __u8 pad05[3];
+ __u64 size;
+ __u64 addr;
+};
+
+struct nvif_vmm_put_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 addr;
+};
+
+struct nvif_vmm_map_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 addr;
+ __u64 size;
+ __u64 memory;
+ __u64 offset;
+ __u8 data[];
+};
+
+struct nvif_vmm_unmap_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 addr;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000d.h b/drivers/gpu/drm/nouveau/include/nvif/if000d.h
new file mode 100644
index 000000000000..516ec9401401
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000d.h
@@ -0,0 +1,12 @@
+#ifndef __NVIF_IF000D_H__
+#define __NVIF_IF000D_H__
+#include "if000c.h"
+
+struct nv04_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct nv04_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if500b.h b/drivers/gpu/drm/nouveau/include/nvif/if500b.h
new file mode 100644
index 000000000000..c7c8431fb2ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if500b.h
@@ -0,0 +1,25 @@
+#ifndef __NVIF_IF500B_H__
+#define __NVIF_IF500B_H__
+#include "if000a.h"
+
+struct nv50_mem_vn {
+ /* nvif_mem_vX ... */
+};
+
+struct nv50_mem_v0 {
+ /* nvif_mem_vX ... */
+ __u8 version;
+ __u8 bankswz;
+ __u8 contig;
+};
+
+struct nv50_mem_map_vn {
+};
+
+struct nv50_mem_map_v0 {
+ __u8 version;
+ __u8 ro;
+ __u8 kind;
+ __u8 comp;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if500d.h b/drivers/gpu/drm/nouveau/include/nvif/if500d.h
new file mode 100644
index 000000000000..c29a7822b363
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if500d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF500D_H__
+#define __NVIF_IF500D_H__
+#include "if000c.h"
+
+struct nv50_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct nv50_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct nv50_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+ __u8 comp;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if900b.h b/drivers/gpu/drm/nouveau/include/nvif/if900b.h
new file mode 100644
index 000000000000..9b164548eea8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if900b.h
@@ -0,0 +1,23 @@
+#ifndef __NVIF_IF900B_H__
+#define __NVIF_IF900B_H__
+#include "if000a.h"
+
+struct gf100_mem_vn {
+ /* nvif_mem_vX ... */
+};
+
+struct gf100_mem_v0 {
+ /* nvif_mem_vX ... */
+ __u8 version;
+ __u8 contig;
+};
+
+struct gf100_mem_map_vn {
+};
+
+struct gf100_mem_map_v0 {
+ __u8 version;
+ __u8 ro;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if900d.h b/drivers/gpu/drm/nouveau/include/nvif/if900d.h
new file mode 100644
index 000000000000..49aa50583c3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if900d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF900D_H__
+#define __NVIF_IF900D_H__
+#include "if000c.h"
+
+struct gf100_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct gf100_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct gf100_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 vol;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h b/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
new file mode 100644
index 000000000000..a0e419830595
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
@@ -0,0 +1,27 @@
+#ifndef __NVIF_IFB00D_H__
+#define __NVIF_IFB00D_H__
+#include "if000c.h"
+
+struct gm200_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct gm200_vmm_v0 {
+ /* nvif_vmm_vX ... */
+ __u8 version;
+ __u8 bigpage;
+};
+
+struct gm200_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct gm200_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 vol;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
new file mode 100644
index 000000000000..1d9c637859f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IFC00D_H__
+#define __NVIF_IFC00D_H__
+#include "if000c.h"
+
+struct gp100_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct gp100_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct gp100_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 vol;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 688c4bcd9c64..b93d586a2304 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -2,7 +2,7 @@
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
-#define NVIF_VERSION_LATEST 0x0000000000000000ULL
+#define NVIF_VERSION_LATEST 0x0000000000000100ULL
struct nvif_ioctl_v0 {
__u8 version;
@@ -84,9 +84,13 @@ struct nvif_ioctl_wr_v0 {
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
__u8 version;
- __u8 pad01[3];
- __u32 length;
+#define NVIF_IOCTL_MAP_V0_IO 0x00
+#define NVIF_IOCTL_MAP_V0_VA 0x01
+ __u8 type;
+ __u8 pad02[6];
__u64 handle;
+ __u64 length;
+ __u8 data[];
};
struct nvif_ioctl_unmap {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mem.h b/drivers/gpu/drm/nouveau/include/nvif/mem.h
new file mode 100644
index 000000000000..b542fe38398e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/mem.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_MEM_H__
+#define __NVIF_MEM_H__
+#include "mmu.h"
+
+struct nvif_mem {
+ struct nvif_object object;
+ u8 type;
+ u8 page;
+ u64 addr;
+ u64 size;
+};
+
+int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *);
+int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *);
+void nvif_mem_fini(struct nvif_mem *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
new file mode 100644
index 000000000000..c8cd5b5b0688
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -0,0 +1,56 @@
+#ifndef __NVIF_MMU_H__
+#define __NVIF_MMU_H__
+#include <nvif/object.h>
+
+struct nvif_mmu {
+ struct nvif_object object;
+ u8 dmabits;
+ u8 heap_nr;
+ u8 type_nr;
+ u16 kind_nr;
+
+ struct {
+ u64 size;
+ } *heap;
+
+ struct {
+#define NVIF_MEM_VRAM 0x01
+#define NVIF_MEM_HOST 0x02
+#define NVIF_MEM_COMP 0x04
+#define NVIF_MEM_DISP 0x08
+#define NVIF_MEM_KIND 0x10
+#define NVIF_MEM_MAPPABLE 0x20
+#define NVIF_MEM_COHERENT 0x40
+#define NVIF_MEM_UNCACHED 0x80
+ u8 type;
+ u8 heap;
+ } *type;
+
+ u8 *kind;
+};
+
+int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
+void nvif_mmu_fini(struct nvif_mmu *);
+
+static inline bool
+nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
+{
+ const u8 invalid = mmu->kind_nr - 1;
+ if (kind) {
+ if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+ return false;
+ }
+ return true;
+}
+
+static inline int
+nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
+{
+ int i;
+ for (i = 0; i < mmu->type_nr; i++) {
+ if ((mmu->type[i].type & mask) == mask)
+ return i;
+ }
+ return -EINVAL;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 6912b8cffc98..a2d5244ff2b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -17,7 +17,7 @@ struct nvif_object {
void *priv; /*XXX: hack */
struct {
void __iomem *ptr;
- u32 size;
+ u64 size;
} map;
};
@@ -30,7 +30,10 @@ void nvif_object_sclass_put(struct nvif_sclass **);
u32 nvif_object_rd(struct nvif_object *, int, u64);
void nvif_object_wr(struct nvif_object *, int, u64, u32);
int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
-int nvif_object_map(struct nvif_object *);
+int nvif_object_map_handle(struct nvif_object *, void *, u32,
+ u64 *handle, u64 *length);
+void nvif_object_unmap_handle(struct nvif_object *);
+int nvif_object_map(struct nvif_object *, void *, u32);
void nvif_object_unmap(struct nvif_object *);
#define nvif_handle(a) (unsigned long)(void *)(a)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 6b16ab6b26d5..fd09b2842972 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -34,18 +34,4 @@
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
-
-#ifndef ioread32_native
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native ioread32be
-#define iowrite32_native iowrite32be
-#else /* def __BIG_ENDIAN */
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native ioread32
-#define iowrite32_native iowrite32
-#endif /* def __BIG_ENDIAN else */
-#endif /* !ioread32_native */
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
new file mode 100644
index 000000000000..c5db8a2e82df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -0,0 +1,42 @@
+#ifndef __NVIF_VMM_H__
+#define __NVIF_VMM_H__
+#include <nvif/object.h>
+struct nvif_mem;
+struct nvif_mmu;
+
+enum nvif_vmm_get {
+ ADDR,
+ PTES,
+ LAZY
+};
+
+struct nvif_vma {
+ u64 addr;
+ u64 size;
+};
+
+struct nvif_vmm {
+ struct nvif_object object;
+ u64 start;
+ u64 limit;
+
+ struct {
+ u8 shift;
+ bool sparse:1;
+ bool vram:1;
+ bool host:1;
+ bool comp:1;
+ } *page;
+ int page_nr;
+};
+
+int nvif_vmm_init(struct nvif_mmu *, s32 oclass, u64 addr, u64 size,
+ void *argv, u32 argc, struct nvif_vmm *);
+void nvif_vmm_fini(struct nvif_vmm *);
+int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
+ u8 page, u8 align, u64 size, struct nvif_vma *);
+void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
+int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_mem *, u64 offset);
+int nvif_vmm_unmap(struct nvif_vmm *, u64);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index ca23230d5743..757fac823a10 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -17,7 +17,8 @@ struct nvkm_client {
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
- struct nvkm_vm *vm;
+ struct list_head umem;
+ spinlock_t lock;
};
int nvkm_client_new(const char *name, u64 device, const char *cfg,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index d7ecb65ba19f..560265b15ec2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
+#include <core/oclass.h>
#include <core/event.h>
-#include <core/object.h>
enum nvkm_devidx {
NVKM_SUBDEV_PCI,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index c6bcd8a64cae..ebf8473a39fe 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -16,6 +16,7 @@ struct nvkm_engine {
struct nvkm_engine_func {
void *(*dtor)(struct nvkm_engine *);
+ void (*preinit)(struct nvkm_engine *);
int (*oneinit)(struct nvkm_engine *);
int (*init)(struct nvkm_engine *);
int (*fini)(struct nvkm_engine *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index 473ba0b9a368..10eeaeebc242 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,18 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPUOBJ_H__
#define __NVKM_GPUOBJ_H__
-#include <core/object.h>
#include <core/memory.h>
#include <core/mm.h>
-struct nvkm_vma;
-struct nvkm_vm;
#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
#define NVOBJ_FLAG_HEAP 0x00000004
struct nvkm_gpuobj {
- struct nvkm_object object;
- const struct nvkm_gpuobj_func *func;
+ union {
+ const struct nvkm_gpuobj_func *func;
+ const struct nvkm_gpuobj_func *ptrs;
+ };
struct nvkm_gpuobj *parent;
struct nvkm_memory *memory;
struct nvkm_mm_node *node;
@@ -29,15 +28,14 @@ struct nvkm_gpuobj_func {
void (*release)(struct nvkm_gpuobj *);
u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+ int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
+ struct nvkm_vma *, void *argv, u32 argc);
};
int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
void nvkm_gpuobj_del(struct nvkm_gpuobj **);
int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
- struct nvkm_vma *);
-void nvkm_gpuobj_unmap(struct nvkm_vma *);
void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
u32 length);
void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index affba21fcbad..05f505de0075 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -4,7 +4,12 @@
#include <core/os.h>
struct nvkm_device;
struct nvkm_vma;
-struct nvkm_vm;
+struct nvkm_vmm;
+
+struct nvkm_tags {
+ struct nvkm_mm_node *mn;
+ refcount_t refcount;
+};
enum nvkm_memory_target {
NVKM_MEM_TARGET_INST, /* instance memory */
@@ -15,41 +20,84 @@ enum nvkm_memory_target {
struct nvkm_memory {
const struct nvkm_memory_func *func;
+ const struct nvkm_memory_ptrs *ptrs;
+ struct kref kref;
+ struct nvkm_tags *tags;
};
struct nvkm_memory_func {
void *(*dtor)(struct nvkm_memory *);
enum nvkm_memory_target (*target)(struct nvkm_memory *);
+ u8 (*page)(struct nvkm_memory *);
u64 (*addr)(struct nvkm_memory *);
u64 (*size)(struct nvkm_memory *);
- void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+ void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
void __iomem *(*acquire)(struct nvkm_memory *);
void (*release)(struct nvkm_memory *);
+ int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
+ struct nvkm_vma *, void *argv, u32 argc);
+};
+
+struct nvkm_memory_ptrs {
u32 (*rd32)(struct nvkm_memory *, u64 offset);
void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
- void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
};
void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
u64 size, u32 align, bool zero, struct nvkm_memory **);
-void nvkm_memory_del(struct nvkm_memory **);
+struct nvkm_memory *nvkm_memory_ref(struct nvkm_memory *);
+void nvkm_memory_unref(struct nvkm_memory **);
+int nvkm_memory_tags_get(struct nvkm_memory *, struct nvkm_device *, u32 tags,
+ void (*clear)(struct nvkm_device *, u32, u32),
+ struct nvkm_tags **);
+void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
+ struct nvkm_tags **);
+
#define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_page(p) (p)->func->page(p)
#define nvkm_memory_addr(p) (p)->func->addr(p)
#define nvkm_memory_size(p) (p)->func->size(p)
#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
-#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+#define nvkm_memory_map(p,o,vm,va,av,ac) \
+ (p)->func->map((p),(o),(vm),(va),(av),(ac))
/* accessor macros - kmap()/done() must bracket use of the other accessor
* macros to guarantee correct behaviour across all chipsets
*/
#define nvkm_kmap(o) (o)->func->acquire(o)
-#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
-#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_done(o) (o)->func->release(o)
+
+#define nvkm_ro32(o,a) (o)->ptrs->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
#define nvkm_mo32(o,a,m,d) ({ \
u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
_data; \
})
-#define nvkm_done(o) (o)->func->release(o)
+
+#define nvkm_wo64(o,a,d) do { \
+ u64 __a = (a), __d = (d); \
+ nvkm_wo32((o), __a + 0, lower_32_bits(__d)); \
+ nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \
+} while(0)
+
+#define nvkm_fill(t,s,o,a,d,c) do { \
+ u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \
+ u##t __iomem *_m = nvkm_kmap(o); \
+ if (likely(_m)) { \
+ if (_d) { \
+ while (_c--) \
+ iowrite##t##_native(_d, &_m[_o++]); \
+ } else { \
+ memset_io(&_m[_o], _d, _s); \
+ } \
+ } else { \
+ for (; _c; _c--, _a += BIT(s)) \
+ nvkm_wo##t((o), _a, _d); \
+ } \
+ nvkm_done(o); \
+} while(0)
+#define nvkm_fo32(o,a,d,c) nvkm_fill(32, 2, (o), (a), (d), (c))
+#define nvkm_fo64(o,a,d,c) nvkm_fill(64, 3, (o), (a), (d), (c))
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 2002a4da9999..b0726c39429e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -31,7 +31,7 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
return mm->heap_nodes;
}
-int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
int nvkm_mm_fini(struct nvkm_mm *);
int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
u32 size_min, u32 align, struct nvkm_mm_node **);
@@ -40,9 +40,39 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
void nvkm_mm_dump(struct nvkm_mm *, const char *);
+static inline u32
+nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
+{
+ struct nvkm_mm_node *node;
+ u32 size = 0;
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (node->heap == heap)
+ size += node->length;
+ }
+ return size;
+}
+
static inline bool
nvkm_mm_contiguous(struct nvkm_mm_node *node)
{
return !node->next;
}
+
+static inline u32
+nvkm_mm_addr(struct nvkm_mm_node *node)
+{
+ if (WARN_ON(!nvkm_mm_contiguous(node)))
+ return 0;
+ return node->offset;
+}
+
+static inline u32
+nvkm_mm_size(struct nvkm_mm_node *node)
+{
+ u32 size = 0;
+ do {
+ size += node->length;
+ } while ((node = node->next));
+ return size;
+}
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 3f13ff1d4ee4..270f893cc154 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,11 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OBJECT_H__
#define __NVKM_OBJECT_H__
-#include <core/os.h>
-#include <core/debug.h>
+#include <core/oclass.h>
struct nvkm_event;
struct nvkm_gpuobj;
-struct nvkm_oclass;
struct nvkm_object {
const struct nvkm_object_func *func;
@@ -22,13 +20,20 @@ struct nvkm_object {
struct rb_node node;
};
+enum nvkm_object_map {
+ NVKM_OBJECT_MAP_IO,
+ NVKM_OBJECT_MAP_VA
+};
+
struct nvkm_object_func {
void *(*dtor)(struct nvkm_object *);
int (*init)(struct nvkm_object *);
int (*fini)(struct nvkm_object *, bool suspend);
int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
- int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+ int (*map)(struct nvkm_object *, void *argv, u32 argc,
+ enum nvkm_object_map *, u64 *addr, u64 *size);
+ int (*unmap)(struct nvkm_object *);
int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
@@ -53,7 +58,9 @@ int nvkm_object_init(struct nvkm_object *);
int nvkm_object_fini(struct nvkm_object *, bool suspend);
int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
+ enum nvkm_object_map *, u64 *addr, u64 *size);
+int nvkm_object_unmap(struct nvkm_object *);
int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
@@ -67,28 +74,4 @@ bool nvkm_object_insert(struct nvkm_object *);
void nvkm_object_remove(struct nvkm_object *);
struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
const struct nvkm_object_func *);
-
-struct nvkm_sclass {
- int minver;
- int maxver;
- s32 oclass;
- const struct nvkm_object_func *func;
- int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
-};
-
-struct nvkm_oclass {
- int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
- struct nvkm_sclass base;
- const void *priv;
- const void *engn;
- u32 handle;
- u8 route;
- u64 token;
- u64 object;
- struct nvkm_client *client;
- struct nvkm_object *parent;
- struct nvkm_engine *engine;
-};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
new file mode 100644
index 000000000000..8e1b945d38f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_OCLASS_H__
+#define __NVKM_OCLASS_H__
+#include <core/os.h>
+#include <core/debug.h>
+struct nvkm_oclass;
+struct nvkm_object;
+
+struct nvkm_sclass {
+ int minver;
+ int maxver;
+ s32 oclass;
+ const struct nvkm_object_func *func;
+ int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+};
+
+struct nvkm_oclass {
+ int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ struct nvkm_sclass base;
+ const void *priv;
+ const void *engn;
+ u32 handle;
+ u8 route;
+ u64 token;
+ u64 object;
+ struct nvkm_client *client;
+ struct nvkm_object *parent;
+ struct nvkm_engine *engine;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index fc9e8cd36087..445602d1e8d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -2,4 +2,23 @@
#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do { \
+ u32 __iomem *_p = (u32 __iomem *)(p); \
+ u64 _v = (v); \
+ iowrite32_native(lower_32_bits(_v), &_p[0]); \
+ iowrite32_native(upper_32_bits(_v), &_p[1]); \
+} while(0)
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index 674a38408240..d5d789663aca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_RAMHT_H__
#define __NVKM_RAMHT_H__
#include <core/gpuobj.h>
+struct nvkm_object;
struct nvkm_ramht_data {
struct nvkm_gpuobj *inst;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 38f51ff7ab40..63df2290177f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -34,7 +34,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
/* subdev logging */
#define nvkm_printk_(s,l,p,f,a...) do { \
const struct nvkm_subdev *_subdev = (s); \
- if (_subdev->debug >= (l)) { \
+ if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) { \
dev_##p(_subdev->device->dev, "%s: "f, \
nvkm_subdev_name[_subdev->index], ##a); \
} \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 5f5cae7c474e..0f9c1c702ed6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_DMA_H__
#define __NVKM_DMA_H__
#include <core/engine.h>
+#include <core/object.h>
struct nvkm_client;
struct nvkm_dmaobj {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 5a51842bc241..6427747b6f77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -4,6 +4,7 @@
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
struct nvkm_fifo_chan;
+struct nvkm_gpuobj;
enum nvkm_falcon_dmaidx {
FALCON_DMAIDX_UCODE = 0,
@@ -78,7 +79,7 @@ struct nvkm_falcon_func {
void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
- void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+ void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
int (*wait_for_halt)(struct nvkm_falcon *, u32);
int (*clear_interrupt)(struct nvkm_falcon *, u32);
void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
@@ -113,7 +114,7 @@ void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
bool);
void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
-void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
void nvkm_falcon_start(struct nvkm_falcon *);
int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 025f400c9f5d..c17b3a9bf8fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_FIFO_H__
#define __NVKM_FIFO_H__
#include <core/engine.h>
+#include <core/object.h>
#include <core/event.h>
#define NVKM_FIFO_CHID_NR 4096
@@ -22,7 +23,7 @@ struct nvkm_fifo_chan {
u16 chid;
struct nvkm_gpuobj *inst;
struct nvkm_gpuobj *push;
- struct nvkm_vm *vm;
+ struct nvkm_vmm *vmm;
void __iomem *user;
u64 addr;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index 91f1e0efe061..f6bd94c7e0f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -9,17 +9,22 @@ struct nvkm_bar {
struct nvkm_subdev subdev;
spinlock_t lock;
+ bool bar2;
/* whether the BAR supports to be ioremapped WC or should be uncached */
bool iomap_uncached;
};
+struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar2_init(struct nvkm_device *);
+void nvkm_bar_bar2_fini(struct nvkm_device *);
+struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
void nvkm_bar_flush(struct nvkm_bar *);
-struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
-int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 4da68dd52619..adb78f7d083a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -2,8 +2,7 @@
#ifndef __NVKM_FB_H__
#define __NVKM_FB_H__
#include <core/subdev.h>
-
-#include <subdev/mmu.h>
+#include <core/mm.h>
/* memory type/access flags, do not match hardware values */
#define NV_MEM_ACCESS_RO 1
@@ -22,22 +21,6 @@
#define NVKM_RAM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03
-struct nvkm_mem {
- struct drm_device *dev;
-
- struct nvkm_vma bar_vma;
- struct nvkm_vma vma[2];
- u8 page_shift;
-
- struct nvkm_mm_node *tag;
- struct nvkm_mm_node *mem;
- dma_addr_t *pages;
- u32 memtype;
- u64 offset;
- u64 size;
- struct sg_table *sg;
-};
-
struct nvkm_fb_tile {
struct nvkm_mm_node *tag;
u32 addr;
@@ -51,6 +34,7 @@ struct nvkm_fb {
struct nvkm_subdev subdev;
struct nvkm_ram *ram;
+ struct nvkm_mm tags;
struct {
struct nvkm_fb_tile region[16];
@@ -63,7 +47,6 @@ struct nvkm_fb {
struct nvkm_memory *mmu_wr;
};
-bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
@@ -130,8 +113,11 @@ struct nvkm_ram {
u64 size;
#define NVKM_RAM_MM_SHIFT 12
+#define NVKM_RAM_MM_ANY (NVKM_MM_HEAP_ANY + 0)
+#define NVKM_RAM_MM_NORMAL (NVKM_MM_HEAP_ANY + 1)
+#define NVKM_RAM_MM_NOMAP (NVKM_MM_HEAP_ANY + 2)
+#define NVKM_RAM_MM_MIXED (NVKM_MM_HEAP_ANY + 3)
struct nvkm_mm vram;
- struct nvkm_mm tags;
u64 stolen;
int ranks;
@@ -148,6 +134,10 @@ struct nvkm_ram {
struct nvkm_ram_data target;
};
+int
+nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+ bool contig, bool back, struct nvkm_memory **);
+
struct nvkm_ram_func {
u64 upper;
u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
@@ -158,14 +148,8 @@ struct nvkm_ram_func {
void *(*dtor)(struct nvkm_ram *);
int (*init)(struct nvkm_ram *);
- int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
- u32 type, struct nvkm_mem **);
- void (*put)(struct nvkm_ram *, struct nvkm_mem **);
-
int (*calc)(struct nvkm_ram *, u32 freq);
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
-
-extern const u8 gf100_pte_storage_type_map[256];
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 91126fd29222..36ed520ed2d0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -10,6 +10,7 @@ struct nvkm_instmem {
spinlock_t lock;
struct list_head list;
+ struct list_head boot;
u32 reserved;
struct nvkm_memory *vbios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 63b7ad1f9ce2..95b611554d53 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -15,8 +15,7 @@ struct nvkm_ltc {
u32 num_tags;
u32 tag_base;
- struct nvkm_mm tags;
- struct nvkm_mm_node *tag_ram;
+ struct nvkm_memory *tag_ram;
int zbc_min;
int zbc_max;
@@ -24,9 +23,7 @@ struct nvkm_ltc {
u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
};
-int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
-void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
+void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count);
int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0fdfc610ceb3..0760b93e9d1f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -2,68 +2,130 @@
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
-#include <core/mm.h>
-struct nvkm_device;
-struct nvkm_mem;
-
-struct nvkm_vm_pgt {
- struct nvkm_memory *mem[2];
- u32 refcount[2];
-};
-
-struct nvkm_vm_pgd {
- struct list_head head;
- struct nvkm_gpuobj *obj;
-};
struct nvkm_vma {
struct list_head head;
- int refcount;
- struct nvkm_vm *vm;
- struct nvkm_mm_node *node;
- u64 offset;
- u32 access;
+ struct rb_node tree;
+ u64 addr;
+ u64 size:50;
+ bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
+ bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
+#define NVKM_VMA_PAGE_NONE 7
+ u8 page:3; /* Requested page type (index, or NONE for automatic). */
+ u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
+ bool used:1; /* Region allocated. */
+ bool part:1; /* Region was split from an allocated region by map(). */
+ bool user:1; /* Region user-allocated. */
+ bool busy:1; /* Region busy (for temporarily preventing user access). */
+ struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
+ struct nvkm_tags *tags; /* Compression tag reference. */
};
-struct nvkm_vm {
+struct nvkm_vmm {
+ const struct nvkm_vmm_func *func;
struct nvkm_mmu *mmu;
-
+ const char *name;
+ u32 debug;
+ struct kref kref;
struct mutex mutex;
- struct nvkm_mm mm;
- struct kref refcount;
- struct list_head pgd_list;
+ u64 start;
+ u64 limit;
+
+ struct nvkm_vmm_pt *pd;
+ struct list_head join;
+
+ struct list_head list;
+ struct rb_root free;
+ struct rb_root root;
+
+ bool bootstrapped;
atomic_t engref[NVKM_SUBDEV_NR];
- struct nvkm_vm_pgt *pgt;
- u32 fpde;
- u32 lpde;
+ dma_addr_t null;
+ void *nullp;
};
-int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *, struct nvkm_vm **);
-int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
-int nvkm_vm_boot(struct nvkm_vm *, u64 size);
-int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
- struct nvkm_vma *);
-void nvkm_vm_put(struct nvkm_vma *);
-void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
-void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
-void nvkm_vm_unmap(struct nvkm_vma *);
-void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *, const char *name, struct nvkm_vmm **);
+struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
+void nvkm_vmm_unref(struct nvkm_vmm **);
+int nvkm_vmm_boot(struct nvkm_vmm *);
+int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
+void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
+int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
+void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
+
+struct nvkm_vmm_map {
+ struct nvkm_memory *memory;
+ u64 offset;
+
+ struct nvkm_mm_node *mem;
+ struct scatterlist *sgl;
+ dma_addr_t *dma;
+ u64 off;
+
+ const struct nvkm_vmm_page *page;
+
+ struct nvkm_tags *tags;
+ u64 next;
+ u64 type;
+ u64 ctag;
+};
+
+int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
+ struct nvkm_vmm_map *);
+void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
+
+struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
+struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
struct nvkm_mmu {
const struct nvkm_mmu_func *func;
struct nvkm_subdev subdev;
- u64 limit;
u8 dma_bits;
- u8 lpg_shift;
+
+ int heap_nr;
+ struct {
+#define NVKM_MEM_VRAM 0x01
+#define NVKM_MEM_HOST 0x02
+#define NVKM_MEM_COMP 0x04
+#define NVKM_MEM_DISP 0x08
+ u8 type;
+ u64 size;
+ } heap[4];
+
+ int type_nr;
+ struct {
+#define NVKM_MEM_KIND 0x10
+#define NVKM_MEM_MAPPABLE 0x20
+#define NVKM_MEM_COHERENT 0x40
+#define NVKM_MEM_UNCACHED 0x80
+ u8 type;
+ u8 heap;
+ } type[16];
+
+ struct nvkm_vmm *vmm;
+
+ struct {
+ struct mutex mutex;
+ struct list_head list;
+ } ptc, ptp;
+
+ struct nvkm_device_oclass user;
};
int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 98fe1d0fd592..b1ac47eb786e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -98,4 +98,5 @@ int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index f98f800cc011..ece650a0c5f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -34,6 +34,7 @@
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
@@ -134,7 +135,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
}
if (chan->ntfy) {
- nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+ nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
}
@@ -184,29 +185,33 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- if (nvxx_device(device)->func->pci)
+ if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
getparam->value = dev->pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- if (nvxx_device(device)->func->pci)
+ if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
getparam->value = dev->pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (!nvxx_device(device)->func->pci)
- getparam->value = 3;
- else
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
- getparam->value = 0;
- else
- if (!pci_is_pcie(dev->pdev))
- getparam->value = 1;
- else
- getparam->value = 2;
- break;
+ switch (device->info.platform) {
+ case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
+ case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
+ case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
+ case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
+ case NV_DEVICE_INFO_V0_IGP :
+ if (!pci_is_pcie(dev->pdev))
+ getparam->value = 1;
+ else
+ getparam->value = 2;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = drm->gem.vram_available;
break;
@@ -329,8 +334,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
- &chan->ntfy_vma);
+ ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
if (ret)
goto done;
}
@@ -340,7 +344,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+ ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
if (ret)
nouveau_abi16_chan_fini(abi16, chan);
@@ -548,8 +552,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
- args.start += chan->ntfy_vma.offset;
- args.limit += chan->ntfy_vma.offset;
+ args.start += chan->ntfy_vma->addr;
+ args.limit += chan->ntfy_vma->addr;
} else
if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 441100aa2320..36fde1ff3ad5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -24,7 +24,7 @@ struct nouveau_abi16_chan {
struct nouveau_channel *chan;
struct list_head notifiers;
struct nouveau_bo *ntfy;
- struct nvkm_vma ntfy_vma;
+ struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index dd6fba55ad5d..66bf2aff4a3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1478,9 +1478,13 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
case 1:
entry->dpconf.link_bw = 270000;
break;
- default:
+ case 2:
entry->dpconf.link_bw = 540000;
break;
+ case 3:
+ default:
+ entry->dpconf.link_bw = 810000;
+ break;
}
switch ((conf & 0x0f000000) >> 24) {
case 0xf:
@@ -1964,7 +1968,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
* The microcode entries are found by the "HWSQ" signature.
*/
- const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
+ static const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
const int sz = sizeof(hwsq_signature);
int hwsq_offset;
@@ -1980,7 +1984,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
- const uint8_t edid_sig[] = {
+ static const uint8_t edid_sig[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
uint16_t offset = 0;
uint16_t newoffset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e427f80344c4..2615912430cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -37,6 +37,12 @@
#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
/*
* NV10-NV40 tiling helpers
@@ -48,8 +54,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_device *device = nvxx_device(&drm->client.device);
- struct nvkm_fb *fb = device->fb;
+ struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
@@ -97,7 +102,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
- u32 size, u32 pitch, u32 flags)
+ u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
@@ -120,8 +125,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
}
if (found)
- nv10_bo_update_tile_region(dev, found, addr, size,
- pitch, flags);
+ nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
return found;
}
@@ -155,27 +159,27 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
struct nvif_device *device = &drm->client.device;
if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
- if (nvbo->tile_mode) {
+ if (nvbo->mode) {
if (device->info.chipset >= 0x40) {
*align = 65536;
- *size = roundup_64(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x30) {
*align = 32768;
- *size = roundup_64(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x20) {
*align = 16384;
- *size = roundup_64(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x10) {
*align = 16384;
- *size = roundup_64(*size, 32 * nvbo->tile_mode);
+ *size = roundup_64(*size, 32 * nvbo->mode);
}
}
} else {
- *size = roundup_64(*size, (1 << nvbo->page_shift));
- *align = max((1 << nvbo->page_shift), *align);
+ *size = roundup_64(*size, (1 << nvbo->page));
+ *align = max((1 << nvbo->page), *align);
}
*size = roundup_64(*size, PAGE_SIZE);
@@ -187,11 +191,13 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
struct sg_table *sg, struct reservation_object *robj,
struct nouveau_bo **pnvbo)
{
- struct nouveau_drm *drm = nouveau_drm(cli->dev);
+ struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
+ struct nvif_mmu *mmu = &cli->mmu;
+ struct nvif_vmm *vmm = &cli->vmm.vmm;
size_t acc_size;
- int ret;
int type = ttm_bo_type_device;
+ int ret, i, pi = -1;
if (!size) {
NV_WARN(drm, "skipped size %016llx\n", size);
@@ -207,19 +213,80 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
- nvbo->tile_mode = tile_mode;
- nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->cli = cli;
- if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
- nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+ /* This is confusing, and doesn't actually mean we want an uncached
+ * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+ * into in nouveau_gem_new().
+ */
+ if (flags & TTM_PL_FLAG_UNCACHED) {
+ /* Determine if we can get a cache-coherent map, forcing
+ * uncached mapping if we can't.
+ */
+ if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+ nvbo->force_coherent = true;
+ }
+
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return -EINVAL;
+ }
+
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return -EINVAL;
+ }
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
+ }
+ nvbo->mode = tile_mode;
+ nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+ * during buffer migration, we need to determine page
+ * size for the buffer up-front, and pre-allocate its
+ * page tables.
+ *
+ * Skip page sizes that can't support needed domains.
+ */
+ if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+ (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
+ continue;
+
+ /* Select this page size if it's the first that supports
+ * the potential memory domains, or when it's compatible
+ * with the requested compression settings.
+ */
+ if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+ pi = i;
+
+ /* Stop once the buffer is larger than the current page size. */
+ if (size >= 1ULL << vmm->page[i].shift)
+ break;
+ }
+
+ if (WARN_ON(pi < 0))
+ return -EINVAL;
- nvbo->page_shift = 12;
- if (drm->client.vm) {
- if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+ /* Disable compression if suitable settings couldn't be found. */
+ if (nvbo->comp && !vmm->page[pi].comp) {
+ if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+ nvbo->kind = mmu->kind[nvbo->kind];
+ nvbo->comp = 0;
}
+ nvbo->page = vmm->page[pi].shift;
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
@@ -262,7 +329,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
* Make sure that the color and depth buffers are handled
@@ -270,7 +337,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
* speed up when alpha-blending and depth-test are enabled
* at the same time.
*/
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+ if (nvbo->zeta) {
fpfn = vram_pages / 2;
lpfn = ~0;
} else {
@@ -321,14 +388,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
memtype == TTM_PL_FLAG_VRAM && contig) {
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- struct nvkm_mem *mem = bo->mem.mm_node;
- if (!nvkm_mm_contiguous(mem->mem))
- evict = true;
- }
- nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+ if (!nvbo->contig) {
+ nvbo->contig = true;
force = true;
+ evict = true;
}
}
@@ -376,7 +439,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
out:
if (force && ret)
- nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
+ nvbo->contig = false;
ttm_bo_unreserve(bo);
return ret;
}
@@ -446,7 +509,6 @@ void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -458,7 +520,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+ dma_sync_single_for_device(drm->dev->dev,
+ ttm_dma->dma_address[i],
PAGE_SIZE, DMA_TO_DEVICE);
}
@@ -466,7 +529,6 @@ void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -478,7 +540,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+ dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_FROM_DEVICE);
}
@@ -568,6 +630,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nvif_mmu *mmu = &drm->client.mmu;
switch (type) {
case TTM_PL_SYSTEM:
@@ -584,7 +647,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
/* Some BARs do not support being ioremapped WC */
- if (nvxx_bar(&drm->client.device)->iomap_uncached) {
+ const u8 type = mmu->type[drm->ttm.type_vram].type;
+ if (type & NVIF_MEM_UNCACHED) {
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
}
@@ -659,14 +723,14 @@ static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
int ret = RING_SPACE(chan, 10);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
- OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
- OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
@@ -691,9 +755,9 @@ static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
u32 page_count = new_reg->num_pages;
int ret;
@@ -729,9 +793,9 @@ static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
u32 page_count = new_reg->num_pages;
int ret;
@@ -768,9 +832,9 @@ static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
u32 page_count = new_reg->num_pages;
int ret;
@@ -806,14 +870,14 @@ static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
- OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
- OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
OUT_RING (chan, 0x00000000 /* COPY */);
OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
}
@@ -824,15 +888,15 @@ static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
- OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
- OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
}
return ret;
@@ -858,12 +922,12 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
u64 length = (new_reg->num_pages << PAGE_SHIFT);
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
- int src_tiled = !!mem->memtype;
- int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
+ int src_tiled = !!mem->kind;
+ int dst_tiled = !!nouveau_mem(new_reg)->kind;
int ret;
while (length) {
@@ -1000,25 +1064,31 @@ static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_mem_reg *reg)
{
- struct nvkm_mem *old_mem = bo->mem.mm_node;
- struct nvkm_mem *new_mem = reg->mm_node;
- u64 size = (u64)reg->num_pages << PAGE_SHIFT;
+ struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+ struct nouveau_mem *new_mem = nouveau_mem(reg);
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
int ret;
- ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
- NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+ ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
+ old_mem->mem.size, &old_mem->vma[0]);
if (ret)
return ret;
- ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
- NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+ ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
+ new_mem->mem.size, &old_mem->vma[1]);
+ if (ret)
+ goto done;
+
+ ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+ if (ret)
+ goto done;
+
+ ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
if (ret) {
- nvkm_vm_put(&old_mem->vma[0]);
- return ret;
+ nvif_vmm_put(vmm, &old_mem->vma[1]);
+ nvif_vmm_put(vmm, &old_mem->vma[0]);
}
-
- nvkm_vm_map(&old_mem->vma[0], old_mem);
- nvkm_vm_map(&old_mem->vma[1], new_mem);
return 0;
}
@@ -1200,21 +1270,23 @@ static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
struct ttm_mem_reg *new_reg)
{
+ struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
/* ttm can now (stupidly) pass the driver bos it didn't create... */
if (bo->destroy != nouveau_bo_del_ttm)
return;
- list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
- (new_reg->mem_type == TTM_PL_VRAM ||
- nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
- nvkm_vm_map(vma, new_reg->mm_node);
- } else {
+ if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
+ mem->mem.page == nvbo->page) {
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ nouveau_vma_map(vma, mem);
+ }
+ } else {
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
WARN_ON(ttm_bo_wait(bo, false, false));
- nvkm_vm_unmap(vma);
+ nouveau_vma_unmap(vma);
}
}
}
@@ -1234,8 +1306,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
- nvbo->tile_mode,
- nvbo->tile_flags);
+ nvbo->mode, nvbo->zeta);
}
return 0;
@@ -1331,8 +1402,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
- struct nvkm_mem *mem = reg->mm_node;
- int ret;
+ struct nouveau_mem *mem = nouveau_mem(reg);
reg->bus.addr = NULL;
reg->bus.offset = 0;
@@ -1353,7 +1423,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
reg->bus.is_iomem = !drm->agp.cma;
}
#endif
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+ if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
/* untiled */
break;
/* fallthrough, tiled memory */
@@ -1361,19 +1431,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
reg->bus.offset = reg->start << PAGE_SHIFT;
reg->bus.base = device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
- int page_shift = 12;
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
- page_shift = mem->page_shift;
+ if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+ union {
+ struct nv50_mem_map_v0 nv50;
+ struct gf100_mem_map_v0 gf100;
+ } args;
+ u64 handle, length;
+ u32 argc = 0;
+ int ret;
+
+ switch (mem->mem.object.oclass) {
+ case NVIF_CLASS_MEM_NV50:
+ args.nv50.version = 0;
+ args.nv50.ro = 0;
+ args.nv50.kind = mem->kind;
+ args.nv50.comp = mem->comp;
+ break;
+ case NVIF_CLASS_MEM_GF100:
+ args.gf100.version = 0;
+ args.gf100.ro = 0;
+ args.gf100.kind = mem->kind;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
- ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
- &mem->bar_vma);
- if (ret)
- return ret;
+ ret = nvif_object_map_handle(&mem->mem.object,
+ &argc, argc,
+ &handle, &length);
+ if (ret != 1)
+ return ret ? ret : -EINVAL;
- nvkm_vm_map(&mem->bar_vma, mem);
- reg->bus.offset = mem->bar_vma.offset;
+ reg->bus.base = 0;
+ reg->bus.offset = handle;
}
break;
default:
@@ -1385,13 +1476,22 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
- struct nvkm_mem *mem = reg->mm_node;
-
- if (!mem->bar_vma.node)
- return;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_mem *mem = nouveau_mem(reg);
- nvkm_vm_unmap(&mem->bar_vma);
- nvkm_vm_put(&mem->bar_vma);
+ if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+ switch (reg->mem_type) {
+ case TTM_PL_TT:
+ if (mem->kind)
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ case TTM_PL_VRAM:
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ default:
+ break;
+ }
+ }
}
static int
@@ -1408,7 +1508,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
- !nouveau_bo_tile_layout(nvbo))
+ !nvbo->kind)
return 0;
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@@ -1445,9 +1545,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
- struct nvkm_device *device;
- struct drm_device *dev;
- struct device *pdev;
+ struct device *dev;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1464,9 +1562,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
- device = nvxx_device(&drm->client.device);
- dev = drm->dev;
- pdev = device->dev;
+ dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
@@ -1476,7 +1572,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev->dev);
+ return ttm_dma_populate((void *)ttm, dev);
}
#endif
@@ -1488,12 +1584,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
dma_addr_t addr;
- addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+ addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(pdev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
while (i--) {
- dma_unmap_page(pdev, ttm_dma->dma_address[i],
+ dma_unmap_page(dev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_BIDIRECTIONAL);
ttm_dma->dma_address[i] = 0;
}
@@ -1511,9 +1607,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
- struct nvkm_device *device;
- struct drm_device *dev;
- struct device *pdev;
+ struct device *dev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1521,9 +1615,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
- device = nvxx_device(&drm->client.device);
- dev = drm->dev;
- pdev = device->dev;
+ dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
@@ -1534,14 +1626,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate((void *)ttm, dev->dev);
+ ttm_dma_unpopulate((void *)ttm, dev);
return;
}
#endif
for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) {
- dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+ dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
}
@@ -1576,48 +1668,3 @@ struct ttm_bo_driver nouveau_bo_driver = {
.io_mem_free = &nouveau_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
-
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
-{
- struct nvkm_vma *vma;
- list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (vma->vm == vm)
- return vma;
- }
-
- return NULL;
-}
-
-int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
- struct nvkm_vma *vma)
-{
- const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- int ret;
-
- ret = nvkm_vm_get(vm, size, nvbo->page_shift,
- NV_MEM_ACCESS_RW, vma);
- if (ret)
- return ret;
-
- if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
- (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
- nvbo->page_shift != vma->vm->mmu->lpg_shift))
- nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
-
- list_add_tail(&vma->head, &nvbo->vma_list);
- vma->refcount = 1;
- return 0;
-}
-
-void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
-{
- if (vma->node) {
- if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- list_del(&vma->head);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 4caade5dee50..7b5cc5c73d20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -25,12 +25,16 @@ struct nouveau_bo {
bool validate_mapped;
struct list_head vma_list;
- unsigned page_shift;
struct nouveau_cli *cli;
- u32 tile_mode;
- u32 tile_flags;
+ unsigned contig:1;
+ unsigned page:5;
+ unsigned kind:8;
+ unsigned comp:3;
+ unsigned zeta:3;
+ unsigned mode;
+
struct nouveau_drm_tile *tile;
/* Only valid if allocated via nouveau_gem_new() and iff you hold a
@@ -90,13 +94,6 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
-
-int nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
- struct nvkm_vma *);
-void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
-
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index dbc41fa86ee8..af1116655910 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -40,6 +40,7 @@
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
@@ -83,6 +84,14 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
+ struct nouveau_cli *cli = (void *)chan->user.client;
+ bool super;
+
+ if (cli) {
+ super = cli->base.super;
+ cli->base.super = true;
+ }
+
if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
nvif_object_fini(&chan->nvsw);
@@ -91,12 +100,15 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_notify_fini(&chan->kill);
nvif_object_fini(&chan->user);
nvif_object_fini(&chan->push.ctxdma);
- nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+ nouveau_vma_del(&chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->pin_refcnt)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
+
+ if (cli)
+ cli->base.super = super;
}
*pchan = NULL;
}
@@ -106,7 +118,6 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
u32 size, struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
- struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
@@ -142,11 +153,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
* pushbuf lives in, this is because the GEM code requires that
* we be able to call out to other (indirect) push buffers
*/
- chan->push.vma.offset = chan->push.buffer->bo.offset;
+ chan->push.addr = chan->push.buffer->bo.offset;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
- &chan->push.vma);
+ ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
+ &chan->push.vma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -155,7 +166,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
+
+ chan->push.addr = chan->push.vma->addr;
} else
if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -185,7 +198,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
}
}
@@ -203,6 +216,7 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 engine, struct nouveau_channel **pchan)
{
+ struct nouveau_cli *cli = (void *)device->object.client;
static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
@@ -233,22 +247,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
args.kepler.version = 0;
args.kepler.engines = engine;
args.kepler.ilength = 0x02000;
- args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
- args.kepler.vm = 0;
+ args.kepler.ioffset = 0x10000 + chan->push.addr;
+ args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
size = sizeof(args.kepler);
} else
if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
args.fermi.version = 0;
args.fermi.ilength = 0x02000;
- args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
- args.fermi.vm = 0;
+ args.fermi.ioffset = 0x10000 + chan->push.addr;
+ args.fermi.vmm = nvif_handle(&cli->vmm.vmm.object);
size = sizeof(args.fermi);
} else {
args.nv50.version = 0;
args.nv50.ilength = 0x02000;
- args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+ args.nv50.ioffset = 0x10000 + chan->push.addr;
args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
- args.nv50.vm = 0;
+ args.nv50.vmm = nvif_handle(&cli->vmm.vmm.object);
size = sizeof(args.nv50);
}
@@ -293,7 +307,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
/* create channel object */
args.version = 0;
args.pushbuf = nvif_handle(&chan->push.ctxdma);
- args.offset = chan->push.vma.offset;
+ args.offset = chan->push.addr;
do {
ret = nvif_object_init(&device->object, 0, *oclass++,
@@ -314,11 +328,10 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nvif_device *device = chan->device;
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_drm *drm = chan->drm;
- struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
int ret, i;
- nvif_object_map(&chan->user);
+ nvif_object_map(&chan->user, NULL, 0);
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
@@ -339,7 +352,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -356,7 +369,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
} else
if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
@@ -368,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
}
ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 9463a78613cb..14607c16a2bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -17,8 +17,9 @@ struct nouveau_channel {
struct {
struct nouveau_bo *buffer;
- struct nvkm_vma vma;
+ struct nouveau_vma *vma;
struct nvif_object ctxdma;
+ u64 addr;
} push;
/* TODO: this will be reworked in the near future */
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 70d8e0d69ad5..69d6e61a01ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -373,7 +373,7 @@ find_encoder(struct drm_connector *connector, int type)
if (!id)
break;
- enc = drm_encoder_find(dev, id);
+ enc = drm_encoder_find(dev, NULL, id);
if (!enc)
continue;
nv_encoder = nouveau_encoder(enc);
@@ -441,7 +441,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (id == 0)
break;
- encoder = drm_encoder_find(dev, id);
+ encoder = drm_encoder_find(dev, NULL, id);
if (!encoder)
continue;
nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 34cd144681b9..270ba56f2756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,15 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
-
-#include <subdev/mmu.h>
-
#include "nouveau_drv.h"
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
- struct nvkm_vma vma;
+ struct nouveau_vma *vma;
u32 r_handle;
u32 r_format;
u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 2634a1a79888..10e84f6ca2b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_vmm.h"
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
@@ -71,11 +72,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
return -EBUSY;
}
- if (val < chan->push.vma.offset ||
- val > chan->push.vma.offset + (chan->dma.max << 2))
+ if (val < chan->push.addr ||
+ val > chan->push.addr + (chan->dma.max << 2))
return -EINVAL;
- return (val - chan->push.vma.offset) >> 2;
+ return (val - chan->push.addr) >> 2;
}
void
@@ -84,13 +85,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
{
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_bo *pb = chan->push.buffer;
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
u64 offset;
- vma = nouveau_bo_vma_find(bo, cli->vm);
+ vma = nouveau_vma_find(bo, &cli->vmm);
BUG_ON(!vma);
- offset = vma->offset + delta;
+ offset = vma->addr + delta;
BUG_ON(chan->dma.ib_free < 1);
@@ -224,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
* instruct the GPU to jump back to the start right
* after processing the currently pending commands.
*/
- OUT_RING(chan, chan->push.vma.offset | 0x20000000);
+ OUT_RING(chan, chan->push.addr | 0x20000000);
/* wait for GET to depart from the skips area.
* prevents writing GET==PUT and causing a race
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index aff3a9d0a1fc..74e10b14a7da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
#define WRITE_PUT(val) do { \
mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
- nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+ nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.addr);\
} while (0)
static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 595630d1fb9e..8d4a5be3b913 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -111,33 +111,119 @@ nouveau_name(struct drm_device *dev)
return nouveau_platform_name(to_platform_device(dev->dev));
}
+static inline bool
+nouveau_cli_work_ready(struct dma_fence *fence, bool wait)
+{
+ if (!dma_fence_is_signaled(fence)) {
+ if (!wait)
+ return false;
+ WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+ }
+ dma_fence_put(fence);
+ return true;
+}
+
+static void
+nouveau_cli_work_flush(struct nouveau_cli *cli, bool wait)
+{
+ struct nouveau_cli_work *work, *wtmp;
+ mutex_lock(&cli->lock);
+ list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
+ if (!work->fence || nouveau_cli_work_ready(work->fence, wait)) {
+ list_del(&work->head);
+ work->func(work);
+ }
+ }
+ mutex_unlock(&cli->lock);
+}
+
+static void
+nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
+ schedule_work(&work->cli->work);
+}
+
+void
+nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
+ struct nouveau_cli_work *work)
+{
+ work->fence = dma_fence_get(fence);
+ work->cli = cli;
+ mutex_lock(&cli->lock);
+ list_add_tail(&work->head, &cli->worker);
+ mutex_unlock(&cli->lock);
+ if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
+ nouveau_cli_work_fence(fence, &work->cb);
+}
+
+static void
+nouveau_cli_work(struct work_struct *w)
+{
+ struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
+ nouveau_cli_work_flush(cli, false);
+}
+
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
- nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
+ nouveau_cli_work_flush(cli, true);
usif_client_fini(cli);
+ nouveau_vmm_fini(&cli->vmm);
+ nvif_mmu_fini(&cli->mmu);
nvif_device_fini(&cli->device);
+ mutex_lock(&cli->drm->master.lock);
nvif_client_fini(&cli->base);
+ mutex_unlock(&cli->drm->master.lock);
}
static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
struct nouveau_cli *cli)
{
+ static const struct nvif_mclass
+ mems[] = {
+ { NVIF_CLASS_MEM_GF100, -1 },
+ { NVIF_CLASS_MEM_NV50 , -1 },
+ { NVIF_CLASS_MEM_NV04 , -1 },
+ {}
+ };
+ static const struct nvif_mclass
+ mmus[] = {
+ { NVIF_CLASS_MMU_GF100, -1 },
+ { NVIF_CLASS_MMU_NV50 , -1 },
+ { NVIF_CLASS_MMU_NV04 , -1 },
+ {}
+ };
+ static const struct nvif_mclass
+ vmms[] = {
+ { NVIF_CLASS_VMM_GP100, -1 },
+ { NVIF_CLASS_VMM_GM200, -1 },
+ { NVIF_CLASS_VMM_GF100, -1 },
+ { NVIF_CLASS_VMM_NV50 , -1 },
+ { NVIF_CLASS_VMM_NV04 , -1 },
+ {}
+ };
u64 device = nouveau_name(drm->dev);
int ret;
snprintf(cli->name, sizeof(cli->name), "%s", sname);
- cli->dev = drm->dev;
+ cli->drm = drm;
mutex_init(&cli->mutex);
usif_client_init(cli);
- if (cli == &drm->client) {
+ INIT_WORK(&cli->work, nouveau_cli_work);
+ INIT_LIST_HEAD(&cli->worker);
+ mutex_init(&cli->lock);
+
+ if (cli == &drm->master) {
ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
cli->name, device, &cli->base);
} else {
- ret = nvif_client_init(&drm->client.base, cli->name, device,
+ mutex_lock(&drm->master.lock);
+ ret = nvif_client_init(&drm->master.base, cli->name, device,
&cli->base);
+ mutex_unlock(&drm->master.lock);
}
if (ret) {
NV_ERROR(drm, "Client allocation failed: %d\n", ret);
@@ -154,6 +240,38 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
goto done;
}
+ ret = nvif_mclass(&cli->device.object, mmus);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported MMU class\n");
+ goto done;
+ }
+
+ ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
+ if (ret) {
+ NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+ goto done;
+ }
+
+ ret = nvif_mclass(&cli->mmu.object, vmms);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported VMM class\n");
+ goto done;
+ }
+
+ ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
+ if (ret) {
+ NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+ goto done;
+ }
+
+ ret = nvif_mclass(&cli->mmu.object, mems);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported MEM class\n");
+ goto done;
+ }
+
+ cli->mem = &mems[ret];
+ return 0;
done:
if (ret)
nouveau_cli_fini(cli);
@@ -433,6 +551,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
+ ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+ if (ret)
+ return ret;
+
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
return ret;
@@ -456,21 +578,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_vga_init(drm);
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- if (!nvxx_device(&drm->client.device)->mmu) {
- ret = -ENOSYS;
- goto fail_device;
- }
-
- ret = nvkm_vm_new(nvxx_device(&drm->client.device),
- 0, (1ULL << 40), 0x1000, NULL,
- &drm->client.vm);
- if (ret)
- goto fail_device;
-
- nvxx_client(&drm->client.base)->vm = drm->client.vm;
- }
-
ret = nouveau_ttm_init(drm);
if (ret)
goto fail_ttm;
@@ -516,8 +623,8 @@ fail_bios:
nouveau_ttm_fini(drm);
fail_ttm:
nouveau_vga_fini(drm);
-fail_device:
nouveau_cli_fini(&drm->client);
+ nouveau_cli_fini(&drm->master);
kfree(drm);
return ret;
}
@@ -550,6 +657,7 @@ nouveau_drm_unload(struct drm_device *dev)
if (drm->hdmi_device)
pci_dev_put(drm->hdmi_device);
nouveau_cli_fini(&drm->client);
+ nouveau_cli_fini(&drm->master);
kfree(drm);
}
@@ -618,7 +726,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
NV_DEBUG(drm, "suspending object tree...\n");
- ret = nvif_client_suspend(&drm->client.base);
+ ret = nvif_client_suspend(&drm->master.base);
if (ret)
goto fail_client;
@@ -642,7 +750,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- nvif_client_resume(&drm->client.base);
+ nvif_client_resume(&drm->master.base);
NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
@@ -850,15 +958,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
cli->base.super = false;
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
- (1ULL << 40), 0x1000, NULL, &cli->vm);
- if (ret)
- goto done;
-
- nvxx_client(&cli->base)->vm = cli->vm;
- }
-
fpriv->driver_priv = cli;
mutex_lock(&drm->client.mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 77dea95c1bf1..3331e82ae9e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -6,7 +6,7 @@
#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
#define DRIVER_NAME "nouveau"
-#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla"
+#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
@@ -43,6 +43,8 @@
#include <nvif/client.h>
#include <nvif/device.h>
#include <nvif/ioctl.h>
+#include <nvif/mmu.h>
+#include <nvif/vmm.h>
#include <drm/drmP.h>
@@ -62,6 +64,7 @@ struct platform_device;
#include "nouveau_fence.h"
#include "nouveau_bios.h"
+#include "nouveau_vmm.h"
struct nouveau_drm_tile {
struct nouveau_fence *fence;
@@ -87,19 +90,37 @@ enum nouveau_drm_handle {
struct nouveau_cli {
struct nvif_client base;
- struct drm_device *dev;
+ struct nouveau_drm *drm;
struct mutex mutex;
struct nvif_device device;
+ struct nvif_mmu mmu;
+ struct nouveau_vmm vmm;
+ const struct nvif_mclass *mem;
- struct nvkm_vm *vm; /*XXX*/
struct list_head head;
void *abi16;
struct list_head objects;
struct list_head notifys;
char name[32];
+
+ struct work_struct work;
+ struct list_head worker;
+ struct mutex lock;
};
+struct nouveau_cli_work {
+ void (*func)(struct nouveau_cli_work *);
+ struct nouveau_cli *cli;
+ struct list_head head;
+
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
+};
+
+void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
+ struct nouveau_cli_work *);
+
static inline struct nouveau_cli *
nouveau_cli(struct drm_file *fpriv)
{
@@ -110,6 +131,7 @@ nouveau_cli(struct drm_file *fpriv)
#include <nvif/device.h>
struct nouveau_drm {
+ struct nouveau_cli master;
struct nouveau_cli client;
struct drm_device *dev;
@@ -134,6 +156,9 @@ struct nouveau_drm {
struct nouveau_channel *chan;
struct nvif_object copy;
int mtrr;
+ int type_vram;
+ int type_host;
+ int type_ncoh;
} ttm;
/* GEM interface support */
@@ -205,7 +230,7 @@ void nouveau_drm_device_remove(struct drm_device *dev);
#define NV_PRINTK(l,c,f,a...) do { \
struct nouveau_cli *_cli = (c); \
- dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a); \
+ dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0)
#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2b12d82aac15..c533d8e04afc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -48,6 +48,7 @@
#include "nouveau_bo.h"
#include "nouveau_fbcon.h"
#include "nouveau_chan.h"
+#include "nouveau_vmm.h"
#include "nouveau_crtc.h"
@@ -348,7 +349,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
+ ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -402,7 +403,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
out_unlock:
if (chan)
- nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+ nouveau_vma_del(&fb->vma);
nouveau_bo_unmap(fb->nvbo);
out_unpin:
nouveau_bo_unpin(fb->nvbo);
@@ -429,7 +430,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) {
- nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+ nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
drm_framebuffer_unreference(&nouveau_fb->base);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 99e14e3e0fe4..503fa94dc06d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -199,62 +199,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
WARN_ON(ret);
}
-struct nouveau_fence_work {
- struct work_struct work;
- struct dma_fence_cb cb;
- void (*func)(void *);
- void *data;
-};
-
-static void
-nouveau_fence_work_handler(struct work_struct *kwork)
-{
- struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
- work->func(work->data);
- kfree(work);
-}
-
-static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
-
- schedule_work(&work->work);
-}
-
-void
-nouveau_fence_work(struct dma_fence *fence,
- void (*func)(void *), void *data)
-{
- struct nouveau_fence_work *work;
-
- if (dma_fence_is_signaled(fence))
- goto err;
-
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work) {
- /*
- * this might not be a nouveau fence any more,
- * so force a lazy wait here
- */
- WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
- true, false));
- goto err;
- }
-
- INIT_WORK(&work->work, nouveau_fence_work_handler);
- work->func = func;
- work->data = data;
-
- if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
- goto err_free;
- return;
-
-err_free:
- kfree(work);
-err:
- func(data);
-}
-
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
@@ -474,8 +418,6 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
if (!fence)
return -ENOMEM;
- fence->sysmem = sysmem;
-
ret = nouveau_fence_emit(fence, chan);
if (ret)
nouveau_fence_unref(&fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c9b399ad89e6..5bd8d30d1657 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -13,8 +13,6 @@ struct nouveau_fence {
struct list_head head;
- bool sysmem;
-
struct nouveau_channel __rcu *channel;
unsigned long timeout;
};
@@ -25,7 +23,6 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
@@ -91,14 +88,12 @@ int nouveau_flip_complete(struct nvif_notify *);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
- struct nvkm_vma vma;
- struct nvkm_vma vma_gart;
+ struct nouveau_vma *vma;
};
struct nv84_fence_priv {
struct nouveau_fence_priv base;
struct nouveau_bo *bo;
- struct nouveau_bo *bo_gart;
u32 *suspend;
struct mutex mutex;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2170534101ca..efc89aaef66a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -31,6 +31,10 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
void
nouveau_gem_object_del(struct drm_gem_object *gem)
@@ -64,66 +68,61 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_vma *vma;
struct device *dev = drm->dev->dev;
+ struct nouveau_vma *vma;
int ret;
- if (!cli->vm)
+ if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
- vma = nouveau_bo_vma_find(nvbo, cli->vm);
- if (!vma) {
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (!vma) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = pm_runtime_get_sync(dev);
- if (ret < 0 && ret != -EACCES) {
- kfree(vma);
- goto out;
- }
-
- ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
- if (ret)
- kfree(vma);
-
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- } else {
- vma->refcount++;
- }
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0 && ret != -EACCES)
+ goto out;
+ ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
out:
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
+struct nouveau_gem_object_unmap {
+ struct nouveau_cli_work work;
+ struct nouveau_vma *vma;
+};
+
static void
-nouveau_gem_object_delete(void *data)
+nouveau_gem_object_delete(struct nouveau_vma *vma)
{
- struct nvkm_vma *vma = data;
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- kfree(vma);
+ nouveau_vma_del(&vma);
}
static void
-nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
+nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
+{
+ struct nouveau_gem_object_unmap *work =
+ container_of(w, typeof(*work), work);
+ nouveau_gem_object_delete(work->vma);
+ kfree(work);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
+ struct nouveau_gem_object_unmap *work;
struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
- list_del(&vma->head);
+ list_del_init(&vma->head);
if (fobj && fobj->shared_count > 1)
ttm_bo_wait(&nvbo->bo, false, false);
@@ -133,14 +132,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
else
fence = reservation_object_get_excl(nvbo->bo.resv);
- if (fence && mapped) {
- nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
- } else {
- if (mapped)
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- kfree(vma);
+ if (!fence || !mapped) {
+ nouveau_gem_object_delete(vma);
+ return;
+ }
+
+ if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
+ WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+ nouveau_gem_object_delete(vma);
+ return;
}
+
+ work->work.func = nouveau_gem_object_delete_work;
+ work->vma = vma;
+ nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
void
@@ -150,19 +155,19 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
int ret;
- if (!cli->vm)
+ if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
- vma = nouveau_bo_vma_find(nvbo, cli->vm);
+ vma = nouveau_vma_find(nvbo, &cli->vmm);
if (vma) {
- if (--vma->refcount == 0) {
+ if (--vma->refs == 0) {
ret = pm_runtime_get_sync(dev);
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
@@ -179,7 +184,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo)
{
- struct nouveau_drm *drm = nouveau_drm(cli->dev);
+ struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
u32 flags = 0;
int ret;
@@ -227,7 +232,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains;
@@ -236,18 +241,25 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
- if (cli->vm) {
- vma = nouveau_bo_vma_find(nvbo, cli->vm);
+ if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+ vma = nouveau_vma_find(nvbo, &cli->vmm);
if (!vma)
return -EINVAL;
- rep->offset = vma->offset;
+ rep->offset = vma->addr;
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
- rep->tile_mode = nvbo->tile_mode;
- rep->tile_flags = nvbo->tile_flags;
+ rep->tile_mode = nvbo->mode;
+ rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+ rep->tile_flags |= nvbo->kind << 8;
+ else
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+ rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
+ else
+ rep->tile_flags |= nvbo->zeta;
return 0;
}
@@ -255,18 +267,11 @@ int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
- NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
- return -EINVAL;
- }
-
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
@@ -791,7 +796,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
bo[push[i].bo_index].user_priv;
uint32_t cmd;
- cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
+ cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
cmd |= 0x20000000;
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
@@ -843,7 +848,7 @@ out_next:
req->suffix1 = 0x00000000;
} else {
req->suffix0 = 0x20000000 |
- (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
+ (chan->push.addr + ((chan->dma.cur + 2) << 2));
req->suffix1 = 0x00000000;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 0456c94a5d4d..fe39998f65cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -7,9 +7,6 @@
#include "nouveau_drv.h"
#include "nouveau_bo.h"
-#define nouveau_bo_tile_layout(nvbo) \
- ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
-
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 000000000000..589a9621db76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_mem.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+
+int
+nouveau_mem_map(struct nouveau_mem *mem,
+ struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+ union {
+ struct nv50_vmm_map_v0 nv50;
+ struct gf100_vmm_map_v0 gf100;
+ } args;
+ u32 argc = 0;
+ bool super;
+ int ret;
+
+ switch (vmm->object.oclass) {
+ case NVIF_CLASS_VMM_NV04:
+ break;
+ case NVIF_CLASS_VMM_NV50:
+ args.nv50.version = 0;
+ args.nv50.ro = 0;
+ args.nv50.priv = 0;
+ args.nv50.kind = mem->kind;
+ args.nv50.comp = mem->comp;
+ argc = sizeof(args.nv50);
+ break;
+ case NVIF_CLASS_VMM_GF100:
+ case NVIF_CLASS_VMM_GM200:
+ case NVIF_CLASS_VMM_GP100:
+ args.gf100.version = 0;
+ if (mem->mem.type & NVIF_MEM_VRAM)
+ args.gf100.vol = 0;
+ else
+ args.gf100.vol = 1;
+ args.gf100.ro = 0;
+ args.gf100.priv = 0;
+ args.gf100.kind = mem->kind;
+ argc = sizeof(args.gf100);
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ super = vmm->object.client->super;
+ vmm->object.client->super = true;
+ ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
+ &mem->mem, 0);
+ vmm->object.client->super = super;
+ return ret;
+}
+
+void
+nouveau_mem_fini(struct nouveau_mem *mem)
+{
+ nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
+ nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
+ mutex_lock(&mem->cli->drm->master.lock);
+ nvif_mem_fini(&mem->mem);
+ mutex_unlock(&mem->cli->drm->master.lock);
+}
+
+int
+nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ struct nouveau_cli *cli = mem->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_mmu *mmu = &cli->mmu;
+ struct nvif_mem_ram_v0 args = {};
+ bool super = cli->base.super;
+ u8 type;
+ int ret;
+
+ if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+ type = drm->ttm.type_ncoh;
+ else
+ type = drm->ttm.type_host;
+
+ if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
+ mem->comp = mem->kind = 0;
+ if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
+ if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+ mem->kind = mmu->kind[mem->kind];
+ mem->comp = 0;
+ }
+
+ if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
+ else args.dma = tt->dma_address;
+
+ mutex_lock(&drm->master.lock);
+ cli->base.super = true;
+ ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
+ reg->num_pages << PAGE_SHIFT,
+ &args, sizeof(args), &mem->mem);
+ cli->base.super = super;
+ mutex_unlock(&drm->master.lock);
+ return ret;
+}
+
+int
+nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ struct nouveau_cli *cli = mem->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_mmu *mmu = &cli->mmu;
+ bool super = cli->base.super;
+ u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+ int ret;
+
+ mutex_lock(&drm->master.lock);
+ cli->base.super = true;
+ switch (cli->mem->oclass) {
+ case NVIF_CLASS_MEM_GF100:
+ ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+ drm->ttm.type_vram, page, size,
+ &(struct gf100_mem_v0) {
+ .contig = contig,
+ }, sizeof(struct gf100_mem_v0),
+ &mem->mem);
+ break;
+ case NVIF_CLASS_MEM_NV50:
+ ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+ drm->ttm.type_vram, page, size,
+ &(struct nv50_mem_v0) {
+ .bankswz = mmu->kind[mem->kind] == 2,
+ .contig = contig,
+ }, sizeof(struct nv50_mem_v0),
+ &mem->mem);
+ break;
+ default:
+ ret = -ENOSYS;
+ WARN_ON(1);
+ break;
+ }
+ cli->base.super = super;
+ mutex_unlock(&drm->master.lock);
+
+ reg->start = mem->mem.addr >> PAGE_SHIFT;
+ return ret;
+}
+
+void
+nouveau_mem_del(struct ttm_mem_reg *reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ nouveau_mem_fini(mem);
+ kfree(reg->mm_node);
+ reg->mm_node = NULL;
+}
+
+int
+nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+ struct ttm_mem_reg *reg)
+{
+ struct nouveau_mem *mem;
+
+ if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+ return -ENOMEM;
+ mem->cli = cli;
+ mem->kind = kind;
+ mem->comp = comp;
+
+ reg->mm_node = mem;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
new file mode 100644
index 000000000000..f6d039e73812
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -0,0 +1,30 @@
+#ifndef __NOUVEAU_MEM_H__
+#define __NOUVEAU_MEM_H__
+#include <drm/ttm/ttm_bo_api.h>
+struct ttm_dma_tt;
+
+#include <nvif/mem.h>
+#include <nvif/vmm.h>
+
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_mem_reg *reg)
+{
+ return reg->mm_node;
+}
+
+struct nouveau_mem {
+ struct nouveau_cli *cli;
+ u8 kind;
+ u8 comp;
+ struct nvif_mem mem;
+ struct nvif_vma vma[2];
+};
+
+int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+ struct ttm_mem_reg *);
+void nouveau_mem_del(struct ttm_mem_reg *);
+int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
+int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
+void nouveau_mem_fini(struct nouveau_mem *);
+int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index fde11ce466e4..11f6ca89769b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include "nouveau_drv.h"
+#include "nouveau_mem.h"
#include "nouveau_ttm.h"
struct nouveau_sgdma_be {
@@ -10,7 +11,7 @@ struct nouveau_sgdma_be {
* nouve_bo.c works properly, otherwise have to move them here
*/
struct ttm_dma_tt ttm;
- struct nvkm_mem *node;
+ struct nouveau_mem *mem;
};
static void
@@ -28,19 +29,20 @@ static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nvkm_mem *node = reg->mm_node;
-
- if (ttm->sg) {
- node->sg = ttm->sg;
- node->pages = NULL;
- } else {
- node->sg = NULL;
- node->pages = nvbe->ttm.dma_address;
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ int ret;
+
+ ret = nouveau_mem_host(reg, &nvbe->ttm);
+ if (ret)
+ return ret;
+
+ ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+ if (ret) {
+ nouveau_mem_fini(mem);
+ return ret;
}
- node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
- nvkm_vm_map(&node->vma[0], node);
- nvbe->node = node;
+ nvbe->mem = mem;
return 0;
}
@@ -48,7 +50,7 @@ static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- nvkm_vm_unmap(&nvbe->node->vma[0]);
+ nouveau_mem_fini(nvbe->mem);
return 0;
}
@@ -62,30 +64,20 @@ static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nvkm_mem *node = reg->mm_node;
-
- /* noop: bound in move_notify() */
- if (ttm->sg) {
- node->sg = ttm->sg;
- node->pages = NULL;
- } else {
- node->sg = NULL;
- node->pages = nvbe->ttm.dma_address;
- }
- node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
- return 0;
-}
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ int ret;
-static int
-nv50_sgdma_unbind(struct ttm_tt *ttm)
-{
- /* noop: unbound in move_notify() */
+ ret = nouveau_mem_host(reg, &nvbe->ttm);
+ if (ret)
+ return ret;
+
+ nvbe->mem = mem;
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
.bind = nv50_sgdma_bind,
- .unbind = nv50_sgdma_unbind,
+ .unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index b0ad7fcefcf5..08b974b30482 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -23,53 +23,37 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include "nouveau_drv.h"
-#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_ttm.h"
#include <drm/drm_legacy.h>
#include <core/tegra.h>
static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
- man->priv = fb;
return 0;
}
static int
-nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
- man->priv = NULL;
return 0;
}
-static inline void
-nvkm_mem_node_cleanup(struct nvkm_mem *node)
+static void
+nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
- if (node->vma[0].node) {
- nvkm_vm_unmap(&node->vma[0]);
- nvkm_vm_put(&node->vma[0]);
- }
-
- if (node->vma[1].node) {
- nvkm_vm_unmap(&node->vma[1]);
- nvkm_vm_put(&node->vma[1]);
- }
+ nouveau_mem_del(reg);
}
static void
-nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *reg)
+nouveau_manager_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
- nvkm_mem_node_cleanup(reg->mm_node);
- ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
}
static int
@@ -78,192 +62,105 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *reg)
{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_mem *node;
- u32 size_nc = 0;
+ struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_mem *mem;
int ret;
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
- size_nc = 1 << nvbo->page_shift;
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ mem = nouveau_mem(reg);
+ if (ret)
+ return ret;
- ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
- reg->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0x3ff, &node);
+ ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
if (ret) {
- reg->mm_node = NULL;
- return (ret == -ENOSPC) ? 0 : ret;
+ nouveau_mem_del(reg);
+ if (ret == -ENOSPC) {
+ reg->mm_node = NULL;
+ return 0;
+ }
+ return ret;
}
- node->page_shift = nvbo->page_shift;
-
- reg->mm_node = node;
- reg->start = node->offset >> PAGE_SHIFT;
return 0;
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
- .init = nouveau_vram_manager_init,
- .takedown = nouveau_vram_manager_fini,
+ .init = nouveau_manager_init,
+ .takedown = nouveau_manager_fini,
.get_node = nouveau_vram_manager_new,
- .put_node = nouveau_vram_manager_del,
+ .put_node = nouveau_manager_del,
+ .debug = nouveau_manager_debug,
};
static int
-nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
- return 0;
-}
-
-static int
-nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
- return 0;
-}
-
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *reg)
-{
- nvkm_mem_node_cleanup(reg->mm_node);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
-}
-
-static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_mem_reg *reg)
{
- struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_mem *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
+ struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_mem *mem;
+ int ret;
- node->page_shift = 12;
-
- switch (drm->client.device.info.family) {
- case NV_DEVICE_INFO_V0_TNT:
- case NV_DEVICE_INFO_V0_CELSIUS:
- case NV_DEVICE_INFO_V0_KELVIN:
- case NV_DEVICE_INFO_V0_RANKINE:
- case NV_DEVICE_INFO_V0_CURIE:
- break;
- case NV_DEVICE_INFO_V0_TESLA:
- if (drm->client.device.info.chipset != 0x50)
- node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
- break;
- case NV_DEVICE_INFO_V0_FERMI:
- case NV_DEVICE_INFO_V0_KEPLER:
- case NV_DEVICE_INFO_V0_MAXWELL:
- case NV_DEVICE_INFO_V0_PASCAL:
- node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
- break;
- default:
- NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
- drm->client.device.info.family);
- break;
- }
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ mem = nouveau_mem(reg);
+ if (ret)
+ return ret;
- reg->mm_node = node;
- reg->start = 0;
+ reg->start = 0;
return 0;
}
-static void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
- .init = nouveau_gart_manager_init,
- .takedown = nouveau_gart_manager_fini,
+ .init = nouveau_manager_init,
+ .takedown = nouveau_manager_fini,
.get_node = nouveau_gart_manager_new,
- .put_node = nouveau_gart_manager_del,
- .debug = nouveau_gart_manager_debug
+ .put_node = nouveau_manager_del,
+ .debug = nouveau_manager_debug
};
-/*XXX*/
-#include <subdev/mmu/nv04.h>
-static int
-nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
- struct nv04_mmu *priv = (void *)mmu;
- struct nvkm_vm *vm = NULL;
- nvkm_vm_ref(priv->vm, &vm, NULL);
- man->priv = vm;
- return 0;
-}
-
-static int
-nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
- struct nvkm_vm *vm = man->priv;
- nvkm_vm_ref(NULL, &vm, NULL);
- man->priv = NULL;
- return 0;
-}
-
-static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
-{
- struct nvkm_mem *node = reg->mm_node;
- if (node->vma[0].node)
- nvkm_vm_put(&node->vma[0]);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
-}
-
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_mem_reg *reg)
{
- struct nvkm_mem *node;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_mem *mem;
int ret;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- node->page_shift = 12;
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ mem = nouveau_mem(reg);
+ if (ret)
+ return ret;
- ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
- NV_MEM_ACCESS_RW, &node->vma[0]);
+ ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
+ reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
- kfree(node);
+ nouveau_mem_del(reg);
+ if (ret == -ENOSPC) {
+ reg->mm_node = NULL;
+ return 0;
+ }
return ret;
}
- reg->mm_node = node;
- reg->start = node->vma[0].offset >> PAGE_SHIFT;
+ reg->start = mem->vma[0].addr >> PAGE_SHIFT;
return 0;
}
-static void
-nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
const struct ttm_mem_type_manager_func nv04_gart_manager = {
- .init = nv04_gart_manager_init,
- .takedown = nv04_gart_manager_fini,
+ .init = nouveau_manager_init,
+ .takedown = nouveau_manager_fini,
.get_node = nv04_gart_manager_new,
- .put_node = nv04_gart_manager_del,
- .debug = nv04_gart_manager_debug
+ .put_node = nouveau_manager_del,
+ .debug = nouveau_manager_debug
};
int
@@ -343,44 +240,43 @@ nouveau_ttm_init(struct nouveau_drm *drm)
{
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_pci *pci = device->pci;
+ struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
- u8 bits;
- int ret;
+ int typei, ret;
- if (pci && pci->agp.bridge) {
- drm->agp.bridge = pci->agp.bridge;
- drm->agp.base = pci->agp.base;
- drm->agp.size = pci->agp.size;
- drm->agp.cma = pci->agp.cma;
- }
+ typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
+ NVIF_MEM_COHERENT);
+ if (typei < 0)
+ return -ENOSYS;
- bits = nvxx_mmu(&drm->client.device)->dma_bits;
- if (nvxx_device(&drm->client.device)->func->pci) {
- if (drm->agp.bridge)
- bits = 32;
- } else if (device->func->tegra) {
- struct nvkm_device_tegra *tegra = device->func->tegra(device);
+ drm->ttm.type_host = typei;
- /*
- * If the platform can use a IOMMU, then the addressable DMA
- * space is constrained by the IOMMU bit
- */
- if (tegra->func->iommu_bit)
- bits = min(bits, tegra->func->iommu_bit);
+ typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+ if (typei < 0)
+ return -ENOSYS;
- }
+ drm->ttm.type_ncoh = typei;
- ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
- if (ret && bits != 32) {
- bits = 32;
- ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
+ if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
+ drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
+ NVIF_MEM_KIND |
+ NVIF_MEM_COMP |
+ NVIF_MEM_DISP);
+ if (typei < 0)
+ return -ENOSYS;
+
+ drm->ttm.type_vram = typei;
+ } else {
+ drm->ttm.type_vram = -1;
}
- if (ret)
- return ret;
- ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
- if (ret)
- dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
+ if (pci && pci->agp.bridge) {
+ drm->agp.bridge = pci->agp.bridge;
+ drm->agp.base = pci->agp.base;
+ drm->agp.size = pci->agp.size;
+ drm->agp.cma = pci->agp.cma;
+ }
ret = nouveau_ttm_global_init(drm);
if (ret)
@@ -391,7 +287,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
- bits <= 32 ? true : false);
+ drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
@@ -415,7 +311,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* GART init */
if (!drm->agp.bridge) {
- drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
+ drm->gem.gart_available = drm->client.vmm.vmm.limit;
} else {
drm->gem.gart_available = drm->agp.size;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
new file mode 100644
index 000000000000..9e2628dd8e4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_vmm.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+#include "nouveau_mem.h"
+
+void
+nouveau_vma_unmap(struct nouveau_vma *vma)
+{
+ if (vma->mem) {
+ nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
+ vma->mem = NULL;
+ }
+}
+
+int
+nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
+{
+ struct nvif_vma tmp = { .addr = vma->addr };
+ int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
+ if (ret)
+ return ret;
+ vma->mem = mem;
+ return 0;
+}
+
+struct nouveau_vma *
+nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
+{
+ struct nouveau_vma *vma;
+
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ if (vma->vmm == vmm)
+ return vma;
+ }
+
+ return NULL;
+}
+
+void
+nouveau_vma_del(struct nouveau_vma **pvma)
+{
+ struct nouveau_vma *vma = *pvma;
+ if (vma && --vma->refs <= 0) {
+ if (likely(vma->addr != ~0ULL)) {
+ struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
+ nvif_vmm_put(&vma->vmm->vmm, &tmp);
+ }
+ list_del(&vma->head);
+ *pvma = NULL;
+ kfree(*pvma);
+ }
+}
+
+int
+nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
+ struct nouveau_vma **pvma)
+{
+ struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+ struct nouveau_vma *vma;
+ struct nvif_vma tmp;
+ int ret;
+
+ if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
+ vma->refs++;
+ return 0;
+ }
+
+ if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
+ return -ENOMEM;
+ vma->vmm = vmm;
+ vma->refs = 1;
+ vma->addr = ~0ULL;
+ vma->mem = NULL;
+ list_add_tail(&vma->head, &nvbo->vma_list);
+
+ if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ mem->mem.page == nvbo->page) {
+ ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
+ mem->mem.size, &tmp);
+ if (ret)
+ goto done;
+
+ vma->addr = tmp.addr;
+ ret = nouveau_vma_map(vma, mem);
+ } else {
+ ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
+ mem->mem.size, &tmp);
+ vma->addr = tmp.addr;
+ }
+
+done:
+ if (ret)
+ nouveau_vma_del(pvma);
+ return ret;
+}
+
+void
+nouveau_vmm_fini(struct nouveau_vmm *vmm)
+{
+ nvif_vmm_fini(&vmm->vmm);
+ vmm->cli = NULL;
+}
+
+int
+nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
+{
+ int ret = nvif_vmm_init(&cli->mmu, oclass, PAGE_SIZE, 0, NULL, 0,
+ &vmm->vmm);
+ if (ret)
+ return ret;
+
+ vmm->cli = cli;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
new file mode 100644
index 000000000000..5c31f43678d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -0,0 +1,31 @@
+#ifndef __NOUVEAU_VMA_H__
+#define __NOUVEAU_VMA_H__
+#include <nvif/vmm.h>
+struct nouveau_bo;
+struct nouveau_mem;
+
+struct nouveau_vma {
+ struct nouveau_vmm *vmm;
+ int refs;
+ struct list_head head;
+ u64 addr;
+
+ struct nouveau_mem *mem;
+};
+
+struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
+int nouveau_vma_new(struct nouveau_bo *, struct nouveau_vmm *,
+ struct nouveau_vma **);
+void nouveau_vma_del(struct nouveau_vma **);
+int nouveau_vma_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vma_unmap(struct nouveau_vma *);
+
+struct nouveau_vmm {
+ struct nouveau_cli *cli;
+ struct nvif_vmm vmm;
+ struct nvkm_vm *vm;
+};
+
+int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
+void nouveau_vmm_fini(struct nouveau_vmm *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index fb47d46050ec..584466ef688f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -318,7 +318,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
ret = nvif_object_init(disp, 0, oclass[0],
data, size, &chan->user);
if (ret == 0)
- nvif_object_map(&chan->user);
+ nvif_object_map(&chan->user, NULL, 0);
nvif_object_sclass_put(&sclass);
return ret;
}
@@ -424,7 +424,7 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
{
struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
struct nv50_dmac_ctxdma *ctxdma;
- const u8 kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ const u8 kind = fb->nvbo->kind;
const u32 handle = 0xfb000000 | kind;
struct {
struct nv_dma_v0 base;
@@ -510,6 +510,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
int ret;
mutex_init(&dmac->lock);
+ INIT_LIST_HEAD(&dmac->ctxdma);
dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
&dmac->handle, GFP_KERNEL);
@@ -556,7 +557,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- INIT_LIST_HEAD(&dmac->ctxdma);
return ret;
}
@@ -847,7 +847,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
asyw->image.w = fb->base.width;
asyw->image.h = fb->base.height;
- asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ asyw->image.kind = fb->nvbo->kind;
if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
asyw->interval = 0;
@@ -857,9 +857,9 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
if (asyw->image.kind) {
asyw->image.layout = 0;
if (drm->client.device.info.chipset >= 0xc0)
- asyw->image.block = fb->nvbo->tile_mode >> 4;
+ asyw->image.block = fb->nvbo->mode >> 4;
else
- asyw->image.block = fb->nvbo->tile_mode;
+ asyw->image.block = fb->nvbo->mode;
asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
} else {
asyw->image.layout = 1;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 327dcd7901ed..facd18564e0d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -25,6 +25,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma.offset));
- OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma->addr));
+ OUT_RING(chan, lower_32_bits(fb->vma->addr));
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -248,8 +249,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma.offset));
- OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma->addr));
+ OUT_RING(chan, lower_32_bits(fb->vma->addr));
FIRE_RING(chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index bd7a8a1e4ad9..5f0c0c27d5dc 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -25,6 +25,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
+#include "nouveau_vmm.h"
#include "nv50_display.h"
@@ -68,12 +69,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct nv84_fence_chan *fctx = chan->fence;
- u64 addr = chan->chid * 16;
-
- if (fence->sysmem)
- addr += fctx->vma_gart.offset;
- else
- addr += fctx->vma.offset;
+ u64 addr = fctx->vma->addr + chan->chid * 16;
return fctx->base.emit32(chan, addr, fence->base.seqno);
}
@@ -83,12 +79,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
struct nv84_fence_chan *fctx = chan->fence;
- u64 addr = prev->chid * 16;
-
- if (fence->sysmem)
- addr += fctx->vma_gart.offset;
- else
- addr += fctx->vma.offset;
+ u64 addr = fctx->vma->addr + prev->chid * 16;
return fctx->base.sync32(chan, addr, fence->base.seqno);
}
@@ -108,8 +99,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
mutex_lock(&priv->mutex);
- nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
- nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ nouveau_vma_del(&fctx->vma);
mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
@@ -137,11 +127,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sequence = nv84_fence_read(chan);
mutex_lock(&priv->mutex);
- ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
- if (ret == 0) {
- ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
- &fctx->vma_gart);
- }
+ ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
mutex_unlock(&priv->mutex);
if (ret)
@@ -182,10 +168,6 @@ static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo_gart);
- if (priv->bo_gart)
- nouveau_bo_unpin(priv->bo_gart);
- nouveau_bo_ref(NULL, &priv->bo_gart);
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
@@ -238,21 +220,6 @@ nv84_fence_create(struct nouveau_drm *drm)
nouveau_bo_ref(NULL, &priv->bo);
}
- if (ret == 0)
- ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
- TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
- 0, NULL, NULL, &priv->bo_gart);
- if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo_gart);
- if (ret)
- nouveau_bo_unpin(priv->bo_gart);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo_gart);
- }
-
if (ret)
nv84_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 90f27bfa381f..c0deef4fe727 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -25,6 +25,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma.offset));
- OUT_RING (chan, lower_32_bits(fb->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma->addr));
+ OUT_RING (chan, lower_32_bits(fb->vma->addr));
BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -250,8 +251,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma.offset));
- OUT_RING (chan, lower_32_bits(fb->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma->addr));
+ OUT_RING (chan, lower_32_bits(fb->vma->addr));
FIRE_RING (chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 067b5e9f5ec1..f1675a4ab6fa 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -2,4 +2,7 @@ nvif-y := nvif/object.o
nvif-y += nvif/client.o
nvif-y += nvif/device.o
nvif-y += nvif/driver.o
+nvif-y += nvif/mem.o
+nvif-y += nvif/mmu.o
nvif-y += nvif/notify.o
+nvif-y += nvif/vmm.o
diff --git a/drivers/gpu/drm/nouveau/nvif/mem.c b/drivers/gpu/drm/nouveau/nvif/mem.c
new file mode 100644
index 000000000000..0f9382c60145
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/mem.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mem.h>
+#include <nvif/client.h>
+
+#include <nvif/if000a.h>
+
+void
+nvif_mem_fini(struct nvif_mem *mem)
+{
+ nvif_object_fini(&mem->object);
+}
+
+int
+nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+ struct nvif_mem_v0 *args;
+ u8 stack[128];
+ int ret;
+
+ mem->object.client = NULL;
+ if (type < 0)
+ return -EINVAL;
+
+ if (sizeof(*args) + argc > sizeof(stack)) {
+ if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+ return -ENOMEM;
+ } else {
+ args = (void *)stack;
+ }
+ args->version = 0;
+ args->type = type;
+ args->page = page;
+ args->size = size;
+ memcpy(args->data, argv, argc);
+
+ ret = nvif_object_init(&mmu->object, 0, oclass, args,
+ sizeof(*args) + argc, &mem->object);
+ if (ret == 0) {
+ mem->type = mmu->type[type].type;
+ mem->page = args->page;
+ mem->addr = args->addr;
+ mem->size = args->size;
+ }
+
+ if (args != (void *)stack)
+ kfree(args);
+ return ret;
+
+}
+
+int
+nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+ int ret = -EINVAL, i;
+
+ mem->object.client = NULL;
+
+ for (i = 0; ret && i < mmu->type_nr; i++) {
+ if ((mmu->type[i].type & type) == type) {
+ ret = nvif_mem_init_type(mmu, oclass, i, page, size,
+ argv, argc, mem);
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
new file mode 100644
index 000000000000..15d0dcbf7ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mmu.h>
+
+#include <nvif/class.h>
+#include <nvif/if0008.h>
+
+void
+nvif_mmu_fini(struct nvif_mmu *mmu)
+{
+ kfree(mmu->kind);
+ kfree(mmu->type);
+ kfree(mmu->heap);
+ nvif_object_fini(&mmu->object);
+}
+
+int
+nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
+{
+ struct nvif_mmu_v0 args;
+ int ret, i;
+
+ args.version = 0;
+ mmu->heap = NULL;
+ mmu->type = NULL;
+ mmu->kind = NULL;
+
+ ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
+ &mmu->object);
+ if (ret)
+ goto done;
+
+ mmu->dmabits = args.dmabits;
+ mmu->heap_nr = args.heap_nr;
+ mmu->type_nr = args.type_nr;
+ mmu->kind_nr = args.kind_nr;
+
+ mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
+ mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
+ if (ret = -ENOMEM, !mmu->heap || !mmu->type)
+ goto done;
+
+ mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
+ if (!mmu->kind && mmu->kind_nr)
+ goto done;
+
+ for (i = 0; i < mmu->heap_nr; i++) {
+ struct nvif_mmu_heap_v0 args = { .index = i };
+
+ ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
+ &args, sizeof(args));
+ if (ret)
+ goto done;
+
+ mmu->heap[i].size = args.size;
+ }
+
+ for (i = 0; i < mmu->type_nr; i++) {
+ struct nvif_mmu_type_v0 args = { .index = i };
+
+ ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
+ &args, sizeof(args));
+ if (ret)
+ goto done;
+
+ mmu->type[i].type = 0;
+ if (args.vram) mmu->type[i].type |= NVIF_MEM_VRAM;
+ if (args.host) mmu->type[i].type |= NVIF_MEM_HOST;
+ if (args.comp) mmu->type[i].type |= NVIF_MEM_COMP;
+ if (args.disp) mmu->type[i].type |= NVIF_MEM_DISP;
+ if (args.kind ) mmu->type[i].type |= NVIF_MEM_KIND;
+ if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
+ if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
+ if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
+ mmu->type[i].heap = args.heap;
+ }
+
+ if (mmu->kind_nr) {
+ struct nvif_mmu_kind_v0 *kind;
+ u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;
+
+ if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
+ goto done;
+ kind->version = 0;
+ kind->count = mmu->kind_nr;
+
+ ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
+ kind, argc);
+ if (ret == 0)
+ memcpy(mmu->kind, kind->data, kind->count);
+ kfree(kind);
+ }
+
+done:
+ if (ret)
+ nvif_mmu_fini(mmu);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index c3fb6a20f567..40adfe9b334b 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -166,46 +166,77 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
}
void
-nvif_object_unmap(struct nvif_object *object)
+nvif_object_unmap_handle(struct nvif_object *object)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_unmap unmap;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_UNMAP,
+ };
+
+ nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
+ u64 *handle, u64 *length)
{
- if (object->map.size) {
- struct nvif_client *client = object->client;
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_unmap unmap;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_UNMAP,
- };
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_map_v0 map;
+ } *args;
+ u32 argn = sizeof(*args) + argc;
+ int ret, maptype;
+
+ if (!(args = kzalloc(argn, GFP_KERNEL)))
+ return -ENOMEM;
+ args->ioctl.type = NVIF_IOCTL_V0_MAP;
+ memcpy(args->map.data, argv, argc);
- if (object->map.ptr) {
+ ret = nvif_object_ioctl(object, args, argn, NULL);
+ *handle = args->map.handle;
+ *length = args->map.length;
+ maptype = args->map.type;
+ kfree(args);
+ return ret ? ret : (maptype == NVIF_IOCTL_MAP_V0_IO);
+}
+
+void
+nvif_object_unmap(struct nvif_object *object)
+{
+ struct nvif_client *client = object->client;
+ if (object->map.ptr) {
+ if (object->map.size) {
client->driver->unmap(client, object->map.ptr,
object->map.size);
- object->map.ptr = NULL;
+ object->map.size = 0;
}
-
- nvif_object_ioctl(object, &args, sizeof(args), NULL);
- object->map.size = 0;
+ object->map.ptr = NULL;
+ nvif_object_unmap_handle(object);
}
}
int
-nvif_object_map(struct nvif_object *object)
+nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
{
struct nvif_client *client = object->client;
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_map_v0 map;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_MAP,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret == 0) {
- object->map.size = args.map.length;
- object->map.ptr = client->driver->map(client, args.map.handle,
- object->map.size);
- if (ret = -ENOMEM, object->map.ptr)
+ u64 handle, length;
+ int ret = nvif_object_map_handle(object, argv, argc, &handle, &length);
+ if (ret >= 0) {
+ if (ret) {
+ object->map.ptr = client->driver->map(client,
+ handle,
+ length);
+ if (ret = -ENOMEM, object->map.ptr) {
+ object->map.size = length;
+ return 0;
+ }
+ } else {
+ object->map.ptr = (void *)(unsigned long)handle;
return 0;
- nvif_object_unmap(object);
+ }
+ nvif_object_unmap_handle(object);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
new file mode 100644
index 000000000000..31cdb2d2e1ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/vmm.h>
+#include <nvif/mem.h>
+
+#include <nvif/if000c.h>
+
+int
+nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
+{
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
+ &(struct nvif_vmm_unmap_v0) { .addr = addr },
+ sizeof(struct nvif_vmm_unmap_v0));
+}
+
+int
+nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_mem *mem, u64 offset)
+{
+ struct nvif_vmm_map_v0 *args;
+ u8 stack[16];
+ int ret;
+
+ if (sizeof(*args) + argc > sizeof(stack)) {
+ if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+ return -ENOMEM;
+ } else {
+ args = (void *)stack;
+ }
+
+ args->version = 0;
+ args->addr = addr;
+ args->size = size;
+ args->memory = nvif_handle(&mem->object);
+ args->offset = offset;
+ memcpy(args->data, argv, argc);
+
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
+ args, sizeof(*args) + argc);
+ if (args != (void *)stack)
+ kfree(args);
+ return ret;
+}
+
+void
+nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+ if (vma->size) {
+ WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
+ &(struct nvif_vmm_put_v0) {
+ .addr = vma->addr,
+ }, sizeof(struct nvif_vmm_put_v0)));
+ vma->size = 0;
+ }
+}
+
+int
+nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
+ u8 page, u8 align, u64 size, struct nvif_vma *vma)
+{
+ struct nvif_vmm_get_v0 args;
+ int ret;
+
+ args.version = vma->size = 0;
+ args.sparse = sparse;
+ args.page = page;
+ args.align = align;
+ args.size = size;
+
+ switch (type) {
+ case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
+ case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
+ case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
+ &args, sizeof(args));
+ if (ret == 0) {
+ vma->addr = args.addr;
+ vma->size = args.size;
+ }
+ return ret;
+}
+
+void
+nvif_vmm_fini(struct nvif_vmm *vmm)
+{
+ kfree(vmm->page);
+ nvif_object_fini(&vmm->object);
+}
+
+int
+nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
+ void *argv, u32 argc, struct nvif_vmm *vmm)
+{
+ struct nvif_vmm_v0 *args;
+ u32 argn = sizeof(*args) + argc;
+ int ret = -ENOSYS, i;
+
+ vmm->object.client = NULL;
+ vmm->page = NULL;
+
+ if (!(args = kmalloc(argn, GFP_KERNEL)))
+ return -ENOMEM;
+ args->version = 0;
+ args->addr = addr;
+ args->size = size;
+ memcpy(args->data, argv, argc);
+
+ ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
+ &vmm->object);
+ if (ret)
+ goto done;
+
+ vmm->start = args->addr;
+ vmm->limit = args->size;
+
+ vmm->page_nr = args->page_nr;
+ vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
+ if (!vmm->page) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vmm->page_nr; i++) {
+ struct nvif_vmm_page_v0 args = { .index = i };
+
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
+ &args, sizeof(args));
+ if (ret)
+ break;
+
+ vmm->page[i].shift = args.shift;
+ vmm->page[i].sparse = args.sparse;
+ vmm->page[i].vram = args.vram;
+ vmm->page[i].host = args.host;
+ vmm->page[i].comp = args.comp;
+ }
+
+done:
+ if (ret)
+ nvif_vmm_fini(vmm);
+ kfree(args);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 0d3a896892b4..ac671202919e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -301,5 +301,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
client->ntfy = ntfy;
+ INIT_LIST_HEAD(&client->umem);
+ spin_lock_init(&client->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index b6c916954a10..657231c3c098 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -126,6 +126,15 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
return ret;
}
+static int
+nvkm_engine_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_engine *engine = nvkm_engine(subdev);
+ if (engine->func->preinit)
+ engine->func->preinit(engine);
+ return 0;
+}
+
static void *
nvkm_engine_dtor(struct nvkm_subdev *subdev)
{
@@ -138,6 +147,7 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_engine_func = {
.dtor = nvkm_engine_dtor,
+ .preinit = nvkm_engine_preinit,
.init = nvkm_engine_init,
.fini = nvkm_engine_fini,
.intr = nvkm_engine_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index a7bd22706b2a..d6de2b3ed2c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -42,6 +42,14 @@ nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
}
/* accessor functions for gpuobjs allocated directly from instmem */
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
@@ -67,6 +75,7 @@ nvkm_gpuobj_heap_fast = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_heap_map,
};
static const struct nvkm_gpuobj_func
@@ -74,6 +83,7 @@ nvkm_gpuobj_heap_slow = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_heap_rd32,
.wr32 = nvkm_gpuobj_heap_wr32,
+ .map = nvkm_gpuobj_heap_map,
};
static void *
@@ -90,9 +100,19 @@ nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
.acquire = nvkm_gpuobj_heap_acquire,
+ .map = nvkm_gpuobj_heap_map,
};
/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+ vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
@@ -118,6 +138,7 @@ nvkm_gpuobj_fast = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_map,
};
static const struct nvkm_gpuobj_func
@@ -125,6 +146,7 @@ nvkm_gpuobj_slow = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32,
.wr32 = nvkm_gpuobj_wr32,
+ .map = nvkm_gpuobj_map,
};
static void *
@@ -143,6 +165,7 @@ nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
.acquire = nvkm_gpuobj_acquire,
+ .map = nvkm_gpuobj_map,
};
static int
@@ -185,7 +208,7 @@ nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
gpuobj->size = nvkm_memory_size(gpuobj->memory);
}
- return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+ return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}
void
@@ -196,7 +219,7 @@ nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
if (gpuobj->parent)
nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
nvkm_mm_fini(&gpuobj->heap);
- nvkm_memory_del(&gpuobj->memory);
+ nvkm_memory_unref(&gpuobj->memory);
kfree(*pgpuobj);
*pgpuobj = NULL;
}
@@ -218,26 +241,6 @@ nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
return ret;
}
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
- u32 access, struct nvkm_vma *vma)
-{
- struct nvkm_memory *memory = gpuobj->memory;
- int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
- if (ret == 0)
- nvkm_memory_map(memory, vma, 0);
- return ret;
-}
-
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
- if (vma->node) {
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- }
-}
-
/* the below is basically only here to support sharing the paged dma object
* for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
* anywhere else.
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index be19bbe56bba..d777df5a64e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -53,7 +53,7 @@ nvkm_ioctl_sclass(struct nvkm_client *client,
union {
struct nvif_ioctl_sclass_v0 v0;
} *args = data;
- struct nvkm_oclass oclass;
+ struct nvkm_oclass oclass = { .client = client };
int ret = -ENOSYS, i = 0;
nvif_ioctl(object, "sclass size %d\n", size);
@@ -257,13 +257,19 @@ nvkm_ioctl_map(struct nvkm_client *client,
union {
struct nvif_ioctl_map_v0 v0;
} *args = data;
+ enum nvkm_object_map type;
int ret = -ENOSYS;
nvif_ioctl(object, "map size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(object, "map vers %d\n", args->v0.version);
- ret = nvkm_object_map(object, &args->v0.handle,
- &args->v0.length);
+ ret = nvkm_object_map(object, data, size, &type,
+ &args->v0.handle,
+ &args->v0.length);
+ if (type == NVKM_OBJECT_MAP_IO)
+ args->v0.type = NVIF_IOCTL_MAP_V0_IO;
+ else
+ args->v0.type = NVIF_IOCTL_MAP_V0_VA;
}
return ret;
@@ -281,6 +287,7 @@ nvkm_ioctl_unmap(struct nvkm_client *client,
nvif_ioctl(object, "unmap size %d\n", size);
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
nvif_ioctl(object, "unmap\n");
+ ret = nvkm_object_unmap(object);
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 8903c04c977e..e85a08ecd9da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -22,27 +22,117 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include <core/memory.h>
+#include <core/mm.h>
+#include <subdev/fb.h>
#include <subdev/instmem.h>
void
+nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
+ struct nvkm_tags **ptags)
+{
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_tags *tags = *ptags;
+ if (tags) {
+ mutex_lock(&fb->subdev.mutex);
+ if (refcount_dec_and_test(&tags->refcount)) {
+ nvkm_mm_free(&fb->tags, &tags->mn);
+ kfree(memory->tags);
+ memory->tags = NULL;
+ }
+ mutex_unlock(&fb->subdev.mutex);
+ *ptags = NULL;
+ }
+}
+
+int
+nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
+ u32 nr, void (*clr)(struct nvkm_device *, u32, u32),
+ struct nvkm_tags **ptags)
+{
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_tags *tags;
+
+ mutex_lock(&fb->subdev.mutex);
+ if ((tags = memory->tags)) {
+ /* If comptags exist for the memory, but a different amount
+ * than requested, the buffer is being mapped with settings
+ * that are incompatible with existing mappings.
+ */
+ if (tags->mn && tags->mn->length != nr) {
+ mutex_unlock(&fb->subdev.mutex);
+ return -EINVAL;
+ }
+
+ refcount_inc(&tags->refcount);
+ mutex_unlock(&fb->subdev.mutex);
+ *ptags = tags;
+ return 0;
+ }
+
+ if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
+ mutex_unlock(&fb->subdev.mutex);
+ return -ENOMEM;
+ }
+
+ if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+ if (clr)
+ clr(device, tags->mn->offset, tags->mn->length);
+ } else {
+ /* Failure to allocate HW comptags is not an error, the
+ * caller should fall back to an uncompressed map.
+ *
+ * As memory can be mapped in multiple places, we still
+ * need to track the allocation failure and ensure that
+ * any additional mappings remain uncompressed.
+ *
+ * This is handled by returning an empty nvkm_tags.
+ */
+ tags->mn = NULL;
+ }
+
+ refcount_set(&tags->refcount, 1);
+ mutex_unlock(&fb->subdev.mutex);
+ *ptags = tags;
+ return 0;
+}
+
+void
nvkm_memory_ctor(const struct nvkm_memory_func *func,
struct nvkm_memory *memory)
{
memory->func = func;
+ kref_init(&memory->kref);
+}
+
+static void
+nvkm_memory_del(struct kref *kref)
+{
+ struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
+ if (!WARN_ON(!memory->func)) {
+ if (memory->func->dtor)
+ memory = memory->func->dtor(memory);
+ kfree(memory);
+ }
}
void
-nvkm_memory_del(struct nvkm_memory **pmemory)
+nvkm_memory_unref(struct nvkm_memory **pmemory)
{
struct nvkm_memory *memory = *pmemory;
- if (memory && !WARN_ON(!memory->func)) {
- if (memory->func->dtor)
- *pmemory = memory->func->dtor(memory);
- kfree(*pmemory);
+ if (memory) {
+ kref_put(&memory->kref, nvkm_memory_del);
*pmemory = NULL;
}
}
+struct nvkm_memory *
+nvkm_memory_ref(struct nvkm_memory *memory)
+{
+ if (memory)
+ kref_get(&memory->kref);
+ return memory;
+}
+
int
nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
u64 size, u32 align, bool zero,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 5c7891234eea..f78a06a6b2f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -237,7 +237,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
}
int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
{
struct nvkm_mm_node *node, *prev;
u32 next;
@@ -274,7 +274,8 @@ nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
list_add_tail(&node->nl_entry, &mm->nodes);
list_add_tail(&node->fl_entry, &mm->free);
- node->heap = ++mm->heap_nodes;
+ node->heap = heap;
+ mm->heap_nodes++;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index acd76fd4f6d8..301a5e5b5f7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -102,10 +102,19 @@ nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
}
int
-nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
if (likely(object->func->map))
- return object->func->map(object, addr, size);
+ return object->func->map(object, argv, argc, type, addr, size);
+ return -ENODEV;
+}
+
+int
+nvkm_object_unmap(struct nvkm_object *object)
+{
+ if (likely(object->func->unmap))
+ return object->func->unmap(object);
return -ENODEV;
}
@@ -259,6 +268,7 @@ nvkm_object_dtor(struct nvkm_object *object)
}
nvif_debug(object, "destroy running...\n");
+ nvkm_object_unmap(object);
if (object->func->dtor)
data = object->func->dtor(object);
nvkm_engine_unref(&object->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
index e31a0479add0..16299837a296 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -37,9 +37,17 @@ nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
}
static int
-nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
- return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+ struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+ return nvkm_object_map(oproxy->object, argv, argc, type, addr, size);
+}
+
+static int
+nvkm_oproxy_unmap(struct nvkm_object *object)
+{
+ return nvkm_object_unmap(nvkm_oproxy(object)->object);
}
static int
@@ -171,6 +179,7 @@ nvkm_oproxy_func = {
.mthd = nvkm_oproxy_mthd,
.ntfy = nvkm_oproxy_ntfy,
.map = nvkm_oproxy_map,
+ .unmap = nvkm_oproxy_unmap,
.rd08 = nvkm_oproxy_rd08,
.rd16 = nvkm_oproxy_rd16,
.rd32 = nvkm_oproxy_rd32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 89da47234016..ccba4ae73cc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -21,6 +21,7 @@
*/
#include <core/ramht.h>
#include <core/engine.h>
+#include <core/object.h>
static u32
nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e096a5d9c292..e14643615698 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -927,7 +927,7 @@ nv84_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g84_pci_new,
.therm = g84_therm_new,
@@ -959,7 +959,7 @@ nv86_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g84_pci_new,
.therm = g84_therm_new,
@@ -991,7 +991,7 @@ nv92_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g92_pci_new,
.therm = g84_therm_new,
@@ -1023,7 +1023,7 @@ nv94_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1055,7 +1055,7 @@ nv96_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1087,7 +1087,7 @@ nv98_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1119,7 +1119,7 @@ nva0_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1151,7 +1151,7 @@ nva3_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1185,7 +1185,7 @@ nva5_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1218,7 +1218,7 @@ nva8_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1315,7 +1315,7 @@ nvaf_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1678,7 +1678,7 @@ nve4_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
@@ -1717,7 +1717,7 @@ nve6_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
@@ -1756,7 +1756,7 @@ nve7_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
@@ -1790,7 +1790,7 @@ nvea_chipset = {
.imem = gk20a_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk20a_mmu_new,
.pmu = gk20a_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -1820,7 +1820,7 @@ nvf0_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
@@ -1858,7 +1858,7 @@ nvf1_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
@@ -1896,7 +1896,7 @@ nv106_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
@@ -1934,7 +1934,7 @@ nv108_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
@@ -1958,7 +1958,7 @@ nv108_chipset = {
static const struct nvkm_device_chip
nv117_chipset = {
.name = "GM107",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.clk = gk104_clk_new,
@@ -1972,7 +1972,7 @@ nv117_chipset = {
.imem = nv50_instmem_new,
.ltc = gm107_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -1992,7 +1992,7 @@ nv117_chipset = {
static const struct nvkm_device_chip
nv118_chipset = {
.name = "GM108",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.clk = gk104_clk_new,
@@ -2006,7 +2006,7 @@ nv118_chipset = {
.imem = nv50_instmem_new,
.ltc = gm107_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2026,7 +2026,7 @@ nv118_chipset = {
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2039,7 +2039,7 @@ nv120_chipset = {
.imem = nv50_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2061,7 +2061,7 @@ nv120_chipset = {
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2074,7 +2074,7 @@ nv124_chipset = {
.imem = nv50_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2096,7 +2096,7 @@ nv124_chipset = {
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2109,7 +2109,7 @@ nv126_chipset = {
.imem = nv50_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2131,7 +2131,7 @@ nv126_chipset = {
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
- .bar = gk20a_bar_new,
+ .bar = gm20b_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
.fb = gm20b_fb_new,
@@ -2140,7 +2140,7 @@ nv12b_chipset = {
.imem = gk20a_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm20b_mmu_new,
.pmu = gm20b_pmu_new,
.secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
@@ -2156,7 +2156,7 @@ nv12b_chipset = {
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2168,7 +2168,8 @@ nv130_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gm200_secboot_new,
.pci = gp100_pci_new,
.pmu = gp100_pmu_new,
@@ -2190,7 +2191,7 @@ nv130_chipset = {
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2202,7 +2203,8 @@ nv132_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2224,7 +2226,7 @@ nv132_chipset = {
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2236,7 +2238,8 @@ nv134_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2258,7 +2261,7 @@ nv134_chipset = {
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2270,7 +2273,8 @@ nv136_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2292,7 +2296,7 @@ nv136_chipset = {
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2304,7 +2308,8 @@ nv137_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2326,7 +2331,7 @@ nv137_chipset = {
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2338,7 +2343,8 @@ nv138_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2355,7 +2361,7 @@ nv138_chipset = {
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
- .bar = gk20a_bar_new,
+ .bar = gm20b_bar_new,
.bus = gf100_bus_new,
.fb = gp10b_fb_new,
.fuse = gm107_fuse_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
index f279162f48c6..ebcc5c52fbd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -2,7 +2,7 @@
#ifndef __NVKM_DEVICE_CTRL_H__
#define __NVKM_DEVICE_CTRL_H__
#define nvkm_control(p) container_of((p), struct nvkm_control, object)
-#include <core/device.h>
+#include <core/object.h>
struct nvkm_control {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 74a1ffa425f7..f302d2b5782a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1627,7 +1627,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
const struct nvkm_device_pci_vendor *pciv;
const char *name = NULL;
struct nvkm_device_pci *pdev;
- int ret;
+ int ret, bits;
ret = pci_enable_device(pci_dev);
if (ret)
@@ -1679,17 +1679,17 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
if (ret)
return ret;
- /*
- * Set a preliminary DMA mask based on the .dma_bits member of the
- * MMU subdevice. This allows other subdevices to create DMA mappings
- * in their init() or oneinit() methods, which may be called before the
- * TTM layer sets the DMA mask definitively.
- * This is necessary for platforms where the default DMA mask of 32
- * does not cover any system memory, i.e., when all RAM is > 4 GB.
- */
- if (pdev->device.mmu)
- dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+ /* Set DMA mask based on capabilities reported by the MMU subdev. */
+ if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
+ bits = pdev->device.mmu->dma_bits;
+ else
+ bits = 32;
+
+ ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
+ if (ret && bits != 32) {
+ dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ pdev->device.mmu->dma_bits = 32;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 189ed80e21ff..78597da6313a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -136,7 +136,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
if (ret)
goto free_domain;
- ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+ ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
(1ULL << tdev->func->iommu_bit) >>
tdev->iommu.pgshift, 1);
if (ret)
@@ -216,7 +216,7 @@ nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
if (tdev->irq) {
free_irq(tdev->irq, tdev);
tdev->irq = 0;
- };
+ }
}
static int
@@ -309,8 +309,6 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
/**
* The IOMMU bit defines the upper limit of the GPU-addressable space.
- * This will be refined in nouveau_ttm_init but we need to do it early
- * for instmem to behave properly
*/
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 513ee6b79553..17adcb4e8854 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -206,10 +206,12 @@ nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
}
static int
-nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
+ *type = NVKM_OBJECT_MAP_IO;
*addr = device->func->resource_addr(device, 0);
*size = device->func->resource_size(device, 0);
return 0;
@@ -292,6 +294,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
if (!sclass) {
switch (index) {
case 0: sclass = &nvkm_control_oclass; break;
+ case 1:
+ if (!device->mmu)
+ return -EINVAL;
+ sclass = &device->mmu->user;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 0c0310498afd..723dcbde2ac2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -191,11 +191,13 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
}
static int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
+ *type = NVKM_OBJECT_MAP_IO;
*addr = device->func->resource_addr(device, 0) +
0x640000 + (chan->chid.user * 0x1000);
*size = 0x001000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 5701b3221a54..40681db91a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -2,6 +2,7 @@
#ifndef __NV50_DISP_CHAN_H__
#define __NV50_DISP_CHAN_H__
#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include <core/object.h>
#include "nv50.h"
struct nv50_disp_chan {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index cd6dd8742dc6..4548c031b937 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -148,7 +148,7 @@ void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
#define IOR_MSG(i,l,f,a...) do { \
struct nvkm_ior *_ior = (i); \
- nvkm_##l(&_ior->disp->engine.subdev, "%s: "f, _ior->name, ##a); \
+ nvkm_##l(&_ior->disp->engine.subdev, "%s: "f"\n", _ior->name, ##a); \
} while(0)
#define IOR_WARN(i,f,a...) IOR_MSG((i), warn, f, ##a)
#define IOR_DBG(i,f,a...) IOR_MSG((i), debug, f, ##a)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
index c95942ef8216..49ef7e57aad4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -26,7 +26,7 @@
#include <core/gpuobj.h>
#include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
+#include <subdev/mmu/vmm.h>
#include <nvif/class.h>
@@ -49,8 +49,8 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int ret;
if (dmaobj->clone) {
- struct nv04_mmu *mmu = nv04_mmu(device->mmu);
- struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+ struct nvkm_memory *pgt =
+ device->mmu->vmm->pd->pt[0]->memory;
if (!dmaobj->base.start)
return nvkm_gpuobj_wrap(pgt, pgpuobj);
nvkm_kmap(pgt);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 2e7b4e2105ef..816ccaedfc73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -99,7 +99,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
const u32 base = falcon->addr;
if (!suspend) {
- nvkm_memory_del(&falcon->core);
+ nvkm_memory_unref(&falcon->core);
if (falcon->external) {
vfree(falcon->data.data);
vfree(falcon->code.data);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 660ca7aa95ea..64f6b7654a08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -27,6 +27,7 @@
#include <core/client.h>
#include <core/gpuobj.h>
#include <core/notify.h>
+#include <subdev/mc.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
@@ -278,6 +279,12 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
return 0;
}
+static void
+nvkm_fifo_preinit(struct nvkm_engine *engine)
+{
+ nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
+}
+
static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
@@ -302,6 +309,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
static const struct nvkm_engine_func
nvkm_fifo = {
.dtor = nvkm_fifo_dtor,
+ .preinit = nvkm_fifo_preinit,
.oneinit = nvkm_fifo_oneinit,
.init = nvkm_fifo_init,
.fini = nvkm_fifo_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index fab760ae922f..d83485385934 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -117,8 +117,8 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
if (chan->func->engine_dtor)
chan->func->engine_dtor(chan, engine);
nvkm_object_del(&engn->object);
- if (chan->vm)
- atomic_dec(&chan->vm->engref[engine->subdev.index]);
+ if (chan->vmm)
+ atomic_dec(&chan->vmm->engref[engine->subdev.index]);
}
}
@@ -151,8 +151,8 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
.engine = oclass->engine,
};
- if (chan->vm)
- atomic_inc(&chan->vm->engref[engine->subdev.index]);
+ if (chan->vmm)
+ atomic_inc(&chan->vmm->engref[engine->subdev.index]);
if (engine->func->fifo.cclass) {
ret = engine->func->fifo.cclass(chan, &cclass,
@@ -253,9 +253,11 @@ nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
}
static int
-nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ *type = NVKM_OBJECT_MAP_IO;
*addr = chan->addr;
*size = chan->size;
return 0;
@@ -325,7 +327,10 @@ nvkm_fifo_chan_dtor(struct nvkm_object *object)
if (chan->user)
iounmap(chan->user);
- nvkm_vm_ref(NULL, &chan->vm, NULL);
+ if (chan->vmm) {
+ nvkm_vmm_part(chan->vmm, chan->inst->memory);
+ nvkm_vmm_unref(&chan->vmm);
+ }
nvkm_gpuobj_del(&chan->push);
nvkm_gpuobj_del(&chan->inst);
@@ -347,13 +352,12 @@ nvkm_fifo_chan_func = {
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
- u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
- const struct nvkm_oclass *oclass,
+ u64 hvmm, u64 push, u64 engines, int bar, u32 base,
+ u32 user, const struct nvkm_oclass *oclass,
struct nvkm_fifo_chan *chan)
{
struct nvkm_client *client = oclass->client;
struct nvkm_device *device = fifo->engine.subdev.device;
- struct nvkm_mmu *mmu = device->mmu;
struct nvkm_dmaobj *dmaobj;
unsigned long flags;
int ret;
@@ -382,16 +386,19 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
}
/* channel address space */
- if (!vm && mmu) {
- if (!client->vm || client->vm->mmu == mmu) {
- ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
- if (ret)
- return ret;
- } else {
+ if (hvmm) {
+ struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+
+ if (vmm->mmu != device->mmu)
return -EINVAL;
- }
- } else {
- return -ENOENT;
+
+ ret = nvkm_vmm_join(vmm, chan->inst->memory);
+ if (ret)
+ return ret;
+
+ chan->vmm = nvkm_vmm_ref(vmm);
}
/* allocate channel id */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index 61797c4dd07a..a5c998fe4485 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -229,15 +229,18 @@ g84_fifo_chan_func = {
};
int
-g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
const struct nvkm_oclass *oclass,
struct nv50_fifo_chan *chan)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
+ if (!vmm)
+ return -EINVAL;
+
ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
- 0x10000, 0x1000, false, vm, push,
+ 0x10000, 0x1000, false, vmm, push,
(1ULL << NVKM_ENGINE_BSP) |
(1ULL << NVKM_ENGINE_CE0) |
(1ULL << NVKM_ENGINE_CIPHER) |
@@ -277,9 +280,5 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
if (ret)
return ret;
- ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
- if (ret)
- return ret;
-
- return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+ return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index 27002caba420..b653664e081b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -12,12 +12,9 @@ struct gf100_fifo_chan {
struct list_head head;
bool killed;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-
struct {
struct nvkm_gpuobj *inst;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma;
} engn[NVKM_SUBDEV_NR];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index ec10be2984a9..1208e3d9dbe2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -13,12 +13,9 @@ struct gk104_fifo_chan {
struct list_head head;
bool killed;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-
struct {
struct nvkm_gpuobj *inst;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma;
} engn[NVKM_SUBDEV_NR];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
index 25b60aff40e4..85f7dbf53c99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -206,7 +206,6 @@ void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
nvkm_ramht_del(&chan->ramht);
nvkm_gpuobj_del(&chan->pgd);
nvkm_gpuobj_del(&chan->eng);
@@ -229,15 +228,18 @@ nv50_fifo_chan_func = {
};
int
-nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
const struct nvkm_oclass *oclass,
struct nv50_fifo_chan *chan)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
+ if (!vmm)
+ return -EINVAL;
+
ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
- 0x10000, 0x1000, false, vm, push,
+ 0x10000, 0x1000, false, vmm, push,
(1ULL << NVKM_ENGINE_DMAOBJ) |
(1ULL << NVKM_ENGINE_SW) |
(1ULL << NVKM_ENGINE_GR) |
@@ -262,9 +264,5 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
if (ret)
return ret;
- ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
- if (ret)
- return ret;
-
- return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+ return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index ad9aa157e078..2e3c4005b874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -14,19 +14,18 @@ struct nv50_fifo_chan {
struct nvkm_gpuobj *eng;
struct nvkm_gpuobj *pgd;
struct nvkm_ramht *ramht;
- struct nvkm_vm *vm;
struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
};
-int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
-int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
index caa914074752..fc34cddcd2f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -44,9 +44,9 @@ g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+ nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
"pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -57,7 +57,7 @@ g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index 0a7b6ed5ed28..c213122cf088 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -95,6 +95,7 @@ nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
c = fifo->ramfc;
+ nvkm_kmap(fctx);
do {
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
@@ -102,6 +103,7 @@ nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
} while ((++c)->bits);
+ nvkm_done(fctx);
c = fifo->ramfc;
do {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
index 480bc3777be5..8043718ad150 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -44,9 +44,9 @@ nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+ nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
"pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -57,7 +57,7 @@ nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index cd468ab1db12..f69576868164 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -559,6 +559,7 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int ret;
/* Determine number of PBDMAs by checking valid enable bits. */
@@ -584,12 +585,12 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
if (ret)
return ret;
- ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
+ ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+ &fifo->user.bar);
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
static void
@@ -628,7 +629,7 @@ gf100_fifo_init(struct nvkm_fifo *base)
}
nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
- nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -639,10 +640,11 @@ static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
struct gf100_fifo *fifo = gf100_fifo(base);
- nvkm_vm_put(&fifo->user.bar);
- nvkm_memory_del(&fifo->user.mem);
- nvkm_memory_del(&fifo->runlist.mem[0]);
- nvkm_memory_del(&fifo->runlist.mem[1]);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+ nvkm_memory_unref(&fifo->user.mem);
+ nvkm_memory_unref(&fifo->runlist.mem[0]);
+ nvkm_memory_unref(&fifo->runlist.mem[1]);
return fifo;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index 571a6edb3f97..68f97ba03df6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -27,7 +27,7 @@ struct gf100_fifo {
struct {
struct nvkm_memory *mem;
- struct nvkm_vma bar;
+ struct nvkm_vma *bar;
} user;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index a7e55c422501..84bd703dd897 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -771,6 +771,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int engn, runl, pbid, ret, i, j;
enum nvkm_devidx engidx;
u32 *map;
@@ -834,13 +835,12 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
if (ret)
return ret;
- ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
- &fifo->user.bar);
+ ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+ &fifo->user.bar);
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
static void
@@ -866,7 +866,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
}
- nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -876,14 +876,15 @@ static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
int i;
- nvkm_vm_put(&fifo->user.bar);
- nvkm_memory_del(&fifo->user.mem);
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+ nvkm_memory_unref(&fifo->user.mem);
for (i = 0; i < fifo->runlist_nr; i++) {
- nvkm_memory_del(&fifo->runlist[i].mem[1]);
- nvkm_memory_del(&fifo->runlist[i].mem[0]);
+ nvkm_memory_unref(&fifo->runlist[i].mem[1]);
+ nvkm_memory_unref(&fifo->runlist[i].mem[0]);
}
return fifo;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 0506c5290936..1579785cf941 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -38,7 +38,7 @@ struct gk104_fifo {
struct {
struct nvkm_memory *mem;
- struct nvkm_vma bar;
+ struct nvkm_vma *bar;
} user;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
index 77c2f2a28bf3..2121f517b1dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -45,10 +45,10 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"pushbuf %llx ioffset %016llx "
"ilength %08x\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.ioffset, args->v0.ilength);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -59,7 +59,7 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index f9e0377d3d24..75f9632789b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -111,7 +111,7 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ u64 addr = chan->engn[engine->subdev.index].vma->addr;
nvkm_kmap(inst);
nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
@@ -126,7 +126,7 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
@@ -146,8 +146,13 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+ &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
+ chan->engn[engn].vma, NULL, 0);
}
static void
@@ -190,10 +195,7 @@ gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
- struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
- nvkm_gpuobj_del(&chan->pgd);
- return chan;
+ return gf100_fifo_chan(base);
}
static const struct nvkm_fifo_chan_func
@@ -216,7 +218,6 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct fermi_channel_gpfifo_v0 v0;
} *args = data;
struct gf100_fifo *fifo = gf100_fifo(base);
- struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_object *parent = oclass->parent;
struct gf100_fifo_chan *chan;
u64 usermem, ioffset, ilength;
@@ -224,10 +225,12 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x\n",
- args->v0.version, args->v0.vm, args->v0.ioffset,
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength);
+ if (!args->v0.vmm)
+ return -EINVAL;
} else
return ret;
@@ -239,7 +242,7 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, args->v0.vm, 0,
+ 0x1000, 0x1000, true, args->v0.vmm, 0,
(1ULL << NVKM_ENGINE_CE0) |
(1ULL << NVKM_ENGINE_CE1) |
(1ULL << NVKM_ENGINE_GR) |
@@ -247,29 +250,13 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
(1ULL << NVKM_ENGINE_MSPPP) |
(1ULL << NVKM_ENGINE_MSVLD) |
(1ULL << NVKM_ENGINE_SW),
- 1, fifo->user.bar.offset, 0x1000,
+ 1, fifo->user.bar->addr, 0x1000,
oclass, &chan->base);
if (ret)
return ret;
args->v0.chid = chan->base.chid;
- /* page directory */
- ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
- if (ret)
- return ret;
-
- nvkm_kmap(chan->base.inst);
- nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
- nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
- nvkm_done(chan->base.inst);
-
- ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
- if (ret)
- return ret;
-
/* clear channel control registers */
usermem = chan->base.chid * 0x1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 8abf6f8ef445..80c87521bebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -117,7 +117,7 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ u64 addr = chan->engn[engine->subdev.index].vma->addr;
u32 datalo = lower_32_bits(addr) | 0x00000004;
u32 datahi = upper_32_bits(addr);
nvkm_kmap(inst);
@@ -138,7 +138,7 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
@@ -158,8 +158,13 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+ &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
+ chan->engn[engn].vma, NULL, 0);
}
static void
@@ -203,10 +208,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
static void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
- struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
- nvkm_gpuobj_del(&chan->pgd);
- return chan;
+ return gk104_fifo_chan(base);
}
static const struct nvkm_fifo_chan_func
@@ -229,17 +231,19 @@ struct gk104_fifo_chan_func {
static int
gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
- u64 vm, u64 ioffset, u64 ilength,
+ u64 vmm, u64 ioffset, u64 ilength,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = -1, ret = -ENOSYS, i, j;
u32 engines = 0, present = 0;
u64 subdevs = 0;
u64 usermem;
+ if (!vmm)
+ return -EINVAL;
+
/* Determine which downstream engines are present */
for (i = 0; i < fifo->engine_nr; i++) {
struct nvkm_engine *engine = fifo->engine[i].engine;
@@ -285,30 +289,14 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, vm, 0, subdevs,
- 1, fifo->user.bar.offset, 0x200,
+ 0x1000, 0x1000, true, vmm, 0, subdevs,
+ 1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
return ret;
*chid = chan->base.chid;
- /* Page directory. */
- ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
- if (ret)
- return ret;
-
- nvkm_kmap(chan->base.inst);
- nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
- nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
- nvkm_done(chan->base.inst);
-
- ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
- if (ret)
- return ret;
-
/* Clear channel control registers. */
usermem = chan->base.chid * 0x200;
ilength = order_base_2(ilength / 8);
@@ -373,18 +361,17 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x engine %08x\n",
- args->v0.version, args->v0.vm, args->v0.ioffset,
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.engines);
return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo,
&args->v0.engines,
&args->v0.chid,
- args->v0.vm,
+ args->v0.vmm,
args->v0.ioffset,
args->v0.ilength,
oclass, pobject);
-
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
index c5a7de9db259..d8f28ec1e4a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -45,10 +45,10 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"pushbuf %llx ioffset %016llx "
"ilength %08x\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.ioffset, args->v0.ilength);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -59,7 +59,7 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 66eb12c2b5ba..fa6e094d8068 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -100,8 +100,8 @@ void *
nv50_fifo_dtor(struct nvkm_fifo *base)
{
struct nv50_fifo *fifo = nv50_fifo(base);
- nvkm_memory_del(&fifo->runlist[1]);
- nvkm_memory_del(&fifo->runlist[0]);
+ nvkm_memory_unref(&fifo->runlist[1]);
+ nvkm_memory_unref(&fifo->runlist[0]);
return fifo;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index bc77eea351a5..881015080d83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -986,14 +986,14 @@ gf100_grctx_pack_tpc[] = {
******************************************************************************/
int
-gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, u32 access)
+gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, bool priv)
{
if (info->data) {
info->buffer[info->buffer_nr] = round_up(info->addr, align);
info->addr = info->buffer[info->buffer_nr] + size;
info->data->size = size;
info->data->align = align;
- info->data->access = access;
+ info->data->priv = priv;
info->data++;
return info->buffer_nr++;
}
@@ -1028,9 +1028,8 @@ void
gf100_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -1041,9 +1040,8 @@ void
gf100_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1057,9 +1055,8 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
int gpc, tpc;
u32 bo = 0;
@@ -1267,85 +1264,87 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_mc_unk260(device, 1);
}
+#define CB_RESERVED 0x80000
+
int
gf100_grctx_generate(struct gf100_gr *gr)
{
const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_memory *chan;
+ struct nvkm_memory *inst = NULL;
+ struct nvkm_memory *data = NULL;
+ struct nvkm_vmm *vmm = NULL;
+ struct nvkm_vma *ctx = NULL;
struct gf100_grctx info;
int ret, i;
u64 addr;
- /* allocate memory to for a "channel", which we'll use to generate
- * the default context values
+ /* Allocate memory to for a "channel", which we'll use to generate
+ * the default context values.
*/
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
- 0x1000, true, &chan);
- if (ret) {
- nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
- return ret;
- }
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x1000, 0x1000, true, &inst);
+ if (ret)
+ goto done;
- addr = nvkm_memory_addr(chan);
+ ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "grctx", &vmm);
+ if (ret)
+ goto done;
- /* PGD pointer */
- nvkm_kmap(chan);
- nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
- nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
- nvkm_wo32(chan, 0x0208, 0xffffffff);
- nvkm_wo32(chan, 0x020c, 0x000000ff);
+ vmm->debug = subdev->debug;
- /* PGT[0] pointer */
- nvkm_wo32(chan, 0x1000, 0x00000000);
- nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
+ ret = nvkm_vmm_join(vmm, inst);
+ if (ret)
+ goto done;
- /* identity-map the whole "channel" into its own vm */
- for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
- u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
- nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
- nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
- }
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ CB_RESERVED + gr->size, 0, true, &data);
+ if (ret)
+ goto done;
- /* context pointer (virt) */
- nvkm_wo32(chan, 0x0210, 0x00080004);
- nvkm_wo32(chan, 0x0214, 0x00000000);
- nvkm_done(chan);
+ ret = nvkm_vmm_get(vmm, 0, nvkm_memory_size(data), &ctx);
+ if (ret)
+ goto done;
- nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
- nvkm_wr32(device, 0x100cbc, 0x80000001);
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100c80) & 0x00008000)
- break;
- );
+ ret = nvkm_memory_map(data, 0, vmm, ctx, NULL, 0);
+ if (ret)
+ goto done;
+
+
+ /* Setup context pointer. */
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, 0x0210, lower_32_bits(ctx->addr + CB_RESERVED) | 4);
+ nvkm_wo32(inst, 0x0214, upper_32_bits(ctx->addr + CB_RESERVED));
+ nvkm_done(inst);
- /* setup default state for mmio list construction */
+ /* Setup default state for mmio list construction. */
info.gr = gr;
info.data = gr->mmio_data;
info.mmio = gr->mmio_list;
- info.addr = 0x2000 + (i * 8);
+ info.addr = ctx->addr;
info.buffer_nr = 0;
- /* make channel current */
+ /* Make channel current. */
+ addr = nvkm_memory_addr(inst) >> 12;
if (gr->firmware) {
nvkm_wr32(device, 0x409840, 0x00000030);
- nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr);
nvkm_wr32(device, 0x409504, 0x00000003);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000010)
break;
);
- nvkm_kmap(chan);
- nvkm_wo32(chan, 0x8001c, 1);
- nvkm_wo32(chan, 0x80020, 0);
- nvkm_wo32(chan, 0x80028, 0);
- nvkm_wo32(chan, 0x8002c, 0);
- nvkm_done(chan);
+ nvkm_kmap(data);
+ nvkm_wo32(data, 0x1c, 1);
+ nvkm_wo32(data, 0x20, 0);
+ nvkm_wo32(data, 0x28, 0);
+ nvkm_wo32(data, 0x2c, 0);
+ nvkm_done(data);
} else {
nvkm_wr32(device, 0x409840, 0x80000000);
- nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr);
nvkm_wr32(device, 0x409504, 0x00000001);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x80000000)
@@ -1355,8 +1354,8 @@ gf100_grctx_generate(struct gf100_gr *gr)
grctx->main(gr, &info);
- /* trigger a context unload by unsetting the "next channel valid" bit
- * and faking a context switch interrupt
+ /* Trigger a context unload by unsetting the "next channel valid" bit
+ * and faking a context switch interrupt.
*/
nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
nvkm_wr32(device, 0x409000, 0x00000100);
@@ -1370,17 +1369,21 @@ gf100_grctx_generate(struct gf100_gr *gr)
gr->data = kmalloc(gr->size, GFP_KERNEL);
if (gr->data) {
- nvkm_kmap(chan);
+ nvkm_kmap(data);
for (i = 0; i < gr->size; i += 4)
- gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
- nvkm_done(chan);
+ gr->data[i / 4] = nvkm_ro32(data, CB_RESERVED + i);
+ nvkm_done(data);
ret = 0;
} else {
ret = -ENOMEM;
}
done:
- nvkm_memory_del(&chan);
+ nvkm_vmm_put(vmm, &ctx);
+ nvkm_memory_unref(&data);
+ nvkm_vmm_part(vmm, inst);
+ nvkm_vmm_unref(&vmm);
+ nvkm_memory_unref(&inst);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 2812ca511c9c..5199e5aa0cb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -12,7 +12,7 @@ struct gf100_grctx {
u64 addr;
};
-int gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, u32 access);
+int gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, bool priv);
void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int);
#define mmio_vram(a,b,c,d) gf100_grctx_mmio_data((a), (b), (c), (d))
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 505cdcbfc085..82f71b10c06e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -735,9 +735,8 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 74a64e3fd59a..19301d88577d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -187,9 +187,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index c46b3fdf7203..825c8fd500bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -847,9 +847,8 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -861,9 +860,8 @@ void
gk104_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 4c4b5ab6e46d..9b43d4ce3eaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -867,9 +867,8 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418e24, 0x00000000, s, b);
@@ -881,9 +880,8 @@ void
gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -900,9 +898,8 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
const u32 alpha = grctx->alpha_nr;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 7833bc777a29..88ea322d956c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -33,9 +33,8 @@ void
gp100_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -51,9 +50,8 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
const u32 attrib = grctx->attrib_nr;
const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size, (1 << s), access);
+ const int b = mmio_vram(info, size, (1 << s), false);
const int max_batches = 0xffff;
u32 ao = 0;
u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 80b7ab0bee3a..7a66b4c2eb18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -38,9 +38,8 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
const u32 attrib = grctx->attrib_nr;
const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size, (1 << s), access);
+ const int b = mmio_vram(info, size, (1 << s), false);
const int max_batches = 0xffff;
u32 ao = 0;
u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 99689f4de502..2f8dc107047d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -37,6 +37,7 @@
#include <nvif/class.h>
#include <nvif/cl9097.h>
+#include <nvif/if900d.h>
#include <nvif/unpack.h>
/*******************************************************************************
@@ -327,13 +328,13 @@ gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
if (!gr->firmware) {
nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
- nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+ nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma->addr >> 8);
} else {
nvkm_wo32(*pgpuobj, 0xf4, 0);
nvkm_wo32(*pgpuobj, 0xf8, 0);
nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
- nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
- nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+ nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma->addr));
+ nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma->addr));
nvkm_wo32(*pgpuobj, 0x1c, 1);
nvkm_wo32(*pgpuobj, 0x20, 0);
nvkm_wo32(*pgpuobj, 0x28, 0);
@@ -350,18 +351,13 @@ gf100_gr_chan_dtor(struct nvkm_object *object)
int i;
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
- if (chan->data[i].vma.node) {
- nvkm_vm_unmap(&chan->data[i].vma);
- nvkm_vm_put(&chan->data[i].vma);
- }
- nvkm_memory_del(&chan->data[i].mem);
+ nvkm_vmm_put(chan->vmm, &chan->data[i].vma);
+ nvkm_memory_unref(&chan->data[i].mem);
}
- if (chan->mmio_vma.node) {
- nvkm_vm_unmap(&chan->mmio_vma);
- nvkm_vm_put(&chan->mmio_vma);
- }
- nvkm_memory_del(&chan->mmio);
+ nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
+ nvkm_memory_unref(&chan->mmio);
+ nvkm_vmm_unref(&chan->vmm);
return chan;
}
@@ -380,6 +376,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
struct gf100_gr_data *data = gr->mmio_data;
struct gf100_gr_mmio *mmio = gr->mmio_list;
struct gf100_gr_chan *chan;
+ struct gf100_vmm_map_v0 args = { .priv = 1 };
struct nvkm_device *device = gr->base.engine.subdev.device;
int ret, i;
@@ -387,6 +384,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
return -ENOMEM;
nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
chan->gr = gr;
+ chan->vmm = nvkm_vmm_ref(fifoch->vmm);
*pobject = &chan->object;
/* allocate memory for a "mmio list" buffer that's used by the HUB
@@ -398,12 +396,14 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
if (ret)
return ret;
- ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
- NV_MEM_ACCESS_SYS, &chan->mmio_vma);
+ ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma);
if (ret)
return ret;
- nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+ ret = nvkm_memory_map(chan->mmio, 0, fifoch->vmm,
+ chan->mmio_vma, &args, sizeof(args));
+ if (ret)
+ return ret;
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
@@ -413,13 +413,19 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
if (ret)
return ret;
- ret = nvkm_vm_get(fifoch->vm,
- nvkm_memory_size(chan->data[i].mem), 12,
- data->access, &chan->data[i].vma);
+ ret = nvkm_vmm_get(fifoch->vmm, 12,
+ nvkm_memory_size(chan->data[i].mem),
+ &chan->data[i].vma);
+ if (ret)
+ return ret;
+
+ args.priv = data->priv;
+
+ ret = nvkm_memory_map(chan->data[i].mem, 0, chan->vmm,
+ chan->data[i].vma, &args, sizeof(args));
if (ret)
return ret;
- nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
data++;
}
@@ -430,7 +436,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
u32 data = mmio->data;
if (mmio->buffer >= 0) {
- u64 info = chan->data[mmio->buffer].vma.offset;
+ u64 info = chan->data[mmio->buffer].vma->addr;
data |= info >> mmio->shift;
}
@@ -1855,8 +1861,12 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
int ret;
ret = nvkm_firmware_get(device, fwname, &fw);
- if (ret)
- return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
+ if (ret) {
+ ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
+ if (ret)
+ return -ENODEV;
+ return 0;
+ }
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
@@ -1903,25 +1913,33 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
return 0;
}
+void
+gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
+
+ nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001);
+ nvkm_wr32(device, 0x4188a4, 0x00000000);
+ nvkm_wr32(device, 0x418888, 0x00000000);
+ nvkm_wr32(device, 0x41888c, 0x00000000);
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+ nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(fb->mmu_wr) >> 8);
+ nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
+}
+
int
gf100_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
- struct nvkm_fb *fb = device->fb;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int i;
- nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
+ gr->func->init_gpc_mmu(gr);
gf100_gr_mmio(gr, gr->func->mmio);
@@ -2036,6 +2054,7 @@ gf100_gr_gpccs_ucode = {
static const struct gf100_gr_func
gf100_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf100_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index a36e45a4a635..d7c2adb9b543 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -45,7 +45,7 @@
struct gf100_gr_data {
u32 size;
u32 align;
- u32 access;
+ bool priv;
};
struct gf100_gr_mmio {
@@ -156,18 +156,20 @@ int gp100_gr_init(struct gf100_gr *);
void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
+#include <core/object.h>
struct gf100_gr_chan {
struct nvkm_object object;
struct gf100_gr *gr;
+ struct nvkm_vmm *vmm;
struct nvkm_memory *mmio;
- struct nvkm_vma mmio_vma;
+ struct nvkm_vma *mmio_vma;
int mmio_nr;
struct {
struct nvkm_memory *mem;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma;
} data[4];
};
@@ -253,6 +255,7 @@ extern const struct gf100_gr_init gf100_gr_init_mpc_0[];
extern const struct gf100_gr_init gf100_gr_init_be_0[];
extern const struct gf100_gr_init gf100_gr_init_fe_1[];
extern const struct gf100_gr_init gf100_gr_init_pe_1[];
+void gf100_gr_init_gpc_mmu(struct gf100_gr *);
extern const struct gf100_gr_init gf104_gr_init_ds_0[];
extern const struct gf100_gr_init gf104_gr_init_tex_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index d736dcd55ea2..ec0f11983b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -115,6 +115,7 @@ gf104_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf104_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf104_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 2f0d24498427..cc152eb74123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -106,6 +106,7 @@ gf108_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf108_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf108_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index d1d942eb86af..10d2d73ca8c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -87,6 +87,7 @@ gf110_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf110_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf110_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 0124e468086e..ac09a07c4150 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -123,6 +123,7 @@ gf117_gr_gpccs_ucode = {
static const struct gf100_gr_func
gf117_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf117_gr_pack_mmio,
.fecs.ucode = &gf117_gr_fecs_ucode,
.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 8d8e4cafe28f..7f449ec6f760 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -178,6 +178,7 @@ gf119_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf119_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf119_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index ec22da6c99fc..5e82f94c2245 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,8 +24,6 @@
#include "gf100.h"
#include "ctxgf100.h"
-#include <subdev/fb.h>
-
#include <nvif/class.h>
/*******************************************************************************
@@ -207,21 +205,13 @@ int
gk104_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
- struct nvkm_fb *fb = device->fb;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int i;
- nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
+ gr->func->init_gpc_mmu(gr);
gf100_gr_mmio(gr, gr->func->mmio);
@@ -339,6 +329,7 @@ gk104_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk104_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk104_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f31b171a4102..a38e19b61c1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -183,6 +183,7 @@ gk110_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk110_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk110_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index d76dd178007f..1912c0bfd7ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -103,6 +103,7 @@ gk110b_gr_pack_mmio[] = {
static const struct gf100_gr_func
gk110b_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk110b_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 14bbe6ed02a9..1fc258163f25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -162,6 +162,7 @@ gk208_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk208_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk208_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index df2cd864147c..111c8bb4497b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -60,7 +60,7 @@ void *
nv20_gr_chan_dtor(struct nvkm_object *object)
{
struct nv20_gr_chan *chan = nv20_gr_chan(object);
- nvkm_memory_del(&chan->inst);
+ nvkm_memory_unref(&chan->inst);
return chan;
}
@@ -324,7 +324,7 @@ void *
nv20_gr_dtor(struct nvkm_gr *base)
{
struct nv20_gr *gr = nv20_gr(base);
- nvkm_memory_del(&gr->ctxtab);
+ nvkm_memory_unref(&gr->ctxtab);
return gr;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index ad7e53bb7c23..979dc5f7b32e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -20,6 +20,7 @@ void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
int nv30_gr_init(struct nvkm_gr *);
#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
+#include <core/object.h>
struct nv20_gr_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index 89b773233ac5..731400937edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -17,6 +17,7 @@ void nv40_gr_intr(struct nvkm_gr *);
u64 nv40_gr_units(struct nvkm_gr *);
#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
+#include <core/object.h>
struct nv40_gr_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 567fa4f3e518..5b9d99bee207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -20,6 +20,7 @@ u64 nv50_gr_units(struct nvkm_gr *);
int g84_gr_tlb_flush(struct nvkm_gr *);
#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
+#include <core/object.h>
struct nv50_gr_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 1ac2b4558bec..b31fad8bdaad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -19,6 +19,7 @@ struct nv31_mpeg_func {
};
#define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
+#include <core/object.h>
struct nv31_mpeg_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index 4e528851e9c0..6df880a39019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -24,6 +24,7 @@
#include "priv.h"
#include <core/gpuobj.h>
+#include <core/object.h>
#include <subdev/timer.h>
#include <nvif/class.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 17240d54b1eb..9fad3611a843 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -68,6 +68,7 @@ struct nvkm_specdom {
};
#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
+#include <core/object.h>
struct nvkm_perfdom {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
index b1fa69314e4a..d42862fc43fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -2,9 +2,11 @@
#ifndef __NVKM_SW_CHAN_H__
#define __NVKM_SW_CHAN_H__
#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
-#include "priv.h"
+#include <core/object.h>
#include <core/event.h>
+#include "priv.h"
+
struct nvkm_sw_chan {
const struct nvkm_sw_chan_func *func;
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
index 7050a9e49db1..d7034950ba87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -2,7 +2,7 @@
#ifndef __NVKM_NVSW_H__
#define __NVKM_NVSW_H__
#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
-#include "priv.h"
+#include <core/object.h>
struct nvkm_nvsw {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 06bdb67a0205..70549381e082 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -86,7 +86,7 @@ nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
if (!suspend)
- nvkm_memory_del(&xtensa->gpu_fw);
+ nvkm_memory_unref(&xtensa->gpu_fw);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 1b7f48efd8b1..14be41f24155 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -60,7 +60,7 @@ nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
}
void
-nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
+nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
{
if (!falcon->func->bind_context) {
nvkm_error(falcon->user,
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 669c24028470..9def926f24d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -180,7 +180,7 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
static void
-nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
+nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
u32 inst_loc;
u32 fbif;
@@ -216,7 +216,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
/* Set context */
- switch (nvkm_memory_target(ctx->memory)) {
+ switch (nvkm_memory_target(ctx)) {
case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
@@ -228,7 +228,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
/* Enable context */
nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
nvkm_falcon_wr32(falcon, 0x054,
- ((ctx->addr >> 12) & 0xfffffff) |
+ ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
(inst_loc << 28) | (1 << 30));
nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1e138b337955..e5830453813d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -3,3 +3,5 @@ nvkm-y += nvkm/subdev/bar/nv50.o
nvkm-y += nvkm/subdev/bar/g84.o
nvkm-y += nvkm/subdev/bar/gf100.o
nvkm-y += nvkm/subdev/bar/gk20a.o
+nvkm-y += nvkm/subdev/bar/gm107.o
+nvkm-y += nvkm/subdev/bar/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index c561d148cebc..9646adec57cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -30,39 +30,76 @@ nvkm_bar_flush(struct nvkm_bar *bar)
bar->func->flush(bar);
}
-struct nvkm_vm *
-nvkm_bar_kmap(struct nvkm_bar *bar)
+struct nvkm_vmm *
+nvkm_bar_bar1_vmm(struct nvkm_device *device)
{
- /* disallow kmap() until after vm has been bootstrapped */
- if (bar && bar->func->kmap && bar->subdev.oneinit)
- return bar->func->kmap(bar);
+ return device->bar->func->bar1.vmm(device->bar);
+}
+
+struct nvkm_vmm *
+nvkm_bar_bar2_vmm(struct nvkm_device *device)
+{
+ /* Denies access to BAR2 when it's not initialised, used by INSTMEM
+ * to know when object access needs to go through the BAR0 window.
+ */
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->bar2)
+ return bar->func->bar2.vmm(bar);
return NULL;
}
-int
-nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
+void
+nvkm_bar_bar2_fini(struct nvkm_device *device)
{
- return bar->func->umap(bar, size, type, vma);
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->bar2) {
+ bar->func->bar2.fini(bar);
+ bar->bar2 = false;
+ }
+}
+
+void
+nvkm_bar_bar2_init(struct nvkm_device *device)
+{
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->subdev.oneinit && !bar->bar2 && bar->func->bar2.init) {
+ bar->func->bar2.init(bar);
+ bar->func->bar2.wait(bar);
+ bar->bar2 = true;
+ }
}
static int
-nvkm_bar_oneinit(struct nvkm_subdev *subdev)
+nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- return bar->func->oneinit(bar);
+ bar->func->bar1.fini(bar);
+ return 0;
}
static int
nvkm_bar_init(struct nvkm_subdev *subdev)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- return bar->func->init(bar);
+ bar->func->bar1.init(bar);
+ bar->func->bar1.wait(bar);
+ if (bar->func->init)
+ bar->func->init(bar);
+ return 0;
+}
+
+static int
+nvkm_bar_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_bar *bar = nvkm_bar(subdev);
+ return bar->func->oneinit(bar);
}
static void *
nvkm_bar_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
+ nvkm_bar_bar2_fini(subdev->device);
return bar->func->dtor(bar);
}
@@ -71,6 +108,7 @@ nvkm_bar = {
.dtor = nvkm_bar_dtor,
.oneinit = nvkm_bar_oneinit,
.init = nvkm_bar_init,
+ .fini = nvkm_bar_fini,
};
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
index ef717136c838..87f26f54b481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -44,8 +44,14 @@ g84_bar_func = {
.dtor = nv50_bar_dtor,
.oneinit = nv50_bar_oneinit,
.init = nv50_bar_init,
- .kmap = nv50_bar_kmap,
- .umap = nv50_bar_umap,
+ .bar1.init = nv50_bar_bar1_init,
+ .bar1.fini = nv50_bar_bar1_fini,
+ .bar1.wait = nv50_bar_bar1_wait,
+ .bar1.vmm = nv50_bar_bar1_vmm,
+ .bar2.init = nv50_bar_bar2_init,
+ .bar2.fini = nv50_bar_bar2_fini,
+ .bar2.wait = nv50_bar_bar1_wait,
+ .bar2.vmm = nv50_bar_bar2_vmm,
.flush = g84_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 676c167c95b9..a3ba7f50198b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -23,39 +23,73 @@
*/
#include "gf100.h"
-#include <core/gpuobj.h>
+#include <core/memory.h>
#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
-static struct nvkm_vm *
-gf100_bar_kmap(struct nvkm_bar *base)
+struct nvkm_vmm *
+gf100_bar_bar1_vmm(struct nvkm_bar *base)
{
- return gf100_bar(base)->bar[0].vm;
+ return gf100_bar(base)->bar[1].vmm;
}
-int
-gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
+void
+gf100_bar_bar1_wait(struct nvkm_bar *base)
+{
+ /* NFI why it's twice. */
+ nvkm_bar_flush(base);
+ nvkm_bar_flush(base);
+}
+
+void
+gf100_bar_bar1_fini(struct nvkm_bar *bar)
{
+ nvkm_mask(bar->subdev.device, 0x001704, 0x80000000, 0x00000000);
+}
+
+void
+gf100_bar_bar1_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
struct gf100_bar *bar = gf100_bar(base);
- return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
+ const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
+ nvkm_wr32(device, 0x001704, 0x80000000 | addr);
+}
+
+struct nvkm_vmm *
+gf100_bar_bar2_vmm(struct nvkm_bar *base)
+{
+ return gf100_bar(base)->bar[0].vmm;
+}
+
+void
+gf100_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ nvkm_mask(bar->subdev.device, 0x001714, 0x80000000, 0x00000000);
+}
+
+void
+gf100_bar_bar2_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gf100_bar *bar = gf100_bar(base);
+ u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
+ if (bar->bar2_halve)
+ addr |= 0x40000000;
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
static int
-gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
- struct lock_class_key *key, int bar_nr)
+gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
+ struct lock_class_key *key, int bar_nr)
{
struct nvkm_device *device = bar->base.subdev.device;
- struct nvkm_vm *vm;
resource_size_t bar_len;
int ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
- &bar_vm->mem);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
+ &bar_vm->inst);
if (ret)
return ret;
@@ -63,98 +97,64 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
if (bar_nr == 3 && bar->bar2_halve)
bar_len >>= 1;
- ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
+ ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
+ (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+ atomic_inc(&bar_vm->vmm->engref[NVKM_SUBDEV_BAR]);
+ bar_vm->vmm->debug = bar->base.subdev.debug;
/*
* Bootstrap page table lookup.
*/
if (bar_nr == 3) {
- ret = nvkm_vm_boot(vm, bar_len);
- if (ret) {
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_boot(bar_vm->vmm);
+ if (ret)
return ret;
- }
}
- ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
- if (ret)
- return ret;
-
- nvkm_kmap(bar_vm->mem);
- nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
- nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
- nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
- nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
- nvkm_done(bar_vm->mem);
- return 0;
+ return nvkm_vmm_join(bar_vm->vmm, bar_vm->inst);
}
int
gf100_bar_oneinit(struct nvkm_bar *base)
{
static struct lock_class_key bar1_lock;
- static struct lock_class_key bar3_lock;
+ static struct lock_class_key bar2_lock;
struct gf100_bar *bar = gf100_bar(base);
int ret;
- /* BAR3 */
- if (bar->base.func->kmap) {
- ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
+ /* BAR2 */
+ if (bar->base.func->bar2.init) {
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
if (ret)
return ret;
+
+ bar->base.subdev.oneinit = true;
+ nvkm_bar_bar2_init(bar->base.subdev.device);
}
/* BAR1 */
- ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
if (ret)
return ret;
return 0;
}
-int
-gf100_bar_init(struct nvkm_bar *base)
-{
- struct gf100_bar *bar = gf100_bar(base);
- struct nvkm_device *device = bar->base.subdev.device;
- u32 addr;
-
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
-
- addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
- nvkm_wr32(device, 0x001704, 0x80000000 | addr);
-
- if (bar->bar[0].mem) {
- addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- if (bar->bar2_halve)
- addr |= 0x40000000;
- nvkm_wr32(device, 0x001714, 0x80000000 | addr);
- }
-
- return 0;
-}
-
void *
gf100_bar_dtor(struct nvkm_bar *base)
{
struct gf100_bar *bar = gf100_bar(base);
- nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
- nvkm_gpuobj_del(&bar->bar[1].pgd);
- nvkm_memory_del(&bar->bar[1].mem);
+ nvkm_vmm_part(bar->bar[1].vmm, bar->bar[1].inst);
+ nvkm_vmm_unref(&bar->bar[1].vmm);
+ nvkm_memory_unref(&bar->bar[1].inst);
- if (bar->bar[0].vm) {
- nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
- nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
- }
- nvkm_gpuobj_del(&bar->bar[0].pgd);
- nvkm_memory_del(&bar->bar[0].mem);
+ nvkm_vmm_part(bar->bar[0].vmm, bar->bar[0].inst);
+ nvkm_vmm_unref(&bar->bar[0].vmm);
+ nvkm_memory_unref(&bar->bar[0].inst);
return bar;
}
@@ -175,9 +175,14 @@ static const struct nvkm_bar_func
gf100_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
- .init = gf100_bar_init,
- .kmap = gf100_bar_kmap,
- .umap = gf100_bar_umap,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gf100_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .bar2.init = gf100_bar_bar2_init,
+ .bar2.fini = gf100_bar_bar2_fini,
+ .bar2.wait = gf100_bar_bar1_wait,
+ .bar2.vmm = gf100_bar_bar2_vmm,
.flush = g84_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index 9accd7923788..4f2b66e8d795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -4,22 +4,24 @@
#define gf100_bar(p) container_of((p), struct gf100_bar, base)
#include "priv.h"
-struct gf100_bar_vm {
- struct nvkm_memory *mem;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
+struct gf100_barN {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
};
struct gf100_bar {
struct nvkm_bar base;
bool bar2_halve;
- struct gf100_bar_vm bar[2];
+ struct gf100_barN bar[2];
};
int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
int, struct nvkm_bar **);
void *gf100_bar_dtor(struct nvkm_bar *);
int gf100_bar_oneinit(struct nvkm_bar *);
-int gf100_bar_init(struct nvkm_bar *);
-int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+void gf100_bar_bar1_init(struct nvkm_bar *);
+void gf100_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *gf100_bar_bar1_vmm(struct nvkm_bar *);
+void gf100_bar_bar2_init(struct nvkm_bar *);
+struct nvkm_vmm *gf100_bar_bar2_vmm(struct nvkm_bar *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 9232fab4274c..b10077d38839 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -25,8 +25,10 @@ static const struct nvkm_bar_func
gk20a_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
- .init = gf100_bar_init,
- .umap = gf100_bar_umap,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gf100_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
.flush = g84_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
new file mode 100644
index 000000000000..3ddf9222d935
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/timer.h>
+
+void
+gm107_bar_bar1_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x001710) & 0x00000003))
+ break;
+ );
+}
+
+static void
+gm107_bar_bar2_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x001710) & 0x0000000c))
+ break;
+ );
+}
+
+static const struct nvkm_bar_func
+gm107_bar_func = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gm107_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .bar2.init = gf100_bar_bar2_init,
+ .bar2.fini = gf100_bar_bar2_fini,
+ .bar2.wait = gm107_bar_bar2_wait,
+ .bar2.vmm = gf100_bar_bar2_vmm,
+ .flush = g84_bar_flush,
+};
+
+int
+gm107_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ return gf100_bar_new_(&gm107_bar_func, device, index, pbar);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
new file mode 100644
index 000000000000..950bff1955ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+static const struct nvkm_bar_func
+gm20b_bar_func = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gm107_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .flush = g84_bar_flush,
+};
+
+int
+gm20b_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ int ret = gf100_bar_new_(&gm20b_bar_func, device, index, pbar);
+ if (ret == 0)
+ (*pbar)->iomap_uncached = true;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 6eff637ac301..157b076a1272 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -28,19 +28,6 @@
#include <subdev/mmu.h>
#include <subdev/timer.h>
-struct nvkm_vm *
-nv50_bar_kmap(struct nvkm_bar *base)
-{
- return nv50_bar(base)->bar3_vm;
-}
-
-int
-nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
-{
- struct nv50_bar *bar = nv50_bar(base);
- return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
-}
-
static void
nv50_bar_flush(struct nvkm_bar *base)
{
@@ -56,14 +43,72 @@ nv50_bar_flush(struct nvkm_bar *base)
spin_unlock_irqrestore(&bar->base.lock, flags);
}
+struct nvkm_vmm *
+nv50_bar_bar1_vmm(struct nvkm_bar *base)
+{
+ return nv50_bar(base)->bar1_vmm;
+}
+
+void
+nv50_bar_bar1_wait(struct nvkm_bar *base)
+{
+ nvkm_bar_flush(base);
+}
+
+void
+nv50_bar_bar1_fini(struct nvkm_bar *bar)
+{
+ nvkm_wr32(bar->subdev.device, 0x001708, 0x00000000);
+}
+
+void
+nv50_bar_bar1_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct nv50_bar *bar = nv50_bar(base);
+ nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+}
+
+struct nvkm_vmm *
+nv50_bar_bar2_vmm(struct nvkm_bar *base)
+{
+ return nv50_bar(base)->bar2_vmm;
+}
+
+void
+nv50_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ nvkm_wr32(bar->subdev.device, 0x00170c, 0x00000000);
+}
+
+void
+nv50_bar_bar2_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct nv50_bar *bar = nv50_bar(base);
+ nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+ nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+ nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
+}
+
+void
+nv50_bar_init(struct nvkm_bar *base)
+{
+ struct nv50_bar *bar = nv50_bar(base);
+ struct nvkm_device *device = bar->base.subdev.device;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
+}
+
int
nv50_bar_oneinit(struct nvkm_bar *base)
{
struct nv50_bar *bar = nv50_bar(base);
struct nvkm_device *device = bar->base.subdev.device;
static struct lock_class_key bar1_lock;
- static struct lock_class_key bar3_lock;
- struct nvkm_vm *vm;
+ static struct lock_class_key bar2_lock;
u64 start, limit;
int ret;
@@ -80,51 +125,54 @@ nv50_bar_oneinit(struct nvkm_bar *base)
if (ret)
return ret;
- /* BAR3 */
+ /* BAR2 */
start = 0x0100000000ULL;
limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
+ ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+ &bar2_lock, "bar2", &bar->bar2_vmm);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+ atomic_inc(&bar->bar2_vmm->engref[NVKM_SUBDEV_BAR]);
+ bar->bar2_vmm->debug = bar->base.subdev.debug;
- ret = nvkm_vm_boot(vm, limit-- - start);
+ ret = nvkm_vmm_boot(bar->bar2_vmm);
if (ret)
return ret;
- ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_join(bar->bar2_vmm, bar->mem->memory);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
+ ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar2);
if (ret)
return ret;
- nvkm_kmap(bar->bar3);
- nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
- nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
- nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
- nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+ nvkm_kmap(bar->bar2);
+ nvkm_wo32(bar->bar2, 0x00, 0x7fc00000);
+ nvkm_wo32(bar->bar2, 0x04, lower_32_bits(limit));
+ nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start));
+ nvkm_wo32(bar->bar2, 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
- nvkm_wo32(bar->bar3, 0x10, 0x00000000);
- nvkm_wo32(bar->bar3, 0x14, 0x00000000);
- nvkm_done(bar->bar3);
+ nvkm_wo32(bar->bar2, 0x10, 0x00000000);
+ nvkm_wo32(bar->bar2, 0x14, 0x00000000);
+ nvkm_done(bar->bar2);
+
+ bar->base.subdev.oneinit = true;
+ nvkm_bar_bar2_init(device);
/* BAR1 */
start = 0x0000000000ULL;
limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
- if (ret)
- return ret;
+ ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+ &bar1_lock, "bar1", &bar->bar1_vmm);
- atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+ atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
+ bar->bar1_vmm->debug = bar->base.subdev.debug;
- ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_join(bar->bar1_vmm, bar->mem->memory);
if (ret)
return ret;
@@ -144,45 +192,21 @@ nv50_bar_oneinit(struct nvkm_bar *base)
return 0;
}
-int
-nv50_bar_init(struct nvkm_bar *base)
-{
- struct nv50_bar *bar = nv50_bar(base);
- struct nvkm_device *device = bar->base.subdev.device;
- int i;
-
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
- nvkm_wr32(device, 0x100c80, 0x00060001);
- if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
- break;
- ) < 0)
- return -EBUSY;
-
- nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
- nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
- nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
- nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
- for (i = 0; i < 8; i++)
- nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
- return 0;
-}
-
void *
nv50_bar_dtor(struct nvkm_bar *base)
{
struct nv50_bar *bar = nv50_bar(base);
- nvkm_gpuobj_del(&bar->bar1);
- nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
- nvkm_gpuobj_del(&bar->bar3);
- if (bar->bar3_vm) {
- nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
- nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+ if (bar->mem) {
+ nvkm_gpuobj_del(&bar->bar1);
+ nvkm_vmm_part(bar->bar1_vmm, bar->mem->memory);
+ nvkm_vmm_unref(&bar->bar1_vmm);
+ nvkm_gpuobj_del(&bar->bar2);
+ nvkm_vmm_part(bar->bar2_vmm, bar->mem->memory);
+ nvkm_vmm_unref(&bar->bar2_vmm);
+ nvkm_gpuobj_del(&bar->pgd);
+ nvkm_gpuobj_del(&bar->pad);
+ nvkm_gpuobj_del(&bar->mem);
}
- nvkm_gpuobj_del(&bar->pgd);
- nvkm_gpuobj_del(&bar->pad);
- nvkm_gpuobj_del(&bar->mem);
return bar;
}
@@ -204,8 +228,14 @@ nv50_bar_func = {
.dtor = nv50_bar_dtor,
.oneinit = nv50_bar_oneinit,
.init = nv50_bar_init,
- .kmap = nv50_bar_kmap,
- .umap = nv50_bar_umap,
+ .bar1.init = nv50_bar_bar1_init,
+ .bar1.fini = nv50_bar_bar1_fini,
+ .bar1.wait = nv50_bar_bar1_wait,
+ .bar1.vmm = nv50_bar_bar1_vmm,
+ .bar2.init = nv50_bar_bar2_init,
+ .bar2.fini = nv50_bar_bar2_fini,
+ .bar2.wait = nv50_bar_bar1_wait,
+ .bar2.vmm = nv50_bar_bar2_vmm,
.flush = nv50_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index ce9ab9110b08..2fe833f6d9f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -10,18 +10,20 @@ struct nv50_bar {
struct nvkm_gpuobj *mem;
struct nvkm_gpuobj *pad;
struct nvkm_gpuobj *pgd;
- struct nvkm_vm *bar1_vm;
+ struct nvkm_vmm *bar1_vmm;
struct nvkm_gpuobj *bar1;
- struct nvkm_vm *bar3_vm;
- struct nvkm_gpuobj *bar3;
+ struct nvkm_vmm *bar2_vmm;
+ struct nvkm_gpuobj *bar2;
};
int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
int, u32 pgd_addr, struct nvkm_bar **);
void *nv50_bar_dtor(struct nvkm_bar *);
int nv50_bar_oneinit(struct nvkm_bar *);
-int nv50_bar_init(struct nvkm_bar *);
-struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
-int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
-void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
+void nv50_bar_init(struct nvkm_bar *);
+void nv50_bar_bar1_init(struct nvkm_bar *);
+void nv50_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *nv50_bar_bar1_vmm(struct nvkm_bar *);
+void nv50_bar_bar2_init(struct nvkm_bar *);
+struct nvkm_vmm *nv50_bar_bar2_vmm(struct nvkm_bar *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 63d111c8afd4..01ba5b26666e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -10,11 +10,25 @@ void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
struct nvkm_bar_func {
void *(*dtor)(struct nvkm_bar *);
int (*oneinit)(struct nvkm_bar *);
- int (*init)(struct nvkm_bar *);
- struct nvkm_vm *(*kmap)(struct nvkm_bar *);
- int (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
+ void (*init)(struct nvkm_bar *);
+
+ struct {
+ void (*init)(struct nvkm_bar *);
+ void (*fini)(struct nvkm_bar *);
+ void (*wait)(struct nvkm_bar *);
+ struct nvkm_vmm *(*vmm)(struct nvkm_bar *);
+ } bar1, bar2;
+
void (*flush)(struct nvkm_bar *);
};
+void nv50_bar_bar1_fini(struct nvkm_bar *);
+void nv50_bar_bar2_fini(struct nvkm_bar *);
+
void g84_bar_flush(struct nvkm_bar *);
+
+void gf100_bar_bar1_fini(struct nvkm_bar *);
+void gf100_bar_bar2_fini(struct nvkm_bar *);
+
+void gm107_bar_bar1_wait(struct nvkm_bar *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 23caef8df17f..73e463ed55c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -99,7 +99,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
res_start = 0x5;
break;
- };
+ }
if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
continue;
@@ -115,7 +115,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
default:
rail->resistor_count = 0;
break;
- };
+ }
for (r = 0; r < rail->resistor_count; ++r) {
rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index b58ee99f7bfc..9cc10e438b3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -36,6 +36,8 @@
#include <subdev/i2c.h>
#include <subdev/vga.h>
+#include <linux/kernel.h>
+
#define bioslog(lvl, fmt, args...) do { \
nvkm_printk(init->subdev, lvl, info, "0x%08x[%c]: "fmt, \
init->offset, init_exec(init) ? \
@@ -2271,8 +2273,6 @@ static struct nvbios_init_opcode {
[0xaa] = { init_reserved },
};
-#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
-
int
nvbios_exec(struct nvbios_init *init)
{
@@ -2281,7 +2281,8 @@ nvbios_exec(struct nvbios_init *init)
init->nested++;
while (init->offset) {
u8 opcode = nvbios_rd08(bios, init->offset);
- if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
+ if (opcode >= ARRAY_SIZE(init_opcode) ||
+ !init_opcode[opcode].exec) {
error("unknown opcode 0x%02x\n", opcode);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 7e83c3985020..20ff5173cf8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -115,16 +115,21 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
switch (min_t(u8, *hdr, 25)) {
case 25:
p->timing_10_24 = nvbios_rd08(bios, data + 0x18);
+ /* fall through */
case 24:
case 23:
case 22:
p->timing_10_21 = nvbios_rd08(bios, data + 0x15);
+ /* fall through */
case 21:
p->timing_10_20 = nvbios_rd08(bios, data + 0x14);
+ /* fall through */
case 20:
p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
+ /* fall through */
case 19:
p->timing_10_18 = nvbios_rd08(bios, data + 0x12);
+ /* fall through */
case 18:
case 17:
p->timing_10_16 = nvbios_rd08(bios, data + 0x10);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 158977f8a6e6..c3dae05348eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -119,11 +119,11 @@ powerctrl_1_shift(int chip_version, int reg)
switch (reg) {
case 0x680520:
- shift += 4;
+ shift += 4; /* fall through */
case 0x680508:
- shift += 4;
+ shift += 4; /* fall through */
case 0x680504:
- shift += 4;
+ shift += 4; /* fall through */
case 0x680500:
shift += 4;
}
@@ -245,11 +245,11 @@ setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
switch (reg1) {
case 0x680504:
- shift_c040 += 2;
+ shift_c040 += 2; /* fall through */
case 0x680500:
- shift_c040 += 2;
+ shift_c040 += 2; /* fall through */
case 0x680520:
- shift_c040 += 2;
+ shift_c040 += 2; /* fall through */
case 0x680508:
shift_c040 += 2;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index a7049c041594..73b5d46104bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -31,12 +31,6 @@
#include <engine/gr.h>
#include <engine/mpeg.h>
-bool
-nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
- return fb->func->memtype_valid(fb, memtype);
-}
-
void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
@@ -100,6 +94,7 @@ static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
+ u32 tags = 0;
if (fb->func->ram_new) {
int ret = fb->func->ram_new(fb, &fb->ram);
@@ -115,7 +110,16 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
return ret;
}
- return 0;
+ /* Initialise compression tag allocator.
+ *
+ * LTC oneinit() will override this on Fermi and newer.
+ */
+ if (fb->func->tags) {
+ tags = fb->func->tags(fb);
+ nvkm_debug(subdev, "%d comptags\n", tags);
+ }
+
+ return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
}
static int
@@ -135,8 +139,13 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
- if (fb->func->init_page)
- fb->func->init_page(fb);
+
+ if (fb->func->init_page) {
+ ret = fb->func->init_page(fb);
+ if (WARN_ON(ret))
+ return ret;
+ }
+
if (fb->func->init_unkn)
fb->func->init_unkn(fb);
return 0;
@@ -148,12 +157,13 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
struct nvkm_fb *fb = nvkm_fb(subdev);
int i;
- nvkm_memory_del(&fb->mmu_wr);
- nvkm_memory_del(&fb->mmu_rd);
+ nvkm_memory_unref(&fb->mmu_wr);
+ nvkm_memory_unref(&fb->mmu_rd);
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.fini(fb, i, &fb->tile.region[i]);
+ nvkm_mm_fini(&fb->tags);
nvkm_ram_del(&fb->ram);
if (fb->func->dtor)
@@ -176,7 +186,8 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
- fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
+ fb->func->default_bigpage);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 9c28392d07e4..06bf95c0c549 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -27,6 +27,7 @@
static const struct nv50_fb_func
g84_fb = {
.ram_new = nv50_ram_new,
+ .tags = nv20_fb_tags,
.trap = 0x001d07ff,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index a239e73562c8..47d28c279707 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -27,15 +27,6 @@
#include <core/memory.h>
#include <core/option.h>
-extern const u8 gf100_pte_storage_type_map[256];
-
-bool
-gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
- u8 memtype = (tile_flags & 0x0000ff00) >> 8;
- return likely((gf100_pte_storage_type_map[memtype] != 0xff));
-}
-
void
gf100_fb_intr(struct nvkm_fb *base)
{
@@ -80,20 +71,17 @@ gf100_fb_oneinit(struct nvkm_fb *base)
return 0;
}
-void
+int
gf100_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
- case 16:
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
- break;
- case 17:
+ case 16: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); break;
+ case 17: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); break;
default:
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
- fb->page = 17;
- break;
+ return -EINVAL;
}
+ return 0;
}
void
@@ -143,7 +131,7 @@ gf100_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 1756f7b02858..ab261310753a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -18,7 +18,5 @@ void gf100_fb_intr(struct nvkm_fb *);
void gp100_fb_init(struct nvkm_fb *);
-void gm200_fb_init_page(struct nvkm_fb *fb);
void gm200_fb_init(struct nvkm_fb *base);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 56af84aa333b..4a9f463745b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -32,7 +32,7 @@ gf108_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf108_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 4245e2e6e604..0a6e8eaad42c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -32,7 +32,7 @@ gk104_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 5d34d6136616..a7e29b125094 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -30,7 +30,7 @@ gk20a_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index db699025f546..69c876d5d1c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -32,7 +32,7 @@ gm107_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index d83da5ddbc1e..8137e19d3292 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -26,22 +26,18 @@
#include <core/memory.h>
-void
+int
gm200_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
- case 16:
- nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
- break;
- case 17:
- nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
- break;
+ case 16: nvkm_mask(device, 0x100c80, 0x00001801, 0x00001001); break;
+ case 17: nvkm_mask(device, 0x100c80, 0x00001801, 0x00000000); break;
+ case 0: nvkm_mask(device, 0x100c80, 0x00001800, 0x00001800); break;
default:
- nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
- fb->page = 0;
- break;
+ return -EINVAL;
}
+ return 0;
}
void
@@ -69,7 +65,7 @@ gm200_fb = {
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm200_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 0 /* per-instance. */,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
index b87c233bcd6d..12db61e31128 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -30,7 +30,7 @@ gm20b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 0 /* per-instance. */,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 98474aec1921..147f69b30cd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -59,7 +59,6 @@ gp100_fb = {
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 73b4ae1c73dc..b84b9861ef26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -33,7 +33,6 @@ gp102_fb = {
.init = gp100_fb_init,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
index f2b1fbf428d5..af8e43979dc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -28,7 +28,6 @@ gp10b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
- .memtype_valid = gf100_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index ebb30608d5ef..9266559b45f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -27,6 +27,7 @@
static const struct nv50_fb_func
gt215_fb = {
.ram_new = gt215_ram_new,
+ .tags = nv20_fb_tags,
.trap = 0x000d0fff,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index 8ff2e5db4571..c886664533c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -25,14 +25,6 @@
#include "ram.h"
#include "regsnv04.h"
-bool
-nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
- if (!(tile_flags & 0xff00))
- return true;
- return false;
-}
-
static void
nv04_fb_init(struct nvkm_fb *fb)
{
@@ -49,7 +41,6 @@ static const struct nvkm_fb_func
nv04_fb = {
.init = nv04_fb_init,
.ram_new = nv04_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index e8c44f5a3d84..c998b7e96aa3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -61,7 +61,6 @@ nv10_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv10_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 2ae0beb87567..7b9f04f44af8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -33,7 +33,6 @@ nv1a_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv1a_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index 126865dfe777..a021d21ff153 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -63,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
- nvkm_mm_free(&fb->ram->tags, &tile->tag);
+ nvkm_mm_free(&fb->tags, &tile->tag);
}
void
@@ -77,15 +77,22 @@ nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
}
+u32
+nv20_fb_tags(struct nvkm_fb *fb)
+{
+ const u32 tags = nvkm_rd32(fb->subdev.device, 0x100320);
+ return tags ? tags + 1 : 0;
+}
+
static const struct nvkm_fb_func
nv20_fb = {
+ .tags = nv20_fb_tags,
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv20_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index c56746d2a502..7709f5fe9a45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -44,13 +44,13 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
static const struct nvkm_fb_func
nv25_fb = {
+ .tags = nv20_fb_tags,
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv25_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 2a7c4831b821..8aa782666507 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -116,6 +116,7 @@ nv30_fb_init(struct nvkm_fb *fb)
static const struct nvkm_fb_func
nv30_fb = {
+ .tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -123,7 +124,6 @@ nv30_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index 1604b3789ad1..6e83dcff72e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -45,6 +45,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
static const struct nvkm_fb_func
nv35_fb = {
+ .tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv35_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index 80cc0a6e3416..2a07617bb44c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -45,6 +45,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
static const struct nvkm_fb_func
nv36_fb = {
+ .tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv36_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index deec46a310f8..955160778b5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
if ( (flags & 2) &&
- !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -51,6 +51,7 @@ nv40_fb_init(struct nvkm_fb *fb)
static const struct nvkm_fb_func
nv40_fb = {
+ .tags = nv20_fb_tags,
.init = nv40_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -58,7 +59,6 @@ nv40_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv40_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index 79e57dd5a00f..b77f08d34cc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -45,6 +45,7 @@ nv41_fb_init(struct nvkm_fb *fb)
static const struct nvkm_fb_func
nv41_fb = {
+ .tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 12,
.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv41_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 06246cce5ec4..b59dc486083d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -62,7 +62,6 @@ nv44_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 3598a1aa65be..cab7d20fa039 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -48,7 +48,6 @@ nv46_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index c505e4429314..a8b0ad4c871d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -28,6 +28,7 @@
static const struct nvkm_fb_func
nv47_fb = {
+ .tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
@@ -35,7 +36,6 @@ nv47_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 7b91b9f170e5..d0b317bb0252 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -28,6 +28,7 @@
static const struct nvkm_fb_func
nv49_fb = {
+ .tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
@@ -35,7 +36,6 @@ nv49_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv49_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 4e98210c1b1c..6a6f0c086071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -34,7 +34,6 @@ nv4e_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0595e0722bfc..b2f5bf8144ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -28,18 +28,6 @@
#include <core/enum.h>
#include <engine/fifo.h>
-int
-nv50_fb_memtype[0x80] = {
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
- 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
-};
-
static int
nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
{
@@ -47,12 +35,6 @@ nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
return fb->func->ram_new(&fb->base, pram);
}
-static bool
-nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
- return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
-}
-
static const struct nvkm_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX" },
{ 0x00000001, "NOTIFY" },
@@ -244,6 +226,15 @@ nv50_fb_init(struct nvkm_fb *base)
nvkm_wr32(device, 0x100c90, fb->func->trap);
}
+static u32
+nv50_fb_tags(struct nvkm_fb *base)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ if (fb->func->tags)
+ return fb->func->tags(&fb->base);
+ return 0;
+}
+
static void *
nv50_fb_dtor(struct nvkm_fb *base)
{
@@ -262,11 +253,11 @@ nv50_fb_dtor(struct nvkm_fb *base)
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
+ .tags = nv50_fb_tags,
.oneinit = nv50_fb_oneinit,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
- .memtype_valid = nv50_fb_memtype_valid,
};
int
@@ -287,6 +278,7 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
static const struct nv50_fb_func
nv50_fb = {
.ram_new = nv50_ram_new,
+ .tags = nv20_fb_tags,
.trap = 0x000707ff,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index a37758c76268..dacc696387b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -13,10 +13,10 @@ struct nv50_fb {
struct nv50_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
+ u32 (*tags)(struct nvkm_fb *);
u32 trap;
};
int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
struct nvkm_fb **pfb);
-extern int nv50_fb_memtype[0x80];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 8e87b887d4f5..9351188d5d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -7,9 +7,10 @@ struct nvkm_bios;
struct nvkm_fb_func {
void *(*dtor)(struct nvkm_fb *);
+ u32 (*tags)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
- void (*init_page)(struct nvkm_fb *);
+ int (*init_page)(struct nvkm_fb *);
void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
@@ -25,7 +26,7 @@ struct nvkm_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
- bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+ u8 default_bigpage;
};
void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
@@ -34,13 +35,12 @@ int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
int index, struct nvkm_fb **);
int nvkm_fb_bios_memtype(struct nvkm_bios *);
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-
void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+u32 nv20_fb_tags(struct nvkm_fb *);
void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
@@ -63,8 +63,7 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
int gf100_fb_oneinit(struct nvkm_fb *);
-void gf100_fb_init_page(struct nvkm_fb *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+int gf100_fb_init_page(struct nvkm_fb *);
-void gm200_fb_init_page(struct nvkm_fb *);
+int gm200_fb_init_page(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index c17d559dbfbe..24c7bd505731 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -21,8 +21,132 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
#include "ram.h"
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+struct nvkm_vram {
+ struct nvkm_memory memory;
+ struct nvkm_ram *ram;
+ u8 page;
+ struct nvkm_mm_node *mn;
+};
+
+static int
+nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_vram *vram = nvkm_vram(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &vram->memory,
+ .offset = offset,
+ .mem = vram->mn,
+ };
+
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static u64
+nvkm_vram_size(struct nvkm_memory *memory)
+{
+ return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+static u64
+nvkm_vram_addr(struct nvkm_memory *memory)
+{
+ struct nvkm_vram *vram = nvkm_vram(memory);
+ if (!nvkm_mm_contiguous(vram->mn))
+ return ~0ULL;
+ return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+static u8
+nvkm_vram_page(struct nvkm_memory *memory)
+{
+ return nvkm_vram(memory)->page;
+}
+
+static enum nvkm_memory_target
+nvkm_vram_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_VRAM;
+}
+
+static void *
+nvkm_vram_dtor(struct nvkm_memory *memory)
+{
+ struct nvkm_vram *vram = nvkm_vram(memory);
+ struct nvkm_mm_node *next = vram->mn;
+ struct nvkm_mm_node *node;
+ mutex_lock(&vram->ram->fb->subdev.mutex);
+ while ((node = next)) {
+ next = node->next;
+ nvkm_mm_free(&vram->ram->vram, &node);
+ }
+ mutex_unlock(&vram->ram->fb->subdev.mutex);
+ return vram;
+}
+
+static const struct nvkm_memory_func
+nvkm_vram = {
+ .dtor = nvkm_vram_dtor,
+ .target = nvkm_vram_target,
+ .page = nvkm_vram_page,
+ .addr = nvkm_vram_addr,
+ .size = nvkm_vram_size,
+ .map = nvkm_vram_map,
+};
+
+int
+nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
+ bool contig, bool back, struct nvkm_memory **pmemory)
+{
+ struct nvkm_ram *ram;
+ struct nvkm_mm *mm;
+ struct nvkm_mm_node **node, *r;
+ struct nvkm_vram *vram;
+ u8 page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
+ u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
+ u32 max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
+ u32 min = contig ? max : align;
+ int ret;
+
+ if (!device->fb || !(ram = device->fb->ram))
+ return -ENODEV;
+ ram = device->fb->ram;
+ mm = &ram->vram;
+
+ if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_memory_ctor(&nvkm_vram, &vram->memory);
+ vram->ram = ram;
+ vram->page = page;
+ *pmemory = &vram->memory;
+
+ mutex_lock(&ram->fb->subdev.mutex);
+ node = &vram->mn;
+ do {
+ if (back)
+ ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
+ else
+ ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
+ if (ret) {
+ mutex_unlock(&ram->fb->subdev.mutex);
+ nvkm_memory_unref(pmemory);
+ return ret;
+ }
+
+ *node = r;
+ node = &r->next;
+ max -= r->length;
+ } while (max);
+ mutex_unlock(&ram->fb->subdev.mutex);
+ return 0;
+}
+
int
nvkm_ram_init(struct nvkm_ram *ram)
{
@@ -38,7 +162,6 @@ nvkm_ram_del(struct nvkm_ram **pram)
if (ram && !WARN_ON(!ram->func)) {
if (ram->func->dtor)
*pram = ram->func->dtor(ram);
- nvkm_mm_fini(&ram->tags);
nvkm_mm_fini(&ram->vram);
kfree(*pram);
*pram = NULL;
@@ -47,8 +170,7 @@ nvkm_ram_del(struct nvkm_ram **pram)
int
nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
- enum nvkm_ram_type type, u64 size, u32 tags,
- struct nvkm_ram *ram)
+ enum nvkm_ram_type type, u64 size, struct nvkm_ram *ram)
{
static const char *name[] = {
[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
@@ -73,28 +195,20 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
ram->size = size;
if (!nvkm_mm_initialised(&ram->vram)) {
- ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
+ size >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
}
- if (!nvkm_mm_initialised(&ram->tags)) {
- ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
- if (ret)
- return ret;
-
- nvkm_debug(subdev, "%d compression tags\n", tags);
- }
-
return 0;
}
int
nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
- enum nvkm_ram_type type, u64 size, u32 tags,
- struct nvkm_ram **pram)
+ enum nvkm_ram_type type, u64 size, struct nvkm_ram **pram)
{
if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
+ return nvkm_ram_ctor(func, fb, type, size, *pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b2122d261f8d..330132e95b6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -4,11 +4,9 @@
#include "priv.h"
int nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
- enum nvkm_ram_type, u64 size, u32 tags,
- struct nvkm_ram *);
+ enum nvkm_ram_type, u64 size, struct nvkm_ram *);
int nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
- enum nvkm_ram_type, u64 size, u32 tags,
- struct nvkm_ram **);
+ enum nvkm_ram_type, u64 size, struct nvkm_ram **);
void nvkm_ram_del(struct nvkm_ram **);
int nvkm_ram_init(struct nvkm_ram *);
@@ -16,9 +14,6 @@ extern const struct nvkm_ram_func nv04_ram_func;
int nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
struct nvkm_ram *);
-int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
-void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
-void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
int gf100_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
struct nvkm_ram **);
@@ -29,8 +24,6 @@ u32 gf100_ram_probe_fbp(const struct nvkm_ram_func *,
u32 gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
struct nvkm_device *, int, int *);
u32 gf100_ram_probe_fbpa_amount(struct nvkm_device *, int);
-int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
-void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
int gf100_ram_init(struct nvkm_ram *);
int gf100_ram_calc(struct nvkm_ram *, u32);
int gf100_ram_prog(struct nvkm_ram *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 4a9bd4f1cb93..ac87a3b6b7c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -32,7 +32,6 @@
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
-#include <subdev/ltc.h>
struct gf100_ramfuc {
struct ramfuc base;
@@ -420,86 +419,6 @@ gf100_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->fuc, false);
}
-void
-gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
-{
- struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
- struct nvkm_mem *mem = *pmem;
-
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&ram->fb->subdev.mutex);
- if (mem->tag)
- nvkm_ltc_tags_free(ltc, &mem->tag);
- __nv50_ram_put(ram, mem);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- kfree(mem);
-}
-
-int
-gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nvkm_mem **pmem)
-{
- struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
- struct nvkm_mm *mm = &ram->vram;
- struct nvkm_mm_node **node, *r;
- struct nvkm_mem *mem;
- int type = (memtype & 0x0ff);
- int back = (memtype & 0x800);
- const bool comp = gf100_pte_storage_type_map[type] != type;
- int ret;
-
- size >>= NVKM_RAM_MM_SHIFT;
- align >>= NVKM_RAM_MM_SHIFT;
- ncmin >>= NVKM_RAM_MM_SHIFT;
- if (!ncmin)
- ncmin = size;
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- mem->size = size;
-
- mutex_lock(&ram->fb->subdev.mutex);
- if (comp) {
- /* compression only works with lpages */
- if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
- int n = size >> 5;
- nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
- }
-
- if (unlikely(!mem->tag))
- type = gf100_pte_storage_type_map[type];
- }
- mem->memtype = type;
-
- node = &mem->mem;
- do {
- if (back)
- ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
- else
- ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
- if (ret) {
- mutex_unlock(&ram->fb->subdev.mutex);
- ram->func->put(ram, &mem);
- return ret;
- }
-
- *node = r;
- node = &r->next;
- size -= r->length;
- } while (size);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
- *pmem = mem;
- return 0;
-}
-
int
gf100_ram_init(struct nvkm_ram *base)
{
@@ -604,7 +523,7 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);
- ret = nvkm_ram_ctor(func, fb, type, total, 0, ram);
+ ret = nvkm_ram_ctor(func, fb, type, total, ram);
if (ret)
return ret;
@@ -617,7 +536,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
*/
if (lower != total) {
/* The common memory amount is addressed normally. */
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
@@ -625,13 +545,15 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
/* And the rest is much higher in the physical address
* space, and may not be usable for certain operations.
*/
- ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,
+ ubase >> NVKM_RAM_MM_SHIFT,
(usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
} else {
/* GPUs without mixed-memory are a lot nicer... */
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(total - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
if (ret)
@@ -738,8 +660,6 @@ gf100_ram = {
.probe_fbp_amount = gf100_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
index 985ec64cf369..70a06e3cd55a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -48,8 +48,6 @@ gf108_ram = {
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 75814f15eb53..8bcb7e79a0cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1704,8 +1704,6 @@ gk104_ram = {
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 3f0b56347291..27c68e3f9772 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -39,8 +39,6 @@ gm107_ram = {
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
index fd8facf90476..6b0cac1fe7b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -54,8 +54,6 @@ gm200_ram = {
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index df8a87333b67..adb62a6beb63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -84,8 +84,6 @@ gp100_ram = {
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gp100_ram_probe_fbpa,
.init = gp100_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index f10664372161..920b3d347803 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -26,6 +26,7 @@
#include "ram.h"
#include "ramfuc.h"
+#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0205.h>
@@ -86,7 +87,7 @@ struct gt215_ltrain {
u32 r_100720;
u32 r_1111e0;
u32 r_111400;
- struct nvkm_mem *mem;
+ struct nvkm_memory *memory;
};
struct gt215_ram {
@@ -279,10 +280,10 @@ gt215_link_train_init(struct gt215_ram *ram)
struct gt215_ltrain *train = &ram->ltrain;
struct nvkm_device *device = ram->base.fb->subdev.device;
struct nvkm_bios *bios = device->bios;
- struct nvkm_mem *mem;
struct nvbios_M0205E M0205E;
u8 ver, hdr, cnt, len;
u32 r001700;
+ u64 addr;
int ret, i = 0;
train->state = NVA3_TRAIN_UNSUPPORTED;
@@ -297,14 +298,14 @@ gt215_link_train_init(struct gt215_ram *ram)
train->state = NVA3_TRAIN_ONCE;
- ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
- &ram->ltrain.mem);
+ ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 16, 0x8000,
+ true, true, &ram->ltrain.memory);
if (ret)
return ret;
- mem = ram->ltrain.mem;
+ addr = nvkm_memory_addr(ram->ltrain.memory);
- nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
+ nvkm_wr32(device, 0x100538, 0x10000000 | (addr >> 16));
nvkm_wr32(device, 0x1005a8, 0x0000ffff);
nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
@@ -320,7 +321,7 @@ gt215_link_train_init(struct gt215_ram *ram)
/* And upload the pattern */
r001700 = nvkm_rd32(device, 0x1700);
- nvkm_wr32(device, 0x1700, mem->offset >> 16);
+ nvkm_wr32(device, 0x1700, addr >> 16);
for (i = 0; i < 16; i++)
nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
for (i = 0; i < 16; i++)
@@ -336,8 +337,7 @@ gt215_link_train_init(struct gt215_ram *ram)
static void
gt215_link_train_fini(struct gt215_ram *ram)
{
- if (ram->ltrain.mem)
- ram->base.func->put(&ram->base, &ram->ltrain.mem);
+ nvkm_memory_unref(&ram->ltrain.memory);
}
/*
@@ -931,8 +931,6 @@ static const struct nvkm_ram_func
gt215_ram_func = {
.dtor = gt215_ram_dtor,
.init = gt215_ram_init,
- .get = nv50_ram_get,
- .put = nv50_ram_put,
.calc = gt215_ram_calc,
.prog = gt215_ram_prog,
.tidy = gt215_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index 017a91de74a0..7de18e53ef45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -53,8 +53,6 @@ mcp77_ram_init(struct nvkm_ram *base)
static const struct nvkm_ram_func
mcp77_ram_func = {
.init = mcp77_ram_init,
- .get = nv50_ram_get,
- .put = nv50_ram_put,
};
int
@@ -73,7 +71,7 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
*pram = &ram->base;
ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
- size, 0, &ram->base);
+ size, &ram->base);
if (ret)
return ret;
@@ -81,7 +79,8 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
ram->base.stolen = base;
nvkm_mm_fini(&ram->base.vram);
- return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ return nvkm_mm_init(&ram->base.vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 6f053a03d61c..cc764a93f1a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -61,5 +61,5 @@ nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
else
type = NVKM_RAM_TYPE_SDRAM;
- return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
+ return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index dfd155c98dbb..afe54e323b18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -36,5 +36,5 @@ nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
else
type = NVKM_RAM_TYPE_SDRAM;
- return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
+ return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index 3c6a8710e812..4c07d10bb976 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -44,5 +44,5 @@ nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
}
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
- mib * 1024 * 1024, 0, pram);
+ mib * 1024 * 1024, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index 747e47c10cc7..71d63d7daa75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -29,7 +29,6 @@ nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
struct nvkm_device *device = fb->subdev.device;
u32 pbus1218 = nvkm_rd32(device, 0x001218);
u32 size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
- u32 tags = nvkm_rd32(device, 0x100320);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -40,7 +39,7 @@ nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
}
- ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
+ ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 70c63535d56b..2b12e388f47a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -187,13 +187,13 @@ nv40_ram_func = {
int
nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
- u32 tags, struct nvkm_ram **pram)
+ struct nvkm_ram **pram)
{
struct nv40_ram *ram;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
- return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
+ return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, &ram->base);
}
int
@@ -202,7 +202,6 @@ nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
struct nvkm_device *device = fb->subdev.device;
u32 pbus1218 = nvkm_rd32(device, 0x001218);
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
- u32 tags = nvkm_rd32(device, 0x100320);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -213,7 +212,7 @@ nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
}
- ret = nv40_ram_new_(fb, type, size, tags, pram);
+ ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
index 8549fdf2437c..11f6bb2936b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -10,6 +10,6 @@ struct nv40_ram {
u32 coef;
};
-int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
+int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64,
struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 114828be292e..d3fea3726461 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -28,7 +28,6 @@ nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
- u32 tags = nvkm_rd32(device, 0x100320);
u32 fb474 = nvkm_rd32(device, 0x100474);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -40,7 +39,7 @@ nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
if (fb474 & 0x00000001)
type = NVKM_RAM_TYPE_DDR1;
- ret = nv40_ram_new_(fb, type, size, tags, pram);
+ ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index bc56fbf1c788..ab2630e5e6fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -38,5 +38,5 @@ nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
if (fb474 & 0x00000001)
type = NVKM_RAM_TYPE_DDR1;
- return nv40_ram_new_(fb, type, size, 0, pram);
+ return nv40_ram_new_(fb, type, size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index c01f4b1022b8..946ca7c2e0b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -28,7 +28,6 @@ nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
- u32 tags = nvkm_rd32(device, 0x100320);
u32 fb914 = nvkm_rd32(device, 0x100914);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -40,7 +39,7 @@ nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
case 0x00000003: break;
}
- ret = nv40_ram_new_(fb, type, size, tags, pram);
+ ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index fa3c2e06203d..02b8bdbc819f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -29,5 +29,5 @@ nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
- size, 0, pram);
+ size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 6549b0588309..2ccb4b6be153 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -493,100 +493,8 @@ nv50_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->hwsq, false);
}
-void
-__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
-{
- struct nvkm_mm_node *next = mem->mem;
- struct nvkm_mm_node *node;
- while ((node = next)) {
- next = node->next;
- nvkm_mm_free(&ram->vram, &node);
- }
- nvkm_mm_free(&ram->tags, &mem->tag);
-}
-
-void
-nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
-{
- struct nvkm_mem *mem = *pmem;
-
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&ram->fb->subdev.mutex);
- __nv50_ram_put(ram, mem);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- kfree(mem);
-}
-
-int
-nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nvkm_mem **pmem)
-{
- struct nvkm_mm *heap = &ram->vram;
- struct nvkm_mm *tags = &ram->tags;
- struct nvkm_mm_node **node, *r;
- struct nvkm_mem *mem;
- int comp = (memtype & 0x300) >> 8;
- int type = (memtype & 0x07f);
- int back = (memtype & 0x800);
- int min, max, ret;
-
- max = (size >> NVKM_RAM_MM_SHIFT);
- min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
- align >>= NVKM_RAM_MM_SHIFT;
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- mutex_lock(&ram->fb->subdev.mutex);
- if (comp) {
- if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
- int n = (max >> 4) * comp;
-
- ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
- if (ret)
- mem->tag = NULL;
- }
-
- if (unlikely(!mem->tag))
- comp = 0;
- }
-
- mem->memtype = (comp << 7) | type;
- mem->size = max;
-
- type = nv50_fb_memtype[type];
- node = &mem->mem;
- do {
- if (back)
- ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
- else
- ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
- if (ret) {
- mutex_unlock(&ram->fb->subdev.mutex);
- ram->func->put(ram, &mem);
- return ret;
- }
-
- *node = r;
- node = &r->next;
- max -= r->length;
- } while (max);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
- *pmem = mem;
- return 0;
-}
-
static const struct nvkm_ram_func
nv50_ram_func = {
- .get = nv50_ram_get,
- .put = nv50_ram_put,
.calc = nv50_ram_calc,
.prog = nv50_ram_prog,
.tidy = nv50_ram_tidy,
@@ -639,7 +547,6 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
u64 size = nvkm_rd32(device, 0x10020c);
- u32 tags = nvkm_rd32(device, 0x100320);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -660,7 +567,7 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
- ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
+ ret = nvkm_ram_ctor(func, fb, type, size, ram);
if (ret)
return ret;
@@ -669,7 +576,8 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
nvkm_mm_fini(&ram->vram);
- return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ return nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 10c987a654ec..364ea4492acc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,181 +23,90 @@
*/
#include "priv.h"
-#include <core/memory.h>
#include <subdev/bar.h>
/******************************************************************************
* instmem object base implementation
*****************************************************************************/
-#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
-
-struct nvkm_instobj {
- struct nvkm_memory memory;
- struct nvkm_memory *parent;
- struct nvkm_instmem *imem;
- struct list_head head;
- u32 *suspend;
- void __iomem *map;
-};
-
-static enum nvkm_memory_target
-nvkm_instobj_target(struct nvkm_memory *memory)
-{
- memory = nvkm_instobj(memory)->parent;
- return nvkm_memory_target(memory);
-}
-
-static u64
-nvkm_instobj_addr(struct nvkm_memory *memory)
-{
- memory = nvkm_instobj(memory)->parent;
- return nvkm_memory_addr(memory);
-}
-
-static u64
-nvkm_instobj_size(struct nvkm_memory *memory)
-{
- memory = nvkm_instobj(memory)->parent;
- return nvkm_memory_size(memory);
-}
-
static void
-nvkm_instobj_release(struct nvkm_memory *memory)
+nvkm_instobj_load(struct nvkm_instobj *iobj)
{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- nvkm_bar_flush(iobj->imem->subdev.device->bar);
-}
-
-static void __iomem *
-nvkm_instobj_acquire(struct nvkm_memory *memory)
-{
- return nvkm_instobj(memory)->map;
-}
-
-static u32
-nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
-{
- return ioread32_native(nvkm_instobj(memory)->map + offset);
-}
-
-static void
-nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
-{
- iowrite32_native(data, nvkm_instobj(memory)->map + offset);
-}
+ struct nvkm_memory *memory = &iobj->memory;
+ const u64 size = nvkm_memory_size(memory);
+ void __iomem *map;
+ int i;
-static void
-nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
-{
- memory = nvkm_instobj(memory)->parent;
- nvkm_memory_map(memory, vma, offset);
-}
+ if (!(map = nvkm_kmap(memory))) {
+ for (i = 0; i < size; i += 4)
+ nvkm_wo32(memory, i, iobj->suspend[i / 4]);
+ } else {
+ memcpy_toio(map, iobj->suspend, size);
+ }
+ nvkm_done(memory);
-static void *
-nvkm_instobj_dtor(struct nvkm_memory *memory)
-{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- spin_lock(&iobj->imem->lock);
- list_del(&iobj->head);
- spin_unlock(&iobj->imem->lock);
- nvkm_memory_del(&iobj->parent);
- return iobj;
+ kvfree(iobj->suspend);
+ iobj->suspend = NULL;
}
-static const struct nvkm_memory_func
-nvkm_instobj_func = {
- .dtor = nvkm_instobj_dtor,
- .target = nvkm_instobj_target,
- .addr = nvkm_instobj_addr,
- .size = nvkm_instobj_size,
- .acquire = nvkm_instobj_acquire,
- .release = nvkm_instobj_release,
- .rd32 = nvkm_instobj_rd32,
- .wr32 = nvkm_instobj_wr32,
- .map = nvkm_instobj_map,
-};
-
-static void
-nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+static int
+nvkm_instobj_save(struct nvkm_instobj *iobj)
{
- memory = nvkm_instobj(memory)->parent;
- nvkm_memory_boot(memory, vm);
-}
+ struct nvkm_memory *memory = &iobj->memory;
+ const u64 size = nvkm_memory_size(memory);
+ void __iomem *map;
+ int i;
-static void
-nvkm_instobj_release_slow(struct nvkm_memory *memory)
-{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- nvkm_instobj_release(memory);
- nvkm_done(iobj->parent);
-}
+ iobj->suspend = kvmalloc(size, GFP_KERNEL);
+ if (!iobj->suspend)
+ return -ENOMEM;
-static void __iomem *
-nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
-{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- iobj->map = nvkm_kmap(iobj->parent);
- if (iobj->map)
- memory->func = &nvkm_instobj_func;
- return iobj->map;
+ if (!(map = nvkm_kmap(memory))) {
+ for (i = 0; i < size; i += 4)
+ iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+ } else {
+ memcpy_fromio(iobj->suspend, map, size);
+ }
+ nvkm_done(memory);
+ return 0;
}
-static u32
-nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+void
+nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- return nvkm_ro32(iobj->parent, offset);
+ spin_lock(&imem->lock);
+ list_del(&iobj->head);
+ spin_unlock(&imem->lock);
}
-static void
-nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+void
+nvkm_instobj_ctor(const struct nvkm_memory_func *func,
+ struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- return nvkm_wo32(iobj->parent, offset, data);
+ nvkm_memory_ctor(func, &iobj->memory);
+ iobj->suspend = NULL;
+ spin_lock(&imem->lock);
+ list_add_tail(&iobj->head, &imem->list);
+ spin_unlock(&imem->lock);
}
-static const struct nvkm_memory_func
-nvkm_instobj_func_slow = {
- .dtor = nvkm_instobj_dtor,
- .target = nvkm_instobj_target,
- .addr = nvkm_instobj_addr,
- .size = nvkm_instobj_size,
- .boot = nvkm_instobj_boot,
- .acquire = nvkm_instobj_acquire_slow,
- .release = nvkm_instobj_release_slow,
- .rd32 = nvkm_instobj_rd32_slow,
- .wr32 = nvkm_instobj_wr32_slow,
- .map = nvkm_instobj_map,
-};
-
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
+ struct nvkm_subdev *subdev = &imem->subdev;
struct nvkm_memory *memory = NULL;
- struct nvkm_instobj *iobj;
u32 offset;
int ret;
ret = imem->func->memory_new(imem, size, align, zero, &memory);
- if (ret)
+ if (ret) {
+ nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
goto done;
-
- if (!imem->func->persistent) {
- if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
- ret = -ENOMEM;
- goto done;
- }
-
- nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
- iobj->parent = memory;
- iobj->imem = imem;
- spin_lock(&iobj->imem->lock);
- list_add_tail(&iobj->head, &imem->list);
- spin_unlock(&iobj->imem->lock);
- memory = &iobj->memory;
}
+ nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
+ zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+
if (!imem->func->zero && zero) {
void __iomem *map = nvkm_kmap(memory);
if (unlikely(!map)) {
@@ -211,7 +120,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
done:
if (ret)
- nvkm_memory_del(&memory);
+ nvkm_memory_unref(&memory);
*pmemory = memory;
return ret;
}
@@ -232,39 +141,46 @@ nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
return imem->func->wr32(imem, addr, data);
}
+void
+nvkm_instmem_boot(struct nvkm_instmem *imem)
+{
+ /* Separate bootstrapped objects from normal list, as we need
+ * to make sure they're accessed with the slowpath on suspend
+ * and resume.
+ */
+ struct nvkm_instobj *iobj, *itmp;
+ spin_lock(&imem->lock);
+ list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
+ list_move_tail(&iobj->head, &imem->boot);
+ }
+ spin_unlock(&imem->lock);
+}
+
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
struct nvkm_instobj *iobj;
- int i;
-
- if (imem->func->fini)
- imem->func->fini(imem);
if (suspend) {
list_for_each_entry(iobj, &imem->list, head) {
- struct nvkm_memory *memory = iobj->parent;
- u64 size = nvkm_memory_size(memory);
+ int ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
- iobj->suspend = vmalloc(size);
- if (!iobj->suspend)
- return -ENOMEM;
+ nvkm_bar_bar2_fini(subdev->device);
- for (i = 0; i < size; i += 4)
- iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+ list_for_each_entry(iobj, &imem->boot, head) {
+ int ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
}
}
- return 0;
-}
+ if (imem->func->fini)
+ imem->func->fini(imem);
-static int
-nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_instmem *imem = nvkm_instmem(subdev);
- if (imem->func->oneinit)
- return imem->func->oneinit(imem);
return 0;
}
@@ -273,22 +189,31 @@ nvkm_instmem_init(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
struct nvkm_instobj *iobj;
- int i;
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ nvkm_bar_bar2_init(subdev->device);
list_for_each_entry(iobj, &imem->list, head) {
- if (iobj->suspend) {
- struct nvkm_memory *memory = iobj->parent;
- u64 size = nvkm_memory_size(memory);
- for (i = 0; i < size; i += 4)
- nvkm_wo32(memory, i, iobj->suspend[i / 4]);
- vfree(iobj->suspend);
- iobj->suspend = NULL;
- }
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
}
return 0;
}
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ if (imem->func->oneinit)
+ return imem->func->oneinit(imem);
+ return 0;
+}
+
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
@@ -315,4 +240,5 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
imem->func = func;
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list);
+ INIT_LIST_HEAD(&imem->boot);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index cd5adbec5e57..985f2990ab0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -44,14 +44,13 @@
#include "priv.h"
#include <core/memory.h>
-#include <core/mm.h>
#include <core/tegra.h>
-#include <subdev/fb.h>
#include <subdev/ltc.h>
+#include <subdev/mmu.h>
struct gk20a_instobj {
struct nvkm_memory memory;
- struct nvkm_mem mem;
+ struct nvkm_mm_node *mn;
struct gk20a_instmem *imem;
/* CPU mapping */
@@ -119,16 +118,22 @@ gk20a_instobj_target(struct nvkm_memory *memory)
return NVKM_MEM_TARGET_NCOH;
}
+static u8
+gk20a_instobj_page(struct nvkm_memory *memory)
+{
+ return 12;
+}
+
static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
- return gk20a_instobj(memory)->mem.offset;
+ return (u64)gk20a_instobj(memory)->mn->offset << 12;
}
static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
- return (u64)gk20a_instobj(memory)->mem.size << 12;
+ return (u64)gk20a_instobj(memory)->mn->length << 12;
}
/*
@@ -272,12 +277,18 @@ gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
node->vaddr[offset / 4] = data;
}
-static void
-gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &node->memory,
+ .offset = offset,
+ .mem = node->mn,
+ };
- nvkm_vm_map_at(vma, offset, &node->mem);
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static void *
@@ -290,8 +301,8 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
if (unlikely(!node->base.vaddr))
goto out;
- dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
- node->handle, imem->attrs);
+ dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+ node->base.vaddr, node->handle, imem->attrs);
out:
return node;
@@ -303,7 +314,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
- struct nvkm_mm_node *r = node->base.mem.mem;
+ struct nvkm_mm_node *r = node->base.mn;
int i;
if (unlikely(!r))
@@ -321,7 +332,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
/* Unmap pages from GPU address space and free them */
- for (i = 0; i < node->base.mem.size; i++) {
+ for (i = 0; i < node->base.mn->length; i++) {
iommu_unmap(imem->domain,
(r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
@@ -342,12 +353,11 @@ static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
.dtor = gk20a_instobj_dtor_dma,
.target = gk20a_instobj_target,
+ .page = gk20a_instobj_page,
.addr = gk20a_instobj_addr,
.size = gk20a_instobj_size,
.acquire = gk20a_instobj_acquire_dma,
.release = gk20a_instobj_release_dma,
- .rd32 = gk20a_instobj_rd32,
- .wr32 = gk20a_instobj_wr32,
.map = gk20a_instobj_map,
};
@@ -355,13 +365,18 @@ static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
.dtor = gk20a_instobj_dtor_iommu,
.target = gk20a_instobj_target,
+ .page = gk20a_instobj_page,
.addr = gk20a_instobj_addr,
.size = gk20a_instobj_size,
.acquire = gk20a_instobj_acquire_iommu,
.release = gk20a_instobj_release_iommu,
+ .map = gk20a_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+gk20a_instobj_ptrs = {
.rd32 = gk20a_instobj_rd32,
.wr32 = gk20a_instobj_wr32,
- .map = gk20a_instobj_map,
};
static int
@@ -377,6 +392,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
*_node = &node->base;
nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
+ node->base.memory.ptrs = &gk20a_instobj_ptrs;
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
@@ -397,8 +413,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
node->r.offset = node->handle >> 12;
node->r.length = (npages << PAGE_SHIFT) >> 12;
- node->base.mem.offset = node->handle;
- node->base.mem.mem = &node->r;
+ node->base.mn = &node->r;
return 0;
}
@@ -424,6 +439,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
node->dma_addrs = (void *)(node->pages + npages);
nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
+ node->base.memory.ptrs = &gk20a_instobj_ptrs;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
@@ -474,8 +490,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
/* IOMMU bit tells that an address is to be resolved through the IOMMU */
r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
- node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
- node->base.mem.mem = r;
+ node->base.mn = r;
return 0;
release_area:
@@ -523,13 +538,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
node->imem = imem;
- /* present memory for being mapped using small pages */
- node->mem.size = size >> 12;
- node->mem.memtype = 0;
- node->mem.page_shift = 12;
-
nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
- size, align, node->mem.offset);
+ size, align, (u64)node->mn->offset << 12);
return 0;
}
@@ -554,7 +564,6 @@ static const struct nvkm_instmem_func
gk20a_instmem = {
.dtor = gk20a_instmem_dtor,
.memory_new = gk20a_instobj_new,
- .persistent = true,
.zero = false,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6133c8bb2d42..6bf0dad46919 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -24,7 +24,6 @@
#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
#include "priv.h"
-#include <core/memory.h>
#include <core/ramht.h>
struct nv04_instmem {
@@ -35,30 +34,39 @@ struct nv04_instmem {
/******************************************************************************
* instmem object implementation
*****************************************************************************/
-#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
+#define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
struct nv04_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nv04_instmem *imem;
struct nvkm_mm_node *node;
};
-static enum nvkm_memory_target
-nv04_instobj_target(struct nvkm_memory *memory)
+static void
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return NVKM_MEM_TARGET_INST;
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ struct nvkm_device *device = iobj->imem->base.subdev.device;
+ nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
}
-static u64
-nv04_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- return nv04_instobj(memory)->node->offset;
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ struct nvkm_device *device = iobj->imem->base.subdev.device;
+ return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
}
-static u64
-nv04_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv04_instobj_ptrs = {
+ .rd32 = nv04_instobj_rd32,
+ .wr32 = nv04_instobj_wr32,
+};
+
+static void
+nv04_instobj_release(struct nvkm_memory *memory)
{
- return nv04_instobj(memory)->node->length;
}
static void __iomem *
@@ -69,25 +77,22 @@ nv04_instobj_acquire(struct nvkm_memory *memory)
return device->pri + 0x700000 + iobj->node->offset;
}
-static void
-nv04_instobj_release(struct nvkm_memory *memory)
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
{
+ return nv04_instobj(memory)->node->length;
}
-static u32
-nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
{
- struct nv04_instobj *iobj = nv04_instobj(memory);
- struct nvkm_device *device = iobj->imem->base.subdev.device;
- return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
+ return nv04_instobj(memory)->node->offset;
}
-static void
-nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
{
- struct nv04_instobj *iobj = nv04_instobj(memory);
- struct nvkm_device *device = iobj->imem->base.subdev.device;
- nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
+ return NVKM_MEM_TARGET_INST;
}
static void *
@@ -97,6 +102,7 @@ nv04_instobj_dtor(struct nvkm_memory *memory)
mutex_lock(&iobj->imem->base.subdev.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
mutex_unlock(&iobj->imem->base.subdev.mutex);
+ nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -108,8 +114,6 @@ nv04_instobj_func = {
.addr = nv04_instobj_addr,
.acquire = nv04_instobj_acquire,
.release = nv04_instobj_release,
- .rd32 = nv04_instobj_rd32,
- .wr32 = nv04_instobj_wr32,
};
static int
@@ -122,9 +126,10 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
- *pmemory = &iobj->memory;
+ *pmemory = &iobj->base.memory;
- nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+ nvkm_instobj_ctor(&nv04_instobj_func, &imem->base, &iobj->base);
+ iobj->base.memory.ptrs = &nv04_instobj_ptrs;
iobj->imem = imem;
mutex_lock(&imem->base.subdev.mutex);
@@ -160,7 +165,7 @@ nv04_instmem_oneinit(struct nvkm_instmem *base)
/* PRAMIN aperture maps over the end of VRAM, reserve it */
imem->base.reserved = 512 * 1024;
- ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+ ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
if (ret)
return ret;
@@ -194,10 +199,10 @@ static void *
nv04_instmem_dtor(struct nvkm_instmem *base)
{
struct nv04_instmem *imem = nv04_instmem(base);
- nvkm_memory_del(&imem->base.ramfc);
- nvkm_memory_del(&imem->base.ramro);
+ nvkm_memory_unref(&imem->base.ramfc);
+ nvkm_memory_unref(&imem->base.ramro);
nvkm_ramht_del(&imem->base.ramht);
- nvkm_memory_del(&imem->base.vbios);
+ nvkm_memory_unref(&imem->base.vbios);
nvkm_mm_fini(&imem->heap);
return imem;
}
@@ -209,7 +214,6 @@ nv04_instmem = {
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
.memory_new = nv04_instobj_new,
- .persistent = false,
.zero = false,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index c0543875e490..086c118488ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -24,7 +24,6 @@
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"
-#include <core/memory.h>
#include <core/ramht.h>
#include <engine/gr/nv40.h>
@@ -37,30 +36,38 @@ struct nv40_instmem {
/******************************************************************************
* instmem object implementation
*****************************************************************************/
-#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)
struct nv40_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nv40_instmem *imem;
struct nvkm_mm_node *node;
};
-static enum nvkm_memory_target
-nv40_instobj_target(struct nvkm_memory *memory)
+static void
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return NVKM_MEM_TARGET_INST;
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}
-static u64
-nv40_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- return nv40_instobj(memory)->node->offset;
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}
-static u64
-nv40_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv40_instobj_ptrs = {
+ .rd32 = nv40_instobj_rd32,
+ .wr32 = nv40_instobj_wr32,
+};
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
{
- return nv40_instobj(memory)->node->length;
+ wmb();
}
static void __iomem *
@@ -70,23 +77,22 @@ nv40_instobj_acquire(struct nvkm_memory *memory)
return iobj->imem->iomem + iobj->node->offset;
}
-static void
-nv40_instobj_release(struct nvkm_memory *memory)
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
{
+ return nv40_instobj(memory)->node->length;
}
-static u32
-nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
{
- struct nv40_instobj *iobj = nv40_instobj(memory);
- return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
+ return nv40_instobj(memory)->node->offset;
}
-static void
-nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
{
- struct nv40_instobj *iobj = nv40_instobj(memory);
- iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+ return NVKM_MEM_TARGET_INST;
}
static void *
@@ -96,6 +102,7 @@ nv40_instobj_dtor(struct nvkm_memory *memory)
mutex_lock(&iobj->imem->base.subdev.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
mutex_unlock(&iobj->imem->base.subdev.mutex);
+ nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -107,8 +114,6 @@ nv40_instobj_func = {
.addr = nv40_instobj_addr,
.acquire = nv40_instobj_acquire,
.release = nv40_instobj_release,
- .rd32 = nv40_instobj_rd32,
- .wr32 = nv40_instobj_wr32,
};
static int
@@ -121,9 +126,10 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
- *pmemory = &iobj->memory;
+ *pmemory = &iobj->base.memory;
- nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+ nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
+ iobj->base.memory.ptrs = &nv40_instobj_ptrs;
iobj->imem = imem;
mutex_lock(&imem->base.subdev.mutex);
@@ -171,7 +177,7 @@ nv40_instmem_oneinit(struct nvkm_instmem *base)
imem->base.reserved += 512 * 1024; /* object storage */
imem->base.reserved = round_up(imem->base.reserved, 4096);
- ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+ ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
if (ret)
return ret;
@@ -209,10 +215,10 @@ static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
struct nv40_instmem *imem = nv40_instmem(base);
- nvkm_memory_del(&imem->base.ramfc);
- nvkm_memory_del(&imem->base.ramro);
+ nvkm_memory_unref(&imem->base.ramfc);
+ nvkm_memory_unref(&imem->base.ramro);
nvkm_ramht_del(&imem->base.ramht);
- nvkm_memory_del(&imem->base.vbios);
+ nvkm_memory_unref(&imem->base.vbios);
nvkm_mm_fini(&imem->heap);
if (imem->iomem)
iounmap(imem->iomem);
@@ -226,7 +232,6 @@ nv40_instmem = {
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
.memory_new = nv40_instobj_new,
- .persistent = false,
.zero = false,
};
@@ -248,8 +253,8 @@ nv40_instmem_new(struct nvkm_device *device, int index,
else
bar = 3;
- imem->iomem = ioremap(device->func->resource_addr(device, bar),
- device->func->resource_size(device, bar));
+ imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
+ device->func->resource_size(device, bar));
if (!imem->iomem) {
nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 6d512c062ae3..1ba7289684aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -31,147 +31,293 @@
struct nv50_instmem {
struct nvkm_instmem base;
- unsigned long lock_flags;
- spinlock_t lock;
u64 addr;
+
+ /* Mappings that can be evicted when BAR2 space has been exhausted. */
+ struct list_head lru;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
-#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
+#define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
struct nv50_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nv50_instmem *imem;
- struct nvkm_mem *mem;
- struct nvkm_vma bar;
+ struct nvkm_memory *ram;
+ struct nvkm_vma *bar;
+ refcount_t maps;
void *map;
+ struct list_head lru;
};
-static enum nvkm_memory_target
-nv50_instobj_target(struct nvkm_memory *memory)
+static void
+nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return NVKM_MEM_TARGET_VRAM;
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
+ u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imem->base.lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
+ }
+ nvkm_wr32(device, 0x700000 + addr, data);
+ spin_unlock_irqrestore(&imem->base.lock, flags);
}
-static u64
-nv50_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
- return nv50_instobj(memory)->mem->offset;
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
+ u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
+ u32 data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imem->base.lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
+ }
+ data = nvkm_rd32(device, 0x700000 + addr);
+ spin_unlock_irqrestore(&imem->base.lock, flags);
+ return data;
}
-static u64
-nv50_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv50_instobj_slow = {
+ .rd32 = nv50_instobj_rd32_slow,
+ .wr32 = nv50_instobj_wr32_slow,
+};
+
+static void
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+ iowrite32_native(data, nv50_instobj(memory)->map + offset);
}
+static u32
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+ return ioread32_native(nv50_instobj(memory)->map + offset);
+}
+
+static const struct nvkm_memory_ptrs
+nv50_instobj_fast = {
+ .rd32 = nv50_instobj_rd32,
+ .wr32 = nv50_instobj_wr32,
+};
+
static void
-nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
{
- struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+ struct nv50_instmem *imem = iobj->imem;
+ struct nv50_instobj *eobj;
+ struct nvkm_memory *memory = &iobj->base.memory;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vma *bar = NULL, *ebar;
u64 size = nvkm_memory_size(memory);
- void __iomem *map;
+ void *emap;
int ret;
- iobj->map = ERR_PTR(-ENOMEM);
-
- ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
- if (ret == 0) {
- map = ioremap(device->func->resource_addr(device, 3) +
- (u32)iobj->bar.offset, size);
- if (map) {
- nvkm_memory_map(memory, &iobj->bar, 0);
- iobj->map = map;
- } else {
- nvkm_warn(subdev, "PRAMIN ioremap failed\n");
- nvkm_vm_put(&iobj->bar);
+ /* Attempt to allocate BAR2 address-space and map the object
+ * into it. The lock has to be dropped while doing this due
+ * to the possibility of recursion for page table allocation.
+ */
+ mutex_unlock(&subdev->mutex);
+ while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
+ /* Evict unused mappings, and keep retrying until we either
+ * succeed,or there's no more objects left on the LRU.
+ */
+ mutex_lock(&subdev->mutex);
+ eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
+ if (eobj) {
+ nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
+ nvkm_memory_addr(&eobj->base.memory),
+ nvkm_memory_size(&eobj->base.memory),
+ eobj->bar->addr);
+ list_del_init(&eobj->lru);
+ ebar = eobj->bar;
+ eobj->bar = NULL;
+ emap = eobj->map;
+ eobj->map = NULL;
}
- } else {
- nvkm_warn(subdev, "PRAMIN exhausted\n");
+ mutex_unlock(&subdev->mutex);
+ if (!eobj)
+ break;
+ iounmap(emap);
+ nvkm_vmm_put(vmm, &ebar);
}
+
+ if (ret == 0)
+ ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
+ mutex_lock(&subdev->mutex);
+ if (ret || iobj->bar) {
+ /* We either failed, or another thread beat us. */
+ mutex_unlock(&subdev->mutex);
+ nvkm_vmm_put(vmm, &bar);
+ mutex_lock(&subdev->mutex);
+ return;
+ }
+
+ /* Make the mapping visible to the host. */
+ iobj->bar = bar;
+ iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+ (u32)iobj->bar->addr, size);
+ if (!iobj->map) {
+ nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+ nvkm_vmm_put(vmm, &iobj->bar);
+ }
+}
+
+static int
+nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ memory = nv50_instobj(memory)->ram;
+ return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
}
static void
nv50_instobj_release(struct nvkm_memory *memory)
{
- struct nv50_instmem *imem = nv50_instobj(memory)->imem;
- spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
+
+ wmb();
+ nvkm_bar_flush(subdev->device->bar);
+
+ if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+ /* Add the now-unused mapping to the LRU instead of directly
+ * unmapping it here, in case we need to map it again later.
+ */
+ if (likely(iobj->lru.next) && iobj->map) {
+ BUG_ON(!list_empty(&iobj->lru));
+ list_add_tail(&iobj->lru, &imem->lru);
+ }
+
+ /* Switch back to NULL accessors when last map is gone. */
+ iobj->base.memory.ptrs = NULL;
+ mutex_unlock(&subdev->mutex);
+ }
}
static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nv50_instmem *imem = iobj->imem;
- struct nvkm_bar *bar = imem->base.subdev.device->bar;
- struct nvkm_vm *vm;
- unsigned long flags;
+ struct nvkm_instmem *imem = &iobj->imem->base;
+ struct nvkm_vmm *vmm;
+ void __iomem *map = NULL;
- if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
- nvkm_memory_boot(memory, vm);
- if (!IS_ERR_OR_NULL(iobj->map))
+ /* Already mapped? */
+ if (refcount_inc_not_zero(&iobj->maps))
return iobj->map;
- spin_lock_irqsave(&imem->lock, flags);
- imem->lock_flags = flags;
- return NULL;
-}
+ /* Take the lock, and re-check that another thread hasn't
+ * already mapped the object in the meantime.
+ */
+ mutex_lock(&imem->subdev.mutex);
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ mutex_unlock(&imem->subdev.mutex);
+ return iobj->map;
+ }
-static u32
-nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
-{
- struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nv50_instmem *imem = iobj->imem;
- struct nvkm_device *device = imem->base.subdev.device;
- u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
- u32 data;
+ /* Attempt to get a direct CPU mapping of the object. */
+ if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) {
+ if (!iobj->map)
+ nv50_instobj_kmap(iobj, vmm);
+ map = iobj->map;
+ }
- if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
- imem->addr = base;
+ if (!refcount_inc_not_zero(&iobj->maps)) {
+ /* Exclude object from eviction while it's being accessed. */
+ if (likely(iobj->lru.next))
+ list_del_init(&iobj->lru);
+
+ if (map)
+ iobj->base.memory.ptrs = &nv50_instobj_fast;
+ else
+ iobj->base.memory.ptrs = &nv50_instobj_slow;
+ refcount_inc(&iobj->maps);
}
- data = nvkm_rd32(device, 0x700000 + addr);
- return data;
+
+ mutex_unlock(&imem->subdev.mutex);
+ return map;
}
static void
-nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nv50_instmem *imem = iobj->imem;
- struct nvkm_device *device = imem->base.subdev.device;
- u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
-
- if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
- imem->addr = base;
+ struct nvkm_instmem *imem = &iobj->imem->base;
+
+ /* Exclude bootstrapped objects (ie. the page tables for the
+ * instmem BAR itself) from eviction.
+ */
+ mutex_lock(&imem->subdev.mutex);
+ if (likely(iobj->lru.next)) {
+ list_del_init(&iobj->lru);
+ iobj->lru.next = NULL;
}
- nvkm_wr32(device, 0x700000 + addr, data);
+
+ nv50_instobj_kmap(iobj, vmm);
+ nvkm_instmem_boot(imem);
+ mutex_unlock(&imem->subdev.mutex);
}
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
{
- struct nv50_instobj *iobj = nv50_instobj(memory);
- nvkm_vm_map_at(vma, offset, iobj->mem);
+ return nvkm_memory_size(nv50_instobj(memory)->ram);
+}
+
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+ return nvkm_memory_addr(nv50_instobj(memory)->ram);
+}
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+ return nvkm_memory_target(nv50_instobj(memory)->ram);
}
static void *
nv50_instobj_dtor(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
- if (!IS_ERR_OR_NULL(iobj->map)) {
- nvkm_vm_put(&iobj->bar);
- iounmap(iobj->map);
+ struct nvkm_instmem *imem = &iobj->imem->base;
+ struct nvkm_vma *bar;
+ void *map = map;
+
+ mutex_lock(&imem->subdev.mutex);
+ if (likely(iobj->lru.next))
+ list_del(&iobj->lru);
+ map = iobj->map;
+ bar = iobj->bar;
+ mutex_unlock(&imem->subdev.mutex);
+
+ if (map) {
+ struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
+ iounmap(map);
+ if (likely(vmm)) /* Can be NULL during BAR destructor. */
+ nvkm_vmm_put(vmm, &bar);
}
- ram->func->put(ram, &iobj->mem);
+
+ nvkm_memory_unref(&iobj->ram);
+ nvkm_instobj_dtor(imem, &iobj->base);
return iobj;
}
@@ -184,8 +330,6 @@ nv50_instobj_func = {
.boot = nv50_instobj_boot,
.acquire = nv50_instobj_acquire,
.release = nv50_instobj_release,
- .rd32 = nv50_instobj_rd32,
- .wr32 = nv50_instobj_wr32,
.map = nv50_instobj_map,
};
@@ -195,25 +339,19 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
{
struct nv50_instmem *imem = nv50_instmem(base);
struct nv50_instobj *iobj;
- struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
- int ret;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u8 page = max(order_base_2(align), 12);
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
- *pmemory = &iobj->memory;
+ *pmemory = &iobj->base.memory;
- nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+ nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base);
iobj->imem = imem;
+ refcount_set(&iobj->maps, 0);
+ INIT_LIST_HEAD(&iobj->lru);
- size = max((size + 4095) & ~4095, (u32)4096);
- align = max((align + 4095) & ~4095, (u32)4096);
-
- ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
- if (ret)
- return ret;
-
- iobj->mem->page_shift = 12;
- return 0;
+ return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram);
}
/******************************************************************************
@@ -230,7 +368,6 @@ static const struct nvkm_instmem_func
nv50_instmem = {
.fini = nv50_instmem_fini,
.memory_new = nv50_instobj_new,
- .persistent = false,
.zero = false,
};
@@ -243,7 +380,7 @@ nv50_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
- spin_lock_init(&imem->lock);
+ INIT_LIST_HEAD(&imem->lru);
*pimem = &imem->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 021e7a1f39a1..b9e4751b9921 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -12,10 +12,22 @@ struct nvkm_instmem_func {
void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
bool zero, struct nvkm_memory **);
- bool persistent;
bool zero;
};
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
int index, struct nvkm_instmem *);
+void nvkm_instmem_boot(struct nvkm_instmem *);
+
+#include <core/memory.h>
+
+struct nvkm_instobj {
+ struct nvkm_memory memory;
+ struct list_head head;
+ u32 *suspend;
+};
+
+void nvkm_instobj_ctor(const struct nvkm_memory_func *func,
+ struct nvkm_instmem *, struct nvkm_instobj *);
+void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 0c7ef250dcaf..1f185274d3e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,26 +23,12 @@
*/
#include "priv.h"
-#include <subdev/fb.h>
-
-int
-nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
-{
- int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
- if (ret)
- *pnode = NULL;
- return ret;
-}
-
-void
-nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
-{
- nvkm_mm_free(&ltc->tags, pnode);
-}
+#include <core/memory.h>
void
-nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
+nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count)
{
+ struct nvkm_ltc *ltc = device->ltc;
const u32 limit = first + count - 1;
BUG_ON((first > limit) || (limit >= ltc->num_tags));
@@ -116,10 +102,7 @@ static void *
nvkm_ltc_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
- struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
- nvkm_mm_fini(&ltc->tags);
- if (ram)
- nvkm_mm_free(&ram->vram, &ltc->tag_ram);
+ nvkm_memory_unref(&ltc->tag_ram);
return ltc;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 4a0fa0a9b802..a21ef45b8572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -23,6 +23,7 @@
*/
#include "priv.h"
+#include <core/memory.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
@@ -152,7 +153,10 @@ gf100_ltc_flush(struct nvkm_ltc *ltc)
int
gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
{
- struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
+ struct nvkm_device *device = ltc->subdev.device;
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_ram *ram = fb->ram;
+ u32 bits = (nvkm_rd32(device, 0x100c80) & 0x00001000) ? 16 : 17;
u32 tag_size, tag_margin, tag_align;
int ret;
@@ -164,8 +168,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
ltc->num_tags = (ram->size >> 17) / 4;
- if (ltc->num_tags > (1 << 17))
- ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
+ if (ltc->num_tags > (1 << bits))
+ ltc->num_tags = 1 << bits; /* we have 16/17 bits in PTE */
ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
tag_align = ltc->ltc_nr * 0x800;
@@ -181,14 +185,13 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
*/
tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
tag_size += tag_align;
- tag_size = (tag_size + 0xfff) >> 12; /* round up */
- ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
- &ltc->tag_ram);
+ ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 12, tag_size,
+ true, true, &ltc->tag_ram);
if (ret) {
ltc->num_tags = 0;
} else {
- u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
+ u64 tag_base = nvkm_memory_addr(ltc->tag_ram) + tag_margin;
tag_base += tag_align - 1;
do_div(tag_base, tag_align);
@@ -197,7 +200,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
}
mm_init:
- return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
+ nvkm_mm_fini(&fb->tags);
+ return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index 0bdfb2f40266..e34d42108019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -45,7 +45,7 @@ gp100_ltc_oneinit(struct nvkm_ltc *ltc)
ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
/*XXX: tagram allocation - TBD */
- return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 012c9db687b2..352a65f9371c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -3,4 +3,33 @@ nvkm-y += nvkm/subdev/mmu/nv04.o
nvkm-y += nvkm/subdev/mmu/nv41.o
nvkm-y += nvkm/subdev/mmu/nv44.o
nvkm-y += nvkm/subdev/mmu/nv50.o
+nvkm-y += nvkm/subdev/mmu/g84.o
nvkm-y += nvkm/subdev/mmu/gf100.o
+nvkm-y += nvkm/subdev/mmu/gk104.o
+nvkm-y += nvkm/subdev/mmu/gk20a.o
+nvkm-y += nvkm/subdev/mmu/gm200.o
+nvkm-y += nvkm/subdev/mmu/gm20b.o
+nvkm-y += nvkm/subdev/mmu/gp100.o
+nvkm-y += nvkm/subdev/mmu/gp10b.o
+
+nvkm-y += nvkm/subdev/mmu/mem.o
+nvkm-y += nvkm/subdev/mmu/memnv04.o
+nvkm-y += nvkm/subdev/mmu/memnv50.o
+nvkm-y += nvkm/subdev/mmu/memgf100.o
+
+nvkm-y += nvkm/subdev/mmu/vmm.o
+nvkm-y += nvkm/subdev/mmu/vmmnv04.o
+nvkm-y += nvkm/subdev/mmu/vmmnv41.o
+nvkm-y += nvkm/subdev/mmu/vmmnv44.o
+nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmgf100.o
+nvkm-y += nvkm/subdev/mmu/vmmgk104.o
+nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
+nvkm-y += nvkm/subdev/mmu/vmmgm200.o
+nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
+nvkm-y += nvkm/subdev/mmu/vmmgp100.o
+nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
+
+nvkm-y += nvkm/subdev/mmu/umem.o
+nvkm-y += nvkm/subdev/mmu/ummu.o
+nvkm-y += nvkm/subdev/mmu/uvmm.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 455da298227f..ee11ccaf0563 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,480 +21,367 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ummu.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
+#include <subdev/bar.h>
#include <subdev/fb.h>
-void
-nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_mm_node *r = node->mem;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- delta = 0;
- while (r) {
- u64 phys = (u64)r->offset << 12;
- u32 num = r->length >> bits;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->map(vma, pgt, node, pte, len, phys, delta);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- phys += len << (bits + 12);
- pde++;
- pte = 0;
- }
-
- delta += (u64)len << vma->node->type;
- }
- r = r->next;
- };
-
- mmu->func->flush(vm);
-}
+#include <nvif/if500d.h>
+#include <nvif/if900d.h>
-static void
-nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
- struct nvkm_mem *mem)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- unsigned m, sglen;
- u32 end, len;
- int i;
- struct scatterlist *sg;
-
- for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
- sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-
- end = pte + sglen;
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- for (m = 0; m < len; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
-
- if (num == 0)
- goto finish;
- }
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- if (m < sglen) {
- for (; m < sglen; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
- if (num == 0)
- goto finish;
- }
- }
-
- }
-finish:
- mmu->func->flush(vm);
-}
+struct nvkm_mmu_ptp {
+ struct nvkm_mmu_pt *pt;
+ struct list_head head;
+ u8 shift;
+ u16 mask;
+ u16 free;
+};
static void
-nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
- struct nvkm_mem *mem)
+nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- dma_addr_t *list = mem->pages;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->map_sg(vma, pgt, mem, pte, len, list);
-
- num -= len;
- pte += len;
- list += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
+ const int slot = pt->base >> pt->ptp->shift;
+ struct nvkm_mmu_ptp *ptp = pt->ptp;
+
+ /* If there were no free slots in the parent allocation before,
+ * there will be now, so return PTP to the cache.
+ */
+ if (!ptp->free)
+ list_add(&ptp->head, &mmu->ptp.list);
+ ptp->free |= BIT(slot);
+
+ /* If there's no more sub-allocations, destroy PTP. */
+ if (ptp->free == ptp->mask) {
+ nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
+ list_del(&ptp->head);
+ kfree(ptp);
}
- mmu->func->flush(vm);
+ kfree(pt);
}
-void
-nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
- if (node->sg)
- nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
- else
- if (node->pages)
- nvkm_vm_map_sg(vma, 0, node->size << 12, node);
- else
- nvkm_vm_map_at(vma, 0, node);
-}
+ struct nvkm_mmu_pt *pt;
+ struct nvkm_mmu_ptp *ptp;
+ int slot;
+
+ if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
+ return NULL;
+
+ ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
+ if (!ptp) {
+ /* Need to allocate a new parent to sub-allocate from. */
+ if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
+ kfree(pt);
+ return NULL;
+ }
-void
-nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->unmap(vma, pgt, pte, len);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
+ ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
+ if (!ptp->pt) {
+ kfree(ptp);
+ kfree(pt);
+ return NULL;
}
- }
- mmu->func->flush(vm);
+ ptp->shift = order_base_2(size);
+ slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
+ ptp->mask = (1 << slot) - 1;
+ ptp->free = ptp->mask;
+ list_add(&ptp->head, &mmu->ptp.list);
+ }
+ pt->ptp = ptp;
+ pt->sub = true;
+
+ /* Sub-allocate from parent object, removing PTP from cache
+ * if there's no more free slots left.
+ */
+ slot = __ffs(ptp->free);
+ ptp->free &= ~BIT(slot);
+ if (!ptp->free)
+ list_del(&ptp->head);
+
+ pt->memory = pt->ptp->pt->memory;
+ pt->base = slot << ptp->shift;
+ pt->addr = pt->ptp->pt->addr + pt->base;
+ return pt;
}
-void
-nvkm_vm_unmap(struct nvkm_vma *vma)
-{
- nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-}
+struct nvkm_mmu_ptc {
+ struct list_head head;
+ struct list_head item;
+ u32 size;
+ u32 refs;
+};
-static void
-nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
+static inline struct nvkm_mmu_ptc *
+nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgd *vpgd;
- struct nvkm_vm_pgt *vpgt;
- struct nvkm_memory *pgt;
- u32 pde;
-
- for (pde = fpde; pde <= lpde; pde++) {
- vpgt = &vm->pgt[pde - vm->fpde];
- if (--vpgt->refcount[big])
- continue;
-
- pgt = vpgt->mem[big];
- vpgt->mem[big] = NULL;
-
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
- }
+ struct nvkm_mmu_ptc *ptc;
- mmu->func->flush(vm);
+ list_for_each_entry(ptc, &mmu->ptc.list, head) {
+ if (ptc->size == size)
+ return ptc;
+ }
- nvkm_memory_del(&pgt);
+ ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
+ if (ptc) {
+ INIT_LIST_HEAD(&ptc->item);
+ ptc->size = size;
+ ptc->refs = 0;
+ list_add(&ptc->head, &mmu->ptc.list);
}
+
+ return ptc;
}
-static int
-nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
+void
+nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- struct nvkm_vm_pgd *vpgd;
- int big = (type != mmu->func->spg_shift);
- u32 pgt_size;
- int ret;
-
- pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
- pgt_size *= 8;
-
- ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
- pgt_size, 0x1000, true, &vpgt->mem[big]);
- if (unlikely(ret))
- return ret;
+ struct nvkm_mmu_pt *pt = *ppt;
+ if (pt) {
+ /* Handle sub-allocated page tables. */
+ if (pt->sub) {
+ mutex_lock(&mmu->ptp.mutex);
+ nvkm_mmu_ptp_put(mmu, force, pt);
+ mutex_unlock(&mmu->ptp.mutex);
+ return;
+ }
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
+ /* Either cache or free the object. */
+ mutex_lock(&mmu->ptc.mutex);
+ if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
+ list_add_tail(&pt->head, &pt->ptc->item);
+ pt->ptc->refs++;
+ } else {
+ nvkm_memory_unref(&pt->memory);
+ kfree(pt);
+ }
+ mutex_unlock(&mmu->ptc.mutex);
}
-
- vpgt->refcount[big]++;
- return 0;
}
-int
-nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
- struct nvkm_vma *vma)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
- struct nvkm_mmu *mmu = vm->mmu;
- u32 align = (1 << page_shift) >> 12;
- u32 msize = size >> 12;
- u32 fpde, lpde, pde;
+ struct nvkm_mmu_ptc *ptc;
+ struct nvkm_mmu_pt *pt;
int ret;
- mutex_lock(&vm->mutex);
- ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
- &vma->node);
- if (unlikely(ret != 0)) {
- mutex_unlock(&vm->mutex);
- return ret;
+ /* Sub-allocated page table (ie. GP100 LPT). */
+ if (align < 0x1000) {
+ mutex_lock(&mmu->ptp.mutex);
+ pt = nvkm_mmu_ptp_get(mmu, align, zero);
+ mutex_unlock(&mmu->ptp.mutex);
+ return pt;
}
- fpde = (vma->node->offset >> mmu->func->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
+ /* Lookup cache for this page table size. */
+ mutex_lock(&mmu->ptc.mutex);
+ ptc = nvkm_mmu_ptc_find(mmu, size);
+ if (!ptc) {
+ mutex_unlock(&mmu->ptc.mutex);
+ return NULL;
+ }
- for (pde = fpde; pde <= lpde; pde++) {
- struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- int big = (vma->node->type != mmu->func->spg_shift);
+ /* If there's a free PT in the cache, reuse it. */
+ pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
+ if (pt) {
+ if (zero)
+ nvkm_fo64(pt->memory, 0, 0, size >> 3);
+ list_del(&pt->head);
+ ptc->refs--;
+ mutex_unlock(&mmu->ptc.mutex);
+ return pt;
+ }
+ mutex_unlock(&mmu->ptc.mutex);
- if (likely(vpgt->refcount[big])) {
- vpgt->refcount[big]++;
- continue;
- }
+ /* No such luck, we need to allocate. */
+ if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
+ return NULL;
+ pt->ptc = ptc;
+ pt->sub = false;
- ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
- if (ret) {
- if (pde != fpde)
- nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
- nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&vm->mutex);
- return ret;
- }
+ ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+ size, align, zero, &pt->memory);
+ if (ret) {
+ kfree(pt);
+ return NULL;
}
- mutex_unlock(&vm->mutex);
- vma->vm = NULL;
- nvkm_vm_ref(vm, &vma->vm, NULL);
- vma->offset = (u64)vma->node->offset << 12;
- vma->access = access;
- return 0;
+ pt->base = 0;
+ pt->addr = nvkm_memory_addr(pt->memory);
+ return pt;
}
void
-nvkm_vm_put(struct nvkm_vma *vma)
-{
- struct nvkm_mmu *mmu;
- struct nvkm_vm *vm;
- u32 fpde, lpde;
-
- if (unlikely(vma->node == NULL))
- return;
- vm = vma->vm;
- mmu = vm->mmu;
-
- fpde = (vma->node->offset >> mmu->func->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
-
- mutex_lock(&vm->mutex);
- nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
- nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&vm->mutex);
-
- nvkm_vm_ref(NULL, &vma->vm, NULL);
-}
-
-int
-nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
+nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_memory *pgt;
- int ret;
-
- ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
- (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
- if (ret == 0) {
- vm->pgt[0].refcount[0] = 1;
- vm->pgt[0].mem[0] = pgt;
- nvkm_memory_boot(pgt, vm);
+ struct nvkm_mmu_ptc *ptc;
+ list_for_each_entry(ptc, &mmu->ptc.list, head) {
+ struct nvkm_mmu_pt *pt, *tt;
+ list_for_each_entry_safe(pt, tt, &ptc->item, head) {
+ nvkm_memory_unref(&pt->memory);
+ list_del(&pt->head);
+ kfree(pt);
+ }
}
-
- return ret;
}
-int
-nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
- static struct lock_class_key _key;
- struct nvkm_vm *vm;
- u64 mm_length = (offset + length) - mm_offset;
- int ret;
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm)
- return -ENOMEM;
+ struct nvkm_mmu_ptc *ptc, *ptct;
- __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
- INIT_LIST_HEAD(&vm->pgd_list);
- vm->mmu = mmu;
- kref_init(&vm->refcount);
- vm->fpde = offset >> (mmu->func->pgt_bits + 12);
- vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
-
- vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
- if (!vm->pgt) {
- kfree(vm);
- return -ENOMEM;
+ list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
+ WARN_ON(!list_empty(&ptc->item));
+ list_del(&ptc->head);
+ kfree(ptc);
}
-
- ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
- block >> 12);
- if (ret) {
- vfree(vm->pgt);
- kfree(vm);
- return ret;
- }
-
- *pvm = vm;
-
- return 0;
}
-int
-nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
- struct nvkm_mmu *mmu = device->mmu;
- if (!mmu->func->create)
- return -EINVAL;
- return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
+ mutex_init(&mmu->ptc.mutex);
+ INIT_LIST_HEAD(&mmu->ptc.list);
+ mutex_init(&mmu->ptp.mutex);
+ INIT_LIST_HEAD(&mmu->ptp.list);
}
-static int
-nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgd *vpgd;
- int i;
-
- if (!pgd)
- return 0;
-
- vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
- if (!vpgd)
- return -ENOMEM;
-
- vpgd->obj = pgd;
-
- mutex_lock(&vm->mutex);
- for (i = vm->fpde; i <= vm->lpde; i++)
- mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
- list_add(&vpgd->head, &vm->pgd_list);
- mutex_unlock(&vm->mutex);
- return 0;
+ if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+ mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+ mmu->type[mmu->type_nr].heap = heap;
+ mmu->type_nr++;
+ }
}
-static void
-nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
+static int
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
{
- struct nvkm_vm_pgd *vpgd, *tmp;
-
- if (!mpgd)
- return;
-
- mutex_lock(&vm->mutex);
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- if (vpgd->obj == mpgd) {
- list_del(&vpgd->head);
- kfree(vpgd);
- break;
+ if (size) {
+ if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+ mmu->heap[mmu->heap_nr].type = type;
+ mmu->heap[mmu->heap_nr].size = size;
+ return mmu->heap_nr++;
}
}
- mutex_unlock(&vm->mutex);
+ return -EINVAL;
}
static void
-nvkm_vm_del(struct kref *kref)
+nvkm_mmu_host(struct nvkm_mmu *mmu)
{
- struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
- struct nvkm_vm_pgd *vpgd, *tmp;
-
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- nvkm_vm_unlink(vm, vpgd->obj);
- }
-
- nvkm_mm_fini(&vm->mm);
- vfree(vm->pgt);
- kfree(vm);
+ struct nvkm_device *device = mmu->subdev.device;
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+ int heap;
+
+ /* Non-mappable system memory. */
+ heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Non-coherent, cached, system memory.
+ *
+ * Block-linear mappings of system memory must be done through
+ * BAR1, and cannot be supported on systems where we're unable
+ * to map BAR1 with write-combining.
+ */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar || device->bar->iomap_uncached)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+ else
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Coherent, cached, system memory.
+ *
+ * Unsupported on systems that aren't able to support snooped
+ * mappings, and also for block-linear mappings which must be
+ * done through BAR1.
+ */
+ type |= NVKM_MEM_COHERENT;
+ if (device->func->cpu_coherent)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+
+ /* Uncached system memory. */
+ nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
}
-int
-nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
- if (ref) {
- int ret = nvkm_vm_link(ref, pgd);
- if (ret)
- return ret;
-
- kref_get(&ref->refcount);
- }
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_mm *mm = &device->fb->ram->vram;
+ const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+ u8 heap = NVKM_MEM_VRAM;
+ int heapM, heapN, heapU;
+
+ /* Mixed-memory doesn't support compression or display. */
+ heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+ heap |= NVKM_MEM_COMP;
+ heap |= NVKM_MEM_DISP;
+ heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+ heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+ /* Add non-mappable VRAM types first so that they're preferred
+ * over anything else. Mixed-memory will be slower than other
+ * heaps, it's prioritised last.
+ */
+ nvkm_mmu_type(mmu, heapU, type);
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+
+ /* Add host memory types next, under the assumption that users
+ * wanting mappable memory want to use them as staging buffers
+ * or the like.
+ */
+ nvkm_mmu_host(mmu);
+
+ /* Mappable VRAM types go last, as they're basically the worst
+ * possible type to ask for unless there's no other choice.
+ */
+ if (device->bar) {
+ /* Write-combined BAR1 access. */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar->iomap_uncached) {
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+ }
- if (*ptr) {
- nvkm_vm_unlink(*ptr, pgd);
- kref_put(&(*ptr)->refcount, nvkm_vm_del);
+ /* Uncached BAR1 access. */
+ type |= NVKM_MEM_COHERENT;
+ type |= NVKM_MEM_UNCACHED;
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
}
-
- *ptr = ref;
- return 0;
}
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
- if (mmu->func->oneinit)
- return mmu->func->oneinit(mmu);
+
+ /* Determine available memory types. */
+ if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+ nvkm_mmu_vram(mmu);
+ else
+ nvkm_mmu_host(mmu);
+
+ if (mmu->func->vmm.global) {
+ int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
+ "gart", &mmu->vmm);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -511,8 +398,10 @@ static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
- if (mmu->func->dtor)
- return mmu->func->dtor(mmu);
+
+ nvkm_vmm_unref(&mmu->vmm);
+
+ nvkm_mmu_ptc_fini(mmu);
return mmu;
}
@@ -529,9 +418,10 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
{
nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
mmu->func = func;
- mmu->limit = func->limit;
mmu->dma_bits = func->dma_bits;
- mmu->lpg_shift = func->lpg_shift;
+ nvkm_mmu_ptc_init(mmu);
+ mmu->user.ctor = nvkm_ummu_new;
+ mmu->user.base = func->mmu.user;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
new file mode 100644
index 000000000000..8accda5a772b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+g84_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
+ .kind = nv50_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+g84_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&g84_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 7ac507c927bb..2d075246dc46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,197 +21,65 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "mem.h"
+#include "vmm.h"
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/timer.h>
-
-#include <core/gpuobj.h>
+#include <nvif/class.h>
/* Map from compressed to corresponding uncompressed storage type.
* The value 0xff represents an invalid storage type.
*/
-const u8 gf100_pte_storage_type_map[256] =
-{
- 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
- 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
- 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
- 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
- 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
- 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
- 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
- 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
- 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
- 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
- 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
- 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
- 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
- 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
- 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
- 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
-};
-
-
-static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
-{
- u32 pde[2] = { 0, 0 };
-
- if (pgt[0])
- pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
- if (pgt[1])
- pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
-
- nvkm_kmap(pgd);
- nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
- nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
- nvkm_done(pgd);
-}
-
-static inline u64
-gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
-{
- phys >>= 8;
-
- phys |= 0x00000001; /* present */
- if (vma->access & NV_MEM_ACCESS_SYS)
- phys |= 0x00000002;
-
- phys |= ((u64)target << 32);
- phys |= ((u64)memtype << 36);
- return phys;
-}
-
-static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
- u64 next = 1 << (vma->node->type - 8);
-
- phys = gf100_vm_addr(vma, phys, mem->memtype, 0);
- pte <<= 3;
-
- if (mem->tag) {
- struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
- u32 tag = mem->tag->offset + (delta >> 17);
- phys |= (u64)tag << (32 + 12);
- next |= (u64)1 << (32 + 12);
- nvkm_ltc_tags_clear(ltc, tag, cnt);
- }
-
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
- nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
- phys += next;
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
- /* compressed storage types are invalid for system memory */
- u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
-
- nvkm_kmap(pgt);
- pte <<= 3;
- while (cnt--) {
- u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
- nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
- nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- nvkm_kmap(pgt);
- pte <<= 3;
- while (cnt--) {
- nvkm_wo32(pgt, pte + 0, 0x00000000);
- nvkm_wo32(pgt, pte + 4, 0x00000000);
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-gf100_vm_flush(struct nvkm_vm *vm)
-{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_device *device = mmu->subdev.device;
- struct nvkm_vm_pgd *vpgd;
- u32 type;
-
- type = 0x00000001; /* PAGE_ALL */
- if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
- type |= 0x00000004; /* HUB_ONLY */
-
- mutex_lock(&mmu->subdev.mutex);
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- /* looks like maybe a "free flush slots" counter, the
- * faster you write to 0x100cbc to more it decreases
- */
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
- break;
- );
-
- nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
- nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
-
- /* wait for flush to be queued? */
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100c80) & 0x00008000)
- break;
- );
- }
- mutex_unlock(&mmu->subdev.mutex);
-}
-
-static int
-gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *key, struct nvkm_vm **pvm)
+const u8 *
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
{
- return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
+ static const u8
+ kind[256] = {
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+ };
+
+ *count = ARRAY_SIZE(kind);
+ return kind;
}
static const struct nvkm_mmu_func
gf100_mmu = {
- .limit = (1ULL << 40),
.dma_bits = 40,
- .pgt_bits = 27 - 12,
- .spg_shift = 12,
- .lpg_shift = 17,
- .create = gf100_vm_create,
- .map_pgt = gf100_vm_map_pgt,
- .map = gf100_vm_map,
- .map_sg = gf100_vm_map_sg,
- .unmap = gf100_vm_unmap,
- .flush = gf100_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
new file mode 100644
index 000000000000..3d7d1eb1cff9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk104_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gk104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gk104_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
new file mode 100644
index 000000000000..ac74965a60d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk20a_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gk20a_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gk20a_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
new file mode 100644
index 000000000000..dbf644ebac97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+
+const u8 *
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+{
+ static const u8
+ kind[256] = {
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+ 0x28, 0x29, 0x2a, 0x2b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+ };
+ *count = ARRAY_SIZE(kind);
+ return kind;
+}
+
+static const struct nvkm_mmu_func
+gm200_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+static const struct nvkm_mmu_func
+gm200_mmu_fixed = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gm200_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (device->fb->page)
+ return nvkm_mmu_new_(&gm200_mmu_fixed, device, index, pmmu);
+ return nvkm_mmu_new_(&gm200_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
index 1df8154d0626..7353a94b4091 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,38 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
*/
-#ifndef POLARIS10_SMC_H
-#define POLARIS10_SMC_H
+#include "mem.h"
+#include "vmm.h"
-#include "smumgr.h"
+#include <subdev/fb.h>
+#include <nvif/class.h>
-int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
-uint32_t polaris10_get_mac_definition(uint32_t value);
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
+static const struct nvkm_mmu_func
+gm20b_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
-#endif
+static const struct nvkm_mmu_func
+gm20b_mmu_fixed = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+int
+gm20b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (device->fb->page)
+ return nvkm_mmu_new_(&gm20b_mmu_fixed, device, index, pmmu);
+ return nvkm_mmu_new_(&gm20b_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
new file mode 100644
index 000000000000..651b8805c67c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gp100_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gp100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+ return gm200_mmu_new(device, index, pmmu);
+ return nvkm_mmu_new_(&gp100_mmu, device, index, pmmu);
+}
diff --git a/drivers/net/virtio_net. b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
index e69de29bb2d1..e69de29bb2d1 100644
--- a/drivers/net/virtio_net.
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
new file mode 100644
index 000000000000..3bd3db31e0bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gp10b_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gp10b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+ return gm20b_mmu_new(device, index, pmmu);
+ return nvkm_mmu_new_(&gp10b_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
new file mode 100644
index 000000000000..39808489f21d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
+#include "mem.h"
+
+#include <core/memory.h>
+
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
+struct nvkm_mem {
+ struct nvkm_memory memory;
+ enum nvkm_memory_target target;
+ struct nvkm_mmu *mmu;
+ u64 pages;
+ struct page **mem;
+ union {
+ struct scatterlist *sgl;
+ dma_addr_t *dma;
+ };
+};
+
+static enum nvkm_memory_target
+nvkm_mem_target(struct nvkm_memory *memory)
+{
+ return nvkm_mem(memory)->target;
+}
+
+static u8
+nvkm_mem_page(struct nvkm_memory *memory)
+{
+ return PAGE_SHIFT;
+}
+
+static u64
+nvkm_mem_addr(struct nvkm_memory *memory)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->pages == 1 && mem->mem)
+ return mem->dma[0];
+ return ~0ULL;
+}
+
+static u64
+nvkm_mem_size(struct nvkm_memory *memory)
+{
+ return nvkm_mem(memory)->pages << PAGE_SHIFT;
+}
+
+static int
+nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &mem->memory,
+ .offset = offset,
+ .dma = mem->dma,
+ };
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static void *
+nvkm_mem_dtor(struct nvkm_memory *memory)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->mem) {
+ while (mem->pages--) {
+ dma_unmap_page(mem->mmu->subdev.device->dev,
+ mem->dma[mem->pages], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(mem->mem[mem->pages]);
+ }
+ kvfree(mem->dma);
+ kvfree(mem->mem);
+ }
+ return mem;
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_dma = {
+ .dtor = nvkm_mem_dtor,
+ .target = nvkm_mem_target,
+ .page = nvkm_mem_page,
+ .addr = nvkm_mem_addr,
+ .size = nvkm_mem_size,
+ .map = nvkm_mem_map_dma,
+};
+
+static int
+nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &mem->memory,
+ .offset = offset,
+ .sgl = mem->sgl,
+ };
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_sgl = {
+ .dtor = nvkm_mem_dtor,
+ .target = nvkm_mem_target,
+ .page = nvkm_mem_page,
+ .addr = nvkm_mem_addr,
+ .size = nvkm_mem_size,
+ .map = nvkm_mem_map_sgl,
+};
+
+int
+nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->mem) {
+ *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
+ return *pmap ? 0 : -EFAULT;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ struct device *dev = mmu->subdev.device->dev;
+ union {
+ struct nvif_mem_ram_vn vn;
+ struct nvif_mem_ram_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ enum nvkm_memory_target target;
+ struct nvkm_mem *mem;
+ gfp_t gfp = GFP_USER | __GFP_ZERO;
+
+ if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
+ !(mmu->type[type].type & NVKM_MEM_UNCACHED))
+ target = NVKM_MEM_TARGET_HOST;
+ else
+ target = NVKM_MEM_TARGET_NCOH;
+
+ if (page != PAGE_SHIFT)
+ return -EINVAL;
+
+ if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+ return -ENOMEM;
+ mem->target = target;
+ mem->mmu = mmu;
+ *pmemory = &mem->memory;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if (args->v0.dma) {
+ nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+ mem->dma = args->v0.dma;
+ } else {
+ nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
+ mem->sgl = args->v0.sgl;
+ }
+
+ if (!IS_ALIGNED(size, PAGE_SIZE))
+ return -EINVAL;
+ mem->pages = size >> PAGE_SHIFT;
+ return 0;
+ } else
+ if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ kfree(mem);
+ return ret;
+ }
+
+ nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+ size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+ if (!(mem->mem = kvmalloc(sizeof(*mem->mem) * size, GFP_KERNEL)))
+ return -ENOMEM;
+ if (!(mem->dma = kvmalloc(sizeof(*mem->dma) * size, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (mmu->dma_bits > 32)
+ gfp |= GFP_HIGHUSER;
+ else
+ gfp |= GFP_DMA32;
+
+ for (mem->pages = 0; size; size--, mem->pages++) {
+ struct page *p = alloc_page(gfp);
+ if (!p)
+ return -ENOMEM;
+
+ mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
+ p, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, mem->dma[mem->pages])) {
+ __free_page(p);
+ return -ENOMEM;
+ }
+
+ mem->mem[mem->pages] = p;
+ }
+
+ return 0;
+}
+
+int
+nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ struct nvkm_memory *memory = NULL;
+ int ret;
+
+ if (mmu->type[type].type & NVKM_MEM_VRAM) {
+ ret = mmu->func->mem.vram(mmu, type, page, size,
+ argv, argc, &memory);
+ } else {
+ ret = nvkm_mem_new_host(mmu, type, page, size,
+ argv, argc, &memory);
+ }
+
+ if (ret)
+ nvkm_memory_unref(&memory);
+ *pmemory = memory;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
new file mode 100644
index 000000000000..234267e1b215
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_MEM_H__
+#define __NVKM_MEM_H__
+#include "priv.h"
+
+int nvkm_mem_new_type(struct nvkm_mmu *, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **);
+int nvkm_mem_map_host(struct nvkm_memory *, void **pmap);
+
+int nv04_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int nv04_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+
+int nv50_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int nv50_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+
+int gf100_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int gf100_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
new file mode 100644
index 000000000000..d9c9bee45222
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
+int
+gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ struct gf100_vmm_map_v0 uvmm = {};
+ union {
+ struct gf100_mem_map_vn vn;
+ struct gf100_mem_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ uvmm.ro = args->v0.ro;
+ uvmm.kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ } else
+ return ret;
+
+ ret = nvkm_vmm_get(bar, nvkm_memory_page(memory),
+ nvkm_memory_size(memory), pvma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+ if (ret)
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *psize = (*pvma)->size;
+ return 0;
+}
+
+int
+gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct gf100_mem_vn vn;
+ struct gf100_mem_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool contig;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ contig = args->v0.contig;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ contig = false;
+ } else
+ return ret;
+
+ if (mmu->type[type].type & (NVKM_MEM_DISP | NVKM_MEM_COMP))
+ type = NVKM_RAM_MM_NORMAL;
+ else
+ type = NVKM_RAM_MM_MIXED;
+
+ return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+ size, contig, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
new file mode 100644
index 000000000000..79a3b0cc9f5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/fb.h>
+
+#include <nvif/if000b.h>
+#include <nvif/unpack.h>
+
+int
+nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ union {
+ struct nv04_mem_map_vn vn;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ const u64 addr = nvkm_memory_addr(memory);
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + addr;
+ *psize = nvkm_memory_size(memory);
+ *pvma = ERR_PTR(-ENODEV);
+ return 0;
+}
+
+int
+nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct nv04_mem_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ return ret;
+
+ if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
+ type = NVKM_RAM_MM_NORMAL;
+ else
+ type = NVKM_RAM_MM_NOMAP;
+
+ return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+ size, true, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
new file mode 100644
index 000000000000..46759b89fc1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
+int
+nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ struct nv50_vmm_map_v0 uvmm = {};
+ union {
+ struct nv50_mem_map_vn vn;
+ struct nv50_mem_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ u64 size = nvkm_memory_size(memory);
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ uvmm.ro = args->v0.ro;
+ uvmm.kind = args->v0.kind;
+ uvmm.comp = args->v0.comp;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ } else
+ return ret;
+
+ ret = nvkm_vmm_get(bar, 12, size, pvma);
+ if (ret)
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *psize = (*pvma)->size;
+ return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+}
+
+int
+nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct nv50_mem_vn vn;
+ struct nv50_mem_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool contig;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ type = args->v0.bankswz ? 0x02 : 0x01;
+ contig = args->v0.contig;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ type = 0x01;
+ contig = false;
+ } else
+ return -ENOSYS;
+
+ return nvkm_ram_get(mmu->subdev.device, NVKM_RAM_MM_NORMAL, type,
+ page, size, contig, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index 37927c3fdc3e..d201c887c2cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -21,129 +21,21 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
-
-#define NV04_PDMA_SIZE (128 * 1024 * 1024)
-#define NV04_PDMA_PAGE ( 4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- pte = 0x00008 + (pte * 4);
- nvkm_kmap(pgt);
- while (cnt) {
- u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
- u32 phys = (u32)*list++;
- while (cnt && page--) {
- nvkm_wo32(pgt, pte, phys | 3);
- phys += NV04_PDMA_PAGE;
- pte += 4;
- cnt -= 1;
- }
- }
- nvkm_done(pgt);
-}
-
-static void
-nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- pte = 0x00008 + (pte * 4);
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte, 0x00000000);
- pte += 4;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv04_vm_flush(struct nvkm_vm *vm)
-{
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv04_mmu_oneinit(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_memory *dma;
- int ret;
-
- ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
- &mmu->vm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
- (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
- 16, true, &dma);
- mmu->vm->pgt[0].mem[0] = dma;
- mmu->vm->pgt[0].refcount[0] = 1;
- if (ret)
- return ret;
-
- nvkm_kmap(dma);
- nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
- nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
- nvkm_done(dma);
- return 0;
-}
-
-void *
-nv04_mmu_dtor(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- if (mmu->vm) {
- nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
- nvkm_vm_ref(NULL, &mmu->vm, NULL);
- }
- if (mmu->nullp) {
- dma_free_coherent(device->dev, 16 * 1024,
- mmu->nullp, mmu->null);
- }
- return mmu;
-}
-
-int
-nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
- int index, struct nvkm_mmu **pmmu)
-{
- struct nv04_mmu *mmu;
- if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
- return -ENOMEM;
- *pmmu = &mmu->base;
- nvkm_mmu_ctor(func, device, index, &mmu->base);
- return 0;
-}
+#include <nvif/class.h>
const struct nvkm_mmu_func
nv04_mmu = {
- .oneinit = nv04_mmu_oneinit,
- .dtor = nv04_mmu_dtor,
- .limit = NV04_PDMA_SIZE,
.dma_bits = 32,
- .pgt_bits = 32 - 12,
- .spg_shift = 12,
- .lpg_shift = 12,
- .map_sg = nv04_vm_map_sg,
- .unmap = nv04_vm_unmap,
- .flush = nv04_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
};
int
nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
- return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
deleted file mode 100644
index 9c35c43635c2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NV04_MMU_PRIV__
-#define __NV04_MMU_PRIV__
-#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
-#include "priv.h"
-
-struct nv04_mmu {
- struct nvkm_mmu base;
- struct nvkm_vm *vm;
- dma_addr_t null;
- void *nullp;
-};
-
-int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
- int index, struct nvkm_mmu **);
-void *nv04_mmu_dtor(struct nvkm_mmu *);
-
-extern const struct nvkm_mmu_func nv04_mmu;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index c6a26f907009..adca81895c09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -21,113 +21,29 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
#include <core/option.h>
-#include <subdev/timer.h>
-#define NV41_GART_SIZE (512 * 1024 * 1024)
-#define NV41_GART_PAGE ( 4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- pte = pte * 4;
- nvkm_kmap(pgt);
- while (cnt) {
- u32 page = PAGE_SIZE / NV41_GART_PAGE;
- u64 phys = (u64)*list++;
- while (cnt && page--) {
- nvkm_wo32(pgt, pte, (phys >> 7) | 1);
- phys += NV41_GART_PAGE;
- pte += 4;
- cnt -= 1;
- }
- }
- nvkm_done(pgt);
-}
-
-static void
-nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- pte = pte * 4;
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte, 0x00000000);
- pte += 4;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv41_vm_flush(struct nvkm_vm *vm)
-{
- struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
- struct nvkm_device *device = mmu->base.subdev.device;
-
- mutex_lock(&mmu->base.subdev.mutex);
- nvkm_wr32(device, 0x100810, 0x00000022);
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100810) & 0x00000020)
- break;
- );
- nvkm_wr32(device, 0x100810, 0x00000000);
- mutex_unlock(&mmu->base.subdev.mutex);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv41_mmu_oneinit(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- int ret;
-
- ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
- &mmu->vm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
- (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
- &mmu->vm->pgt[0].mem[0]);
- mmu->vm->pgt[0].refcount[0] = 1;
- return ret;
-}
+#include <nvif/class.h>
static void
-nv41_mmu_init(struct nvkm_mmu *base)
+nv41_mmu_init(struct nvkm_mmu *mmu)
{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
- nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+ struct nvkm_device *device = mmu->subdev.device;
+ nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);
nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
nvkm_wr32(device, 0x100820, 0x00000000);
}
static const struct nvkm_mmu_func
nv41_mmu = {
- .dtor = nv04_mmu_dtor,
- .oneinit = nv41_mmu_oneinit,
.init = nv41_mmu_init,
- .limit = NV41_GART_SIZE,
.dma_bits = 39,
- .pgt_bits = 32 - 12,
- .spg_shift = 12,
- .lpg_shift = 12,
- .map_sg = nv41_vm_map_sg,
- .unmap = nv41_vm_unmap,
- .flush = nv41_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
};
int
@@ -137,5 +53,5 @@ nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
return nv04_mmu_new(device, index, pmmu);
- return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv41_mmu, device, index, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index a648c2395545..598c53a27bde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -21,176 +21,18 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
#include <core/option.h>
-#include <subdev/timer.h>
-#define NV44_GART_SIZE (512 * 1024 * 1024)
-#define NV44_GART_PAGE ( 4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
- dma_addr_t *list, u32 pte, u32 cnt)
-{
- u32 base = (pte << 2) & ~0x0000000f;
- u32 tmp[4];
-
- tmp[0] = nvkm_ro32(pgt, base + 0x0);
- tmp[1] = nvkm_ro32(pgt, base + 0x4);
- tmp[2] = nvkm_ro32(pgt, base + 0x8);
- tmp[3] = nvkm_ro32(pgt, base + 0xc);
-
- while (cnt--) {
- u32 addr = list ? (*list++ >> 12) : (null >> 12);
- switch (pte++ & 0x3) {
- case 0:
- tmp[0] &= ~0x07ffffff;
- tmp[0] |= addr;
- break;
- case 1:
- tmp[0] &= ~0xf8000000;
- tmp[0] |= addr << 27;
- tmp[1] &= ~0x003fffff;
- tmp[1] |= addr >> 5;
- break;
- case 2:
- tmp[1] &= ~0xffc00000;
- tmp[1] |= addr << 22;
- tmp[2] &= ~0x0001ffff;
- tmp[2] |= addr >> 10;
- break;
- case 3:
- tmp[2] &= ~0xfffe0000;
- tmp[2] |= addr << 17;
- tmp[3] &= ~0x00000fff;
- tmp[3] |= addr >> 15;
- break;
- }
- }
-
- nvkm_wo32(pgt, base + 0x0, tmp[0]);
- nvkm_wo32(pgt, base + 0x4, tmp[1]);
- nvkm_wo32(pgt, base + 0x8, tmp[2]);
- nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
-}
-
-static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
- u32 tmp[4];
- int i;
-
- nvkm_kmap(pgt);
- if (pte & 3) {
- u32 max = 4 - (pte & 3);
- u32 part = (cnt > max) ? max : cnt;
- nv44_vm_fill(pgt, mmu->null, list, pte, part);
- pte += part;
- list += part;
- cnt -= part;
- }
-
- while (cnt >= 4) {
- for (i = 0; i < 4; i++)
- tmp[i] = *list++ >> 12;
- nvkm_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
- nvkm_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
- nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
- nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
- cnt -= 4;
- }
-
- if (cnt)
- nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
- nvkm_done(pgt);
-}
-
-static void
-nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
-
- nvkm_kmap(pgt);
- if (pte & 3) {
- u32 max = 4 - (pte & 3);
- u32 part = (cnt > max) ? max : cnt;
- nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
- pte += part;
- cnt -= part;
- }
-
- while (cnt >= 4) {
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- cnt -= 4;
- }
-
- if (cnt)
- nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
- nvkm_done(pgt);
-}
-
-static void
-nv44_vm_flush(struct nvkm_vm *vm)
-{
- struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
- struct nvkm_device *device = mmu->base.subdev.device;
- nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
- nvkm_wr32(device, 0x100808, 0x00000020);
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100808) & 0x00000001)
- break;
- );
- nvkm_wr32(device, 0x100808, 0x00000000);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv44_mmu_oneinit(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- int ret;
-
- mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
- &mmu->null, GFP_KERNEL);
- if (!mmu->nullp) {
- nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
- mmu->null = 0;
- }
-
- ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
- &mmu->vm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
- (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
- 512 * 1024, true,
- &mmu->vm->pgt[0].mem[0]);
- mmu->vm->pgt[0].refcount[0] = 1;
- return ret;
-}
+#include <nvif/class.h>
static void
-nv44_mmu_init(struct nvkm_mmu *base)
+nv44_mmu_init(struct nvkm_mmu *mmu)
{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
u32 addr;
/* calculate vram address of this PRAMIN block, object must be
@@ -198,11 +40,11 @@ nv44_mmu_init(struct nvkm_mmu *base)
* of 512KiB for this to work correctly
*/
addr = nvkm_rd32(device, 0x10020c);
- addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
+ addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;
nvkm_wr32(device, 0x100850, 0x80000000);
- nvkm_wr32(device, 0x100818, mmu->null);
- nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+ nvkm_wr32(device, 0x100818, mmu->vmm->null);
+ nvkm_wr32(device, 0x100804, (nvkm_memory_size(pt) / 4) * 4096);
nvkm_wr32(device, 0x100850, 0x00008000);
nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
nvkm_wr32(device, 0x100820, 0x00000000);
@@ -212,17 +54,11 @@ nv44_mmu_init(struct nvkm_mmu *base)
static const struct nvkm_mmu_func
nv44_mmu = {
- .dtor = nv04_mmu_dtor,
- .oneinit = nv44_mmu_oneinit,
.init = nv44_mmu_init,
- .limit = NV44_GART_SIZE,
.dma_bits = 39,
- .pgt_bits = 32 - 12,
- .spg_shift = 12,
- .lpg_shift = 12,
- .map_sg = nv44_vm_map_sg,
- .unmap = nv44_vm_unmap,
- .flush = nv44_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
};
int
@@ -232,5 +68,5 @@ nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
return nv04_mmu_new(device, index, pmmu);
- return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index a1f8d65f0276..db3dfbbb2aa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,207 +21,52 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
-#include <engine/gr.h>
+#include <nvif/class.h>
-static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
+const u8 *
+nv50_mmu_kind(struct nvkm_mmu *base, int *count)
{
- u64 phys = 0xdeadcafe00000000ULL;
- u32 coverage = 0;
-
- if (pgt[0]) {
- /* present, 4KiB pages */
- phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
- coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
- } else
- if (pgt[1]) {
- /* present, 64KiB pages */
- phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
- coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
- }
-
- if (phys & 1) {
- if (coverage <= 32 * 1024 * 1024)
- phys |= 0x60;
- else if (coverage <= 64 * 1024 * 1024)
- phys |= 0x40;
- else if (coverage <= 128 * 1024 * 1024)
- phys |= 0x20;
- }
-
- nvkm_kmap(pgd);
- nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
- nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
- nvkm_done(pgd);
-}
-
-static inline u64
-vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
-{
- phys |= 1; /* present */
- phys |= (u64)memtype << 40;
- phys |= target << 4;
- if (vma->access & NV_MEM_ACCESS_SYS)
- phys |= (1 << 6);
- if (!(vma->access & NV_MEM_ACCESS_WO))
- phys |= (1 << 3);
- return phys;
-}
-
-static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
- struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
- u32 comp = (mem->memtype & 0x180) >> 7;
- u32 block, target;
- int i;
-
- /* IGPs don't have real VRAM, re-target to stolen system memory */
- target = 0;
- if (ram->stolen) {
- phys += ram->stolen;
- target = 3;
- }
-
- phys = vm_addr(vma, phys, mem->memtype, target);
- pte <<= 3;
- cnt <<= 3;
-
- nvkm_kmap(pgt);
- while (cnt) {
- u32 offset_h = upper_32_bits(phys);
- u32 offset_l = lower_32_bits(phys);
-
- for (i = 7; i >= 0; i--) {
- block = 1 << (i + 3);
- if (cnt >= block && !(pte & (block - 1)))
- break;
- }
- offset_l |= (i << 7);
-
- phys += block << (vma->node->type - 3);
- cnt -= block;
- if (comp) {
- u32 tag = mem->tag->offset + ((delta >> 16) * comp);
- offset_h |= (tag << 17);
- delta += block << (vma->node->type - 3);
- }
-
- while (block) {
- nvkm_wo32(pgt, pte + 0, offset_l);
- nvkm_wo32(pgt, pte + 4, offset_h);
- pte += 8;
- block -= 8;
- }
- }
- nvkm_done(pgt);
-}
-
-static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
- pte <<= 3;
- nvkm_kmap(pgt);
- while (cnt--) {
- u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
- nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
- nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- pte <<= 3;
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte + 0, 0x00000000);
- nvkm_wo32(pgt, pte + 4, 0x00000000);
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv50_vm_flush(struct nvkm_vm *vm)
-{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_subdev *subdev = &mmu->subdev;
- struct nvkm_device *device = subdev->device;
- int i, vme;
-
- mutex_lock(&subdev->mutex);
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
- if (!atomic_read(&vm->engref[i]))
- continue;
-
- /* unfortunate hw bug workaround... */
- if (i == NVKM_ENGINE_GR && device->gr) {
- int ret = nvkm_gr_tlb_flush(device->gr);
- if (ret != -ENODEV)
- continue;
- }
-
- switch (i) {
- case NVKM_ENGINE_GR : vme = 0x00; break;
- case NVKM_ENGINE_VP :
- case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
- case NVKM_SUBDEV_BAR : vme = 0x06; break;
- case NVKM_ENGINE_MSPPP :
- case NVKM_ENGINE_MPEG : vme = 0x08; break;
- case NVKM_ENGINE_BSP :
- case NVKM_ENGINE_MSVLD : vme = 0x09; break;
- case NVKM_ENGINE_CIPHER:
- case NVKM_ENGINE_SEC : vme = 0x0a; break;
- case NVKM_ENGINE_CE0 : vme = 0x0d; break;
- default:
- continue;
- }
-
- nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
- if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
- break;
- ) < 0)
- nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
- }
- mutex_unlock(&subdev->mutex);
-}
-
-static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *key, struct nvkm_vm **pvm)
-{
- u32 block = (1 << (mmu->func->pgt_bits + 12));
- if (block > length)
- block = length;
-
- return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
+ /* 0x01: no bank swizzle
+ * 0x02: bank swizzled
+ * 0x7f: invalid
+ *
+ * 0x01/0x02 are values understood by the VRAM allocator,
+ * and are required to avoid mixing the two types within
+ * a certain range.
+ */
+ static const u8
+ kind[128] = {
+ 0x01, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x00 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x10 */
+ 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x20 */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x30 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, /* 0x40 */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x01, 0x01, 0x01, 0x7f, /* 0x50 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x60 */
+ 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
+ 0x01, 0x7f, 0x02, 0x7f, 0x01, 0x7f, 0x02, 0x7f, /* 0x70 */
+ 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
+ };
+ *count = ARRAY_SIZE(kind);
+ return kind;
}
static const struct nvkm_mmu_func
nv50_mmu = {
- .limit = (1ULL << 40),
.dma_bits = 40,
- .pgt_bits = 29 - 12,
- .spg_shift = 12,
- .lpg_shift = 16,
- .create = nv50_vm_create,
- .map_pgt = nv50_vm_map_pgt,
- .map = nv50_vm_map,
- .map_sg = nv50_vm_map_sg,
- .unmap = nv50_vm_unmap,
- .flush = nv50_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
+ .kind = nv50_mmu_kind,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index bf37f313b5bb..948a48c21be4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -10,31 +10,57 @@ int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
int index, struct nvkm_mmu **);
struct nvkm_mmu_func {
- void *(*dtor)(struct nvkm_mmu *);
- int (*oneinit)(struct nvkm_mmu *);
void (*init)(struct nvkm_mmu *);
- u64 limit;
u8 dma_bits;
- u32 pgt_bits;
- u8 spg_shift;
- u8 lpg_shift;
-
- int (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *, struct nvkm_vm **);
-
- void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
- struct nvkm_memory *pgt[2]);
- void (*map)(struct nvkm_vma *, struct nvkm_memory *,
- struct nvkm_mem *, u32 pte, u32 cnt,
- u64 phys, u64 delta);
- void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
- struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
- void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
- u32 pte, u32 cnt);
- void (*flush)(struct nvkm_vm *);
+
+ struct {
+ struct nvkm_sclass user;
+ } mmu;
+
+ struct {
+ struct nvkm_sclass user;
+ int (*vram)(struct nvkm_mmu *, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **);
+ int (*umap)(struct nvkm_mmu *, struct nvkm_memory *, void *argv,
+ u32 argc, u64 *addr, u64 *size, struct nvkm_vma **);
+ } mem;
+
+ struct {
+ struct nvkm_sclass user;
+ int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *,
+ const char *name, struct nvkm_vmm **);
+ bool global;
+ u32 pd_offset;
+ } vmm;
+
+ const u8 *(*kind)(struct nvkm_mmu *, int *count);
+ bool kind_sys;
+};
+
+extern const struct nvkm_mmu_func nv04_mmu;
+
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+
+struct nvkm_mmu_pt {
+ union {
+ struct nvkm_mmu_ptc *ptc;
+ struct nvkm_mmu_ptp *ptp;
+ };
+ struct nvkm_memory *memory;
+ bool sub;
+ u16 base;
+ u64 addr;
+ struct list_head head;
};
-int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
- struct lock_class_key *, struct nvkm_vm **);
+void nvkm_mmu_ptc_dump(struct nvkm_mmu *);
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *, u32 size, u32 align, bool zero);
+void nvkm_mmu_ptc_put(struct nvkm_mmu *, bool force, struct nvkm_mmu_pt **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
new file mode 100644
index 000000000000..fac2f9a45ea6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_umem;
+struct nvkm_memory *
+nvkm_umem_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_client *master = client->object.client;
+ struct nvkm_memory *memory = NULL;
+ struct nvkm_object *object;
+ struct nvkm_umem *umem;
+
+ object = nvkm_object_search(client, handle, &nvkm_umem);
+ if (IS_ERR(object)) {
+ if (client->super && client != master) {
+ spin_lock(&master->lock);
+ list_for_each_entry(umem, &master->umem, head) {
+ if (umem->object.object == handle) {
+ memory = nvkm_memory_ref(umem->memory);
+ break;
+ }
+ }
+ spin_unlock(&master->lock);
+ }
+ } else {
+ umem = nvkm_umem(object);
+ if (!umem->priv || client->super)
+ memory = nvkm_memory_ref(umem->memory);
+ }
+
+ return memory ? memory : ERR_PTR(-ENOENT);
+}
+
+static int
+nvkm_umem_unmap(struct nvkm_object *object)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+
+ if (!umem->map)
+ return -EEXIST;
+
+ if (umem->io) {
+ if (!IS_ERR(umem->bar)) {
+ struct nvkm_device *device = umem->mmu->subdev.device;
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
+ } else {
+ umem->bar = NULL;
+ }
+ } else {
+ vunmap(umem->map);
+ umem->map = NULL;
+ }
+
+ return 0;
+}
+
+static int
+nvkm_umem_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *handle, u64 *length)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+ struct nvkm_mmu *mmu = umem->mmu;
+
+ if (!umem->mappable)
+ return -EINVAL;
+ if (umem->map)
+ return -EEXIST;
+
+ if ((umem->type & NVKM_MEM_HOST) && !argc) {
+ int ret = nvkm_mem_map_host(umem->memory, &umem->map);
+ if (ret)
+ return ret;
+
+ *handle = (unsigned long)(void *)umem->map;
+ *length = nvkm_memory_size(umem->memory);
+ *type = NVKM_OBJECT_MAP_VA;
+ return 0;
+ } else
+ if ((umem->type & NVKM_MEM_VRAM) ||
+ (umem->type & NVKM_MEM_KIND)) {
+ int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
+ handle, length, &umem->bar);
+ if (ret)
+ return ret;
+
+ *type = NVKM_OBJECT_MAP_IO;
+ } else {
+ return -EINVAL;
+ }
+
+ umem->io = (*type == NVKM_OBJECT_MAP_IO);
+ return 0;
+}
+
+static void *
+nvkm_umem_dtor(struct nvkm_object *object)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+ spin_lock(&umem->object.client->lock);
+ list_del_init(&umem->head);
+ spin_unlock(&umem->object.client->lock);
+ nvkm_memory_unref(&umem->memory);
+ return umem;
+}
+
+static const struct nvkm_object_func
+nvkm_umem = {
+ .dtor = nvkm_umem_dtor,
+ .map = nvkm_umem_map,
+ .unmap = nvkm_umem_unmap,
+};
+
+int
+nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+ union {
+ struct nvif_mem_v0 v0;
+ } *args = argv;
+ struct nvkm_umem *umem;
+ int type, ret = -ENOSYS;
+ u8 page;
+ u64 size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ type = args->v0.type;
+ page = args->v0.page;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (type >= mmu->type_nr)
+ return -EINVAL;
+
+ if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
+ umem->mmu = mmu;
+ umem->type = mmu->type[type].type;
+ umem->priv = oclass->client->super;
+ INIT_LIST_HEAD(&umem->head);
+ *pobject = &umem->object;
+
+ if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
+ page = max_t(u8, page, PAGE_SHIFT);
+ umem->mappable = true;
+ }
+
+ ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
+ &umem->memory);
+ if (ret)
+ return ret;
+
+ spin_lock(&umem->object.client->lock);
+ list_add(&umem->head, &umem->object.client->umem);
+ spin_unlock(&umem->object.client->lock);
+
+ args->v0.page = nvkm_memory_page(umem->memory);
+ args->v0.addr = nvkm_memory_addr(umem->memory);
+ args->v0.size = nvkm_memory_size(umem->memory);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
new file mode 100644
index 000000000000..85cf692d620a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_UMEM_H__
+#define __NVKM_UMEM_H__
+#define nvkm_umem(p) container_of((p), struct nvkm_umem, object)
+#include <core/object.h>
+#include "mem.h"
+
+struct nvkm_umem {
+ struct nvkm_object object;
+ struct nvkm_mmu *mmu;
+ u8 type:8;
+ bool priv:1;
+ bool mappable:1;
+ bool io:1;
+
+ struct nvkm_memory *memory;
+ struct list_head head;
+
+ union {
+ struct nvkm_vma *bar;
+ void *map;
+ };
+};
+
+int nvkm_umem_new(const struct nvkm_oclass *, void *argv, u32 argc,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
new file mode 100644
index 000000000000..353f10f92b77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ummu.h"
+#include "umem.h"
+#include "uvmm.h"
+
+#include <core/client.h>
+
+#include <nvif/if0008.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_ummu_sclass(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
+
+ if (mmu->func->mem.user.oclass && oclass->client->super) {
+ if (index-- == 0) {
+ oclass->base = mmu->func->mem.user;
+ oclass->ctor = nvkm_umem_new;
+ return 0;
+ }
+ }
+
+ if (mmu->func->vmm.user.oclass) {
+ if (index-- == 0) {
+ oclass->base = mmu->func->vmm.user;
+ oclass->ctor = nvkm_uvmm_new;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_heap_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u8 index;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= mmu->heap_nr)
+ return -EINVAL;
+ args->v0.size = mmu->heap[index].size;
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_type_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u8 type, index;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= mmu->type_nr)
+ return -EINVAL;
+ type = mmu->type[index].type;
+ args->v0.heap = mmu->type[index].heap;
+ args->v0.vram = !!(type & NVKM_MEM_VRAM);
+ args->v0.host = !!(type & NVKM_MEM_HOST);
+ args->v0.comp = !!(type & NVKM_MEM_COMP);
+ args->v0.disp = !!(type & NVKM_MEM_DISP);
+ args->v0.kind = !!(type & NVKM_MEM_KIND);
+ args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
+ args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
+ args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_kind_v0 v0;
+ } *args = argv;
+ const u8 *kind = NULL;
+ int ret = -ENOSYS, count = 0;
+
+ if (mmu->func->kind)
+ kind = mmu->func->kind(mmu, &count);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ if (argc != args->v0.count * sizeof(*args->v0.data))
+ return -EINVAL;
+ if (args->v0.count > count)
+ return -EINVAL;
+ memcpy(args->v0.data, kind, args->v0.count);
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+ struct nvkm_ummu *ummu = nvkm_ummu(object);
+ switch (mthd) {
+ case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
+ case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
+ case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_ummu = {
+ .mthd = nvkm_ummu_mthd,
+ .sclass = nvkm_ummu_sclass,
+};
+
+int
+nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ union {
+ struct nvif_mmu_v0 v0;
+ } *args = argv;
+ struct nvkm_mmu *mmu = device->mmu;
+ struct nvkm_ummu *ummu;
+ int ret = -ENOSYS, kinds = 0;
+
+ if (mmu->func->kind)
+ mmu->func->kind(mmu, &kinds);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ args->v0.dmabits = mmu->dma_bits;
+ args->v0.heap_nr = mmu->heap_nr;
+ args->v0.type_nr = mmu->type_nr;
+ args->v0.kind_nr = kinds;
+ } else
+ return ret;
+
+ if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
+ ummu->mmu = mmu;
+ *pobject = &ummu->object;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
new file mode 100644
index 000000000000..0cd510dcfc68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UMMU_H__
+#define __NVKM_UMMU_H__
+#define nvkm_ummu(p) container_of((p), struct nvkm_ummu, object)
+#include <core/object.h>
+#include "priv.h"
+
+struct nvkm_ummu {
+ struct nvkm_object object;
+ struct nvkm_mmu *mmu;
+};
+
+int nvkm_ummu_new(struct nvkm_device *, const struct nvkm_oclass *,
+ void *argv, u32 argc, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
new file mode 100644
index 000000000000..fa81d0c1ba41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "uvmm.h"
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+
+#include <nvif/if000c.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_uvmm;
+struct nvkm_vmm *
+nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_object *object;
+
+ object = nvkm_object_search(client, handle, &nvkm_uvmm);
+ if (IS_ERR(object))
+ return (void *)object;
+
+ return nvkm_uvmm(object)->vmm;
+}
+
+static int
+nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_unmap_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ u64 addr;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex);
+ vma = nvkm_vmm_node_search(vmm, addr);
+ if (ret = -ENOENT, !vma || vma->addr != addr) {
+ VMM_DEBUG(vmm, "lookup %016llx: %016llx",
+ addr, vma ? vma->addr : ~0ULL);
+ goto done;
+ }
+
+ if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+ vma->user, !client->super, vma->busy);
+ goto done;
+ }
+
+ if (ret = -EINVAL, !vma->memory) {
+ VMM_DEBUG(vmm, "unmapped");
+ goto done;
+ }
+
+ nvkm_vmm_unmap_locked(vmm, vma);
+ ret = 0;
+done:
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_map_v0 v0;
+ } *args = argv;
+ u64 addr, size, handle, offset;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ struct nvkm_memory *memory;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ addr = args->v0.addr;
+ size = args->v0.size;
+ handle = args->v0.memory;
+ offset = args->v0.offset;
+ } else
+ return ret;
+
+ if (IS_ERR((memory = nvkm_umem_search(client, handle)))) {
+ VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
+ return PTR_ERR(memory);
+ }
+
+ mutex_lock(&vmm->mutex);
+ if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
+ VMM_DEBUG(vmm, "lookup %016llx", addr);
+ goto fail;
+ }
+
+ if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+ vma->user, !client->super, vma->busy);
+ goto fail;
+ }
+
+ if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
+ if (addr + size > vma->addr + vma->size || vma->memory ||
+ (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
+ VMM_DEBUG(vmm, "split %d %d %d "
+ "%016llx %016llx %016llx %016llx",
+ !!vma->memory, vma->refd, vma->mapref,
+ addr, size, vma->addr, (u64)vma->size);
+ goto fail;
+ }
+
+ if (vma->addr != addr) {
+ const u64 tail = vma->size + vma->addr - addr;
+ if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
+ goto fail;
+ vma->part = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ }
+
+ if (vma->size != size) {
+ const u64 tail = vma->size - size;
+ struct nvkm_vma *tmp;
+ if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
+ nvkm_vmm_unmap_region(vmm, vma);
+ goto fail;
+ }
+ tmp->part = true;
+ nvkm_vmm_node_insert(vmm, tmp);
+ }
+ }
+ vma->busy = true;
+ mutex_unlock(&vmm->mutex);
+
+ ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
+ if (ret == 0) {
+ /* Successful map will clear vma->busy. */
+ nvkm_memory_unref(&memory);
+ return 0;
+ }
+
+ mutex_lock(&vmm->mutex);
+ vma->busy = false;
+ nvkm_vmm_unmap_region(vmm, vma);
+fail:
+ mutex_unlock(&vmm->mutex);
+ nvkm_memory_unref(&memory);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_put_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ u64 addr;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex);
+ vma = nvkm_vmm_node_search(vmm, args->v0.addr);
+ if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
+ VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
+ vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
+ goto done;
+ }
+
+ if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+ vma->user, !client->super, vma->busy);
+ goto done;
+ }
+
+ nvkm_vmm_put_locked(vmm, vma);
+ ret = 0;
+done:
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_get_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ bool getref, mapref, sparse;
+ u8 page, align;
+ u64 size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
+ mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
+ sparse = args->v0.sparse;
+ page = args->v0.page;
+ align = args->v0.align;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
+ page, align, size, &vma);
+ mutex_unlock(&vmm->mutex);
+ if (ret)
+ return ret;
+
+ args->v0.addr = vma->addr;
+ vma->user = !client->super;
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_page_v0 v0;
+ } *args = argv;
+ const struct nvkm_vmm_page *page;
+ int ret = -ENOSYS;
+ u8 type, index, nr;
+
+ page = uvmm->vmm->func->page;
+ for (nr = 0; page[nr].shift; nr++);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= nr)
+ return -EINVAL;
+ type = page[index].type;
+ args->v0.shift = page[index].shift;
+ args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
+ args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
+ args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
+ args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
+ } else
+ return -ENOSYS;
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+ struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+ switch (mthd) {
+ case NVIF_VMM_V0_PAGE : return nvkm_uvmm_mthd_page (uvmm, argv, argc);
+ case NVIF_VMM_V0_GET : return nvkm_uvmm_mthd_get (uvmm, argv, argc);
+ case NVIF_VMM_V0_PUT : return nvkm_uvmm_mthd_put (uvmm, argv, argc);
+ case NVIF_VMM_V0_MAP : return nvkm_uvmm_mthd_map (uvmm, argv, argc);
+ case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static void *
+nvkm_uvmm_dtor(struct nvkm_object *object)
+{
+ struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+ nvkm_vmm_unref(&uvmm->vmm);
+ return uvmm;
+}
+
+static const struct nvkm_object_func
+nvkm_uvmm = {
+ .dtor = nvkm_uvmm_dtor,
+ .mthd = nvkm_uvmm_mthd,
+};
+
+int
+nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+ const bool more = oclass->base.maxver >= 0;
+ union {
+ struct nvif_vmm_v0 v0;
+ } *args = argv;
+ const struct nvkm_vmm_page *page;
+ struct nvkm_uvmm *uvmm;
+ int ret = -ENOSYS;
+ u64 addr, size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
+ addr = args->v0.addr;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
+ *pobject = &uvmm->object;
+
+ if (!mmu->vmm) {
+ ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
+ NULL, "user", &uvmm->vmm);
+ if (ret)
+ return ret;
+
+ uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
+ } else {
+ if (size)
+ return -EINVAL;
+
+ uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
+ }
+
+ page = uvmm->vmm->func->page;
+ args->v0.page_nr = 0;
+ while (page && (page++)->shift)
+ args->v0.page_nr++;
+ args->v0.addr = uvmm->vmm->start;
+ args->v0.size = uvmm->vmm->limit;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
new file mode 100644
index 000000000000..71dab55e18a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UVMM_H__
+#define __NVKM_UVMM_H__
+#define nvkm_uvmm(p) container_of((p), struct nvkm_uvmm, object)
+#include <core/object.h>
+#include "vmm.h"
+
+struct nvkm_uvmm {
+ struct nvkm_object object;
+ struct nvkm_vmm *vmm;
+};
+
+int nvkm_uvmm_new(const struct nvkm_oclass *, void *argv, u32 argc,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
new file mode 100644
index 000000000000..e35d3e17cd7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -0,0 +1,1513 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define NVKM_VMM_LEVELS_MAX 5
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+static void
+nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
+{
+ struct nvkm_vmm_pt *pgt = *ppgt;
+ if (pgt) {
+ kvfree(pgt->pde);
+ kfree(pgt);
+ *ppgt = NULL;
+ }
+}
+
+
+static struct nvkm_vmm_pt *
+nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
+ const struct nvkm_vmm_page *page)
+{
+ const u32 pten = 1 << desc->bits;
+ struct nvkm_vmm_pt *pgt;
+ u32 lpte = 0;
+
+ if (desc->type > PGT) {
+ if (desc->type == SPT) {
+ const struct nvkm_vmm_desc *pair = page[-1].desc;
+ lpte = pten >> (desc->bits - pair->bits);
+ } else {
+ lpte = pten;
+ }
+ }
+
+ if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
+ return NULL;
+ pgt->page = page ? page->shift : 0;
+ pgt->sparse = sparse;
+
+ if (desc->type == PGD) {
+ pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
+ if (!pgt->pde) {
+ kfree(pgt);
+ return NULL;
+ }
+ }
+
+ return pgt;
+}
+
+struct nvkm_vmm_iter {
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm *vmm;
+ u64 cnt;
+ u16 max, lvl;
+ u32 pte[NVKM_VMM_LEVELS_MAX];
+ struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
+ int flush;
+};
+
+#ifdef CONFIG_NOUVEAU_DEBUG_MMU
+static const char *
+nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
+{
+ switch (desc->type) {
+ case PGD: return "PGD";
+ case PGT: return "PGT";
+ case SPT: return "SPT";
+ case LPT: return "LPT";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void
+nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
+{
+ int lvl;
+ for (lvl = it->max; lvl >= 0; lvl--) {
+ if (lvl >= it->lvl)
+ buf += sprintf(buf, "%05x:", it->pte[lvl]);
+ else
+ buf += sprintf(buf, "xxxxx:");
+ }
+}
+
+#define TRA(i,f,a...) do { \
+ char _buf[NVKM_VMM_LEVELS_MAX * 7]; \
+ struct nvkm_vmm_iter *_it = (i); \
+ nvkm_vmm_trace(_it, _buf); \
+ VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
+} while(0)
+#else
+#define TRA(i,f,a...)
+#endif
+
+static inline void
+nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
+{
+ it->flush = min(it->flush, it->max - it->lvl);
+}
+
+static inline void
+nvkm_vmm_flush(struct nvkm_vmm_iter *it)
+{
+ if (it->flush != NVKM_VMM_LEVELS_MAX) {
+ if (it->vmm->func->flush) {
+ TRA(it, "flush: %d", it->flush);
+ it->vmm->func->flush(it->vmm, it->flush);
+ }
+ it->flush = NVKM_VMM_LEVELS_MAX;
+ }
+}
+
+static void
+nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc[it->lvl].type == SPT;
+ struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
+ struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
+ struct nvkm_mmu_pt *pt = pgt->pt[type];
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 pdei = it->pte[it->lvl + 1];
+
+ /* Recurse up the tree, unreferencing/destroying unneeded PDs. */
+ it->lvl++;
+ if (--pgd->refs[0]) {
+ const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
+ /* PD has other valid PDEs, so we need a proper update. */
+ TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+ pgt->pt[type] = NULL;
+ if (!pgt->refs[!type]) {
+ /* PDE no longer required. */
+ if (pgd->pt[0]) {
+ if (pgt->sparse) {
+ func->sparse(vmm, pgd->pt[0], pdei, 1);
+ pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
+ } else {
+ func->unmap(vmm, pgd->pt[0], pdei, 1);
+ pgd->pde[pdei] = NULL;
+ }
+ } else {
+ /* Special handling for Tesla-class GPUs,
+ * where there's no central PD, but each
+ * instance has its own embedded PD.
+ */
+ func->pde(vmm, pgd, pdei);
+ pgd->pde[pdei] = NULL;
+ }
+ } else {
+ /* PDE was pointing at dual-PTs and we're removing
+ * one of them, leaving the other in place.
+ */
+ func->pde(vmm, pgd, pdei);
+ }
+
+ /* GPU may have cached the PTs, flush before freeing. */
+ nvkm_vmm_flush_mark(it);
+ nvkm_vmm_flush(it);
+ } else {
+ /* PD has no valid PDEs left, so we can just destroy it. */
+ nvkm_vmm_unref_pdes(it);
+ }
+
+ /* Destroy PD/PT. */
+ TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+ nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
+ if (!pgt->refs[!type])
+ nvkm_vmm_pt_del(&pgt);
+ it->lvl--;
+}
+
+static void
+nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+ const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+ const u32 sptb = desc->bits - pair->bits;
+ const u32 sptn = 1 << sptb;
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+ /* Determine how many SPTEs are being touched under each LPTE,
+ * and drop reference counts.
+ */
+ for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+ const u32 pten = min(sptn - spti, ptes);
+ pgt->pte[lpti] -= pten;
+ ptes -= pten;
+ }
+
+ /* We're done here if there's no corresponding LPT. */
+ if (!pgt->refs[0])
+ return;
+
+ for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+ /* Skip over any LPTEs that still have valid SPTEs. */
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
+ break;
+ }
+ continue;
+ }
+
+ /* As there's no more non-UNMAPPED SPTEs left in the range
+ * covered by a number of LPTEs, the LPTEs once again take
+ * control over their address range.
+ *
+ * Determine how many LPTEs need to transition state.
+ */
+ pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
+ break;
+ pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ }
+
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
+ pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
+ } else
+ if (pair->func->invalid) {
+ /* If the MMU supports it, restore the LPTE to the
+ * INVALID state to tell the MMU there is no point
+ * trying to fetch the corresponding SPTEs.
+ */
+ TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
+ pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
+ }
+ }
+}
+
+static bool
+nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = it->pt[0];
+
+ /* Drop PTE references. */
+ pgt->refs[type] -= ptes;
+
+ /* Dual-PTs need special handling, unless PDE becoming invalid. */
+ if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
+ nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
+
+ /* PT no longer neeed? Destroy it. */
+ if (!pgt->refs[type]) {
+ it->lvl++;
+ TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
+ it->lvl--;
+ nvkm_vmm_unref_pdes(it);
+ return false; /* PTE writes for unmap() not necessary. */
+ }
+
+ return true;
+}
+
+static void
+nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+ const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+ const u32 sptb = desc->bits - pair->bits;
+ const u32 sptn = 1 << sptb;
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+ /* Determine how many SPTEs are being touched under each LPTE,
+ * and increase reference counts.
+ */
+ for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+ const u32 pten = min(sptn - spti, ptes);
+ pgt->pte[lpti] += pten;
+ ptes -= pten;
+ }
+
+ /* We're done here if there's no corresponding LPT. */
+ if (!pgt->refs[0])
+ return;
+
+ for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+ /* Skip over any LPTEs that already have valid SPTEs. */
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
+ break;
+ }
+ continue;
+ }
+
+ /* As there are now non-UNMAPPED SPTEs in the range covered
+ * by a number of LPTEs, we need to transfer control of the
+ * address range to the SPTEs.
+ *
+ * Determine how many LPTEs need to transition state.
+ */
+ pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
+ break;
+ pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ }
+
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ const u32 spti = pteb * sptn;
+ const u32 sptc = ptes * sptn;
+ /* The entire LPTE is marked as sparse, we need
+ * to make sure that the SPTEs are too.
+ */
+ TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
+ desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
+ /* Sparse LPTEs prevent SPTEs from being accessed. */
+ TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
+ pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+ } else
+ if (pair->func->invalid) {
+ /* MMU supports blocking SPTEs by marking an LPTE
+ * as INVALID. We need to reverse that here.
+ */
+ TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
+ pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+ }
+ }
+}
+
+static bool
+nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = it->pt[0];
+
+ /* Take PTE references. */
+ pgt->refs[type] += ptes;
+
+ /* Dual-PTs need special handling. */
+ if (desc->type == SPT)
+ nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
+
+ return true;
+}
+
+static void
+nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
+ struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
+{
+ if (desc->type == PGD) {
+ while (ptes--)
+ pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
+ } else
+ if (desc->type == LPT) {
+ memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
+ }
+}
+
+static bool
+nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ struct nvkm_vmm_pt *pt = it->pt[0];
+ if (it->desc->type == PGD)
+ memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
+ else
+ if (it->desc->type == LPT)
+ memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
+ return nvkm_vmm_unref_ptes(it, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
+ return nvkm_vmm_ref_ptes(it, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ const bool zero = !pgt->sparse && !desc->func->invalid;
+ struct nvkm_vmm *vmm = it->vmm;
+ struct nvkm_mmu *mmu = vmm->mmu;
+ struct nvkm_mmu_pt *pt;
+ u32 pten = 1 << desc->bits;
+ u32 pteb, ptei, ptes;
+ u32 size = desc->size * pten;
+
+ pgd->refs[0]++;
+
+ pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
+ if (!pgt->pt[type]) {
+ it->lvl--;
+ nvkm_vmm_unref_pdes(it);
+ return false;
+ }
+
+ if (zero)
+ goto done;
+
+ pt = pgt->pt[type];
+
+ if (desc->type == LPT && pgt->refs[1]) {
+ /* SPT already exists covering the same range as this LPT,
+ * which means we need to be careful that any LPTEs which
+ * overlap valid SPTEs are unmapped as opposed to invalid
+ * or sparse, which would prevent the MMU from looking at
+ * the SPTEs on some GPUs.
+ */
+ for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
+ bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
+ bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ if (spte != next)
+ break;
+ }
+
+ if (!spte) {
+ if (pgt->sparse)
+ desc->func->sparse(vmm, pt, pteb, ptes);
+ else
+ desc->func->invalid(vmm, pt, pteb, ptes);
+ memset(&pgt->pte[pteb], 0x00, ptes);
+ } else {
+ desc->func->unmap(vmm, pt, pteb, ptes);
+ while (ptes--)
+ pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
+ }
+ }
+ } else {
+ if (pgt->sparse) {
+ nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
+ desc->func->sparse(vmm, pt, 0, pten);
+ } else {
+ desc->func->invalid(vmm, pt, 0, pten);
+ }
+ }
+
+done:
+ TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
+ it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
+ nvkm_vmm_flush_mark(it);
+ return true;
+}
+
+static bool
+nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+
+ pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
+ if (!pgt) {
+ if (!pgd->refs[0])
+ nvkm_vmm_unref_pdes(it);
+ return false;
+ }
+
+ pgd->pde[pdei] = pgt;
+ return true;
+}
+
+static inline u64
+nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, const char *name, bool ref,
+ bool (*REF_PTES)(struct nvkm_vmm_iter *, u32, u32),
+ nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
+ nvkm_vmm_pxe_func CLR_PTES)
+{
+ const struct nvkm_vmm_desc *desc = page->desc;
+ struct nvkm_vmm_iter it;
+ u64 bits = addr >> page->shift;
+
+ it.page = page;
+ it.desc = desc;
+ it.vmm = vmm;
+ it.cnt = size >> page->shift;
+ it.flush = NVKM_VMM_LEVELS_MAX;
+
+ /* Deconstruct address into PTE indices for each mapping level. */
+ for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
+ it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
+ bits >>= desc[it.lvl].bits;
+ }
+ it.max = --it.lvl;
+ it.pt[it.max] = vmm->pd;
+
+ it.lvl = 0;
+ TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
+ addr, size, page->shift, it.cnt);
+ it.lvl = it.max;
+
+ /* Depth-first traversal of page tables. */
+ while (it.cnt) {
+ struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
+ const int type = desc->type == SPT;
+ const u32 pten = 1 << desc->bits;
+ const u32 ptei = it.pte[0];
+ const u32 ptes = min_t(u64, it.cnt, pten - ptei);
+
+ /* Walk down the tree, finding page tables for each level. */
+ for (; it.lvl; it.lvl--) {
+ const u32 pdei = it.pte[it.lvl];
+ struct nvkm_vmm_pt *pgd = pgt;
+
+ /* Software PT. */
+ if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
+ if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
+ goto fail;
+ }
+ it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
+
+ /* Hardware PT.
+ *
+ * This is a separate step from above due to GF100 and
+ * newer having dual page tables at some levels, which
+ * are refcounted independently.
+ */
+ if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
+ if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
+ goto fail;
+ }
+ }
+
+ /* Handle PTE updates. */
+ if (!REF_PTES || REF_PTES(&it, ptei, ptes)) {
+ struct nvkm_mmu_pt *pt = pgt->pt[type];
+ if (MAP_PTES || CLR_PTES) {
+ if (MAP_PTES)
+ MAP_PTES(vmm, pt, ptei, ptes, map);
+ else
+ CLR_PTES(vmm, pt, ptei, ptes);
+ nvkm_vmm_flush_mark(&it);
+ }
+ }
+
+ /* Walk back up the tree to the next position. */
+ it.pte[it.lvl] += ptes;
+ it.cnt -= ptes;
+ if (it.cnt) {
+ while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
+ it.pte[it.lvl++] = 0;
+ it.pte[it.lvl]++;
+ }
+ }
+ };
+
+ nvkm_vmm_flush(&it);
+ return ~0ULL;
+
+fail:
+ /* Reconstruct the failure address so the caller is able to
+ * reverse any partially completed operations.
+ */
+ addr = it.pte[it.max--];
+ do {
+ addr = addr << desc[it.max].bits;
+ addr |= it.pte[it.max];
+ } while (it.max--);
+
+ return addr << page->shift;
+}
+
+static void
+nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false,
+ nvkm_vmm_sparse_unref_ptes, NULL, NULL,
+ page->desc->func->invalid ?
+ page->desc->func->invalid : page->desc->func->unmap);
+}
+
+static int
+nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
+ true, nvkm_vmm_sparse_ref_ptes, NULL,
+ NULL, page->desc->func->sparse);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
+ return -ENOMEM;
+ }
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ int m = 0, i;
+ u64 start = addr;
+ u64 block;
+
+ while (size) {
+ /* Limit maximum page size based on remaining size. */
+ while (size < (1ULL << page[m].shift))
+ m++;
+ i = m;
+
+ /* Find largest page size suitable for alignment. */
+ while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
+ i++;
+
+ /* Determine number of PTEs at this page size. */
+ if (i != m) {
+ /* Limited to alignment boundary of next page size. */
+ u64 next = 1ULL << page[i - 1].shift;
+ u64 part = ALIGN(addr, next) - addr;
+ if (size - part >= next)
+ block = (part >> page[i].shift) << page[i].shift;
+ else
+ block = (size >> page[i].shift) << page[i].shift;
+ } else {
+ block = (size >> page[i].shift) << page[i].shift;;
+ }
+
+ /* Perform operation. */
+ if (ref) {
+ int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
+ if (ret) {
+ if ((size = addr - start))
+ nvkm_vmm_ptes_sparse(vmm, start, size, false);
+ return ret;
+ }
+ } else {
+ nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
+ }
+
+ size -= block;
+ addr += block;
+ }
+
+ return 0;
+}
+
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+ false, nvkm_vmm_unref_ptes, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+}
+
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+ nvkm_vmm_ref_ptes, func, map, NULL);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void
+nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+}
+
+static void
+nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "map", false,
+ NULL, func, map, NULL);
+}
+
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "unref", false,
+ nvkm_vmm_unref_ptes, NULL, NULL, NULL);
+}
+
+static int
+nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true,
+ nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+ if (fail != ~0ULL) {
+ if (fail != addr)
+ nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static inline struct nvkm_vma *
+nvkm_vma_new(u64 addr, u64 size)
+{
+ struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma) {
+ vma->addr = addr;
+ vma->size = size;
+ vma->page = NVKM_VMA_PAGE_NONE;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ }
+ return vma;
+}
+
+struct nvkm_vma *
+nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
+{
+ struct nvkm_vma *new;
+
+ BUG_ON(vma->size == tail);
+
+ if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
+ return NULL;
+ vma->size -= tail;
+
+ new->mapref = vma->mapref;
+ new->sparse = vma->sparse;
+ new->page = vma->page;
+ new->refd = vma->refd;
+ new->used = vma->used;
+ new->part = vma->part;
+ new->user = vma->user;
+ new->busy = vma->busy;
+ list_add(&new->head, &vma->head);
+ return new;
+}
+
+static void
+nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct rb_node **ptr = &vmm->free.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*ptr) {
+ struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+ parent = *ptr;
+ if (vma->size < this->size)
+ ptr = &parent->rb_left;
+ else
+ if (vma->size > this->size)
+ ptr = &parent->rb_right;
+ else
+ if (vma->addr < this->addr)
+ ptr = &parent->rb_left;
+ else
+ if (vma->addr > this->addr)
+ ptr = &parent->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&vma->tree, parent, ptr);
+ rb_insert_color(&vma->tree, &vmm->free);
+}
+
+void
+nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct rb_node **ptr = &vmm->root.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*ptr) {
+ struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+ parent = *ptr;
+ if (vma->addr < this->addr)
+ ptr = &parent->rb_left;
+ else
+ if (vma->addr > this->addr)
+ ptr = &parent->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&vma->tree, parent, ptr);
+ rb_insert_color(&vma->tree, &vmm->root);
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
+{
+ struct rb_node *node = vmm->root.rb_node;
+ while (node) {
+ struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+ if (addr < vma->addr)
+ node = node->rb_left;
+ else
+ if (addr >= vma->addr + vma->size)
+ node = node->rb_right;
+ else
+ return vma;
+ }
+ return NULL;
+}
+
+static void
+nvkm_vmm_dtor(struct nvkm_vmm *vmm)
+{
+ struct nvkm_vma *vma;
+ struct rb_node *node;
+
+ while ((node = rb_first(&vmm->root))) {
+ struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+ nvkm_vmm_put(vmm, &vma);
+ }
+
+ if (vmm->bootstrapped) {
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ const u64 limit = vmm->limit - vmm->start;
+
+ while (page[1].shift)
+ page++;
+
+ nvkm_mmu_ptc_dump(vmm->mmu);
+ nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
+ }
+
+ vma = list_first_entry(&vmm->list, typeof(*vma), head);
+ list_del(&vma->head);
+ kfree(vma);
+ WARN_ON(!list_empty(&vmm->list));
+
+ if (vmm->nullp) {
+ dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
+ vmm->nullp, vmm->null);
+ }
+
+ if (vmm->pd) {
+ nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
+ nvkm_vmm_pt_del(&vmm->pd);
+ }
+}
+
+int
+nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 pd_header, u64 addr, u64 size, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm *vmm)
+{
+ static struct lock_class_key _key;
+ const struct nvkm_vmm_page *page = func->page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vma *vma;
+ int levels, bits = 0;
+
+ vmm->func = func;
+ vmm->mmu = mmu;
+ vmm->name = name;
+ vmm->debug = mmu->subdev.debug;
+ kref_init(&vmm->kref);
+
+ __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
+
+ /* Locate the smallest page size supported by the backend, it will
+ * have the the deepest nesting of page tables.
+ */
+ while (page[1].shift)
+ page++;
+
+ /* Locate the structure that describes the layout of the top-level
+ * page table, and determine the number of valid bits in a virtual
+ * address.
+ */
+ for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
+ bits += desc->bits;
+ bits += page->shift;
+ desc--;
+
+ if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
+ return -EINVAL;
+
+ vmm->start = addr;
+ vmm->limit = size ? (addr + size) : (1ULL << bits);
+ if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
+ return -EINVAL;
+
+ /* Allocate top-level page table. */
+ vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
+ if (!vmm->pd)
+ return -ENOMEM;
+ vmm->pd->refs[0] = 1;
+ INIT_LIST_HEAD(&vmm->join);
+
+ /* ... and the GPU storage for it, except on Tesla-class GPUs that
+ * have the PD embedded in the instance structure.
+ */
+ if (desc->size) {
+ const u32 size = pd_header + desc->size * (1 << desc->bits);
+ vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
+ if (!vmm->pd->pt[0])
+ return -ENOMEM;
+ }
+
+ /* Initialise address-space MM. */
+ INIT_LIST_HEAD(&vmm->list);
+ vmm->free = RB_ROOT;
+ vmm->root = RB_ROOT;
+
+ if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
+ return -ENOMEM;
+
+ nvkm_vmm_free_insert(vmm, vma);
+ list_add(&vma->head, &vmm->list);
+ return 0;
+}
+
+int
+nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 hdr, u64 addr, u64 size, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
+}
+
+#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL : \
+ list_entry((root)->head.dir, struct nvkm_vma, head)
+
+void
+nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct nvkm_vma *next;
+
+ nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+ nvkm_memory_unref(&vma->memory);
+
+ if (vma->part) {
+ struct nvkm_vma *prev = node(vma, prev);
+ if (!prev->memory) {
+ prev->size += vma->size;
+ rb_erase(&vma->tree, &vmm->root);
+ list_del(&vma->head);
+ kfree(vma);
+ vma = prev;
+ }
+ }
+
+ next = node(vma, next);
+ if (next && next->part) {
+ if (!next->memory) {
+ vma->size += next->size;
+ rb_erase(&next->tree, &vmm->root);
+ list_del(&next->head);
+ kfree(next);
+ }
+ }
+}
+
+void
+nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
+
+ if (vma->mapref) {
+ nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse);
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ } else {
+ nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse);
+ }
+
+ nvkm_vmm_unmap_region(vmm, vma);
+}
+
+void
+nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ if (vma->memory) {
+ mutex_lock(&vmm->mutex);
+ nvkm_vmm_unmap_locked(vmm, vma);
+ mutex_unlock(&vmm->mutex);
+ }
+}
+
+static int
+nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ switch (nvkm_memory_target(map->memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
+ VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
+ return -EINVAL;
+ }
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ case NVKM_MEM_TARGET_NCOH:
+ if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
+ VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
+ return -EINVAL;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
+ !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
+ !IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
+ nvkm_memory_page(map->memory) < map->page->shift) {
+ VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
+ vma->addr, (u64)vma->size, map->offset, map->page->shift,
+ nvkm_memory_page(map->memory));
+ return -EINVAL;
+ }
+
+ return vmm->func->valid(vmm, argv, argc, map);
+}
+
+static int
+nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ for (map->page = vmm->func->page; map->page->shift; map->page++) {
+ VMM_DEBUG(vmm, "trying %d", map->page->shift);
+ if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ nvkm_vmm_pte_func func;
+ int ret;
+
+ /* Make sure we won't overrun the end of the memory object. */
+ if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
+ VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
+ nvkm_memory_size(map->memory),
+ map->offset, (u64)vma->size);
+ return -EINVAL;
+ }
+
+ /* Check remaining arguments for validity. */
+ if (vma->page == NVKM_VMA_PAGE_NONE &&
+ vma->refd == NVKM_VMA_PAGE_NONE) {
+ /* Find the largest page size we can perform the mapping at. */
+ const u32 debug = vmm->debug;
+ vmm->debug = 0;
+ ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+ vmm->debug = debug;
+ if (ret) {
+ VMM_DEBUG(vmm, "invalid at any page size");
+ nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+ return -EINVAL;
+ }
+ } else {
+ /* Page size of the VMA is already pre-determined. */
+ if (vma->refd != NVKM_VMA_PAGE_NONE)
+ map->page = &vmm->func->page[vma->refd];
+ else
+ map->page = &vmm->func->page[vma->page];
+
+ ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
+ if (ret) {
+ VMM_DEBUG(vmm, "invalid %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Deal with the 'offset' argument, and fetch the backend function. */
+ map->off = map->offset;
+ if (map->mem) {
+ for (; map->off; map->mem = map->mem->next) {
+ u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
+ if (size > map->off)
+ break;
+ map->off -= size;
+ }
+ func = map->page->desc->func->mem;
+ } else
+ if (map->sgl) {
+ for (; map->off; map->sgl = sg_next(map->sgl)) {
+ u64 size = sg_dma_len(map->sgl);
+ if (size > map->off)
+ break;
+ map->off -= size;
+ }
+ func = map->page->desc->func->sgl;
+ } else {
+ map->dma += map->offset >> PAGE_SHIFT;
+ map->off = map->offset & PAGE_MASK;
+ func = map->page->desc->func->dma;
+ }
+
+ /* Perform the map. */
+ if (vma->refd == NVKM_VMA_PAGE_NONE) {
+ ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
+ if (ret)
+ return ret;
+
+ vma->refd = map->page - vmm->func->page;
+ } else {
+ nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
+ }
+
+ nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+ nvkm_memory_unref(&vma->memory);
+ vma->memory = nvkm_memory_ref(map->memory);
+ vma->tags = map->tags;
+ return 0;
+}
+
+int
+nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ int ret;
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+ vma->busy = false;
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+static void
+nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct nvkm_vma *prev, *next;
+
+ if ((prev = node(vma, prev)) && !prev->used) {
+ rb_erase(&prev->tree, &vmm->free);
+ list_del(&prev->head);
+ vma->addr = prev->addr;
+ vma->size += prev->size;
+ kfree(prev);
+ }
+
+ if ((next = node(vma, next)) && !next->used) {
+ rb_erase(&next->tree, &vmm->free);
+ list_del(&next->head);
+ vma->size += next->size;
+ kfree(next);
+ }
+
+ nvkm_vmm_free_insert(vmm, vma);
+}
+
+void
+nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ struct nvkm_vma *next = vma;
+
+ BUG_ON(vma->part);
+
+ if (vma->mapref || !vma->sparse) {
+ do {
+ const bool map = next->memory != NULL;
+ const u8 refd = next->refd;
+ const u64 addr = next->addr;
+ u64 size = next->size;
+
+ /* Merge regions that are in the same state. */
+ while ((next = node(next, next)) && next->part &&
+ (next->memory != NULL) == map &&
+ (next->refd == refd))
+ size += next->size;
+
+ if (map) {
+ /* Region(s) are mapped, merge the unmap
+ * and dereference into a single walk of
+ * the page tree.
+ */
+ nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
+ size, vma->sparse);
+ } else
+ if (refd != NVKM_VMA_PAGE_NONE) {
+ /* Drop allocation-time PTE references. */
+ nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+ }
+ } while (next && next->part);
+ }
+
+ /* Merge any mapped regions that were split from the initial
+ * address-space allocation back into the allocated VMA, and
+ * release memory/compression resources.
+ */
+ next = vma;
+ do {
+ if (next->memory)
+ nvkm_vmm_unmap_region(vmm, next);
+ } while ((next = node(vma, next)) && next->part);
+
+ if (vma->sparse && !vma->mapref) {
+ /* Sparse region that was allocated with a fixed page size,
+ * meaning all relevant PTEs were referenced once when the
+ * region was allocated, and remained that way, regardless
+ * of whether memory was mapped into it afterwards.
+ *
+ * The process of unmapping, unsparsing, and dereferencing
+ * PTEs can be done in a single page tree walk.
+ */
+ nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
+ } else
+ if (vma->sparse) {
+ /* Sparse region that wasn't allocated with a fixed page size,
+ * PTE references were taken both at allocation time (to make
+ * the GPU see the region as sparse), and when mapping memory
+ * into the region.
+ *
+ * The latter was handled above, and the remaining references
+ * are dealt with here.
+ */
+ nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
+ }
+
+ /* Remove VMA from the list of allocated nodes. */
+ rb_erase(&vma->tree, &vmm->root);
+
+ /* Merge VMA back into the free list. */
+ vma->page = NVKM_VMA_PAGE_NONE;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ vma->used = false;
+ vma->user = false;
+ nvkm_vmm_put_region(vmm, vma);
+}
+
+void
+nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
+{
+ struct nvkm_vma *vma = *pvma;
+ if (vma) {
+ mutex_lock(&vmm->mutex);
+ nvkm_vmm_put_locked(vmm, vma);
+ mutex_unlock(&vmm->mutex);
+ *pvma = NULL;
+ }
+}
+
+int
+nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
+ u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
+ struct rb_node *node = NULL, *temp;
+ struct nvkm_vma *vma = NULL, *tmp;
+ u64 addr, tail;
+ int ret;
+
+ VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
+ "shift: %d align: %d size: %016llx",
+ getref, mapref, sparse, shift, align, size);
+
+ /* Zero-sized, or lazily-allocated sparse VMAs, make no sense. */
+ if (unlikely(!size || (!getref && !mapref && sparse))) {
+ VMM_DEBUG(vmm, "args %016llx %d %d %d",
+ size, getref, mapref, sparse);
+ return -EINVAL;
+ }
+
+ /* Tesla-class GPUs can only select page size per-PDE, which means
+ * we're required to know the mapping granularity up-front to find
+ * a suitable region of address-space.
+ *
+ * The same goes if we're requesting up-front allocation of PTES.
+ */
+ if (unlikely((getref || vmm->func->page_block) && !shift)) {
+ VMM_DEBUG(vmm, "page size required: %d %016llx",
+ getref, vmm->func->page_block);
+ return -EINVAL;
+ }
+
+ /* If a specific page size was requested, determine its index and
+ * make sure the requested size is a multiple of the page size.
+ */
+ if (shift) {
+ for (page = vmm->func->page; page->shift; page++) {
+ if (shift == page->shift)
+ break;
+ }
+
+ if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+ VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+ return -EINVAL;
+ }
+ align = max_t(u8, align, shift);
+ } else {
+ align = max_t(u8, align, 12);
+ }
+
+ /* Locate smallest block that can possibly satisfy the allocation. */
+ temp = vmm->free.rb_node;
+ while (temp) {
+ struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
+ if (this->size < size) {
+ temp = temp->rb_right;
+ } else {
+ node = temp;
+ temp = temp->rb_left;
+ }
+ }
+
+ if (unlikely(!node))
+ return -ENOSPC;
+
+ /* Take into account alignment restrictions, trying larger blocks
+ * in turn until we find a suitable free block.
+ */
+ do {
+ struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
+ struct nvkm_vma *prev = node(this, prev);
+ struct nvkm_vma *next = node(this, next);
+ const int p = page - vmm->func->page;
+
+ addr = this->addr;
+ if (vmm->func->page_block && prev && prev->page != p)
+ addr = ALIGN(addr, vmm->func->page_block);
+ addr = ALIGN(addr, 1ULL << align);
+
+ tail = this->addr + this->size;
+ if (vmm->func->page_block && next && next->page != p)
+ tail = ALIGN_DOWN(addr, vmm->func->page_block);
+
+ if (addr <= tail && tail - addr >= size) {
+ rb_erase(&this->tree, &vmm->free);
+ vma = this;
+ break;
+ }
+ } while ((node = rb_next(node)));
+
+ if (unlikely(!vma))
+ return -ENOSPC;
+
+ /* If the VMA we found isn't already exactly the requested size,
+ * it needs to be split, and the remaining free blocks returned.
+ */
+ if (addr != vma->addr) {
+ if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
+ nvkm_vmm_put_region(vmm, vma);
+ return -ENOMEM;
+ }
+ nvkm_vmm_free_insert(vmm, vma);
+ vma = tmp;
+ }
+
+ if (size != vma->size) {
+ if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+ nvkm_vmm_put_region(vmm, vma);
+ return -ENOMEM;
+ }
+ nvkm_vmm_free_insert(vmm, tmp);
+ }
+
+ /* Pre-allocate page tables and/or setup sparse mappings. */
+ if (sparse && getref)
+ ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
+ else if (sparse)
+ ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
+ else if (getref)
+ ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
+ else
+ ret = 0;
+ if (ret) {
+ nvkm_vmm_put_region(vmm, vma);
+ return ret;
+ }
+
+ vma->mapref = mapref && !getref;
+ vma->sparse = sparse;
+ vma->page = page - vmm->func->page;
+ vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
+ vma->used = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ *pvma = vma;
+ return 0;
+}
+
+int
+nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
+{
+ int ret;
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+void
+nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ if (vmm->func->part && inst) {
+ mutex_lock(&vmm->mutex);
+ vmm->func->part(vmm, inst);
+ mutex_unlock(&vmm->mutex);
+ }
+}
+
+int
+nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ int ret = 0;
+ if (vmm->func->join) {
+ mutex_lock(&vmm->mutex);
+ ret = vmm->func->join(vmm, inst);
+ mutex_unlock(&vmm->mutex);
+ }
+ return ret;
+}
+
+static bool
+nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
+ return false;
+}
+
+int
+nvkm_vmm_boot(struct nvkm_vmm *vmm)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ const u64 limit = vmm->limit - vmm->start;
+ int ret;
+
+ while (page[1].shift)
+ page++;
+
+ ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false,
+ nvkm_vmm_boot_ptes, NULL, NULL, NULL);
+ vmm->bootstrapped = true;
+ return 0;
+}
+
+static void
+nvkm_vmm_del(struct kref *kref)
+{
+ struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
+ nvkm_vmm_dtor(vmm);
+ kfree(vmm);
+}
+
+void
+nvkm_vmm_unref(struct nvkm_vmm **pvmm)
+{
+ struct nvkm_vmm *vmm = *pvmm;
+ if (vmm) {
+ kref_put(&vmm->kref, nvkm_vmm_del);
+ *pvmm = NULL;
+ }
+}
+
+struct nvkm_vmm *
+nvkm_vmm_ref(struct nvkm_vmm *vmm)
+{
+ if (vmm)
+ kref_get(&vmm->kref);
+ return vmm;
+}
+
+int
+nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
+ u32 argc, struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_mmu *mmu = device->mmu;
+ struct nvkm_vmm *vmm = NULL;
+ int ret;
+ ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc, key, name, &vmm);
+ if (ret)
+ nvkm_vmm_unref(&vmm);
+ *pvmm = vmm;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
new file mode 100644
index 000000000000..6d8f61ea467a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -0,0 +1,310 @@
+#ifndef __NVKM_VMM_H__
+#define __NVKM_VMM_H__
+#include "priv.h"
+#include <core/memory.h>
+enum nvkm_memory_target;
+
+struct nvkm_vmm_pt {
+ /* Some GPUs have a mapping level with a dual page tables to
+ * support large and small pages in the same address-range.
+ *
+ * We track the state of both page tables in one place, which
+ * is why there's multiple PT pointers/refcounts here.
+ */
+ struct nvkm_mmu_pt *pt[2];
+ u32 refs[2];
+
+ /* Page size handled by this PT.
+ *
+ * Tesla backend needs to know this when writinge PDEs,
+ * otherwise unnecessary.
+ */
+ u8 page;
+
+ /* Entire page table sparse.
+ *
+ * Used to propagate sparseness to child page tables.
+ */
+ bool sparse:1;
+
+ /* Tracking for page directories.
+ *
+ * The array is indexed by PDE, and will either point to the
+ * child page table, or indicate the PDE is marked as sparse.
+ **/
+#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
+#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
+#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
+ struct nvkm_vmm_pt **pde;
+
+ /* Tracking for dual page tables.
+ *
+ * There's one entry for each LPTE, keeping track of whether
+ * there are valid SPTEs in the same address-range.
+ *
+ * This information is used to manage LPTE state transitions.
+ */
+#define NVKM_VMM_PTE_SPARSE 0x80
+#define NVKM_VMM_PTE_VALID 0x40
+#define NVKM_VMM_PTE_SPTES 0x3f
+ u8 pte[];
+};
+
+typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
+ struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
+typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
+ struct nvkm_vmm_pt *, u32 pdei);
+typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *);
+
+struct nvkm_vmm_desc_func {
+ nvkm_vmm_pxe_func invalid;
+ nvkm_vmm_pxe_func unmap;
+ nvkm_vmm_pxe_func sparse;
+
+ nvkm_vmm_pde_func pde;
+
+ nvkm_vmm_pte_func mem;
+ nvkm_vmm_pte_func dma;
+ nvkm_vmm_pte_func sgl;
+};
+
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
+void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
+void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+
+void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+
+struct nvkm_vmm_desc {
+ enum {
+ PGD,
+ PGT,
+ SPT,
+ LPT,
+ } type;
+ u8 bits; /* VMA bits covered by PT. */
+ u8 size; /* Bytes-per-PTE. */
+ u32 align; /* PT address alignment. */
+ const struct nvkm_vmm_desc_func *func;
+};
+
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
+extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];
+
+struct nvkm_vmm_page {
+ u8 shift;
+ const struct nvkm_vmm_desc *desc;
+#define NVKM_VMM_PAGE_SPARSE 0x01
+#define NVKM_VMM_PAGE_VRAM 0x02
+#define NVKM_VMM_PAGE_HOST 0x04
+#define NVKM_VMM_PAGE_COMP 0x08
+#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
+#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
+ u8 type;
+};
+
+struct nvkm_vmm_func {
+ int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
+ void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);
+
+ int (*aper)(enum nvkm_memory_target);
+ int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
+ struct nvkm_vmm_map *);
+ void (*flush)(struct nvkm_vmm *, int depth);
+
+ u64 page_block;
+ const struct nvkm_vmm_page page[];
+};
+
+struct nvkm_vmm_join {
+ struct nvkm_memory *inst;
+ struct list_head head;
+};
+
+int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
+ u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
+ const char *name, struct nvkm_vmm **);
+int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
+ u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
+ const char *name, struct nvkm_vmm *);
+struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
+ bool sparse, u8 page, u8 align, u64 size,
+ struct nvkm_vma **pvma);
+void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
+
+struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
+void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
+
+int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
+ u64, u64, void *, u32, struct lock_class_key *,
+ const char *, struct nvkm_vmm **);
+int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+
+int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int gf100_vmm_aper(enum nvkm_memory_target);
+int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gf100_vmm_flush_(struct nvkm_vmm *, int);
+void gf100_vmm_flush(struct nvkm_vmm *, int);
+
+int gk20a_vmm_aper(enum nvkm_memory_target);
+
+int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
+int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gp100_vmm_flush(struct nvkm_vmm *, int);
+
+int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk20a_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm200_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm20b_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm20b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+
+#define VMM_PRINT(l,v,p,f,a...) do { \
+ struct nvkm_vmm *_vmm = (v); \
+ if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) { \
+ nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n", \
+ _vmm->name, ##a); \
+ } \
+} while(0)
+#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
+#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
+#define VMM_SPAM(v,f,a...) VMM_PRINT(NV_DBG_SPAM , (v), dbg, f, ##a)
+
+#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do { \
+ nvkm_kmap((PT)->memory); \
+ while (PTEN) { \
+ u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift; \
+ u64 _addr = ((BASE) + MAP->off); \
+ \
+ if (_ptes > PTEN) { \
+ MAP->off += PTEN << MAP->page->shift; \
+ _ptes = PTEN; \
+ } else { \
+ MAP->off = 0; \
+ NEXT; \
+ } \
+ \
+ VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes); \
+ \
+ FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
+ PTEI += _ptes; \
+ PTEN -= _ptes; \
+ }; \
+ nvkm_done((PT)->memory); \
+} while(0)
+
+#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT), \
+ ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT), \
+ (MAP->mem = MAP->mem->next))
+#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ *MAP->dma, PAGE_SIZE, MAP->dma++)
+#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl), \
+ (MAP->sgl = sg_next(MAP->sgl)))
+
+#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
+#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
+#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do { \
+ const u32 _pteo = (o); u##b _data = (d); \
+ VMM_SPAM((v), " %010llx "f, (m)->addr + _pteo, _data, ##a); \
+ VMM_##fn((m), (m)->base + _pteo, _data, (c), b); \
+} while(0)
+
+#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 32, WO, "%08x")
+#define VMM_FO032(m,v,o,d,c) \
+ VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))
+
+#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 64, WO, "%016llx")
+#define VMM_FO064(m,v,o,d,c) \
+ VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
+
+#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do { \
+ u32 _pteo = (o), _ptes = (c); \
+ const u64 _addr = (m)->addr + _pteo; \
+ VMM_SPAM((v), " %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a); \
+ while (_ptes--) { \
+ nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo)); \
+ nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi)); \
+ _pteo += 0x10; \
+ } \
+} while(0)
+
+#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
+#define VMM_FO128(m,v,o,lo,hi,c) do { \
+ nvkm_kmap((m)->memory); \
+ VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c)); \
+ nvkm_done((m)->memory); \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
new file mode 100644
index 000000000000..faf5a7e9265e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+#include <subdev/timer.h>
+
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
+static inline void
+gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 base = (addr >> 8) | map->type;
+ u64 data = base;
+
+ if (map->ctag && !(map->next & (1ULL << 44))) {
+ while (ptes--) {
+ data = base | ((map->ctag >> 1) << 44);
+ if (!(map->ctag++ & 1))
+ data |= BIT_ULL(60);
+
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ base += map->next;
+ }
+ } else {
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ data += map->next;
+ }
+ }
+}
+
+void
+gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = (*map->dma++ >> 8) | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .mem = gf100_vmm_pgt_mem,
+ .dma = gf100_vmm_pgt_dma,
+ .sgl = gf100_vmm_pgt_sgl,
+};
+
+void
+gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ struct nvkm_mmu_pt *pt;
+ u64 data = 0;
+
+ if ((pt = pgt->pt[0])) {
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
+ case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
+ data |= BIT_ULL(35); /* VOL */
+ break;
+ case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ data |= pt->addr >> 8;
+ }
+
+ if ((pt = pgt->pt[1])) {
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
+ case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
+ data |= BIT_ULL(34); /* VOL */
+ break;
+ case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ data |= pt->addr << 24;
+ }
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * 8, data);
+ nvkm_done(pd->memory);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgd = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .pde = gf100_vmm_pgd_pde,
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+void
+gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 type = depth << 24;
+
+ type = 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+
+ mutex_lock(&subdev->mutex);
+ /* Looks like maybe a "free flush slots" counter, the
+ * faster you write to 0x100cbc to more it decreases.
+ */
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+ break;
+ );
+
+ nvkm_wr32(device, 0x100cb8, vmm->pd->pt[0]->addr >> 8);
+ nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
+
+ /* Wait for flush to be queued? */
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+ break;
+ );
+ mutex_unlock(&subdev->mutex);
+}
+
+void
+gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ gf100_vmm_flush_(vmm, 0);
+}
+
+int
+gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const struct nvkm_vmm_page *page = map->page;
+ const bool gm20x = page->desc->func->sparse != NULL;
+ union {
+ struct gf100_vmm_map_vn vn;
+ struct gf100_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_memory *memory = map->memory;
+ u8 kind, priv, ro, vol;
+ int kindn, aper, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->next = (1 << page->shift) >> 8;
+ map->type = map->ctag = 0;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ vol = !!args->v0.vol;
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ vol = target == NVKM_MEM_TARGET_HOST;
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+ if (kind >= kindn || kindm[kind] == 0xff) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (kindm[kind] != kind) {
+ u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
+ u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+
+ if (map->tags->mn) {
+ u64 tags = map->tags->mn->offset + (map->offset >> 17);
+ if (page->shift == 17 || !gm20x) {
+ map->type |= tags << 44;
+ map->ctag |= 1ULL << 44;
+ map->next |= 1ULL << 44;
+ } else {
+ map->ctag |= tags << 1 | 1;
+ }
+ } else {
+ kind = kindm[kind];
+ }
+ }
+
+ map->type |= BIT(0);
+ map->type |= (u64)priv << 1;
+ map->type |= (u64) ro << 2;
+ map->type |= (u64) vol << 32;
+ map->type |= (u64)aper << 33;
+ map->type |= (u64)kind << 36;
+ return 0;
+}
+
+int
+gf100_vmm_aper(enum nvkm_memory_target target)
+{
+ switch (target) {
+ case NVKM_MEM_TARGET_VRAM: return 0;
+ case NVKM_MEM_TARGET_HOST: return 2;
+ case NVKM_MEM_TARGET_NCOH: return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+void
+gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ nvkm_fo64(inst, 0x0200, 0x00000000, 2);
+}
+
+int
+gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
+ struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
+
+ switch (nvkm_memory_target(pd->memory)) {
+ case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
+ case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
+ base |= BIT_ULL(2) /* VOL. */;
+ break;
+ case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ base |= pd->addr;
+
+ nvkm_kmap(inst);
+ nvkm_wo64(inst, 0x0200, base);
+ nvkm_wo64(inst, 0x0208, vmm->limit - 1);
+ nvkm_done(inst);
+ return 0;
+}
+
+int
+gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ return gf100_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gf100_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gf100_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
+ const struct nvkm_vmm_func *func_17,
+ struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ switch (mmu->subdev.device->fb->page) {
+ case 16: return nv04_vmm_new_(func_16, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+ case 17: return nv04_vmm_new_(func_17, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+int
+gf100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
new file mode 100644
index 000000000000..0ebb7bccfcd2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+void
+gk104_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gk104_vmm_lpt = {
+ .invalid = gk104_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .mem = gf100_vmm_pgt_mem,
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+gk104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
new file mode 100644
index 000000000000..8086994a0446
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <core/memory.h>
+
+int
+gk20a_vmm_aper(enum nvkm_memory_target target)
+{
+ switch (target) {
+ case NVKM_MEM_TARGET_NCOH: return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct nvkm_vmm_func
+gk20a_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xxHC },
+ { 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xxHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gk20a_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xxHC },
+ { 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xxHx },
+ {}
+ }
+};
+
+int
+gk20a_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
new file mode 100644
index 000000000000..a1676a4644fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/ifb00d.h>
+#include <nvif/unpack.h>
+
+static void
+gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgt_sparse,
+ .mem = gf100_vmm_pgt_mem,
+ .dma = gf100_vmm_pgt_dma,
+ .sgl = gf100_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_lpt = {
+ .invalid = gk104_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgt_sparse,
+ .mem = gf100_vmm_pgt_mem,
+};
+
+static void
+gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+ VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_pgd = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgd_sparse,
+ .pde = gf100_vmm_pgd_pde,
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gm200_vmm_spt },
+ { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+ { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gm200_vmm_spt },
+ { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+ { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+int
+gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
+ if (vmm->func->page[1].shift == 16)
+ base |= BIT_ULL(11);
+ return gf100_vmm_join_(vmm, inst, base);
+}
+
+int
+gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ return gm200_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gm200_vmm_17 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+ { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gm200_vmm_16 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+ { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
+ const struct nvkm_vmm_func *func_17,
+ struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ const struct nvkm_vmm_func *func;
+ union {
+ struct gm200_vmm_vn vn;
+ struct gm200_vmm_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ switch (args->v0.bigpage) {
+ case 16: func = func_16; break;
+ case 17: func = func_17; break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ func = func_17;
+ } else
+ return ret;
+
+ return nvkm_vmm_new_(func, mmu, 0, addr, size, key, name, pvmm);
+}
+
+int
+gm200_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
+
+int
+gm200_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
new file mode 100644
index 000000000000..64d4b6cff8dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+gm20b_vmm_17 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+ { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gm20b_vmm_16 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+ { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+int
+gm20b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
+
+int
+gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
new file mode 100644
index 000000000000..059fafe0e771
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+
+#include <nvif/ifc00d.h>
+#include <nvif/unpack.h>
+
+static inline void
+gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = (addr >> 4) | map->type;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ data += map->next;
+ }
+}
+
+static void
+gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = (*map->dma++ >> 4) | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .mem = gp100_vmm_pgt_mem,
+ .dma = gp100_vmm_pgt_dma,
+ .sgl = gp100_vmm_pgt_sgl,
+};
+
+static void
+gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_lpt = {
+ .invalid = gp100_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .mem = gp100_vmm_pgt_mem,
+};
+
+static inline void
+gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = (addr >> 4) | map->type;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
+ data += map->next;
+ }
+}
+
+static void
+gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
+}
+
+static inline bool
+gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
+ case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
+ *data |= BIT_ULL(3); /* VOL. */
+ break;
+ case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+ *data |= pt->addr >> 4;
+ return true;
+}
+
+static void
+gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data[2] = {};
+
+ if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
+ return;
+ if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
+ nvkm_done(pd->memory);
+}
+
+static void
+gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+ VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
+}
+
+static void
+gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd0 = {
+ .unmap = gp100_vmm_pd0_unmap,
+ .sparse = gp100_vmm_pd0_sparse,
+ .pde = gp100_vmm_pd0_pde,
+ .mem = gp100_vmm_pd0_mem,
+};
+
+static void
+gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data = 0;
+
+ if (!gp100_vmm_pde(pgt->pt[0], &data))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * 8, data);
+ nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd1 = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .pde = gp100_vmm_pd1_pde,
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_16[] = {
+ { LPT, 5, 8, 0x0100, &gp100_vmm_desc_lpt },
+ { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_12[] = {
+ { SPT, 9, 8, 0x1000, &gp100_vmm_desc_spt },
+ { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ {}
+};
+
+int
+gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const struct nvkm_vmm_page *page = map->page;
+ union {
+ struct gp100_vmm_map_vn vn;
+ struct gp100_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_memory *memory = map->memory;
+ u8 kind, priv, ro, vol;
+ int kindn, aper, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->next = (1ULL << page->shift) >> 4;
+ map->type = 0;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ vol = !!args->v0.vol;
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ vol = target == NVKM_MEM_TARGET_HOST;
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+ if (kind >= kindn || kindm[kind] == 0xff) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (kindm[kind] != kind) {
+ u64 tags = nvkm_memory_size(memory) >> 16;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+
+ if (map->tags->mn) {
+ tags = map->tags->mn->offset + (map->offset >> 16);
+ map->ctag |= ((1ULL << page->shift) >> 16) << 36;
+ map->type |= tags << 36;
+ map->next |= map->ctag;
+ } else {
+ kind = kindm[kind];
+ }
+ }
+
+ map->type |= BIT(0);
+ map->type |= (u64)aper << 1;
+ map->type |= (u64) vol << 3;
+ map->type |= (u64)priv << 5;
+ map->type |= (u64) ro << 6;
+ map->type |= (u64)kind << 56;
+ return 0;
+}
+
+void
+gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ gf100_vmm_flush_(vmm, 5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth);
+}
+
+int
+gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
+ return gf100_vmm_join_(vmm, inst, base);
+}
+
+static const struct nvkm_vmm_func
+gp100_vmm = {
+ .join = gp100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = gp100_vmm_flush,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gp100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&gp100_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
new file mode 100644
index 000000000000..3dcc6bddb32f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+gp10b_vmm = {
+ .join = gp100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = gp100_vmm_flush,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+int
+gp10b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&gp10b_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
new file mode 100644
index 000000000000..0cab1ffc9f64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/if000d.h>
+#include <nvif/unpack.h>
+
+static inline void
+nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u32 data = addr | 0x00000003; /* PRESENT, RW. */
+ while (ptes--) {
+ VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
+ data += 0x00001000;
+ }
+}
+
+static void
+nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+}
+
+static void
+nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ while (ptes--)
+ VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv04_vmm_desc_pgt = {
+ .unmap = nv04_vmm_pgt_unmap,
+ .dma = nv04_vmm_pgt_dma,
+ .sgl = nv04_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv04_vmm_desc_12[] = {
+ { PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
+ {}
+};
+
+int
+nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ union {
+ struct nv04_vmm_map_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ VMM_DEBUG(vmm, "args");
+ return ret;
+}
+
+static const struct nvkm_vmm_func
+nv04_vmm = {
+ .valid = nv04_vmm_valid,
+ .page = {
+ { 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 pd_header, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ union {
+ struct nv04_vmm_vn vn;
+ } *args = argv;
+ int ret;
+
+ ret = nvkm_vmm_new_(func, mmu, pd_header, addr, size, key, name, pvmm);
+ if (ret)
+ return ret;
+
+ return nvif_unvers(-ENOSYS, &argv, &argc, args->vn);
+}
+
+int
+nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_memory *mem;
+ struct nvkm_vmm *vmm;
+ int ret;
+
+ ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,
+ argv, argc, key, name, &vmm);
+ *pvmm = vmm;
+ if (ret)
+ return ret;
+
+ mem = vmm->pd->pt[0]->memory;
+ nvkm_kmap(mem);
+ nvkm_wo32(mem, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+ nvkm_wo32(mem, 0x00004, vmm->limit - 1);
+ nvkm_done(mem);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
new file mode 100644
index 000000000000..b595f130e573
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u32 data = (addr >> 7) | 0x00000001; /* VALID. */
+ while (ptes--) {
+ VMM_WO032(pt, vmm, ptei++ * 4, data);
+ data += 0x00000020;
+ }
+}
+
+static void
+nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+}
+
+static void
+nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u32 data = (*map->dma++ >> 7) | 0x00000001;
+ VMM_WO032(pt, vmm, ptei++ * 4, data);
+ }
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv41_vmm_desc_pgt = {
+ .unmap = nv41_vmm_pgt_unmap,
+ .dma = nv41_vmm_pgt_dma,
+ .sgl = nv41_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv41_vmm_desc_12[] = {
+ { PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
+ {}
+};
+
+static void
+nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+
+ mutex_lock(&subdev->mutex);
+ nvkm_wr32(device, 0x100810, 0x00000022);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100810) & 0x00000020)
+ break;
+ );
+ nvkm_wr32(device, 0x100810, 0x00000000);
+ mutex_unlock(&subdev->mutex);
+}
+
+static const struct nvkm_vmm_func
+nv41_vmm = {
+ .valid = nv04_vmm_valid,
+ .flush = nv41_vmm_flush,
+ .page = {
+ { 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv41_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&nv41_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
new file mode 100644
index 000000000000..b834e4352334
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ dma_addr_t *list, u32 ptei, u32 ptes)
+{
+ u32 pteo = (ptei << 2) & ~0x0000000f;
+ u32 tmp[4];
+
+ tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
+ tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
+ tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
+ tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);
+
+ while (ptes--) {
+ u32 addr = (list ? *list++ : vmm->null) >> 12;
+ switch (ptei++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
+ }
+ }
+
+ VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
+ VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
+ VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
+ VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
+}
+
+static void
+nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ dma_addr_t tmp[4], i;
+
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ for (i = 0; i < pten; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes >= 4) {
+ for (i = 0; i < 4; i++, addr += 0x1000)
+ tmp[i] = addr >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ for (i = 0; i < ptes; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
+ }
+}
+
+static void
+nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+}
+
+static void
+nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ map->dma += pten;
+ }
+
+ while (ptes >= 4) {
+ u32 tmp[4], i;
+ for (i = 0; i < 4; i++)
+ tmp[i] = *map->dma++ >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
+ map->dma += ptes;
+ }
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes > 4) {
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ ptes -= 4;
+ }
+
+ if (ptes)
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
+ nvkm_done(pt->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+nv44_vmm_desc_pgt = {
+ .unmap = nv44_vmm_pgt_unmap,
+ .dma = nv44_vmm_pgt_dma,
+ .sgl = nv44_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv44_vmm_desc_12[] = {
+ { PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
+ {}
+};
+
+static void
+nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100814, vmm->limit - 4096);
+ nvkm_wr32(device, 0x100808, 0x000000020);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100808) & 0x00000001)
+ break;
+ );
+ nvkm_wr32(device, 0x100808, 0x00000000);
+}
+
+static const struct nvkm_vmm_func
+nv44_vmm = {
+ .valid = nv04_vmm_valid,
+ .flush = nv44_vmm_flush,
+ .page = {
+ { 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv44_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_subdev *subdev = &mmu->subdev;
+ struct nvkm_vmm *vmm;
+ int ret;
+
+ ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, &vmm);
+ *pvmm = vmm;
+ if (ret)
+ return ret;
+
+ vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
+ &vmm->null, GFP_KERNEL);
+ if (!vmm->nullp) {
+ nvkm_warn(subdev, "unable to allocate dummy pages\n");
+ vmm->null = 0;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
new file mode 100644
index 000000000000..863a2edd9861
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
+
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
+static inline void
+nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 next = addr | map->type, data;
+ u32 pten;
+ int log2blk;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes) {
+ for (log2blk = 7; log2blk >= 0; log2blk--) {
+ pten = 1 << log2blk;
+ if (ptes >= pten && IS_ALIGNED(ptei, pten))
+ break;
+ }
+
+ data = next | (log2blk << 7);
+ next += pten * map->next;
+ ptes -= pten;
+
+ while (pten--)
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ }
+}
+
+static void
+nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = *map->dma++ | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgt = {
+ .unmap = nv50_vmm_pgt_unmap,
+ .mem = nv50_vmm_pgt_mem,
+ .dma = nv50_vmm_pgt_dma,
+ .sgl = nv50_vmm_pgt_sgl,
+};
+
+static bool
+nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
+{
+ struct nvkm_mmu_pt *pt;
+ u64 data = 0xdeadcafe00000000ULL;
+ if (pgt && (pt = pgt->pt[0])) {
+ switch (pgt->page) {
+ case 16: data = 0x00000001; break;
+ case 12: data = 0x00000003;
+ switch (nvkm_memory_size(pt->memory)) {
+ case 0x100000: data |= 0x00000000; break;
+ case 0x040000: data |= 0x00000020; break;
+ case 0x020000: data |= 0x00000040; break;
+ case 0x010000: data |= 0x00000060; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
+ case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
+ case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ data |= pt->addr;
+ }
+ *pdata = data;
+ return true;
+}
+
+static void
+nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_join *join;
+ u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
+ u64 data;
+
+ if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
+ return;
+
+ list_for_each_entry(join, &vmm->join, head) {
+ nvkm_kmap(join->inst);
+ nvkm_wo64(join->inst, pdeo, data);
+ nvkm_done(join->inst);
+ }
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgd = {
+ .pde = nv50_vmm_pgd_pde,
+};
+
+static const struct nvkm_vmm_desc
+nv50_vmm_desc_12[] = {
+ { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
+ { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+nv50_vmm_desc_16[] = {
+ { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
+ { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+ {}
+};
+
+static void
+nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ int i, id;
+
+ mutex_lock(&subdev->mutex);
+ for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+ if (!atomic_read(&vmm->engref[i]))
+ continue;
+
+ /* unfortunate hw bug workaround... */
+ if (i == NVKM_ENGINE_GR && device->gr) {
+ int ret = nvkm_gr_tlb_flush(device->gr);
+ if (ret != -ENODEV)
+ continue;
+ }
+
+ switch (i) {
+ case NVKM_ENGINE_GR : id = 0x00; break;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: id = 0x01; break;
+ case NVKM_SUBDEV_BAR : id = 0x06; break;
+ case NVKM_ENGINE_MSPPP :
+ case NVKM_ENGINE_MPEG : id = 0x08; break;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : id = 0x09; break;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : id = 0x0a; break;
+ case NVKM_ENGINE_CE0 : id = 0x0d; break;
+ default:
+ continue;
+ }
+
+ nvkm_wr32(device, 0x100c80, (id << 16) | 1);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+ break;
+ ) < 0)
+ nvkm_error(subdev, "%s mmu invalidate timeout\n",
+ nvkm_subdev_name[i]);
+ }
+ mutex_unlock(&subdev->mutex);
+}
+
+static int
+nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const struct nvkm_vmm_page *page = map->page;
+ union {
+ struct nv50_vmm_map_vn vn;
+ struct nv50_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_ram *ram = device->fb->ram;
+ struct nvkm_memory *memory = map->memory;
+ u8 aper, kind, comp, priv, ro;
+ int kindn, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->type = map->ctag = 0;
+ map->next = 1 << page->shift;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind & 0x7f;
+ comp = args->v0.comp & 0x03;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ comp = 0;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ if (ram->stolen) {
+ map->type |= ram->stolen;
+ aper = 3;
+ } else {
+ aper = 0;
+ }
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ aper = 2;
+ break;
+ case NVKM_MEM_TARGET_NCOH:
+ aper = 3;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+ if (kind >= kindn || kindm[kind] == 0x7f) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (map->mem && map->mem->type != kindm[kind]) {
+ VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
+ kindm[kind], map->mem->type);
+ return -EINVAL;
+ }
+
+ if (comp) {
+ u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_tags_get(memory, device, tags, NULL,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+
+ if (map->tags->mn) {
+ u32 tags = map->tags->mn->offset + (map->offset >> 16);
+ map->ctag |= (u64)comp << 49;
+ map->type |= (u64)comp << 47;
+ map->type |= (u64)tags << 49;
+ map->next |= map->ctag;
+ }
+ }
+
+ map->type |= BIT(0); /* Valid. */
+ map->type |= (u64)ro << 3;
+ map->type |= (u64)aper << 4;
+ map->type |= (u64)priv << 6;
+ map->type |= (u64)kind << 40;
+ return 0;
+}
+
+static void
+nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ struct nvkm_vmm_join *join;
+
+ list_for_each_entry(join, &vmm->join, head) {
+ if (join->inst == inst) {
+ list_del(&join->head);
+ kfree(join);
+ break;
+ }
+ }
+}
+
+static int
+nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
+ struct nvkm_vmm_join *join;
+ int ret = 0;
+ u64 data;
+ u32 pdei;
+
+ if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
+ return -ENOMEM;
+ join->inst = inst;
+ list_add_tail(&join->head, &vmm->join);
+
+ nvkm_kmap(join->inst);
+ for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
+ if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
+ ret = -EINVAL;
+ break;
+ }
+ nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
+ }
+ nvkm_done(join->inst);
+ return ret;
+}
+
+static const struct nvkm_vmm_func
+nv50_vmm = {
+ .join = nv50_vmm_join,
+ .part = nv50_vmm_part,
+ .valid = nv50_vmm_valid,
+ .flush = nv50_vmm_flush,
+ .page_block = 1 << 29,
+ .page = {
+ { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+nv50_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&nv50_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index a4cb82495cee..b1b1f3626b96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -87,7 +87,7 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
if (pci->irq >= 0) {
free_irq(pci->irq, pci);
pci->irq = -1;
- };
+ }
if (pci->agp.bridge)
nvkm_agp_fini(pci);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index 73ca1203281d..5e91b3f90065 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -39,7 +39,7 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_subdev *subdev = &gsb->base.subdev;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma = NULL;
u32 start_address;
int ret;
@@ -48,12 +48,16 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
return ret;
/* Map the HS firmware so the HS bootloader can see it */
- ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+ ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
if (ret) {
nvkm_falcon_put(falcon, subdev);
return ret;
}
+ ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
+ if (ret)
+ goto end;
+
/* Reset and set the falcon up */
ret = nvkm_falcon_reset(falcon);
if (ret)
@@ -61,7 +65,7 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
nvkm_falcon_bind_context(falcon, gsb->inst);
/* Load the HS bootloader into the falcon's IMEM/DMEM */
- ret = sb->acr->func->load(sb->acr, falcon, blob, vma.offset);
+ ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
if (ret < 0)
goto end;
@@ -91,7 +95,7 @@ end:
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
/* We don't need the ACR firmware anymore */
- nvkm_gpuobj_unmap(&vma);
+ nvkm_vmm_put(gsb->vmm, &vma);
nvkm_falcon_put(falcon, subdev);
return ret;
@@ -102,37 +106,26 @@ gm200_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_device *device = sb->subdev.device;
- struct nvkm_vm *vm;
- const u64 vm_area_len = 600 * 1024;
int ret;
/* Allocate instance block and VM */
- ret = nvkm_gpuobj_new(device, 0x1000, 0, true, NULL, &gsb->inst);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+ &gsb->inst);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(device, 0x8000, 0, true, NULL, &gsb->pgd);
+ ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
+ &gsb->vmm);
if (ret)
return ret;
- ret = nvkm_vm_new(device, 0, vm_area_len, 0, NULL, &vm);
- if (ret)
- return ret;
-
- atomic_inc(&vm->engref[NVKM_SUBDEV_PMU]);
+ atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
+ gsb->vmm->debug = gsb->base.subdev.debug;
- ret = nvkm_vm_ref(vm, &gsb->vm, gsb->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
if (ret)
return ret;
- nvkm_kmap(gsb->inst);
- nvkm_wo32(gsb->inst, 0x200, lower_32_bits(gsb->pgd->addr));
- nvkm_wo32(gsb->inst, 0x204, upper_32_bits(gsb->pgd->addr));
- nvkm_wo32(gsb->inst, 0x208, lower_32_bits(vm_area_len - 1));
- nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
- nvkm_done(gsb->inst);
-
if (sb->acr->func->oneinit) {
ret = sb->acr->func->oneinit(sb->acr, sb);
if (ret)
@@ -160,9 +153,9 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
sb->acr->func->dtor(sb->acr);
- nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
- nvkm_gpuobj_del(&gsb->pgd);
- nvkm_gpuobj_del(&gsb->inst);
+ nvkm_vmm_part(gsb->vmm, gsb->inst);
+ nvkm_vmm_unref(&gsb->vmm);
+ nvkm_memory_unref(&gsb->inst);
return gsb;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
index c8ab3d76bdef..62c5e162099a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -29,9 +29,8 @@ struct gm200_secboot {
struct nvkm_secboot base;
/* Instance block & address space used for HS FW execution */
- struct nvkm_gpuobj *inst;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
};
#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index ee989210725e..6f10b098676c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -183,7 +183,7 @@ acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
break;
);
if (reg & BIT(4)) {
- nvkm_debug(subdev, "applying workaround for start bug...");
+ nvkm_debug(subdev, "applying workaround for start bug...\n");
nvkm_falcon_start(sb->boot_falcon);
nvkm_msec(subdev->device, 1,
if ((reg = nvkm_rd32(subdev->device,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index 885e919a8720..d9091f029506 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -25,6 +25,7 @@
#include <subdev/secboot.h>
#include <subdev/mmu.h>
+struct nvkm_gpuobj;
struct nvkm_secboot_func {
int (*oneinit)(struct nvkm_secboot *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 2bafcc1d1818..7ba56b12badd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/therm/gt215.o
nvkm-y += nvkm/subdev/therm/gf119.o
nvkm-y += nvkm/subdev/therm/gm107.o
nvkm-y += nvkm/subdev/therm/gm200.o
+nvkm-y += nvkm/subdev/therm/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 952a7cb0a59a..f27fc6d0d4c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -341,7 +341,8 @@ nvkm_therm_init(struct nvkm_subdev *subdev)
{
struct nvkm_therm *therm = nvkm_therm(subdev);
- therm->func->init(therm);
+ if (therm->func->init)
+ therm->func->init(therm);
if (therm->suspend >= 0) {
/* restore the pwm value only when on manual or auto mode */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
new file mode 100644
index 000000000000..9f0dea3f61dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Rhys Kidd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rhys Kidd
+ */
+#include "priv.h"
+
+static int
+gp100_temp_get(struct nvkm_therm *therm)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvkm_subdev *subdev = &therm->subdev;
+ u32 tsensor = nvkm_rd32(device, 0x020460);
+ u32 inttemp = (tsensor & 0x0001fff8);
+
+ /* device SHADOWed */
+ if (tsensor & 0x40000000)
+ nvkm_trace(subdev, "reading temperature from SHADOWed sensor\n");
+
+ /* device valid */
+ if (tsensor & 0x20000000)
+ return (inttemp >> 8);
+ else
+ return -ENODEV;
+}
+
+static const struct nvkm_therm_func
+gp100_therm = {
+ .temp_get = gp100_temp_get,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gp100_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&gp100_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index c226da145fb3..a349cb61961e 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
config DRM_OMAP_PANEL_DPI
tristate "Generic DPI panel"
+ depends on BACKLIGHT_CLASS_DEVICE
help
Driver for generic DPI panels.
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index d9d25df6fc1b..4600d3841c25 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -165,11 +165,15 @@ static bool hdmic_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
+ bool connected;
if (gpio_is_valid(ddata->hpd_gpio))
- return gpio_get_value_cansleep(ddata->hpd_gpio);
+ connected = gpio_get_value_cansleep(ddata->hpd_gpio);
else
- return in->ops.hdmi->detect(in);
+ connected = in->ops.hdmi->detect(in);
+ if (!connected && in->ops.hdmi->lost_hotplug)
+ in->ops.hdmi->lost_hotplug(in);
+ return connected;
}
static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index a9e9d667c55e..e3d98d78fc40 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -51,6 +51,8 @@ static int tpd_connect(struct omap_dss_device *dssdev,
dssdev->dst = dst;
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
+ gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
+
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
@@ -69,6 +71,7 @@ static void tpd_disconnect(struct omap_dss_device *dssdev,
return;
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
+ gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
dst->src = NULL;
dssdev->dst = NULL;
@@ -146,25 +149,22 @@ static int tpd_read_edid(struct omap_dss_device *dssdev,
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (!gpiod_get_value_cansleep(ddata->hpd_gpio))
return -ENODEV;
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
-
- r = in->ops.hdmi->read_edid(in, edid, len);
-
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
-
- return r;
+ return in->ops.hdmi->read_edid(in, edid, len);
}
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio);
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
+ if (!connected && in->ops.hdmi->lost_hotplug)
+ in->ops.hdmi->lost_hotplug(in);
+ return connected;
}
static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index 8b87d5cf45fc..f24ebf7f61dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -65,6 +65,14 @@ config OMAP4_DSS_HDMI
help
HDMI support for OMAP4 based SoCs.
+config OMAP4_DSS_HDMI_CEC
+ bool "Enable HDMI CEC support for OMAP4"
+ depends on OMAP4_DSS_HDMI
+ select CEC_CORE
+ default y
+ ---help---
+ When selected the HDMI transmitter will support the CEC feature.
+
config OMAP5_DSS_HDMI
bool "HDMI support for OMAP5"
default n
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 62d5b4f45420..904101c5e79d 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -15,5 +15,6 @@ omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \
hdmi_phy.o
omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o
omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o
ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index daf286fc8a40..ca1e3b489540 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
}
static const struct soc_device_attribute dpi_soc_devices[] = {
- { .family = "OMAP3[456]*" },
- { .family = "[AD]M37*" },
+ { .machine = "OMAP3[456]*" },
+ { .machine = "[AD]M37*" },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b56a05730314..c2cf6d98e577 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4095,7 +4095,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
@@ -5449,9 +5449,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index a820b394af09..c2609c448ddc 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/hdmi.h>
#include <sound/omap-hdmi-audio.h>
+#include <media/cec.h>
#include "omapdss.h"
#include "dss.h"
@@ -264,6 +265,10 @@ struct hdmi_core_data {
void __iomem *base;
bool cts_swmode;
bool audio_use_mclk;
+
+ struct hdmi_wp_data *wp;
+ unsigned int core_pwr_cnt;
+ struct cec_adapter *adap;
};
static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx,
@@ -373,7 +378,7 @@ struct omap_hdmi {
bool audio_configured;
struct omap_dss_audio audio_config;
- /* This lock should be taken when booleans bellow are touched. */
+ /* This lock should be taken when booleans below are touched. */
spinlock_t audio_playing_lock;
bool audio_playing;
bool display_enabled;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index f169348da377..a598dfdeb585 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -36,9 +36,11 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
+#include <media/cec.h>
#include "omapdss.h"
#include "hdmi4_core.h"
+#include "hdmi4_cec.h"
#include "dss.h"
#include "hdmi.h"
@@ -70,7 +72,8 @@ static void hdmi_runtime_put(void)
static irqreturn_t hdmi_irq_handler(int irq, void *data)
{
- struct hdmi_wp_data *wp = data;
+ struct omap_hdmi *hdmi = data;
+ struct hdmi_wp_data *wp = &hdmi->wp;
u32 irqstatus;
irqstatus = hdmi_wp_get_irqstatus(wp);
@@ -95,6 +98,13 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
} else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
}
+ if (irqstatus & HDMI_IRQ_CORE) {
+ u32 intr4 = hdmi_read_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4);
+
+ hdmi_write_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4, intr4);
+ if (intr4 & 8)
+ hdmi4_cec_irq(&hdmi->core);
+ }
return IRQ_HANDLED;
}
@@ -123,14 +133,19 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
{
int r;
+ if (hdmi.core.core_pwr_cnt++)
+ return 0;
+
r = regulator_enable(hdmi.vdda_reg);
if (r)
- return r;
+ goto err_reg_enable;
r = hdmi_runtime_get();
if (r)
goto err_runtime_get;
+ hdmi4_core_powerdown_disable(&hdmi.core);
+
/* Make selection of HDMI in DSS */
dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
@@ -140,12 +155,17 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
err_runtime_get:
regulator_disable(hdmi.vdda_reg);
+err_reg_enable:
+ hdmi.core.core_pwr_cnt--;
return r;
}
static void hdmi_power_off_core(struct omap_dss_device *dssdev)
{
+ if (--hdmi.core.core_pwr_cnt)
+ return;
+
hdmi.core_enabled = false;
hdmi_runtime_put();
@@ -166,8 +186,8 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
return r;
/* disable and clear irqs */
- hdmi_wp_clear_irqenable(wp, 0xffffffff);
- hdmi_wp_set_irqstatus(wp, 0xffffffff);
+ hdmi_wp_clear_irqenable(wp, ~HDMI_IRQ_CORE);
+ hdmi_wp_set_irqstatus(wp, ~HDMI_IRQ_CORE);
vm = &hdmi.cfg.vm;
@@ -242,7 +262,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
{
enum omap_channel channel = dssdev->dispc_channel;
- hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
+ hdmi_wp_clear_irqenable(&hdmi.wp, ~HDMI_IRQ_CORE);
hdmi_wp_video_stop(&hdmi.wp);
@@ -393,11 +413,11 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-static int hdmi_core_enable(struct omap_dss_device *dssdev)
+int hdmi4_core_enable(struct omap_dss_device *dssdev)
{
int r = 0;
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+ DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
mutex_lock(&hdmi.lock);
@@ -415,9 +435,9 @@ err0:
return r;
}
-static void hdmi_core_disable(struct omap_dss_device *dssdev)
+void hdmi4_core_disable(struct omap_dss_device *dssdev)
{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
+ DSSDBG("Enter omapdss_hdmi4_core_disable\n");
mutex_lock(&hdmi.lock);
@@ -475,19 +495,28 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
need_enable = hdmi.core_enabled == false;
if (need_enable) {
- r = hdmi_core_enable(dssdev);
+ r = hdmi4_core_enable(dssdev);
if (r)
return r;
}
r = read_edid(edid, len);
-
+ if (r >= 256)
+ hdmi4_cec_set_phys_addr(&hdmi.core,
+ cec_get_edid_phys_addr(edid, r, NULL));
+ else
+ hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
if (need_enable)
- hdmi_core_disable(dssdev);
+ hdmi4_core_disable(dssdev);
return r;
}
+static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
+{
+ hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
+}
+
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
@@ -514,6 +543,7 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.get_timings = hdmi_display_get_timings,
.read_edid = hdmi_read_edid,
+ .lost_hotplug = hdmi_lost_hotplug,
.set_infoframe = hdmi_set_infoframe,
.set_hdmi_mode = hdmi_set_hdmi_mode,
};
@@ -715,6 +745,10 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
goto err;
+ r = hdmi4_cec_init(pdev, &hdmi.core, &hdmi.wp);
+ if (r)
+ goto err;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
@@ -724,7 +758,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
+ IRQF_ONESHOT, "OMAP HDMI", &hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
goto err;
@@ -759,6 +793,8 @@ static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
hdmi_uninit_output(pdev);
+ hdmi4_cec_uninit(&hdmi.core);
+
hdmi_pll_uninit(&hdmi.pll);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
new file mode 100644
index 000000000000..e626eddf24d5
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -0,0 +1,381 @@
+/*
+ * HDMI CEC
+ *
+ * Based on the CEC code from hdmi_ti_4xxx_ip.c from Android.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * Heavily modified to use the linux CEC framework:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dss.h"
+#include "hdmi.h"
+#include "hdmi4_core.h"
+#include "hdmi4_cec.h"
+
+/* HDMI CEC */
+#define HDMI_CEC_DEV_ID 0x900
+#define HDMI_CEC_SPEC 0x904
+
+/* Not really a debug register, more a low-level control register */
+#define HDMI_CEC_DBG_3 0x91C
+#define HDMI_CEC_TX_INIT 0x920
+#define HDMI_CEC_TX_DEST 0x924
+#define HDMI_CEC_SETUP 0x938
+#define HDMI_CEC_TX_COMMAND 0x93C
+#define HDMI_CEC_TX_OPERAND 0x940
+#define HDMI_CEC_TRANSMIT_DATA 0x97C
+#define HDMI_CEC_CA_7_0 0x988
+#define HDMI_CEC_CA_15_8 0x98C
+#define HDMI_CEC_INT_STATUS_0 0x998
+#define HDMI_CEC_INT_STATUS_1 0x99C
+#define HDMI_CEC_INT_ENABLE_0 0x990
+#define HDMI_CEC_INT_ENABLE_1 0x994
+#define HDMI_CEC_RX_CONTROL 0x9B0
+#define HDMI_CEC_RX_COUNT 0x9B4
+#define HDMI_CEC_RX_CMD_HEADER 0x9B8
+#define HDMI_CEC_RX_COMMAND 0x9BC
+#define HDMI_CEC_RX_OPERAND 0x9C0
+
+#define HDMI_CEC_TX_FIFO_INT_MASK 0x64
+#define HDMI_CEC_RETRANSMIT_CNT_INT_MASK 0x2
+
+#define HDMI_CORE_CEC_RETRY 200
+
+static void hdmi_cec_received_msg(struct hdmi_core_data *core)
+{
+ u32 cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
+
+ /* While there are CEC frames in the FIFO */
+ while (cnt & 0x70) {
+ /* and the frame doesn't have an error */
+ if (!(cnt & 0x80)) {
+ struct cec_msg msg = {};
+ unsigned int i;
+
+ /* then read the message */
+ msg.len = cnt & 0xf;
+ msg.msg[0] = hdmi_read_reg(core->base,
+ HDMI_CEC_RX_CMD_HEADER);
+ msg.msg[1] = hdmi_read_reg(core->base,
+ HDMI_CEC_RX_COMMAND);
+ for (i = 0; i < msg.len; i++) {
+ unsigned int reg = HDMI_CEC_RX_OPERAND + i * 4;
+
+ msg.msg[2 + i] =
+ hdmi_read_reg(core->base, reg);
+ }
+ msg.len += 2;
+ cec_received_msg(core->adap, &msg);
+ }
+ /* Clear the current frame from the FIFO */
+ hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 1);
+ /* Wait until the current frame is cleared */
+ while (hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL) & 1)
+ udelay(1);
+ /*
+ * Re-read the count register and loop to see if there are
+ * more messages in the FIFO.
+ */
+ cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
+ }
+}
+
+static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
+{
+ if (stat1 & 2) {
+ u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
+
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, (dbg3 >> 4) & 7, 0, 0);
+ } else if (stat1 & 1) {
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_ARB_LOST |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, 0, 0, 0);
+ } else if (stat1 == 0) {
+ cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
+ }
+}
+
+void hdmi4_cec_irq(struct hdmi_core_data *core)
+{
+ u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
+ u32 stat1 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
+
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
+
+ if (stat0 & 0x40)
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
+ else if (stat0 & 0x24)
+ hdmi_cec_transmit_fifo_empty(core, stat1);
+ if (stat1 & 2) {
+ u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
+
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, (dbg3 >> 4) & 7, 0, 0);
+ } else if (stat1 & 1) {
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_ARB_LOST |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, 0, 0, 0);
+ }
+ if (stat0 & 0x02)
+ hdmi_cec_received_msg(core);
+ if (stat1 & 0x3)
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
+}
+
+static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int retry = HDMI_CORE_CEC_RETRY;
+ int temp;
+
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
+ while (retry) {
+ temp = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
+ if (FLD_GET(temp, 7, 7) == 0)
+ break;
+ retry--;
+ }
+ return retry != 0;
+}
+
+static bool hdmi_cec_clear_rx_fifo(struct cec_adapter *adap)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int retry = HDMI_CORE_CEC_RETRY;
+ int temp;
+
+ hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 0x3);
+ retry = HDMI_CORE_CEC_RETRY;
+ while (retry) {
+ temp = hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL);
+ if (FLD_GET(temp, 1, 0) == 0)
+ break;
+ retry--;
+ }
+ return retry != 0;
+}
+
+static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int temp, err;
+
+ if (!enable) {
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
+ hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
+ hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+ hdmi4_core_disable(NULL);
+ return 0;
+ }
+ err = hdmi4_core_enable(NULL);
+ if (err)
+ return err;
+
+ /* Clear TX FIFO */
+ if (!hdmi_cec_clear_tx_fifo(adap)) {
+ pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
+ return -EIO;
+ }
+
+ /* Clear RX FIFO */
+ if (!hdmi_cec_clear_rx_fifo(adap)) {
+ pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
+ return -EIO;
+ }
+
+ /* Clear CEC interrupts */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
+ hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1));
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
+ hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0));
+
+ /* Enable HDMI core interrupts */
+ hdmi_wp_set_irqenable(core->wp, HDMI_IRQ_CORE);
+ /* Unmask CEC interrupt */
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0x1, 3, 3);
+ /*
+ * Enable CEC interrupts:
+ * Transmit Buffer Full/Empty Change event
+ * Transmitter FIFO Empty event
+ * Receiver FIFO Not Empty event
+ */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+ /*
+ * Enable CEC interrupts:
+ * RX FIFO Overrun Error event
+ * Short Pulse Detected event
+ * Frame Retransmit Count Exceeded event
+ * Start Bit Irregularity event
+ */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+
+ /* cec calibration enable (self clearing) */
+ hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
+ msleep(20);
+ hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x04);
+
+ temp = hdmi_read_reg(core->base, HDMI_CEC_SETUP);
+ if (FLD_GET(temp, 4, 4) != 0) {
+ temp = FLD_MOD(temp, 0, 4, 4);
+ hdmi_write_reg(core->base, HDMI_CEC_SETUP, temp);
+
+ /*
+ * If we enabled CEC in middle of a CEC message on the bus,
+ * we could have start bit irregularity and/or short
+ * pulse event. Clear them now.
+ */
+ temp = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
+ temp = FLD_MOD(0x0, 0x5, 2, 0);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
+ }
+ return 0;
+}
+
+static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ u32 v;
+
+ if (log_addr == CEC_LOG_ADDR_INVALID) {
+ hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, 0);
+ hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, 0);
+ return 0;
+ }
+ if (log_addr <= 7) {
+ v = hdmi_read_reg(core->base, HDMI_CEC_CA_7_0);
+ v |= 1 << log_addr;
+ hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, v);
+ } else {
+ v = hdmi_read_reg(core->base, HDMI_CEC_CA_15_8);
+ v |= 1 << (log_addr - 8);
+ hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, v);
+ }
+ return 0;
+}
+
+static int hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int temp;
+ u32 i;
+
+ /* Clear TX FIFO */
+ if (!hdmi_cec_clear_tx_fifo(adap)) {
+ pr_err("cec-%s: could not clear TX FIFO for transmit\n",
+ adap->name);
+ return -EIO;
+ }
+
+ /* Clear TX interrupts */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
+ HDMI_CEC_TX_FIFO_INT_MASK);
+
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
+ HDMI_CEC_RETRANSMIT_CNT_INT_MASK);
+
+ /* Set the retry count */
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, attempts - 1, 6, 4);
+
+ /* Set the initiator addresses */
+ hdmi_write_reg(core->base, HDMI_CEC_TX_INIT, cec_msg_initiator(msg));
+
+ /* Set destination id */
+ temp = cec_msg_destination(msg);
+ if (msg->len == 1)
+ temp |= 0x80;
+ hdmi_write_reg(core->base, HDMI_CEC_TX_DEST, temp);
+ if (msg->len == 1)
+ return 0;
+
+ /* Setup command and arguments for the command */
+ hdmi_write_reg(core->base, HDMI_CEC_TX_COMMAND, msg->msg[1]);
+
+ for (i = 0; i < msg->len - 2; i++)
+ hdmi_write_reg(core->base, HDMI_CEC_TX_OPERAND + i * 4,
+ msg->msg[2 + i]);
+
+ /* Operand count */
+ hdmi_write_reg(core->base, HDMI_CEC_TRANSMIT_DATA,
+ (msg->len - 2) | 0x10);
+ return 0;
+}
+
+static const struct cec_adap_ops hdmi_cec_adap_ops = {
+ .adap_enable = hdmi_cec_adap_enable,
+ .adap_log_addr = hdmi_cec_adap_log_addr,
+ .adap_transmit = hdmi_cec_adap_transmit,
+};
+
+void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa)
+{
+ cec_s_phys_addr(core->adap, pa, false);
+}
+
+int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp)
+{
+ const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
+ int ret;
+
+ core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
+ "omap4", caps, CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(core->adap);
+ if (ret < 0)
+ return ret;
+ core->wp = wp;
+
+ /*
+ * Initialize CEC clock divider: CEC needs 2MHz clock hence
+ * set the devider to 24 to get 48/24=2MHz clock
+ */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
+ ret = cec_register_adapter(core->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_delete_adapter(core->adap);
+ return ret;
+ }
+ return 0;
+}
+
+void hdmi4_cec_uninit(struct hdmi_core_data *core)
+{
+ cec_unregister_adapter(core->adap);
+}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
new file mode 100644
index 000000000000..0292337c97cc
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
@@ -0,0 +1,55 @@
+/*
+ * HDMI header definition for OMAP4 HDMI CEC IP
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HDMI4_CEC_H_
+#define _HDMI4_CEC_H_
+
+struct hdmi_core_data;
+struct hdmi_wp_data;
+struct platform_device;
+
+/* HDMI CEC funcs */
+#ifdef CONFIG_OMAP4_DSS_HDMI_CEC
+void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa);
+void hdmi4_cec_irq(struct hdmi_core_data *core);
+int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp);
+void hdmi4_cec_uninit(struct hdmi_core_data *core);
+#else
+static inline void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa)
+{
+}
+
+static inline void hdmi4_cec_irq(struct hdmi_core_data *core)
+{
+}
+
+static inline int hdmi4_cec_init(struct platform_device *pdev,
+ struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp)
+{
+ return 0;
+}
+
+static inline void hdmi4_cec_uninit(struct hdmi_core_data *core)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 365cf07daa01..b06f9956e733 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -208,9 +208,9 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
}
-static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
+void hdmi4_core_powerdown_disable(struct hdmi_core_data *core)
{
- DSSDBG("Enter hdmi_core_powerdown_disable\n");
+ DSSDBG("Enter hdmi4_core_powerdown_disable\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
}
@@ -335,9 +335,6 @@ void hdmi4_configure(struct hdmi_core_data *core,
*/
hdmi_core_swreset_assert(core);
- /* power down off */
- hdmi_core_powerdown_disable(core);
-
v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode;
@@ -889,25 +886,36 @@ struct hdmi4_features {
bool audio_use_mclk;
};
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
.cts_swmode = false,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
.cts_swmode = true,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
.cts_swmode = true,
.audio_use_mclk = true,
};
static const struct soc_device_attribute hdmi4_soc_devices[] = {
- { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
- { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
- { .family = "OMAP4", .data = &hdmi4_es3_features },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES1.?",
+ .data = &hdmi4430_es1_features,
+ },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES2.?",
+ .data = &hdmi4430_es2_features,
+ },
+ {
+ .family = "OMAP4",
+ .data = &hdmi4_features,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index a069f96ec6f6..b6ab579e44d2 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -266,6 +266,10 @@ void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
+int hdmi4_core_enable(struct omap_dss_device *dssdev);
+void hdmi4_core_disable(struct omap_dss_device *dssdev);
+void hdmi4_core_powerdown_disable(struct hdmi_core_data *core);
+
int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 47a331670963..990422b35784 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -395,6 +395,7 @@ struct omapdss_hdmi_ops {
struct videomode *vm);
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ void (*lost_hotplug)(struct omap_dss_device *dssdev);
bool (*detect)(struct omap_dss_device *dssdev);
int (*register_hpd_cb)(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 1dd3dafc59af..c60a85e82c6d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
match = of_match_node(dmm_of_match, dev->dev.of_node);
if (!match) {
dev_err(&dev->dev, "failed to find matching device node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
omap_dmm->plat_data = match->data;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d84a031fae24..726f3fb3312d 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -63,6 +63,15 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_ORISETECH_OTM8009A
+ tristate "Orise Technology otm8009a 480x800 dsi 2dl panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Orise Technology
+ otm8009a 480x800 dsi 2dl panel.
+
config DRM_PANEL_PANASONIC_VVX10F034N00
tristate "Panasonic VVX10F034N00 1920x1200 video mode panel"
depends on OF
@@ -73,6 +82,14 @@ config DRM_PANEL_PANASONIC_VVX10F034N00
WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
Xperia Z2 tablets
+config DRM_PANEL_RASPBERRYPI_TOUCHSCREEN
+ tristate "Raspberry Pi 7-inch touchscreen panel"
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for the Raspberry
+ Pi 7" Touchscreen. To compile this driver as a module,
+ choose M here.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
@@ -80,12 +97,28 @@ config DRM_PANEL_SAMSUNG_S6E3HA2
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E63J0X03
+ tristate "Samsung S6E63J0X03 DSI command mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SEIKO_43WVF1G
+ tristate "Seiko 43WVF1G panel"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for the Seiko
+ 43WVF1G controller for 800x480 LCD panels
+
config DRM_PANEL_SHARP_LQ101R1SX01
tristate "Sharp LQ101R1SX01 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d73d3e661cec..2c4e1a93e05f 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -4,10 +4,14 @@ obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
+obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
new file mode 100644
index 000000000000..c189cd6329c8
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * Authors: Philippe Cornu <philippe.cornu@st.com>
+ * Yannick Fertre <yannick.fertre@st.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <video/mipi_display.h>
+
+#define DRV_NAME "orisetech_otm8009a"
+
+#define OTM8009A_BACKLIGHT_DEFAULT 240
+#define OTM8009A_BACKLIGHT_MAX 255
+
+/* Manufacturer Command Set */
+#define MCS_ADRSFT 0x0000 /* Address Shift Function */
+#define MCS_PANSET 0xB3A6 /* Panel Type Setting */
+#define MCS_SD_CTRL 0xC0A2 /* Source Driver Timing Setting */
+#define MCS_P_DRV_M 0xC0B4 /* Panel Driving Mode */
+#define MCS_OSC_ADJ 0xC181 /* Oscillator Adjustment for Idle/Normal mode */
+#define MCS_RGB_VID_SET 0xC1A1 /* RGB Video Mode Setting */
+#define MCS_SD_PCH_CTRL 0xC480 /* Source Driver Precharge Control */
+#define MCS_NO_DOC1 0xC48A /* Command not documented */
+#define MCS_PWR_CTRL1 0xC580 /* Power Control Setting 1 */
+#define MCS_PWR_CTRL2 0xC590 /* Power Control Setting 2 for Normal Mode */
+#define MCS_PWR_CTRL4 0xC5B0 /* Power Control Setting 4 for DC Voltage */
+#define MCS_PANCTRLSET1 0xCB80 /* Panel Control Setting 1 */
+#define MCS_PANCTRLSET2 0xCB90 /* Panel Control Setting 2 */
+#define MCS_PANCTRLSET3 0xCBA0 /* Panel Control Setting 3 */
+#define MCS_PANCTRLSET4 0xCBB0 /* Panel Control Setting 4 */
+#define MCS_PANCTRLSET5 0xCBC0 /* Panel Control Setting 5 */
+#define MCS_PANCTRLSET6 0xCBD0 /* Panel Control Setting 6 */
+#define MCS_PANCTRLSET7 0xCBE0 /* Panel Control Setting 7 */
+#define MCS_PANCTRLSET8 0xCBF0 /* Panel Control Setting 8 */
+#define MCS_PANU2D1 0xCC80 /* Panel U2D Setting 1 */
+#define MCS_PANU2D2 0xCC90 /* Panel U2D Setting 2 */
+#define MCS_PANU2D3 0xCCA0 /* Panel U2D Setting 3 */
+#define MCS_PAND2U1 0xCCB0 /* Panel D2U Setting 1 */
+#define MCS_PAND2U2 0xCCC0 /* Panel D2U Setting 2 */
+#define MCS_PAND2U3 0xCCD0 /* Panel D2U Setting 3 */
+#define MCS_GOAVST 0xCE80 /* GOA VST Setting */
+#define MCS_GOACLKA1 0xCEA0 /* GOA CLKA1 Setting */
+#define MCS_GOACLKA3 0xCEB0 /* GOA CLKA3 Setting */
+#define MCS_GOAECLK 0xCFC0 /* GOA ECLK Setting */
+#define MCS_NO_DOC2 0xCFD0 /* Command not documented */
+#define MCS_GVDDSET 0xD800 /* GVDD/NGVDD */
+#define MCS_VCOMDC 0xD900 /* VCOM Voltage Setting */
+#define MCS_GMCT2_2P 0xE100 /* Gamma Correction 2.2+ Setting */
+#define MCS_GMCT2_2N 0xE200 /* Gamma Correction 2.2- Setting */
+#define MCS_NO_DOC3 0xF5B6 /* Command not documented */
+#define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */
+#define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */
+
+struct otm8009a {
+ struct device *dev;
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+ bool enabled;
+};
+
+static const struct drm_display_mode default_mode = {
+ .clock = 32729,
+ .hdisplay = 480,
+ .hsync_start = 480 + 120,
+ .hsync_end = 480 + 120 + 63,
+ .htotal = 480 + 120 + 63 + 120,
+ .vdisplay = 800,
+ .vsync_start = 800 + 12,
+ .vsync_end = 800 + 12 + 12,
+ .vtotal = 800 + 12 + 12 + 12,
+ .vrefresh = 50,
+ .flags = 0,
+ .width_mm = 52,
+ .height_mm = 86,
+};
+
+static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
+{
+ return container_of(panel, struct otm8009a, panel);
+}
+
+static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ if (mipi_dsi_dcs_write_buffer(dsi, data, len) < 0)
+ DRM_WARN("mipi dsi dcs write buffer failed\n");
+}
+
+#define dcs_write_seq(ctx, seq...) \
+({ \
+ static const u8 d[] = { seq }; \
+ otm8009a_dcs_write_buf(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define dcs_write_cmd_at(ctx, cmd, seq...) \
+({ \
+ dcs_write_seq(ctx, MCS_ADRSFT, (cmd) & 0xFF); \
+ dcs_write_seq(ctx, (cmd) >> 8, seq); \
+})
+
+static int otm8009a_init_sequence(struct otm8009a *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /* Enter CMD2 */
+ dcs_write_cmd_at(ctx, MCS_CMD2_ENA1, 0x80, 0x09, 0x01);
+
+ /* Enter Orise Command2 */
+ dcs_write_cmd_at(ctx, MCS_CMD2_ENA2, 0x80, 0x09);
+
+ dcs_write_cmd_at(ctx, MCS_SD_PCH_CTRL, 0x30);
+ mdelay(10);
+
+ dcs_write_cmd_at(ctx, MCS_NO_DOC1, 0x40);
+ mdelay(10);
+
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL4 + 1, 0xA9);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 1, 0x34);
+ dcs_write_cmd_at(ctx, MCS_P_DRV_M, 0x50);
+ dcs_write_cmd_at(ctx, MCS_VCOMDC, 0x4E);
+ dcs_write_cmd_at(ctx, MCS_OSC_ADJ, 0x66); /* 65Hz */
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 2, 0x01);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 5, 0x34);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 4, 0x33);
+ dcs_write_cmd_at(ctx, MCS_GVDDSET, 0x79, 0x79);
+ dcs_write_cmd_at(ctx, MCS_SD_CTRL + 1, 0x1B);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL1 + 2, 0x83);
+ dcs_write_cmd_at(ctx, MCS_SD_PCH_CTRL + 1, 0x83);
+ dcs_write_cmd_at(ctx, MCS_RGB_VID_SET, 0x0E);
+ dcs_write_cmd_at(ctx, MCS_PANSET, 0x00, 0x01);
+
+ dcs_write_cmd_at(ctx, MCS_GOAVST, 0x85, 0x01, 0x00, 0x84, 0x01, 0x00);
+ dcs_write_cmd_at(ctx, MCS_GOACLKA1, 0x18, 0x04, 0x03, 0x39, 0x00, 0x00,
+ 0x00, 0x18, 0x03, 0x03, 0x3A, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_GOACLKA3, 0x18, 0x02, 0x03, 0x3B, 0x00, 0x00,
+ 0x00, 0x18, 0x01, 0x03, 0x3C, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_GOAECLK, 0x01, 0x01, 0x20, 0x20, 0x00, 0x00,
+ 0x01, 0x02, 0x00, 0x00);
+
+ dcs_write_cmd_at(ctx, MCS_NO_DOC2, 0x00);
+
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET5, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET6, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4,
+ 4, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
+
+ dcs_write_cmd_at(ctx, MCS_PANU2D1, 0x00, 0x26, 0x09, 0x0B, 0x01, 0x25,
+ 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_PANU2D2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x0A, 0x0C, 0x02);
+ dcs_write_cmd_at(ctx, MCS_PANU2D3, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_PAND2U1, 0x00, 0x25, 0x0C, 0x0A, 0x02, 0x26,
+ 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_PAND2U2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x0B, 0x09, 0x01);
+ dcs_write_cmd_at(ctx, MCS_PAND2U3, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL1 + 1, 0x66);
+
+ dcs_write_cmd_at(ctx, MCS_NO_DOC3, 0x06);
+
+ dcs_write_cmd_at(ctx, MCS_GMCT2_2P, 0x00, 0x09, 0x0F, 0x0E, 0x07, 0x10,
+ 0x0B, 0x0A, 0x04, 0x07, 0x0B, 0x08, 0x0F, 0x10, 0x0A,
+ 0x01);
+ dcs_write_cmd_at(ctx, MCS_GMCT2_2N, 0x00, 0x09, 0x0F, 0x0E, 0x07, 0x10,
+ 0x0B, 0x0A, 0x04, 0x07, 0x0B, 0x08, 0x0F, 0x10, 0x0A,
+ 0x01);
+
+ /* Exit CMD2 */
+ dcs_write_cmd_at(ctx, MCS_CMD2_ENA1, 0xFF, 0xFF, 0xFF);
+
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret)
+ return ret;
+
+ /* Wait for sleep out exit */
+ mdelay(120);
+
+ /* Default portrait 480x800 rgb24 */
+ dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0,
+ default_mode.hdisplay - 1);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
+ if (ret)
+ return ret;
+
+ /* See otm8009a driver documentation for pixel format descriptions */
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
+ MIPI_DCS_PIXEL_FMT_24BIT << 4);
+ if (ret)
+ return ret;
+
+ /* Disable CABC feature */
+ dcs_write_seq(ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret)
+ return ret;
+
+ /* Send Command GRAM memory write (no parameters) */
+ dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
+
+ return 0;
+}
+
+static int otm8009a_disable(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->enabled)
+ return 0; /* This is not an issue so we return 0 here */
+
+ /* Power off the backlight. Note: end-user still controls brightness */
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ret = backlight_update_status(ctx->bl_dev);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret)
+ return ret;
+
+ msleep(120);
+
+ ctx->enabled = false;
+
+ return 0;
+}
+
+static int otm8009a_unprepare(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+
+ if (!ctx->prepared)
+ return 0;
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ }
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int otm8009a_prepare(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(100);
+ }
+
+ ret = otm8009a_init_sequence(ctx);
+ if (ret)
+ return ret;
+
+ ctx->prepared = true;
+
+ /*
+ * Power on the backlight. Note: end-user still controls brightness
+ * Note: ctx->prepared must be true before updating the backlight.
+ */
+ ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(ctx->bl_dev);
+
+ return 0;
+}
+
+static int otm8009a_enable(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int otm8009a_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs otm8009a_drm_funcs = {
+ .disable = otm8009a_disable,
+ .unprepare = otm8009a_unprepare,
+ .prepare = otm8009a_prepare,
+ .enable = otm8009a_enable,
+ .get_modes = otm8009a_get_modes,
+};
+
+/*
+ * DSI-BASED BACKLIGHT
+ */
+
+static int otm8009a_backlight_update_status(struct backlight_device *bd)
+{
+ struct otm8009a *ctx = bl_get_data(bd);
+ u8 data[2];
+
+ if (!ctx->prepared) {
+ DRM_DEBUG("lcd not ready yet for setting its backlight!\n");
+ return -ENXIO;
+ }
+
+ if (bd->props.power <= FB_BLANK_NORMAL) {
+ /* Power on the backlight with the requested brightness
+ * Note We can not use mipi_dsi_dcs_set_display_brightness()
+ * as otm8009a driver support only 8-bit brightness (1 param).
+ */
+ data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
+ data[1] = bd->props.brightness;
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+
+ /* set Brightness Control & Backlight on */
+ data[1] = 0x24;
+
+ } else {
+ /* Power off the backlight: set Brightness Control & Bl off */
+ data[1] = 0;
+ }
+
+ /* Update Brightness Control & Backlight */
+ data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+
+ return 0;
+}
+
+static const struct backlight_ops otm8009a_backlight_ops = {
+ .update_status = otm8009a_backlight_update_status,
+};
+
+static int otm8009a_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct otm8009a *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &otm8009a_drm_funcs;
+
+ ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
+ &otm8009a_backlight_ops, NULL);
+ if (IS_ERR(ctx->bl_dev)) {
+ dev_err(dev, "failed to register backlight device\n");
+ return PTR_ERR(ctx->bl_dev);
+ }
+
+ ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
+ ctx->bl_dev->props.brightness = OTM8009A_BACKLIGHT_DEFAULT;
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.type = BACKLIGHT_RAW;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach failed. Is host ready?\n");
+ drm_panel_remove(&ctx->panel);
+ backlight_device_unregister(ctx->bl_dev);
+ return ret;
+ }
+
+ DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ return 0;
+}
+
+static int otm8009a_remove(struct mipi_dsi_device *dsi)
+{
+ struct otm8009a *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ backlight_device_unregister(ctx->bl_dev);
+
+ return 0;
+}
+
+static const struct of_device_id orisetech_otm8009a_of_match[] = {
+ { .compatible = "orisetech,otm8009a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, orisetech_otm8009a_of_match);
+
+static struct mipi_dsi_driver orisetech_otm8009a_driver = {
+ .probe = otm8009a_probe,
+ .remove = otm8009a_remove,
+ .driver = {
+ .name = DRV_NAME "_panel",
+ .of_match_table = orisetech_otm8009a_of_match,
+ },
+};
+module_mipi_dsi_driver(orisetech_otm8009a_driver);
+
+MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("DRM driver for Orise Tech OTM8009A MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
new file mode 100644
index 000000000000..890fd6ff397c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright © 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Portions of this file (derived from panel-simple.c) are:
+ *
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * Raspberry Pi 7" touchscreen panel driver.
+ *
+ * The 7" touchscreen consists of a DPI LCD panel, a Toshiba
+ * TC358762XBG DSI-DPI bridge, and an I2C-connected Atmel ATTINY88-MUR
+ * controlling power management, the LCD PWM, and initial register
+ * setup of the Tohsiba.
+ *
+ * This driver controls the TC358762 and ATTINY88, presenting a DSI
+ * device with a drm_panel.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm.h>
+
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#define RPI_DSI_DRIVER_NAME "rpi-ts-dsi"
+
+/* I2C registers of the Atmel microcontroller. */
+enum REG_ADDR {
+ REG_ID = 0x80,
+ REG_PORTA, /* BIT(2) for horizontal flip, BIT(3) for vertical flip */
+ REG_PORTB,
+ REG_PORTC,
+ REG_PORTD,
+ REG_POWERON,
+ REG_PWM,
+ REG_DDRA,
+ REG_DDRB,
+ REG_DDRC,
+ REG_DDRD,
+ REG_TEST,
+ REG_WR_ADDRL,
+ REG_WR_ADDRH,
+ REG_READH,
+ REG_READL,
+ REG_WRITEH,
+ REG_WRITEL,
+ REG_ID2,
+};
+
+/* DSI D-PHY Layer Registers */
+#define D0W_DPHYCONTTX 0x0004
+#define CLW_DPHYCONTRX 0x0020
+#define D0W_DPHYCONTRX 0x0024
+#define D1W_DPHYCONTRX 0x0028
+#define COM_DPHYCONTRX 0x0038
+#define CLW_CNTRL 0x0040
+#define D0W_CNTRL 0x0044
+#define D1W_CNTRL 0x0048
+#define DFTMODE_CNTRL 0x0054
+
+/* DSI PPI Layer Registers */
+#define PPI_STARTPPI 0x0104
+#define PPI_BUSYPPI 0x0108
+#define PPI_LINEINITCNT 0x0110
+#define PPI_LPTXTIMECNT 0x0114
+#define PPI_CLS_ATMR 0x0140
+#define PPI_D0S_ATMR 0x0144
+#define PPI_D1S_ATMR 0x0148
+#define PPI_D0S_CLRSIPOCOUNT 0x0164
+#define PPI_D1S_CLRSIPOCOUNT 0x0168
+#define CLS_PRE 0x0180
+#define D0S_PRE 0x0184
+#define D1S_PRE 0x0188
+#define CLS_PREP 0x01A0
+#define D0S_PREP 0x01A4
+#define D1S_PREP 0x01A8
+#define CLS_ZERO 0x01C0
+#define D0S_ZERO 0x01C4
+#define D1S_ZERO 0x01C8
+#define PPI_CLRFLG 0x01E0
+#define PPI_CLRSIPO 0x01E4
+#define HSTIMEOUT 0x01F0
+#define HSTIMEOUTENABLE 0x01F4
+
+/* DSI Protocol Layer Registers */
+#define DSI_STARTDSI 0x0204
+#define DSI_BUSYDSI 0x0208
+#define DSI_LANEENABLE 0x0210
+# define DSI_LANEENABLE_CLOCK BIT(0)
+# define DSI_LANEENABLE_D0 BIT(1)
+# define DSI_LANEENABLE_D1 BIT(2)
+
+#define DSI_LANESTATUS0 0x0214
+#define DSI_LANESTATUS1 0x0218
+#define DSI_INTSTATUS 0x0220
+#define DSI_INTMASK 0x0224
+#define DSI_INTCLR 0x0228
+#define DSI_LPTXTO 0x0230
+#define DSI_MODE 0x0260
+#define DSI_PAYLOAD0 0x0268
+#define DSI_PAYLOAD1 0x026C
+#define DSI_SHORTPKTDAT 0x0270
+#define DSI_SHORTPKTREQ 0x0274
+#define DSI_BTASTA 0x0278
+#define DSI_BTACLR 0x027C
+
+/* DSI General Registers */
+#define DSIERRCNT 0x0300
+#define DSISIGMOD 0x0304
+
+/* DSI Application Layer Registers */
+#define APLCTRL 0x0400
+#define APLSTAT 0x0404
+#define APLERR 0x0408
+#define PWRMOD 0x040C
+#define RDPKTLN 0x0410
+#define PXLFMT 0x0414
+#define MEMWRCMD 0x0418
+
+/* LCDC/DPI Host Registers */
+#define LCDCTRL 0x0420
+#define HSR 0x0424
+#define HDISPR 0x0428
+#define VSR 0x042C
+#define VDISPR 0x0430
+#define VFUEN 0x0434
+
+/* DBI-B Host Registers */
+#define DBIBCTRL 0x0440
+
+/* SPI Master Registers */
+#define SPICMR 0x0450
+#define SPITCR 0x0454
+
+/* System Controller Registers */
+#define SYSSTAT 0x0460
+#define SYSCTRL 0x0464
+#define SYSPLL1 0x0468
+#define SYSPLL2 0x046C
+#define SYSPLL3 0x0470
+#define SYSPMCTRL 0x047C
+
+/* GPIO Registers */
+#define GPIOC 0x0480
+#define GPIOO 0x0484
+#define GPIOI 0x0488
+
+/* I2C Registers */
+#define I2CCLKCTRL 0x0490
+
+/* Chip/Rev Registers */
+#define IDREG 0x04A0
+
+/* Debug Registers */
+#define WCMDQUEUE 0x0500
+#define RCMDQUEUE 0x0504
+
+struct rpi_touchscreen {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+ struct i2c_client *i2c;
+};
+
+static const struct drm_display_mode rpi_touchscreen_modes[] = {
+ {
+ /* Modeline comes from the Raspberry Pi firmware, with HFP=1
+ * plugged in and clock re-computed from that.
+ */
+ .clock = 25979400 / 1000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 1,
+ .hsync_end = 800 + 1 + 2,
+ .htotal = 800 + 1 + 2 + 46,
+ .vdisplay = 480,
+ .vsync_start = 480 + 7,
+ .vsync_end = 480 + 7 + 2,
+ .vtotal = 480 + 7 + 2 + 21,
+ .vrefresh = 60,
+ },
+};
+
+static struct rpi_touchscreen *panel_to_ts(struct drm_panel *panel)
+{
+ return container_of(panel, struct rpi_touchscreen, base);
+}
+
+static u8 rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
+{
+ return i2c_smbus_read_byte_data(ts->i2c, reg);
+}
+
+static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
+ u8 reg, u8 val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
+ if (ret)
+ dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+}
+
+static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
+{
+#if 0
+ /* The firmware uses LP DSI transactions like this to bring up
+ * the hardware, which should be faster than using I2C to then
+ * pass to the Toshiba. However, I was unable to get it to
+ * work.
+ */
+ u8 msg[] = {
+ reg,
+ reg >> 8,
+ val,
+ val >> 8,
+ val >> 16,
+ val >> 24,
+ };
+
+ mipi_dsi_dcs_write_buffer(ts->dsi, msg, sizeof(msg));
+#else
+ rpi_touchscreen_i2c_write(ts, REG_WR_ADDRH, reg >> 8);
+ rpi_touchscreen_i2c_write(ts, REG_WR_ADDRL, reg);
+ rpi_touchscreen_i2c_write(ts, REG_WRITEH, val >> 8);
+ rpi_touchscreen_i2c_write(ts, REG_WRITEL, val);
+#endif
+
+ return 0;
+}
+
+static int rpi_touchscreen_disable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+
+ rpi_touchscreen_i2c_write(ts, REG_PWM, 0);
+
+ rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
+ udelay(1);
+
+ return 0;
+}
+
+static int rpi_touchscreen_noop(struct drm_panel *panel)
+{
+ return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+ int i;
+
+ rpi_touchscreen_i2c_write(ts, REG_POWERON, 1);
+ /* Wait for nPWRDWN to go low to indicate poweron is done. */
+ for (i = 0; i < 100; i++) {
+ if (rpi_touchscreen_i2c_read(ts, REG_PORTB) & 1)
+ break;
+ }
+
+ rpi_touchscreen_write(ts, DSI_LANEENABLE,
+ DSI_LANEENABLE_CLOCK |
+ DSI_LANEENABLE_D0);
+ rpi_touchscreen_write(ts, PPI_D0S_CLRSIPOCOUNT, 0x05);
+ rpi_touchscreen_write(ts, PPI_D1S_CLRSIPOCOUNT, 0x05);
+ rpi_touchscreen_write(ts, PPI_D0S_ATMR, 0x00);
+ rpi_touchscreen_write(ts, PPI_D1S_ATMR, 0x00);
+ rpi_touchscreen_write(ts, PPI_LPTXTIMECNT, 0x03);
+
+ rpi_touchscreen_write(ts, SPICMR, 0x00);
+ rpi_touchscreen_write(ts, LCDCTRL, 0x00100150);
+ rpi_touchscreen_write(ts, SYSCTRL, 0x040f);
+ msleep(100);
+
+ rpi_touchscreen_write(ts, PPI_STARTPPI, 0x01);
+ rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
+ msleep(100);
+
+ /* Turn on the backlight. */
+ rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
+
+ /* Default to the same orientation as the closed source
+ * firmware used for the panel. Runtime rotation
+ * configuration will be supported using VC4's plane
+ * orientation bits.
+ */
+ rpi_touchscreen_i2c_write(ts, REG_PORTA, BIT(2));
+
+ return 0;
+}
+
+static int rpi_touchscreen_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_device *drm = panel->drm;
+ unsigned int i, num = 0;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ for (i = 0; i < ARRAY_SIZE(rpi_touchscreen_modes); i++) {
+ const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(drm, m);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ continue;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = 154;
+ connector->display_info.height_mm = 86;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return num;
+}
+
+static const struct drm_panel_funcs rpi_touchscreen_funcs = {
+ .disable = rpi_touchscreen_disable,
+ .unprepare = rpi_touchscreen_noop,
+ .prepare = rpi_touchscreen_noop,
+ .enable = rpi_touchscreen_enable,
+ .get_modes = rpi_touchscreen_get_modes,
+};
+
+static int rpi_touchscreen_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct rpi_touchscreen *ts;
+ struct device_node *endpoint, *dsi_host_node;
+ struct mipi_dsi_host *host;
+ int ret, ver;
+ struct mipi_dsi_device_info info = {
+ .type = RPI_DSI_DRIVER_NAME,
+ .channel = 0,
+ .node = NULL,
+ };
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, ts);
+
+ ts->i2c = i2c;
+
+ ver = rpi_touchscreen_i2c_read(ts, REG_ID);
+ if (ver < 0) {
+ dev_err(dev, "Atmel I2C read failed: %d\n", ver);
+ return -ENODEV;
+ }
+
+ switch (ver) {
+ case 0xde: /* ver 1 */
+ case 0xc3: /* ver 2 */
+ break;
+ default:
+ dev_err(dev, "Unknown Atmel firmware revision: 0x%02x\n", ver);
+ return -ENODEV;
+ }
+
+ /* Turn off at boot, so we can cleanly sequence powering on. */
+ rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
+
+ /* Look up the DSI host. It needs to probe before we do. */
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host) {
+ of_node_put(endpoint);
+ return -EPROBE_DEFER;
+ }
+
+ info.node = of_graph_get_remote_port(endpoint);
+ of_node_put(endpoint);
+
+ ts->dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(ts->dsi)) {
+ dev_err(dev, "DSI device registration failed: %ld\n",
+ PTR_ERR(ts->dsi));
+ return PTR_ERR(ts->dsi);
+ }
+
+ ts->base.dev = dev;
+ ts->base.funcs = &rpi_touchscreen_funcs;
+
+ /* This appears last, as it's what will unblock the DSI host
+ * driver's component bind function.
+ */
+ ret = drm_panel_add(&ts->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rpi_touchscreen_remove(struct i2c_client *i2c)
+{
+ struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
+
+ mipi_dsi_detach(ts->dsi);
+
+ drm_panel_remove(&ts->base);
+
+ mipi_dsi_device_unregister(ts->dsi);
+ kfree(ts->dsi);
+
+ return 0;
+}
+
+static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ int ret;
+
+ dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM);
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 1;
+
+ ret = mipi_dsi_attach(dsi);
+
+ if (ret)
+ dev_err(&dsi->dev, "failed to attach dsi to host: %d\n", ret);
+
+ return ret;
+}
+
+static struct mipi_dsi_driver rpi_touchscreen_dsi_driver = {
+ .driver.name = RPI_DSI_DRIVER_NAME,
+ .probe = rpi_touchscreen_dsi_probe,
+};
+
+static const struct of_device_id rpi_touchscreen_of_ids[] = {
+ { .compatible = "raspberrypi,7inch-touchscreen-panel" },
+ { } /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, rpi_touchscreen_of_ids);
+
+static struct i2c_driver rpi_touchscreen_driver = {
+ .driver = {
+ .name = "rpi_touchscreen",
+ .of_match_table = rpi_touchscreen_of_ids,
+ },
+ .probe = rpi_touchscreen_probe,
+ .remove = rpi_touchscreen_remove,
+};
+
+static int __init rpi_touchscreen_init(void)
+{
+ mipi_dsi_driver_register(&rpi_touchscreen_dsi_driver);
+ return i2c_add_driver(&rpi_touchscreen_driver);
+}
+module_init(rpi_touchscreen_init);
+
+static void __exit rpi_touchscreen_exit(void)
+{
+ i2c_del_driver(&rpi_touchscreen_driver);
+ mipi_dsi_driver_unregister(&rpi_touchscreen_dsi_driver);
+}
+module_exit(rpi_touchscreen_exit);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi 7-inch touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
new file mode 100644
index 000000000000..aeb32aa58899
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -0,0 +1,532 @@
+/*
+ * MIPI-DSI based S6E63J0X03 AMOLED lcd 1.63 inch panel driver.
+ *
+ * Copyright (c) 2014-2017 Samsung Electronics Co., Ltd
+ *
+ * Inki Dae <inki.dae@samsung.com>
+ * Hoegeun Kwon <hoegeun.kwon@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
+
+#define MCS_LEVEL2_KEY 0xf0
+#define MCS_MTP_KEY 0xf1
+#define MCS_MTP_SET3 0xd4
+
+#define MAX_BRIGHTNESS 100
+#define DEFAULT_BRIGHTNESS 80
+
+#define NUM_GAMMA_STEPS 9
+#define GAMMA_CMD_CNT 28
+
+#define FIRST_COLUMN 20
+
+struct s6e63j0x03 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct drm_display_mode default_mode = {
+ .clock = 4649,
+ .hdisplay = 320,
+ .hsync_start = 320 + 1,
+ .hsync_end = 320 + 1 + 1,
+ .htotal = 320 + 1 + 1 + 1,
+ .vdisplay = 320,
+ .vsync_start = 320 + 150,
+ .vsync_end = 320 + 150 + 1,
+ .vtotal = 320 + 150 + 1 + 2,
+ .vrefresh = 30,
+ .flags = 0,
+};
+
+static const unsigned char gamma_tbl[NUM_GAMMA_STEPS][GAMMA_CMD_CNT] = {
+ { /* Gamma 10 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x7f, 0x7f, 0x7f, 0x52, 0x6b, 0x6f, 0x26,
+ 0x28, 0x2d, 0x28, 0x26, 0x27, 0x33, 0x34, 0x32, 0x36, 0x36,
+ 0x35, 0x00, 0xab, 0x00, 0xae, 0x00, 0xbf
+ },
+ { /* gamma 30 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x70, 0x7f, 0x7f, 0x4e, 0x64, 0x69, 0x26,
+ 0x27, 0x2a, 0x28, 0x29, 0x27, 0x31, 0x32, 0x31, 0x35, 0x34,
+ 0x35, 0x00, 0xc4, 0x00, 0xca, 0x00, 0xdc
+ },
+ { /* gamma 60 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x65, 0x7b, 0x7d, 0x5f, 0x67, 0x68, 0x2a,
+ 0x28, 0x29, 0x28, 0x2a, 0x27, 0x31, 0x2f, 0x30, 0x34, 0x33,
+ 0x34, 0x00, 0xd9, 0x00, 0xe4, 0x00, 0xf5
+ },
+ { /* gamma 90 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x4d, 0x6f, 0x71, 0x67, 0x6a, 0x6c, 0x29,
+ 0x28, 0x28, 0x28, 0x29, 0x27, 0x30, 0x2e, 0x30, 0x32, 0x31,
+ 0x31, 0x00, 0xea, 0x00, 0xf6, 0x01, 0x09
+ },
+ { /* gamma 120 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x3d, 0x66, 0x68, 0x69, 0x69, 0x69, 0x28,
+ 0x28, 0x27, 0x28, 0x28, 0x27, 0x30, 0x2e, 0x2f, 0x31, 0x31,
+ 0x30, 0x00, 0xf9, 0x01, 0x05, 0x01, 0x1b
+ },
+ { /* gamma 150 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x31, 0x51, 0x53, 0x66, 0x66, 0x67, 0x28,
+ 0x29, 0x27, 0x28, 0x27, 0x27, 0x2e, 0x2d, 0x2e, 0x31, 0x31,
+ 0x30, 0x01, 0x04, 0x01, 0x11, 0x01, 0x29
+ },
+ { /* gamma 200 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x2f, 0x4f, 0x51, 0x67, 0x65, 0x65, 0x29,
+ 0x2a, 0x28, 0x27, 0x25, 0x26, 0x2d, 0x2c, 0x2c, 0x30, 0x30,
+ 0x30, 0x01, 0x14, 0x01, 0x23, 0x01, 0x3b
+ },
+ { /* gamma 240 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x2c, 0x4d, 0x50, 0x65, 0x63, 0x64, 0x2a,
+ 0x2c, 0x29, 0x26, 0x24, 0x25, 0x2c, 0x2b, 0x2b, 0x30, 0x30,
+ 0x30, 0x01, 0x1e, 0x01, 0x2f, 0x01, 0x47
+ },
+ { /* gamma 300 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x38, 0x61, 0x64, 0x65, 0x63, 0x64, 0x28,
+ 0x2a, 0x27, 0x26, 0x23, 0x25, 0x2b, 0x2b, 0x2a, 0x30, 0x2f,
+ 0x30, 0x01, 0x2d, 0x01, 0x3f, 0x01, 0x57
+ }
+};
+
+static inline struct s6e63j0x03 *panel_to_s6e63j0x03(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e63j0x03, panel);
+}
+
+static inline ssize_t s6e63j0x03_dcs_write_seq(struct s6e63j0x03 *ctx,
+ const void *seq, size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ return mipi_dsi_dcs_write_buffer(dsi, seq, len);
+}
+
+#define s6e63j0x03_dcs_write_seq_static(ctx, seq...) \
+ ({ \
+ static const u8 d[] = { seq }; \
+ s6e63j0x03_dcs_write_seq(ctx, d, ARRAY_SIZE(d)); \
+ })
+
+static inline int s6e63j0x03_enable_lv2_command(struct s6e63j0x03 *ctx)
+{
+ return s6e63j0x03_dcs_write_seq_static(ctx, MCS_LEVEL2_KEY, 0x5a, 0x5a);
+}
+
+static inline int s6e63j0x03_apply_mtp_key(struct s6e63j0x03 *ctx, bool on)
+{
+ if (on)
+ return s6e63j0x03_dcs_write_seq_static(ctx,
+ MCS_MTP_KEY, 0x5a, 0x5a);
+
+ return s6e63j0x03_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0xa5, 0xa5);
+}
+
+static int s6e63j0x03_power_on(struct s6e63j0x03 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+
+ return 0;
+}
+
+static int s6e63j0x03_power_off(struct s6e63j0x03 *ctx)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static unsigned int s6e63j0x03_get_brightness_index(unsigned int brightness)
+{
+ unsigned int index;
+
+ index = brightness / (MAX_BRIGHTNESS / NUM_GAMMA_STEPS);
+
+ if (index >= NUM_GAMMA_STEPS)
+ index = NUM_GAMMA_STEPS - 1;
+
+ return index;
+}
+
+static int s6e63j0x03_update_gamma(struct s6e63j0x03 *ctx,
+ unsigned int brightness)
+{
+ struct backlight_device *bl_dev = ctx->bl_dev;
+ unsigned int index = s6e63j0x03_get_brightness_index(brightness);
+ int ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, true);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_dcs_write_seq(ctx, gamma_tbl[index], GAMMA_CMD_CNT);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, false);
+ if (ret < 0)
+ return ret;
+
+ bl_dev->props.brightness = brightness;
+
+ return 0;
+}
+
+static int s6e63j0x03_set_brightness(struct backlight_device *bl_dev)
+{
+ struct s6e63j0x03 *ctx = bl_get_data(bl_dev);
+ unsigned int brightness = bl_dev->props.brightness;
+
+ return s6e63j0x03_update_gamma(ctx, brightness);
+}
+
+static const struct backlight_ops s6e63j0x03_bl_ops = {
+ .update_status = s6e63j0x03_set_brightness,
+};
+
+static int s6e63j0x03_disable(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ return ret;
+
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ return 0;
+}
+
+static int s6e63j0x03_unprepare(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ int ret;
+
+ ret = s6e63j0x03_power_off(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+
+ return 0;
+}
+
+static int s6e63j0x03_panel_init(struct s6e63j0x03 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = s6e63j0x03_enable_lv2_command(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, true);
+ if (ret < 0)
+ return ret;
+
+ /* set porch adjustment */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf2, 0x1c, 0x28);
+ if (ret < 0)
+ return ret;
+
+ /* set frame freq */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb5, 0x00, 0x02, 0x00);
+ if (ret < 0)
+ return ret;
+
+ /* set caset, paset */
+ ret = mipi_dsi_dcs_set_column_address(dsi, FIRST_COLUMN,
+ default_mode.hdisplay - 1 + FIRST_COLUMN);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
+ if (ret < 0)
+ return ret;
+
+ /* set ltps timming 0, 1 */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf8, 0x08, 0x08, 0x08, 0x17,
+ 0x00, 0x2a, 0x02, 0x26, 0x00, 0x00, 0x02, 0x00, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf7, 0x02);
+ if (ret < 0)
+ return ret;
+
+ /* set param pos te_edge */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb0, 0x01);
+ if (ret < 0)
+ return ret;
+
+ /* set te rising edge */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xe2, 0x0f);
+ if (ret < 0)
+ return ret;
+
+ /* set param pos default */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb0, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, false);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int s6e63j0x03_prepare(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ int ret;
+
+ ret = s6e63j0x03_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_panel_init(ctx);
+ if (ret < 0)
+ goto err;
+
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+
+err:
+ s6e63j0x03_power_off(ctx);
+ return ret;
+}
+
+static int s6e63j0x03_enable(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ msleep(120);
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, true);
+ if (ret < 0)
+ return ret;
+
+ /* set elvss_cond */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb1, 0x00, 0x09);
+ if (ret < 0)
+ return ret;
+
+ /* set pos */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx,
+ MIPI_DCS_SET_ADDRESS_MODE, 0x40);
+ if (ret < 0)
+ return ret;
+
+ /* set default white brightness */
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
+ if (ret < 0)
+ return ret;
+
+ /* set white ctrl */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ if (ret < 0)
+ return ret;
+
+ /* set acl off */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx,
+ MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, false);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0)
+ return ret;
+
+ ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+
+ return 0;
+}
+
+static int s6e63j0x03_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 29;
+ connector->display_info.height_mm = 29;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e63j0x03_funcs = {
+ .disable = s6e63j0x03_disable,
+ .unprepare = s6e63j0x03_unprepare,
+ .prepare = s6e63j0x03_prepare,
+ .enable = s6e63j0x03_enable,
+ .get_modes = s6e63j0x03_get_modes,
+};
+
+static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e63j0x03 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct s6e63j0x03), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_EOT_PACKET;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpio: %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &s6e63j0x03_funcs;
+
+ ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
+ &s6e63j0x03_bl_ops, NULL);
+ if (IS_ERR(ctx->bl_dev)) {
+ dev_err(dev, "failed to register backlight device\n");
+ return PTR_ERR(ctx->bl_dev);
+ }
+
+ ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS;
+ ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ goto unregister_backlight;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ goto remove_panel;
+
+ return ret;
+
+remove_panel:
+ drm_panel_remove(&ctx->panel);
+
+unregister_backlight:
+ backlight_device_unregister(ctx->bl_dev);
+
+ return ret;
+}
+
+static int s6e63j0x03_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e63j0x03 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ backlight_device_unregister(ctx->bl_dev);
+
+ return 0;
+}
+
+static const struct of_device_id s6e63j0x03_of_match[] = {
+ { .compatible = "samsung,s6e63j0x03" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e63j0x03_of_match);
+
+static struct mipi_dsi_driver s6e63j0x03_driver = {
+ .probe = s6e63j0x03_probe,
+ .remove = s6e63j0x03_remove,
+ .driver = {
+ .name = "panel_samsung_s6e63j0x03",
+ .of_match_table = s6e63j0x03_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e63j0x03_driver);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Hoegeun Kwon <hoegeun.kwon@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based s6e63j0x03 AMOLED LCD Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
new file mode 100644
index 000000000000..71c09ed436ae
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors.
+ * Author: Marco Franchi <marco.franchi@nxp.com>
+ *
+ * Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
+struct seiko_panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int num_modes;
+ const struct display_timing *timings;
+ unsigned int num_timings;
+
+ unsigned int bpc;
+
+ /**
+ * @width: width (in millimeters) of the panel's active display area
+ * @height: height (in millimeters) of the panel's active display area
+ */
+ struct {
+ unsigned int width;
+ unsigned int height;
+ } size;
+
+ u32 bus_format;
+ u32 bus_flags;
+};
+
+struct seiko_panel {
+ struct drm_panel base;
+ bool prepared;
+ bool enabled;
+ const struct seiko_panel_desc *desc;
+ struct backlight_device *backlight;
+ struct regulator *dvdd;
+ struct regulator *avdd;
+};
+
+static inline struct seiko_panel *to_seiko_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct seiko_panel, base);
+}
+
+static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
+{
+ struct drm_connector *connector = panel->base.connector;
+ struct drm_device *drm = panel->base.drm;
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(drm);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_timings == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ for (i = 0; i < panel->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &panel->desc->modes[i];
+
+ mode = drm_mode_duplicate(drm, m);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ continue;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ connector->display_info.bpc = panel->desc->bpc;
+ connector->display_info.width_mm = panel->desc->size.width;
+ connector->display_info.height_mm = panel->desc->size.height;
+ if (panel->desc->bus_format)
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel->desc->bus_format, 1);
+ connector->display_info.bus_flags = panel->desc->bus_flags;
+
+ return num;
+}
+
+static int seiko_panel_disable(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ if (!p->enabled)
+ return 0;
+
+ if (p->backlight) {
+ p->backlight->props.power = FB_BLANK_POWERDOWN;
+ p->backlight->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(p->backlight);
+ }
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int seiko_panel_unprepare(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ if (!p->prepared)
+ return 0;
+
+ regulator_disable(p->avdd);
+
+ /* Add a 100ms delay as per the panel datasheet */
+ msleep(100);
+
+ regulator_disable(p->dvdd);
+
+ p->prepared = false;
+
+ return 0;
+}
+
+static int seiko_panel_prepare(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+ int err;
+
+ if (p->prepared)
+ return 0;
+
+ err = regulator_enable(p->dvdd);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable dvdd: %d\n", err);
+ return err;
+ }
+
+ /* Add a 100ms delay as per the panel datasheet */
+ msleep(100);
+
+ err = regulator_enable(p->avdd);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable avdd: %d\n", err);
+ goto disable_dvdd;
+ }
+
+ p->prepared = true;
+
+ return 0;
+
+disable_dvdd:
+ regulator_disable(p->dvdd);
+ return err;
+}
+
+static int seiko_panel_enable(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ if (p->enabled)
+ return 0;
+
+ if (p->backlight) {
+ p->backlight->props.state &= ~BL_CORE_FBBLANK;
+ p->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(p->backlight);
+ }
+
+ p->enabled = true;
+
+ return 0;
+}
+
+static int seiko_panel_get_modes(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ /* add hard-coded panel modes */
+ return seiko_panel_get_fixed_modes(p);
+}
+
+static int seiko_panel_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
+static const struct drm_panel_funcs seiko_panel_funcs = {
+ .disable = seiko_panel_disable,
+ .unprepare = seiko_panel_unprepare,
+ .prepare = seiko_panel_prepare,
+ .enable = seiko_panel_enable,
+ .get_modes = seiko_panel_get_modes,
+ .get_timings = seiko_panel_get_timings,
+};
+
+static int seiko_panel_probe(struct device *dev,
+ const struct seiko_panel_desc *desc)
+{
+ struct device_node *backlight;
+ struct seiko_panel *panel;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->enabled = false;
+ panel->prepared = false;
+ panel->desc = desc;
+
+ panel->dvdd = devm_regulator_get(dev, "dvdd");
+ if (IS_ERR(panel->dvdd))
+ return PTR_ERR(panel->dvdd);
+
+ panel->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(panel->avdd))
+ return PTR_ERR(panel->avdd);
+
+ backlight = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (backlight) {
+ panel->backlight = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!panel->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ drm_panel_init(&panel->base);
+ panel->base.dev = dev;
+ panel->base.funcs = &seiko_panel_funcs;
+
+ err = drm_panel_add(&panel->base);
+ if (err < 0)
+ return err;
+
+ dev_set_drvdata(dev, panel);
+
+ return 0;
+}
+
+static int seiko_panel_remove(struct platform_device *pdev)
+{
+ struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
+
+ drm_panel_detach(&panel->base);
+ drm_panel_remove(&panel->base);
+
+ seiko_panel_disable(&panel->base);
+
+ if (panel->backlight)
+ put_device(&panel->backlight->dev);
+
+ return 0;
+}
+
+static void seiko_panel_shutdown(struct platform_device *pdev)
+{
+ struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
+
+ seiko_panel_disable(&panel->base);
+}
+
+static const struct display_timing seiko_43wvf1g_timing = {
+ .pixelclock = { 33500000, 33500000, 33500000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 164, 164, 164 },
+ .hback_porch = { 89, 89, 89 },
+ .hsync_len = { 10, 10, 10 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_DE_LOW,
+};
+
+static const struct seiko_panel_desc seiko_43wvf1g = {
+ .timings = &seiko_43wvf1g_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 93,
+ .height = 57,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct of_device_id platform_of_match[] = {
+ {
+ .compatible = "sii,43wvf1g",
+ .data = &seiko_43wvf1g,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
+static int seiko_panel_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ id = of_match_node(platform_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return seiko_panel_probe(&pdev->dev, id->data);
+}
+
+static struct platform_driver seiko_panel_platform_driver = {
+ .driver = {
+ .name = "seiko_panel",
+ .of_match_table = platform_of_match,
+ },
+ .probe = seiko_panel_platform_probe,
+ .remove = seiko_panel_remove,
+ .shutdown = seiko_panel_shutdown,
+};
+module_platform_driver(seiko_panel_platform_driver);
+
+MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com");
+MODULE_DESCRIPTION("Seiko 43WVF1G panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 474fa759e06e..b7c4709f7b34 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -187,8 +187,7 @@ static int panel_simple_unprepare(struct drm_panel *panel)
if (!p->prepared)
return 0;
- if (p->enable_gpio)
- gpiod_set_value_cansleep(p->enable_gpio, 0);
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
@@ -214,8 +213,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
return err;
}
- if (p->enable_gpio)
- gpiod_set_value_cansleep(p->enable_gpio, 1);
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
if (p->desc->delay.prepare)
msleep(p->desc->delay.prepare);
@@ -315,7 +313,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
GPIOD_OUT_LOW);
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
- dev_err(dev, "failed to request GPIO: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to request GPIO: %d\n", err);
return err;
}
@@ -369,6 +368,7 @@ static int panel_simple_remove(struct device *dev)
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
+ panel_simple_unprepare(&panel->base);
if (panel->ddc)
put_device(&panel->ddc->dev);
@@ -384,6 +384,7 @@ static void panel_simple_shutdown(struct device *dev)
struct panel_simple *panel = dev_get_drvdata(dev);
panel_simple_disable(&panel->base);
+ panel_simple_unprepare(&panel->base);
}
static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
@@ -1007,6 +1008,10 @@ static const struct panel_desc hitachi_tx23d38vm0caa = {
.width = 195,
.height = 117,
},
+ .delay = {
+ .enable = 160,
+ .disable = 160,
+ },
};
static const struct drm_display_mode innolux_at043tn24_mode = {
@@ -1017,8 +1022,8 @@ static const struct drm_display_mode innolux_at043tn24_mode = {
.htotal = 480 + 2 + 41 + 2,
.vdisplay = 272,
.vsync_start = 272 + 2,
- .vsync_end = 272 + 2 + 11,
- .vtotal = 272 + 2 + 11 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1032,6 +1037,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
static const struct drm_display_mode innolux_at070tn92_mode = {
@@ -1522,8 +1528,8 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.modes = &olimex_lcd_olinuxino_43ts_mode,
.num_modes = 1,
.size = {
- .width = 105,
- .height = 67,
+ .width = 95,
+ .height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
@@ -1831,6 +1837,30 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode toshiba_lt089ac29000_mode = {
+ .clock = 79500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 192,
+ .hsync_end = 1280 + 192 + 128,
+ .htotal = 1280 + 192 + 128 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 20,
+ .vsync_end = 768 + 20 + 7,
+ .vtotal = 768 + 20 + 7 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc toshiba_lt089ac29000 = {
+ .modes = &toshiba_lt089ac29000_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 194,
+ .height = 116,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode tpk_f07a_0102_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -2113,6 +2143,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
+ .compatible = "toshiba,lt089ac29000",
+ .data = &toshiba_lt089ac29000,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index bbfba87cd1a8..e5e2abd66491 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -6,7 +6,8 @@ config DRM_PL111
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_PANEL
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the PL111 CLCD controller.
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index f2874bbdaa14..9c5e8dba8ac6 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-pl111_drm-y += pl111_connector.o \
- pl111_display.o \
+pl111_drm-y += pl111_display.o \
+ pl111_versatile.o \
pl111_drv.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
diff --git a/drivers/gpu/drm/pl111/pl111_connector.c b/drivers/gpu/drm/pl111/pl111_connector.c
deleted file mode 100644
index d335f9a29ce4..000000000000
--- a/drivers/gpu/drm/pl111/pl111_connector.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
- *
- * Parts of this file were based on sources as follows:
- *
- * Copyright (c) 2006-2008 Intel Corporation
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (C) 2011 Texas Instruments
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms of
- * such GNU licence.
- *
- */
-
-/**
- * pl111_drm_connector.c
- * Implementation of the connector functions for PL111 DRM
- */
-#include <linux/amba/clcd-regs.h>
-#include <linux/version.h>
-#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-
-#include "pl111_drm.h"
-
-static void pl111_connector_destroy(struct drm_connector *connector)
-{
- struct pl111_drm_connector *pl111_connector =
- to_pl111_connector(connector);
-
- if (pl111_connector->panel)
- drm_panel_detach(pl111_connector->panel);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static enum drm_connector_status pl111_connector_detect(struct drm_connector
- *connector, bool force)
-{
- struct pl111_drm_connector *pl111_connector =
- to_pl111_connector(connector);
-
- return (pl111_connector->panel ?
- connector_status_connected :
- connector_status_disconnected);
-}
-
-static int pl111_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct pl111_drm_connector *pl111_connector =
- to_pl111_connector(connector);
-
- if (!pl111_connector->panel)
- return 0;
-
- return drm_panel_get_modes(pl111_connector->panel);
-}
-
-const struct drm_connector_funcs connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = pl111_connector_destroy,
- .detect = pl111_connector_detect,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = pl111_connector_helper_get_modes,
-};
-
-/* Walks the OF graph to find the panel node and then asks DRM to look
- * up the panel.
- */
-static struct drm_panel *pl111_get_panel(struct device *dev)
-{
- struct device_node *endpoint, *panel_node;
- struct device_node *np = dev->of_node;
- struct drm_panel *panel;
-
- endpoint = of_graph_get_next_endpoint(np, NULL);
- if (!endpoint) {
- dev_err(dev, "no endpoint to fetch panel\n");
- return NULL;
- }
-
- /* don't proceed if we have an endpoint but no panel_node tied to it */
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_err(dev, "no valid panel node\n");
- return NULL;
- }
-
- panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
-
- return panel;
-}
-
-int pl111_connector_init(struct drm_device *dev)
-{
- struct pl111_drm_dev_private *priv = dev->dev_private;
- struct pl111_drm_connector *pl111_connector = &priv->connector;
- struct drm_connector *connector = &pl111_connector->connector;
-
- drm_connector_init(dev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
- drm_connector_helper_add(connector, &connector_helper_funcs);
-
- pl111_connector->panel = pl111_get_panel(dev->dev);
- if (pl111_connector->panel)
- drm_panel_attach(pl111_connector->panel, connector);
-
- return 0;
-}
-
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
index 0d9dee199b2c..7ddc7e3b9e7d 100644
--- a/drivers/gpu/drm/pl111/pl111_debugfs.c
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -22,8 +22,14 @@ static const struct {
REGDEF(CLCD_TIM2),
REGDEF(CLCD_TIM3),
REGDEF(CLCD_UBAS),
+ REGDEF(CLCD_LBAS),
REGDEF(CLCD_PL111_CNTL),
REGDEF(CLCD_PL111_IENB),
+ REGDEF(CLCD_PL111_RIS),
+ REGDEF(CLCD_PL111_MIS),
+ REGDEF(CLCD_PL111_ICR),
+ REGDEF(CLCD_PL111_UCUR),
+ REGDEF(CLCD_PL111_LCUR),
};
int pl111_debugfs_regs(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index b58c988d9da0..06c4bf756b69 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -21,7 +21,6 @@
#include <linux/of_graph.h>
#include <drm/drmP.h>
-#include <drm/drm_panel.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -94,7 +93,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
struct pl111_drm_dev_private *priv = drm->dev_private;
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_connector *connector = &priv->connector.connector;
+ struct drm_connector *connector = priv->connector;
u32 cntl;
u32 ppl, hsw, hfp, hbp;
u32 lpp, vsw, vfp, vbp;
@@ -156,10 +155,8 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
writel(0, priv->regs + CLCD_TIM3);
- drm_panel_prepare(priv->connector.panel);
-
- /* Enable and Power Up */
- cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDPWR | CNTL_LCDVCOMP(1);
+ /* Hard-code TFT panel */
+ cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
/* Note that the the hardware's format reader takes 'r' from
* the low bit, while DRM formats list channels from high bit
@@ -202,9 +199,21 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
break;
}
- writel(cntl, priv->regs + CLCD_PL111_CNTL);
+ /* Power sequence: first enable and chill */
+ writel(cntl, priv->regs + priv->ctrl);
+
+ /*
+ * We expect this delay to stabilize the contrast
+ * voltage Vee as stipulated by the manual
+ */
+ msleep(20);
+
+ if (priv->variant_display_enable)
+ priv->variant_display_enable(drm, fb->format->format);
- drm_panel_enable(priv->connector.panel);
+ /* Power Up */
+ cntl |= CNTL_LCDPWR;
+ writel(cntl, priv->regs + priv->ctrl);
drm_crtc_vblank_on(crtc);
}
@@ -214,15 +223,28 @@ void pl111_display_disable(struct drm_simple_display_pipe *pipe)
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
+ u32 cntl;
drm_crtc_vblank_off(crtc);
- drm_panel_disable(priv->connector.panel);
+ /* Power Down */
+ cntl = readl(priv->regs + priv->ctrl);
+ if (cntl & CNTL_LCDPWR) {
+ cntl &= ~CNTL_LCDPWR;
+ writel(cntl, priv->regs + priv->ctrl);
+ }
+
+ /*
+ * We expect this delay to stabilize the contrast voltage Vee as
+ * stipulated by the manual
+ */
+ msleep(20);
- /* Disable and Power Down */
- writel(0, priv->regs + CLCD_PL111_CNTL);
+ if (priv->variant_display_disable)
+ priv->variant_display_disable(drm);
- drm_panel_unprepare(priv->connector.panel);
+ /* Disable */
+ writel(0, priv->regs + priv->ctrl);
clk_disable_unprepare(priv->clk);
}
@@ -260,7 +282,7 @@ int pl111_enable_vblank(struct drm_device *drm, unsigned int crtc)
{
struct pl111_drm_dev_private *priv = drm->dev_private;
- writel(CLCD_IRQ_NEXTBASE_UPDATE, priv->regs + CLCD_PL111_IENB);
+ writel(CLCD_IRQ_NEXTBASE_UPDATE, priv->regs + priv->ienb);
return 0;
}
@@ -269,7 +291,7 @@ void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc)
{
struct pl111_drm_dev_private *priv = drm->dev_private;
- writel(0, priv->regs + CLCD_PL111_IENB);
+ writel(0, priv->regs + priv->ienb);
}
static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
@@ -413,22 +435,6 @@ int pl111_display_init(struct drm_device *drm)
struct device_node *endpoint;
u32 tft_r0b0g0[3];
int ret;
- static const u32 formats[] = {
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_BGR565,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_XBGR4444,
- DRM_FORMAT_ARGB4444,
- DRM_FORMAT_XRGB4444,
- };
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!endpoint)
@@ -444,21 +450,16 @@ int pl111_display_init(struct drm_device *drm)
}
of_node_put(endpoint);
- if (tft_r0b0g0[0] != 0 ||
- tft_r0b0g0[1] != 8 ||
- tft_r0b0g0[2] != 16) {
- dev_err(dev, "arm,pl11x,tft-r0g0b0-pads != [0,8,16] not yet supported\n");
- return -EINVAL;
- }
-
ret = pl111_init_clock_divider(drm);
if (ret)
return ret;
ret = drm_simple_display_pipe_init(drm, &priv->pipe,
&pl111_display_funcs,
- formats, ARRAY_SIZE(formats),
- NULL, &priv->connector.connector);
+ priv->variant->formats,
+ priv->variant->nformats,
+ NULL,
+ priv->connector);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 5c685bfc8fdc..440f53ebee8c 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -21,25 +21,43 @@
#include <drm/drm_gem.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_bridge.h>
#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
#define CLCD_IRQ_NEXTBASE_UPDATE BIT(2)
struct drm_minor;
-struct pl111_drm_connector {
- struct drm_connector connector;
- struct drm_panel *panel;
+/**
+ * struct pl111_variant_data - encodes IP differences
+ * @name: the name of this variant
+ * @is_pl110: this is the early PL110 variant
+ * @formats: array of supported pixel formats on this variant
+ * @nformats: the length of the array of supported pixel formats
+ */
+struct pl111_variant_data {
+ const char *name;
+ bool is_pl110;
+ const u32 *formats;
+ unsigned int nformats;
};
struct pl111_drm_dev_private {
struct drm_device *drm;
- struct pl111_drm_connector connector;
+ struct drm_connector *connector;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
struct drm_simple_display_pipe pipe;
struct drm_fbdev_cma *fbdev;
void *regs;
+ u32 ienb;
+ u32 ctrl;
/* The pixel clock (a reference to our clock divider off of CLCDCLK). */
struct clk *clk;
/* pl111's internal clock divider. */
@@ -48,20 +66,15 @@ struct pl111_drm_dev_private {
* subsystem and pl111_display_enable().
*/
spinlock_t tim2_lock;
+ const struct pl111_variant_data *variant;
+ void (*variant_display_enable) (struct drm_device *drm, u32 format);
+ void (*variant_display_disable) (struct drm_device *drm);
};
-#define to_pl111_connector(x) \
- container_of(x, struct pl111_drm_connector, connector)
-
int pl111_display_init(struct drm_device *dev);
int pl111_enable_vblank(struct drm_device *drm, unsigned int crtc);
void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc);
irqreturn_t pl111_irq(int irq, void *data);
-int pl111_connector_init(struct drm_device *dev);
-int pl111_encoder_init(struct drm_device *dev);
-int pl111_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
int pl111_debugfs_init(struct drm_minor *minor);
#endif /* _PL111_DRM_H_ */
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 581c452cede1..201d57d5cb54 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -41,9 +41,6 @@
* - Fix race between setting plane base address and getting IRQ for
* vsync firing the pageflip completion.
*
- * - Expose the correct set of formats we can support based on the
- * "arm,pl11x,tft-r0g0b0-pads" DT property.
- *
* - Use the "max-memory-bandwidth" DT property to filter the
* supported formats.
*
@@ -68,8 +65,12 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
#include "pl111_drm.h"
+#include "pl111_versatile.h"
#define DRIVER_DESC "DRM module for PL111"
@@ -83,6 +84,8 @@ static int pl111_modeset_init(struct drm_device *dev)
{
struct drm_mode_config *mode_config;
struct pl111_drm_dev_private *priv = dev->dev_private;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
int ret = 0;
drm_mode_config_init(dev);
@@ -93,34 +96,43 @@ static int pl111_modeset_init(struct drm_device *dev)
mode_config->min_height = 1;
mode_config->max_height = 768;
- ret = pl111_connector_init(dev);
- if (ret) {
- dev_err(dev->dev, "Failed to create pl111_drm_connector\n");
- goto out_config;
- }
-
- /* Don't actually attach if we didn't find a drm_panel
- * attached to us. This will allow a kernel to include both
- * the fbdev pl111 driver and this one, and choose between
- * them based on which subsystem has support for the panel.
- */
- if (!priv->connector.panel) {
- dev_info(dev->dev,
- "Disabling due to lack of DRM panel device.\n");
- ret = -ENODEV;
- goto out_config;
+ ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
+ 0, 0, &panel, &bridge);
+ if (ret && ret != -ENODEV)
+ return ret;
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto out_config;
+ }
+ /*
+ * TODO: when we are using a different bridge than a panel
+ * (such as a dumb VGA connector) we need to devise a different
+ * method to get the connector out of the bridge.
+ */
}
ret = pl111_display_init(dev);
if (ret != 0) {
dev_err(dev->dev, "Failed to init display\n");
- goto out_config;
+ goto out_bridge;
}
+ ret = drm_simple_display_pipe_attach_bridge(&priv->pipe,
+ bridge);
+ if (ret)
+ return ret;
+
+ priv->bridge = bridge;
+ priv->panel = panel;
+ priv->connector = panel->connector;
+
ret = drm_vblank_init(dev, 1);
if (ret != 0) {
dev_err(dev->dev, "Failed to init vblank\n");
- goto out_config;
+ goto out_bridge;
}
drm_mode_config_reset(dev);
@@ -132,6 +144,9 @@ static int pl111_modeset_init(struct drm_device *dev)
goto finish;
+out_bridge:
+ if (panel)
+ drm_panel_bridge_remove(bridge);
out_config:
drm_mode_config_cleanup(dev);
finish:
@@ -183,6 +198,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
{
struct device *dev = &amba_dev->dev;
struct pl111_drm_dev_private *priv;
+ struct pl111_variant_data *variant = id->data;
struct drm_device *drm;
int ret;
@@ -196,6 +212,33 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
amba_set_drvdata(amba_dev, drm);
priv->drm = drm;
drm->dev_private = priv;
+ priv->variant = variant;
+
+ /*
+ * The PL110 and PL111 variants have two registers
+ * swapped: interrupt enable and control. For this reason
+ * we use offsets that we can change per variant.
+ */
+ if (variant->is_pl110) {
+ /*
+ * The ARM Versatile boards are even more special:
+ * their PrimeCell ID say they are PL110 but the
+ * control and interrupt enable registers are anyway
+ * swapped to the PL111 order so they are not following
+ * the PL110 datasheet.
+ */
+ if (of_machine_is_compatible("arm,versatile-ab") ||
+ of_machine_is_compatible("arm,versatile-pb")) {
+ priv->ienb = CLCD_PL111_IENB;
+ priv->ctrl = CLCD_PL111_CNTL;
+ } else {
+ priv->ienb = CLCD_PL110_IENB;
+ priv->ctrl = CLCD_PL110_CNTL;
+ }
+ } else {
+ priv->ienb = CLCD_PL111_IENB;
+ priv->ctrl = CLCD_PL111_CNTL;
+ }
priv->regs = devm_ioremap_resource(dev, &amba_dev->res);
if (IS_ERR(priv->regs)) {
@@ -204,15 +247,19 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
}
/* turn off interrupts before requesting the irq */
- writel(0, priv->regs + CLCD_PL111_IENB);
+ writel(0, priv->regs + priv->ienb);
ret = devm_request_irq(dev, amba_dev->irq[0], pl111_irq, 0,
- "pl111", priv);
+ variant->name, priv);
if (ret != 0) {
dev_err(dev, "%s failed irq %d\n", __func__, ret);
return ret;
}
+ ret = pl111_versatile_init(dev, priv);
+ if (ret)
+ goto dev_unref;
+
ret = pl111_modeset_init(drm);
if (ret != 0)
goto dev_unref;
@@ -236,16 +283,70 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_dev_unregister(drm);
if (priv->fbdev)
drm_fbdev_cma_fini(priv->fbdev);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
drm_dev_unref(drm);
return 0;
}
-static struct amba_id pl111_id_table[] = {
+/*
+ * This variant exist in early versions like the ARM Integrator
+ * and this version lacks the 565 and 444 pixel formats.
+ */
+static const u32 pl110_pixel_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+};
+
+static const struct pl111_variant_data pl110_variant = {
+ .name = "PL110",
+ .is_pl110 = true,
+ .formats = pl110_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_pixel_formats),
+};
+
+/* RealView, Versatile Express etc use this modern variant */
+static const u32 pl111_pixel_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+};
+
+static const struct pl111_variant_data pl111_variant = {
+ .name = "PL111",
+ .formats = pl111_pixel_formats,
+ .nformats = ARRAY_SIZE(pl111_pixel_formats),
+};
+
+static const struct amba_id pl111_id_table[] = {
+ {
+ .id = 0x00041110,
+ .mask = 0x000fffff,
+ .data = (void*)&pl110_variant,
+ },
{
.id = 0x00041111,
.mask = 0x000fffff,
+ .data = (void*)&pl111_variant,
},
{0, 0},
};
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
new file mode 100644
index 000000000000..97d4af6925a3
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -0,0 +1,270 @@
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "pl111_versatile.h"
+#include "pl111_drm.h"
+
+static struct regmap *versatile_syscon_map;
+
+/*
+ * We detect the different syscon types from the compatible strings.
+ */
+enum versatile_clcd {
+ INTEGRATOR_CLCD_CM,
+ VERSATILE_CLCD,
+ REALVIEW_CLCD_EB,
+ REALVIEW_CLCD_PB1176,
+ REALVIEW_CLCD_PB11MP,
+ REALVIEW_CLCD_PBA8,
+ REALVIEW_CLCD_PBX,
+};
+
+static const struct of_device_id versatile_clcd_of_match[] = {
+ {
+ .compatible = "arm,core-module-integrator",
+ .data = (void *)INTEGRATOR_CLCD_CM,
+ },
+ {
+ .compatible = "arm,versatile-sysreg",
+ .data = (void *)VERSATILE_CLCD,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_CLCD_EB,
+ },
+ {
+ .compatible = "arm,realview-pb1176-syscon",
+ .data = (void *)REALVIEW_CLCD_PB1176,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_CLCD_PB11MP,
+ },
+ {
+ .compatible = "arm,realview-pba8-syscon",
+ .data = (void *)REALVIEW_CLCD_PBA8,
+ },
+ {
+ .compatible = "arm,realview-pbx-syscon",
+ .data = (void *)REALVIEW_CLCD_PBX,
+ },
+ {},
+};
+
+/*
+ * Core module CLCD control on the Integrator/CP, bits
+ * 8 thru 19 of the CM_CONTROL register controls a bunch
+ * of CLCD settings.
+ */
+#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C
+#define INTEGRATOR_CLCD_LCDBIASEN BIT(8)
+#define INTEGRATOR_CLCD_LCDBIASUP BIT(9)
+#define INTEGRATOR_CLCD_LCDBIASDN BIT(10)
+/* Bits 11,12,13 controls the LCD type */
+#define INTEGRATOR_CLCD_LCDMUX_MASK (BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11)
+#define INTEGRATOR_CLCD_LCDMUX_VGA565 BIT(12)
+#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12))
+#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13)
+#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCD0_EN BIT(14)
+#define INTEGRATOR_CLCD_LCD1_EN BIT(15)
+/* R/L flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC1 BIT(16)
+/* U/D flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC2 BIT(17)
+/* No connection on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC BIT(18)
+/* 0 = 24bit VGA, 1 = 18bit VGA */
+#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19)
+
+#define INTEGRATOR_CLCD_MASK (INTEGRATOR_CLCD_LCDBIASEN | \
+ INTEGRATOR_CLCD_LCDBIASUP | \
+ INTEGRATOR_CLCD_LCDBIASDN | \
+ INTEGRATOR_CLCD_LCDMUX_MASK | \
+ INTEGRATOR_CLCD_LCD0_EN | \
+ INTEGRATOR_CLCD_LCD1_EN | \
+ INTEGRATOR_CLCD_LCD_STATIC1 | \
+ INTEGRATOR_CLCD_LCD_STATIC2 | \
+ INTEGRATOR_CLCD_LCD_STATIC | \
+ INTEGRATOR_CLCD_LCD_N24BITEN)
+
+static void pl111_integrator_enable(struct drm_device *drm, u32 format)
+{
+ u32 val;
+
+ dev_info(drm->dev, "enable Integrator CLCD connectors\n");
+
+ /* FIXME: really needed? */
+ val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 |
+ INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN;
+
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ break;
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB565:
+ /* truecolor RGB565 */
+ val |= INTEGRATOR_CLCD_LCDMUX_VGA565;
+ break;
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_XRGB1555:
+ /* Pseudocolor, RGB555, BGR555 */
+ val |= INTEGRATOR_CLCD_LCDMUX_VGA555;
+ break;
+ default:
+ dev_err(drm->dev, "unhandled format on Integrator 0x%08x\n",
+ format);
+ break;
+ }
+
+ regmap_update_bits(versatile_syscon_map,
+ INTEGRATOR_HDR_CTRL_OFFSET,
+ INTEGRATOR_CLCD_MASK,
+ val);
+}
+
+/*
+ * This configuration register in the Versatile and RealView
+ * family is uniformly present but appears more and more
+ * unutilized starting with the RealView series.
+ */
+#define SYS_CLCD 0x50
+#define SYS_CLCD_MODE_MASK (BIT(0)|BIT(1))
+#define SYS_CLCD_MODE_888 0
+#define SYS_CLCD_MODE_5551 BIT(0)
+#define SYS_CLCD_MODE_565_R_LSB BIT(1)
+#define SYS_CLCD_MODE_565_B_LSB (BIT(0)|BIT(1))
+#define SYS_CLCD_CONNECTOR_MASK (BIT(2)|BIT(3)|BIT(4)|BIT(5))
+#define SYS_CLCD_NLCDIOON BIT(2)
+#define SYS_CLCD_VDDPOSSWITCH BIT(3)
+#define SYS_CLCD_PWR3V5SWITCH BIT(4)
+#define SYS_CLCD_VDDNEGSWITCH BIT(5)
+
+static void pl111_versatile_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable Versatile CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ 0);
+}
+
+static void pl111_versatile_enable(struct drm_device *drm, u32 format)
+{
+ u32 val = 0;
+
+ dev_info(drm->dev, "enable Versatile CLCD connectors\n");
+
+ switch (format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ val |= SYS_CLCD_MODE_888;
+ break;
+ case DRM_FORMAT_BGR565:
+ val |= SYS_CLCD_MODE_565_R_LSB;
+ break;
+ case DRM_FORMAT_RGB565:
+ val |= SYS_CLCD_MODE_565_B_LSB;
+ break;
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_XRGB1555:
+ val |= SYS_CLCD_MODE_5551;
+ break;
+ default:
+ dev_err(drm->dev, "unhandled format on Versatile 0x%08x\n",
+ format);
+ break;
+ }
+
+ /* Set up the MUX */
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_MODE_MASK,
+ val);
+
+ /* Then enable the display */
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+}
+
+static void pl111_realview_clcd_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable RealView CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ 0);
+}
+
+static void pl111_realview_clcd_enable(struct drm_device *drm, u32 format)
+{
+ dev_info(drm->dev, "enable RealView CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+}
+
+int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
+{
+ const struct of_device_id *clcd_id;
+ enum versatile_clcd versatile_clcd_type;
+ struct device_node *np;
+ struct regmap *map;
+
+ np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
+ &clcd_id);
+ if (!np) {
+ /* Non-ARM reference designs, just bail out */
+ return 0;
+ }
+ versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+
+ map = syscon_node_to_regmap(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "no Versatile syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ switch (versatile_clcd_type) {
+ case INTEGRATOR_CLCD_CM:
+ versatile_syscon_map = map;
+ priv->variant_display_enable = pl111_integrator_enable;
+ dev_info(dev, "set up callbacks for Integrator PL110\n");
+ break;
+ case VERSATILE_CLCD:
+ versatile_syscon_map = map;
+ priv->variant_display_enable = pl111_versatile_enable;
+ priv->variant_display_disable = pl111_versatile_disable;
+ dev_info(dev, "set up callbacks for Versatile PL110+\n");
+ break;
+ case REALVIEW_CLCD_EB:
+ case REALVIEW_CLCD_PB1176:
+ case REALVIEW_CLCD_PB11MP:
+ case REALVIEW_CLCD_PBA8:
+ case REALVIEW_CLCD_PBX:
+ versatile_syscon_map = map;
+ priv->variant_display_enable = pl111_realview_clcd_enable;
+ priv->variant_display_disable = pl111_realview_clcd_disable;
+ dev_info(dev, "set up callbacks for RealView PL111\n");
+ break;
+ default:
+ dev_info(dev, "unknown Versatile system controller\n");
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pl111_versatile_init);
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.h b/drivers/gpu/drm/pl111/pl111_versatile.h
new file mode 100644
index 000000000000..41aa6d969dc6
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_versatile.h
@@ -0,0 +1,9 @@
+#include <linux/device.h>
+#include "pl111_drm.h"
+
+#ifndef PL111_VERSATILE_H
+#define PL111_VERSATILE_H
+
+int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv);
+
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 74fc9362ecf9..c0fb52c6d4ca 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -219,7 +219,7 @@ int qxl_garbage_collect(struct qxl_device *qdev)
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
- QXL_INFO(qdev, "popped %lld\n", id);
+ DRM_DEBUG_DRIVER("popped %lld\n", id);
while (id) {
release = qxl_release_from_id_locked(qdev, id);
if (release == NULL)
@@ -229,8 +229,8 @@ int qxl_garbage_collect(struct qxl_device *qdev)
next_id = info->next;
qxl_release_unmap(qdev, release, info);
- QXL_INFO(qdev, "popped %lld, next %lld\n", id,
- next_id);
+ DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
+ next_id);
switch (release->type) {
case QXL_RELEASE_DRAWABLE:
@@ -248,7 +248,7 @@ int qxl_garbage_collect(struct qxl_device *qdev)
}
}
- QXL_INFO(qdev, "%s: %d\n", __func__, i);
+ DRM_DEBUG_DRIVER("%d\n", i);
return i;
}
@@ -381,17 +381,19 @@ void qxl_io_create_primary(struct qxl_device *qdev,
{
struct qxl_surface_create *create;
- QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
- qdev->ram_header);
+ DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
create = &qdev->ram_header->create_surface;
create->format = bo->surf.format;
create->width = bo->surf.width;
create->height = bo->surf.height;
create->stride = bo->surf.stride;
- create->mem = qxl_bo_physical_address(qdev, bo, offset);
+ if (bo->shadow) {
+ create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset);
+ } else {
+ create->mem = qxl_bo_physical_address(qdev, bo, offset);
+ }
- QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
- bo->kptr);
+ DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);
create->flags = QXL_SURF_FLAG_KEEP_DATA;
create->type = QXL_SURF_TYPE_PRIMARY;
@@ -401,7 +403,7 @@ void qxl_io_create_primary(struct qxl_device *qdev,
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
- QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
+ DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index afbf50d0c08f..4756b3c9bf2c 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -305,7 +305,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+ struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
+ WARN_ON(bo->shadow);
drm_gem_object_unreference_unlocked(qxl_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(qxl_fb);
@@ -508,6 +510,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
.x2 = qfb->base.width,
.y2 = qfb->base.height
};
+ bool same_shadow = false;
if (old_state->fb) {
qfb_old = to_qxl_framebuffer(old_state->fb);
@@ -519,15 +522,23 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
if (bo == bo_old)
return;
+ if (bo_old && bo_old->shadow && bo->shadow &&
+ bo_old->shadow == bo->shadow) {
+ same_shadow = true;
+ }
+
if (bo_old && bo_old->is_primary) {
- qxl_io_destroy_primary(qdev);
+ if (!same_shadow)
+ qxl_io_destroy_primary(qdev);
bo_old->is_primary = false;
}
if (!bo->is_primary) {
- qxl_io_create_primary(qdev, 0, bo);
+ if (!same_shadow)
+ qxl_io_create_primary(qdev, 0, bo);
bo->is_primary = true;
}
+
qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
}
@@ -679,8 +690,9 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
+ struct qxl_device *qdev = plane->dev->dev_private;
struct drm_gem_object *obj;
- struct qxl_bo *user_bo;
+ struct qxl_bo *user_bo, *old_bo = NULL;
int ret;
if (!new_state->fb)
@@ -689,6 +701,32 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
obj = to_qxl_framebuffer(new_state->fb)->obj;
user_bo = gem_to_qxl_bo(obj);
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+ user_bo->is_dumb && !user_bo->shadow) {
+ if (plane->state->fb) {
+ obj = to_qxl_framebuffer(plane->state->fb)->obj;
+ old_bo = gem_to_qxl_bo(obj);
+ }
+ if (old_bo && old_bo->shadow &&
+ user_bo->gem_base.size == old_bo->gem_base.size &&
+ plane->state->crtc == new_state->crtc &&
+ plane->state->crtc_w == new_state->crtc_w &&
+ plane->state->crtc_h == new_state->crtc_h &&
+ plane->state->src_x == new_state->src_x &&
+ plane->state->src_y == new_state->src_y &&
+ plane->state->src_w == new_state->src_w &&
+ plane->state->src_h == new_state->src_h &&
+ plane->state->rotation == new_state->rotation &&
+ plane->state->zpos == new_state->zpos) {
+ drm_gem_object_get(&old_bo->shadow->gem_base);
+ user_bo->shadow = old_bo->shadow;
+ } else {
+ qxl_bo_create(qdev, user_bo->gem_base.size,
+ true, true, QXL_GEM_DOMAIN_VRAM, NULL,
+ &user_bo->shadow);
+ }
+ }
+
ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
if (ret)
return ret;
@@ -713,6 +751,11 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
obj = to_qxl_framebuffer(old_state->fb)->obj;
user_bo = gem_to_qxl_bo(obj);
qxl_bo_unpin(user_bo);
+
+ if (user_bo->shadow && !user_bo->is_primary) {
+ drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
+ user_bo->shadow = NULL;
+ }
}
static const uint32_t qxl_cursor_plane_formats[] = {
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3397a1907336..08752c0ffb35 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -62,33 +62,9 @@
#define QXL_DEBUGFS_MAX_COMPONENTS 32
-extern int qxl_log_level;
extern int qxl_num_crtc;
extern int qxl_max_ioctls;
-enum {
- QXL_INFO_LEVEL = 1,
- QXL_DEBUG_LEVEL = 2,
-};
-
-#define QXL_INFO(qdev, fmt, ...) do { \
- if (qxl_log_level >= QXL_INFO_LEVEL) { \
- qxl_io_log(qdev, fmt, __VA_ARGS__); \
- } \
- } while (0)
-#define QXL_DEBUG(qdev, fmt, ...) do { \
- if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
- qxl_io_log(qdev, fmt, __VA_ARGS__); \
- } \
- } while (0)
-#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
- static int done; \
- if (!done) { \
- done = 1; \
- QXL_INFO(qdev, fmt, __VA_ARGS__); \
- } \
- } while (0)
-
#define DRM_FILE_OFFSET 0x100000000ULL
#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
@@ -113,6 +89,8 @@ struct qxl_bo {
/* Constant after initialization */
struct drm_gem_object gem_base;
bool is_primary; /* is this now a primary surface */
+ bool is_dumb;
+ struct qxl_bo *shadow;
bool hw_surf_alloc;
struct qxl_surface surf;
uint32_t surface_id;
@@ -351,7 +329,7 @@ int qxl_check_idle(struct qxl_ring *ring);
static inline void *
qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
{
- QXL_INFO(qdev, "not implemented (%lu)\n", physical);
+ DRM_DEBUG_DRIVER("not implemented (%lu)\n", physical);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 5e65d5d2d937..11085ab01374 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -63,6 +63,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
&handle);
if (r)
return r;
+ qobj->is_dumb = true;
args->pitch = pitch;
args->handle = handle;
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 844c4a31ca13..23af3e352673 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -240,18 +240,15 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
return ret;
qbo = gem_to_qxl_bo(gobj);
- QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
- mode_cmd.height, mode_cmd.pitches[0]);
+ DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
+ mode_cmd.height, mode_cmd.pitches[0]);
shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
/* TODO: what's the usual response to memory allocation errors? */
BUG_ON(!shadow);
- QXL_INFO(qdev,
- "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
- qxl_bo_gpu_offset(qbo),
- qxl_bo_mmap_offset(qbo),
- qbo->kptr,
- shadow);
+ DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
+ qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo),
+ qbo->kptr, shadow);
size = mode_cmd.pitches[0] * mode_cmd.height;
info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e6ec845b5be0..a6da6fa6ad58 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -154,7 +154,7 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return handle;
}
*ret = release;
- QXL_INFO(qdev, "allocated release %d\n", handle);
+ DRM_DEBUG_DRIVER("allocated release %d\n", handle);
release->id = handle;
return handle;
}
@@ -179,8 +179,7 @@ void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
- QXL_INFO(qdev, "release %d, type %d\n", release->id,
- release->type);
+ DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7ecf8a4b9fe6..ab4823875311 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -136,8 +136,8 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
"filp->private_data->minor->dev->dev_private == NULL\n");
return -EINVAL;
}
- QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
- __func__, filp->private_data, vma->vm_pgoff);
+ DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+ filp->private_data, vma->vm_pgoff);
r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
if (unlikely(r != 0))
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 09143b840482..2de40d276116 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -147,6 +147,10 @@ extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
extern void r128_freelist_reset(struct drm_device *dev);
extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 663f38c63ba6..6589f9e0310e 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -63,39 +63,36 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_init32_t init32;
- drm_r128_init_t __user *init;
+ drm_r128_init_t init;
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.is_pci, &init->is_pci)
- || __put_user(init32.cce_mode, &init->cce_mode)
- || __put_user(init32.cce_secure, &init->cce_secure)
- || __put_user(init32.ring_size, &init->ring_size)
- || __put_user(init32.usec_timeout, &init->usec_timeout)
- || __put_user(init32.fb_bpp, &init->fb_bpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_bpp, &init->depth_bpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.span_offset, &init->span_offset)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.ring_offset, &init->ring_offset)
- || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset)
- || __put_user(init32.agp_textures_offset,
- &init->agp_textures_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
+ init.func = init32.func;
+ init.sarea_priv_offset = init32.sarea_priv_offset;
+ init.is_pci = init32.is_pci;
+ init.cce_mode = init32.cce_mode;
+ init.cce_secure = init32.cce_secure;
+ init.ring_size = init32.ring_size;
+ init.usec_timeout = init32.usec_timeout;
+ init.fb_bpp = init32.fb_bpp;
+ init.front_offset = init32.front_offset;
+ init.front_pitch = init32.front_pitch;
+ init.back_offset = init32.back_offset;
+ init.back_pitch = init32.back_pitch;
+ init.depth_bpp = init32.depth_bpp;
+ init.depth_offset = init32.depth_offset;
+ init.depth_pitch = init32.depth_pitch;
+ init.span_offset = init32.span_offset;
+ init.fb_offset = init32.fb_offset;
+ init.mmio_offset = init32.mmio_offset;
+ init.ring_offset = init32.ring_offset;
+ init.ring_rptr_offset = init32.ring_rptr_offset;
+ init.buffers_offset = init32.buffers_offset;
+ init.agp_textures_offset = init32.agp_textures_offset;
+
+ return drm_ioctl_kernel(file, r128_cce_init, &init,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_r128_depth32 {
@@ -111,25 +108,19 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_depth32_t depth32;
- drm_r128_depth_t __user *depth;
+ drm_r128_depth_t depth;
if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
return -EFAULT;
- depth = compat_alloc_user_space(sizeof(*depth));
- if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
- || __put_user(depth32.func, &depth->func)
- || __put_user(depth32.n, &depth->n)
- || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
- || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
- || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
- &depth->buffer)
- || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
- &depth->mask))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+ depth.func = depth32.func;
+ depth.n = depth32.n;
+ depth.x = compat_ptr(depth32.x);
+ depth.y = compat_ptr(depth32.y);
+ depth.buffer = compat_ptr(depth32.buffer);
+ depth.mask = compat_ptr(depth32.mask);
+ return drm_ioctl_kernel(file, r128_cce_depth, &depth, DRM_AUTH);
}
typedef struct drm_r128_stipple32 {
@@ -140,18 +131,14 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_stipple32_t stipple32;
- drm_r128_stipple_t __user *stipple;
+ drm_r128_stipple_t stipple;
if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
return -EFAULT;
- stipple = compat_alloc_user_space(sizeof(*stipple));
- if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
- || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
- &stipple->mask))
- return -EFAULT;
+ stipple.mask = compat_ptr(stipple32.mask);
- return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+ return drm_ioctl_kernel(file, r128_cce_stipple, &stipple, DRM_AUTH);
}
typedef struct drm_r128_getparam32 {
@@ -163,19 +150,15 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_getparam32_t getparam32;
- drm_r128_getparam_t __user *getparam;
+ drm_r128_getparam_t getparam;
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
return -EFAULT;
- getparam = compat_alloc_user_space(sizeof(*getparam));
- if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
- || __put_user(getparam32.param, &getparam->param)
- || __put_user((void __user *)(unsigned long)getparam32.value,
- &getparam->value))
- return -EFAULT;
+ getparam.param = getparam32.param;
+ getparam.value = compat_ptr(getparam32.value);
- return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl_kernel(file, r128_getparam, &getparam, DRM_AUTH);
}
drm_ioctl_compat_t *r128_compat_ioctls[] = {
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fd2d9f58f77..8fdc56c1c953 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1460,7 +1460,7 @@ static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *fi
return ret;
}
-static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_depth_t *depth = data;
@@ -1492,7 +1492,7 @@ static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *f
return ret;
}
-static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_stipple_t *stipple = data;
@@ -1582,7 +1582,7 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
return 0;
}
-static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_getparam_t *param = data;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 0ad8244b5ccf..92ccd7aed0d4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -103,12 +103,9 @@ radeon-y += \
radeon-y += \
radeon_vce.o \
vce_v1_0.o \
- vce_v2_0.o \
- radeon_kfd.o
+ vce_v2_0.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
-
-CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 432cb46f6a34..3e798593e042 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
/***** radeon AUX functions *****/
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * byes in the src buffer.
*/
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
@@ -304,10 +302,10 @@ static int convert_bpc_to_bpp(int bpc)
/***** radeon specific DP functions *****/
-int radeon_dp_get_dp_link_config(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- unsigned pix_clock,
- unsigned *dp_lanes, unsigned *dp_rate)
+static int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 dpcd[DP_DPCD_SIZE],
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index c97fbb2ab48b..7e1b04dc5593 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -184,6 +184,7 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter);
@@ -1651,6 +1652,27 @@ static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
}
#endif
+static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_Failed;
+
+ WREG32(SMC_MESSAGE_0, msg);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(SMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0);
+
+ return (PPSMC_Result)tmp;
+}
+
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter)
{
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index 723220ffbea2..dff2a63df38f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -330,7 +330,6 @@ int ci_program_jump_on_start(struct radeon_device *rdev);
void ci_stop_smc_clock(struct radeon_device *rdev);
void ci_start_smc_clock(struct radeon_device *rdev);
bool ci_is_smc_running(struct radeon_device *rdev);
-PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
int ci_read_smc_sram_dword(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 3356a21d97ec..371121913756 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -163,27 +163,6 @@ bool ci_is_smc_running(struct radeon_device *rdev)
return false;
}
-PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
-{
- u32 tmp;
- int i;
-
- if (!ci_is_smc_running(rdev))
- return PPSMC_Result_Failed;
-
- WREG32(SMC_MESSAGE_0, msg);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
- if (tmp != 0)
- break;
- udelay(1);
- }
- tmp = RREG32(SMC_RESP_0);
-
- return (PPSMC_Result)tmp;
-}
-
#if 0
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 3cb6c55b268d..a6511918f632 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -33,7 +33,6 @@
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
-#include "radeon_kfd.h"
#define SH_MEM_CONFIG_GFX_DEFAULT \
ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
@@ -5452,28 +5451,6 @@ void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
-static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
-{
- int i;
- uint32_t sh_mem_bases, sh_mem_config;
-
- sh_mem_bases = 0x6000 | 0x6000 << 16;
- sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
- mutex_lock(&rdev->srbm_mutex);
- for (i = 8; i < 16; i++) {
- cik_srbm_select(rdev, 0, 0, 0, i);
- /* CP and shaders */
- WREG32(SH_MEM_CONFIG, sh_mem_config);
- WREG32(SH_MEM_APE1_BASE, 1);
- WREG32(SH_MEM_APE1_LIMIT, 0);
- WREG32(SH_MEM_BASES, sh_mem_bases);
- }
- cik_srbm_select(rdev, 0, 0, 0, 0);
- mutex_unlock(&rdev->srbm_mutex);
-}
-
/**
* cik_pcie_gart_enable - gart enable
*
@@ -5587,8 +5564,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
- cik_pcie_init_compute_vmid(rdev);
-
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
@@ -5684,10 +5659,9 @@ int cik_vm_init(struct radeon_device *rdev)
/*
* number of VMs
* VMID 0 is reserved for System
- * radeon graphics/compute will use VMIDs 1-7
- * amdkfd will use VMIDs 8-15
+ * radeon graphics/compute will use VMIDs 1-15
*/
- rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
+ rdev->vm_manager.nvm = 16;
/* base offset of vram pages */
if (rdev->flags & RADEON_IS_IGP) {
u64 tmp = RREG32(MC_VM_FB_OFFSET);
@@ -7589,9 +7563,6 @@ restart_ih:
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
- radeon_kfd_interrupt(rdev,
- (const void *) &rdev->ih.ring[ring_index]);
-
src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
@@ -8486,10 +8457,6 @@ static int cik_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_kfd_resume(rdev);
- if (r)
- return r;
-
return 0;
}
@@ -8538,7 +8505,6 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
- radeon_kfd_suspend(rdev);
radeon_pm_suspend(rdev);
radeon_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index e21015475ed5..cda16fcd43bb 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -30,8 +30,6 @@
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
-#define RADEON_NUM_OF_VMIDS 8
-
/* DIDT IND registers */
#define DIDT_SQ_CTRL0 0x0
# define DIDT_CTRL_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c31e660e35db..7d39ed63e5be 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1456,7 +1456,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 97fd58e97043..c96b31950ca7 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = R600_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e82a99cb2459..ab32830c4e23 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -58,7 +58,7 @@ enum r600_hdmi_iec_status_bits {
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
- struct r600_audio_pin status;
+ struct r600_audio_pin status = {};
uint32_t value;
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8cbaeec090c9..a8e546569858 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2456,9 +2456,6 @@ struct radeon_device {
u64 vram_pin_size;
u64 gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
};
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2f642cbefd8e..59dcefb2df3b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -263,7 +263,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -290,7 +290,7 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -404,7 +404,7 @@ static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *conn
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1368,7 +1368,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1454,7 +1454,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1473,7 +1473,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digitial */
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1620,7 +1620,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1649,7 +1649,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f4becad0a78c..31dd04f6baa1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -43,7 +43,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-#include "radeon_kfd.h"
/*
* KMS wrapper.
@@ -338,14 +337,6 @@ static int radeon_pci_probe(struct pci_dev *pdev,
{
int ret;
- /*
- * Initialize amdkfd before starting radeon. If it was not loaded yet,
- * defer radeon probing
- */
- ret = radeon_kfd_init();
- if (ret == -EPROBE_DEFER)
- return ret;
-
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
@@ -645,7 +636,6 @@ static int __init radeon_init(void)
static void __exit radeon_exit(void)
{
- radeon_kfd_fini();
pci_unregister_driver(pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fd25361ac681..33b821d6d018 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
}
info->par = rfbdev;
- info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
@@ -322,10 +321,10 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
+ drm_framebuffer_unregister_private(&rfb->base);
+ drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
deleted file mode 100644
index f6578c96925c..000000000000
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "cikd.h"
-#include "cik_reg.h"
-#include "radeon_kfd.h"
-#include "radeon_ucode.h"
-#include <linux/firmware.h>
-#include "cik_structs.h"
-
-#define CIK_PIPE_PER_MEC (4)
-
-static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
- TCP_WATCH0_ADDR_H, TCP_WATCH0_ADDR_L, TCP_WATCH0_CNTL,
- TCP_WATCH1_ADDR_H, TCP_WATCH1_ADDR_L, TCP_WATCH1_CNTL,
- TCP_WATCH2_ADDR_H, TCP_WATCH2_ADDR_L, TCP_WATCH2_CNTL,
- TCP_WATCH3_ADDR_H, TCP_WATCH3_ADDR_L, TCP_WATCH3_CNTL
-};
-
-struct kgd_mem {
- struct radeon_bo *bo;
- uint64_t gpu_addr;
- void *cpu_ptr;
-};
-
-
-static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
-
-static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-
-static uint64_t get_vmem_size(struct kgd_dev *kgd);
-static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
-
-static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
-
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
-
-static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_pipeline = kgd_init_pipeline,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
-};
-
-static const struct kgd2kfd_calls *kgd2kfd;
-
-int radeon_kfd_init(void)
-{
- int ret;
-
-#if defined(CONFIG_HSA_AMD_MODULE)
- int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
-
- kgd2kfd_init_p = symbol_request(kgd2kfd_init);
-
- if (kgd2kfd_init_p == NULL)
- return -ENOENT;
-
- ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
- if (ret) {
- symbol_put(kgd2kfd_init);
- kgd2kfd = NULL;
- }
-
-#elif defined(CONFIG_HSA_AMD)
- ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
- if (ret)
- kgd2kfd = NULL;
-
-#else
- ret = -ENOENT;
-#endif
-
- return ret;
-}
-
-void radeon_kfd_fini(void)
-{
- if (kgd2kfd) {
- kgd2kfd->exit();
- symbol_put(kgd2kfd_init);
- }
-}
-
-void radeon_kfd_device_probe(struct radeon_device *rdev)
-{
- if (kgd2kfd)
- rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
- rdev->pdev, &kfd2kgd);
-}
-
-void radeon_kfd_device_init(struct radeon_device *rdev)
-{
- int i, queue, pipe, mec;
-
- if (rdev->kfd) {
- struct kgd2kfd_shared_resources gpu_resources = {
- .compute_vmid_bitmap = 0xFF00,
- .num_pipe_per_mec = 4,
- .num_queue_per_pipe = 8
- };
-
- bitmap_zero(gpu_resources.queue_bitmap, KGD_MAX_QUEUES);
-
- for (i = 0; i < KGD_MAX_QUEUES; ++i) {
- queue = i % gpu_resources.num_queue_per_pipe;
- pipe = (i / gpu_resources.num_queue_per_pipe)
- % gpu_resources.num_pipe_per_mec;
- mec = (i / gpu_resources.num_queue_per_pipe)
- / gpu_resources.num_pipe_per_mec;
-
- if (mec == 0 && pipe > 0)
- set_bit(i, gpu_resources.queue_bitmap);
- }
-
- radeon_doorbell_get_kfd_info(rdev,
- &gpu_resources.doorbell_physical_address,
- &gpu_resources.doorbell_aperture_size,
- &gpu_resources.doorbell_start_offset);
-
- kgd2kfd->device_init(rdev->kfd, &gpu_resources);
- }
-}
-
-void radeon_kfd_device_fini(struct radeon_device *rdev)
-{
- if (rdev->kfd) {
- kgd2kfd->device_exit(rdev->kfd);
- rdev->kfd = NULL;
- }
-}
-
-void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
-{
- if (rdev->kfd)
- kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
-}
-
-void radeon_kfd_suspend(struct radeon_device *rdev)
-{
- if (rdev->kfd)
- kgd2kfd->suspend(rdev->kfd);
-}
-
-int radeon_kfd_resume(struct radeon_device *rdev)
-{
- int r = 0;
-
- if (rdev->kfd)
- r = kgd2kfd->resume(rdev->kfd);
-
- return r;
-}
-
-static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
- struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
- int r;
-
- BUG_ON(kgd == NULL);
- BUG_ON(gpu_addr == NULL);
- BUG_ON(cpu_ptr == NULL);
-
- *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if ((*mem) == NULL)
- return -ENOMEM;
-
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_GTT_WC, NULL, NULL, &(*mem)->bo);
- if (r) {
- dev_err(rdev->dev,
- "failed to allocate BO for amdkfd (%d)\n", r);
- return r;
- }
-
- /* map the buffer */
- r = radeon_bo_reserve((*mem)->bo, true);
- if (r) {
- dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
- goto allocate_mem_reserve_bo_failed;
- }
-
- r = radeon_bo_pin((*mem)->bo, RADEON_GEM_DOMAIN_GTT,
- &(*mem)->gpu_addr);
- if (r) {
- dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
- goto allocate_mem_pin_bo_failed;
- }
- *gpu_addr = (*mem)->gpu_addr;
-
- r = radeon_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
- if (r) {
- dev_err(rdev->dev,
- "(%d) failed to map bo to kernel for amdkfd\n", r);
- goto allocate_mem_kmap_bo_failed;
- }
- *cpu_ptr = (*mem)->cpu_ptr;
-
- radeon_bo_unreserve((*mem)->bo);
-
- return 0;
-
-allocate_mem_kmap_bo_failed:
- radeon_bo_unpin((*mem)->bo);
-allocate_mem_pin_bo_failed:
- radeon_bo_unreserve((*mem)->bo);
-allocate_mem_reserve_bo_failed:
- radeon_bo_unref(&(*mem)->bo);
-
- return r;
-}
-
-static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
-{
- struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
-
- BUG_ON(mem == NULL);
-
- radeon_bo_reserve(mem->bo, true);
- radeon_bo_kunmap(mem->bo);
- radeon_bo_unpin(mem->bo);
- radeon_bo_unreserve(mem->bo);
- radeon_bo_unref(&(mem->bo));
- kfree(mem);
-}
-
-static uint64_t get_vmem_size(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- BUG_ON(kgd == NULL);
-
- return rdev->mc.real_vram_size;
-}
-
-static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- return rdev->asic->get_gpu_clock_counter(rdev);
-}
-
-static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- /* The sclk is in quantas of 10kHz */
- return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
-}
-
-static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
-{
- return (struct radeon_device *)kgd;
-}
-
-static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
-
- writel(value, (void __iomem *)(rdev->rmmio + offset));
-}
-
-static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
-
- return readl((void __iomem *)(rdev->rmmio + offset));
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
- uint32_t queue, uint32_t vmid)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
- uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
-
- mutex_lock(&rdev->srbm_mutex);
- write_register(kgd, SRBM_GFX_CNTL, value);
-}
-
-static void unlock_srbm(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
-
- write_register(kgd, SRBM_GFX_CNTL, 0);
- mutex_unlock(&rdev->srbm_mutex);
-}
-
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t queue_id)
-{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
- uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
-
- lock_srbm(kgd, mec, pipe, queue_id, 0);
-}
-
-static void release_queue(struct kgd_dev *kgd)
-{
- unlock_srbm(kgd);
-}
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
-{
- lock_srbm(kgd, 0, 0, 0, vmid);
-
- write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
- write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
- write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
- write_register(kgd, SH_MEM_BASES, sh_mem_bases);
-
- unlock_srbm(kgd);
-}
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid)
-{
- /*
- * We have to assume that there is no outstanding mapping.
- * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0
- * because a mapping is in progress or because a mapping finished and
- * the SW cleared it.
- * So the protocol is to always wait & clear.
- */
- uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
- ATC_VMID_PASID_MAPPING_VALID_MASK;
-
- write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
- pasid_mapping);
-
- while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
- (1U << vmid)))
- cpu_relax();
- write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
-
- /* Mapping vmid to pasid also for IH block */
- write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
- pasid_mapping);
-
- return 0;
-}
-
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr)
-{
- /* nothing to do here */
- return 0;
-}
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
-{
- uint32_t mec;
- uint32_t pipe;
-
- mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
- pipe = (pipe_id % CIK_PIPE_PER_MEC);
-
- lock_srbm(kgd, mec, pipe, 0, 0);
-
- write_register(kgd, CPC_INT_CNTL,
- TIME_STAMP_INT_ENABLE | OPCODE_ERROR_INT_ENABLE);
-
- unlock_srbm(kgd);
-
- return 0;
-}
-
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
-{
- uint32_t retval;
-
- retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
- m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
-
- pr_debug("kfd: sdma base address: 0x%x\n", retval);
-
- return retval;
-}
-
-static inline struct cik_mqd *get_mqd(void *mqd)
-{
- return (struct cik_mqd *)mqd;
-}
-
-static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
-{
- return (struct cik_sdma_rlc_registers *)mqd;
-}
-
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
-{
- uint32_t wptr_shadow, is_wptr_shadow_valid;
- struct cik_mqd *m;
-
- m = get_mqd(mqd);
-
- is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
-
- acquire_queue(kgd, pipe_id, queue_id);
- write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
- write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
- write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);
-
- write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
- write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
- write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
-
- write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
- write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
- write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
-
- write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
-
- write_register(kgd, CP_HQD_PERSISTENT_STATE,
- m->cp_hqd_persistent_state);
- write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
- write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
-
- write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
- m->cp_hqd_atomic0_preop_lo);
-
- write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
- m->cp_hqd_atomic0_preop_hi);
-
- write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
- m->cp_hqd_atomic1_preop_lo);
-
- write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
- m->cp_hqd_atomic1_preop_hi);
-
- write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
- m->cp_hqd_pq_rptr_report_addr_lo);
-
- write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
- m->cp_hqd_pq_rptr_report_addr_hi);
-
- write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
-
- write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
- m->cp_hqd_pq_wptr_poll_addr_lo);
-
- write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
- m->cp_hqd_pq_wptr_poll_addr_hi);
-
- write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
- m->cp_hqd_pq_doorbell_control);
-
- write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);
-
- write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);
-
- write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
- write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
-
- write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
-
- if (is_wptr_shadow_valid)
- write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);
-
- write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
- release_queue(kgd);
-
- return 0;
-}
-
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
-{
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
-
- m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_VIRTUAL_ADDR,
- m->sdma_rlc_virtual_addr);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_BASE,
- m->sdma_rlc_rb_base);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_BASE_HI,
- m->sdma_rlc_rb_base_hi);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_LO,
- m->sdma_rlc_rb_rptr_addr_lo);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_HI,
- m->sdma_rlc_rb_rptr_addr_hi);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_CNTL,
- m->sdma_rlc_rb_cntl);
-
- return 0;
-}
-
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
-{
- uint32_t act;
- bool retval = false;
- uint32_t low, high;
-
- acquire_queue(kgd, pipe_id, queue_id);
- act = read_register(kgd, CP_HQD_ACTIVE);
- if (act) {
- low = lower_32_bits(queue_address >> 8);
- high = upper_32_bits(queue_address >> 8);
-
- if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
- high == read_register(kgd, CP_HQD_PQ_BASE_HI))
- retval = true;
- }
- release_queue(kgd);
- return retval;
-}
-
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
-{
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
- uint32_t sdma_rlc_rb_cntl;
-
- m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
-
- sdma_rlc_rb_cntl = read_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_CNTL);
-
- if (sdma_rlc_rb_cntl & SDMA_RB_ENABLE)
- return true;
-
- return false;
-}
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- uint32_t temp;
-
- acquire_queue(kgd, pipe_id, queue_id);
- write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);
-
- write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);
-
- while (true) {
- temp = read_register(kgd, CP_HQD_ACTIVE);
- if (temp & 0x1)
- break;
- if (timeout == 0) {
- pr_err("kfd: cp queue preemption time out (%dms)\n",
- temp);
- release_queue(kgd);
- return -ETIME;
- }
- msleep(20);
- timeout -= 20;
- }
-
- release_queue(kgd);
- return 0;
-}
-
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout)
-{
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
- uint32_t temp;
-
- m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
-
- temp = read_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL);
- temp = temp & ~SDMA_RB_ENABLE;
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL, temp);
-
- while (true) {
- temp = read_register(kgd, sdma_base_addr +
- SDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA_RLC_IDLE)
- break;
- if (timeout == 0)
- return -ETIME;
- msleep(20);
- timeout -= 20;
- }
-
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_DOORBELL, 0);
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR, 0);
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_WPTR, 0);
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE, 0);
-
- return 0;
-}
-
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
-{
- union TCP_WATCH_CNTL_BITS cntl;
- unsigned int i;
-
- cntl.u32All = 0;
-
- cntl.bitfields.valid = 0;
- cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
- cntl.bitfields.atc = 1;
-
- /* Turning off this address until we set all the registers */
- for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
- write_register(kgd,
- watchRegs[i * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL],
- cntl.u32All);
-
- return 0;
-}
-
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- union TCP_WATCH_CNTL_BITS cntl;
-
- cntl.u32All = cntl_val;
-
- /* Turning off this watch point until we set all the registers */
- cntl.bitfields.valid = 0;
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL],
- cntl.u32All);
-
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_HI],
- addr_hi);
-
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_LO],
- addr_lo);
-
- /* Enable the watch point */
- cntl.bitfields.valid = 1;
-
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL],
- cntl.u32All);
-
- return 0;
-}
-
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
- uint32_t data;
-
- mutex_lock(&rdev->grbm_idx_mutex);
-
- write_register(kgd, GRBM_GFX_INDEX, gfx_index_val);
- write_register(kgd, SQ_CMD, sq_cmd);
-
- /* Restore the GRBM_GFX_INDEX register */
-
- data = INSTANCE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
- SE_BROADCAST_WRITES;
-
- write_register(kgd, GRBM_GFX_INDEX, data);
-
- mutex_unlock(&rdev->grbm_idx_mutex);
-
- return 0;
-}
-
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset)
-{
- return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]
- / 4;
-}
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid)
-{
- uint32_t reg;
- struct radeon_device *rdev = (struct radeon_device *) kgd;
-
- reg = RREG32(ATC_VMID0_PASID_MAPPING + vmid*4);
- return reg & ATC_VMID_PASID_MAPPING_VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct radeon_device *rdev = (struct radeon_device *) kgd;
-
- reg = RREG32(ATC_VMID0_PASID_MAPPING + vmid*4);
- return reg & ATC_VMID_PASID_MAPPING_PASID_MASK;
-}
-
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
-{
- struct radeon_device *rdev = (struct radeon_device *) kgd;
-
- return WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
-}
-
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct radeon_device *rdev = (struct radeon_device *) kgd;
- const union radeon_firmware_header *hdr;
-
- BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union radeon_firmware_header *)
- rdev->mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- case KGD_ENGINE_SDMA2:
- hdr = (const union radeon_firmware_header *)
- rdev->sdma_fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dfee8f7d94ae..cde037f213d7 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -34,8 +34,6 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#include "radeon_kfd.h"
-
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
@@ -68,8 +66,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
- radeon_kfd_device_fini(rdev);
-
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
@@ -174,9 +170,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- radeon_kfd_device_probe(rdev);
- radeon_kfd_device_init(rdev);
-
if (radeon_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index da44ac234f64..ca0a7ed28c9b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -762,10 +762,6 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
-extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
- const u8 *dpcd,
- unsigned pix_clock,
- unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index db8f079e441e..bc26efd1793e 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -205,5 +205,5 @@ DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/radeon
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index bf69bf9086bf..6ada64db00e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -597,7 +597,7 @@ release_sg:
kfree(ttm->sg);
release_pages:
- release_pages(ttm->pages, pinned, 0);
+ release_pages(ttm->pages, pinned);
return r;
}
@@ -725,8 +725,6 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@@ -762,33 +760,13 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
}
#endif
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
- return 0;
+ return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
@@ -815,14 +793,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
+ ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 7278b9703c15..566d1a948c8f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/of_graph.h>
#include <linux/wait.h>
@@ -213,7 +214,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static void rcar_du_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index dcc539ba85d6..0ccc76217ee4 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -57,4 +57,13 @@ config ROCKCHIP_INNO_HDMI
for the Innosilicon HDMI driver. If you want to enable
HDMI on RK3036 based SoC, you should select this option.
+config ROCKCHIP_LVDS
+ bool "Rockchip LVDS support"
+ depends on DRM_ROCKCHIP
+ depends on PINCTRL && OF
+ help
+ Choose this option to enable support for Rockchip LVDS controllers.
+ Rockchip rk3288 SoC has LVDS TX Controller can be used, and it
+ support LVDS, rgb, dual LVDS output mode. say Y to enable its
+ driver.
endif
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 305409818ffb..a314e2109e76 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -13,5 +13,6 @@ rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
+rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 9606121fa185..93b7102dd008 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -72,7 +72,7 @@ struct rockchip_dp_device {
struct reset_control *rst;
struct work_struct psr_work;
- spinlock_t psr_lock;
+ struct mutex psr_lock;
unsigned int psr_state;
const struct rockchip_dp_chip_data *data;
@@ -83,21 +83,20 @@ struct rockchip_dp_device {
static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
{
struct rockchip_dp_device *dp = to_dp(encoder);
- unsigned long flags;
if (!analogix_dp_psr_supported(dp->dev))
return;
- dev_dbg(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
+ DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
- spin_lock_irqsave(&dp->psr_lock, flags);
+ mutex_lock(&dp->psr_lock);
if (enabled)
dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
else
dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
schedule_work(&dp->psr_work);
- spin_unlock_irqrestore(&dp->psr_lock, flags);
+ mutex_unlock(&dp->psr_lock);
}
static void analogix_dp_psr_work(struct work_struct *work)
@@ -105,21 +104,20 @@ static void analogix_dp_psr_work(struct work_struct *work)
struct rockchip_dp_device *dp =
container_of(work, typeof(*dp), psr_work);
int ret;
- unsigned long flags;
ret = rockchip_drm_wait_vact_end(dp->encoder.crtc,
PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
if (ret) {
- dev_err(dp->dev, "line flag interrupt did not arrive\n");
+ DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n");
return;
}
- spin_lock_irqsave(&dp->psr_lock, flags);
+ mutex_lock(&dp->psr_lock);
if (dp->psr_state == EDP_VSC_PSR_STATE_ACTIVE)
analogix_dp_enable_psr(dp->dev);
else
analogix_dp_disable_psr(dp->dev);
- spin_unlock_irqrestore(&dp->psr_lock, flags);
+ mutex_unlock(&dp->psr_lock);
}
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
@@ -140,13 +138,13 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
ret = clk_prepare_enable(dp->pclk);
if (ret < 0) {
- dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
return ret;
}
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
- dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to dp pre init %d\n", ret);
clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -211,17 +209,17 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
else
val = dp->data->lcdsel_big;
- dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
ret = clk_prepare_enable(dp->grfclk);
if (ret < 0) {
- dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
if (ret != 0)
- dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(dp->grfclk);
}
@@ -277,7 +275,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dp->grf)) {
- dev_err(dev, "failed to get rockchip,grf property\n");
+ DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n");
return PTR_ERR(dp->grf);
}
@@ -287,31 +285,31 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
} else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(dp->grfclk)) {
- dev_err(dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(dev, "failed to get grf clock\n");
return PTR_ERR(dp->grfclk);
}
dp->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dp->pclk)) {
- dev_err(dev, "failed to get pclk property\n");
+ DRM_DEV_ERROR(dev, "failed to get pclk property\n");
return PTR_ERR(dp->pclk);
}
dp->rst = devm_reset_control_get(dev, "dp");
if (IS_ERR(dp->rst)) {
- dev_err(dev, "failed to get dp reset control\n");
+ DRM_DEV_ERROR(dev, "failed to get dp reset control\n");
return PTR_ERR(dp->rst);
}
ret = clk_prepare_enable(dp->pclk);
if (ret < 0) {
- dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
return ret;
}
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
- dev_err(dp->dev, "failed to pre init %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to pre init %d\n", ret);
clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -381,7 +379,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.power_off = rockchip_dp_powerdown;
dp->plat_data.get_modes = rockchip_dp_get_modes;
- spin_lock_init(&dp->psr_lock);
+ mutex_init(&dp->psr_lock);
dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a57da051f516..275844d0d0ec 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -287,14 +287,6 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *
-cdn_dp_connector_best_encoder(struct drm_connector *connector)
-{
- struct cdn_dp_device *dp = connector_to_dp(connector);
-
- return &dp->encoder;
-}
-
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -346,7 +338,6 @@ static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
.get_modes = cdn_dp_connector_get_modes,
- .best_encoder = cdn_dp_connector_best_encoder,
.mode_valid = cdn_dp_connector_mode_valid,
};
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index b14d211f6c21..eb3042c6d1b2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -323,7 +323,7 @@ int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff;
dp->fw_version |= reg << 24;
- dev_dbg(dp->dev, "firmware version: %x\n", dp->fw_version);
+ DRM_DEV_DEBUG(dp->dev, "firmware version: %x\n", dp->fw_version);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 9a20b9dc27c8..b1fe0639227e 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -430,9 +430,9 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
testdin = max_mbps_to_testdin(dsi->lane_mbps);
if (testdin < 0) {
- dev_err(dsi->dev,
- "failed to get testdin for %dmbps lane clock\n",
- dsi->lane_mbps);
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get testdin for %dmbps lane clock\n",
+ dsi->lane_mbps);
return testdin;
}
@@ -443,7 +443,7 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
ret = clk_prepare_enable(dsi->phy_cfg_clk);
if (ret) {
- dev_err(dsi->dev, "Failed to enable phy_cfg_clk\n");
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
return ret;
}
@@ -501,7 +501,7 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev, "failed to wait for phy lock state\n");
+ DRM_DEV_ERROR(dsi->dev, "failed to wait for phy lock state\n");
goto phy_init_end;
}
@@ -509,8 +509,8 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
val, val & STOP_STATE_CLK_LANE, 1000,
PHY_STATUS_TIMEOUT_US);
if (ret < 0)
- dev_err(dsi->dev,
- "failed to wait for phy clk lane stop state\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to wait for phy clk lane stop state\n");
phy_init_end:
clk_disable_unprepare(dsi->phy_cfg_clk);
@@ -529,8 +529,9 @@ static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi,
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
if (bpp < 0) {
- dev_err(dsi->dev, "failed to get bpp for pixel format %d\n",
- dsi->format);
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get bpp for pixel format %d\n",
+ dsi->format);
return bpp;
}
@@ -541,7 +542,8 @@ static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi,
if (tmp < max_mbps)
target_mbps = tmp;
else
- dev_err(dsi->dev, "DPHY clock frequency is out of range\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "DPHY clock frequency is out of range\n");
}
pllref = DIV_ROUND_UP(clk_get_rate(dsi->pllref_clk), USEC_PER_SEC);
@@ -582,8 +584,9 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
struct dw_mipi_dsi *dsi = host_to_dsi(host);
if (device->lanes > dsi->pdata->max_data_lanes) {
- dev_err(dsi->dev, "the number of data lanes(%u) is too many\n",
- device->lanes);
+ DRM_DEV_ERROR(dsi->dev,
+ "the number of data lanes(%u) is too many\n",
+ device->lanes);
return -EINVAL;
}
@@ -632,7 +635,8 @@ static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
val, !(val & GEN_CMD_FULL), 1000,
CMD_PKT_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev, "failed to get available command FIFO\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get available command FIFO\n");
return ret;
}
@@ -643,7 +647,7 @@ static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
val, (val & mask) == mask,
1000, CMD_PKT_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev, "failed to write command FIFO\n");
+ DRM_DEV_ERROR(dsi->dev, "failed to write command FIFO\n");
return ret;
}
@@ -663,8 +667,9 @@ static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
data |= tx_buf[1] << 8;
if (msg->tx_len > 2) {
- dev_err(dsi->dev, "too long tx buf length %zu for short write\n",
- msg->tx_len);
+ DRM_DEV_ERROR(dsi->dev,
+ "too long tx buf length %zu for short write\n",
+ msg->tx_len);
return -EINVAL;
}
@@ -682,8 +687,9 @@ static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
u32 val;
if (msg->tx_len < 3) {
- dev_err(dsi->dev, "wrong tx buf length %zu for long write\n",
- msg->tx_len);
+ DRM_DEV_ERROR(dsi->dev,
+ "wrong tx buf length %zu for long write\n",
+ msg->tx_len);
return -EINVAL;
}
@@ -704,8 +710,8 @@ static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
val, !(val & GEN_PLD_W_FULL), 1000,
CMD_PKT_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev,
- "failed to get available write payload FIFO\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get available write payload FIFO\n");
return ret;
}
}
@@ -731,8 +737,8 @@ static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
break;
default:
- dev_err(dsi->dev, "unsupported message type 0x%02x\n",
- msg->type);
+ DRM_DEV_ERROR(dsi->dev, "unsupported message type 0x%02x\n",
+ msg->type);
ret = -EINVAL;
}
@@ -935,7 +941,7 @@ static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
return;
if (clk_prepare_enable(dsi->pclk)) {
- dev_err(dsi->dev, "%s: Failed to enable pclk\n", __func__);
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
return;
}
@@ -967,7 +973,7 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
return;
if (clk_prepare_enable(dsi->pclk)) {
- dev_err(dsi->dev, "%s: Failed to enable pclk\n", __func__);
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
return;
}
@@ -991,7 +997,7 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
*/
ret = clk_prepare_enable(dsi->grf_clk);
if (ret) {
- dev_err(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
return;
}
@@ -1004,7 +1010,7 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
if (drm_panel_prepare(dsi->panel))
- dev_err(dsi->dev, "failed to prepare panel\n");
+ DRM_DEV_ERROR(dsi->dev, "failed to prepare panel\n");
dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_VID_MODE);
drm_panel_enable(dsi->panel);
@@ -1017,7 +1023,8 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
val = pdata->dsi0_en_bit << 16;
regmap_write(dsi->grf_regmap, pdata->grf_switch_reg, val);
- dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
+ DRM_DEV_DEBUG(dsi->dev,
+ "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
dsi->dpms_mode = DRM_MODE_DPMS_ON;
clk_disable_unprepare(dsi->grf_clk);
@@ -1111,7 +1118,7 @@ static int dw_mipi_dsi_register(struct drm_device *drm,
ret = drm_encoder_init(drm, &dsi->encoder, &dw_mipi_dsi_encoder_funcs,
DRM_MODE_ENCODER_DSI, NULL);
if (ret) {
- dev_err(dev, "Failed to initialize encoder with drm\n");
+ DRM_DEV_ERROR(dev, "Failed to initialize encoder with drm\n");
return ret;
}
@@ -1133,7 +1140,7 @@ static int rockchip_mipi_parse_dt(struct dw_mipi_dsi *dsi)
dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dsi->grf_regmap)) {
- dev_err(dsi->dev, "Unable to get rockchip,grf\n");
+ DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(dsi->grf_regmap);
}
@@ -1205,14 +1212,15 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
- dev_err(dev, "Unable to get pll reference clock: %d\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Unable to get pll reference clock: %d\n", ret);
return ret;
}
dsi->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dsi->pclk)) {
ret = PTR_ERR(dsi->pclk);
- dev_err(dev, "Unable to get pclk: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Unable to get pclk: %d\n", ret);
return ret;
}
@@ -1226,7 +1234,8 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
if (ret == -ENOENT) {
apb_rst = NULL;
} else {
- dev_err(dev, "Unable to get reset control: %d\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Unable to get reset control: %d\n", ret);
return ret;
}
}
@@ -1234,7 +1243,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
if (apb_rst) {
ret = clk_prepare_enable(dsi->pclk);
if (ret) {
- dev_err(dev, "%s: Failed to enable pclk\n", __func__);
+ DRM_DEV_ERROR(dev, "Failed to enable pclk\n");
return ret;
}
@@ -1249,7 +1258,8 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
if (IS_ERR(dsi->phy_cfg_clk)) {
ret = PTR_ERR(dsi->phy_cfg_clk);
- dev_err(dev, "Unable to get phy_cfg_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Unable to get phy_cfg_clk: %d\n", ret);
return ret;
}
}
@@ -1258,30 +1268,28 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
dsi->grf_clk = devm_clk_get(dev, "grf");
if (IS_ERR(dsi->grf_clk)) {
ret = PTR_ERR(dsi->grf_clk);
- dev_err(dev, "Unable to get grf_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
return ret;
}
}
ret = clk_prepare_enable(dsi->pllref_clk);
if (ret) {
- dev_err(dev, "%s: Failed to enable pllref_clk\n", __func__);
+ DRM_DEV_ERROR(dev, "Failed to enable pllref_clk\n");
return ret;
}
ret = dw_mipi_dsi_register(drm, dsi);
if (ret) {
- dev_err(dev, "Failed to register mipi_dsi: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to register mipi_dsi: %d\n", ret);
goto err_pllref;
}
- pm_runtime_enable(dev);
-
dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
dsi->dsi_host.dev = dev;
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
- dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
goto err_cleanup;
}
@@ -1291,6 +1299,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
}
dev_set_drvdata(dev, dsi);
+ pm_runtime_enable(dev);
return 0;
err_mipi_dsi_host:
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index ccd5d595ada7..1eb02a82fd91 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -168,7 +168,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
+ DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
@@ -178,7 +178,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
} else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(hdmi->vpll_clk)) {
- dev_err(hdmi->dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
return PTR_ERR(hdmi->vpll_clk);
}
@@ -188,13 +188,14 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
} else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(hdmi->grf_clk)) {
- dev_err(hdmi->dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
return PTR_ERR(hdmi->grf_clk);
}
ret = clk_prepare_enable(hdmi->vpll_clk);
if (ret) {
- dev_err(hdmi->dev, "Failed to enable HDMI vpll: %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev,
+ "Failed to enable HDMI vpll: %d\n", ret);
return ret;
}
@@ -259,17 +260,17 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(hdmi->grf_clk);
if (ret < 0) {
- dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
if (ret != 0)
- dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(hdmi->grf_clk);
- dev_dbg(hdmi->dev, "vop %s output to hdmi\n",
- ret ? "LIT" : "BIG");
+ DRM_DEV_DEBUG(hdmi->dev, "vop %s output to hdmi\n",
+ ret ? "LIT" : "BIG");
}
static int
@@ -368,7 +369,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
- dev_err(hdmi->dev, "Unable to parse OF data\n");
+ DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n");
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 7a251a54e792..ee584d87111f 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -224,7 +224,7 @@ static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode)
break;
default:
- dev_err(hdmi->dev, "Unknown power mode %d\n", mode);
+ DRM_DEV_ERROR(hdmi->dev, "Unknown power mode %d\n", mode);
}
}
@@ -742,8 +742,9 @@ static int inno_hdmi_i2c_xfer(struct i2c_adapter *adap,
hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
for (i = 0; i < num; i++) {
- dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
- i + 1, num, msgs[i].len, msgs[i].flags);
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
if (msgs[i].flags & I2C_M_RD)
ret = inno_hdmi_i2c_read(hdmi, &msgs[i]);
@@ -806,7 +807,7 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
hdmi->i2c = i2c;
- dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+ DRM_DEV_INFO(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
return adap;
}
@@ -838,13 +839,14 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
if (IS_ERR(hdmi->pclk)) {
- dev_err(hdmi->dev, "Unable to get HDMI pclk clk\n");
+ DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI pclk clk\n");
return PTR_ERR(hdmi->pclk);
}
ret = clk_prepare_enable(hdmi->pclk);
if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI pclk clock: %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev,
+ "Cannot enable HDMI pclk clock: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index ff3d0f5efbb1..76d63de5921d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -58,7 +58,7 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
ret = iommu_attach_device(private->domain, dev);
if (ret) {
- dev_err(dev, "Failed to attach iommu device\n");
+ DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
return ret;
}
@@ -373,8 +373,9 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
iommu = of_parse_phandle(port->parent, "iommus", 0);
if (!iommu || !of_device_is_available(iommu->parent)) {
- dev_dbg(dev, "no iommu attached for %pOF, using non-iommu buffers\n",
- port->parent);
+ DRM_DEV_DEBUG(dev,
+ "no iommu attached for %pOF, using non-iommu buffers\n",
+ port->parent);
/*
* if there is a crtc not support iommu, force set all
* crtc use non-iommu buffer.
@@ -389,12 +390,13 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
}
if (i == 0) {
- dev_err(dev, "missing 'ports' property\n");
+ DRM_DEV_ERROR(dev, "missing 'ports' property\n");
return -ENODEV;
}
if (!found) {
- dev_err(dev, "No available vop found for display-subsystem.\n");
+ DRM_DEV_ERROR(dev,
+ "No available vop found for display-subsystem.\n");
return -ENODEV;
}
@@ -453,6 +455,8 @@ static int __init rockchip_drm_init(void)
num_rockchip_sub_drivers = 0;
ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_DRM_ROCKCHIP);
+ ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver,
+ CONFIG_ROCKCHIP_LVDS);
ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver,
CONFIG_ROCKCHIP_ANALOGIX_DP);
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c7e96b82cf63..498dfbc52cec 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -69,5 +69,6 @@ extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_driver;
extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
+extern struct platform_driver rockchip_lvds_driver;
extern struct platform_driver vop_platform_driver;
#endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 70773041785b..cd2ace0c3caa 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -100,8 +100,9 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm
ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
&rockchip_drm_fb_funcs);
if (ret) {
- dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
- ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to initialize framebuffer: %d\n",
+ ret);
kfree(rockchip_fb);
return ERR_PTR(ret);
}
@@ -134,7 +135,8 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
- dev_err(dev->dev, "Failed to lookup GEM object\n");
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to lookup GEM object\n");
ret = -ENXIO;
goto err_gem_object_unreference;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 724579ebf947..e6650553f5d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -76,7 +76,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- dev_err(dev->dev, "Failed to create framebuffer info.\n");
+ DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n");
ret = PTR_ERR(fbi);
goto out;
}
@@ -84,7 +84,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
private->fbdev_bo);
if (IS_ERR(helper->fb)) {
- dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto out;
}
@@ -138,21 +139,24 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
if (ret < 0) {
- dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
- ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to initialize drm fb helper - %d.\n",
+ ret);
return ret;
}
ret = drm_fb_helper_single_add_all_connectors(helper);
if (ret < 0) {
- dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to add connectors - %d.\n", ret);
goto err_drm_fb_helper_fini;
}
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
- dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
- ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to set initial hw config - %d.\n",
+ ret);
goto err_drm_fb_helper_fini;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 1869c8bb76c8..1d9655576b6e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -220,7 +220,7 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
unsigned int i, count = obj->size >> PAGE_SHIFT;
- unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long user_count = vma_pages(vma);
unsigned long uaddr = vma->vm_start;
unsigned long offset = vma->vm_pgoff;
unsigned long end = user_count + offset;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index a553e182ff53..3acfd576b7df 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -101,9 +101,9 @@ static void psr_set_state(struct psr_drv *psr, enum psr_state state)
spin_unlock_irqrestore(&psr->lock, flags);
}
-static void psr_flush_handler(unsigned long data)
+static void psr_flush_handler(struct timer_list *t)
{
- struct psr_drv *psr = (struct psr_drv *)data;
+ struct psr_drv *psr = from_timer(psr, t, flush_timer);
unsigned long flags;
/* If the state has changed since we initiated the flush, do nothing */
@@ -232,7 +232,7 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
if (!psr)
return -ENOMEM;
- setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+ timer_setup(&psr->flush_timer, psr_flush_handler, 0);
spin_lock_init(&psr->lock);
psr->active = true;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index bf9ed0e63973..19128b4dea54 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -160,7 +160,7 @@ static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
int offset, mask, shift;
if (!reg || !reg->mask) {
- dev_dbg(vop->dev, "Warning: not support %s\n", reg_name);
+ DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name);
return;
}
@@ -499,7 +499,7 @@ static int vop_enable(struct drm_crtc *crtc)
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
- dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
return ret;
}
@@ -523,7 +523,8 @@ static int vop_enable(struct drm_crtc *crtc)
*/
ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
if (ret) {
- dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
+ DRM_DEV_ERROR(vop->dev,
+ "failed to attach dma mapping, %d\n", ret);
goto err_disable_aclk;
}
@@ -1361,42 +1362,42 @@ static int vop_initial(struct vop *vop)
vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
if (IS_ERR(vop->hclk)) {
- dev_err(vop->dev, "failed to get hclk source\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n");
return PTR_ERR(vop->hclk);
}
vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
if (IS_ERR(vop->aclk)) {
- dev_err(vop->dev, "failed to get aclk source\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n");
return PTR_ERR(vop->aclk);
}
vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
if (IS_ERR(vop->dclk)) {
- dev_err(vop->dev, "failed to get dclk source\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n");
return PTR_ERR(vop->dclk);
}
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
- dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
return ret;
}
ret = clk_prepare(vop->dclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare dclk\n");
+ DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n");
goto err_put_pm_runtime;
}
/* Enable both the hclk and aclk to setup the vop */
ret = clk_prepare_enable(vop->hclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare/enable hclk\n");
+ DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n");
goto err_unprepare_dclk;
}
ret = clk_prepare_enable(vop->aclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare/enable aclk\n");
+ DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n");
goto err_disable_hclk;
}
@@ -1405,7 +1406,7 @@ static int vop_initial(struct vop *vop)
*/
ahb_rst = devm_reset_control_get(vop->dev, "ahb");
if (IS_ERR(ahb_rst)) {
- dev_err(vop->dev, "failed to get ahb reset\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n");
ret = PTR_ERR(ahb_rst);
goto err_disable_aclk;
}
@@ -1434,7 +1435,7 @@ static int vop_initial(struct vop *vop)
*/
vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
if (IS_ERR(vop->dclk_rst)) {
- dev_err(vop->dev, "failed to get dclk reset\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n");
ret = PTR_ERR(vop->dclk_rst);
goto err_disable_aclk;
}
@@ -1511,7 +1512,7 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
vop_line_flag_irq_disable(vop);
if (jiffies_left == 0) {
- dev_err(vop->dev, "Timeout waiting for IRQ\n");
+ DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
return -ETIMEDOUT;
}
@@ -1558,7 +1559,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "cannot find irq for vop\n");
+ DRM_DEV_ERROR(dev, "cannot find irq for vop\n");
return irq;
}
vop->irq = (unsigned int)irq;
@@ -1584,7 +1585,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
ret = vop_initial(vop);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev,
+ "cannot initial vop dev - err %d\n", ret);
goto err_disable_pm_runtime;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
new file mode 100644
index 000000000000..84911bdc27d1
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Mark Yao <mark.yao@rock-chips.com>
+ * Sandy Huang <hjc@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include <linux/component.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+#include "rockchip_lvds.h"
+
+#define DISPLAY_OUTPUT_RGB 0
+#define DISPLAY_OUTPUT_LVDS 1
+#define DISPLAY_OUTPUT_DUAL_LVDS 2
+
+#define connector_to_lvds(c) \
+ container_of(c, struct rockchip_lvds, connector)
+
+#define encoder_to_lvds(c) \
+ container_of(c, struct rockchip_lvds, encoder)
+
+/**
+ * rockchip_lvds_soc_data - rockchip lvds Soc private data
+ * @ch1_offset: lvds channel 1 registe offset
+ * grf_soc_con6: general registe offset for LVDS contrl
+ * grf_soc_con7: general registe offset for LVDS contrl
+ * has_vop_sel: to indicate whether need to choose from different VOP.
+ */
+struct rockchip_lvds_soc_data {
+ u32 ch1_offset;
+ int grf_soc_con6;
+ int grf_soc_con7;
+ bool has_vop_sel;
+};
+
+struct rockchip_lvds {
+ struct device *dev;
+ void __iomem *regs;
+ struct regmap *grf;
+ struct clk *pclk;
+ const struct rockchip_lvds_soc_data *soc_data;
+ int output; /* rgb lvds or dual lvds output */
+ int format; /* vesa or jeida format */
+ struct drm_device *drm_dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct dev_pin_info *pins;
+};
+
+static inline void lvds_writel(struct rockchip_lvds *lvds, u32 offset, u32 val)
+{
+ writel_relaxed(val, lvds->regs + offset);
+ if (lvds->output == DISPLAY_OUTPUT_LVDS)
+ return;
+ writel_relaxed(val, lvds->regs + offset + lvds->soc_data->ch1_offset);
+}
+
+static inline int lvds_name_to_format(const char *s)
+{
+ if (strncmp(s, "jeida-18", 8) == 0)
+ return LVDS_JEIDA_18;
+ else if (strncmp(s, "jeida-24", 8) == 0)
+ return LVDS_JEIDA_24;
+ else if (strncmp(s, "vesa-24", 7) == 0)
+ return LVDS_VESA_24;
+
+ return -EINVAL;
+}
+
+static inline int lvds_name_to_output(const char *s)
+{
+ if (strncmp(s, "rgb", 3) == 0)
+ return DISPLAY_OUTPUT_RGB;
+ else if (strncmp(s, "lvds", 4) == 0)
+ return DISPLAY_OUTPUT_LVDS;
+ else if (strncmp(s, "duallvds", 8) == 0)
+ return DISPLAY_OUTPUT_DUAL_LVDS;
+
+ return -EINVAL;
+}
+
+static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
+{
+ int ret;
+ u32 val;
+
+ ret = clk_enable(lvds->pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
+ return ret;
+ }
+ ret = pm_runtime_get_sync(lvds->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ clk_disable(lvds->pclk);
+ return ret;
+ }
+ val = RK3288_LVDS_CH0_REG0_LANE4_EN | RK3288_LVDS_CH0_REG0_LANE3_EN |
+ RK3288_LVDS_CH0_REG0_LANE2_EN | RK3288_LVDS_CH0_REG0_LANE1_EN |
+ RK3288_LVDS_CH0_REG0_LANE0_EN;
+ if (lvds->output == DISPLAY_OUTPUT_RGB) {
+ val |= RK3288_LVDS_CH0_REG0_TTL_EN |
+ RK3288_LVDS_CH0_REG0_LANECK_EN;
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG4,
+ RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG5,
+ RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
+ } else {
+ val |= RK3288_LVDS_CH0_REG0_LVDS_EN |
+ RK3288_LVDS_CH0_REG0_LANECK_EN;
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG1,
+ RK3288_LVDS_CH0_REG1_LANECK_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE4_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE3_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE2_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE1_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE0_BIAS);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_CH0_REG2_RESERVE_ON |
+ RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
+ }
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG3, RK3288_LVDS_PLL_FBDIV_REG3(0x46));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REGD, RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG20, RK3288_LVDS_CH0_REG20_LSB);
+
+ lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
+
+ return 0;
+}
+
+static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
+{
+ int ret;
+ u32 val;
+
+ lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
+ lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ val = LVDS_DUAL | LVDS_TTL_EN | LVDS_CH0_EN | LVDS_CH1_EN | LVDS_PWRDN;
+ val |= val << 16;
+ ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
+ if (ret != 0)
+ DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
+
+ pm_runtime_put(lvds->dev);
+ clk_disable(lvds->pclk);
+}
+
+static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+{
+ struct rockchip_lvds *lvds = connector_to_lvds(connector);
+ struct drm_panel *panel = lvds->panel;
+
+ return drm_panel_get_modes(panel);
+}
+
+static const
+struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
+ .get_modes = rockchip_lvds_connector_get_modes,
+};
+
+static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+ u8 pin_hsync = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1 : 0;
+ u8 pin_dclk = (mode->flags & DRM_MODE_FLAG_PCSYNC) ? 1 : 0;
+ u32 val;
+ int ret;
+
+ /* iomux to LCD data/sync mode */
+ if (lvds->output == DISPLAY_OUTPUT_RGB)
+ if (lvds->pins && !IS_ERR(lvds->pins->default_state))
+ pinctrl_select_state(lvds->pins->p,
+ lvds->pins->default_state);
+ val = lvds->format | LVDS_CH0_EN;
+ if (lvds->output == DISPLAY_OUTPUT_RGB)
+ val |= LVDS_TTL_EN | LVDS_CH1_EN;
+ else if (lvds->output == DISPLAY_OUTPUT_DUAL_LVDS)
+ val |= LVDS_DUAL | LVDS_CH1_EN;
+
+ if ((mode->htotal - mode->hsync_start) & 0x01)
+ val |= LVDS_START_PHASE_RST_1;
+
+ val |= (pin_dclk << 8) | (pin_hsync << 9);
+ val |= (0xffff << 16);
+ ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
+ if (ret != 0) {
+ DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
+ return;
+ }
+}
+
+static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
+ struct drm_encoder *encoder)
+{
+ u32 val;
+ int ret;
+
+ if (!lvds->soc_data->has_vop_sel)
+ return 0;
+
+ ret = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
+ if (ret < 0)
+ return ret;
+
+ val = RK3288_LVDS_SOC_CON6_SEL_VOP_LIT << 16;
+ if (ret)
+ val |= RK3288_LVDS_SOC_CON6_SEL_VOP_LIT;
+
+ ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con6, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_LVDS;
+
+ return 0;
+}
+
+static void rockchip_lvds_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ int ret;
+
+ drm_panel_prepare(lvds->panel);
+ ret = rockchip_lvds_poweron(lvds);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to power on lvds: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ }
+ rockchip_lvds_grf_config(encoder, mode);
+ rockchip_lvds_set_vop_source(lvds, encoder);
+ drm_panel_enable(lvds->panel);
+}
+
+static void rockchip_lvds_encoder_disable(struct drm_encoder *encoder)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+ drm_panel_disable(lvds->panel);
+ rockchip_lvds_poweroff(lvds);
+ drm_panel_unprepare(lvds->panel);
+}
+
+static const
+struct drm_encoder_helper_funcs rockchip_lvds_encoder_helper_funcs = {
+ .enable = rockchip_lvds_encoder_enable,
+ .disable = rockchip_lvds_encoder_disable,
+ .atomic_check = rockchip_lvds_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct rockchip_lvds_soc_data rk3288_lvds_data = {
+ .ch1_offset = 0x100,
+ .grf_soc_con6 = 0x025c,
+ .grf_soc_con7 = 0x0260,
+ .has_vop_sel = true,
+};
+
+static const struct of_device_id rockchip_lvds_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3288-lvds",
+ .data = &rk3288_lvds_data
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
+
+static int rockchip_lvds_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rockchip_lvds *lvds = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct device_node *remote = NULL;
+ struct device_node *port, *endpoint;
+ int ret = 0, child_count = 0;
+ const char *name;
+ u32 endpoint_id;
+
+ lvds->drm_dev = drm_dev;
+ port = of_graph_get_port_by_id(dev->of_node, 1);
+ if (!port) {
+ DRM_DEV_ERROR(dev,
+ "can't found port point, please init lvds panel port!\n");
+ return -EINVAL;
+ }
+ for_each_child_of_node(port, endpoint) {
+ child_count++;
+ of_property_read_u32(endpoint, "reg", &endpoint_id);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
+ &lvds->panel, &lvds->bridge);
+ if (!ret)
+ break;
+ }
+ if (!child_count) {
+ DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
+ ret = -EINVAL;
+ goto err_put_port;
+ } else if (ret) {
+ DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
+ ret = -EPROBE_DEFER;
+ goto err_put_port;
+ }
+ if (lvds->panel)
+ remote = lvds->panel->dev->of_node;
+ else
+ remote = lvds->bridge->of_node;
+ if (of_property_read_string(dev->of_node, "rockchip,output", &name))
+ /* default set it as output rgb */
+ lvds->output = DISPLAY_OUTPUT_RGB;
+ else
+ lvds->output = lvds_name_to_output(name);
+
+ if (lvds->output < 0) {
+ DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
+ ret = lvds->output;
+ goto err_put_remote;
+ }
+
+ if (of_property_read_string(remote, "data-mapping", &name))
+ /* default set it as format vesa 18 */
+ lvds->format = LVDS_VESA_18;
+ else
+ lvds->format = lvds_name_to_format(name);
+
+ if (lvds->format < 0) {
+ DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
+ ret = lvds->format;
+ goto err_put_remote;
+ }
+
+ encoder = &lvds->encoder;
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dev->of_node);
+
+ ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize encoder: %d\n", ret);
+ goto err_put_remote;
+ }
+
+ drm_encoder_helper_add(encoder, &rockchip_lvds_encoder_helper_funcs);
+
+ if (lvds->panel) {
+ connector = &lvds->connector;
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ ret = drm_connector_init(drm_dev, connector,
+ &rockchip_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize connector: %d\n", ret);
+ goto err_free_encoder;
+ }
+
+ drm_connector_helper_add(connector,
+ &rockchip_lvds_connector_helper_funcs);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
+ }
+
+ ret = drm_panel_attach(lvds->panel, connector);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach panel: %d\n", ret);
+ goto err_free_connector;
+ }
+ } else {
+ lvds->bridge->encoder = encoder;
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach bridge: %d\n", ret);
+ goto err_free_encoder;
+ }
+ encoder->bridge = lvds->bridge;
+ }
+
+ pm_runtime_enable(dev);
+ of_node_put(remote);
+ of_node_put(port);
+
+ return 0;
+
+err_free_connector:
+ drm_connector_cleanup(connector);
+err_free_encoder:
+ drm_encoder_cleanup(encoder);
+err_put_remote:
+ of_node_put(remote);
+err_put_port:
+ of_node_put(port);
+
+ return ret;
+}
+
+static void rockchip_lvds_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rockchip_lvds *lvds = dev_get_drvdata(dev);
+
+ rockchip_lvds_encoder_disable(&lvds->encoder);
+ if (lvds->panel)
+ drm_panel_detach(lvds->panel);
+ pm_runtime_disable(dev);
+ drm_connector_cleanup(&lvds->connector);
+ drm_encoder_cleanup(&lvds->encoder);
+}
+
+static const struct component_ops rockchip_lvds_component_ops = {
+ .bind = rockchip_lvds_bind,
+ .unbind = rockchip_lvds_unbind,
+};
+
+static int rockchip_lvds_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_lvds *lvds;
+ const struct of_device_id *match;
+ struct resource *res;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+
+ lvds->dev = dev;
+ match = of_match_node(rockchip_lvds_dt_ids, dev->of_node);
+ if (!match)
+ return -ENODEV;
+ lvds->soc_data = match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lvds->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(lvds->regs))
+ return PTR_ERR(lvds->regs);
+
+ lvds->pclk = devm_clk_get(&pdev->dev, "pclk_lvds");
+ if (IS_ERR(lvds->pclk)) {
+ DRM_DEV_ERROR(dev, "could not get pclk_lvds\n");
+ return PTR_ERR(lvds->pclk);
+ }
+
+ lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
+ GFP_KERNEL);
+ if (!lvds->pins)
+ return -ENOMEM;
+
+ lvds->pins->p = devm_pinctrl_get(lvds->dev);
+ if (IS_ERR(lvds->pins->p)) {
+ DRM_DEV_ERROR(dev, "no pinctrl handle\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ } else {
+ lvds->pins->default_state =
+ pinctrl_lookup_state(lvds->pins->p, "lcdc");
+ if (IS_ERR(lvds->pins->default_state)) {
+ DRM_DEV_ERROR(dev, "no default pinctrl state\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ }
+ }
+
+ lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(lvds->grf)) {
+ DRM_DEV_ERROR(dev, "missing rockchip,grf property\n");
+ return PTR_ERR(lvds->grf);
+ }
+
+ dev_set_drvdata(dev, lvds);
+
+ ret = clk_prepare(lvds->pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to prepare pclk_lvds\n");
+ return ret;
+ }
+ ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to add component\n");
+ clk_unprepare(lvds->pclk);
+ }
+
+ return ret;
+}
+
+static int rockchip_lvds_remove(struct platform_device *pdev)
+{
+ struct rockchip_lvds *lvds = dev_get_drvdata(&pdev->dev);
+
+ component_del(&pdev->dev, &rockchip_lvds_component_ops);
+ clk_unprepare(lvds->pclk);
+
+ return 0;
+}
+
+struct platform_driver rockchip_lvds_driver = {
+ .probe = rockchip_lvds_probe,
+ .remove = rockchip_lvds_remove,
+ .driver = {
+ .name = "rockchip-lvds",
+ .of_match_table = of_match_ptr(rockchip_lvds_dt_ids),
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
new file mode 100644
index 000000000000..15810b737809
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Sandy Huang <hjc@rock-chips.com>
+ * Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_LVDS_
+#define _ROCKCHIP_LVDS_
+
+#define RK3288_LVDS_CH0_REG0 0x00
+#define RK3288_LVDS_CH0_REG0_LVDS_EN BIT(7)
+#define RK3288_LVDS_CH0_REG0_TTL_EN BIT(6)
+#define RK3288_LVDS_CH0_REG0_LANECK_EN BIT(5)
+#define RK3288_LVDS_CH0_REG0_LANE4_EN BIT(4)
+#define RK3288_LVDS_CH0_REG0_LANE3_EN BIT(3)
+#define RK3288_LVDS_CH0_REG0_LANE2_EN BIT(2)
+#define RK3288_LVDS_CH0_REG0_LANE1_EN BIT(1)
+#define RK3288_LVDS_CH0_REG0_LANE0_EN BIT(0)
+
+#define RK3288_LVDS_CH0_REG1 0x04
+#define RK3288_LVDS_CH0_REG1_LANECK_BIAS BIT(5)
+#define RK3288_LVDS_CH0_REG1_LANE4_BIAS BIT(4)
+#define RK3288_LVDS_CH0_REG1_LANE3_BIAS BIT(3)
+#define RK3288_LVDS_CH0_REG1_LANE2_BIAS BIT(2)
+#define RK3288_LVDS_CH0_REG1_LANE1_BIAS BIT(1)
+#define RK3288_LVDS_CH0_REG1_LANE0_BIAS BIT(0)
+
+#define RK3288_LVDS_CH0_REG2 0x08
+#define RK3288_LVDS_CH0_REG2_RESERVE_ON BIT(7)
+#define RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE BIT(6)
+#define RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE BIT(5)
+#define RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE BIT(4)
+#define RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE BIT(3)
+#define RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE BIT(2)
+#define RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE BIT(1)
+#define RK3288_LVDS_CH0_REG2_PLL_FBDIV8 BIT(0)
+
+#define RK3288_LVDS_CH0_REG3 0x0c
+#define RK3288_LVDS_CH0_REG3_PLL_FBDIV_MASK 0xff
+
+#define RK3288_LVDS_CH0_REG4 0x10
+#define RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE BIT(5)
+#define RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE BIT(4)
+#define RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE BIT(3)
+#define RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE BIT(2)
+#define RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE BIT(1)
+#define RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE BIT(0)
+
+#define RK3288_LVDS_CH0_REG5 0x14
+#define RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA BIT(5)
+#define RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA BIT(4)
+#define RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA BIT(3)
+#define RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA BIT(2)
+#define RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA BIT(1)
+#define RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA BIT(0)
+
+#define RK3288_LVDS_CFG_REGC 0x30
+#define RK3288_LVDS_CFG_REGC_PLL_ENABLE 0x00
+#define RK3288_LVDS_CFG_REGC_PLL_DISABLE 0xff
+
+#define RK3288_LVDS_CH0_REGD 0x34
+#define RK3288_LVDS_CH0_REGD_PLL_PREDIV_MASK 0x1f
+
+#define RK3288_LVDS_CH0_REG20 0x80
+#define RK3288_LVDS_CH0_REG20_MSB 0x45
+#define RK3288_LVDS_CH0_REG20_LSB 0x44
+
+#define RK3288_LVDS_CFG_REG21 0x84
+#define RK3288_LVDS_CFG_REG21_TX_ENABLE 0x92
+#define RK3288_LVDS_CFG_REG21_TX_DISABLE 0x00
+#define RK3288_LVDS_CH1_OFFSET 0x100
+
+/* fbdiv value is split over 2 registers, with bit8 in reg2 */
+#define RK3288_LVDS_PLL_FBDIV_REG2(_fbd) \
+ (_fbd & BIT(8) ? RK3288_LVDS_CH0_REG2_PLL_FBDIV8 : 0)
+#define RK3288_LVDS_PLL_FBDIV_REG3(_fbd) \
+ (_fbd & RK3288_LVDS_CH0_REG3_PLL_FBDIV_MASK)
+#define RK3288_LVDS_PLL_PREDIV_REGD(_pd) \
+ (_pd & RK3288_LVDS_CH0_REGD_PLL_PREDIV_MASK)
+
+#define RK3288_LVDS_SOC_CON6_SEL_VOP_LIT BIT(3)
+
+#define LVDS_FMT_MASK (0x07 << 16)
+#define LVDS_MSB BIT(3)
+#define LVDS_DUAL BIT(4)
+#define LVDS_FMT_1 BIT(5)
+#define LVDS_TTL_EN BIT(6)
+#define LVDS_START_PHASE_RST_1 BIT(7)
+#define LVDS_DCLK_INV BIT(8)
+#define LVDS_CH0_EN BIT(11)
+#define LVDS_CH1_EN BIT(12)
+#define LVDS_PWRDN BIT(15)
+
+#define LVDS_24BIT (0 << 1)
+#define LVDS_18BIT (1 << 1)
+#define LVDS_FORMAT_VESA (0 << 0)
+#define LVDS_FORMAT_JEIDA (1 << 0)
+
+#define LVDS_VESA_24 0
+#define LVDS_JEIDA_24 1
+#define LVDS_VESA_18 2
+#define LVDS_JEIDA_18 3
+
+#endif /* _ROCKCHIP_LVDS_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 94de7b9f6fde..4a39049e901a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -533,7 +533,7 @@ static int vop_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
if (!dev->of_node) {
- dev_err(dev, "can't find vop devices\n");
+ DRM_DEV_ERROR(dev, "can't find vop devices\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 388a0fc13564..d36919b14da7 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <video/sh_mobile_meram.h>
@@ -131,7 +132,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 1700c542cd93..9e9343101738 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
@@ -145,7 +146,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
}
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = sti_output_poll_changed,
.atomic_check = sti_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 852bf2293b05..83314aee65cb 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -463,11 +463,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = dvo;
bridge->funcs = &sti_dvo_bridge_funcs;
bridge->of_node = dvo->dev.of_node;
- err = drm_bridge_add(bridge);
- if (err) {
- DRM_ERROR("Failed to add bridge\n");
- return err;
- }
+ drm_bridge_add(bridge);
err = drm_bridge_attach(encoder, bridge, NULL);
if (err) {
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index b333b37f3f89..c857663eafc2 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "ltdc.h"
@@ -31,7 +32,7 @@ static void drv_output_poll_changed(struct drm_device *ddev)
}
static const struct drm_mode_config_funcs drv_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = drv_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 568c5d0461ea..e5b6310240fe 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -113,11 +113,13 @@ static enum dsi_color dsi_color_from_mipi(enum mipi_dsi_pixel_format fmt)
static int dsi_pll_get_clkout_khz(int clkin_khz, int idf, int ndiv, int odf)
{
+ int divisor = idf * odf;
+
/* prevent from division by 0 */
- if (idf * odf)
- return DIV_ROUND_CLOSEST(clkin_khz * ndiv, idf * odf);
+ if (!divisor)
+ return 0;
- return 0;
+ return DIV_ROUND_CLOSEST(clkin_khz * ndiv, divisor);
}
static int dsi_pll_get_params(int clkin_khz, int clkout_khz,
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index d394a03632c4..735c9081202a 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -791,9 +791,8 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int ltdc_encoder_init(struct drm_device *ddev)
+static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
{
- struct ltdc_device *ldev = ddev->dev_private;
struct drm_encoder *encoder;
int ret;
@@ -807,7 +806,7 @@ static int ltdc_encoder_init(struct drm_device *ddev)
drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
- ret = drm_bridge_attach(encoder, ldev->bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
drm_encoder_cleanup(encoder);
return -EINVAL;
@@ -936,12 +935,9 @@ int ltdc_load(struct drm_device *ddev)
ret = PTR_ERR(bridge);
goto err;
}
- ldev->is_panel_bridge = true;
}
- ldev->bridge = bridge;
-
- ret = ltdc_encoder_init(ddev);
+ ret = ltdc_encoder_init(ddev, bridge);
if (ret) {
DRM_ERROR("Failed to init encoder\n");
goto err;
@@ -972,8 +968,7 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- if (ldev->is_panel_bridge)
- drm_panel_bridge_remove(bridge);
+ drm_panel_bridge_remove(bridge);
clk_disable_unprepare(ldev->pixel_clk);
@@ -986,8 +981,7 @@ void ltdc_unload(struct drm_device *ddev)
DRM_DEBUG_DRIVER("\n");
- if (ldev->is_panel_bridge)
- drm_panel_bridge_remove(ldev->bridge);
+ drm_of_panel_bridge_remove(ddev->dev->of_node, 0, 0);
clk_disable_unprepare(ldev->pixel_clk);
}
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index bc6d6f6419a9..ae437557d715 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -24,8 +24,6 @@ struct ltdc_device {
struct drm_fbdev_cma *fbdev;
void __iomem *regs;
struct clk *pixel_clk; /* lcd pixel clock */
- struct drm_bridge *bridge;
- bool is_panel_bridge;
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
u32 error_status;
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 55b32368f8fb..0c2f8c7facae 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -1,25 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
-sun4i-drm-y += sun4i_drv.o
-sun4i-drm-y += sun4i_framebuffer.o
+sun4i-backend-y += sun4i_backend.o sun4i_layer.o
-sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
-sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
-sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
-sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
+sun4i-drm-y += sun4i_drv.o
+sun4i-drm-y += sun4i_framebuffer.o
-sun4i-tcon-y += sun4i_tcon.o
-sun4i-tcon-y += sun4i_rgb.o
-sun4i-tcon-y += sun4i_dotclock.o
-sun4i-tcon-y += sun4i_crtc.o
+sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
+sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
+sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
+sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
-sun4i-backend-y += sun4i_backend.o sun4i_layer.o
+sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o
-sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o
+sun4i-tcon-y += sun4i_crtc.o
+sun4i-tcon-y += sun4i_dotclock.o
+sun4i-tcon-y += sun4i_tcon.o
+sun4i-tcon-y += sun4i_rgb.o
-obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
-obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
+obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o
+obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
+obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
-obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
-obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
+obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index ec5943627aa5..847eecbe4d14 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -20,6 +20,7 @@
#include <linux/component.h>
#include <linux/list.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/reset.h>
@@ -28,6 +29,11 @@
#include "sun4i_layer.h"
#include "sunxi_engine.h"
+struct sun4i_backend_quirks {
+ /* backend <-> TCON muxing selection done in backend */
+ bool needs_output_muxing;
+};
+
static const u32 sunxi_rgb2yuv_coef[12] = {
0x00000107, 0x00000204, 0x00000064, 0x00000108,
0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
@@ -209,24 +215,20 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
u32 lo_paddr, hi_paddr;
dma_addr_t paddr;
- int bpp;
-
- /* Get the physical address of the buffer in memory */
- gem = drm_fb_cma_get_gem_obj(fb, 0);
-
- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
-
- /* Compute the start of the displayed memory */
- bpp = fb->format->cpp[0];
- paddr = gem->paddr + fb->offsets[0];
- paddr += (state->src_x >> 16) * bpp;
- paddr += (state->src_y >> 16) * fb->pitches[0];
+ /* Get the start of the displayed memory */
+ paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ /*
+ * backend DMA accesses DRAM directly, bypassing the system
+ * bus. As such, the address range is different and the buffer
+ * address needs to be corrected.
+ */
+ paddr -= PHYS_OFFSET;
+
/* Write the 32 lower bits of the address (in bits) */
lo_paddr = paddr << 3;
DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
@@ -349,6 +351,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_backend *backend;
+ const struct sun4i_backend_quirks *quirks;
struct resource *res;
void __iomem *regs;
int i, ret;
@@ -369,13 +372,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (IS_ERR(regs))
return PTR_ERR(regs);
- backend->engine.regs = devm_regmap_init_mmio(dev, regs,
- &sun4i_backend_regmap_config);
- if (IS_ERR(backend->engine.regs)) {
- dev_err(dev, "Couldn't create the backend regmap\n");
- return PTR_ERR(backend->engine.regs);
- }
-
backend->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(backend->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
@@ -421,9 +417,23 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
}
}
+ backend->engine.regs = devm_regmap_init_mmio(dev, regs,
+ &sun4i_backend_regmap_config);
+ if (IS_ERR(backend->engine.regs)) {
+ dev_err(dev, "Couldn't create the backend regmap\n");
+ return PTR_ERR(backend->engine.regs);
+ }
+
list_add_tail(&backend->engine.list, &drv->engine_list);
- /* Reset the registers */
+ /*
+ * Many of the backend's layer configuration registers have
+ * undefined default values. This poses a risk as we use
+ * regmap_update_bits in some places, and don't overwrite
+ * the whole register.
+ *
+ * Clear the registers here to have something predictable.
+ */
for (i = 0x800; i < 0x1000; i += 4)
regmap_write(backend->engine.regs, i, 0);
@@ -436,6 +446,27 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
SUN4I_BACKEND_MODCTL_DEBE_EN |
SUN4I_BACKEND_MODCTL_START_CTL);
+ /* Set output selection if needed */
+ quirks = of_device_get_match_data(dev);
+ if (quirks->needs_output_muxing) {
+ /*
+ * We assume there is no dynamic muxing of backends
+ * and TCONs, so we select the backend with same ID.
+ *
+ * While dynamic selection might be interesting, since
+ * the CRTC is tied to the TCON, while the layers are
+ * tied to the backends, this means, we will need to
+ * switch between groups of layers. There might not be
+ * a way to represent this constraint in DRM.
+ */
+ regmap_update_bits(backend->engine.regs,
+ SUN4I_BACKEND_MODCTL_REG,
+ SUN4I_BACKEND_MODCTL_OUT_SEL,
+ (backend->engine.id
+ ? SUN4I_BACKEND_MODCTL_OUT_LCD1
+ : SUN4I_BACKEND_MODCTL_OUT_LCD0));
+ }
+
return 0;
err_disable_ram_clk:
@@ -483,10 +514,44 @@ static int sun4i_backend_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_backend_quirks sun4i_backend_quirks = {
+ .needs_output_muxing = true,
+};
+
+static const struct sun4i_backend_quirks sun5i_backend_quirks = {
+};
+
+static const struct sun4i_backend_quirks sun6i_backend_quirks = {
+};
+
+static const struct sun4i_backend_quirks sun7i_backend_quirks = {
+ .needs_output_muxing = true,
+};
+
+static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
+};
+
static const struct of_device_id sun4i_backend_of_table[] = {
- { .compatible = "allwinner,sun5i-a13-display-backend" },
- { .compatible = "allwinner,sun6i-a31-display-backend" },
- { .compatible = "allwinner,sun8i-a33-display-backend" },
+ {
+ .compatible = "allwinner,sun4i-a10-display-backend",
+ .data = &sun4i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-display-backend",
+ .data = &sun5i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun6i-a31-display-backend",
+ .data = &sun6i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-display-backend",
+ .data = &sun7i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun8i-a33-display-backend",
+ .data = &sun8i_a33_backend_quirks,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 21945af67a9d..ac3cc029f5cd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -25,7 +25,8 @@
#define SUN4I_BACKEND_MODCTL_LINE_SEL BIT(29)
#define SUN4I_BACKEND_MODCTL_ITLMOD_EN BIT(28)
#define SUN4I_BACKEND_MODCTL_OUT_SEL GENMASK(22, 20)
-#define SUN4I_BACKEND_MODCTL_OUT_LCD (0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD0 (0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD1 (1 << 20)
#define SUN4I_BACKEND_MODCTL_OUT_FE0 (6 << 20)
#define SUN4I_BACKEND_MODCTL_OUT_FE1 (7 << 20)
#define SUN4I_BACKEND_MODCTL_HWC_EN BIT(16)
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index d097c6f93ad0..5decae0069d0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -30,6 +30,22 @@
#include "sunxi_engine.h"
#include "sun4i_tcon.h"
+/*
+ * While this isn't really working in the DRM theory, in practice we
+ * can only ever have one encoder per TCON since we have a mux in our
+ * TCON.
+ */
+static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -72,11 +88,12 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
- sun4i_tcon_disable(scrtc->tcon);
+ sun4i_tcon_set_status(scrtc->tcon, encoder, false);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
@@ -90,11 +107,21 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
- sun4i_tcon_enable(scrtc->tcon);
+ sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+}
+
+static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+
+ sun4i_tcon_mode_set(scrtc->tcon, encoder, mode);
}
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
@@ -102,6 +129,7 @@ static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
.atomic_flush = sun4i_crtc_atomic_flush,
.atomic_enable = sun4i_crtc_atomic_enable,
.atomic_disable = sun4i_crtc_atomic_disable,
+ .mode_set_nofb = sun4i_crtc_mode_set_nofb,
};
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index ace59651892f..75c76cdd82bc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -11,6 +11,7 @@
*/
#include <linux/component.h>
+#include <linux/kfifo.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
@@ -106,11 +107,6 @@ static int sun4i_drv_bind(struct device *dev)
goto free_drm;
}
- /* drm_vblank_init calls kcalloc, which can fail */
- ret = drm_vblank_init(drm, 1);
- if (ret)
- goto free_mem_region;
-
drm_mode_config_init(drm);
ret = component_bind_all(drm->dev, drm);
@@ -119,6 +115,11 @@ static int sun4i_drv_bind(struct device *dev)
goto cleanup_mode_config;
}
+ /* drm_vblank_init calls kcalloc, which can fail */
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret)
+ goto free_mem_region;
+
drm->irq_enabled = true;
/* Remove early framebuffers (ie. simplefb) */
@@ -177,16 +178,20 @@ static bool sun4i_drv_node_is_connector(struct device_node *node)
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
- return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ return of_device_is_compatible(node, "allwinner,sun4i-a10-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
- return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ return of_device_is_compatible(node, "allwinner,sun4i-a10-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun7i-a20-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-v3s-tcon");
}
@@ -200,7 +205,33 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
+/*
+ * The encoder drivers use drm_of_find_possible_crtcs to get upstream
+ * crtcs from the device tree using of_graph. For the results to be
+ * correct, encoders must be probed/bound after _all_ crtcs have been
+ * created. The existing code uses a depth first recursive traversal
+ * of the of_graph, which means the encoders downstream of the TCON
+ * get add right after the first TCON. The second TCON or CRTC will
+ * never be properly associated with encoders connected to it.
+ *
+ * Also, in a dual display pipeline setup, both frontends can feed
+ * either backend, and both backends can feed either TCON, we want
+ * all components of the same type to be added before the next type
+ * in the pipeline. Fortunately, the pipelines are perfectly symmetric,
+ * i.e. components of the same type are at the same depth when counted
+ * from the frontend. The only exception is the third pipeline in
+ * the A80 SoC, which we do not support anyway.
+ *
+ * Hence we can use a breadth first search traversal order to add
+ * components. We do not need to check for duplicates. The component
+ * matching system handles this for us.
+ */
+struct endpoint_list {
+ DECLARE_KFIFO(fifo, struct device_node *, 16);
+};
+
static int sun4i_drv_add_endpoints(struct device *dev,
+ struct endpoint_list *list,
struct component_match **match,
struct device_node *node)
{
@@ -264,10 +295,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
}
}
- /* Walk down our tree */
- count += sun4i_drv_add_endpoints(dev, match, remote);
-
- of_node_put(remote);
+ kfifo_put(&list->fifo, remote);
}
return count;
@@ -276,8 +304,11 @@ static int sun4i_drv_add_endpoints(struct device *dev,
static int sun4i_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
- struct device_node *np = pdev->dev.of_node;
- int i, count = 0;
+ struct device_node *np = pdev->dev.of_node, *endpoint;
+ struct endpoint_list list;
+ int i, ret, count = 0;
+
+ INIT_KFIFO(list.fifo);
for (i = 0;; i++) {
struct device_node *pipeline = of_parse_phandle(np,
@@ -286,12 +317,19 @@ static int sun4i_drv_probe(struct platform_device *pdev)
if (!pipeline)
break;
- count += sun4i_drv_add_endpoints(&pdev->dev, &match,
- pipeline);
- of_node_put(pipeline);
+ kfifo_put(&list.fifo, pipeline);
+ }
+
+ while (kfifo_get(&list.fifo, &endpoint)) {
+ /* process this endpoint */
+ ret = sun4i_drv_add_endpoints(&pdev->dev, &list, &match,
+ endpoint);
+
+ /* sun4i_drv_add_endpoints can fail to allocate memory */
+ if (ret < 0)
+ return ret;
- DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
- count, i);
+ count += ret;
}
if (count)
@@ -308,10 +346,12 @@ static int sun4i_drv_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_drv_of_table[] = {
+ { .compatible = "allwinner,sun4i-a10-display-engine" },
{ .compatible = "allwinner,sun5i-a10s-display-engine" },
{ .compatible = "allwinner,sun5i-a13-display-engine" },
{ .compatible = "allwinner,sun6i-a31-display-engine" },
{ .compatible = "allwinner,sun6i-a31s-display-engine" },
+ { .compatible = "allwinner,sun7i-a20-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 9872e0fc03b0..2992f0a6b349 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
#include "sun4i_drv.h"
@@ -28,7 +29,7 @@ static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
.output_poll_changed = sun4i_de_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
};
struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index a1f8cba251a2..b685ee11623d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -14,6 +14,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <linux/regmap.h>
#include <media/cec-pin.h>
@@ -58,16 +59,24 @@
#define SUN4I_HDMI_PAD_CTRL0_TXEN BIT(23)
#define SUN4I_HDMI_PAD_CTRL1_REG 0x204
+#define SUN4I_HDMI_PAD_CTRL1_UNKNOWN BIT(24) /* set on A31 */
#define SUN4I_HDMI_PAD_CTRL1_AMP_OPT BIT(23)
#define SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT BIT(22)
#define SUN4I_HDMI_PAD_CTRL1_EMP_OPT BIT(20)
#define SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT BIT(19)
+#define SUN4I_HDMI_PAD_CTRL1_PWSCK BIT(18)
+#define SUN4I_HDMI_PAD_CTRL1_PWSDT BIT(17)
#define SUN4I_HDMI_PAD_CTRL1_REG_DEN BIT(15)
#define SUN4I_HDMI_PAD_CTRL1_REG_DENCK BIT(14)
#define SUN4I_HDMI_PAD_CTRL1_REG_EMP(n) (((n) & 7) << 10)
#define SUN4I_HDMI_PAD_CTRL1_HALVE_CLK BIT(6)
#define SUN4I_HDMI_PAD_CTRL1_REG_AMP(n) (((n) & 7) << 3)
+/* These bits seem to invert the TMDS data channels */
+#define SUN4I_HDMI_PAD_CTRL1_INVERT_R BIT(2)
+#define SUN4I_HDMI_PAD_CTRL1_INVERT_G BIT(1)
+#define SUN4I_HDMI_PAD_CTRL1_INVERT_B BIT(0)
+
#define SUN4I_HDMI_PLL_CTRL_REG 0x208
#define SUN4I_HDMI_PLL_CTRL_PLL_EN BIT(31)
#define SUN4I_HDMI_PLL_CTRL_BWS BIT(30)
@@ -152,21 +161,106 @@
#define SUN4I_HDMI_DDC_FIFO_SIZE 16
+/* A31 specific */
+#define SUN6I_HDMI_DDC_CTRL_REG 0x500
+#define SUN6I_HDMI_DDC_CTRL_RESET BIT(31)
+#define SUN6I_HDMI_DDC_CTRL_START_CMD BIT(27)
+#define SUN6I_HDMI_DDC_CTRL_SDA_ENABLE BIT(6)
+#define SUN6I_HDMI_DDC_CTRL_SCL_ENABLE BIT(4)
+#define SUN6I_HDMI_DDC_CTRL_ENABLE BIT(0)
+
+#define SUN6I_HDMI_DDC_CMD_REG 0x508
+#define SUN6I_HDMI_DDC_CMD_BYTE_COUNT(count) ((count) << 16)
+/* command types in lower 3 bits are the same as sun4i */
+
+#define SUN6I_HDMI_DDC_ADDR_REG 0x50c
+#define SUN6I_HDMI_DDC_ADDR_SEGMENT(seg) (((seg) & 0xff) << 24)
+#define SUN6I_HDMI_DDC_ADDR_EDDC(addr) (((addr) & 0xff) << 16)
+#define SUN6I_HDMI_DDC_ADDR_OFFSET(off) (((off) & 0xff) << 8)
+#define SUN6I_HDMI_DDC_ADDR_SLAVE(addr) (((addr) & 0xff) << 1)
+
+#define SUN6I_HDMI_DDC_INT_STATUS_REG 0x514
+#define SUN6I_HDMI_DDC_INT_STATUS_TIMEOUT BIT(8)
+/* lower 8 bits are the same as sun4i */
+
+#define SUN6I_HDMI_DDC_FIFO_CTRL_REG 0x518
+#define SUN6I_HDMI_DDC_FIFO_CTRL_CLEAR BIT(15)
+/* lower 9 bits are the same as sun4i */
+
+#define SUN6I_HDMI_DDC_CLK_REG 0x520
+/* DDC CLK bit fields are the same, but the formula is not */
+
+#define SUN6I_HDMI_DDC_FIFO_DATA_REG 0x580
+
enum sun4i_hdmi_pkt_type {
SUN4I_HDMI_PKT_AVI = 2,
SUN4I_HDMI_PKT_END = 15,
};
+struct sun4i_hdmi_variant {
+ bool has_ddc_parent_clk;
+ bool has_reset_control;
+
+ u32 pad_ctrl0_init_val;
+ u32 pad_ctrl1_init_val;
+ u32 pll_ctrl_init_val;
+
+ struct reg_field ddc_clk_reg;
+ u8 ddc_clk_pre_divider;
+ u8 ddc_clk_m_offset;
+
+ u8 tmds_clk_div_offset;
+
+ /* Register fields for I2C adapter */
+ struct reg_field field_ddc_en;
+ struct reg_field field_ddc_start;
+ struct reg_field field_ddc_reset;
+ struct reg_field field_ddc_addr_reg;
+ struct reg_field field_ddc_slave_addr;
+ struct reg_field field_ddc_int_mask;
+ struct reg_field field_ddc_int_status;
+ struct reg_field field_ddc_fifo_clear;
+ struct reg_field field_ddc_fifo_rx_thres;
+ struct reg_field field_ddc_fifo_tx_thres;
+ struct reg_field field_ddc_byte_count;
+ struct reg_field field_ddc_cmd;
+ struct reg_field field_ddc_sda_en;
+ struct reg_field field_ddc_sck_en;
+
+ /* DDC FIFO register offset */
+ u32 ddc_fifo_reg;
+
+ /*
+ * DDC FIFO threshold boundary conditions
+ *
+ * This is used to cope with the threshold boundary condition
+ * being slightly different on sun5i and sun6i.
+ *
+ * On sun5i the threshold is exclusive, i.e. does not include,
+ * the value of the threshold. ( > for RX; < for TX )
+ * On sun6i the threshold is inclusive, i.e. includes, the
+ * value of the threshold. ( >= for RX; <= for TX )
+ */
+ bool ddc_fifo_thres_incl;
+
+ bool ddc_fifo_has_dir;
+};
+
struct sun4i_hdmi {
struct drm_connector connector;
struct drm_encoder encoder;
struct device *dev;
void __iomem *base;
+ struct regmap *regmap;
+
+ /* Reset control */
+ struct reset_control *reset;
/* Parent clocks */
struct clk *bus_clk;
struct clk *mod_clk;
+ struct clk *ddc_parent_clk;
struct clk *pll0_clk;
struct clk *pll1_clk;
@@ -176,10 +270,28 @@ struct sun4i_hdmi {
struct i2c_adapter *i2c;
+ /* Regmap fields for I2C adapter */
+ struct regmap_field *field_ddc_en;
+ struct regmap_field *field_ddc_start;
+ struct regmap_field *field_ddc_reset;
+ struct regmap_field *field_ddc_addr_reg;
+ struct regmap_field *field_ddc_slave_addr;
+ struct regmap_field *field_ddc_int_mask;
+ struct regmap_field *field_ddc_int_status;
+ struct regmap_field *field_ddc_fifo_clear;
+ struct regmap_field *field_ddc_fifo_rx_thres;
+ struct regmap_field *field_ddc_fifo_tx_thres;
+ struct regmap_field *field_ddc_byte_count;
+ struct regmap_field *field_ddc_cmd;
+ struct regmap_field *field_ddc_sda_en;
+ struct regmap_field *field_ddc_sck_en;
+
struct sun4i_drv *drv;
bool hdmi_monitor;
struct cec_adapter *cec_adap;
+
+ const struct sun4i_hdmi_variant *variant;
};
int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *clk);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 4692e8c345ed..e826da34e919 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -11,13 +11,16 @@
*/
#include <linux/clk-provider.h>
+#include <linux/regmap.h>
-#include "sun4i_tcon.h"
#include "sun4i_hdmi.h"
struct sun4i_ddc {
struct clk_hw hw;
struct sun4i_hdmi *hdmi;
+ struct regmap_field *reg;
+ u8 pre_div;
+ u8 m_offset;
};
static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw)
@@ -27,6 +30,8 @@ static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw)
static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long parent_rate,
+ const u8 pre_div,
+ const u8 m_offset,
u8 *m, u8 *n)
{
unsigned long best_rate = 0;
@@ -36,7 +41,8 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
- tmp_rate = (((parent_rate / 2) / 10) >> _n) / (_m + 1);
+ tmp_rate = (((parent_rate / pre_div) / 10) >> _n) /
+ (_m + m_offset);
if (tmp_rate > rate)
continue;
@@ -60,21 +66,25 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- return sun4i_ddc_calc_divider(rate, *prate, NULL, NULL);
+ struct sun4i_ddc *ddc = hw_to_ddc(hw);
+
+ return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div,
+ ddc->m_offset, NULL, NULL);
}
static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
- u32 reg;
+ unsigned int reg;
u8 m, n;
- reg = readl(ddc->hdmi->base + SUN4I_HDMI_DDC_CLK_REG);
- m = (reg >> 3) & 0x7;
+ regmap_field_read(ddc->reg, &reg);
+ m = (reg >> 3) & 0xf;
n = reg & 0x7;
- return (((parent_rate / 2) / 10) >> n) / (m + 1);
+ return (((parent_rate / ddc->pre_div) / 10) >> n) /
+ (m + ddc->m_offset);
}
static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -83,10 +93,12 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
struct sun4i_ddc *ddc = hw_to_ddc(hw);
u8 div_m, div_n;
- sun4i_ddc_calc_divider(rate, parent_rate, &div_m, &div_n);
+ sun4i_ddc_calc_divider(rate, parent_rate, ddc->pre_div,
+ ddc->m_offset, &div_m, &div_n);
- writel(SUN4I_HDMI_DDC_CLK_M(div_m) | SUN4I_HDMI_DDC_CLK_N(div_n),
- ddc->hdmi->base + SUN4I_HDMI_DDC_CLK_REG);
+ regmap_field_write(ddc->reg,
+ SUN4I_HDMI_DDC_CLK_M(div_m) |
+ SUN4I_HDMI_DDC_CLK_N(div_n));
return 0;
}
@@ -111,6 +123,11 @@ int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *parent)
if (!ddc)
return -ENOMEM;
+ ddc->reg = devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->ddc_clk_reg);
+ if (IS_ERR(ddc->reg))
+ return PTR_ERR(ddc->reg);
+
init.name = "hdmi-ddc";
init.ops = &sun4i_ddc_ops;
init.parent_names = &parent_name;
@@ -118,6 +135,8 @@ int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *parent)
ddc->hdmi = hdmi;
ddc->hw.init = &init;
+ ddc->pre_div = hdmi->variant->ddc_clk_pre_divider;
+ ddc->m_offset = hdmi->variant->ddc_clk_m_offset;
hdmi->ddc_clk = devm_clk_register(hdmi->dev, &ddc->hw);
if (IS_ERR(hdmi->ddc_clk))
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 3cf1a6932fac..dda904ec0534 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -20,14 +20,16 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_hdmi.h"
-#include "sun4i_tcon.h"
static inline struct sun4i_hdmi *
drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
@@ -83,8 +85,6 @@ static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder,
static void sun4i_hdmi_disable(struct drm_encoder *encoder)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
u32 val;
DRM_DEBUG_DRIVER("Disabling the HDMI Output\n");
@@ -92,22 +92,16 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
-
- sun4i_tcon_channel_disable(tcon, 1);
}
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
u32 val = 0;
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
- sun4i_tcon_channel_enable(tcon, 1);
-
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
@@ -125,15 +119,9 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
unsigned int x, y;
u32 val;
- sun4i_tcon1_mode_set(tcon, mode);
- sun4i_tcon_set_mux(tcon, 1, encoder);
-
- clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000);
clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000);
@@ -141,6 +129,22 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
writel(SUN4I_HDMI_UNKNOWN_INPUT_SYNC,
hdmi->base + SUN4I_HDMI_UNKNOWN_REG);
+ /*
+ * Setup output pad (?) controls
+ *
+ * This is done here instead of at probe/bind time because
+ * the controller seems to toggle some of the bits on its own.
+ *
+ * We can't just initialize the register there, we need to
+ * protect the clock bits that have already been read out and
+ * cached by the clock framework.
+ */
+ val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
+ val &= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
+ val |= hdmi->variant->pad_ctrl1_init_val;
+ writel(val, hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
+ val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
+
/* Setup timing registers */
writel(SUN4I_HDMI_VID_TIMING_X(mode->hdisplay) |
SUN4I_HDMI_VID_TIMING_Y(mode->vdisplay),
@@ -267,6 +271,176 @@ static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = {
};
#endif
+#define SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0))
+#define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0))
+
+/* Only difference from sun5i is AMP is 4 instead of 6 */
+static const struct sun4i_hdmi_variant sun4i_variant = {
+ .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
+ SUN4I_HDMI_PAD_CTRL0_CKEN |
+ SUN4I_HDMI_PAD_CTRL0_PWENG |
+ SUN4I_HDMI_PAD_CTRL0_PWEND |
+ SUN4I_HDMI_PAD_CTRL0_PWENC |
+ SUN4I_HDMI_PAD_CTRL0_LDODEN |
+ SUN4I_HDMI_PAD_CTRL0_LDOCEN |
+ SUN4I_HDMI_PAD_CTRL0_BIASEN,
+ .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(4) |
+ SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
+ SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
+ SUN4I_HDMI_PAD_CTRL1_REG_DEN |
+ SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
+ .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
+ SUN4I_HDMI_PLL_CTRL_CS(7) |
+ SUN4I_HDMI_PLL_CTRL_CP_S(15) |
+ SUN4I_HDMI_PLL_CTRL_S(7) |
+ SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
+ SUN4I_HDMI_PLL_CTRL_SDIV2 |
+ SUN4I_HDMI_PLL_CTRL_LDO2_EN |
+ SUN4I_HDMI_PLL_CTRL_LDO1_EN |
+ SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
+ SUN4I_HDMI_PLL_CTRL_BWS |
+ SUN4I_HDMI_PLL_CTRL_PLL_EN,
+
+ .ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
+ .ddc_clk_pre_divider = 2,
+ .ddc_clk_m_offset = 1,
+
+ .field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
+ .field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
+ .field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
+ .field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
+ .field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
+ .field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
+ .field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
+ .field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
+ .field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
+ .field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
+ .field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
+ .field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
+ .field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
+
+ .ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
+ .ddc_fifo_has_dir = true,
+};
+
+static const struct sun4i_hdmi_variant sun5i_variant = {
+ .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
+ SUN4I_HDMI_PAD_CTRL0_CKEN |
+ SUN4I_HDMI_PAD_CTRL0_PWENG |
+ SUN4I_HDMI_PAD_CTRL0_PWEND |
+ SUN4I_HDMI_PAD_CTRL0_PWENC |
+ SUN4I_HDMI_PAD_CTRL0_LDODEN |
+ SUN4I_HDMI_PAD_CTRL0_LDOCEN |
+ SUN4I_HDMI_PAD_CTRL0_BIASEN,
+ .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
+ SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
+ SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
+ SUN4I_HDMI_PAD_CTRL1_REG_DEN |
+ SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
+ .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
+ SUN4I_HDMI_PLL_CTRL_CS(7) |
+ SUN4I_HDMI_PLL_CTRL_CP_S(15) |
+ SUN4I_HDMI_PLL_CTRL_S(7) |
+ SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
+ SUN4I_HDMI_PLL_CTRL_SDIV2 |
+ SUN4I_HDMI_PLL_CTRL_LDO2_EN |
+ SUN4I_HDMI_PLL_CTRL_LDO1_EN |
+ SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
+ SUN4I_HDMI_PLL_CTRL_BWS |
+ SUN4I_HDMI_PLL_CTRL_PLL_EN,
+
+ .ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
+ .ddc_clk_pre_divider = 2,
+ .ddc_clk_m_offset = 1,
+
+ .field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
+ .field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
+ .field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
+ .field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
+ .field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
+ .field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
+ .field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
+ .field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
+ .field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
+ .field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
+ .field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
+ .field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
+ .field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
+
+ .ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
+ .ddc_fifo_has_dir = true,
+};
+
+static const struct sun4i_hdmi_variant sun6i_variant = {
+ .has_ddc_parent_clk = true,
+ .has_reset_control = true,
+ .pad_ctrl0_init_val = 0xff |
+ SUN4I_HDMI_PAD_CTRL0_TXEN |
+ SUN4I_HDMI_PAD_CTRL0_CKEN |
+ SUN4I_HDMI_PAD_CTRL0_PWENG |
+ SUN4I_HDMI_PAD_CTRL0_PWEND |
+ SUN4I_HDMI_PAD_CTRL0_PWENC |
+ SUN4I_HDMI_PAD_CTRL0_LDODEN |
+ SUN4I_HDMI_PAD_CTRL0_LDOCEN,
+ .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
+ SUN4I_HDMI_PAD_CTRL1_REG_EMP(4) |
+ SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
+ SUN4I_HDMI_PAD_CTRL1_REG_DEN |
+ SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_PWSDT |
+ SUN4I_HDMI_PAD_CTRL1_PWSCK |
+ SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_UNKNOWN,
+ .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
+ SUN4I_HDMI_PLL_CTRL_CS(3) |
+ SUN4I_HDMI_PLL_CTRL_CP_S(10) |
+ SUN4I_HDMI_PLL_CTRL_S(4) |
+ SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
+ SUN4I_HDMI_PLL_CTRL_SDIV2 |
+ SUN4I_HDMI_PLL_CTRL_LDO2_EN |
+ SUN4I_HDMI_PLL_CTRL_LDO1_EN |
+ SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
+ SUN4I_HDMI_PLL_CTRL_PLL_EN,
+
+ .ddc_clk_reg = REG_FIELD(SUN6I_HDMI_DDC_CLK_REG, 0, 6),
+ .ddc_clk_pre_divider = 1,
+ .ddc_clk_m_offset = 2,
+
+ .tmds_clk_div_offset = 1,
+
+ .field_ddc_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 0, 0),
+ .field_ddc_start = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 27, 27),
+ .field_ddc_reset = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 31, 31),
+ .field_ddc_addr_reg = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 31),
+ .field_ddc_slave_addr = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 7),
+ .field_ddc_int_status = REG_FIELD(SUN6I_HDMI_DDC_INT_STATUS_REG, 0, 8),
+ .field_ddc_fifo_clear = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 18, 18),
+ .field_ddc_fifo_rx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
+ .field_ddc_fifo_tx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
+ .field_ddc_byte_count = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 16, 25),
+ .field_ddc_cmd = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 0, 2),
+ .field_ddc_sda_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 6, 6),
+ .field_ddc_sck_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 4, 4),
+
+ .ddc_fifo_reg = SUN6I_HDMI_DDC_FIFO_DATA_REG,
+ .ddc_fifo_thres_incl = true,
+};
+
+static const struct regmap_config sun4i_hdmi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x580,
+};
+
static int sun4i_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -285,6 +459,10 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drv = drv;
+ hdmi->variant = of_device_get_match_data(dev);
+ if (!hdmi->variant)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->base = devm_ioremap_resource(dev, res);
if (IS_ERR(hdmi->base)) {
@@ -292,10 +470,25 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->base);
}
+ if (hdmi->variant->has_reset_control) {
+ hdmi->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(hdmi->reset)) {
+ dev_err(dev, "Couldn't get the HDMI reset control\n");
+ return PTR_ERR(hdmi->reset);
+ }
+
+ ret = reset_control_deassert(hdmi->reset);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert HDMI reset\n");
+ return ret;
+ }
+ }
+
hdmi->bus_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(hdmi->bus_clk)) {
dev_err(dev, "Couldn't get the HDMI bus clock\n");
- return PTR_ERR(hdmi->bus_clk);
+ ret = PTR_ERR(hdmi->bus_clk);
+ goto err_assert_reset;
}
clk_prepare_enable(hdmi->bus_clk);
@@ -321,45 +514,37 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_mod_clk;
}
+ hdmi->regmap = devm_regmap_init_mmio(dev, hdmi->base,
+ &sun4i_hdmi_regmap_config);
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(dev, "Couldn't create HDMI encoder regmap\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
ret = sun4i_tmds_create(hdmi);
if (ret) {
dev_err(dev, "Couldn't create the TMDS clock\n");
goto err_disable_mod_clk;
}
+ if (hdmi->variant->has_ddc_parent_clk) {
+ hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
+ if (IS_ERR(hdmi->ddc_parent_clk)) {
+ dev_err(dev, "Couldn't get the HDMI DDC clock\n");
+ return PTR_ERR(hdmi->ddc_parent_clk);
+ }
+ } else {
+ hdmi->ddc_parent_clk = hdmi->tmds_clk;
+ }
+
writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
- writel(SUN4I_HDMI_PAD_CTRL0_TXEN | SUN4I_HDMI_PAD_CTRL0_CKEN |
- SUN4I_HDMI_PAD_CTRL0_PWENG | SUN4I_HDMI_PAD_CTRL0_PWEND |
- SUN4I_HDMI_PAD_CTRL0_PWENC | SUN4I_HDMI_PAD_CTRL0_LDODEN |
- SUN4I_HDMI_PAD_CTRL0_LDOCEN | SUN4I_HDMI_PAD_CTRL0_BIASEN,
+ writel(hdmi->variant->pad_ctrl0_init_val,
hdmi->base + SUN4I_HDMI_PAD_CTRL0_REG);
- /*
- * We can't just initialize the register there, we need to
- * protect the clock bits that have already been read out and
- * cached by the clock framework.
- */
- reg = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
- reg &= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
- reg |= SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
- SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
- SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
- SUN4I_HDMI_PAD_CTRL1_REG_DEN |
- SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
- SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
- SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
- SUN4I_HDMI_PAD_CTRL1_AMP_OPT;
- writel(reg, hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
-
reg = readl(hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg &= SUN4I_HDMI_PLL_CTRL_DIV_MASK;
- reg |= SUN4I_HDMI_PLL_CTRL_VCO_S(8) | SUN4I_HDMI_PLL_CTRL_CS(7) |
- SUN4I_HDMI_PLL_CTRL_CP_S(15) | SUN4I_HDMI_PLL_CTRL_S(7) |
- SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) | SUN4I_HDMI_PLL_CTRL_SDIV2 |
- SUN4I_HDMI_PLL_CTRL_LDO2_EN | SUN4I_HDMI_PLL_CTRL_LDO1_EN |
- SUN4I_HDMI_PLL_CTRL_HV_IS_33 | SUN4I_HDMI_PLL_CTRL_BWS |
- SUN4I_HDMI_PLL_CTRL_PLL_EN;
+ reg |= hdmi->variant->pll_ctrl_init_val;
writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
ret = sun4i_hdmi_i2c_create(dev, hdmi);
@@ -429,6 +614,8 @@ err_disable_mod_clk:
clk_disable_unprepare(hdmi->mod_clk);
err_disable_bus_clk:
clk_disable_unprepare(hdmi->bus_clk);
+err_assert_reset:
+ reset_control_assert(hdmi->reset);
return ret;
}
@@ -463,7 +650,9 @@ static int sun4i_hdmi_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_hdmi_of_table[] = {
- { .compatible = "allwinner,sun5i-a10s-hdmi" },
+ { .compatible = "allwinner,sun4i-a10-hdmi", .data = &sun4i_variant, },
+ { .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, },
+ { .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
index 2e42d09ab42e..58e9d37e8c17 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -25,8 +25,6 @@
/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */
#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX
-/* FIFO request bit is set when FIFO level is below TX_THRESHOLD during write */
-#define TX_THRESHOLD 1
static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
{
@@ -39,27 +37,36 @@ static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE;
u32 reg;
+ /*
+ * If threshold is inclusive, then the FIFO may only have
+ * RX_THRESHOLD number of bytes, instead of RX_THRESHOLD + 1.
+ */
+ int read_len = RX_THRESHOLD +
+ (hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
- /* Limit transfer length by FIFO threshold */
- len = min_t(int, len, read ? (RX_THRESHOLD + 1) :
- (SUN4I_HDMI_DDC_FIFO_SIZE - TX_THRESHOLD + 1));
+ /*
+ * Limit transfer length by FIFO threshold or FIFO size.
+ * For TX the threshold is for an empty FIFO.
+ */
+ len = min_t(int, len, read ? read_len : SUN4I_HDMI_DDC_FIFO_SIZE);
/* Wait until error, FIFO request bit set or transfer complete */
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG, reg,
- reg & mask, len * byte_time_ns, 100000))
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_int_status, reg,
+ reg & mask, len * byte_time_ns,
+ 100000))
return -ETIMEDOUT;
if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK)
return -EIO;
if (read)
- readsb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+ readsb(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
else
- writesb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+ writesb(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
- /* Clear FIFO request bit */
- writel(SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST,
- hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ /* Clear FIFO request bit by forcing a write to that bit */
+ regmap_field_force_write(hdmi->field_ddc_int_status,
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST);
return len;
}
@@ -70,50 +77,52 @@ static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
u32 reg;
/* Set FIFO direction */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
- reg |= (msg->flags & I2C_M_RD) ?
- SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
- SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
- writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ if (hdmi->variant->ddc_fifo_has_dir) {
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
+ reg |= (msg->flags & I2C_M_RD) ?
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
+ writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ }
+
+ /* Clear address register (not cleared by soft reset) */
+ regmap_field_write(hdmi->field_ddc_addr_reg, 0);
/* Set I2C address */
- writel(SUN4I_HDMI_DDC_ADDR_SLAVE(msg->addr),
- hdmi->base + SUN4I_HDMI_DDC_ADDR_REG);
-
- /* Set FIFO RX/TX thresholds and clear FIFO */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
- reg |= SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR;
- reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK;
- reg |= SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(RX_THRESHOLD);
- reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK;
- reg |= SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(TX_THRESHOLD);
- writel(reg, hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG,
- reg,
- !(reg & SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR),
- 100, 2000))
+ regmap_field_write(hdmi->field_ddc_slave_addr, msg->addr);
+
+ /*
+ * Set FIFO RX/TX thresholds and clear FIFO
+ *
+ * If threshold is inclusive, we can set the TX threshold to
+ * 0 instead of 1.
+ */
+ regmap_field_write(hdmi->field_ddc_fifo_tx_thres,
+ hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
+ regmap_field_write(hdmi->field_ddc_fifo_rx_thres, RX_THRESHOLD);
+ regmap_field_write(hdmi->field_ddc_fifo_clear, 1);
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_fifo_clear,
+ reg, !reg, 100, 2000))
return -EIO;
/* Set transfer length */
- writel(msg->len, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG);
+ regmap_field_write(hdmi->field_ddc_byte_count, msg->len);
/* Set command */
- writel(msg->flags & I2C_M_RD ?
- SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
- SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE,
- hdmi->base + SUN4I_HDMI_DDC_CMD_REG);
+ regmap_field_write(hdmi->field_ddc_cmd,
+ msg->flags & I2C_M_RD ?
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE);
- /* Clear interrupt status bits */
- writel(SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
- SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
- SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE,
- hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ /* Clear interrupt status bits by forcing a write */
+ regmap_field_force_write(hdmi->field_ddc_int_status,
+ SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
+ SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE);
/* Start command */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ regmap_field_write(hdmi->field_ddc_start, 1);
/* Transfer bytes */
for (i = 0; i < msg->len; i += len) {
@@ -124,14 +133,12 @@ static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
}
/* Wait for command to finish */
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG,
- reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD),
- 100, 100000))
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_start,
+ reg, !reg, 100, 100000))
return -EIO;
/* Check for errors */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ regmap_field_read(hdmi->field_ddc_int_status, &reg);
if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) ||
!(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) {
return -EIO;
@@ -154,20 +161,21 @@ static int sun4i_hdmi_i2c_xfer(struct i2c_adapter *adap,
return -EINVAL;
}
+ /* DDC clock needs to be enabled for the module to work */
+ clk_prepare_enable(hdmi->ddc_clk);
+ clk_set_rate(hdmi->ddc_clk, 100000);
+
/* Reset I2C controller */
- writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_RESET),
- 100, 2000))
+ regmap_field_write(hdmi->field_ddc_en, 1);
+ regmap_field_write(hdmi->field_ddc_reset, 1);
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_reset,
+ reg, !reg, 100, 2000)) {
+ clk_disable_unprepare(hdmi->ddc_clk);
return -EIO;
+ }
- writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE |
- SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE,
- hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG);
-
- clk_prepare_enable(hdmi->ddc_clk);
- clk_set_rate(hdmi->ddc_clk, 100000);
+ regmap_field_write(hdmi->field_ddc_sck_en, 1);
+ regmap_field_write(hdmi->field_ddc_sda_en, 1);
for (i = 0; i < num; i++) {
err = xfer_msg(hdmi, &msgs[i]);
@@ -191,12 +199,105 @@ static const struct i2c_algorithm sun4i_hdmi_i2c_algorithm = {
.functionality = sun4i_hdmi_i2c_func,
};
+static int sun4i_hdmi_init_regmap_fields(struct sun4i_hdmi *hdmi)
+{
+ hdmi->field_ddc_en =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_en);
+ if (IS_ERR(hdmi->field_ddc_en))
+ return PTR_ERR(hdmi->field_ddc_en);
+
+ hdmi->field_ddc_start =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_start);
+ if (IS_ERR(hdmi->field_ddc_start))
+ return PTR_ERR(hdmi->field_ddc_start);
+
+ hdmi->field_ddc_reset =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_reset);
+ if (IS_ERR(hdmi->field_ddc_reset))
+ return PTR_ERR(hdmi->field_ddc_reset);
+
+ hdmi->field_ddc_addr_reg =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_addr_reg);
+ if (IS_ERR(hdmi->field_ddc_addr_reg))
+ return PTR_ERR(hdmi->field_ddc_addr_reg);
+
+ hdmi->field_ddc_slave_addr =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_slave_addr);
+ if (IS_ERR(hdmi->field_ddc_slave_addr))
+ return PTR_ERR(hdmi->field_ddc_slave_addr);
+
+ hdmi->field_ddc_int_mask =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_int_mask);
+ if (IS_ERR(hdmi->field_ddc_int_mask))
+ return PTR_ERR(hdmi->field_ddc_int_mask);
+
+ hdmi->field_ddc_int_status =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_int_status);
+ if (IS_ERR(hdmi->field_ddc_int_status))
+ return PTR_ERR(hdmi->field_ddc_int_status);
+
+ hdmi->field_ddc_fifo_clear =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_fifo_clear);
+ if (IS_ERR(hdmi->field_ddc_fifo_clear))
+ return PTR_ERR(hdmi->field_ddc_fifo_clear);
+
+ hdmi->field_ddc_fifo_rx_thres =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_fifo_rx_thres);
+ if (IS_ERR(hdmi->field_ddc_fifo_rx_thres))
+ return PTR_ERR(hdmi->field_ddc_fifo_rx_thres);
+
+ hdmi->field_ddc_fifo_tx_thres =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_fifo_tx_thres);
+ if (IS_ERR(hdmi->field_ddc_fifo_tx_thres))
+ return PTR_ERR(hdmi->field_ddc_fifo_tx_thres);
+
+ hdmi->field_ddc_byte_count =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_byte_count);
+ if (IS_ERR(hdmi->field_ddc_byte_count))
+ return PTR_ERR(hdmi->field_ddc_byte_count);
+
+ hdmi->field_ddc_cmd =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_cmd);
+ if (IS_ERR(hdmi->field_ddc_cmd))
+ return PTR_ERR(hdmi->field_ddc_cmd);
+
+ hdmi->field_ddc_sda_en =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_sda_en);
+ if (IS_ERR(hdmi->field_ddc_sda_en))
+ return PTR_ERR(hdmi->field_ddc_sda_en);
+
+ hdmi->field_ddc_sck_en =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_sck_en);
+ if (IS_ERR(hdmi->field_ddc_sck_en))
+ return PTR_ERR(hdmi->field_ddc_sck_en);
+
+ return 0;
+}
+
int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
{
struct i2c_adapter *adap;
int ret = 0;
- ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk);
+ ret = sun4i_ddc_create(hdmi, hdmi->ddc_parent_clk);
+ if (ret)
+ return ret;
+
+ ret = sun4i_hdmi_init_regmap_fields(hdmi);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index 5cf2527bffc8..dc332ea56f6c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -12,12 +12,13 @@
#include <linux/clk-provider.h>
-#include "sun4i_tcon.h"
#include "sun4i_hdmi.h"
struct sun4i_tmds {
struct clk_hw hw;
struct sun4i_hdmi *hdmi;
+
+ u8 div_offset;
};
static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw)
@@ -28,6 +29,7 @@ static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw)
static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
unsigned long parent_rate,
+ u8 div_offset,
u8 *div,
bool *half)
{
@@ -35,7 +37,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
u8 best_m = 0, m;
bool is_double;
- for (m = 1; m < 16; m++) {
+ for (m = div_offset ?: 1; m < (16 + div_offset); m++) {
u8 d;
for (d = 1; d < 3; d++) {
@@ -67,11 +69,12 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
static int sun4i_tmds_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_hw *parent;
+ struct sun4i_tmds *tmds = hw_to_tmds(hw);
+ struct clk_hw *parent = NULL;
unsigned long best_parent = 0;
unsigned long rate = req->rate;
int best_div = 1, best_half = 1;
- int i, j;
+ int i, j, p;
/*
* We only consider PLL3, since the TCON is very likely to be
@@ -79,32 +82,38 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
* clock, so we should not need to do anything.
*/
- parent = clk_hw_get_parent_by_index(hw, 0);
- if (!parent)
- return -EINVAL;
-
- for (i = 1; i < 3; i++) {
- for (j = 1; j < 16; j++) {
- unsigned long ideal = rate * i * j;
- unsigned long rounded;
-
- rounded = clk_hw_round_rate(parent, ideal);
-
- if (rounded == ideal) {
- best_parent = rounded;
- best_half = i;
- best_div = j;
- goto out;
- }
-
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
- best_parent = rounded;
- best_div = i;
+ for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
+ parent = clk_hw_get_parent_by_index(hw, p);
+ if (!parent)
+ continue;
+
+ for (i = 1; i < 3; i++) {
+ for (j = tmds->div_offset ?: 1;
+ j < (16 + tmds->div_offset); j++) {
+ unsigned long ideal = rate * i * j;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(parent, ideal);
+
+ if (rounded == ideal) {
+ best_parent = rounded;
+ best_half = i;
+ best_div = j;
+ goto out;
+ }
+
+ if (abs(rate - rounded / i) <
+ abs(rate - best_parent / best_div)) {
+ best_parent = rounded;
+ best_div = i;
+ }
}
}
}
+ if (!parent)
+ return -EINVAL;
+
out:
req->rate = best_parent / best_half / best_div;
req->best_parent_rate = best_parent;
@@ -124,7 +133,7 @@ static unsigned long sun4i_tmds_recalc_rate(struct clk_hw *hw,
parent_rate /= 2;
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
- reg = (reg >> 4) & 0xf;
+ reg = ((reg >> 4) & 0xf) + tmds->div_offset;
if (!reg)
reg = 1;
@@ -139,7 +148,8 @@ static int sun4i_tmds_set_rate(struct clk_hw *hw, unsigned long rate,
u32 reg;
u8 div;
- sun4i_tmds_calc_divider(rate, parent_rate, &div, &half);
+ sun4i_tmds_calc_divider(rate, parent_rate, tmds->div_offset,
+ &div, &half);
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
reg &= ~SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
@@ -149,7 +159,7 @@ static int sun4i_tmds_set_rate(struct clk_hw *hw, unsigned long rate,
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg &= ~SUN4I_HDMI_PLL_CTRL_DIV_MASK;
- writel(reg | SUN4I_HDMI_PLL_CTRL_DIV(div),
+ writel(reg | SUN4I_HDMI_PLL_CTRL_DIV(div - tmds->div_offset),
tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
return 0;
@@ -216,6 +226,7 @@ int sun4i_tmds_create(struct sun4i_hdmi *hdmi)
tmds->hdmi = hdmi;
tmds->hw.init = &init;
+ tmds->div_offset = hdmi->variant->tmds_clk_div_offset;
hdmi->tmds_clk = devm_clk_register(hdmi->dev, &tmds->hw);
if (IS_ERR(hdmi->tmds_clk))
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 7cd7090ad63a..832f8f9bc47f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -134,13 +134,10 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel))
+ if (!IS_ERR(tcon->panel)) {
drm_panel_prepare(tcon->panel);
-
- sun4i_tcon_channel_enable(tcon, 0);
-
- if (!IS_ERR(tcon->panel))
drm_panel_enable(tcon->panel);
+ }
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@@ -150,31 +147,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (!IS_ERR(tcon->panel))
+ if (!IS_ERR(tcon->panel)) {
drm_panel_disable(tcon->panel);
-
- sun4i_tcon_channel_disable(tcon, 0);
-
- if (!IS_ERR(tcon->panel))
drm_panel_unprepare(tcon->panel);
-}
-
-static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
-
- sun4i_tcon0_mode_set(tcon, mode);
- sun4i_tcon_set_mux(tcon, 0, encoder);
-
- /* FIXME: This seems to be board specific */
- clk_set_phase(tcon->dclk, 120);
+ }
}
static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
- .mode_set = sun4i_rgb_encoder_mode_set,
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index d9791292553e..e122f5b2a395 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -14,9 +14,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <uapi/drm/drm_mode.h>
+
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
@@ -32,66 +35,61 @@
#include "sun4i_tcon.h"
#include "sunxi_engine.h"
-void sun4i_tcon_disable(struct sun4i_tcon *tcon)
-{
- DRM_DEBUG_DRIVER("Disabling TCON\n");
-
- /* Disable the TCON */
- regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
- SUN4I_TCON_GCTL_TCON_ENABLE, 0);
-}
-EXPORT_SYMBOL(sun4i_tcon_disable);
-
-void sun4i_tcon_enable(struct sun4i_tcon *tcon)
+static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
+ bool enabled)
{
- DRM_DEBUG_DRIVER("Enabling TCON\n");
-
- /* Enable the TCON */
- regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
- SUN4I_TCON_GCTL_TCON_ENABLE,
- SUN4I_TCON_GCTL_TCON_ENABLE);
-}
-EXPORT_SYMBOL(sun4i_tcon_enable);
+ struct clk *clk;
-void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
-{
- DRM_DEBUG_DRIVER("Disabling TCON channel %d\n", channel);
-
- /* Disable the TCON's channel */
- if (channel == 0) {
+ switch (channel) {
+ case 0:
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
- SUN4I_TCON0_CTL_TCON_ENABLE, 0);
- clk_disable_unprepare(tcon->dclk);
+ SUN4I_TCON0_CTL_TCON_ENABLE,
+ enabled ? SUN4I_TCON0_CTL_TCON_ENABLE : 0);
+ clk = tcon->dclk;
+ break;
+ case 1:
+ WARN_ON(!tcon->quirks->has_channel_1);
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_TCON_ENABLE,
+ enabled ? SUN4I_TCON1_CTL_TCON_ENABLE : 0);
+ clk = tcon->sclk1;
+ break;
+ default:
+ DRM_WARN("Unknown channel... doing nothing\n");
return;
}
- WARN_ON(!tcon->quirks->has_channel_1);
- regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
- SUN4I_TCON1_CTL_TCON_ENABLE, 0);
- clk_disable_unprepare(tcon->sclk1);
+ if (enabled)
+ clk_prepare_enable(clk);
+ else
+ clk_disable_unprepare(clk);
}
-EXPORT_SYMBOL(sun4i_tcon_channel_disable);
-void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
+void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ bool enabled)
{
- DRM_DEBUG_DRIVER("Enabling TCON channel %d\n", channel);
-
- /* Enable the TCON's channel */
- if (channel == 0) {
- regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
- SUN4I_TCON0_CTL_TCON_ENABLE,
- SUN4I_TCON0_CTL_TCON_ENABLE);
- clk_prepare_enable(tcon->dclk);
+ int channel;
+
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_NONE:
+ channel = 0;
+ break;
+ case DRM_MODE_ENCODER_TMDS:
+ case DRM_MODE_ENCODER_TVDAC:
+ channel = 1;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
return;
}
- WARN_ON(!tcon->quirks->has_channel_1);
- regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
- SUN4I_TCON1_CTL_TCON_ENABLE,
- SUN4I_TCON1_CTL_TCON_ENABLE);
- clk_prepare_enable(tcon->sclk1);
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_TCON_ENABLE,
+ enabled ? SUN4I_TCON_GCTL_TCON_ENABLE : 0);
+
+ sun4i_tcon_channel_set_status(tcon, channel, enabled);
}
-EXPORT_SYMBOL(sun4i_tcon_channel_enable);
void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
{
@@ -109,30 +107,40 @@ void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
}
EXPORT_SYMBOL(sun4i_tcon_enable_vblank);
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
- struct drm_encoder *encoder)
+/*
+ * This function is a helper for TCON output muxing. The TCON output
+ * muxing control register in earlier SoCs (without the TCON TOP block)
+ * are located in TCON0. This helper returns a pointer to TCON0's
+ * sun4i_tcon structure, or NULL if not found.
+ */
+static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
{
- u32 val;
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_tcon *tcon;
- if (!tcon->quirks->has_unknown_mux)
- return;
+ list_for_each_entry(tcon, &drv->tcon_list, list)
+ if (tcon->id == 0)
+ return tcon;
- if (channel != 1)
- return;
+ dev_warn(drm->dev,
+ "TCON0 not found, display output muxing may not work\n");
- if (encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
- val = 1;
- else
- val = 0;
+ return NULL;
+}
- /*
- * FIXME: Undocumented bits
- */
- regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, val);
+void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
+ const struct drm_encoder *encoder)
+{
+ int ret = -ENOTSUPP;
+
+ if (tcon->quirks->set_mux)
+ ret = tcon->quirks->set_mux(tcon, encoder);
+
+ DRM_DEBUG_DRIVER("Muxing encoder %s to CRTC %s: %d\n",
+ encoder->name, encoder->crtc->name, ret);
}
-EXPORT_SYMBOL(sun4i_tcon_set_mux);
-static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
+static int sun4i_tcon_get_clk_delay(const struct drm_display_mode *mode,
int channel)
{
int delay = mode->vtotal - mode->vdisplay;
@@ -150,15 +158,26 @@ static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
return delay;
}
-void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode)
+static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
+ const struct drm_display_mode *mode)
+{
+ /* Configure the dot clock */
+ clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+
+ /* Set the resolution */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
+ SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
+ SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
+}
+
+static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ const struct drm_display_mode *mode)
{
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
- /* Configure the dot clock */
- clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+ sun4i_tcon0_mode_set_common(tcon, mode);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -166,11 +185,6 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON0_CTL_CLK_DELAY_MASK,
SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
- /* Set the resolution */
- regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
- SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
- SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
-
/*
* This is called a backporch in the register documentation,
* but it really is the back porch + hsync
@@ -224,10 +238,9 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
/* Enable the output on the pins */
regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0);
}
-EXPORT_SYMBOL(sun4i_tcon0_mode_set);
-void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode)
+static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
+ const struct drm_display_mode *mode)
{
unsigned int bp, hsync, vsync, vtotal;
u8 clk_delay;
@@ -315,7 +328,26 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON_GCTL_IOMAP_MASK,
SUN4I_TCON_GCTL_IOMAP_TCON1);
}
-EXPORT_SYMBOL(sun4i_tcon1_mode_set);
+
+void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_NONE:
+ sun4i_tcon0_mode_set_rgb(tcon, mode);
+ sun4i_tcon_set_mux(tcon, 0, encoder);
+ break;
+ case DRM_MODE_ENCODER_TVDAC:
+ case DRM_MODE_ENCODER_TMDS:
+ sun4i_tcon1_mode_set(tcon, mode);
+ sun4i_tcon_set_mux(tcon, 1, encoder);
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
+ }
+}
+EXPORT_SYMBOL(sun4i_tcon_mode_set);
static void sun4i_tcon_finish_page_flip(struct drm_device *dev,
struct sun4i_crtc *scrtc)
@@ -463,42 +495,170 @@ static int sun4i_tcon_init_regmap(struct device *dev,
* function in fact searches the corresponding engine, and the ID is
* requested via the get_id function of the engine.
*/
-static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
- struct device_node *node)
+static struct sunxi_engine *
+sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
+ struct device_node *node)
{
struct device_node *port, *ep, *remote;
- struct sunxi_engine *engine;
+ struct sunxi_engine *engine = ERR_PTR(-EINVAL);
port = of_graph_get_port_by_id(node, 0);
if (!port)
return ERR_PTR(-EINVAL);
+ /*
+ * This only works if there is only one path from the TCON
+ * to any display engine. Otherwise the probe order of the
+ * TCONs and display engines is not guaranteed. They may
+ * either bind to the wrong one, or worse, bind to the same
+ * one if additional checks are not done.
+ *
+ * Bail out if there are multiple input connections.
+ */
+ if (of_get_available_child_count(port) != 1)
+ goto out_put_port;
+
+ /* Get the first connection without specifying an ID */
+ ep = of_get_next_available_child(port, NULL);
+ if (!ep)
+ goto out_put_port;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote)
+ goto out_put_ep;
+
+ /* does this node match any registered engines? */
+ list_for_each_entry(engine, &drv->engine_list, list)
+ if (remote == engine->node)
+ goto out_put_remote;
+
+ /* keep looking through upstream ports */
+ engine = sun4i_tcon_find_engine_traverse(drv, remote);
+
+out_put_remote:
+ of_node_put(remote);
+out_put_ep:
+ of_node_put(ep);
+out_put_port:
+ of_node_put(port);
+
+ return engine;
+}
+
+/*
+ * The device tree binding says that the remote endpoint ID of any
+ * connection between components, up to and including the TCON, of
+ * the display pipeline should be equal to the actual ID of the local
+ * component. Thus we can look at any one of the input connections of
+ * the TCONs, and use that connection's remote endpoint ID as our own.
+ *
+ * Since the user of this function already finds the input port,
+ * the port is passed in directly without further checks.
+ */
+static int sun4i_tcon_of_get_id_from_port(struct device_node *port)
+{
+ struct device_node *ep;
+ int ret = -EINVAL;
+
+ /* try finding an upstream endpoint */
for_each_available_child_of_node(port, ep) {
- remote = of_graph_get_remote_port_parent(ep);
+ struct device_node *remote;
+ u32 reg;
+
+ remote = of_graph_get_remote_endpoint(ep);
if (!remote)
continue;
- /* does this node match any registered engines? */
- list_for_each_entry(engine, &drv->engine_list, list) {
- if (remote == engine->node) {
- of_node_put(remote);
- of_node_put(port);
- return engine;
- }
- }
+ ret = of_property_read_u32(remote, "reg", &reg);
+ if (ret)
+ continue;
- /* keep looking through upstream ports */
- engine = sun4i_tcon_find_engine(drv, remote);
- if (!IS_ERR(engine)) {
- of_node_put(remote);
- of_node_put(port);
- return engine;
- }
+ ret = reg;
}
+ return ret;
+}
+
+/*
+ * Once we know the TCON's id, we can look through the list of
+ * engines to find a matching one. We assume all engines have
+ * been probed and added to the list.
+ */
+static struct sunxi_engine *sun4i_tcon_get_engine_by_id(struct sun4i_drv *drv,
+ int id)
+{
+ struct sunxi_engine *engine;
+
+ list_for_each_entry(engine, &drv->engine_list, list)
+ if (engine->id == id)
+ return engine;
+
return ERR_PTR(-EINVAL);
}
+/*
+ * On SoCs with the old display pipeline design (Display Engine 1.0),
+ * we assumed the TCON was always tied to just one backend. However
+ * this proved not to be the case. On the A31, the TCON can select
+ * either backend as its source. On the A20 (and likely on the A10),
+ * the backend can choose which TCON to output to.
+ *
+ * The device tree binding says that the remote endpoint ID of any
+ * connection between components, up to and including the TCON, of
+ * the display pipeline should be equal to the actual ID of the local
+ * component. Thus we should be able to look at any one of the input
+ * connections of the TCONs, and use that connection's remote endpoint
+ * ID as our own.
+ *
+ * However the connections between the backend and TCON were assumed
+ * to be always singular, and their endpoit IDs were all incorrectly
+ * set to 0. This means for these old device trees, we cannot just look
+ * up the remote endpoint ID of a TCON input endpoint. TCON1 would be
+ * incorrectly identified as TCON0.
+ *
+ * This function first checks if the TCON node has 2 input endpoints.
+ * If so, then the device tree is a corrected version, and it will use
+ * sun4i_tcon_of_get_id() and sun4i_tcon_get_engine_by_id() from above
+ * to fetch the ID and engine directly. If not, then it is likely an
+ * old device trees, where the endpoint IDs were incorrect, but did not
+ * have endpoint connections between the backend and TCON across
+ * different display pipelines. It will fall back to the old method of
+ * traversing the of_graph to try and find a matching engine by device
+ * node.
+ *
+ * In the case of single display pipeline device trees, either method
+ * works.
+ */
+static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
+ struct device_node *node)
+{
+ struct device_node *port;
+ struct sunxi_engine *engine;
+
+ port = of_graph_get_port_by_id(node, 0);
+ if (!port)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Is this a corrected device tree with cross pipeline
+ * connections between the backend and TCON?
+ */
+ if (of_get_child_count(port) > 1) {
+ /* Get our ID directly from an upstream endpoint */
+ int id = sun4i_tcon_of_get_id_from_port(port);
+
+ /* Get our engine by matching our ID */
+ engine = sun4i_tcon_get_engine_by_id(drv, id);
+
+ of_node_put(port);
+ return engine;
+ }
+
+ /* Fallback to old method by traversing input endpoints */
+ of_node_put(port);
+ return sun4i_tcon_find_engine_traverse(drv, node);
+}
+
static int sun4i_tcon_bind(struct device *dev, struct device *master,
void *data)
{
@@ -530,10 +690,7 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
}
/* Make sure our TCON is reset */
- if (!reset_control_status(tcon->lcd_rst))
- reset_control_assert(tcon->lcd_rst);
-
- ret = reset_control_deassert(tcon->lcd_rst);
+ ret = reset_control_reset(tcon->lcd_rst);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
@@ -574,6 +731,25 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
if (ret < 0)
goto err_free_clocks;
+ if (tcon->quirks->needs_de_be_mux) {
+ /*
+ * We assume there is no dynamic muxing of backends
+ * and TCONs, so we select the backend with same ID.
+ *
+ * While dynamic selection might be interesting, since
+ * the CRTC is tied to the TCON, while the layers are
+ * tied to the backends, this means, we will need to
+ * switch between groups of layers. There might not be
+ * a way to represent this constraint in DRM.
+ */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+ SUN4I_TCON0_CTL_SRC_SEL_MASK,
+ tcon->id);
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_SRC_SEL_MASK,
+ tcon->id);
+ }
+
list_add_tail(&tcon->list, &drv->tcon_list);
return 0;
@@ -623,17 +799,97 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
return 0;
}
+/* platform specific TCON muxing callbacks */
+static int sun4i_a10_tcon_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
+ u32 shift;
+
+ if (!tcon0)
+ return -EINVAL;
+
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_TMDS:
+ /* HDMI */
+ shift = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
+ 0x3 << shift, tcon->id << shift);
+
+ return 0;
+}
+
+static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ u32 val;
+
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+ val = 1;
+ else
+ val = 0;
+
+ /*
+ * FIXME: Undocumented bits
+ */
+ return regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, val);
+}
+
+static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
+ u32 shift;
+
+ if (!tcon0)
+ return -EINVAL;
+
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_TMDS:
+ /* HDMI */
+ shift = 8;
+ break;
+ default:
+ /* TODO A31 has MIPI DSI but A31s does not */
+ return -EINVAL;
+ }
+
+ regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
+ 0x3 << shift, tcon->id << shift);
+
+ return 0;
+}
+
+static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
+ .has_channel_1 = true,
+ .set_mux = sun4i_a10_tcon_set_mux,
+};
+
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
- .has_unknown_mux = true,
- .has_channel_1 = true,
+ .has_channel_1 = true,
+ .set_mux = sun5i_a13_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
- .has_channel_1 = true,
+ .has_channel_1 = true,
+ .needs_de_be_mux = true,
+ .set_mux = sun6i_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
- .has_channel_1 = true,
+ .has_channel_1 = true,
+ .needs_de_be_mux = true,
+};
+
+static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
+ .has_channel_1 = true,
+ /* Same display pipeline structure as A10 */
+ .set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
@@ -645,9 +901,11 @@ static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
};
static const struct of_device_id sun4i_tcon_of_table[] = {
+ { .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks },
{ .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 552c88ec16be..f61bf6d83b4a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -37,6 +37,7 @@
#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31)
#define SUN4I_TCON0_CTL_CLK_DELAY_MASK GENMASK(8, 4)
#define SUN4I_TCON0_CTL_CLK_DELAY(delay) ((delay << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK)
+#define SUN4I_TCON0_CTL_SRC_SEL_MASK GENMASK(2, 0)
#define SUN4I_TCON0_DCLK_REG 0x44
#define SUN4I_TCON0_DCLK_GATE_BIT (31)
@@ -85,6 +86,7 @@
#define SUN4I_TCON1_CTL_INTERLACE_ENABLE BIT(20)
#define SUN4I_TCON1_CTL_CLK_DELAY_MASK GENMASK(8, 4)
#define SUN4I_TCON1_CTL_CLK_DELAY(delay) ((delay << 4) & SUN4I_TCON1_CTL_CLK_DELAY_MASK)
+#define SUN4I_TCON1_CTL_SRC_SEL_MASK GENMASK(1, 0)
#define SUN4I_TCON1_BASIC0_REG 0x94
#define SUN4I_TCON1_BASIC0_X(width) ((((width) - 1) & 0xfff) << 16)
@@ -143,9 +145,14 @@
#define SUN4I_TCON_MAX_CHANNELS 2
+struct sun4i_tcon;
+
struct sun4i_tcon_quirks {
- bool has_unknown_mux; /* sun5i has undocumented mux */
bool has_channel_1; /* a33 does not have channel 1 */
+ bool needs_de_be_mux; /* sun6i needs mux to select backend */
+
+ /* callback to handle tcon muxing options */
+ int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
};
struct sun4i_tcon {
@@ -183,22 +190,11 @@ struct sun4i_tcon {
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
-/* Global Control */
-void sun4i_tcon_disable(struct sun4i_tcon *tcon);
-void sun4i_tcon_enable(struct sun4i_tcon *tcon);
-
-/* Channel Control */
-void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel);
-void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel);
-
void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable);
-
-/* Mode Related Controls */
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
- struct drm_encoder *encoder);
-void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode);
-void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode);
+void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ const struct drm_display_mode *mode);
+void sun4i_tcon_set_status(struct sun4i_tcon *crtc,
+ const struct drm_encoder *encoder, bool enable);
#endif /* __SUN4I_TCON_H__ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 050cfd43c7a0..b070d522ed8d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -24,7 +24,6 @@
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
-#include "sun4i_tcon.h"
#include "sunxi_engine.h"
#define SUN4I_TVE_EN_REG 0x000
@@ -345,12 +344,9 @@ static void sun4i_tv_disable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
DRM_DEBUG_DRIVER("Disabling the TV Output\n");
- sun4i_tcon_channel_disable(tcon, 1);
-
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
0);
@@ -362,7 +358,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
@@ -371,8 +366,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder)
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
SUN4I_TVE_EN_ENABLE);
-
- sun4i_tcon_channel_enable(tcon, 1);
}
static void sun4i_tv_mode_set(struct drm_encoder *encoder,
@@ -380,13 +373,8 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
- sun4i_tcon1_mode_set(tcon, mode);
- sun4i_tcon_set_mux(tcon, 1, encoder);
-
/* Enable and map the DAC to the output */
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_DAC_MAP_MASK,
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index dc58ab140151..cf54847a8bd1 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,6 +9,7 @@ config DRM_TEGRA
select DRM_PANEL
select TEGRA_HOST1X
select IOMMU_IOVA if IOMMU_SUPPORT
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 4df39112e38e..24a5ef4f5bb8 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -23,16 +24,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
-struct tegra_dc_soc_info {
- bool supports_border_color;
- bool supports_interlacing;
- bool supports_cursor;
- bool supports_block_linear;
- unsigned int pitch_align;
- bool has_powergate;
- bool broken_reset;
-};
-
struct tegra_plane {
struct drm_plane base;
unsigned int index;
@@ -559,14 +550,21 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
return 0;
}
-static void tegra_dc_disable_window(struct tegra_dc *dc, int index)
+static void tegra_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
+ struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
unsigned long flags;
u32 value;
+ /* rien ne va plus */
+ if (!old_state || !old_state->crtc)
+ return;
+
spin_lock_irqsave(&dc->lock, flags);
- value = WINDOW_A_SELECT << index;
+ value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
@@ -591,7 +589,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
return;
if (!plane->state->visible)
- return tegra_dc_disable_window(dc, p->index);
+ return tegra_plane_atomic_disable(plane, old_state);
memset(&window, 0, sizeof(window));
window.src.x = plane->state->src.x1 >> 16;
@@ -627,25 +625,10 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
tegra_dc_setup_window(dc, p->index, &window);
}
-static void tegra_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_dc *dc;
-
- /* rien ne va plus */
- if (!old_state || !old_state->crtc)
- return;
-
- dc = to_tegra_dc(old_state->crtc);
-
- tegra_dc_disable_window(dc, p->index);
-}
-
-static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = {
+static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
.atomic_check = tegra_plane_atomic_check,
- .atomic_update = tegra_plane_atomic_update,
.atomic_disable = tegra_plane_atomic_disable,
+ .atomic_update = tegra_plane_atomic_update,
};
static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
@@ -685,7 +668,7 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
return ERR_PTR(err);
}
- drm_plane_helper_add(&plane->base, &tegra_primary_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
return &plane->base;
}
@@ -880,12 +863,6 @@ static const uint32_t tegra_overlay_plane_formats[] = {
DRM_FORMAT_YUV422,
};
-static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = {
- .atomic_check = tegra_plane_atomic_check,
- .atomic_update = tegra_plane_atomic_update,
- .atomic_disable = tegra_plane_atomic_disable,
-};
-
static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int index)
@@ -913,7 +890,7 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
return ERR_PTR(err);
}
- drm_plane_helper_add(&plane->base, &tegra_overlay_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
return &plane->base;
}
@@ -1161,6 +1138,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+ err = clk_set_rate(dc->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+ dc->clk, state->pclk, err);
}
static void tegra_dc_stop(struct tegra_dc *dc)
@@ -1756,7 +1738,7 @@ static int tegra_dc_init(struct host1x_client *client)
struct drm_plane *cursor = NULL;
int err;
- dc->syncpt = host1x_syncpt_request(dc->dev, flags);
+ dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
@@ -1985,7 +1967,6 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -1994,14 +1975,11 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (!dc)
return -ENOMEM;
- id = of_match_node(tegra_dc_of_match, pdev->dev.of_node);
- if (!id)
- return -ENODEV;
+ dc->soc = of_device_get_match_data(&pdev->dev);
spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
- dc->soc = id->data;
err = tegra_dc_parse_dt(dc);
if (err < 0)
@@ -2019,8 +1997,22 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->rst);
}
- if (!dc->soc->broken_reset)
- reset_control_assert(dc->rst);
+ /* assert reset and disable clock */
+ if (!dc->soc->broken_reset) {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
+
+ clk_disable_unprepare(dc->clk);
+ }
if (dc->soc->has_powergate) {
if (dc->pipe == 0)
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 4a268635749b..cb100b6e3282 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -10,6 +10,126 @@
#ifndef TEGRA_DC_H
#define TEGRA_DC_H 1
+#include <linux/host1x.h>
+
+#include <drm/drm_crtc.h>
+
+#include "drm.h"
+
+struct tegra_output;
+
+struct tegra_dc_stats {
+ unsigned long frames;
+ unsigned long vblank;
+ unsigned long underflow;
+ unsigned long overflow;
+};
+
+struct tegra_dc_soc_info {
+ bool supports_border_color;
+ bool supports_interlacing;
+ bool supports_cursor;
+ bool supports_block_linear;
+ unsigned int pitch_align;
+ bool has_powergate;
+ bool broken_reset;
+};
+
+struct tegra_dc {
+ struct host1x_client client;
+ struct host1x_syncpt *syncpt;
+ struct device *dev;
+ spinlock_t lock;
+
+ struct drm_crtc base;
+ unsigned int powergate;
+ int pipe;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *rgb;
+
+ struct tegra_dc_stats stats;
+ struct list_head list;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+
+ /* page-flip handling */
+ struct drm_pending_vblank_event *event;
+
+ const struct tegra_dc_soc_info *soc;
+
+ struct iommu_domain *domain;
+};
+
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+ return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
+ unsigned int offset)
+{
+ trace_dc_writel(dc->dev, offset, value);
+ writel(value, dc->regs + (offset << 2));
+}
+
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset)
+{
+ u32 value = readl(dc->regs + (offset << 2));
+
+ trace_dc_readl(dc->dev, offset, value);
+
+ return value;
+}
+
+struct tegra_dc_window {
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } src;
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } dst;
+ unsigned int bits_per_pixel;
+ unsigned int stride[2];
+ unsigned long base[3];
+ bool bottom_up;
+
+ struct tegra_bo_tiling tiling;
+ u32 format;
+ u32 swap;
+};
+
+/* from dc.c */
+void tegra_dc_commit(struct tegra_dc *dc);
+int tegra_dc_state_setup_clock(struct tegra_dc *dc,
+ struct drm_crtc_state *crtc_state,
+ struct clk *clk, unsigned long pclk,
+ unsigned int div);
+
+/* from rgb.c */
+int tegra_dc_rgb_probe(struct tegra_dc *dc);
+int tegra_dc_rgb_remove(struct tegra_dc *dc);
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
#define SYNCPT_CNTRL_NO_STALL (1 << 8)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index b822e484b7e5..52552b9b89ef 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -385,12 +385,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_cmdbufs = args->num_cmdbufs;
unsigned int num_relocs = args->num_relocs;
unsigned int num_waitchks = args->num_waitchks;
- struct drm_tegra_cmdbuf __user *cmdbufs =
- (void __user *)(uintptr_t)args->cmdbufs;
- struct drm_tegra_reloc __user *relocs =
- (void __user *)(uintptr_t)args->relocs;
- struct drm_tegra_waitchk __user *waitchks =
- (void __user *)(uintptr_t)args->waitchks;
+ struct drm_tegra_cmdbuf __user *user_cmdbufs;
+ struct drm_tegra_reloc __user *user_relocs;
+ struct drm_tegra_waitchk __user *user_waitchks;
+ struct drm_tegra_syncpt __user *user_syncpt;
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
struct drm_gem_object **refs;
@@ -399,6 +397,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_refs;
int err;
+ user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
+ user_relocs = u64_to_user_ptr(args->relocs);
+ user_waitchks = u64_to_user_ptr(args->waitchks);
+ user_syncpt = u64_to_user_ptr(args->syncpts);
+
/* We don't yet support other than one syncpt_incr struct per submit */
if (args->num_syncpts != 1)
return -EINVAL;
@@ -439,7 +442,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct tegra_bo *obj;
u64 offset;
- if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
+ if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
err = -EFAULT;
goto fail;
}
@@ -475,7 +478,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
num_cmdbufs--;
- cmdbufs++;
+ user_cmdbufs++;
}
/* copy and resolve relocations from submit */
@@ -484,7 +487,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct tegra_bo *obj;
err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
- &relocs[num_relocs], drm,
+ &user_relocs[num_relocs], drm,
file);
if (err < 0)
goto fail;
@@ -518,9 +521,8 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
struct tegra_bo *obj;
- err = host1x_waitchk_copy_from_user(wait,
- &waitchks[num_waitchks],
- file);
+ err = host1x_waitchk_copy_from_user(
+ wait, &user_waitchks[num_waitchks], file);
if (err < 0)
goto fail;
@@ -538,8 +540,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
}
- if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
- sizeof(syncpt))) {
+ if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
err = -EFAULT;
goto fail;
}
@@ -1316,6 +1317,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
+ { .compatible = "nvidia,tegra186-vic", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 063f5d397526..ddae331ad8b6 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -119,104 +119,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
dma_addr_t iova);
-struct tegra_dc_soc_info;
-struct tegra_output;
-
-struct tegra_dc_stats {
- unsigned long frames;
- unsigned long vblank;
- unsigned long underflow;
- unsigned long overflow;
-};
-
-struct tegra_dc {
- struct host1x_client client;
- struct host1x_syncpt *syncpt;
- struct device *dev;
- spinlock_t lock;
-
- struct drm_crtc base;
- unsigned int powergate;
- int pipe;
-
- struct clk *clk;
- struct reset_control *rst;
- void __iomem *regs;
- int irq;
-
- struct tegra_output *rgb;
-
- struct tegra_dc_stats stats;
- struct list_head list;
-
- struct drm_info_list *debugfs_files;
- struct drm_minor *minor;
- struct dentry *debugfs;
-
- /* page-flip handling */
- struct drm_pending_vblank_event *event;
-
- const struct tegra_dc_soc_info *soc;
-
- struct iommu_domain *domain;
-};
-
-static inline struct tegra_dc *
-host1x_client_to_dc(struct host1x_client *client)
-{
- return container_of(client, struct tegra_dc, client);
-}
-
-static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
-{
- return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
-}
-
-static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
- unsigned int offset)
-{
- trace_dc_writel(dc->dev, offset, value);
- writel(value, dc->regs + (offset << 2));
-}
-
-static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset)
-{
- u32 value = readl(dc->regs + (offset << 2));
-
- trace_dc_readl(dc->dev, offset, value);
-
- return value;
-}
-
-struct tegra_dc_window {
- struct {
- unsigned int x;
- unsigned int y;
- unsigned int w;
- unsigned int h;
- } src;
- struct {
- unsigned int x;
- unsigned int y;
- unsigned int w;
- unsigned int h;
- } dst;
- unsigned int bits_per_pixel;
- unsigned int stride[2];
- unsigned long base[3];
- bool bottom_up;
-
- struct tegra_bo_tiling tiling;
- u32 format;
- u32 swap;
-};
-
-/* from dc.c */
-void tegra_dc_commit(struct tegra_dc *dc);
-int tegra_dc_state_setup_clock(struct tegra_dc *dc,
- struct drm_crtc_state *crtc_state,
- struct clk *clk, unsigned long pclk,
- unsigned int div);
+struct cec_notifier;
struct tegra_output {
struct device_node *of_node;
@@ -225,6 +128,7 @@ struct tegra_output {
struct drm_panel *panel;
struct i2c_adapter *ddc;
const struct edid *edid;
+ struct cec_notifier *notifier;
unsigned int hpd_irq;
int hpd_gpio;
enum of_gpio_flags hpd_gpio_flags;
@@ -243,12 +147,6 @@ static inline struct tegra_output *connector_to_output(struct drm_connector *c)
return container_of(c, struct tegra_output, connector);
}
-/* from rgb.c */
-int tegra_dc_rgb_probe(struct tegra_dc *dc);
-int tegra_dc_rgb_remove(struct tegra_dc *dc);
-int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
-int tegra_dc_rgb_exit(struct tegra_dc *dc);
-
/* from output.c */
int tegra_output_probe(struct tegra_output *output);
void tegra_output_remove(struct tegra_output *output);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 6ea070da7718..9a8ea93016a9 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -36,7 +36,7 @@ static int gr2d_init(struct host1x_client *client)
if (!gr2d->channel)
return -ENOMEM;
- client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
host1x_channel_put(gr2d->channel);
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index cee2ab645cde..28c4ef63065b 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -46,7 +46,7 @@ static int gr3d_init(struct host1x_client *client)
if (!gr3d->channel)
return -ENOMEM;
- client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
host1x_channel_put(gr3d->channel);
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 5b9d83b71943..6434b3d3d1ba 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/hdmi.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -21,6 +22,8 @@
#include <sound/hda_verbs.h>
+#include <media/cec-notifier.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
@@ -1663,20 +1666,15 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
static int tegra_hdmi_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
- match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
- if (!match)
- return -ENODEV;
-
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
- hdmi->config = match->data;
+ hdmi->config = of_device_get_match_data(&pdev->dev);
hdmi->dev = &pdev->dev;
hdmi->audio_source = AUTO;
@@ -1725,6 +1723,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->vdd);
}
+ hdmi->output.notifier = cec_notifier_get(&pdev->dev);
+ if (hdmi->output.notifier == NULL)
+ return -ENOMEM;
+
hdmi->output.dev = &pdev->dev;
err = tegra_output_probe(&hdmi->output);
@@ -1783,6 +1785,9 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
tegra_output_remove(&hdmi->output);
+ if (hdmi->output.notifier)
+ cec_notifier_put(hdmi->output.notifier);
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 595d1ec3e02e..1cfbacea8113 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -11,6 +11,8 @@
#include <drm/drm_panel.h>
#include "drm.h"
+#include <media/cec-notifier.h>
+
int tegra_output_connector_get_modes(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
@@ -32,6 +34,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
else if (output->ddc)
edid = drm_get_edid(connector, output->ddc);
+ cec_notifier_set_phys_addr_from_edid(output->notifier, edid);
drm_mode_connector_update_edid_property(connector, edid);
if (edid) {
@@ -68,6 +71,9 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
}
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(output->notifier);
+
return status;
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7ab1d1dc7cd7..b0a1dedac802 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -174,9 +174,9 @@ struct tegra_sor {
struct reset_control *rst;
struct clk *clk_parent;
- struct clk *clk_brick;
struct clk *clk_safe;
- struct clk *clk_src;
+ struct clk *clk_out;
+ struct clk *clk_pad;
struct clk *clk_dp;
struct clk *clk;
@@ -255,7 +255,7 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
clk_disable_unprepare(sor->clk);
- err = clk_set_parent(sor->clk, parent);
+ err = clk_set_parent(sor->clk_out, parent);
if (err < 0)
return err;
@@ -266,24 +266,24 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
return 0;
}
-struct tegra_clk_sor_brick {
+struct tegra_clk_sor_pad {
struct clk_hw hw;
struct tegra_sor *sor;
};
-static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
{
- return container_of(hw, struct tegra_clk_sor_brick, hw);
+ return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_brick_parents[] = {
+static const char * const tegra_clk_sor_pad_parents[] = {
"pll_d2_out0", "pll_dp"
};
-static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
@@ -304,10 +304,10 @@ static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u8 parent = U8_MAX;
u32 value;
@@ -328,33 +328,33 @@ static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
return parent;
}
-static const struct clk_ops tegra_clk_sor_brick_ops = {
- .set_parent = tegra_clk_sor_brick_set_parent,
- .get_parent = tegra_clk_sor_brick_get_parent,
+static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .set_parent = tegra_clk_sor_pad_set_parent,
+ .get_parent = tegra_clk_sor_pad_get_parent,
};
-static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
- const char *name)
+static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
+ const char *name)
{
- struct tegra_clk_sor_brick *brick;
+ struct tegra_clk_sor_pad *pad;
struct clk_init_data init;
struct clk *clk;
- brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
- if (!brick)
+ pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
+ if (!pad)
return ERR_PTR(-ENOMEM);
- brick->sor = sor;
+ pad->sor = sor;
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_brick_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
- init.ops = &tegra_clk_sor_brick_ops;
+ init.parent_names = tegra_clk_sor_pad_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.ops = &tegra_clk_sor_pad_ops;
- brick->hw.init = &init;
+ pad->hw.init = &init;
- clk = devm_clk_register(sor->dev, &brick->hw);
+ clk = devm_clk_register(sor->dev, &pad->hw);
return clk;
}
@@ -998,8 +998,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return err;
+ }
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
@@ -2007,8 +2009,10 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return;
+ }
div = clk_get_rate(sor->clk) / 1000000 * 4;
@@ -2111,13 +2115,17 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/* switch to parent clock */
- err = clk_set_parent(sor->clk_src, sor->clk_parent);
- if (err < 0)
- dev_err(sor->dev, "failed to set source clock: %d\n", err);
-
- err = tegra_sor_set_parent_clock(sor, sor->clk_src);
- if (err < 0)
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ return;
+ }
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ return;
+ }
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
@@ -2536,20 +2544,17 @@ MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
static int tegra_sor_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device_node *np;
struct tegra_sor *sor;
struct resource *regs;
int err;
- match = of_match_device(tegra_sor_of_match, &pdev->dev);
-
sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
if (!sor)
return -ENOMEM;
+ sor->soc = of_device_get_match_data(&pdev->dev);
sor->output.dev = sor->dev = &pdev->dev;
- sor->soc = match->data;
sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
sor->soc->num_settings *
@@ -2631,11 +2636,24 @@ static int tegra_sor_probe(struct platform_device *pdev)
}
if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
- sor->clk_src = devm_clk_get(&pdev->dev, "source");
- if (IS_ERR(sor->clk_src)) {
- err = PTR_ERR(sor->clk_src);
- dev_err(sor->dev, "failed to get source clock: %d\n",
- err);
+ struct device_node *np = pdev->dev.of_node;
+ const char *name;
+
+ /*
+ * For backwards compatibility with Tegra210 device trees,
+ * fall back to the old clock name "source" if the new "out"
+ * clock is not available.
+ */
+ if (of_property_match_string(np, "clock-names", "out") < 0)
+ name = "source";
+ else
+ name = "out";
+
+ sor->clk_out = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(sor->clk_out)) {
+ err = PTR_ERR(sor->clk_out);
+ dev_err(sor->dev, "failed to get %s clock: %d\n",
+ name, err);
goto remove;
}
}
@@ -2661,16 +2679,60 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ /*
+ * Starting with Tegra186, the BPMP provides an implementation for
+ * the pad output clock, so we have to look it up from device tree.
+ */
+ sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
+ if (IS_ERR(sor->clk_pad)) {
+ if (sor->clk_pad != ERR_PTR(-ENOENT)) {
+ err = PTR_ERR(sor->clk_pad);
+ goto remove;
+ }
+
+ /*
+ * If the pad output clock is not available, then we assume
+ * we're on Tegra210 or earlier and have to provide our own
+ * implementation.
+ */
+ sor->clk_pad = NULL;
+ }
+
+ /*
+ * The bootloader may have set up the SOR such that it's module clock
+ * is sourced by one of the display PLLs. However, that doesn't work
+ * without properly having set up other bits of the SOR.
+ */
+ err = clk_set_parent(sor->clk_out, sor->clk_safe);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
+ goto remove;
+ }
+
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
- sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
- pm_runtime_put(&pdev->dev);
+ /*
+ * On Tegra210 and earlier, provide our own implementation for the
+ * pad output clock.
+ */
+ if (!sor->clk_pad) {
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
+ err);
+ goto remove;
+ }
- if (IS_ERR(sor->clk_brick)) {
- err = PTR_ERR(sor->clk_brick);
- dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ sor->clk_pad = tegra_clk_sor_pad_register(sor,
+ "sor1_pad_clkout");
+ pm_runtime_put(&pdev->dev);
+ }
+
+ if (IS_ERR(sor->clk_pad)) {
+ err = PTR_ERR(sor->clk_pad);
+ dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
+ err);
goto remove;
}
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 2448229fa653..18024183aa2b 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -167,7 +167,7 @@ static int vic_init(struct host1x_client *client)
goto detach_device;
}
- client->syncpts[0] = host1x_syncpt_request(client->dev, 0);
+ client->syncpts[0] = host1x_syncpt_request(client, 0);
if (!client->syncpts[0]) {
err = -ENOMEM;
goto free_channel;
@@ -270,29 +270,33 @@ static const struct vic_config vic_t210_config = {
.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
};
+#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"
+
+static const struct vic_config vic_t186_config = {
+ .firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
+};
+
static const struct of_device_id vic_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
+ { .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
{ },
};
static int vic_probe(struct platform_device *pdev)
{
- struct vic_config *vic_config = NULL;
struct device *dev = &pdev->dev;
struct host1x_syncpt **syncpts;
struct resource *regs;
- const struct of_device_id *match;
struct vic *vic;
int err;
- match = of_match_device(vic_match, dev);
- vic_config = (struct vic_config *)match->data;
-
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
+ vic->config = of_device_get_match_data(dev);
+
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
@@ -321,7 +325,7 @@ static int vic_probe(struct platform_device *pdev)
if (err < 0)
return err;
- err = falcon_read_firmware(&vic->falcon, vic_config->firmware);
+ err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
goto exit_falcon;
@@ -334,7 +338,6 @@ static int vic_probe(struct platform_device *pdev)
vic->client.base.syncpts = syncpts;
vic->client.base.num_syncpts = 1;
vic->dev = dev;
- vic->config = vic_config;
INIT_LIST_HEAD(&vic->client.list);
vic->client.ops = &vic_ops;
@@ -405,3 +408,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 28fed7e206d0..81ac82455ce4 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -12,14 +12,3 @@ config DRM_TILCDC
controller, for example AM33xx in beagle-bone, DA8xx, or
OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
-config DRM_TILCDC_SLAVE_COMPAT
- bool "Support device tree blobs using TI LCDC Slave binding"
- depends on DRM_TILCDC
- default y
- select OF_RESOLVE
- select OF_OVERLAY
- help
- Choose this option if you need a kernel that is compatible
- with device tree blobs using the obsolete "ti,tilcdc,slave"
- binding. If you find "ti,tilcdc,slave"-string from your DTB,
- you probably need this. Otherwise you do not.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index b9e1108e5b4e..87f9480e43b0 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -3,9 +3,6 @@ ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
ccflags-y += -Werror
endif
-obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
- tilcdc_slave_compat.dtb.o
-
tilcdc-y := \
tilcdc_plane.o \
tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 406fe4544b83..6ef4d1a1e3a9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -24,6 +24,7 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/of_graph.h>
+#include <linux/math64.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -48,6 +49,7 @@ struct tilcdc_crtc {
unsigned int lcd_fck_rate;
ktime_t last_vblank;
+ unsigned int hvtotal_us;
struct drm_framebuffer *curr_fb;
struct drm_framebuffer *next_fb;
@@ -75,7 +77,7 @@ static void unref_worker(struct drm_flip_work *work, void *val)
struct drm_device *dev = tilcdc_crtc->base.dev;
mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_unreference(val);
+ drm_framebuffer_put(val);
mutex_unlock(&dev->mode_config.mutex);
}
@@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
LCDC_V2_CORE_CLK_EN);
}
+uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
+{
+ return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
+ mode->clock);
+}
+
static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
@@ -456,9 +464,12 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
set_scanout(crtc, fb);
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
crtc->hwmode = crtc->state->adjusted_mode;
+
+ tilcdc_crtc->hvtotal_us =
+ tilcdc_mode_hvtotal(&crtc->hwmode);
}
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
@@ -467,7 +478,6 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
unsigned long flags;
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
mutex_lock(&tilcdc_crtc->enable_lock);
if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -564,7 +574,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
tilcdc_crtc_off(crtc, false);
}
@@ -608,9 +617,7 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct tilcdc_drm_private *priv = crtc->dev->dev_private;
- drm_modeset_lock(&crtc->mutex, NULL);
- tilcdc_crtc_disable(crtc);
- drm_modeset_unlock(&crtc->mutex);
+ tilcdc_crtc_shutdown(crtc);
flush_workqueue(priv->wq);
@@ -626,14 +633,12 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
if (tilcdc_crtc->event) {
dev_err(dev->dev, "already pending page flip!\n");
return -EBUSY;
}
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
crtc->primary->fb = fb;
tilcdc_crtc->event = event;
@@ -648,7 +653,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
+ tilcdc_crtc->hvtotal_us);
tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
@@ -728,11 +733,39 @@ static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
}
+static void tilcdc_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int ret;
+
+ drm_atomic_helper_crtc_reset(crtc);
+
+ /* Turn the raster off if it for some reason is on. */
+ pm_runtime_get_sync(dev->dev);
+ if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
+ /* Enable DMA Frame Done Interrupt */
+ tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+
+ tilcdc_crtc->frame_done = false;
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+ __func__);
+ }
+ pm_runtime_put_sync(dev->dev);
+}
+
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
.destroy = tilcdc_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = tilcdc_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = tilcdc_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b0d70f943cec..72ce063aa0d8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -23,6 +23,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -65,7 +66,7 @@ static struct of_device_id tilcdc_of_match[];
static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
@@ -225,7 +226,7 @@ static void tilcdc_fini(struct drm_device *dev)
pm_runtime_disable(dev->dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 1813a3623ce6..8eebb5f826a6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -418,7 +418,7 @@ static int panel_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id panel_of_match[] = {
+static const struct of_device_id panel_of_match[] = {
{ .compatible = "ti,tilcdc,panel", },
{ },
};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
deleted file mode 100644
index 54025af534d4..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-/*
- * To support the old "ti,tilcdc,slave" binding the binding has to be
- * transformed to the new external encoder binding.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_fdt.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tilcdc_slave_compat.h"
-
-struct kfree_table {
- int total;
- int num;
- void **table;
-};
-
-static int __init kfree_table_init(struct kfree_table *kft)
-{
- kft->total = 32;
- kft->num = 0;
- kft->table = kmalloc(kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table)
- return -ENOMEM;
-
- return 0;
-}
-
-static int __init kfree_table_add(struct kfree_table *kft, void *p)
-{
- if (kft->num == kft->total) {
- void **old = kft->table;
-
- kft->total *= 2;
- kft->table = krealloc(old, kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table) {
- kft->table = old;
- kfree(p);
- return -ENOMEM;
- }
- }
- kft->table[kft->num++] = p;
- return 0;
-}
-
-static void __init kfree_table_free(struct kfree_table *kft)
-{
- int i;
-
- for (i = 0; i < kft->num; i++)
- kfree(kft->table[i]);
-
- kfree(kft->table);
-}
-
-static
-struct property * __init tilcdc_prop_dup(const struct property *prop,
- struct kfree_table *kft)
-{
- struct property *nprop;
-
- nprop = kzalloc(sizeof(*nprop), GFP_KERNEL);
- if (!nprop || kfree_table_add(kft, nprop))
- return NULL;
-
- nprop->name = kstrdup(prop->name, GFP_KERNEL);
- if (!nprop->name || kfree_table_add(kft, nprop->name))
- return NULL;
-
- nprop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
- if (!nprop->value || kfree_table_add(kft, nprop->value))
- return NULL;
-
- nprop->length = prop->length;
-
- return nprop;
-}
-
-static void __init tilcdc_copy_props(struct device_node *from,
- struct device_node *to,
- const char * const props[],
- struct kfree_table *kft)
-{
- struct property *prop;
- int i;
-
- for (i = 0; props[i]; i++) {
- prop = of_find_property(from, props[i], NULL);
- if (!prop)
- continue;
-
- prop = tilcdc_prop_dup(prop, kft);
- if (!prop)
- continue;
-
- prop->next = to->properties;
- to->properties = prop;
- }
-}
-
-static int __init tilcdc_prop_str_update(struct property *prop,
- const char *str,
- struct kfree_table *kft)
-{
- prop->value = kstrdup(str, GFP_KERNEL);
- if (kfree_table_add(kft, prop->value) || !prop->value)
- return -ENOMEM;
- prop->length = strlen(str)+1;
- return 0;
-}
-
-static void __init tilcdc_node_disable(struct device_node *node)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "status";
- prop->value = "disabled";
- prop->length = strlen((char *)prop->value)+1;
-
- of_update_property(node, prop);
-}
-
-static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
-{
- const int size = __dtb_tilcdc_slave_compat_end -
- __dtb_tilcdc_slave_compat_begin;
- static void *overlay_data;
- struct device_node *overlay;
-
- if (!size) {
- pr_warn("%s: No overlay data\n", __func__);
- return NULL;
- }
-
- overlay_data = kmemdup(__dtb_tilcdc_slave_compat_begin,
- size, GFP_KERNEL);
- if (!overlay_data || kfree_table_add(kft, overlay_data))
- return NULL;
-
- of_fdt_unflatten_tree(overlay_data, NULL, &overlay);
- if (!overlay) {
- pr_warn("%s: Unfattening overlay tree failed\n", __func__);
- return NULL;
- }
-
- of_node_set_flag(overlay, OF_DETACHED);
-
- return overlay;
-}
-
-static const struct of_device_id tilcdc_slave_of_match[] __initconst = {
- { .compatible = "ti,tilcdc,slave", },
- {},
-};
-
-static const struct of_device_id tilcdc_of_match[] __initconst = {
- { .compatible = "ti,am33xx-tilcdc", },
- {},
-};
-
-static const struct of_device_id tilcdc_tda998x_of_match[] __initconst = {
- { .compatible = "nxp,tda998x", },
- {},
-};
-
-static const char * const tilcdc_slave_props[] __initconst = {
- "pinctrl-names",
- "pinctrl-0",
- "pinctrl-1",
- NULL
-};
-
-static void __init tilcdc_convert_slave_node(void)
-{
- struct device_node *slave = NULL, *lcdc = NULL;
- struct device_node *i2c = NULL, *fragment = NULL;
- struct device_node *overlay, *encoder;
- struct property *prop;
- /* For all memory needed for the overlay tree. This memory can
- be freed after the overlay has been applied. */
- struct kfree_table kft;
- int ovcs_id, ret;
-
- if (kfree_table_init(&kft))
- return;
-
- lcdc = of_find_matching_node(NULL, tilcdc_of_match);
- slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
-
- if (!slave || !of_device_is_available(lcdc))
- goto out;
-
- i2c = of_parse_phandle(slave, "i2c", 0);
- if (!i2c) {
- pr_err("%s: Can't find i2c node trough phandle\n", __func__);
- goto out;
- }
-
- overlay = tilcdc_get_overlay(&kft);
- if (!overlay)
- goto out;
-
- encoder = of_find_matching_node(overlay, tilcdc_tda998x_of_match);
- if (!encoder) {
- pr_err("%s: Failed to find tda998x node\n", __func__);
- goto out;
- }
-
- tilcdc_copy_props(slave, encoder, tilcdc_slave_props, &kft);
-
- for_each_child_of_node(overlay, fragment) {
- prop = of_find_property(fragment, "target-path", NULL);
- if (!prop)
- continue;
- if (!strncmp("i2c", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, i2c->full_name, &kft))
- goto out;
- if (!strncmp("lcdc", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, lcdc->full_name, &kft))
- goto out;
- }
-
- tilcdc_node_disable(slave);
-
- ovcs_id = 0;
- ret = of_overlay_apply(overlay, &ovcs_id);
- if (ret)
- pr_err("%s: Applying overlay changeset failed: %d\n",
- __func__, ret);
- else
- pr_info("%s: ti,tilcdc,slave node successfully converted\n",
- __func__);
-out:
- kfree_table_free(&kft);
- of_node_put(i2c);
- of_node_put(slave);
- of_node_put(lcdc);
- of_node_put(fragment);
-}
-
-static int __init tilcdc_slave_compat_init(void)
-{
- tilcdc_convert_slave_node();
- return 0;
-}
-
-subsys_initcall(tilcdc_slave_compat_init);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
deleted file mode 100644
index 693f8b0aea2d..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * DTS overlay for converting ti,tilcdc,slave binding to new binding.
- *
- * Copyright (C) 2015 Texas Instruments Inc.
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- */
-
-/*
- * target-path property values are simple tags that are replaced with
- * correct values in tildcdc_slave_compat.c. Some properties are also
- * copied over from the ti,tilcdc,slave node.
- */
-
-/dts-v1/;
-/ {
- fragment@0 {
- target-path = "i2c";
- __overlay__ {
- #address-cells = <1>;
- #size-cells = <0>;
- tda19988 {
- compatible = "nxp,tda998x";
- reg = <0x70>;
- status = "okay";
-
- port {
- hdmi_0: endpoint@0 {
- remote-endpoint = <&lcd_0>;
- };
- };
- };
- };
- };
-
- fragment@1 {
- target-path = "lcdc";
- __overlay__ {
- port {
- lcd_0: endpoint@0 {
- remote-endpoint = <&hdmi_0>;
- };
- };
- };
- };
-
- __local_fixups__ {
- fragment@0 {
- __overlay__ {
- tda19988 {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
- fragment@1 {
- __overlay__ {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
-};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
deleted file mode 100644
index 403d35d87d0b..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-/* This header declares the symbols defined in tilcdc_slave_compat.dts */
-
-#ifndef __TILCDC_SLAVE_COMPAT_H__
-#define __TILCDC_SLAVE_COMPAT_H__
-
-extern uint8_t __dtb_tilcdc_slave_compat_begin[];
-extern uint8_t __dtb_tilcdc_slave_compat_end[];
-
-#endif /* __TILCDC_SLAVE_COMPAT_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 1e2dfb1b1d6b..7e3643462a08 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -289,8 +289,6 @@ static const struct tilcdc_module_ops tfp410_module_ops = {
* Device:
*/
-static struct of_device_id tfp410_of_match[];
-
static int tfp410_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -375,7 +373,7 @@ static int tfp410_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tfp410_of_match[] = {
+static const struct of_device_id tfp410_of_match[] = {
{ .compatible = "ti,tilcdc,tfp410", },
{ },
};
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 551709e6b114..1a8a57cad431 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
@@ -128,7 +129,7 @@ tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
{
struct tinydrm_device *tdev = drm->dev_private;
- return drm_fb_cma_create_with_funcs(drm, file_priv, mode_cmd,
+ return drm_gem_fb_create_with_funcs(drm, file_priv, mode_cmd,
tdev->fb_funcs);
}
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index 177e9d861001..f41fc506ff87 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modes.h>
#include <drm/tinydrm/tinydrm.h>
@@ -50,7 +51,6 @@ static int tinydrm_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
.get_modes = tinydrm_connector_get_modes,
- .best_encoder = drm_atomic_helper_best_encoder,
};
static enum drm_connector_status
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(tinydrm_display_pipe_update);
* @pipe: Simple display pipe
* @plane_state: Plane state
*
- * This function uses drm_fb_cma_prepare_fb() to check if the plane FB has an
+ * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has an
* dma-buf attached, extracts the exclusive fence and attaches it to plane
* state for the atomic helper to wait on. Drivers can use this as their
* &drm_simple_display_pipe_funcs->prepare_fb callback.
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(tinydrm_display_pipe_update);
int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
- return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(tinydrm_display_pipe_prepare_fb);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 7e5bb7d6f655..6a83b3093254 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -31,7 +31,7 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
ret = regulator_enable(mipi->regulator);
if (ret) {
- dev_err(dev, "Failed to enable regulator %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to enable regulator %d\n", ret);
return ret;
}
@@ -42,7 +42,7 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
mipi_dbi_hw_reset(mipi);
ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
if (ret) {
- dev_err(dev, "Error sending command %d\n", ret);
+ DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
regulator_disable(mipi->regulator);
return ret;
}
@@ -163,7 +163,6 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct tinydrm_device *tdev;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
@@ -175,13 +174,13 @@ static int mi0283qt_probe(struct spi_device *spi)
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return PTR_ERR(mipi->reset);
}
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
- dev_err(dev, "Failed to get gpio 'dc'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
return PTR_ERR(dc);
}
@@ -215,20 +214,9 @@ static int mi0283qt_probe(struct spi_device *spi)
return ret;
}
- tdev = &mipi->tinydrm;
-
- ret = devm_tinydrm_register(tdev);
- if (ret)
- return ret;
-
spi_set_drvdata(spi, mipi);
- DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
- tdev->drm->driver->name, dev_name(dev),
- spi->max_speed_hz / 1000000,
- tdev->drm->primary->index);
-
- return 0;
+ return devm_tinydrm_register(&mipi->tinydrm);
}
static void mi0283qt_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 2caeabcd3458..d43e992ab432 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -9,6 +9,7 @@
* (at your option) any later version.
*/
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#include <linux/debugfs.h>
@@ -253,8 +254,8 @@ out_unlock:
}
static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = mipi_dbi_fb_dirty,
};
@@ -842,6 +843,8 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
return -ENOMEM;
}
+ DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
+
return 0;
}
EXPORT_SYMBOL(mipi_dbi_spi_init);
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 30dc97b3ff21..75740630c410 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,6 +26,7 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -473,8 +474,7 @@ static void repaper_get_temperature(struct repaper_epd *epd)
ret = thermal_zone_get_temp(epd->thermal, &temperature);
if (ret) {
- dev_err(&epd->spi->dev, "Failed to get temperature (%d)\n",
- ret);
+ DRM_DEV_ERROR(&epd->spi->dev, "Failed to get temperature (%d)\n", ret);
return;
}
@@ -629,15 +629,15 @@ out_unlock:
mutex_unlock(&tdev->dirty_lock);
if (ret)
- dev_err(fb->dev->dev, "Failed to update display (%d)\n", ret);
+ DRM_DEV_ERROR(fb->dev->dev, "Failed to update display (%d)\n", ret);
kfree(buf);
return ret;
}
static const struct drm_framebuffer_funcs repaper_fb_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = repaper_fb_dirty,
};
@@ -703,7 +703,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
}
if (!i) {
- dev_err(dev, "timeout waiting for panel to become ready.\n");
+ DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
power_off(epd);
return;
}
@@ -725,9 +725,9 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
ret = repaper_read_val(spi, 0x0f);
if (ret < 0 || !(ret & 0x80)) {
if (ret < 0)
- dev_err(dev, "failed to read chip (%d)\n", ret);
+ DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
else
- dev_err(dev, "panel is reported broken\n");
+ DRM_DEV_ERROR(dev, "panel is reported broken\n");
power_off(epd);
return;
}
@@ -767,7 +767,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
/* check DC/DC */
ret = repaper_read_val(spi, 0x0f);
if (ret < 0) {
- dev_err(dev, "failed to read chip (%d)\n", ret);
+ DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
power_off(epd);
return;
}
@@ -779,7 +779,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
}
if (!dc_ok) {
- dev_err(dev, "dc/dc failed\n");
+ DRM_DEV_ERROR(dev, "dc/dc failed\n");
power_off(epd);
return;
}
@@ -959,7 +959,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->panel_on)) {
ret = PTR_ERR(epd->panel_on);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'panel-on'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'panel-on'\n");
return ret;
}
@@ -967,7 +967,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->discharge)) {
ret = PTR_ERR(epd->discharge);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'discharge'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'discharge'\n");
return ret;
}
@@ -975,7 +975,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->reset)) {
ret = PTR_ERR(epd->reset);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return ret;
}
@@ -983,7 +983,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->busy)) {
ret = PTR_ERR(epd->busy);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'busy'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'busy'\n");
return ret;
}
@@ -991,8 +991,7 @@ static int repaper_probe(struct spi_device *spi)
&thermal_zone)) {
epd->thermal = thermal_zone_get_zone_by_name(thermal_zone);
if (IS_ERR(epd->thermal)) {
- dev_err(dev, "Failed to get thermal zone: %s\n",
- thermal_zone);
+ DRM_DEV_ERROR(dev, "Failed to get thermal zone: %s\n", thermal_zone);
return PTR_ERR(epd->thermal);
}
}
@@ -1033,7 +1032,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->border)) {
ret = PTR_ERR(epd->border);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'border'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'border'\n");
return ret;
}
@@ -1078,19 +1077,11 @@ static int repaper_probe(struct spi_device *spi)
return ret;
drm_mode_config_reset(tdev->drm);
-
- ret = devm_tinydrm_register(tdev);
- if (ret)
- return ret;
-
spi_set_drvdata(spi, tdev);
- DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
- tdev->drm->driver->name, dev_name(dev),
- spi->max_speed_hz / 1000000,
- tdev->drm->primary->index);
+ DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
- return 0;
+ return devm_tinydrm_register(tdev);
}
static void repaper_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index b439956a07f4..0a2c60da5c0e 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,6 +17,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -167,8 +168,8 @@ out_unlock:
}
static const struct drm_framebuffer_funcs st7586_fb_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = st7586_fb_dirty,
};
@@ -187,7 +188,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
mipi_dbi_hw_reset(mipi);
ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
if (ret) {
- dev_err(dev, "Error sending command %d\n", ret);
+ DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
return;
}
@@ -343,7 +344,6 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct tinydrm_device *tdev;
struct mipi_dbi *mipi;
struct gpio_desc *a0;
u32 rotation = 0;
@@ -355,13 +355,13 @@ static int st7586_probe(struct spi_device *spi)
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return PTR_ERR(mipi->reset);
}
a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
if (IS_ERR(a0)) {
- dev_err(dev, "Failed to get gpio 'a0'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'a0'\n");
return PTR_ERR(a0);
}
@@ -388,20 +388,9 @@ static int st7586_probe(struct spi_device *spi)
if (ret)
return ret;
- tdev = &mipi->tinydrm;
-
- ret = devm_tinydrm_register(tdev);
- if (ret)
- return ret;
-
spi_set_drvdata(spi, mipi);
- DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
- tdev->drm->driver->name, dev_name(dev),
- spi->max_speed_hz / 1000000,
- tdev->drm->primary->index);
-
- return 0;
+ return devm_tinydrm_register(&mipi->tinydrm);
}
static void st7586_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 180ce6296416..c088703777e2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
dma_fence_put(bo->moving);
- if (bo->resv == &bo->ttm_resv)
- reservation_object_fini(&bo->ttm_resv);
+ reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
if (bo->resv == &bo->ttm_resv)
return 0;
- reservation_object_init(&bo->ttm_resv);
BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
- if (r) {
+ if (r)
reservation_object_unlock(&bo->ttm_resv);
- reservation_object_fini(&bo->ttm_resv);
- }
return r;
}
@@ -440,28 +436,30 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
struct ttm_bo_global *glob = bo->glob;
int ret;
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort, if we fail to allocate memory for the
+ * fences block for the BO to become idle
+ */
+ reservation_object_wait_timeout_rcu(bo->resv, true, false,
+ 30 * HZ);
+ spin_lock(&glob->lru_lock);
+ goto error;
+ }
+
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, NULL);
-
if (!ret) {
- if (!ttm_bo_wait(bo, false, true)) {
+ if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
- return;
- }
-
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle and free it.
- */
- spin_unlock(&glob->lru_lock);
- ttm_bo_wait(bo, true, true);
ttm_bo_cleanup_memtype_use(bo);
return;
}
+
ttm_bo_flush_all_fences(bo);
/*
@@ -474,11 +472,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- if (bo->resv != &bo->ttm_resv)
- reservation_object_unlock(&bo->ttm_resv);
__ttm_bo_unreserve(bo);
}
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
+error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
@@ -1203,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
lockdep_assert_held(&bo->resv->lock.base);
} else {
bo->resv = &bo->ttm_resv;
- reservation_object_init(&bo->ttm_resv);
}
+ reservation_object_init(&bo->ttm_resv);
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
bo->priority = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c934ad5b3903..e7a519f1849b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
+ mutex_init(&fbo->wu_mutex);
fbo->moving = NULL;
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
@@ -587,7 +588,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long offset, size;
int ret;
- BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
map->bo = bo;
if (num_pages > bo->num_pages)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 29855be96be0..e96374990398 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -546,8 +546,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page,
- bool no_wait, bool interruptible)
+ struct page *page, uint64_t size)
{
struct ttm_mem_zone *zone = NULL;
@@ -564,11 +563,11 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
- return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
- interruptible);
+ return ttm_mem_global_alloc_zone(glob, zone, size, false, false);
}
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
+ uint64_t size)
{
struct ttm_mem_zone *zone = NULL;
@@ -579,10 +578,9 @@ void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
- ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+ ttm_mem_global_free_zone(glob, zone, size);
}
-
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 871599826773..44343a2bf55c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -81,6 +81,7 @@ struct ttm_page_pool {
char *name;
unsigned long nfrees;
unsigned long nrefills;
+ unsigned int order;
};
/**
@@ -95,7 +96,7 @@ struct ttm_pool_opts {
unsigned small;
};
-#define NUM_POOLS 4
+#define NUM_POOLS 6
/**
* struct ttm_pool_manager - Holds memory pools for fst allocation
@@ -122,6 +123,8 @@ struct ttm_pool_manager {
struct ttm_page_pool uc_pool;
struct ttm_page_pool wc_pool_dma32;
struct ttm_page_pool uc_pool_dma32;
+ struct ttm_page_pool wc_pool_huge;
+ struct ttm_page_pool uc_pool_huge;
} ;
};
};
@@ -220,6 +223,17 @@ static struct kobj_type ttm_pool_kobj_type = {
static struct ttm_pool_manager *_manager;
#ifndef CONFIG_X86
+static int set_pages_wb(struct page *page, int numpages)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ int i;
+
+ for (i = 0; i < numpages; i++)
+ unmap_page_from_agp(page++);
+#endif
+ return 0;
+}
+
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -256,8 +270,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
/**
* Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
- enum ttm_caching_state cstate)
+static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
+ enum ttm_caching_state cstate)
{
int pool_index;
@@ -269,20 +283,36 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
else
pool_index = 0x1;
- if (flags & TTM_PAGE_FLAG_DMA32)
+ if (flags & TTM_PAGE_FLAG_DMA32) {
+ if (huge)
+ return NULL;
pool_index |= 0x2;
+ } else if (huge) {
+ pool_index |= 0x4;
+ }
+
return &_manager->pools[pool_index];
}
/* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages)
+static void ttm_pages_put(struct page *pages[], unsigned npages,
+ unsigned int order)
{
- unsigned i;
- if (set_pages_array_wb(pages, npages))
- pr_err("Failed to set %d pages to wb!\n", npages);
- for (i = 0; i < npages; ++i)
- __free_page(pages[i]);
+ unsigned int i, pages_nr = (1 << order);
+
+ if (order == 0) {
+ if (set_pages_array_wb(pages, npages))
+ pr_err("Failed to set %d pages to wb!\n", npages);
+ }
+
+ for (i = 0; i < npages; ++i) {
+ if (order > 0) {
+ if (set_pages_wb(pages[i], pages_nr))
+ pr_err("Failed to set %d pages to wb!\n", pages_nr);
+ }
+ __free_pages(pages[i], order);
+ }
}
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -321,7 +351,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("Failed to allocate memory for pool free operation\n");
+ pr_debug("Failed to allocate memory for pool free operation\n");
return 0;
}
@@ -345,7 +375,7 @@ restart:
*/
spin_unlock_irqrestore(&pool->lock, irq_flags);
- ttm_pages_put(pages_to_free, freed_pages);
+ ttm_pages_put(pages_to_free, freed_pages, pool->order);
if (likely(nr_free != FREE_ALL_PAGES))
nr_free -= freed_pages;
@@ -380,7 +410,7 @@ restart:
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (freed_pages)
- ttm_pages_put(pages_to_free, freed_pages);
+ ttm_pages_put(pages_to_free, freed_pages, pool->order);
out:
if (pages_to_free != static_buf)
kfree(pages_to_free);
@@ -404,6 +434,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;
+ unsigned int nr_free_pool;
if (!mutex_trylock(&lock))
return SHRINK_STOP;
@@ -411,12 +442,19 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
+ unsigned page_nr;
+
if (shrink_pages == 0)
break;
+
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+ page_nr = (1 << pool->order);
/* OK to use static buffer since global mutex is held. */
- shrink_pages = ttm_page_pool_free(pool, nr_free, true);
- freed += nr_free - shrink_pages;
+ nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
+ shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
+ freed += (nr_free_pool - shrink_pages) << pool->order;
+ if (freed >= sc->nr_to_scan)
+ break;
}
mutex_unlock(&lock);
return freed;
@@ -428,9 +466,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned i;
unsigned long count = 0;
+ struct ttm_page_pool *pool;
- for (i = 0; i < NUM_POOLS; ++i)
- count += _manager->pools[i].npages;
+ for (i = 0; i < NUM_POOLS; ++i) {
+ pool = &_manager->pools[i];
+ count += (pool->npages << pool->order);
+ }
return count;
}
@@ -494,28 +535,29 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
* pages returned in pages array.
*/
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+ int ttm_flags, enum ttm_caching_state cstate,
+ unsigned count, unsigned order)
{
struct page **caching_array;
struct page *p;
int r = 0;
- unsigned i, cpages;
- unsigned max_cpages = min(count,
- (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+ unsigned i, j, cpages;
+ unsigned npages = 1 << order;
+ unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("Unable to allocate table for new pages\n");
+ pr_debug("Unable to allocate table for new pages\n");
return -ENOMEM;
}
for (i = 0, cpages = 0; i < count; ++i) {
- p = alloc_page(gfp_flags);
+ p = alloc_pages(gfp_flags, order);
if (!p) {
- pr_err("Unable to get page %u\n", i);
+ pr_debug("Unable to get page %u\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -531,14 +573,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
goto out;
}
+ list_add(&p->lru, pages);
+
#ifdef CONFIG_HIGHMEM
/* gfp flags of highmem page should never be dma32 so we
* we should be fine in such case
*/
- if (!PageHighMem(p))
+ if (PageHighMem(p))
+ continue;
+
#endif
- {
- caching_array[cpages++] = p;
+ for (j = 0; j < npages; ++j) {
+ caching_array[cpages++] = p++;
if (cpages == max_cpages) {
r = ttm_set_pages_caching(caching_array,
@@ -552,8 +598,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
cpages = 0;
}
}
-
- list_add(&p->lru, pages);
}
if (cpages) {
@@ -573,9 +617,9 @@ out:
* Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count,
- unsigned long *irq_flags)
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count, unsigned long *irq_flags)
{
struct page *p;
int r;
@@ -605,7 +649,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
INIT_LIST_HEAD(&new_pages);
r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
- cstate, alloc_size);
+ cstate, alloc_size, 0);
spin_lock_irqsave(&pool->lock, *irq_flags);
if (!r) {
@@ -613,7 +657,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- pr_err("Failed to fill pool (%p)\n", pool);
+ pr_debug("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &new_pages, lru) {
++cpages;
@@ -627,22 +671,25 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
}
/**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
*
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
*/
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count)
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count, unsigned order)
{
unsigned long irq_flags;
struct list_head *p;
unsigned i;
+ int r = 0;
spin_lock_irqsave(&pool->lock, irq_flags);
- ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+ if (!order)
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
+ &irq_flags);
if (count >= pool->npages) {
/* take all pages from the pool */
@@ -672,32 +719,128 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
count = 0;
out:
spin_unlock_irqrestore(&pool->lock, irq_flags);
- return count;
+
+ /* clear the pages coming from the pool if requested */
+ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ struct page *page;
+
+ list_for_each_entry(page, pages, lru) {
+ if (PageHighMem(page))
+ clear_highpage(page);
+ else
+ clear_page(page_address(page));
+ }
+ }
+
+ /* If pool didn't have enough pages allocate new one. */
+ if (count) {
+ gfp_t gfp_flags = pool->gfp_flags;
+
+ /* set zero flag for page allocation if required */
+ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* ttm_alloc_new_pages doesn't reference pool so we can run
+ * multiple requests in parallel.
+ **/
+ r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+ count, order);
+ }
+
+ return r;
}
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_page(pages[i]);
- pages[i] = NULL;
+ i = 0;
+ while (i < npages) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct page *p = pages[i];
+#endif
+ unsigned order = 0, j;
+
+ if (!pages[i]) {
+ ++i;
+ continue;
+ }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
+
+ if (j == HPAGE_PMD_NR)
+ order = HPAGE_PMD_ORDER;
+ }
+#endif
+
+ if (page_count(pages[i]) != 1)
+ pr_err("Erroneous page count. Leaking pages.\n");
+ __free_pages(pages[i], order);
+
+ j = 1 << order;
+ while (j) {
+ pages[i++] = NULL;
+ --j;
}
}
return;
}
+ i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (huge) {
+ unsigned max_size, n2free;
+
+ spin_lock_irqsave(&huge->lock, irq_flags);
+ while (i < npages) {
+ struct page *p = pages[i];
+ unsigned j;
+
+ if (!p)
+ break;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
+
+ if (j != HPAGE_PMD_NR)
+ break;
+
+ list_add_tail(&pages[i]->lru, &huge->list);
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = NULL;
+ huge->npages++;
+ }
+
+ /* Check that we don't go over the pool limit */
+ max_size = _manager->options.max_size;
+ max_size /= HPAGE_PMD_NR;
+ if (huge->npages > max_size)
+ n2free = huge->npages - max_size;
+ else
+ n2free = 0;
+ spin_unlock_irqrestore(&huge->lock, irq_flags);
+ if (n2free)
+ ttm_page_pool_free(huge, n2free, false);
+ }
+#endif
+
spin_lock_irqsave(&pool->lock, irq_flags);
- for (i = 0; i < npages; i++) {
+ while (i < npages) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
pr_err("Erroneous page count. Leaking pages.\n");
@@ -705,6 +848,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
pages[i] = NULL;
pool->npages++;
}
+ ++i;
}
/* Check that we don't go over the pool limit */
npages = 0;
@@ -727,82 +871,117 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
struct list_head plist;
struct page *p = NULL;
- gfp_t gfp_flags = GFP_USER;
- unsigned count;
+ unsigned count, first;
int r;
- /* set zero flag for page allocation if required */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
/* No pool for cached pages */
if (pool == NULL) {
+ gfp_t gfp_flags = GFP_USER;
+ unsigned i;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ unsigned j;
+#endif
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < npages; ++r) {
+ i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (!(gfp_flags & GFP_DMA32)) {
+ while (npages >= HPAGE_PMD_NR) {
+ gfp_t huge_flags = gfp_flags;
+
+ huge_flags |= GFP_TRANSHUGE;
+ huge_flags &= ~__GFP_MOVABLE;
+ huge_flags &= ~__GFP_COMP;
+ p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+ if (!p)
+ break;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = p++;
+
+ npages -= HPAGE_PMD_NR;
+ }
+ }
+#endif
+
+ first = i;
+ while (npages) {
p = alloc_page(gfp_flags);
if (!p) {
-
- pr_err("Unable to allocate page\n");
+ pr_debug("Unable to allocate page\n");
return -ENOMEM;
}
- pages[r] = p;
+ /* Swap the pages if we detect consecutive order */
+ if (i > first && pages[i - 1] == p - 1)
+ swap(p, pages[i - 1]);
+
+ pages[i++] = p;
+ --npages;
}
return 0;
}
- /* combine zero flag to pool flags */
- gfp_flags |= pool->gfp_flags;
-
- /* First we take pages from the pool */
- INIT_LIST_HEAD(&plist);
- npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
- /* clear the pages coming from the pool if requested */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (huge && npages >= HPAGE_PMD_NR) {
+ INIT_LIST_HEAD(&plist);
+ ttm_page_pool_get_pages(huge, &plist, flags, cstate,
+ npages / HPAGE_PMD_NR,
+ HPAGE_PMD_ORDER);
+
list_for_each_entry(p, &plist, lru) {
- if (PageHighMem(p))
- clear_highpage(p);
- else
- clear_page(page_address(p));
+ unsigned j;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[count++] = &p[j];
}
}
+#endif
- /* If pool didn't have enough pages allocate new one. */
- if (npages > 0) {
- /* ttm_alloc_new_pages doesn't reference pool so we can run
- * multiple requests in parallel.
- **/
- INIT_LIST_HEAD(&plist);
- r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
- if (r) {
- /* If there is any pages in the list put them back to
- * the pool. */
- pr_err("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
- return r;
- }
+ INIT_LIST_HEAD(&plist);
+ r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
+ npages - count, 0);
+
+ first = count;
+ list_for_each_entry(p, &plist, lru) {
+ struct page *tmp = p;
+
+ /* Swap the pages if we detect consecutive order */
+ if (count > first && pages[count - 1] == tmp - 1)
+ swap(tmp, pages[count - 1]);
+ pages[count++] = tmp;
+ }
+
+ if (r) {
+ /* If there is any pages in the list put them back to
+ * the pool.
+ */
+ pr_debug("Failed to allocate extra pages for large request\n");
+ ttm_put_pages(pages, count, flags, cstate);
+ return r;
}
return 0;
}
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
- char *name)
+ char *name, unsigned int order)
{
spin_lock_init(&pool->lock);
pool->fill_lock = false;
@@ -810,11 +989,17 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
pool->npages = pool->nfrees = 0;
pool->gfp_flags = flags;
pool->name = name;
+ pool->order = order;
}
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ unsigned order = HPAGE_PMD_ORDER;
+#else
+ unsigned order = 0;
+#endif
WARN_ON(_manager);
@@ -822,15 +1007,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+ ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
- ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+ ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
- GFP_USER | GFP_DMA32, "wc dma");
+ GFP_USER | GFP_DMA32, "wc dma", 0);
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
- GFP_USER | GFP_DMA32, "uc dma");
+ GFP_USER | GFP_DMA32, "uc dma", 0);
+
+ ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+ GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+ "wc huge", order);
+
+ ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+ GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+ , "uc huge", order);
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
@@ -873,17 +1066,16 @@ int ttm_pool_populate(struct ttm_tt *ttm)
if (ttm->state != tt_unpopulated)
return 0;
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_get_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- if (ret != 0) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
+ ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+ ttm->caching_state);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
+ PAGE_SIZE);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
@@ -908,18 +1100,89 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
unsigned i;
for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i]) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- ttm_put_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- }
+ if (!ttm->pages[i])
+ continue;
+
+ ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
+ PAGE_SIZE);
}
+ ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+ ttm->caching_state);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i, j;
+ int r;
+
+ r = ttm_pool_populate(&tt->ttm);
+ if (r)
+ return r;
+
+ for (i = 0; i < tt->ttm.num_pages; ++i) {
+ struct page *p = tt->ttm.pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+ if (++p != tt->ttm.pages[j])
+ break;
+
+ ++num_pages;
+ }
+
+ tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+ 0, num_pages * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, tt->dma_address[i])) {
+ while (i--) {
+ dma_unmap_page(dev, tt->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ tt->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+ return -EFAULT;
+ }
+
+ for (j = 1; j < num_pages; ++j) {
+ tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
+ ++i;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_populate_and_map_pages);
+
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i, j;
+
+ for (i = 0; i < tt->ttm.num_pages;) {
+ struct page *p = tt->ttm.pages[i];
+ size_t num_pages = 1;
+
+ if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+ ++i;
+ continue;
+ }
+
+ for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+ if (++p != tt->ttm.pages[j])
+ break;
+
+ ++num_pages;
+ }
+
+ dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ i += num_pages;
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+}
+EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
+
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
@@ -929,12 +1192,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
seq_printf(m, "No pool allocator running.\n");
return 0;
}
- seq_printf(m, "%6s %12s %13s %8s\n",
+ seq_printf(m, "%7s %12s %13s %8s\n",
h[0], h[1], h[2], h[3]);
for (i = 0; i < NUM_POOLS; ++i) {
p = &_manager->pools[i];
- seq_printf(m, "%6s %12ld %13ld %8d\n",
+ seq_printf(m, "%7s %12ld %13ld %8d\n",
p->name, p->nrefills,
p->nfrees, p->npages);
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 90ddbdca93bd..6b2627fe9bc1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -60,37 +60,32 @@
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define IS_UNDEFINED (0)
-#define IS_WC (1<<1)
-#define IS_UC (1<<2)
-#define IS_CACHED (1<<3)
-#define IS_DMA32 (1<<4)
+#define VADDR_FLAG_HUGE_POOL 1UL
enum pool_type {
- POOL_IS_UNDEFINED,
- POOL_IS_WC = IS_WC,
- POOL_IS_UC = IS_UC,
- POOL_IS_CACHED = IS_CACHED,
- POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
- POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
- POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+ IS_UNDEFINED = 0,
+ IS_WC = 1 << 1,
+ IS_UC = 1 << 2,
+ IS_CACHED = 1 << 3,
+ IS_DMA32 = 1 << 4,
+ IS_HUGE = 1 << 5
};
+
/*
- * The pool structure. There are usually six pools:
+ * The pool structure. There are up to nine pools:
* - generic (not restricted to DMA32):
* - write combined, uncached, cached.
* - dma32 (up to 2^32 - so up 4GB):
* - write combined, uncached, cached.
+ * - huge (not restricted to DMA32):
+ * - write combined, uncached, cached.
* for each 'struct device'. The 'cached' is for pages that are actively used.
* The other ones can be shrunk by the shrinker API if neccessary.
* @pools: The 'struct device->dma_pools' link.
* @type: Type of the pool
- * @lock: Protects the inuse_list and free_list from concurrnet access. Must be
+ * @lock: Protects the free_list from concurrnet access. Must be
* used with irqsave/irqrestore variants because pool allocator maybe called
* from delayed work.
- * @inuse_list: Pool of pages that are in use. The order is very important and
- * it is in the order that the TTM pages that are put back are in.
* @free_list: Pool of pages that are free to be used. No order requirements.
* @dev: The device that is associated with these pools.
* @size: Size used during DMA allocation.
@@ -107,7 +102,6 @@ struct dma_pool {
struct list_head pools; /* The 'struct device->dma_pools link */
enum pool_type type;
spinlock_t lock;
- struct list_head inuse_list;
struct list_head free_list;
struct device *dev;
unsigned size;
@@ -124,13 +118,14 @@ struct dma_pool {
* The accounting page keeping track of the allocated page along with
* the DMA address.
* @page_list: The link to the 'page_list' in 'struct dma_pool'.
- * @vaddr: The virtual address of the page
+ * @vaddr: The virtual address of the page and a flag if the page belongs to a
+ * huge pool
* @dma: The bus address of the page. If the page is not allocated
* via the DMA API, it will be -1.
*/
struct dma_page {
struct list_head page_list;
- void *vaddr;
+ unsigned long vaddr;
struct page *p;
dma_addr_t dma;
};
@@ -329,7 +324,8 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
dma_addr_t dma = d_page->dma;
- dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+ d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
+ dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
kfree(d_page);
d_page = NULL;
@@ -337,19 +333,22 @@ static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
struct dma_page *d_page;
+ void *vaddr;
d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
if (!d_page)
return NULL;
- d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
- &d_page->dma,
- pool->gfp_flags);
- if (d_page->vaddr) {
- if (is_vmalloc_addr(d_page->vaddr))
- d_page->p = vmalloc_to_page(d_page->vaddr);
+ vaddr = dma_alloc_coherent(pool->dev, pool->size, &d_page->dma,
+ pool->gfp_flags);
+ if (vaddr) {
+ if (is_vmalloc_addr(vaddr))
+ d_page->p = vmalloc_to_page(vaddr);
else
- d_page->p = virt_to_page(d_page->vaddr);
+ d_page->p = virt_to_page(vaddr);
+ d_page->vaddr = (unsigned long)vaddr;
+ if (pool->type & IS_HUGE)
+ d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
} else {
kfree(d_page);
d_page = NULL;
@@ -381,11 +380,40 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
}
/* set memory back to wb and free the pages. */
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+ struct page *page = d_page->p;
+ unsigned i, num_pages;
+ int ret;
+
+ /* Don't set WB on WB page pool. */
+ if (!(pool->type & IS_CACHED)) {
+ num_pages = pool->size / PAGE_SIZE;
+ for (i = 0; i < num_pages; ++i, ++page) {
+ ret = set_pages_array_wb(&page, 1);
+ if (ret) {
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
+ }
+ }
+ }
+
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+}
+
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
struct page *pages[], unsigned npages)
{
struct dma_page *d_page, *tmp;
+ if (pool->type & IS_HUGE) {
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
+ ttm_dma_page_put(pool, d_page);
+
+ return;
+ }
+
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
set_pages_array_wb(pages, npages))
@@ -398,17 +426,6 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
}
}
-static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
-{
- /* Don't set WB on WB page pool. */
- if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
-
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
-}
-
/*
* Free pages from pool.
*
@@ -446,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("%s: Failed to allocate memory for pool free operation\n",
+ pr_debug("%s: Failed to allocate memory for pool free operation\n",
pool->dev_name);
return 0;
}
@@ -577,8 +594,8 @@ static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
enum pool_type type)
{
- char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
- enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+ const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
+ enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
struct device_pools *sec_pool = NULL;
struct dma_pool *pool = NULL, **ptr;
unsigned i;
@@ -609,18 +626,24 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
sec_pool->pool = pool;
INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->inuse_list);
INIT_LIST_HEAD(&pool->pools);
spin_lock_init(&pool->lock);
pool->dev = dev;
pool->npages_free = pool->npages_in_use = 0;
pool->nfrees = 0;
pool->gfp_flags = flags;
- pool->size = PAGE_SIZE;
+ if (type & IS_HUGE)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ pool->size = HPAGE_PMD_SIZE;
+#else
+ BUG();
+#endif
+ else
+ pool->size = PAGE_SIZE;
pool->type = type;
pool->nrefills = 0;
p = pool->name;
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
if (type & t[i]) {
p += snprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
@@ -724,7 +747,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
struct dma_page *dma_p;
struct page *p;
int r = 0;
- unsigned i, cpages;
+ unsigned i, j, npages, cpages;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
@@ -732,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("%s: Unable to allocate table for new pages\n",
+ pr_debug("%s: Unable to allocate table for new pages\n",
pool->dev_name);
return -ENOMEM;
}
@@ -745,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool);
if (!dma_p) {
- pr_err("%s: Unable to get page %u\n",
- pool->dev_name, i);
+ pr_debug("%s: Unable to get page %u\n",
+ pool->dev_name, i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -762,28 +785,32 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
goto out;
}
p = dma_p->p;
+ list_add(&dma_p->page_list, d_pages);
+
#ifdef CONFIG_HIGHMEM
/* gfp flags of highmem page should never be dma32 so we
* we should be fine in such case
*/
- if (!PageHighMem(p))
+ if (PageHighMem(p))
+ continue;
#endif
- {
- caching_array[cpages++] = p;
+
+ npages = pool->size / PAGE_SIZE;
+ for (j = 0; j < npages; ++j) {
+ caching_array[cpages++] = p + j;
if (cpages == max_cpages) {
/* Note: Cannot hold the spinlock */
r = ttm_set_pages_caching(pool, caching_array,
- cpages);
+ cpages);
if (r) {
ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
+ pool, d_pages, caching_array,
+ cpages);
goto out;
}
cpages = 0;
}
}
- list_add(&dma_p->page_list, d_pages);
}
if (cpages) {
@@ -828,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
struct dma_page *d_page;
unsigned cpages = 0;
- pr_err("%s: Failed to fill %s pool (r:%d)!\n",
- pool->dev_name, pool->name, r);
+ pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
list_for_each_entry(d_page, &d_pages, page_list) {
cpages++;
@@ -871,6 +898,27 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
return r;
}
+static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ gfp_t gfp_flags;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ if (huge) {
+ gfp_flags |= GFP_TRANSHUGE;
+ gfp_flags &= ~__GFP_MOVABLE;
+ gfp_flags &= ~__GFP_COMP;
+ }
+
+ return gfp_flags;
+}
+
/*
* On success pages list will hold count number of correctly
* cached pages. On failure will hold the negative return value (-ENOMEM, etc).
@@ -879,33 +927,70 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
enum pool_type type;
unsigned i;
- gfp_t gfp_flags;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ i = 0;
+
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags = GFP_USER | GFP_DMA32;
- else
- gfp_flags = GFP_HIGHUSER;
- if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
+ goto skip_huge;
+
+ pool = ttm_dma_find_pool(dev, type | IS_HUGE);
+ if (!pool) {
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
+
+ pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
+ if (IS_ERR_OR_NULL(pool))
+ goto skip_huge;
+ }
+
+ while (num_pages >= HPAGE_PMD_NR) {
+ unsigned j;
+
+ ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (ret != 0)
+ break;
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ pool->size);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+
+ for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
+ ttm->pages[j] = ttm->pages[j - 1] + 1;
+ ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
+ PAGE_SIZE;
+ }
+
+ i += HPAGE_PMD_NR;
+ num_pages -= HPAGE_PMD_NR;
+ }
+
+skip_huge:
+#endif
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
+
pool = ttm_dma_pool_init(dev, gfp_flags, type);
- if (IS_ERR_OR_NULL(pool)) {
+ if (IS_ERR_OR_NULL(pool))
return -ENOMEM;
- }
}
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- for (i = 0; i < ttm->num_pages; ++i) {
+ while (num_pages) {
ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
if (ret != 0) {
ttm_dma_unpopulate(ttm_dma, dev);
@@ -913,11 +998,14 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
+ pool->size);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
+
+ ++i;
+ --num_pages;
}
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
@@ -941,10 +1029,33 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
struct dma_page *d_page, *next;
enum pool_type type;
bool is_cached = false;
- unsigned count = 0, i, npages = 0;
+ unsigned count, i, npages = 0;
unsigned long irq_flags;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ pool = ttm_dma_find_pool(dev, type | IS_HUGE);
+ if (pool) {
+ count = 0;
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+ page_list) {
+ if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
+ continue;
+
+ count++;
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ d_page->p, pool->size);
+ ttm_dma_page_put(pool, d_page);
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ pool->npages_in_use -= count;
+ pool->nfrees += count;
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ }
+#endif
+
pool = ttm_dma_find_pool(dev, type);
if (!pool)
return;
@@ -953,6 +1064,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
ttm_to_type(ttm->page_flags, tt_cached)) == pool);
/* make sure pages array match list and count number of pages */
+ count = 0;
list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
ttm->pages[count] = d_page->p;
count++;
@@ -978,13 +1090,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
if (is_cached) {
list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
- d_page->p);
+ d_page->p, pool->size);
ttm_dma_page_put(pool, d_page);
}
} else {
for (i = 0; i < count; i++) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
+ ttm->pages[i], pool->size);
}
}
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
new file mode 100644
index 000000000000..c5f03bf4570c
--- /dev/null
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -0,0 +1,16 @@
+config DRM_TVE200
+ tristate "DRM Support for Faraday TV Encoder TVE200"
+ depends on DRM
+ depends on CMA
+ depends on ARM || COMPILE_TEST
+ depends on OF
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ help
+ Choose this option for DRM support for the Faraday TV Encoder
+ TVE200 Controller.
+ If M is selected the module will be called tve200_drm.
diff --git a/drivers/gpu/drm/tve200/Makefile b/drivers/gpu/drm/tve200/Makefile
new file mode 100644
index 000000000000..6b7a6a1dcbf8
--- /dev/null
+++ b/drivers/gpu/drm/tve200/Makefile
@@ -0,0 +1,4 @@
+tve200_drm-y += tve200_display.o \
+ tve200_drv.o
+
+obj-$(CONFIG_DRM_TVE200) += tve200_drm.o
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
new file mode 100644
index 000000000000..2c668bd6d997
--- /dev/null
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on sources as follows:
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Copyright (C) 2007 Amos Lee <amos_lee@storlinksemi.com>
+ * Copyright (C) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2017 Eric Anholt
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms of
+ * such GNU licence.
+ */
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "tve200_drm.h"
+
+irqreturn_t tve200_irq(int irq, void *data)
+{
+ struct tve200_drm_dev_private *priv = data;
+ u32 stat;
+ u32 val;
+
+ stat = readl(priv->regs + TVE200_INT_STAT);
+
+ if (!stat)
+ return IRQ_NONE;
+
+ /*
+ * Vblank IRQ
+ *
+ * The hardware is a bit tilted: the line stays high after clearing
+ * the vblank IRQ, firing many more interrupts. We counter this
+ * by toggling the IRQ back and forth from firing at vblank and
+ * firing at start of active image, which works around the problem
+ * since those occur strictly in sequence, and we get two IRQs for each
+ * frame, one at start of Vblank (that we make call into the CRTC) and
+ * another one at the start of the image (that we discard).
+ */
+ if (stat & TVE200_INT_V_STATUS) {
+ val = readl(priv->regs + TVE200_CTRL);
+ /* We have an actual start of vsync */
+ if (!(val & TVE200_VSTSTYPE_BITS)) {
+ drm_crtc_handle_vblank(&priv->pipe.crtc);
+ /* Toggle trigger to start of active image */
+ val |= TVE200_VSTSTYPE_VAI;
+ } else {
+ /* Toggle trigger back to start of vsync */
+ val &= ~TVE200_VSTSTYPE_BITS;
+ }
+ writel(val, priv->regs + TVE200_CTRL);
+ } else
+ dev_err(priv->drm->dev, "stray IRQ %08x\n", stat);
+
+ /* Clear the interrupt once done */
+ writel(stat, priv->regs + TVE200_INT_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static int tve200_display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *pstate,
+ struct drm_crtc_state *cstate)
+{
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *old_fb = pipe->plane.state->fb;
+ struct drm_framebuffer *fb = pstate->fb;
+
+ /*
+ * We support these specific resolutions and nothing else.
+ */
+ if (!(mode->hdisplay == 352 && mode->vdisplay == 240) && /* SIF(525) */
+ !(mode->hdisplay == 352 && mode->vdisplay == 288) && /* CIF(625) */
+ !(mode->hdisplay == 640 && mode->vdisplay == 480) && /* VGA */
+ !(mode->hdisplay == 720 && mode->vdisplay == 480) && /* D1 */
+ !(mode->hdisplay == 720 && mode->vdisplay == 576)) { /* D1 */
+ DRM_DEBUG_KMS("unsupported display mode (%u x %u)\n",
+ mode->hdisplay, mode->vdisplay);
+ return -EINVAL;
+ }
+
+ if (fb) {
+ u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+
+ /* FB base address must be dword aligned. */
+ if (offset & 3) {
+ DRM_DEBUG_KMS("FB not 32-bit aligned\n");
+ return -EINVAL;
+ }
+
+ /*
+ * There's no pitch register, the mode's hdisplay
+ * controls this.
+ */
+ if (fb->pitches[0] != mode->hdisplay * fb->format->cpp[0]) {
+ DRM_DEBUG_KMS("can't handle pitches\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We can't change the FB format in a flicker-free
+ * manner (and only update it during CRTC enable).
+ */
+ if (old_fb && old_fb->format != fb->format)
+ cstate->mode_changed = true;
+ }
+
+ return 0;
+}
+
+static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *cstate)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_device *drm = crtc->dev;
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_connector *connector = priv->connector;
+ u32 format = fb->format->format;
+ u32 ctrl1 = 0;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Function 1 */
+ ctrl1 |= TVE200_CTRL_CSMODE;
+ /* Interlace mode for CCIR656: parameterize? */
+ ctrl1 |= TVE200_CTRL_NONINTERLACE;
+ /* 32 words per burst */
+ ctrl1 |= TVE200_CTRL_BURST_32_WORDS;
+ /* 16 retries */
+ ctrl1 |= TVE200_CTRL_RETRYCNT_16;
+ /* NTSC mode: parametrize? */
+ ctrl1 |= TVE200_CTRL_NTSC;
+
+ /* Vsync IRQ at start of Vsync at first */
+ ctrl1 |= TVE200_VSTSTYPE_VSYNC;
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ ctrl1 |= TVE200_CTRL_TVCLKP;
+
+ if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */
+ (mode->hdisplay == 352 && mode->vdisplay == 288)) { /* CIF(625) */
+ ctrl1 |= TVE200_CTRL_IPRESOL_CIF;
+ dev_info(drm->dev, "CIF mode\n");
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ ctrl1 |= TVE200_CTRL_IPRESOL_VGA;
+ dev_info(drm->dev, "VGA mode\n");
+ } else if ((mode->hdisplay == 720 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 720 && mode->vdisplay == 576)) {
+ ctrl1 |= TVE200_CTRL_IPRESOL_D1;
+ dev_info(drm->dev, "D1 mode\n");
+ }
+
+ if (format & DRM_FORMAT_BIG_ENDIAN) {
+ ctrl1 |= TVE200_CTRL_BBBP;
+ format &= ~DRM_FORMAT_BIG_ENDIAN;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ ctrl1 |= TVE200_IPDMOD_RGB888;
+ break;
+ case DRM_FORMAT_RGB565:
+ ctrl1 |= TVE200_IPDMOD_RGB565;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ ctrl1 |= TVE200_IPDMOD_RGB555;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ ctrl1 |= TVE200_IPDMOD_RGB888 | TVE200_BGR;
+ break;
+ case DRM_FORMAT_BGR565:
+ ctrl1 |= TVE200_IPDMOD_RGB565 | TVE200_BGR;
+ break;
+ case DRM_FORMAT_XBGR1555:
+ ctrl1 |= TVE200_IPDMOD_RGB555 | TVE200_BGR;
+ break;
+ case DRM_FORMAT_YUYV:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_CR0Y1CB0Y0;
+ break;
+ case DRM_FORMAT_YVYU:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_CB0Y1CR0Y0;
+ break;
+ case DRM_FORMAT_UYVY:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_Y1CR0Y0CB0;
+ break;
+ case DRM_FORMAT_VYUY:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_Y1CB0Y0CR0;
+ break;
+ case DRM_FORMAT_YUV420:
+ ctrl1 |= TVE200_CTRL_YUV420;
+ ctrl1 |= TVE200_IPDMOD_YUV420;
+ break;
+ default:
+ dev_err(drm->dev, "Unknown FB format 0x%08x\n",
+ fb->format->format);
+ break;
+ }
+
+ ctrl1 |= TVE200_TVEEN;
+
+ /* Turn it on */
+ writel(ctrl1, priv->regs + TVE200_CTRL);
+
+ drm_crtc_vblank_on(crtc);
+}
+
+static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable and Power Down */
+ writel(0, priv->regs + TVE200_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static void tve200_display_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_pstate)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_plane_state *pstate = plane->state;
+ struct drm_framebuffer *fb = pstate->fb;
+
+ if (fb) {
+ /* For RGB, the Y component is used as base address */
+ writel(drm_fb_cma_get_gem_addr(fb, pstate, 0),
+ priv->regs + TVE200_Y_FRAME_BASE_ADDR);
+
+ /* For three plane YUV we need two more addresses */
+ if (fb->format->format == DRM_FORMAT_YUV420) {
+ writel(drm_fb_cma_get_gem_addr(fb, pstate, 1),
+ priv->regs + TVE200_U_FRAME_BASE_ADDR);
+ writel(drm_fb_cma_get_gem_addr(fb, pstate, 2),
+ priv->regs + TVE200_V_FRAME_BASE_ADDR);
+ }
+ }
+
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+int tve200_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
+ return 0;
+}
+
+void tve200_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ writel(0, priv->regs + TVE200_INT_EN);
+}
+
+static int tve200_display_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
+ .check = tve200_display_check,
+ .enable = tve200_display_enable,
+ .disable = tve200_display_disable,
+ .update = tve200_display_update,
+ .prepare_fb = tve200_display_prepare_fb,
+};
+
+int tve200_display_init(struct drm_device *drm)
+{
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+ int ret;
+ static const u32 formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ /*
+ * The controller actually supports any YCbCr ordering,
+ * for packed YCbCr. This just lists the orderings that
+ * DRM supports.
+ */
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ /* This uses three planes */
+ DRM_FORMAT_YUV420,
+ };
+
+ ret = drm_simple_display_pipe_init(drm, &priv->pipe,
+ &tve200_display_funcs,
+ formats, ARRAY_SIZE(formats),
+ NULL,
+ priv->connector);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tve200/tve200_drm.h b/drivers/gpu/drm/tve200/tve200_drm.h
new file mode 100644
index 000000000000..628b79324c48
--- /dev/null
+++ b/drivers/gpu/drm/tve200/tve200_drm.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on sources as follows:
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Copyright (C) 2007 Amos Lee <amos_lee@storlinksemi.com>
+ * Copyright (C) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2017 Eric Anholt
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms of
+ * such GNU licence.
+ */
+
+#ifndef _TVE200_DRM_H_
+#define _TVE200_DRM_H_
+
+/* Bits 2-31 are valid physical base addresses */
+#define TVE200_Y_FRAME_BASE_ADDR 0x00
+#define TVE200_U_FRAME_BASE_ADDR 0x04
+#define TVE200_V_FRAME_BASE_ADDR 0x08
+
+#define TVE200_INT_EN 0x0C
+#define TVE200_INT_CLR 0x10
+#define TVE200_INT_STAT 0x14
+#define TVE200_INT_BUS_ERR BIT(7)
+#define TVE200_INT_V_STATUS BIT(6) /* vertical blank */
+#define TVE200_INT_V_NEXT_FRAME BIT(5)
+#define TVE200_INT_U_NEXT_FRAME BIT(4)
+#define TVE200_INT_Y_NEXT_FRAME BIT(3)
+#define TVE200_INT_V_FIFO_UNDERRUN BIT(2)
+#define TVE200_INT_U_FIFO_UNDERRUN BIT(1)
+#define TVE200_INT_Y_FIFO_UNDERRUN BIT(0)
+#define TVE200_FIFO_UNDERRUNS (TVE200_INT_V_FIFO_UNDERRUN | \
+ TVE200_INT_U_FIFO_UNDERRUN | \
+ TVE200_INT_Y_FIFO_UNDERRUN)
+
+#define TVE200_CTRL 0x18
+#define TVE200_CTRL_YUV420 BIT(31)
+#define TVE200_CTRL_CSMODE BIT(30)
+#define TVE200_CTRL_NONINTERLACE BIT(28) /* 0 = non-interlace CCIR656 */
+#define TVE200_CTRL_TVCLKP BIT(27) /* Inverted clock phase */
+/* Bits 24..26 define the burst size after arbitration on the bus */
+#define TVE200_CTRL_BURST_4_WORDS (0 << 24)
+#define TVE200_CTRL_BURST_8_WORDS (1 << 24)
+#define TVE200_CTRL_BURST_16_WORDS (2 << 24)
+#define TVE200_CTRL_BURST_32_WORDS (3 << 24)
+#define TVE200_CTRL_BURST_64_WORDS (4 << 24)
+#define TVE200_CTRL_BURST_128_WORDS (5 << 24)
+#define TVE200_CTRL_BURST_256_WORDS (6 << 24)
+#define TVE200_CTRL_BURST_0_WORDS (7 << 24) /* ? */
+/*
+ * Bits 16..23 is the retry count*16 before issueing a new AHB transfer
+ * on the AHB bus.
+ */
+#define TVE200_CTRL_RETRYCNT_MASK GENMASK(23, 16)
+#define TVE200_CTRL_RETRYCNT_16 (1 << 16)
+#define TVE200_CTRL_BBBP BIT(15) /* 0 = little-endian */
+/* Bits 12..14 define the YCbCr ordering */
+#define TVE200_CTRL_YCBCRODR_CB0Y0CR0Y1 (0 << 12)
+#define TVE200_CTRL_YCBCRODR_Y0CB0Y1CR0 (1 << 12)
+#define TVE200_CTRL_YCBCRODR_CR0Y0CB0Y1 (2 << 12)
+#define TVE200_CTRL_YCBCRODR_Y1CB0Y0CR0 (3 << 12)
+#define TVE200_CTRL_YCBCRODR_CR0Y1CB0Y0 (4 << 12)
+#define TVE200_CTRL_YCBCRODR_Y1CR0Y0CB0 (5 << 12)
+#define TVE200_CTRL_YCBCRODR_CB0Y1CR0Y0 (6 << 12)
+#define TVE200_CTRL_YCBCRODR_Y0CR0Y1CB0 (7 << 12)
+/* Bits 10..11 define the input resolution (framebuffer size) */
+#define TVE200_CTRL_IPRESOL_CIF (0 << 10)
+#define TVE200_CTRL_IPRESOL_VGA (1 << 10)
+#define TVE200_CTRL_IPRESOL_D1 (2 << 10)
+#define TVE200_CTRL_NTSC BIT(9) /* 0 = PAL, 1 = NTSC */
+#define TVE200_CTRL_INTERLACE BIT(8) /* 1 = interlace, only for D1 */
+#define TVE200_IPDMOD_RGB555 (0 << 6) /* TVE200_CTRL_YUV420 = 0 */
+#define TVE200_IPDMOD_RGB565 (1 << 6)
+#define TVE200_IPDMOD_RGB888 (2 << 6)
+#define TVE200_IPDMOD_YUV420 (2 << 6) /* TVE200_CTRL_YUV420 = 1 */
+#define TVE200_IPDMOD_YUV422 (3 << 6)
+/* Bits 4 & 5 define when to fire the vblank IRQ */
+#define TVE200_VSTSTYPE_VSYNC (0 << 4) /* start of vsync */
+#define TVE200_VSTSTYPE_VBP (1 << 4) /* start of v back porch */
+#define TVE200_VSTSTYPE_VAI (2 << 4) /* start of v active image */
+#define TVE200_VSTSTYPE_VFP (3 << 4) /* start of v front porch */
+#define TVE200_VSTSTYPE_BITS (BIT(4) | BIT(5))
+#define TVE200_BGR BIT(1) /* 0 = RGB, 1 = BGR */
+#define TVE200_TVEEN BIT(0) /* Enable TVE block */
+
+#define TVE200_CTRL_2 0x1c
+#define TVE200_CTRL_3 0x20
+
+#define TVE200_CTRL_4 0x24
+#define TVE200_CTRL_4_RESET BIT(0) /* triggers reset of TVE200 */
+
+#include <drm/drm_gem.h>
+#include <drm/drm_simple_kms_helper.h>
+
+struct tve200_drm_dev_private {
+ struct drm_device *drm;
+
+ struct drm_connector *connector;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ struct drm_simple_display_pipe pipe;
+ struct drm_fbdev_cma *fbdev;
+
+ void *regs;
+ struct clk *pclk;
+ struct clk *clk;
+};
+
+#define to_tve200_connector(x) \
+ container_of(x, struct tve200_drm_connector, connector)
+
+int tve200_display_init(struct drm_device *dev);
+int tve200_enable_vblank(struct drm_device *drm, unsigned int crtc);
+void tve200_disable_vblank(struct drm_device *drm, unsigned int crtc);
+irqreturn_t tve200_irq(int irq, void *data);
+int tve200_connector_init(struct drm_device *dev);
+int tve200_encoder_init(struct drm_device *dev);
+int tve200_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _TVE200_DRM_H_ */
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
new file mode 100644
index 000000000000..bd6c9454d767
--- /dev/null
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on sources as follows:
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Copyright (C) 2007 Amos Lee <amos_lee@storlinksemi.com>
+ * Copyright (C) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2017 Eric Anholt
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms of
+ * such GNU licence.
+ */
+
+/**
+ * DOC: Faraday TV Encoder TVE200 DRM Driver
+ *
+ * The Faraday TV Encoder TVE200 is also known as the Gemini TV Interface
+ * Controller (TVC) and is found in the Gemini Chipset from Storlink
+ * Semiconductor (later Storm Semiconductor, later Cortina Systems)
+ * but also in the Grain Media GM8180 chipset. On the Gemini the module
+ * is connected to 8 data lines and a single clock line, comprising an
+ * 8-bit BT.656 interface.
+ *
+ * This is a very basic YUV display driver. The datasheet specifies that
+ * it supports the ITU BT.656 standard. It requires a 27 MHz clock which is
+ * the hallmark of any TV encoder supporting both PAL and NTSC.
+ *
+ * This driver exposes a standard KMS interface for this TV encoder.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-buf.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+#include <drm/drm_bridge.h>
+
+#include "tve200_drm.h"
+
+#define DRIVER_DESC "DRM module for Faraday TVE200"
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tve200_modeset_init(struct drm_device *dev)
+{
+ struct drm_mode_config *mode_config;
+ struct tve200_drm_dev_private *priv = dev->dev_private;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ int ret = 0;
+
+ drm_mode_config_init(dev);
+ mode_config = &dev->mode_config;
+ mode_config->funcs = &mode_config_funcs;
+ mode_config->min_width = 352;
+ mode_config->max_width = 720;
+ mode_config->min_height = 240;
+ mode_config->max_height = 576;
+
+ ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
+ 0, 0, &panel, &bridge);
+ if (ret && ret != -ENODEV)
+ return ret;
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto out_bridge;
+ }
+ } else {
+ /*
+ * TODO: when we are using a different bridge than a panel
+ * (such as a dumb VGA connector) we need to devise a different
+ * method to get the connector out of the bridge.
+ */
+ dev_err(dev->dev, "the bridge is not a panel\n");
+ goto out_bridge;
+ }
+
+ ret = tve200_display_init(dev);
+ if (ret) {
+ dev_err(dev->dev, "failed to init display\n");
+ goto out_bridge;
+ }
+
+ ret = drm_simple_display_pipe_attach_bridge(&priv->pipe,
+ bridge);
+ if (ret) {
+ dev_err(dev->dev, "failed to attach bridge\n");
+ goto out_bridge;
+ }
+
+ priv->panel = panel;
+ priv->connector = panel->connector;
+ priv->bridge = bridge;
+
+ dev_info(dev->dev, "attached to panel %s\n",
+ dev_name(panel->dev));
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret) {
+ dev_err(dev->dev, "failed to init vblank\n");
+ goto out_bridge;
+ }
+
+ drm_mode_config_reset(dev);
+
+ /*
+ * Passing in 16 here will make the RGB656 mode the default
+ * Passing in 32 will use XRGB8888 mode
+ */
+ priv->fbdev = drm_fbdev_cma_init(dev, 16,
+ dev->mode_config.num_connector);
+ drm_kms_helper_poll_init(dev);
+
+ goto finish;
+
+out_bridge:
+ if (panel)
+ drm_panel_bridge_remove(bridge);
+ drm_mode_config_cleanup(dev);
+finish:
+ return ret;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+
+static void tve200_lastclose(struct drm_device *dev)
+{
+ struct tve200_drm_dev_private *priv = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static struct drm_driver tve200_drm_driver = {
+ .driver_features =
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ .lastclose = tve200_lastclose,
+ .ioctls = NULL,
+ .fops = &drm_fops,
+ .name = "tve200",
+ .desc = DRIVER_DESC,
+ .date = "20170703",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ .enable_vblank = tve200_enable_vblank,
+ .disable_vblank = tve200_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+};
+
+static int tve200_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tve200_drm_dev_private *priv;
+ struct drm_device *drm;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ drm = drm_dev_alloc(&tve200_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+ platform_set_drvdata(pdev, drm);
+ priv->drm = drm;
+ drm->dev_private = priv;
+
+ /* Clock the silicon so we can access the registers */
+ priv->pclk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(priv->pclk)) {
+ dev_err(dev, "unable to get PCLK\n");
+ ret = PTR_ERR(priv->pclk);
+ goto dev_unref;
+ }
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dev, "failed to enable PCLK\n");
+ goto dev_unref;
+ }
+
+ /* This clock is for the pixels (27MHz) */
+ priv->clk = devm_clk_get(dev, "TVE");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "unable to get TVE clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto clk_disable;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "%s failed mmio\n", __func__);
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ /* turn off interrupts before requesting the irq */
+ writel(0, priv->regs + TVE200_INT_EN);
+
+ ret = devm_request_irq(dev, irq, tve200_irq, 0, "tve200", priv);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", ret);
+ goto clk_disable;
+ }
+
+ ret = tve200_modeset_init(drm);
+ if (ret)
+ goto clk_disable;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto clk_disable;
+
+ return 0;
+
+clk_disable:
+ clk_disable_unprepare(priv->pclk);
+dev_unref:
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static int tve200_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+ if (priv->fbdev)
+ drm_fbdev_cma_fini(priv->fbdev);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
+ drm_mode_config_cleanup(drm);
+ clk_disable_unprepare(priv->pclk);
+ drm_dev_unref(drm);
+
+ return 0;
+}
+
+static const struct of_device_id tve200_of_match[] = {
+ {
+ .compatible = "faraday,tve200",
+ },
+ {},
+};
+
+static struct platform_driver tve200_driver = {
+ .driver = {
+ .name = "tve200",
+ .of_match_table = of_match_ptr(tve200_of_match),
+ },
+ .probe = tve200_probe,
+ .remove = tve200_remove,
+};
+module_platform_driver(tve200_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 9f9a49748d17..c3dc1fd20cb4 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -14,70 +14,95 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
+#include "udl_connector.h"
#include "udl_drv.h"
-/* dummy connector to just get EDID,
- all UDL appear to have a DVI-D */
-
-static u8 *udl_get_edid(struct udl_device *udl)
+static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
+ u8 *buff)
{
- u8 *block;
- char *rbuf;
int ret, i;
+ u8 *read_buff;
- block = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (block == NULL)
- return NULL;
-
- rbuf = kmalloc(2, GFP_KERNEL);
- if (rbuf == NULL)
- goto error;
+ read_buff = kmalloc(2, GFP_KERNEL);
+ if (!read_buff)
+ return false;
for (i = 0; i < EDID_LENGTH; i++) {
+ int bval = (i + block_idx * EDID_LENGTH) << 8;
ret = usb_control_msg(udl->udev,
- usb_rcvctrlpipe(udl->udev, 0), (0x02),
- (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
- HZ);
+ usb_rcvctrlpipe(udl->udev, 0),
+ (0x02), (0x80 | (0x02 << 5)), bval,
+ 0xA1, read_buff, 2, HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
- goto error;
+ kfree(read_buff);
+ return false;
}
- block[i] = rbuf[1];
+ buff[i] = read_buff[1];
}
- kfree(rbuf);
- return block;
-
-error:
- kfree(block);
- kfree(rbuf);
- return NULL;
+ kfree(read_buff);
+ return true;
}
-static int udl_get_modes(struct drm_connector *connector)
+static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
+ int *result_buff_size)
{
- struct udl_device *udl = connector->dev->dev_private;
- struct edid *edid;
- int ret;
-
- edid = (struct edid *)udl_get_edid(udl);
- if (!edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
- return 0;
+ int i, extensions;
+ u8 *block_buff = NULL, *buff_ptr;
+
+ block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (block_buff == NULL)
+ return false;
+
+ if (udl_get_edid_block(udl, 0, block_buff) &&
+ memchr_inv(block_buff, 0, EDID_LENGTH)) {
+ extensions = ((struct edid *)block_buff)->extensions;
+ if (extensions > 0) {
+ /* we have to read all extensions one by one */
+ *result_buff_size = EDID_LENGTH * (extensions + 1);
+ *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
+ buff_ptr = *result_buff;
+ if (buff_ptr == NULL) {
+ kfree(block_buff);
+ return false;
+ }
+ memcpy(buff_ptr, block_buff, EDID_LENGTH);
+ kfree(block_buff);
+ buff_ptr += EDID_LENGTH;
+ for (i = 1; i < extensions; ++i) {
+ if (udl_get_edid_block(udl, i, buff_ptr)) {
+ buff_ptr += EDID_LENGTH;
+ } else {
+ kfree(*result_buff);
+ *result_buff = NULL;
+ return false;
+ }
+ }
+ return true;
+ }
+ /* we have only base edid block */
+ *result_buff = block_buff;
+ *result_buff_size = EDID_LENGTH;
+ return true;
}
- /*
- * We only read the main block, but if the monitor reports extension
- * blocks then the drm edid code expects them to be present, so patch
- * the extension count to 0.
- */
- edid->checksum += edid->extensions;
- edid->extensions = 0;
-
- drm_mode_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- return ret;
+ kfree(block_buff);
+
+ return false;
+}
+
+static int udl_get_modes(struct drm_connector *connector)
+{
+ struct udl_drm_connector *udl_connector =
+ container_of(connector,
+ struct udl_drm_connector,
+ connector);
+
+ drm_mode_connector_update_edid_property(connector, udl_connector->edid);
+ if (udl_connector->edid)
+ return drm_add_edid_modes(connector, udl_connector->edid);
+ return 0;
}
static int udl_mode_valid(struct drm_connector *connector,
@@ -96,8 +121,26 @@ static int udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- if (drm_dev_is_unplugged(connector->dev))
+ u8 *edid_buff = NULL;
+ int edid_buff_size = 0;
+ struct udl_device *udl = connector->dev->dev_private;
+ struct udl_drm_connector *udl_connector =
+ container_of(connector,
+ struct udl_drm_connector,
+ connector);
+
+ /* cleanup previous edid */
+ if (udl_connector->edid != NULL) {
+ kfree(udl_connector->edid);
+ udl_connector->edid = NULL;
+ }
+
+
+ if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
return connector_status_disconnected;
+
+ udl_connector->edid = (struct edid *)edid_buff;
+
return connector_status_connected;
}
@@ -105,7 +148,7 @@ static struct drm_encoder*
udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
}
static int udl_connector_set_property(struct drm_connector *connector,
@@ -117,8 +160,14 @@ static int udl_connector_set_property(struct drm_connector *connector,
static void udl_connector_destroy(struct drm_connector *connector)
{
+ struct udl_drm_connector *udl_connector =
+ container_of(connector,
+ struct udl_drm_connector,
+ connector);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
+ kfree(udl_connector->edid);
kfree(connector);
}
@@ -138,17 +187,22 @@ static const struct drm_connector_funcs udl_connector_funcs = {
int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
{
+ struct udl_drm_connector *udl_connector;
struct drm_connector *connector;
- connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
- if (!connector)
+ udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
+ if (!udl_connector)
return -ENOMEM;
- drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
+ connector = &udl_connector->connector;
+ drm_connector_init(dev, connector, &udl_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_HPD |
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_connector.h b/drivers/gpu/drm/udl/udl_connector.h
new file mode 100644
index 000000000000..0fb0db5c4612
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_connector.h
@@ -0,0 +1,13 @@
+#ifndef __UDL_CONNECTOR_H__
+#define __UDL_CONNECTOR_H__
+
+#include <drm/drm_crtc.h>
+
+struct udl_drm_connector {
+ struct drm_connector connector;
+ /* last udl_detect edid */
+ struct edid *edid;
+};
+
+
+#endif //__UDL_CONNECTOR_H__
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 31421b6b586e..3c45a3064726 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -14,6 +14,9 @@
static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
+ struct drm_device *dev = usb_get_intfdata(interface);
+
+ drm_kms_helper_poll_disable(dev);
return 0;
}
@@ -21,6 +24,7 @@ static int udl_usb_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ drm_kms_helper_poll_enable(dev);
udl_modeset_restore(dev);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0328b2c7b210..f1ec4528a73e 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -11,6 +11,7 @@
* more details.
*/
#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
@@ -350,6 +351,8 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_fb;
+ drm_kms_helper_poll_init(dev);
+
return 0;
err_fb:
udl_fbdev_cleanup(dev);
@@ -371,6 +374,8 @@ void udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
+ drm_kms_helper_poll_fini(dev);
+
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 837c82757339..f5500df51686 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -25,5 +25,3 @@ vc4-y := \
vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
obj-$(CONFIG_DRM_VC4) += vc4.o
-
-CFLAGS_vc4_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 3afdbf4bc10b..2decc8e2c79f 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -53,6 +53,17 @@ static void vc4_bo_stats_dump(struct vc4_dev *vc4)
vc4->bo_labels[i].size_allocated / 1024,
vc4->bo_labels[i].num_allocated);
}
+
+ mutex_lock(&vc4->purgeable.lock);
+ if (vc4->purgeable.num)
+ DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+ vc4->purgeable.size / 1024, vc4->purgeable.num);
+
+ if (vc4->purgeable.purged_num)
+ DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
+ vc4->purgeable.purged_size / 1024,
+ vc4->purgeable.purged_num);
+ mutex_unlock(&vc4->purgeable.lock);
}
#ifdef CONFIG_DEBUG_FS
@@ -75,6 +86,17 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
}
mutex_unlock(&vc4->bo_lock);
+ mutex_lock(&vc4->purgeable.lock);
+ if (vc4->purgeable.num)
+ seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+ vc4->purgeable.size / 1024, vc4->purgeable.num);
+
+ if (vc4->purgeable.purged_num)
+ seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
+ vc4->purgeable.purged_size / 1024,
+ vc4->purgeable.purged_num);
+ mutex_unlock(&vc4->purgeable.lock);
+
return 0;
}
#endif
@@ -247,6 +269,109 @@ static void vc4_bo_cache_purge(struct drm_device *dev)
mutex_unlock(&vc4->bo_lock);
}
+void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ mutex_lock(&vc4->purgeable.lock);
+ list_add_tail(&bo->size_head, &vc4->purgeable.list);
+ vc4->purgeable.num++;
+ vc4->purgeable.size += bo->base.base.size;
+ mutex_unlock(&vc4->purgeable.lock);
+}
+
+static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ /* list_del_init() is used here because the caller might release
+ * the purgeable lock in order to acquire the madv one and update the
+ * madv status.
+ * During this short period of time a user might decide to mark
+ * the BO as unpurgeable, and if bo->madv is set to
+ * VC4_MADV_DONTNEED it will try to remove the BO from the
+ * purgeable list which will fail if the ->next/prev fields
+ * are set to LIST_POISON1/LIST_POISON2 (which is what
+ * list_del() does).
+ * Re-initializing the list element guarantees that list_del()
+ * will work correctly even if it's a NOP.
+ */
+ list_del_init(&bo->size_head);
+ vc4->purgeable.num--;
+ vc4->purgeable.size -= bo->base.base.size;
+}
+
+void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ mutex_lock(&vc4->purgeable.lock);
+ vc4_bo_remove_from_purgeable_pool_locked(bo);
+ mutex_unlock(&vc4->purgeable.lock);
+}
+
+static void vc4_bo_purge(struct drm_gem_object *obj)
+{
+ struct vc4_bo *bo = to_vc4_bo(obj);
+ struct drm_device *dev = obj->dev;
+
+ WARN_ON(!mutex_is_locked(&bo->madv_lock));
+ WARN_ON(bo->madv != VC4_MADV_DONTNEED);
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+ dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
+ bo->base.vaddr = NULL;
+ bo->madv = __VC4_MADV_PURGED;
+}
+
+static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ mutex_lock(&vc4->purgeable.lock);
+ while (!list_empty(&vc4->purgeable.list)) {
+ struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
+ struct vc4_bo, size_head);
+ struct drm_gem_object *obj = &bo->base.base;
+ size_t purged_size = 0;
+
+ vc4_bo_remove_from_purgeable_pool_locked(bo);
+
+ /* Release the purgeable lock while we're purging the BO so
+ * that other people can continue inserting things in the
+ * purgeable pool without having to wait for all BOs to be
+ * purged.
+ */
+ mutex_unlock(&vc4->purgeable.lock);
+ mutex_lock(&bo->madv_lock);
+
+ /* Since we released the purgeable pool lock before acquiring
+ * the BO madv one, the user may have marked the BO as WILLNEED
+ * and re-used it in the meantime.
+ * Before purging the BO we need to make sure
+ * - it is still marked as DONTNEED
+ * - it has not been re-inserted in the purgeable list
+ * - it is not used by HW blocks
+ * If one of these conditions is not met, just skip the entry.
+ */
+ if (bo->madv == VC4_MADV_DONTNEED &&
+ list_empty(&bo->size_head) &&
+ !refcount_read(&bo->usecnt)) {
+ purged_size = bo->base.base.size;
+ vc4_bo_purge(obj);
+ }
+ mutex_unlock(&bo->madv_lock);
+ mutex_lock(&vc4->purgeable.lock);
+
+ if (purged_size) {
+ vc4->purgeable.purged_size += purged_size;
+ vc4->purgeable.purged_num++;
+ }
+ }
+ mutex_unlock(&vc4->purgeable.lock);
+}
+
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
uint32_t size,
enum vc4_kernel_bo_type type)
@@ -293,6 +418,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
if (!bo)
return ERR_PTR(-ENOMEM);
+ bo->madv = VC4_MADV_WILLNEED;
+ refcount_set(&bo->usecnt, 0);
+ mutex_init(&bo->madv_lock);
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
@@ -330,16 +458,38 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
* CMA allocations we've got laying around and try again.
*/
vc4_bo_cache_purge(dev);
+ cma_obj = drm_gem_cma_create(dev, size);
+ }
+ if (IS_ERR(cma_obj)) {
+ /*
+ * Still not enough CMA memory, purge the userspace BO
+ * cache and retry.
+ * This is sub-optimal since we purge the whole userspace
+ * BO cache which forces user that want to re-use the BO to
+ * restore its initial content.
+ * Ideally, we should purge entries one by one and retry
+ * after each to see if CMA allocation succeeds. Or even
+ * better, try to find an entry with at least the same
+ * size.
+ */
+ vc4_bo_userspace_cache_purge(dev);
cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj)) {
- DRM_ERROR("Failed to allocate from CMA:\n");
- vc4_bo_stats_dump(vc4);
- return ERR_PTR(-ENOMEM);
- }
+ }
+
+ if (IS_ERR(cma_obj)) {
+ DRM_ERROR("Failed to allocate from CMA:\n");
+ vc4_bo_stats_dump(vc4);
+ return ERR_PTR(-ENOMEM);
}
bo = to_vc4_bo(&cma_obj->base);
+ /* By default, BOs do not support the MADV ioctl. This will be enabled
+ * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
+ * BOs).
+ */
+ bo->madv = __VC4_MADV_NOTSUPP;
+
mutex_lock(&vc4->bo_lock);
vc4_bo_set_label(&cma_obj->base, type);
mutex_unlock(&vc4->bo_lock);
@@ -365,6 +515,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
if (IS_ERR(bo))
return PTR_ERR(bo);
+ bo->madv = VC4_MADV_WILLNEED;
+
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_put_unlocked(&bo->base.base);
@@ -403,6 +555,12 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
struct vc4_bo *bo = to_vc4_bo(gem_bo);
struct list_head *cache_list;
+ /* Remove the BO from the purgeable list. */
+ mutex_lock(&bo->madv_lock);
+ if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
+ vc4_bo_remove_from_purgeable_pool(bo);
+ mutex_unlock(&bo->madv_lock);
+
mutex_lock(&vc4->bo_lock);
/* If the object references someone else's memory, we can't cache it.
*/
@@ -418,7 +576,8 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
}
/* If this object was partially constructed but CMA allocation
- * had failed, just free it.
+ * had failed, just free it. Can also happen when the BO has been
+ * purged.
*/
if (!bo->base.vaddr) {
vc4_bo_destroy(bo);
@@ -437,6 +596,10 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
bo->validated_shader = NULL;
}
+ /* Reset madv and usecnt before adding the BO to the cache. */
+ bo->madv = __VC4_MADV_NOTSUPP;
+ refcount_set(&bo->usecnt, 0);
+
bo->t_format = false;
bo->free_time = jiffies;
list_add(&bo->size_head, cache_list);
@@ -461,10 +624,60 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
mutex_unlock(&vc4->bo_lock);
}
-static void vc4_bo_cache_time_timer(unsigned long data)
+int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
+
+ /* Fast path: if the BO is already retained by someone, no need to
+ * check the madv status.
+ */
+ if (refcount_inc_not_zero(&bo->usecnt))
+ return 0;
+
+ mutex_lock(&bo->madv_lock);
+ switch (bo->madv) {
+ case VC4_MADV_WILLNEED:
+ if (!refcount_inc_not_zero(&bo->usecnt))
+ refcount_set(&bo->usecnt, 1);
+ ret = 0;
+ break;
+ case VC4_MADV_DONTNEED:
+ /* We shouldn't use a BO marked as purgeable if at least
+ * someone else retained its content by incrementing usecnt.
+ * Luckily the BO hasn't been purged yet, but something wrong
+ * is happening here. Just throw an error instead of
+ * authorizing this use case.
+ */
+ case __VC4_MADV_PURGED:
+ /* We can't use a purged BO. */
+ default:
+ /* Invalid madv value. */
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&bo->madv_lock);
+
+ return ret;
+}
+
+void vc4_bo_dec_usecnt(struct vc4_bo *bo)
+{
+ /* Fast path: if the BO is still retained by someone, no need to test
+ * the madv value.
+ */
+ if (refcount_dec_not_one(&bo->usecnt))
+ return;
+
+ mutex_lock(&bo->madv_lock);
+ if (refcount_dec_and_test(&bo->usecnt) &&
+ bo->madv == VC4_MADV_DONTNEED)
+ vc4_bo_add_to_purgeable_pool(bo);
+ mutex_unlock(&bo->madv_lock);
+}
+
+static void vc4_bo_cache_time_timer(struct timer_list *t)
+{
+ struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
schedule_work(&vc4->bo_cache.time_work);
}
@@ -480,18 +693,52 @@ struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
struct vc4_bo *bo = to_vc4_bo(obj);
+ struct dma_buf *dmabuf;
+ int ret;
if (bo->validated_shader) {
DRM_DEBUG("Attempting to export shader BO\n");
return ERR_PTR(-EINVAL);
}
- return drm_gem_prime_export(dev, obj, flags);
+ /* Note: as soon as the BO is exported it becomes unpurgeable, because
+ * noone ever decrements the usecnt even if the reference held by the
+ * exported BO is released. This shouldn't be a problem since we don't
+ * expect exported BOs to be marked as purgeable.
+ */
+ ret = vc4_bo_inc_usecnt(bo);
+ if (ret) {
+ DRM_ERROR("Failed to increment BO usecnt\n");
+ return ERR_PTR(ret);
+ }
+
+ dmabuf = drm_gem_prime_export(dev, obj, flags);
+ if (IS_ERR(dmabuf))
+ vc4_bo_dec_usecnt(bo);
+
+ return dmabuf;
+}
+
+int vc4_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct vc4_bo *bo = to_vc4_bo(obj);
+
+ /* The only reason we would end up here is when user-space accesses
+ * BO's memory after it's been purged.
+ */
+ mutex_lock(&bo->madv_lock);
+ WARN_ON(bo->madv != __VC4_MADV_PURGED);
+ mutex_unlock(&bo->madv_lock);
+
+ return VM_FAULT_SIGBUS;
}
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_gem_object *gem_obj;
+ unsigned long vm_pgoff;
struct vc4_bo *bo;
int ret;
@@ -507,16 +754,36 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
+ if (bo->madv != VC4_MADV_WILLNEED) {
+ DRM_DEBUG("mmaping of %s BO not allowed\n",
+ bo->madv == VC4_MADV_DONTNEED ?
+ "purgeable" : "purged");
+ return -EINVAL;
+ }
+
/*
* Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_pgoff = 0;
+ /* This ->vm_pgoff dance is needed to make all parties happy:
+ * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
+ * mem-region, hence the need to set it to zero (the value set by
+ * the DRM core is a virtual offset encoding the GEM object-id)
+ * - the mmap() core logic needs ->vm_pgoff to be restored to its
+ * initial value before returning from this function because it
+ * encodes the offset of this GEM in the dev->anon_inode pseudo-file
+ * and this information will be used when we invalidate userspace
+ * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
+ */
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
bo->base.paddr, vma->vm_end - vma->vm_start);
+ vma->vm_pgoff = vm_pgoff;
+
if (ret)
drm_gem_vm_close(vma);
@@ -580,6 +847,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
+ bo->madv = VC4_MADV_WILLNEED;
+
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_put_unlocked(&bo->base.base);
@@ -633,6 +902,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
+ bo->madv = VC4_MADV_WILLNEED;
+
if (copy_from_user(bo->base.vaddr,
(void __user *)(uintptr_t)args->data,
args->size)) {
@@ -768,9 +1039,7 @@ int vc4_bo_cache_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
- setup_timer(&vc4->bo_cache.time_timer,
- vc4_bo_cache_time_timer,
- (unsigned long)dev);
+ timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 519cefef800d..72c9dbd81d7f 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -97,8 +97,6 @@ struct vc4_dpi {
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct drm_bridge *bridge;
- bool is_panel_bridge;
void __iomem *regs;
@@ -251,10 +249,11 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
struct device *dev = &dpi->pdev->dev;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
int ret;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dpi->bridge);
+ &panel, &bridge);
if (ret) {
/* If nothing was connected in the DT, that's not an
* error.
@@ -265,13 +264,10 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
return ret;
}
- if (panel) {
- dpi->bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DPI);
- dpi->is_panel_bridge = true;
- }
+ if (panel)
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
- return drm_bridge_attach(dpi->encoder, dpi->bridge, NULL);
+ return drm_bridge_attach(dpi->encoder, bridge, NULL);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
@@ -352,8 +348,7 @@ static void vc4_dpi_unbind(struct device *dev, struct device *master,
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dpi *dpi = dev_get_drvdata(dev);
- if (dpi->is_panel_bridge)
- drm_panel_bridge_remove(dpi->bridge);
+ drm_of_panel_bridge_remove(dev->of_node, 0, 0);
drm_encoder_cleanup(dpi->encoder);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 1c96edcb302b..e3c29729da2e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -100,6 +100,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_VC4_PARAM_SUPPORTS_ETC1:
case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
+ case DRM_VC4_PARAM_SUPPORTS_MADVISE:
args->value = true;
break;
default:
@@ -117,6 +118,12 @@ static void vc4_lastclose(struct drm_device *dev)
drm_fbdev_cma_restore_mode(vc4->fbdev);
}
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
static const struct file_operations vc4_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -142,6 +149,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
@@ -166,7 +174,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
.gem_free_object_unlocked = vc4_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .gem_vm_ops = &vc4_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 87f2d8e5c134..9c0d380c96f2 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -74,6 +74,19 @@ struct vc4_dev {
/* Protects bo_cache and bo_labels. */
struct mutex bo_lock;
+ /* Purgeable BO pool. All BOs in this pool can have their memory
+ * reclaimed if the driver is unable to allocate new BOs. We also
+ * keep stats related to the purge mechanism here.
+ */
+ struct {
+ struct list_head list;
+ unsigned int num;
+ size_t size;
+ unsigned int purged_num;
+ size_t purged_size;
+ struct mutex lock;
+ } purgeable;
+
uint64_t dma_fence_context;
/* Sequence number for the last job queued in bin_job_list.
@@ -192,6 +205,16 @@ struct vc4_bo {
* for user-allocated labels.
*/
int label;
+
+ /* Count the number of active users. This is needed to determine
+ * whether we can move the BO to the purgeable list or not (when the BO
+ * is used by the GPU or the display engine we can't purge it).
+ */
+ refcount_t usecnt;
+
+ /* Store purgeable/purged state here */
+ u32 madv;
+ struct mutex madv_lock;
};
static inline struct vc4_bo *
@@ -503,6 +526,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -513,6 +537,10 @@ void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
+int vc4_bo_inc_usecnt(struct vc4_bo *bo);
+void vc4_bo_dec_usecnt(struct vc4_bo *bo);
+void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
+void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
@@ -557,6 +585,8 @@ void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
struct vc4_seqno_cb *cb, uint64_t seqno,
void (*func)(struct vc4_seqno_cb *cb));
+int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index d1e0dc908048..94085f8bcd68 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -504,7 +505,6 @@ struct vc4_dsi {
struct mipi_dsi_host dsi_host;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
- bool is_panel_bridge;
void __iomem *regs;
@@ -859,14 +859,11 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
pll_clock = parent_rate / divider;
pixel_clock_hz = pll_clock / dsi->divider;
- /* Round up the clk_set_rate() request slightly, since
- * PLLD_DSI1 is an integer divider and its rate selection will
- * never round up.
- */
- adjusted_mode->clock = pixel_clock_hz / 1000 + 1;
+ adjusted_mode->clock = pixel_clock_hz / 1000;
/* Given the new pixel clock, adjust HFP to keep vrefresh the same. */
- adjusted_mode->htotal = pixel_clock_hz / (mode->vrefresh * mode->vtotal);
+ adjusted_mode->htotal = adjusted_mode->clock * mode->htotal /
+ mode->clock;
adjusted_mode->hsync_end += adjusted_mode->htotal - mode->htotal;
adjusted_mode->hsync_start += adjusted_mode->htotal - mode->htotal;
@@ -900,7 +897,11 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_dump_regs(dsi);
}
- phy_clock = pixel_clock_hz * dsi->divider;
+ /* Round up the clk_set_rate() request slightly, since
+ * PLLD_DSI1 is an integer divider and its rate selection will
+ * never round up.
+ */
+ phy_clock = (pixel_clock_hz + 1000) * dsi->divider;
ret = clk_set_rate(dsi->pll_phy_clock, phy_clock);
if (ret) {
dev_err(&dsi->pdev->dev,
@@ -1288,7 +1289,6 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
- int ret = 0;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1323,34 +1323,12 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
- dsi->bridge = of_drm_find_bridge(device->dev.of_node);
- if (!dsi->bridge) {
- struct drm_panel *panel =
- of_drm_find_panel(device->dev.of_node);
-
- dsi->bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DSI);
- if (IS_ERR(dsi->bridge)) {
- ret = PTR_ERR(dsi->bridge);
- dsi->bridge = NULL;
- return ret;
- }
- dsi->is_panel_bridge = true;
- }
-
- return drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ return 0;
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
- struct vc4_dsi *dsi = host_to_dsi(host);
-
- if (dsi->is_panel_bridge) {
- drm_panel_bridge_remove(dsi->bridge);
- dsi->bridge = NULL;
- }
-
return 0;
}
@@ -1382,6 +1360,27 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
*ret = IRQ_HANDLED;
}
+/*
+ * Initial handler for port 1 where we need the reg_dma workaround.
+ * The register DMA writes sleep, so we can't do it in the top half.
+ * Instead we use IRQF_ONESHOT so that the IRQ gets disabled in the
+ * parent interrupt contrller until our interrupt thread is done.
+ */
+static irqreturn_t vc4_dsi_irq_defer_to_thread_handler(int irq, void *data)
+{
+ struct vc4_dsi *dsi = data;
+ u32 stat = DSI_PORT_READ(INT_STAT);
+
+ if (!stat)
+ return IRQ_NONE;
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Normal IRQ handler for port 0, or the threaded IRQ handler for port
+ * 1 where we need the reg_dma workaround.
+ */
static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
{
struct vc4_dsi *dsi = data;
@@ -1492,16 +1491,13 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_dsi *dsi;
+ struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
+ struct drm_panel *panel;
const struct of_device_id *match;
dma_cap_mask_t dma_mask;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
-
match = of_match_device(vc4_dsi_dt_match, dev);
if (!match)
return -ENODEV;
@@ -1516,7 +1512,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
vc4_dsi_encoder->dsi = dsi;
dsi->encoder = &vc4_dsi_encoder->base.base;
- dsi->pdev = pdev;
dsi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
@@ -1565,8 +1560,15 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
/* Clear any existing interrupt state. */
DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT));
- ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
- vc4_dsi_irq_handler, 0, "vc4 dsi", dsi);
+ if (dsi->reg_dma_mem)
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+ vc4_dsi_irq_defer_to_thread_handler,
+ vc4_dsi_irq_handler,
+ IRQF_ONESHOT,
+ "vc4 dsi", dsi);
+ else
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_dsi_irq_handler, 0, "vc4 dsi", dsi);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get interrupt: %d\n", ret);
@@ -1597,6 +1599,18 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
+ &panel, &dsi->bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(dsi->bridge))
+ return PTR_ERR(dsi->bridge);
+ }
+
/* The esc clock rate is supposed to always be 100Mhz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
if (ret) {
@@ -1615,12 +1629,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
- dsi->dsi_host.ops = &vc4_dsi_host_ops;
- dsi->dsi_host.dev = dev;
-
- mipi_dsi_host_register(&dsi->dsi_host);
-
- dev_set_drvdata(dev, dsi);
+ ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ if (ret) {
+ dev_err(dev, "bridge attach failed: %d\n", ret);
+ return ret;
+ }
pm_runtime_enable(dev);
@@ -1638,8 +1651,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
vc4_dsi_encoder_destroy(dsi->encoder);
- mipi_dsi_host_unregister(&dsi->dsi_host);
-
if (dsi->port == 1)
vc4->dsi1 = NULL;
}
@@ -1651,12 +1662,47 @@ static const struct component_ops vc4_dsi_ops = {
static int vc4_dsi_dev_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &vc4_dsi_ops);
+ struct device *dev = &pdev->dev;
+ struct vc4_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+ dev_set_drvdata(dev, dsi);
+
+ dsi->pdev = pdev;
+
+ /* Note, the initialization sequence for DSI and panels is
+ * tricky. The component bind above won't get past its
+ * -EPROBE_DEFER until the panel/bridge probes. The
+ * panel/bridge will return -EPROBE_DEFER until it has a
+ * mipi_dsi_host to register its device to. So, we register
+ * the host during pdev probe time, so vc4 as a whole can then
+ * -EPROBE_DEFER its component bind process until the panel
+ * successfully attaches.
+ */
+ dsi->dsi_host.ops = &vc4_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ mipi_dsi_host_register(&dsi->dsi_host);
+
+ ret = component_add(&pdev->dev, &vc4_dsi_ops);
+ if (ret) {
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ return ret;
+ }
+
+ return 0;
}
static int vc4_dsi_dev_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct vc4_dsi *dsi = dev_get_drvdata(dev);
+
component_del(&pdev->dev, &vc4_dsi_ops);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index d0c6bfb68c4e..638540943c61 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -188,11 +188,22 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
+ bo = to_vc4_bo(&exec[i]->bo[j]->base);
+
+ /* Retain BOs just in case they were marked purgeable.
+ * This prevents the BO from being purged before
+ * someone had a chance to dump the hang state.
+ */
+ WARN_ON(!refcount_read(&bo->usecnt));
+ refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
+ /* No need to retain BOs coming from the ->unref_list
+ * because they are naturally unpurgeable.
+ */
drm_gem_object_get(&bo->base.base);
kernel_state->bo[j + prev_idx] = &bo->base.base;
j++;
@@ -233,6 +244,26 @@ vc4_save_hang_state(struct drm_device *dev)
state->fdbgs = V3D_READ(V3D_FDBGS);
state->errstat = V3D_READ(V3D_ERRSTAT);
+ /* We need to turn purgeable BOs into unpurgeable ones so that
+ * userspace has a chance to dump the hang state before the kernel
+ * decides to purge those BOs.
+ * Note that BO consistency at dump time cannot be guaranteed. For
+ * example, if the owner of these BOs decides to re-use them or mark
+ * them purgeable again there's nothing we can do to prevent it.
+ */
+ for (i = 0; i < kernel_state->user_state.bo_count; i++) {
+ struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
+
+ if (bo->madv == __VC4_MADV_NOTSUPP)
+ continue;
+
+ mutex_lock(&bo->madv_lock);
+ if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
+ bo->madv = VC4_MADV_WILLNEED;
+ refcount_dec(&bo->usecnt);
+ mutex_unlock(&bo->madv_lock);
+ }
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
if (vc4->hang_state) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
@@ -281,10 +312,10 @@ vc4_reset_work(struct work_struct *work)
}
static void
-vc4_hangcheck_elapsed(unsigned long data)
+vc4_hangcheck_elapsed(struct timer_list *t)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+ struct drm_device *dev = vc4->dev;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
@@ -639,9 +670,6 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
* The command validator needs to reference BOs by their index within
* the submitted job's BO list. This does the validation of the job's
* BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at vc4_complete_exec() time.
*/
static int
vc4_cl_lookup_bos(struct drm_device *dev,
@@ -693,16 +721,47 @@ vc4_cl_lookup_bos(struct drm_device *dev,
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
i, handles[i]);
ret = -EINVAL;
- spin_unlock(&file_priv->table_lock);
- goto fail;
+ break;
}
+
drm_gem_object_get(bo);
exec->bo[i] = (struct drm_gem_cma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
+ if (ret)
+ goto fail_put_bo;
+
+ for (i = 0; i < exec->bo_count; i++) {
+ ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ if (ret)
+ goto fail_dec_usecnt;
+ }
+
+ kvfree(handles);
+ return 0;
+
+fail_dec_usecnt:
+ /* Decrease usecnt on acquired objects.
+ * We cannot rely on vc4_complete_exec() to release resources here,
+ * because vc4_complete_exec() has no information about which BO has
+ * had its ->usecnt incremented.
+ * To make things easier we just free everything explicitly and set
+ * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
+ * step.
+ */
+ for (i-- ; i >= 0; i--)
+ vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
+
+fail_put_bo:
+ /* Release any reference to acquired objects. */
+ for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
+ drm_gem_object_put_unlocked(&exec->bo[i]->base);
+
fail:
kvfree(handles);
+ kvfree(exec->bo);
+ exec->bo = NULL;
return ret;
}
@@ -829,12 +888,18 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
/* If we got force-completed because of GPU reset rather than
* through our IRQ handler, signal the fence now.
*/
- if (exec->fence)
+ if (exec->fence) {
dma_fence_signal(exec->fence);
+ dma_fence_put(exec->fence);
+ }
if (exec->bo) {
- for (i = 0; i < exec->bo_count; i++)
+ for (i = 0; i < exec->bo_count; i++) {
+ struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+
+ vc4_bo_dec_usecnt(bo);
drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ }
kvfree(exec->bo);
}
@@ -1091,13 +1156,14 @@ vc4_gem_init(struct drm_device *dev)
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
- setup_timer(&vc4->hangcheck.timer,
- vc4_hangcheck_elapsed,
- (unsigned long)dev);
+ timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
mutex_init(&vc4->power_lock);
+
+ INIT_LIST_HEAD(&vc4->purgeable.list);
+ mutex_init(&vc4->purgeable.lock);
}
void
@@ -1121,3 +1187,81 @@ vc4_gem_destroy(struct drm_device *dev)
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
}
+
+int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_gem_madvise *args = data;
+ struct drm_gem_object *gem_obj;
+ struct vc4_bo *bo;
+ int ret;
+
+ switch (args->madv) {
+ case VC4_MADV_DONTNEED:
+ case VC4_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ bo = to_vc4_bo(gem_obj);
+
+ /* Only BOs exposed to userspace can be purged. */
+ if (bo->madv == __VC4_MADV_NOTSUPP) {
+ DRM_DEBUG("madvise not supported on this BO\n");
+ ret = -EINVAL;
+ goto out_put_gem;
+ }
+
+ /* Not sure it's safe to purge imported BOs. Let's just assume it's
+ * not until proven otherwise.
+ */
+ if (gem_obj->import_attach) {
+ DRM_DEBUG("madvise not supported on imported BOs\n");
+ ret = -EINVAL;
+ goto out_put_gem;
+ }
+
+ mutex_lock(&bo->madv_lock);
+
+ if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
+ !refcount_read(&bo->usecnt)) {
+ /* If the BO is about to be marked as purgeable, is not used
+ * and is not already purgeable or purged, add it to the
+ * purgeable list.
+ */
+ vc4_bo_add_to_purgeable_pool(bo);
+ } else if (args->madv == VC4_MADV_WILLNEED &&
+ bo->madv == VC4_MADV_DONTNEED &&
+ !refcount_read(&bo->usecnt)) {
+ /* The BO has not been purged yet, just remove it from
+ * the purgeable list.
+ */
+ vc4_bo_remove_from_purgeable_pool(bo);
+ }
+
+ /* Save the purged state. */
+ args->retained = bo->madv != __VC4_MADV_PURGED;
+
+ /* Update internal madv state only if the bo was not purged. */
+ if (bo->madv != __VC4_MADV_PURGED)
+ bo->madv = args->madv;
+
+ mutex_unlock(&bo->madv_lock);
+
+ ret = 0;
+
+out_put_gem:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 937da8dd65b8..0b2088264039 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -309,16 +309,13 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct drm_connector *connector = NULL;
+ struct drm_connector *connector;
struct vc4_hdmi_connector *hdmi_connector;
- int ret = 0;
hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector),
GFP_KERNEL);
- if (!hdmi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!hdmi_connector)
+ return ERR_PTR(-ENOMEM);
connector = &hdmi_connector->base;
hdmi_connector->encoder = encoder;
@@ -336,12 +333,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
drm_mode_connector_attach_encoder(connector, encoder);
return connector;
-
- fail:
- if (connector)
- vc4_hdmi_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
@@ -433,7 +424,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- vc4_encoder->rgb_range_selectable);
+ vc4_encoder->rgb_range_selectable,
+ false);
vc4_hdmi_write_infoframe(encoder, &frame);
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 7d7af3a93d94..26eddbb62893 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
list_move_tail(&exec->head, &vc4->job_done_list);
if (exec->fence) {
dma_fence_signal_locked(exec->fence);
+ dma_fence_put(exec->fence);
exec->fence = NULL;
}
vc4_submit_next_render_job(dev);
@@ -208,6 +209,9 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ /* Undo the effects of a previous vc4_irq_uninstall. */
+ enable_irq(dev->irq);
+
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
@@ -225,6 +229,9 @@ vc4_irq_uninstall(struct drm_device *dev)
/* Clear any pending interrupts we might have left. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+ /* Finish any interrupt handler still in flight. */
+ disable_irq(dev->irq);
+
cancel_work_sync(&vc4->overflow_mem_work);
}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 2968b3ebb895..423a23ed8fc2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -23,6 +23,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
+#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -547,14 +548,24 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
break;
- case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
+ /* For T-tiled, the FB pitch is "how many bytes from
+ * one row to the next, such that pitch * tile_h ==
+ * tile_size * tiles_per_row."
+ */
+ u32 tile_size_shift = 12; /* T tiles are 4kb */
+ u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
+ u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
+
tiling = SCALER_CTL0_TILING_256B_OR_T;
- pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET),
- VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L),
- VC4_SET_FIELD((vc4_state->src_w[0] + 31) >> 5,
- SCALER_PITCH0_TILE_WIDTH_R));
+ pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
+ VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
+ VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
break;
+ }
+
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
@@ -764,21 +775,40 @@ static int vc4_prepare_fb(struct drm_plane *plane,
{
struct vc4_bo *bo;
struct dma_fence *fence;
+ int ret;
if ((plane->state->fb == state->fb) || !state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+
+ ret = vc4_bo_inc_usecnt(bo);
+ if (ret)
+ return ret;
+
fence = reservation_object_get_excl_rcu(bo->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
+static void vc4_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct vc4_bo *bo;
+
+ if (plane->state->fb == state->fb || !state->fb)
+ return;
+
+ bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ vc4_bo_dec_usecnt(bo);
+}
+
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_check = vc4_plane_atomic_check,
.atomic_update = vc4_plane_atomic_update,
.prepare_fb = vc4_prepare_fb,
+ .cleanup_fb = vc4_cleanup_fb,
};
static void vc4_plane_destroy(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/vc4/vc4_trace.h b/drivers/gpu/drm/vc4/vc4_trace.h
index ad7b1ea720c2..deafb32923e1 100644
--- a/drivers/gpu/drm/vc4/vc4_trace.h
+++ b/drivers/gpu/drm/vc4/vc4_trace.h
@@ -59,5 +59,5 @@ TRACE_EVENT(vc4_wait_for_seqno_end,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/vc4
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 8fd52f211e9d..b28876c222b4 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -85,9 +85,9 @@ static const struct dma_fence_ops vgem_fence_ops = {
.timeline_value_str = vgem_fence_timeline_value_str,
};
-static void vgem_fence_timeout(unsigned long data)
+static void vgem_fence_timeout(struct timer_list *t)
{
- struct vgem_fence *fence = (struct vgem_fence *)data;
+ struct vgem_fence *fence = from_timer(fence, t, timer);
dma_fence_signal(&fence->base);
}
@@ -105,7 +105,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
- setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+ timer_setup(&fence->timer, vgem_fence_timeout, 0);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 98aae9809249..d6e84a589ef1 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -238,9 +238,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
if (NULL == vsg->pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)xfer->mem_addr,
- vsg->num_pages, vsg->pages,
- (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0);
+ ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+ vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+ vsg->pages);
if (ret != vsg->num_pages) {
if (ret < 0)
return ret;
@@ -452,9 +452,9 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
static void
-via_dmablit_timer(unsigned long data)
+via_dmablit_timer(struct timer_list *t)
{
- drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -559,8 +559,7 @@ via_init_dmablit(struct drm_device *dev)
init_waitqueue_head(blitq->blit_queue + j);
init_waitqueue_head(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- setup_timer(&blitq->poll_timer, via_dmablit_timer,
- (unsigned long)blitq);
+ timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
}
}
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 0677bbf4ec7e..fb2609434df7 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -34,6 +34,7 @@
#include <drm/drm_legacy.h>
#include "via_verifier.h"
#include "via_drv.h"
+#include <linux/kernel.h>
typedef enum {
state_command,
@@ -1102,10 +1103,7 @@ setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
void via_init_command_verifier(void)
{
- setup_hazard_table(init_table1, table1,
- sizeof(init_table1) / sizeof(hz_init_t));
- setup_hazard_table(init_table2, table2,
- sizeof(init_table2) / sizeof(hz_init_t));
- setup_hazard_table(init_table3, table3,
- sizeof(init_table3) / sizeof(hz_init_t));
+ setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
+ setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
+ setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index b6d52055a11f..41b0930f7968 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -53,7 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
- drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+ drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(virtio_gpu_fb);
}
@@ -327,7 +327,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return NULL;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 72ad7b103448..92fb27753b9e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -72,7 +72,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
*obj_p = &obj->gem_base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&obj->gem_base);
+ drm_gem_object_put_unlocked(&obj->gem_base);
*handle_p = handle;
return 0;
@@ -130,7 +130,7 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
return -ENOENT;
obj = gem_to_virtio_gpu_obj(gobj);
*offset_p = virtio_gpu_object_mmap_offset(obj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b94bd5440e57..0528edb4a2bf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -86,7 +86,7 @@ static void virtio_gpu_unref_list(struct list_head *head)
bo = buf->bo;
qobj = container_of(bo, struct virtio_gpu_object, tbo);
- drm_gem_object_unreference_unlocked(&qobj->gem_base);
+ drm_gem_object_put_unlocked(&qobj->gem_base);
}
}
@@ -304,7 +304,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
return ret;
}
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
rc->res_handle = res_id; /* similiar to a VM address */
rc->bo_handle = handle;
@@ -341,7 +341,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
ri->size = qobj->gem_base.size;
ri->res_handle = qobj->hw_res_handle;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -389,7 +389,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -439,7 +439,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -462,7 +462,7 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
nowait = true;
ret = virtio_gpu_object_wait(qobj, nowait);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5ec24fd801cd..01be355525e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -286,7 +286,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -ENOENT;
@@ -369,7 +369,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -ENOENT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b850562fbdd6..0545740b3724 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1726,7 +1726,7 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
return 0;
}
- crtc = drm_crtc_find(dev, arg->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index d1552d3e0652..bc5f6026573d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -360,8 +360,8 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
true);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index ca3afae2db1f..90b5437fd787 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -549,8 +549,8 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
true);
vmw_fence_obj_unreference(&fence);
} else {
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 45244828fc1f..e8b8266c0cde 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drmP.h>
@@ -40,7 +41,7 @@ static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
}
static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = zx_drm_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index c0b80244158d..b92016ce09b7 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -12,6 +12,7 @@ host1x-y = \
hw/host1x01.o \
hw/host1x02.o \
hw/host1x04.o \
- hw/host1x05.o
+ hw/host1x05.o \
+ hw/host1x06.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index ed03b3243195..2e57c9cea696 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -404,12 +404,13 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
dev_set_name(&device->dev, "%s", driver->driver.name);
- of_dma_configure(&device->dev, host1x->dev->of_node);
device->dev.release = host1x_device_release;
device->dev.of_node = host1x->dev->of_node;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
+ of_dma_configure(&device->dev, host1x->dev->of_node);
+
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
kfree(device);
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index db9b91d1384c..2fb93c27c1d9 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -128,8 +128,7 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
* host1x_channel_request() - Allocate a channel
* @device: Host1x unit this channel will be used to send commands to
*
- * Allocates a new host1x channel for @device. If there are no free channels,
- * this will sleep until one becomes available. May return NULL if CDMA
+ * Allocates a new host1x channel for @device. May return NULL if CDMA
* initialization fails.
*/
struct host1x_channel *host1x_channel_request(struct device *dev)
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 2aae0e63214c..dc77ec452ffc 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -40,7 +40,19 @@ void host1x_debug_output(struct output *o, const char *fmt, ...)
len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
va_end(args);
- o->fn(o->ctx, o->buf, len);
+ o->fn(o->ctx, o->buf, len, false);
+}
+
+void host1x_debug_cont(struct output *o, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, fmt);
+ len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
+ va_end(args);
+
+ o->fn(o->ctx, o->buf, len, true);
}
static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
index 4595b2e0799f..990cce47e737 100644
--- a/drivers/gpu/host1x/debug.h
+++ b/drivers/gpu/host1x/debug.h
@@ -24,22 +24,28 @@
struct host1x;
struct output {
- void (*fn)(void *ctx, const char *str, size_t len);
+ void (*fn)(void *ctx, const char *str, size_t len, bool cont);
void *ctx;
char buf[256];
};
-static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len,
+ bool cont)
{
seq_write((struct seq_file *)ctx, str, len);
}
-static inline void write_to_printk(void *ctx, const char *str, size_t len)
+static inline void write_to_printk(void *ctx, const char *str, size_t len,
+ bool cont)
{
- pr_info("%s", str);
+ if (cont)
+ pr_cont("%s", str);
+ else
+ pr_info("%s", str);
}
void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);
+void __printf(2, 3) host1x_debug_cont(struct output *o, const char *fmt, ...);
extern unsigned int host1x_debug_trace_cmdbuf;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 5267c62e8896..bf67c3aeb634 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -39,6 +39,17 @@
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
+#include "hw/host1x06.h"
+
+void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
+{
+ writel(v, host1x->hv_regs + r);
+}
+
+u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
+{
+ return readl(host1x->hv_regs + r);
+}
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -104,7 +115,19 @@ static const struct host1x_info host1x05_info = {
.dma_mask = DMA_BIT_MASK(34),
};
+static const struct host1x_info host1x06_info = {
+ .nb_channels = 63,
+ .nb_pts = 576,
+ .nb_mlocks = 24,
+ .nb_bases = 16,
+ .init = host1x06_init,
+ .sync_offset = 0x0,
+ .dma_mask = DMA_BIT_MASK(34),
+ .has_hypervisor = true,
+};
+
static const struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
@@ -116,20 +139,37 @@ MODULE_DEVICE_TABLE(of, host1x_of_match);
static int host1x_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
struct host1x *host;
- struct resource *regs;
+ struct resource *regs, *hv_regs = NULL;
int syncpt_irq;
int err;
- id = of_match_device(host1x_of_match, &pdev->dev);
- if (!id)
- return -EINVAL;
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(&pdev->dev, "failed to get registers\n");
- return -ENXIO;
+ host->info = of_device_get_match_data(&pdev->dev);
+
+ if (host->info->has_hypervisor) {
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get vm registers\n");
+ return -ENXIO;
+ }
+
+ hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "hypervisor");
+ if (!hv_regs) {
+ dev_err(&pdev->dev,
+ "failed to get hypervisor registers\n");
+ return -ENXIO;
+ }
+ } else {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
}
syncpt_irq = platform_get_irq(pdev, 0);
@@ -138,15 +178,10 @@ static int host1x_probe(struct platform_device *pdev)
return syncpt_irq;
}
- host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host)
- return -ENOMEM;
-
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
INIT_LIST_HEAD(&host->list);
host->dev = &pdev->dev;
- host->info = id->data;
/* set common host1x device data */
platform_set_drvdata(pdev, host);
@@ -155,6 +190,12 @@ static int host1x_probe(struct platform_device *pdev)
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ if (host->info->has_hypervisor) {
+ host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
+ if (IS_ERR(host->hv_regs))
+ return PTR_ERR(host->hv_regs);
+ }
+
dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
if (host->info->init) {
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index ffdbc15b749b..502769726480 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -79,6 +79,9 @@ struct host1x_syncpt_ops {
u32 (*load)(struct host1x_syncpt *syncpt);
int (*cpu_incr)(struct host1x_syncpt *syncpt);
int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
+ void (*assign_to_channel)(struct host1x_syncpt *syncpt,
+ struct host1x_channel *channel);
+ void (*enable_protection)(struct host1x *host);
};
struct host1x_intr_ops {
@@ -100,12 +103,14 @@ struct host1x_info {
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
unsigned int sync_offset; /* offset of syncpoint registers */
u64 dma_mask; /* mask of addressable memory */
+ bool has_hypervisor; /* has hypervisor registers */
};
struct host1x {
const struct host1x_info *info;
void __iomem *regs;
+ void __iomem *hv_regs; /* hypervisor region */
struct host1x_syncpt *syncpt;
struct host1x_syncpt_base *bases;
struct device *dev;
@@ -140,6 +145,8 @@ struct host1x {
struct list_head list;
};
+void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
+u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
u32 host1x_sync_readl(struct host1x *host1x, u32 r);
void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v);
@@ -182,6 +189,18 @@ static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
return host->syncpt_op->patch_wait(sp, patch_addr);
}
+static inline void host1x_hw_syncpt_assign_to_channel(
+ struct host1x *host, struct host1x_syncpt *sp,
+ struct host1x_channel *ch)
+{
+ return host->syncpt_op->assign_to_channel(sp, ch);
+}
+
+static inline void host1x_hw_syncpt_enable_protection(struct host1x *host)
+{
+ return host->syncpt_op->enable_protection(host);
+}
+
static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
void (*syncpt_thresh_work)(struct work_struct *))
{
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 6b231119193e..ce320534cbed 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -172,6 +172,30 @@ static void cdma_stop(struct host1x_cdma *cdma)
mutex_unlock(&cdma->lock);
}
+static void cdma_hw_cmdproc_stop(struct host1x *host, struct host1x_channel *ch,
+ bool stop)
+{
+#if HOST1X_HW >= 6
+ host1x_ch_writel(ch, stop ? 0x1 : 0x0, HOST1X_CHANNEL_CMDPROC_STOP);
+#else
+ u32 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
+ if (stop)
+ cmdproc_stop |= BIT(ch->id);
+ else
+ cmdproc_stop &= ~BIT(ch->id);
+ host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+#endif
+}
+
+static void cdma_hw_teardown(struct host1x *host, struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ host1x_ch_writel(ch, 0x1, HOST1X_CHANNEL_TEARDOWN);
+#else
+ host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
+#endif
+}
+
/*
* Stops both channel's command processor and CDMA immediately.
* Also, tears down the channel and resets corresponding module.
@@ -180,7 +204,6 @@ static void cdma_freeze(struct host1x_cdma *cdma)
{
struct host1x *host = cdma_to_host1x(cdma);
struct host1x_channel *ch = cdma_to_channel(cdma);
- u32 cmdproc_stop;
if (cdma->torndown && !cdma->running) {
dev_warn(host->dev, "Already torn down\n");
@@ -189,9 +212,7 @@ static void cdma_freeze(struct host1x_cdma *cdma)
dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
- cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop |= BIT(ch->id);
- host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+ cdma_hw_cmdproc_stop(host, ch, true);
dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
__func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
@@ -201,7 +222,7 @@ static void cdma_freeze(struct host1x_cdma *cdma)
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
- host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
+ cdma_hw_teardown(host, ch);
cdma->running = false;
cdma->torndown = true;
@@ -211,15 +232,12 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct host1x_channel *ch = cdma_to_channel(cdma);
- u32 cmdproc_stop;
dev_dbg(host1x->dev,
"resuming channel (id %u, DMAGET restart = 0x%x)\n",
ch->id, getptr);
- cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop &= ~BIT(ch->id);
- host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+ cdma_hw_cmdproc_stop(host1x, ch, false);
cdma->torndown = false;
cdma_timeout_restart(cdma, getptr);
@@ -232,7 +250,7 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
*/
static void cdma_timeout_handler(struct work_struct *work)
{
- u32 prev_cmdproc, cmdproc_stop, syncpt_val;
+ u32 syncpt_val;
struct host1x_cdma *cdma;
struct host1x *host1x;
struct host1x_channel *ch;
@@ -254,12 +272,7 @@ static void cdma_timeout_handler(struct work_struct *work)
}
/* stop processing to get a clean snapshot */
- prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop = prev_cmdproc | BIT(ch->id);
- host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
-
- dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
- prev_cmdproc, cmdproc_stop);
+ cdma_hw_cmdproc_stop(host1x, ch, true);
syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
@@ -268,9 +281,7 @@ static void cdma_timeout_handler(struct work_struct *work)
dev_dbg(host1x->dev,
"cdma_timeout: expired, but buffer had completed\n");
/* restore */
- cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
- host1x_sync_writel(host1x, cmdproc_stop,
- HOST1X_SYNC_CMDPROC_STOP);
+ cdma_hw_cmdproc_stop(host1x, ch, false);
mutex_unlock(&cdma->lock);
return;
}
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 8447a56c41ca..9af758785a11 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -147,6 +147,8 @@ static int channel_submit(struct host1x_job *job)
syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
+ host1x_hw_syncpt_assign_to_channel(host, sp, ch);
+
job->syncpt_end = syncval;
/* add a setclass for modules that require it */
@@ -178,10 +180,32 @@ error:
return err;
}
+static void enable_gather_filter(struct host1x *host,
+ struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ u32 val;
+
+ if (!host->hv_regs)
+ return;
+
+ val = host1x_hypervisor_readl(
+ host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+ val |= BIT(ch->id % 32);
+ host1x_hypervisor_writel(
+ host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+#elif HOST1X_HW >= 4
+ host1x_ch_writel(ch,
+ HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
+ HOST1X_CHANNEL_CHANNELCTRL);
+#endif
+}
+
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
unsigned int index)
{
ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+ enable_gather_filter(dev, ch);
return 0;
}
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 7a4a3286e4a7..989476801f9d 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -30,6 +30,13 @@ enum {
HOST1X_OPCODE_IMM = 0x04,
HOST1X_OPCODE_RESTART = 0x05,
HOST1X_OPCODE_GATHER = 0x06,
+ HOST1X_OPCODE_SETSTRMID = 0x07,
+ HOST1X_OPCODE_SETAPPID = 0x08,
+ HOST1X_OPCODE_SETPYLD = 0x09,
+ HOST1X_OPCODE_INCR_W = 0x0a,
+ HOST1X_OPCODE_NONINCR_W = 0x0b,
+ HOST1X_OPCODE_GATHER_W = 0x0c,
+ HOST1X_OPCODE_RESTART_W = 0x0d,
HOST1X_OPCODE_EXTEND = 0x0e,
};
@@ -38,67 +45,122 @@ enum {
HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01,
};
-static unsigned int show_channel_command(struct output *o, u32 val)
+#define INVALID_PAYLOAD 0xffffffff
+
+static unsigned int show_channel_command(struct output *o, u32 val,
+ u32 *payload)
{
- unsigned int mask, subop;
+ unsigned int mask, subop, num, opcode;
+
+ opcode = val >> 28;
- switch (val >> 28) {
+ switch (opcode) {
case HOST1X_OPCODE_SETCLASS:
mask = val & 0x3f;
if (mask) {
- host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+ host1x_debug_cont(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
val >> 6 & 0x3ff,
val >> 16 & 0xfff, mask);
return hweight8(mask);
}
- host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+ host1x_debug_cont(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
return 0;
case HOST1X_OPCODE_INCR:
- host1x_debug_output(o, "INCR(offset=%03x, [",
+ num = val & 0xffff;
+ host1x_debug_cont(o, "INCR(offset=%03x, [",
val >> 16 & 0xfff);
- return val & 0xffff;
+ if (!num)
+ host1x_debug_cont(o, "])\n");
+
+ return num;
case HOST1X_OPCODE_NONINCR:
- host1x_debug_output(o, "NONINCR(offset=%03x, [",
+ num = val & 0xffff;
+ host1x_debug_cont(o, "NONINCR(offset=%03x, [",
val >> 16 & 0xfff);
- return val & 0xffff;
+ if (!num)
+ host1x_debug_cont(o, "])\n");
+
+ return num;
case HOST1X_OPCODE_MASK:
mask = val & 0xffff;
- host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+ host1x_debug_cont(o, "MASK(offset=%03x, mask=%03x, [",
val >> 16 & 0xfff, mask);
+ if (!mask)
+ host1x_debug_cont(o, "])\n");
+
return hweight16(mask);
case HOST1X_OPCODE_IMM:
- host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+ host1x_debug_cont(o, "IMM(offset=%03x, data=%03x)\n",
val >> 16 & 0xfff, val & 0xffff);
return 0;
case HOST1X_OPCODE_RESTART:
- host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+ host1x_debug_cont(o, "RESTART(offset=%08x)\n", val << 4);
return 0;
case HOST1X_OPCODE_GATHER:
- host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+ host1x_debug_cont(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
val >> 16 & 0xfff, val >> 15 & 0x1,
val >> 14 & 0x1, val & 0x3fff);
return 1;
+#if HOST1X_HW >= 6
+ case HOST1X_OPCODE_SETSTRMID:
+ host1x_debug_cont(o, "SETSTRMID(offset=%06x)\n",
+ val & 0x3fffff);
+ return 0;
+
+ case HOST1X_OPCODE_SETAPPID:
+ host1x_debug_cont(o, "SETAPPID(appid=%02x)\n", val & 0xff);
+ return 0;
+
+ case HOST1X_OPCODE_SETPYLD:
+ *payload = val & 0xffff;
+ host1x_debug_cont(o, "SETPYLD(data=%04x)\n", *payload);
+ return 0;
+
+ case HOST1X_OPCODE_INCR_W:
+ case HOST1X_OPCODE_NONINCR_W:
+ host1x_debug_cont(o, "%s(offset=%06x, ",
+ opcode == HOST1X_OPCODE_INCR_W ?
+ "INCR_W" : "NONINCR_W",
+ val & 0x3fffff);
+ if (*payload == 0) {
+ host1x_debug_cont(o, "[])\n");
+ return 0;
+ } else if (*payload == INVALID_PAYLOAD) {
+ host1x_debug_cont(o, "unknown)\n");
+ return 0;
+ } else {
+ host1x_debug_cont(o, "[");
+ return *payload;
+ }
+
+ case HOST1X_OPCODE_GATHER_W:
+ host1x_debug_cont(o, "GATHER_W(count=%04x, addr=[",
+ val & 0x3fff);
+ return 2;
+#endif
+
case HOST1X_OPCODE_EXTEND:
subop = val >> 24 & 0xf;
if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
- host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+ host1x_debug_cont(o, "ACQUIRE_MLOCK(index=%d)\n",
val & 0xff);
else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
- host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+ host1x_debug_cont(o, "RELEASE_MLOCK(index=%d)\n",
val & 0xff);
else
- host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+ host1x_debug_cont(o, "EXTEND_UNKNOWN(%08x)\n", val);
return 0;
default:
+ host1x_debug_cont(o, "UNKNOWN\n");
return 0;
}
}
@@ -110,6 +172,7 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
/* Map dmaget cursor to corresponding mem handle */
u32 offset = phys_addr - pin_addr;
unsigned int data_count = 0, i;
+ u32 payload = INVALID_PAYLOAD;
/*
* Sometimes we're given different hardware address to the same
@@ -126,11 +189,11 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
u32 val = *(map_addr + offset / 4 + i);
if (!data_count) {
- host1x_debug_output(o, "%08x: %08x:", addr, val);
- data_count = show_channel_command(o, val);
+ host1x_debug_output(o, "%08x: %08x: ", addr, val);
+ data_count = show_channel_command(o, val, &payload);
} else {
- host1x_debug_output(o, "%08x%s", val,
- data_count > 0 ? ", " : "])\n");
+ host1x_debug_cont(o, "%08x%s", val,
+ data_count > 1 ? ", " : "])\n");
data_count--;
}
}
@@ -174,138 +237,11 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
}
}
-static void host1x_debug_show_channel_cdma(struct host1x *host,
- struct host1x_channel *ch,
- struct output *o)
-{
- struct host1x_cdma *cdma = &ch->cdma;
- u32 dmaput, dmaget, dmactrl;
- u32 cbstat, cbread;
- u32 val, base, baseval;
-
- dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
- dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
- dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
- cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
- cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
-
- host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
-
- if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
- !ch->cdma.push_buffer.mapped) {
- host1x_debug_output(o, "inactive\n\n");
- return;
- }
-
- if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT)
- host1x_debug_output(o, "waiting on syncpt %d val %d\n",
- cbread >> 24, cbread & 0xffffff);
- else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
- HOST1X_CLASS_HOST1X &&
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
- base = (cbread >> 16) & 0xff;
- baseval =
- host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
- val = cbread & 0xffff;
- host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
- cbread >> 24, baseval + val, base,
- baseval, val);
- } else
- host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
- HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
- cbread);
-
- host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
- dmaput, dmaget, dmactrl);
- host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
-
- show_channel_gathers(o, cdma);
- host1x_debug_output(o, "\n");
-}
-
-static void host1x_debug_show_channel_fifo(struct host1x *host,
- struct host1x_channel *ch,
- struct output *o)
-{
- u32 val, rd_ptr, wr_ptr, start, end;
- unsigned int data_count = 0;
-
- host1x_debug_output(o, "%u: fifo:\n", ch->id);
-
- val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
- host1x_debug_output(o, "FIFOSTAT %08x\n", val);
- if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
- host1x_debug_output(o, "[empty]\n");
- return;
- }
-
- host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
- host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
- HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
- HOST1X_SYNC_CFPEEK_CTRL);
-
- val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
- rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
- wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
-
- val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
- start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
- end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
-
- do {
- host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
- host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
- HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
- HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
- HOST1X_SYNC_CFPEEK_CTRL);
- val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
-
- if (!data_count) {
- host1x_debug_output(o, "%08x:", val);
- data_count = show_channel_command(o, val);
- } else {
- host1x_debug_output(o, "%08x%s", val,
- data_count > 0 ? ", " : "])\n");
- data_count--;
- }
-
- if (rd_ptr == end)
- rd_ptr = start;
- else
- rd_ptr++;
- } while (rd_ptr != wr_ptr);
-
- if (data_count)
- host1x_debug_output(o, ", ...])\n");
- host1x_debug_output(o, "\n");
-
- host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
-}
-
-static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
-{
- unsigned int i;
-
- host1x_debug_output(o, "---- mlocks ----\n");
-
- for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
- u32 owner =
- host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
- if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
- host1x_debug_output(o, "%u: locked by channel %u\n",
- i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
- else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
- host1x_debug_output(o, "%u: locked by cpu\n", i);
- else
- host1x_debug_output(o, "%u: unlocked\n", i);
- }
-
- host1x_debug_output(o, "\n");
-}
+#if HOST1X_HW >= 6
+#include "debug_hw_1x06.c"
+#else
+#include "debug_hw_1x01.c"
+#endif
static const struct host1x_debug_ops host1x_debug_ops = {
.show_channel_cdma = host1x_debug_show_channel_cdma,
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x01.c b/drivers/gpu/host1x/hw/debug_hw_1x01.c
new file mode 100644
index 000000000000..8790d5fd5f20
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw_1x01.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+
+static void host1x_debug_show_channel_cdma(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ struct host1x_cdma *cdma = &ch->cdma;
+ u32 dmaput, dmaget, dmactrl;
+ u32 cbstat, cbread;
+ u32 val, base, baseval;
+
+ dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
+ dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
+ dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
+ cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
+ cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
+
+ host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
+
+ if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
+ !ch->cdma.push_buffer.mapped) {
+ host1x_debug_output(o, "inactive\n\n");
+ return;
+ }
+
+ if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT)
+ host1x_debug_output(o, "waiting on syncpt %d val %d\n",
+ cbread >> 24, cbread & 0xffffff);
+ else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
+ HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
+ base = (cbread >> 16) & 0xff;
+ baseval =
+ host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
+ val = cbread & 0xffff;
+ host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
+ cbread >> 24, baseval + val, base,
+ baseval, val);
+ } else
+ host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
+ HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
+ cbread);
+
+ host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ dmaput, dmaget, dmactrl);
+ host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+ show_channel_gathers(o, cdma);
+ host1x_debug_output(o, "\n");
+}
+
+static void host1x_debug_show_channel_fifo(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ u32 val, rd_ptr, wr_ptr, start, end;
+ unsigned int data_count = 0;
+
+ host1x_debug_output(o, "%u: fifo:\n", ch->id);
+
+ val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
+ host1x_debug_output(o, "FIFOSTAT %08x\n", val);
+ if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
+ host1x_debug_output(o, "[empty]\n");
+ return;
+ }
+
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+ host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+ HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
+ HOST1X_SYNC_CFPEEK_CTRL);
+
+ val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
+ rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
+ wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
+
+ val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
+ start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
+ end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
+
+ do {
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+ host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+ HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
+ HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
+ HOST1X_SYNC_CFPEEK_CTRL);
+ val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
+
+ if (!data_count) {
+ host1x_debug_output(o, "%08x: ", val);
+ data_count = show_channel_command(o, val, NULL);
+ } else {
+ host1x_debug_cont(o, "%08x%s", val,
+ data_count > 1 ? ", " : "])\n");
+ data_count--;
+ }
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (data_count)
+ host1x_debug_cont(o, ", ...])\n");
+ host1x_debug_output(o, "\n");
+
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+}
+
+static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
+{
+ unsigned int i;
+
+ host1x_debug_output(o, "---- mlocks ----\n");
+
+ for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
+ u32 owner =
+ host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
+ if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
+ host1x_debug_output(o, "%u: locked by channel %u\n",
+ i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
+ else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
+ host1x_debug_output(o, "%u: locked by cpu\n", i);
+ else
+ host1x_debug_output(o, "%u: unlocked\n", i);
+ }
+
+ host1x_debug_output(o, "\n");
+}
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x06.c b/drivers/gpu/host1x/hw/debug_hw_1x06.c
new file mode 100644
index 000000000000..b503c740c022
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw_1x06.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2017 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+
+static void host1x_debug_show_channel_cdma(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ struct host1x_cdma *cdma = &ch->cdma;
+ u32 dmaput, dmaget, dmactrl;
+ u32 offset, class;
+ u32 ch_stat;
+
+ dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
+ dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
+ dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
+ offset = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_OFFSET);
+ class = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_CLASS);
+ ch_stat = host1x_ch_readl(ch, HOST1X_CHANNEL_CHANNELSTAT);
+
+ host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
+
+ if (dmactrl & HOST1X_CHANNEL_DMACTRL_DMASTOP ||
+ !ch->cdma.push_buffer.mapped) {
+ host1x_debug_output(o, "inactive\n\n");
+ return;
+ }
+
+ if (class == HOST1X_CLASS_HOST1X && offset == HOST1X_UCLASS_WAIT_SYNCPT)
+ host1x_debug_output(o, "waiting on syncpt\n");
+ else
+ host1x_debug_output(o, "active class %02x, offset %04x\n",
+ class, offset);
+
+ host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ dmaput, dmaget, dmactrl);
+ host1x_debug_output(o, "CHANNELSTAT %02x\n", ch_stat);
+
+ show_channel_gathers(o, cdma);
+ host1x_debug_output(o, "\n");
+}
+
+static void host1x_debug_show_channel_fifo(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ u32 val, rd_ptr, wr_ptr, start, end;
+ u32 payload = INVALID_PAYLOAD;
+ unsigned int data_count = 0;
+
+ host1x_debug_output(o, "%u: fifo:\n", ch->id);
+
+ val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_STAT);
+ host1x_debug_output(o, "CMDFIFO_STAT %08x\n", val);
+ if (val & HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY) {
+ host1x_debug_output(o, "[empty]\n");
+ return;
+ }
+
+ val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA);
+ host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val);
+
+ /* Peek pointer values are invalid during SLCG, so disable it */
+ host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE);
+
+ val = 0;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id);
+ host1x_hypervisor_writel(host, val, HOST1X_HV_CMDFIFO_PEEK_CTRL);
+
+ val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_PEEK_PTRS);
+ rd_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(val);
+ wr_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(val);
+
+ val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_SETUP(ch->id));
+ start = HOST1X_HV_CMDFIFO_SETUP_BASE_V(val);
+ end = HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(val);
+
+ do {
+ val = 0;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id);
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(rd_ptr);
+ host1x_hypervisor_writel(host, val,
+ HOST1X_HV_CMDFIFO_PEEK_CTRL);
+
+ val = host1x_hypervisor_readl(host,
+ HOST1X_HV_CMDFIFO_PEEK_READ);
+
+ if (!data_count) {
+ host1x_debug_output(o, "%03x 0x%08x: ",
+ rd_ptr - start, val);
+ data_count = show_channel_command(o, val, &payload);
+ } else {
+ host1x_debug_cont(o, "%08x%s", val,
+ data_count > 1 ? ", " : "])\n");
+ data_count--;
+ }
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (data_count)
+ host1x_debug_cont(o, ", ...])\n");
+ host1x_debug_output(o, "\n");
+
+ host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL);
+ host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE);
+}
+
+static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
+{
+ /* TODO */
+}
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index 859b73beb4d0..bb124f8b4af8 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -21,6 +21,8 @@
#include "host1x01_hardware.h"
/* include code */
+#define HOST1X_HW 1
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
index 928946c2144b..c5f85dbedb98 100644
--- a/drivers/gpu/host1x/hw/host1x02.c
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -21,6 +21,8 @@
#include "host1x02_hardware.h"
/* include code */
+#define HOST1X_HW 2
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c
index 8007c70fa9c4..f102a1a7743f 100644
--- a/drivers/gpu/host1x/hw/host1x04.c
+++ b/drivers/gpu/host1x/hw/host1x04.c
@@ -21,6 +21,8 @@
#include "host1x04_hardware.h"
/* include code */
+#define HOST1X_HW 4
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x05.c b/drivers/gpu/host1x/hw/host1x05.c
index 047097ce3bad..2b1239d6ec67 100644
--- a/drivers/gpu/host1x/hw/host1x05.c
+++ b/drivers/gpu/host1x/hw/host1x05.c
@@ -21,6 +21,8 @@
#include "host1x05_hardware.h"
/* include code */
+#define HOST1X_HW 5
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x06.c b/drivers/gpu/host1x/hw/host1x06.c
new file mode 100644
index 000000000000..a66230827c59
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x06.c
@@ -0,0 +1,44 @@
+/*
+ * Host1x init for Tegra186 SoCs
+ *
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x06.h"
+#include "host1x06_hardware.h"
+
+/* include code */
+#define HOST1X_HW 6
+
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x06_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x06.h b/drivers/gpu/host1x/hw/host1x06.h
new file mode 100644
index 000000000000..d9abe1489241
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x06.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra186 SoCs
+ *
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X06_H
+#define HOST1X_HOST1X06_H
+
+struct host1x;
+
+int host1x06_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x06_hardware.h b/drivers/gpu/host1x/hw/host1x06_hardware.h
new file mode 100644
index 000000000000..3039c92ea605
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x06_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra186
+ *
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X06_HARDWARE_H
+#define __HOST1X_HOST1X06_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x06_uclass.h"
+#include "hw_host1x06_vm.h"
+#include "hw_host1x06_hypervisor.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
index 95e6f96142b9..2e8b635aa660 100644
--- a/drivers/gpu/host1x/hw/hw_host1x04_channel.h
+++ b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
@@ -117,5 +117,17 @@ static inline u32 host1x_channel_dmactrl_dmainitget(void)
}
#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
host1x_channel_dmactrl_dmainitget()
+static inline u32 host1x_channel_channelctrl_r(void)
+{
+ return 0x98;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL \
+ host1x_channel_channelctrl_r()
+static inline u32 host1x_channel_channelctrl_kernel_filter_gbuffer_f(u32 v)
+{
+ return (v & 0x1) << 2;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(v) \
+ host1x_channel_channelctrl_kernel_filter_gbuffer_f(v)
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_channel.h b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
index fce6e2c1ff4c..abbbc2641ce6 100644
--- a/drivers/gpu/host1x/hw/hw_host1x05_channel.h
+++ b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
@@ -117,5 +117,17 @@ static inline u32 host1x_channel_dmactrl_dmainitget(void)
}
#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
host1x_channel_dmactrl_dmainitget()
+static inline u32 host1x_channel_channelctrl_r(void)
+{
+ return 0x98;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL \
+ host1x_channel_channelctrl_r()
+static inline u32 host1x_channel_channelctrl_kernel_filter_gbuffer_f(u32 v)
+{
+ return (v & 0x1) << 2;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(v) \
+ host1x_channel_channelctrl_kernel_filter_gbuffer_f(v)
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h b/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h
new file mode 100644
index 000000000000..c05dab8a178b
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_HV_SYNCPT_PROT_EN 0x1ac4
+#define HOST1X_HV_SYNCPT_PROT_EN_CH_EN BIT(1)
+#define HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(x) (0x2020 + (x * 4))
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL 0x233c
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(x) (x)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(x) ((x) << 16)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE BIT(31)
+#define HOST1X_HV_CMDFIFO_PEEK_READ 0x2340
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS 0x2344
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(x) ((x) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP(x) (0x2588 + (x * 4))
+#define HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP_BASE_V(x) ((x) & 0xfff)
+#define HOST1X_HV_ICG_EN_OVERRIDE 0x2aa8
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
new file mode 100644
index 000000000000..4457486c72b0
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X06_UCLASS_H
+#define HOST1X_HW_HOST1X06_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_vm.h b/drivers/gpu/host1x/hw/hw_host1x06_vm.h
new file mode 100644
index 000000000000..e54b33902332
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x06_vm.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_CHANNEL_DMASTART 0x0000
+#define HOST1X_CHANNEL_DMASTART_HI 0x0004
+#define HOST1X_CHANNEL_DMAPUT 0x0008
+#define HOST1X_CHANNEL_DMAPUT_HI 0x000c
+#define HOST1X_CHANNEL_DMAGET 0x0010
+#define HOST1X_CHANNEL_DMAGET_HI 0x0014
+#define HOST1X_CHANNEL_DMAEND 0x0018
+#define HOST1X_CHANNEL_DMAEND_HI 0x001c
+#define HOST1X_CHANNEL_DMACTRL 0x0020
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP BIT(0)
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST BIT(1)
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET BIT(2)
+#define HOST1X_CHANNEL_CMDFIFO_STAT 0x0024
+#define HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY BIT(13)
+#define HOST1X_CHANNEL_CMDFIFO_RDATA 0x0028
+#define HOST1X_CHANNEL_CMDP_OFFSET 0x0030
+#define HOST1X_CHANNEL_CMDP_CLASS 0x0034
+#define HOST1X_CHANNEL_CHANNELSTAT 0x0038
+#define HOST1X_CHANNEL_CMDPROC_STOP 0x0048
+#define HOST1X_CHANNEL_TEARDOWN 0x004c
+
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(x) (0x6400 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(x) (0x6464 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4*(x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_BASE(x) (0x8000 + 4*(x))
+#define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8a00 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_CH_APP(x) (0x9384 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 37ebb51703fa..329239237090 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -72,6 +72,23 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
}
}
+static void intr_hw_init(struct host1x *host, u32 cpm)
+{
+#if HOST1X_HW < 6
+ /* disable the ip_busy_timeout. this prevents write drops */
+ host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+ /*
+ * increase the auto-ack timout to the maximum value. 2d will hang
+ * otherwise on Tegra2.
+ */
+ host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+ /* update host clocks per usec */
+ host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+#endif
+}
+
static int
_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
void (*syncpt_thresh_work)(struct work_struct *))
@@ -92,17 +109,7 @@ _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
return err;
}
- /* disable the ip_busy_timeout. this prevents write drops */
- host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
-
- /*
- * increase the auto-ack timout to the maximum value. 2d will hang
- * otherwise on Tegra2.
- */
- host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
-
- /* update host clocks per usec */
- host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+ intr_hw_init(host, cpm);
return 0;
}
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 7b0270d60742..7dfd47d74f89 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -106,6 +106,50 @@ static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
return 0;
}
+/**
+ * syncpt_assign_to_channel() - Assign syncpoint to channel
+ * @sp: syncpoint
+ * @ch: channel
+ *
+ * On chips with the syncpoint protection feature (Tegra186+), assign @sp to
+ * @ch, preventing other channels from incrementing the syncpoints. If @ch is
+ * NULL, unassigns the syncpoint.
+ *
+ * On older chips, do nothing.
+ */
+static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
+ struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ struct host1x *host = sp->host;
+
+ if (!host->hv_regs)
+ return;
+
+ host1x_sync_writel(host,
+ HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
+ HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
+#endif
+}
+
+/**
+ * syncpt_enable_protection() - Enable syncpoint protection
+ * @host: host1x instance
+ *
+ * On chips with the syncpoint protection feature (Tegra186+), enable this
+ * feature. On older chips, do nothing.
+ */
+static void syncpt_enable_protection(struct host1x *host)
+{
+#if HOST1X_HW >= 6
+ if (!host->hv_regs)
+ return;
+
+ host1x_hypervisor_writel(host, HOST1X_HV_SYNCPT_PROT_EN_CH_EN,
+ HOST1X_HV_SYNCPT_PROT_EN);
+#endif
+}
+
static const struct host1x_syncpt_ops host1x_syncpt_ops = {
.restore = syncpt_restore,
.restore_wait_base = syncpt_restore_wait_base,
@@ -113,4 +157,6 @@ static const struct host1x_syncpt_ops host1x_syncpt_ops = {
.load = syncpt_load,
.cpu_incr = syncpt_cpu_incr,
.patch_wait = syncpt_patch_wait,
+ .assign_to_channel = syncpt_assign_to_channel,
+ .enable_protection = syncpt_enable_protection,
};
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 048ac9e344ce..a2a952adc136 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -54,7 +54,7 @@ static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
}
static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
- struct device *dev,
+ struct host1x_client *client,
unsigned long flags)
{
int i;
@@ -76,11 +76,11 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
}
name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
- dev ? dev_name(dev) : NULL);
+ client ? dev_name(client->dev) : NULL);
if (!name)
goto free_base;
- sp->dev = dev;
+ sp->client = client;
sp->name = name;
if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
@@ -398,6 +398,13 @@ int host1x_syncpt_init(struct host1x *host)
for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
+
+ /*
+ * Unassign syncpt from channels for purposes of Tegra186
+ * syncpoint protection. This prevents any channel from
+ * accessing it until it is reassigned.
+ */
+ host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
}
for (i = 0; i < host->info->nb_bases; i++)
@@ -408,6 +415,7 @@ int host1x_syncpt_init(struct host1x *host)
host->bases = bases;
host1x_syncpt_restore(host);
+ host1x_hw_syncpt_enable_protection(host);
/* Allocate sync point to use for clearing waits for expired fences */
host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
@@ -419,7 +427,7 @@ int host1x_syncpt_init(struct host1x *host)
/**
* host1x_syncpt_request() - request a syncpoint
- * @dev: device requesting the syncpoint
+ * @client: client requesting the syncpoint
* @flags: flags
*
* host1x client drivers can use this function to allocate a syncpoint for
@@ -427,12 +435,12 @@ int host1x_syncpt_init(struct host1x *host)
* use by the client exclusively. When no longer using a syncpoint, a host1x
* client driver needs to release it using host1x_syncpt_free().
*/
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags)
{
- struct host1x *host = dev_get_drvdata(dev->parent);
+ struct host1x *host = dev_get_drvdata(client->parent->parent);
- return host1x_syncpt_alloc(host, dev, flags);
+ return host1x_syncpt_alloc(host, client, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);
@@ -456,7 +464,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
- sp->dev = NULL;
+ sp->client = NULL;
sp->name = NULL;
sp->client_managed = false;
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index f719205105ac..9d88d37c2397 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -44,7 +44,7 @@ struct host1x_syncpt {
const char *name;
bool client_managed;
struct host1x *host;
- struct device *dev;
+ struct host1x_client *client;
struct host1x_syncpt_base *base;
/* interrupt data */
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 7a4b8362dda8..49bfe6e7d005 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -249,11 +249,8 @@ EXPORT_SYMBOL_GPL(ipu_dc_enable);
void ipu_dc_enable_channel(struct ipu_dc *dc)
{
- int di;
u32 reg;
- di = dc->di;
-
reg = readl(dc->base + DC_WR_CH_CONF);
reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
writel(reg, dc->base + DC_WR_CH_CONF);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 76875f6299b8..d35d6d271f3f 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1402,29 +1402,14 @@ static struct miscdevice vga_arb_device = {
MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
};
-static int __init vga_arb_device_init(void)
+static void __init vga_arb_select_default_device(void)
{
- int rc;
struct pci_dev *pdev;
struct vga_device *vgadev;
- rc = misc_register(&vga_arb_device);
- if (rc < 0)
- pr_err("error %d registering device\n", rc);
-
- bus_register_notifier(&pci_bus_type, &pci_notifier);
-
- /* We add all pci devices satisfying vga class in the arbiter by
- * default */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_ANY_ID, pdev)) != NULL)
- vga_arbiter_add_pci_device(pdev);
-
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
list_for_each_entry(vgadev, &vga_list, list) {
struct device *dev = &vgadev->pdev->dev;
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
/*
* Override vga_arbiter_add_pci_device()'s I/O based detection
* as it may take the wrong device (e.g. on Apple system under
@@ -1461,13 +1446,66 @@ static int __init vga_arb_device_init(void)
vgaarb_info(dev, "overriding boot device\n");
vga_set_default_device(vgadev->pdev);
}
+ }
#endif
+
+ if (!vga_default_device()) {
+ list_for_each_entry(vgadev, &vga_list, list) {
+ struct device *dev = &vgadev->pdev->dev;
+ u16 cmd;
+
+ pdev = vgadev->pdev;
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
+ vga_set_default_device(pdev);
+ break;
+ }
+ }
+ }
+
+ if (!vga_default_device()) {
+ vgadev = list_first_entry_or_null(&vga_list,
+ struct vga_device, list);
+ if (vgadev) {
+ struct device *dev = &vgadev->pdev->dev;
+ vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
+ vga_set_default_device(vgadev->pdev);
+ }
+ }
+}
+
+static int __init vga_arb_device_init(void)
+{
+ int rc;
+ struct pci_dev *pdev;
+ struct vga_device *vgadev;
+
+ rc = misc_register(&vga_arb_device);
+ if (rc < 0)
+ pr_err("error %d registering device\n", rc);
+
+ bus_register_notifier(&pci_bus_type, &pci_notifier);
+
+ /* We add all PCI devices satisfying VGA class in the arbiter by
+ * default */
+ pdev = NULL;
+ while ((pdev =
+ pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_ANY_ID, pdev)) != NULL)
+ vga_arbiter_add_pci_device(pdev);
+
+ list_for_each_entry(vgadev, &vga_list, list) {
+ struct device *dev = &vgadev->pdev->dev;
+
if (vgadev->bridge_has_one_vga)
vgaarb_info(dev, "bridge control possible\n");
else
vgaarb_info(dev, "no bridge control possible\n");
}
+ vga_arb_select_default_device();
+
pr_info("loaded\n");
return rc;
}
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index 07cbc70f00e7..eae7d52cf1a8 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -173,9 +173,9 @@ static void battery_flat(struct appleir *appleir)
dev_err(&appleir->input_dev->dev, "possible flat battery?\n");
}
-static void key_up_tick(unsigned long data)
+static void key_up_tick(struct timer_list *t)
{
- struct appleir *appleir = (struct appleir *)data;
+ struct appleir *appleir = from_timer(appleir, t, key_up_timer);
struct hid_device *hid = appleir->hid;
unsigned long flags;
@@ -303,8 +303,7 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
spin_lock_init(&appleir->lock);
- setup_timer(&appleir->key_up_timer,
- key_up_tick, (unsigned long) appleir);
+ timer_setup(&appleir->key_up_timer, key_up_tick, 0);
hid_set_drvdata(hid, appleir);
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 49c4bd34b3c5..87eda34ea2f8 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -239,9 +239,9 @@ drop_note:
return;
}
-static void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(struct timer_list *t)
{
- struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+ struct pcmidi_sustain *pms = from_timer(pms, t, timer);
pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
pms->in_use = 0;
@@ -256,8 +256,7 @@ static void init_sustain_timers(struct pcmidi_snd *pm)
pms = &pm->sustained_notes[i];
pms->in_use = 0;
pms->pm = pm;
- setup_timer(&pms->timer, pcmidi_sustained_note_release,
- (unsigned long)pms);
+ timer_setup(&pms->timer, pcmidi_sustained_note_release, 0);
}
}
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index d00391418d1a..579884ebd94d 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1226,9 +1226,9 @@ static void wiimote_schedule(struct wiimote_data *wdata)
spin_unlock_irqrestore(&wdata->state.lock, flags);
}
-static void wiimote_init_timeout(unsigned long arg)
+static void wiimote_init_timeout(struct timer_list *t)
{
- struct wiimote_data *wdata = (void*)arg;
+ struct wiimote_data *wdata = from_timer(wdata, t, timer);
wiimote_schedule(wdata);
}
@@ -1740,7 +1740,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
wdata->state.cmd_battery = 0xff;
INIT_WORK(&wdata->init_worker, wiimote_init_worker);
- setup_timer(&wdata->timer, wiimote_init_timeout, (long)wdata);
+ timer_setup(&wdata->timer, wiimote_init_timeout, 0);
return wdata;
}
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index e7b1d796ba2e..14c22786b519 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -3,7 +3,9 @@ obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+CFLAGS_hv_trace.o = -I$(src)
+
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
- channel_mgmt.o ring_buffer.o
+ channel_mgmt.o ring_buffer.o hv_trace.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 894b67ac2cae..ba0a092ae085 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -43,6 +43,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
+ trace_vmbus_setevent(channel);
+
/*
* For channels marked as in "low latency" mode
* bypass the monitor page mechanism.
@@ -185,6 +187,8 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
+ trace_vmbus_open(open_msg, ret);
+
if (ret != 0) {
err = ret;
goto error_clean_msglist;
@@ -234,13 +238,18 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
const uuid_le *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
+ int ret;
memset(&conn_msg, 0, sizeof(conn_msg));
conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
conn_msg.guest_endpoint_id = *shv_guest_servie_id;
conn_msg.host_service_id = *shv_host_servie_id;
- return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+ ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+ trace_vmbus_send_tl_connect_request(&conn_msg, ret);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
@@ -433,6 +442,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
+
+ trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
+
if (ret != 0)
goto cleanup;
@@ -448,6 +460,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadl_body,
submsginfo->msgsize - sizeof(*submsginfo),
true);
+
+ trace_vmbus_establish_gpadl_body(gpadl_body, ret);
+
if (ret != 0)
goto cleanup;
@@ -511,6 +526,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
+ trace_vmbus_teardown_gpadl(msg, ret);
+
if (ret)
goto post_msg_err;
@@ -589,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
true);
+ trace_vmbus_close_internal(msg, ret);
+
if (ret) {
pr_err("Close failed: close post msg return is %d\n", ret);
/*
@@ -640,22 +659,28 @@ void vmbus_close(struct vmbus_channel *channel)
*/
return;
}
- mutex_lock(&vmbus_connection.channel_mutex);
/*
* Close all the sub-channels first and then close the
* primary channel.
*/
list_for_each_safe(cur, tmp, &channel->sc_list) {
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
- vmbus_close_internal(cur_channel);
if (cur_channel->rescind) {
+ wait_for_completion(&cur_channel->rescind_event);
+ mutex_lock(&vmbus_connection.channel_mutex);
+ vmbus_close_internal(cur_channel);
hv_process_channel_removal(
cur_channel->offermsg.child_relid);
+ } else {
+ mutex_lock(&vmbus_connection.channel_mutex);
+ vmbus_close_internal(cur_channel);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
* Now close the primary.
*/
+ mutex_lock(&vmbus_connection.channel_mutex);
vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
}
@@ -745,6 +770,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
+ desc.reserved = 0;
desc.rangecount = pagecount;
for (i = 0; i < pagecount; i++) {
@@ -788,6 +814,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = requestid;
+ desc->reserved = 0;
desc->rangecount = 1;
bufferlist[0].iov_base = desc;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 379b0df123be..c21020b69114 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void)
return NULL;
spin_lock_init(&channel->lock);
+ init_completion(&channel->rescind_event);
INIT_LIST_HEAD(&channel->sc_list);
INIT_LIST_HEAD(&channel->percpu_list);
@@ -350,7 +351,7 @@ static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
- kfree_rcu(channel, rcu);
+ kobject_put(&channel->kobj);
}
static void percpu_channel_enq(void *arg)
@@ -373,12 +374,15 @@ static void percpu_channel_deq(void *arg)
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
+ int ret;
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
- vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
- true);
+ ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+ true);
+
+ trace_vmbus_release_relid(&msg, ret);
}
void hv_process_channel_removal(u32 relid)
@@ -520,6 +524,14 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
newchannel->state = CHANNEL_OPEN_STATE;
if (!fnew) {
+ struct hv_device *dev
+ = newchannel->primary_channel->device_obj;
+
+ if (vmbus_add_channel_kobj(dev, newchannel)) {
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ goto err_free_chan;
+ }
+
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
@@ -805,6 +817,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer = (struct vmbus_channel_offer_channel *)hdr;
+ trace_vmbus_onoffer(offer);
+
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
@@ -846,6 +860,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
rescind = (struct vmbus_channel_rescind_offer *)hdr;
+ trace_vmbus_onoffer_rescind(rescind);
+
/*
* The offer msg and the corresponding rescind msg
* from the host are guranteed to be ordered -
@@ -883,6 +899,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
/*
* Now wait for offer handling to complete.
*/
+ vmbus_rescind_cleanup(channel);
while (READ_ONCE(channel->probe_done) == false) {
/*
* We wait here until any channel offer is currently
@@ -898,7 +915,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
- vmbus_rescind_cleanup(channel);
return;
}
/*
@@ -907,7 +923,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
*/
dev = get_device(&channel->device_obj->device);
if (dev) {
- vmbus_rescind_cleanup(channel);
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
@@ -921,13 +936,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* 2. Then close the primary channel.
*/
mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_rescind_cleanup(channel);
if (channel->state == CHANNEL_OPEN_STATE) {
/*
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
hv_process_channel_removal(rescind->child_relid);
+ } else {
+ complete(&channel->rescind_event);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
@@ -974,6 +990,8 @@ static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
result = (struct vmbus_channel_open_result *)hdr;
+ trace_vmbus_onopen_result(result);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1018,6 +1036,8 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
+ trace_vmbus_ongpadl_created(gpadlcreated);
+
/*
* Find the establish msg, copy the result and signal/unblock the wait
* event
@@ -1066,6 +1086,8 @@ static void vmbus_ongpadl_torndown(
gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
+ trace_vmbus_ongpadl_torndown(gpadl_torndown);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1109,6 +1131,9 @@ static void vmbus_onversion_response(
unsigned long flags;
version_response = (struct vmbus_channel_version_response *)hdr;
+
+ trace_vmbus_onversion_response(version_response);
+
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
@@ -1168,6 +1193,8 @@ void vmbus_onmessage(void *context)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
size = msg->header.payload_size;
+ trace_vmbus_on_message(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
pr_err("Received invalid channel message type %d size %d\n",
hdr->msgtype, size);
@@ -1201,9 +1228,11 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
-
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
+
+ trace_vmbus_request_offers(ret);
+
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f41901f80b64..447371f4de56 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -117,6 +117,9 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact),
true);
+
+ trace_vmbus_negotiate_version(msg, ret);
+
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
@@ -319,6 +322,8 @@ void vmbus_on_event(unsigned long data)
struct vmbus_channel *channel = (void *) data;
unsigned long time_limit = jiffies + 2;
+ trace_vmbus_on_event(channel);
+
do {
void (*callback_fn)(void *);
@@ -409,6 +414,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
if (!channel->is_dedicated_interrupt)
vmbus_send_interrupt(child_relid);
+ ++channel->sig_events;
+
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv_trace.c b/drivers/hv/hv_trace.c
new file mode 100644
index 000000000000..df47acd01a81
--- /dev/null
+++ b/drivers/hv/hv_trace.c
@@ -0,0 +1,4 @@
+#include "hyperv_vmbus.h"
+
+#define CREATE_TRACE_POINTS
+#include "hv_trace.h"
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
new file mode 100644
index 000000000000..d635ee95b20d
--- /dev/null
+++ b/drivers/hv/hv_trace.h
@@ -0,0 +1,327 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_HV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _HV_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(vmbus_hdr_msg,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr),
+ TP_STRUCT__entry(__field(unsigned int, msgtype)),
+ TP_fast_assign(__entry->msgtype = hdr->msgtype;),
+ TP_printk("msgtype=%u", __entry->msgtype)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_msg_dpc,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_message,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+TRACE_EVENT(vmbus_onoffer,
+ TP_PROTO(const struct vmbus_channel_offer_channel *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u8, monitorid)
+ __field(u16, is_ddc_int)
+ __field(u32, connection_id)
+ __array(char, if_type, 16)
+ __array(char, if_instance, 16)
+ __field(u16, chn_flags)
+ __field(u16, mmio_mb)
+ __field(u16, sub_idx)
+ ),
+ TP_fast_assign(__entry->child_relid = offer->child_relid;
+ __entry->monitorid = offer->monitorid;
+ __entry->is_ddc_int = offer->is_dedicated_interrupt;
+ __entry->connection_id = offer->connection_id;
+ memcpy(__entry->if_type,
+ &offer->offer.if_type.b, 16);
+ memcpy(__entry->if_instance,
+ &offer->offer.if_instance.b, 16);
+ __entry->chn_flags = offer->offer.chn_flags;
+ __entry->mmio_mb = offer->offer.mmio_megabytes;
+ __entry->sub_idx = offer->offer.sub_channel_index;
+ ),
+ TP_printk("child_relid 0x%x, monitorid 0x%x, is_dedicated %d, "
+ "connection_id 0x%x, if_type %pUl, if_instance %pUl, "
+ "chn_flags 0x%x, mmio_megabytes %d, sub_channel_index %d",
+ __entry->child_relid, __entry->monitorid,
+ __entry->is_ddc_int, __entry->connection_id,
+ __entry->if_type, __entry->if_instance,
+ __entry->chn_flags, __entry->mmio_mb,
+ __entry->sub_idx
+ )
+ );
+
+TRACE_EVENT(vmbus_onoffer_rescind,
+ TP_PROTO(const struct vmbus_channel_rescind_offer *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(__field(u32, child_relid)),
+ TP_fast_assign(__entry->child_relid = offer->child_relid),
+ TP_printk("child_relid 0x%x", __entry->child_relid)
+ );
+
+TRACE_EVENT(vmbus_onopen_result,
+ TP_PROTO(const struct vmbus_channel_open_result *result),
+ TP_ARGS(result),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = result->child_relid;
+ __entry->openid = result->openid;
+ __entry->status = result->status;
+ ),
+ TP_printk("child_relid 0x%x, openid %d, status %d",
+ __entry->child_relid, __entry->openid, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_created,
+ TP_PROTO(const struct vmbus_channel_gpadl_created *gpadlcreated),
+ TP_ARGS(gpadlcreated),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = gpadlcreated->child_relid;
+ __entry->gpadl = gpadlcreated->gpadl;
+ __entry->status = gpadlcreated->creation_status;
+ ),
+ TP_printk("child_relid 0x%x, gpadl 0x%x, creation_status %d",
+ __entry->child_relid, __entry->gpadl, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_torndown,
+ TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
+ TP_ARGS(gpadltorndown),
+ TP_STRUCT__entry(__field(u32, gpadl)),
+ TP_fast_assign(__entry->gpadl = gpadltorndown->gpadl),
+ TP_printk("gpadl 0x%x", __entry->gpadl)
+ );
+
+TRACE_EVENT(vmbus_onversion_response,
+ TP_PROTO(const struct vmbus_channel_version_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u8, ver)
+ ),
+ TP_fast_assign(__entry->ver = response->version_supported;
+ ),
+ TP_printk("version_supported %d", __entry->ver)
+ );
+
+TRACE_EVENT(vmbus_request_offers,
+ TP_PROTO(int ret),
+ TP_ARGS(ret),
+ TP_STRUCT__entry(__field(int, ret)),
+ TP_fast_assign(__entry->ret = ret),
+ TP_printk("sending ret %d", __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_open,
+ TP_PROTO(const struct vmbus_channel_open_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, gpadlhandle)
+ __field(u32, target_vp)
+ __field(u32, offset)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->openid = msg->openid;
+ __entry->gpadlhandle = msg->ringbuffer_gpadlhandle;
+ __entry->target_vp = msg->target_vp;
+ __entry->offset = msg->downstream_ringbuffer_pageoffset;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, openid %d, "
+ "gpadlhandle 0x%x, target_vp 0x%x, offset 0x%x, ret %d",
+ __entry->child_relid, __entry->openid,
+ __entry->gpadlhandle, __entry->target_vp,
+ __entry->offset, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_close_internal,
+ TP_PROTO(const struct vmbus_channel_close_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d", __entry->child_relid,
+ __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_header,
+ TP_PROTO(const struct vmbus_channel_gpadl_header *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u16, range_buflen)
+ __field(u16, rangecount)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->range_buflen = msg->range_buflen;
+ __entry->rangecount = msg->rangecount;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, range_buflen %d "
+ "rangecount %d, ret %d",
+ __entry->child_relid, __entry->gpadl,
+ __entry->range_buflen, __entry->rangecount, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_body,
+ TP_PROTO(const struct vmbus_channel_gpadl_body *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, msgnumber)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->msgnumber = msg->msgnumber;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending msgnumber %d, gpadl 0x%x, ret %d",
+ __entry->msgnumber, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_teardown_gpadl,
+ TP_PROTO(const struct vmbus_channel_gpadl_teardown *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, ret %d",
+ __entry->child_relid, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_negotiate_version,
+ TP_PROTO(const struct vmbus_channel_initiate_contact *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, ver)
+ __field(u32, target_vcpu)
+ __field(int, ret)
+ __field(u64, int_page)
+ __field(u64, mon_page1)
+ __field(u64, mon_page2)
+ ),
+ TP_fast_assign(
+ __entry->ver = msg->vmbus_version_requested;
+ __entry->target_vcpu = msg->target_vcpu;
+ __entry->int_page = msg->interrupt_page;
+ __entry->mon_page1 = msg->monitor_page1;
+ __entry->mon_page2 = msg->monitor_page2;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending vmbus_version_requested %d, target_vcpu 0x%x, "
+ "pages %llx:%llx:%llx, ret %d",
+ __entry->ver, __entry->target_vcpu, __entry->int_page,
+ __entry->mon_page1, __entry->mon_page2, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_release_relid,
+ TP_PROTO(const struct vmbus_channel_relid_released *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d",
+ __entry->child_relid, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_send_tl_connect_request,
+ TP_PROTO(const struct vmbus_channel_tl_connect_request *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __array(char, guest_id, 16)
+ __array(char, host_id, 16)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
+ memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+ __entry->ret = ret;
+ ),
+ TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
+ "ret %d",
+ __entry->guest_id, __entry->host_id, __entry->ret
+ )
+ );
+
+DECLARE_EVENT_CLASS(vmbus_channel,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel),
+ TP_STRUCT__entry(__field(u32, relid)),
+ TP_fast_assign(__entry->relid = channel->offermsg.child_relid),
+ TP_printk("relid 0x%x", __entry->relid)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_chan_sched,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_setevent,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_on_event,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hv_trace
+#endif /* _HV_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 49569f8fe038..22300ec7b556 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,8 @@
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include "hv_trace.h"
+
/*
* Timeout for services such as KVP and fcopy.
*/
@@ -373,6 +375,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
+int vmbus_add_channel_kobj(struct hv_device *device_obj,
+ struct vmbus_channel *channel);
struct vmbus_channel *relid2channel(u32 relid);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 2cd134dd94d2..76ed9a216f10 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -65,7 +65,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
regs = current_pt_regs();
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -75,7 +75,7 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -107,28 +107,30 @@ static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
-static u8 channel_monitor_group(struct vmbus_channel *channel)
+static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
}
-static u8 channel_monitor_offset(struct vmbus_channel *channel)
+static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid % 32;
}
-static u32 channel_pending(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_pending(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
+
return monitor_page->trigger_group[monitor_group].pending;
}
-static u32 channel_latency(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_latency(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
+
return monitor_page->latency[monitor_group][monitor_offset];
}
@@ -833,6 +835,8 @@ void vmbus_on_msg_dpc(unsigned long data)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ trace_vmbus_on_msg_dpc(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
goto msg_handled;
@@ -942,6 +946,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->rescind)
continue;
+ trace_vmbus_chan_sched(channel);
+
+ ++channel->interrupts;
+
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
@@ -1133,6 +1141,159 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
+
+/*
+ * Called when last reference to channel is gone.
+ */
+static void vmbus_chan_release(struct kobject *kobj)
+{
+ struct vmbus_channel *channel
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ kfree_rcu(channel, rcu);
+}
+
+struct vmbus_chan_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
+ ssize_t (*store)(struct vmbus_channel *chan,
+ const char *buf, size_t count);
+};
+#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
+ struct vmbus_chan_attribute chan_attr_##_name \
+ = __ATTR(_name, _mode, _show, _store)
+#define VMBUS_CHAN_ATTR_RW(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
+#define VMBUS_CHAN_ATTR_RO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
+#define VMBUS_CHAN_ATTR_WO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
+
+static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ const struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(chan, buf);
+}
+
+static const struct sysfs_ops vmbus_chan_sysfs_ops = {
+ .show = vmbus_chan_attr_show,
+};
+
+static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(out_mask);
+
+static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(in_mask);
+
+static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+}
+VMBUS_CHAN_ATTR_RO(read_avail);
+
+static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+}
+VMBUS_CHAN_ATTR_RO(write_avail);
+
+static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%u\n", channel->target_cpu);
+}
+VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
+
+static ssize_t channel_pending_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_pending(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
+
+static ssize_t channel_latency_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_latency(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
+
+static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->interrupts);
+}
+VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
+
+static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->sig_events);
+}
+VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+
+static struct attribute *vmbus_chan_attrs[] = {
+ &chan_attr_out_mask.attr,
+ &chan_attr_in_mask.attr,
+ &chan_attr_read_avail.attr,
+ &chan_attr_write_avail.attr,
+ &chan_attr_cpu.attr,
+ &chan_attr_pending.attr,
+ &chan_attr_latency.attr,
+ &chan_attr_interrupts.attr,
+ &chan_attr_events.attr,
+ NULL
+};
+
+static struct kobj_type vmbus_chan_ktype = {
+ .sysfs_ops = &vmbus_chan_sysfs_ops,
+ .release = vmbus_chan_release,
+ .default_attrs = vmbus_chan_attrs,
+};
+
+/*
+ * vmbus_add_channel_kobj - setup a sub-directory under device/channels
+ */
+int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+{
+ struct kobject *kobj = &channel->kobj;
+ u32 relid = channel->offermsg.child_relid;
+ int ret;
+
+ kobj->kset = dev->channels_kset;
+ ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
+ "%u", relid);
+ if (ret)
+ return ret;
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return 0;
+}
+
/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
@@ -1164,7 +1325,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
*/
int vmbus_device_register(struct hv_device *child_device_obj)
{
- int ret = 0;
+ struct kobject *kobj = &child_device_obj->device.kobj;
+ int ret;
dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
@@ -1178,13 +1340,32 @@ int vmbus_device_register(struct hv_device *child_device_obj)
* binding...which will eventually call vmbus_match() and vmbus_probe()
*/
ret = device_register(&child_device_obj->device);
-
- if (ret)
+ if (ret) {
pr_err("Unable to register child device\n");
- else
- pr_debug("child device %s registered\n",
- dev_name(&child_device_obj->device));
+ return ret;
+ }
+
+ child_device_obj->channels_kset = kset_create_and_add("channels",
+ NULL, kobj);
+ if (!child_device_obj->channels_kset) {
+ ret = -ENOMEM;
+ goto err_dev_unregister;
+ }
+
+ ret = vmbus_add_channel_kobj(child_device_obj,
+ child_device_obj->channel);
+ if (ret) {
+ pr_err("Unable to register primary channeln");
+ goto err_kset_unregister;
+ }
+
+ return 0;
+
+err_kset_unregister:
+ kset_unregister(child_device_obj->channels_kset);
+err_dev_unregister:
+ device_unregister(&child_device_obj->device);
return ret;
}
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 5f11dc014ed6..e5234f953a6d 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -22,6 +22,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
#define JC42_REG_TEMP 0x05
#define JC42_REG_MANID 0x06
#define JC42_REG_DEVICEID 0x07
+#define JC42_REG_SMBUS 0x22 /* NXP and Atmel, possibly others? */
/* Status bits in temperature register */
#define JC42_ALARM_CRIT_BIT 15
@@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = {
#define GT_MANID 0x1c68 /* Giantec */
#define GT_MANID2 0x132d /* Giantec, 2nd mfg ID */
+/* SMBUS register */
+#define SMBUS_STMOUT BIT(7) /* SMBus time-out, active low */
+
/* Supported chips */
/* Analog Devices */
@@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->extended = !!(cap & JC42_CAP_RANGE);
+ if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+ int smbus;
+
+ /*
+ * Not all chips support this register, but from a
+ * quick read of various datasheets no chip appears
+ * incompatible with the below attempt to disable
+ * the timeout. And the whole thing is opt-in...
+ */
+ smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+ if (smbus < 0)
+ return smbus;
+ i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+ smbus | SMBUS_STMOUT);
+ }
+
config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
if (config < 0)
return config;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 46a54ed23410..0721e175664a 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -81,7 +81,7 @@ struct tctl_offset {
};
static const struct tctl_offset tctl_offset_table[] = {
- { 0x17, "AMD Ryzen 7 1600X", 20000 },
+ { 0x17, "AMD Ryzen 5 1600X", 20000 },
{ 0x17, "AMD Ryzen 7 1700X", 20000 },
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 52a58b8b6e1b..a139940cd991 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
static long pmbus_reg2data_direct(struct pmbus_data *data,
struct pmbus_sensor *sensor)
{
- long val = (s16) sensor->data;
- long m, b, R;
+ s64 b, val = (s16)sensor->data;
+ s32 m, R;
m = data->info->m[sensor->class];
b = data->info->b[sensor->class];
@@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
R--;
}
while (R < 0) {
- val = DIV_ROUND_CLOSEST(val, 10);
+ val = div_s64(val + 5LL, 10L); /* round closest */
R++;
}
- return (val - b) / m;
+ val = div_s64(val - b, m);
+ return clamp_val(val, LONG_MIN, LONG_MAX);
}
/*
@@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
struct pmbus_sensor *sensor, long val)
{
- long m, b, R;
+ s64 b, val64 = val;
+ s32 m, R;
m = data->info->m[sensor->class];
b = data->info->b[sensor->class];
@@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
- val = val * m + b;
+ val64 = val64 * m + b;
while (R > 0) {
- val *= 10;
+ val64 *= 10;
R--;
}
while (R < 0) {
- val = DIV_ROUND_CLOSEST(val, 10);
+ val64 = div_s64(val64 + 5LL, 10L); /* round closest */
R++;
}
- return val;
+ return (u16)clamp_val(val64, S16_MIN, S16_MAX);
}
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 246fb2365126..2b0f182daa87 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1246,10 +1246,8 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_remove_files:
w83781d_remove_files(dev);
- if (data->lm75[0])
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1])
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1262,10 +1260,8 @@ w83781d_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
w83781d_remove_files(dev);
- if (data->lm75[0])
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1])
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return 0;
}
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 8af6081b4ab4..28fa3bd2c096 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1316,8 +1316,7 @@ static int w83791d_detect_subclients(struct i2c_client *client)
/* Undo inits in case of errors */
error_sc_1:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[0]);
error_sc_0:
return err;
}
@@ -1434,10 +1433,8 @@ error5:
error4:
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
error3:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1448,10 +1445,8 @@ static int w83791d_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return 0;
}
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index d764602d70db..76aa39e537e0 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -981,8 +981,7 @@ w83792d_detect_subclients(struct i2c_client *new_client)
/* Undo inits in case of errors */
ERROR_SC_1:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
return err;
}
@@ -1456,10 +1455,8 @@ exit_remove_files:
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
exit_i2c_unregister:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1475,10 +1472,8 @@ w83792d_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj,
&w83792d_group_fan[i]);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return 0;
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 5ba9d9f1daa1..0af0f6283b35 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1564,10 +1564,8 @@ static int w83793_remove(struct i2c_client *client)
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
@@ -1625,8 +1623,7 @@ w83793_detect_subclients(struct i2c_client *client)
/* Undo inits in case of errors */
ERROR_SC_1:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
return err;
}
@@ -1962,10 +1959,8 @@ exit_remove:
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
free_mem:
kfree(data);
exit:
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 6e0a5539a9ea..f0f467983960 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig HWSPINLOCK
- tristate "Hardware Spinlock drivers"
+ bool "Hardware Spinlock drivers"
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index accc2056f7c6..8f4357e2626c 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -199,8 +199,8 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
static const struct amba_id replicator_ids[] = {
{
- .id = 0x0003b909,
- .mask = 0x0003ffff,
+ .id = 0x000bb909,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 56ecd7aff5eb..e03e58933141 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -748,8 +748,8 @@ static const struct dev_pm_ops etb_dev_pm_ops = {
static const struct amba_id etb_ids[] = {
{
- .id = 0x0003b907,
- .mask = 0x0003ffff,
+ .id = 0x000bb907,
+ .mask = 0x000fffff,
},
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index e5b1ec57dbde..39f42fdd503d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -901,33 +901,33 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
static const struct amba_id etm_ids[] = {
{ /* ETM 3.3 */
- .id = 0x0003b921,
- .mask = 0x0003ffff,
+ .id = 0x000bb921,
+ .mask = 0x000fffff,
.data = "ETM 3.3",
},
{ /* ETM 3.5 - Cortex-A5 */
- .id = 0x0003b955,
- .mask = 0x0003ffff,
+ .id = 0x000bb955,
+ .mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* ETM 3.5 */
- .id = 0x0003b956,
- .mask = 0x0003ffff,
+ .id = 0x000bb956,
+ .mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* PTM 1.0 */
- .id = 0x0003b950,
- .mask = 0x0003ffff,
+ .id = 0x000bb950,
+ .mask = 0x000fffff,
.data = "PTM 1.0",
},
{ /* PTM 1.1 */
- .id = 0x0003b95f,
- .mask = 0x0003ffff,
+ .id = 0x000bb95f,
+ .mask = 0x000fffff,
.data = "PTM 1.1",
},
{ /* PTM 1.1 Qualcomm */
- .id = 0x0003006f,
- .mask = 0x0003ffff,
+ .id = 0x000b006f,
+ .mask = 0x000fffff,
.data = "PTM 1.1",
},
{ 0, 0},
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 77642e0e955b..fd3c396717f6 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -248,8 +248,8 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
static const struct amba_id funnel_ids[] = {
{
- .id = 0x0003b908,
- .mask = 0x0003ffff,
+ .id = 0x000bb908,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 92a780a6df1d..15e7ef3891f5 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -917,13 +917,13 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
static const struct amba_id stm_ids[] = {
{
- .id = 0x0003b962,
- .mask = 0x0003ffff,
+ .id = 0x000bb962,
+ .mask = 0x000fffff,
.data = "STM32",
},
{
- .id = 0x0003b963,
- .mask = 0x0003ffff,
+ .id = 0x000bb963,
+ .mask = 0x000fffff,
.data = "STM500",
},
{ 0, 0},
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 2ff4a66a3caa..0ea04f588de0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -439,8 +439,8 @@ out:
static const struct amba_id tmc_ids[] = {
{
- .id = 0x0003b961,
- .mask = 0x0003ffff,
+ .id = 0x000bb961,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC 600 TMC-ETR/ETS */
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index d7a3e453016d..bef49a3a5ca7 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -194,8 +194,8 @@ static const struct dev_pm_ops tpiu_dev_pm_ops = {
static const struct amba_id tpiu_ids[] = {
{
- .id = 0x0003b912,
- .mask = 0x0003ffff,
+ .id = 0x000bb912,
+ .mask = 0x000fffff,
},
{
.id = 0x0004b912,
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
index bd126a7c6da2..7da75644c750 100644
--- a/drivers/hwtracing/stm/ftrace.c
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -42,9 +42,11 @@ static struct stm_ftrace {
* @len: length of the data packet
*/
static void notrace
-stm_ftrace_write(const void *buf, unsigned int len)
+stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
{
- stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+ struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+
+ stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
}
static int stm_ftrace_link(struct stm_source_data *data)
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 0d05dadb2dc5..44cffad43701 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id cht_wc_i2c_adap_id_table[] = {
+static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
{ .name = "cht_wcove_ext_chgr" },
{},
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 9e12a53ef7b8..8eac00efadc1 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Default timeout in interrupt mode: 200 ms */
priv->adapter.timeout = HZ / 5;
+ if (dev->irq == IRQ_NOTCONNECTED)
+ priv->features &= ~FEATURE_IRQ;
+
if (priv->features & FEATURE_IRQ) {
u16 pcictl, pcists;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 174579d32e5f..462948e2c535 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
if (adapdata->smba) {
i2c_del_adapter(adap);
- if (adapdata->port == (0 << 1)) {
+ if (adapdata->port == (0 << piix4_port_shift_sb800)) {
release_region(adapdata->smba, SMBIOSIZE);
if (adapdata->sb800_main)
release_region(SB800_PIIX4_SMB_IDX, 2);
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index dab51761f8c5..d4f9cef251ac 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* i2c-stm32.h
*
* Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _I2C_STM32_H
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4ec108496f15..47c8d00de53f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for STMicroelectronics STM32 I2C controller
*
@@ -6,11 +7,11 @@
* http://www.st.com/resource/en/reference_manual/DM00031020.pdf
*
* Copyright (C) M'boumba Cedric Madianga 2016
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
* This driver is based on i2c-st.c
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d4a6e9c2e9aa..b445b3bb0bb1 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for STMicroelectronics STM32F7 I2C controller
*
@@ -7,11 +8,11 @@
* http://www.st.com/resource/en/reference_manual/dm00124865.pdf
*
* Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
* This driver is based on i2c-stm32f4.c
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 31186ead5a40..509a6007cdf6 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
property_entries_dup(info->properties);
if (IS_ERR(devinfo->board_info.properties)) {
status = PTR_ERR(devinfo->board_info.properties);
+ kfree(devinfo);
break;
}
}
@@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
GFP_KERNEL);
if (!devinfo->board_info.resources) {
status = -ENOMEM;
+ kfree(devinfo);
break;
}
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 6f638bbc922d..2cab27a68479 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,6 +35,7 @@
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
@@ -238,46 +239,29 @@ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
}
static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
- unsigned long arg)
+ unsigned nmsgs, struct i2c_msg *msgs)
{
- struct i2c_rdwr_ioctl_data rdwr_arg;
- struct i2c_msg *rdwr_pa;
u8 __user **data_ptrs;
int i, res;
- if (copy_from_user(&rdwr_arg,
- (struct i2c_rdwr_ioctl_data __user *)arg,
- sizeof(rdwr_arg)))
- return -EFAULT;
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
- return -EINVAL;
-
- rdwr_pa = memdup_user(rdwr_arg.msgs,
- rdwr_arg.nmsgs * sizeof(struct i2c_msg));
- if (IS_ERR(rdwr_pa))
- return PTR_ERR(rdwr_pa);
-
- data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
+ data_ptrs = kmalloc(nmsgs * sizeof(u8 __user *), GFP_KERNEL);
if (data_ptrs == NULL) {
- kfree(rdwr_pa);
+ kfree(msgs);
return -ENOMEM;
}
res = 0;
- for (i = 0; i < rdwr_arg.nmsgs; i++) {
+ for (i = 0; i < nmsgs; i++) {
/* Limit the size of the message to a sane amount */
- if (rdwr_pa[i].len > 8192) {
+ if (msgs[i].len > 8192) {
res = -EINVAL;
break;
}
- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
- rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
- if (IS_ERR(rdwr_pa[i].buf)) {
- res = PTR_ERR(rdwr_pa[i].buf);
+ data_ptrs[i] = (u8 __user *)msgs[i].buf;
+ msgs[i].buf = memdup_user(data_ptrs[i], msgs[i].len);
+ if (IS_ERR(msgs[i].buf)) {
+ res = PTR_ERR(msgs[i].buf);
break;
}
@@ -292,121 +276,117 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
* greater (for example to account for a checksum byte at
* the end of the message.)
*/
- if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
- if (!(rdwr_pa[i].flags & I2C_M_RD) ||
- rdwr_pa[i].buf[0] < 1 ||
- rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+ if (msgs[i].flags & I2C_M_RECV_LEN) {
+ if (!(msgs[i].flags & I2C_M_RD) ||
+ msgs[i].buf[0] < 1 ||
+ msgs[i].len < msgs[i].buf[0] +
I2C_SMBUS_BLOCK_MAX) {
res = -EINVAL;
break;
}
- rdwr_pa[i].len = rdwr_pa[i].buf[0];
+ msgs[i].len = msgs[i].buf[0];
}
}
if (res < 0) {
int j;
for (j = 0; j < i; ++j)
- kfree(rdwr_pa[j].buf);
+ kfree(msgs[j].buf);
kfree(data_ptrs);
- kfree(rdwr_pa);
+ kfree(msgs);
return res;
}
- res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs);
+ res = i2c_transfer(client->adapter, msgs, nmsgs);
while (i-- > 0) {
- if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) {
- if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf,
- rdwr_pa[i].len))
+ if (res >= 0 && (msgs[i].flags & I2C_M_RD)) {
+ if (copy_to_user(data_ptrs[i], msgs[i].buf,
+ msgs[i].len))
res = -EFAULT;
}
- kfree(rdwr_pa[i].buf);
+ kfree(msgs[i].buf);
}
kfree(data_ptrs);
- kfree(rdwr_pa);
+ kfree(msgs);
return res;
}
static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
- unsigned long arg)
+ u8 read_write, u8 command, u32 size,
+ union i2c_smbus_data __user *data)
{
- struct i2c_smbus_ioctl_data data_arg;
union i2c_smbus_data temp = {};
int datasize, res;
- if (copy_from_user(&data_arg,
- (struct i2c_smbus_ioctl_data __user *) arg,
- sizeof(struct i2c_smbus_ioctl_data)))
- return -EFAULT;
- if ((data_arg.size != I2C_SMBUS_BYTE) &&
- (data_arg.size != I2C_SMBUS_QUICK) &&
- (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
- (data_arg.size != I2C_SMBUS_WORD_DATA) &&
- (data_arg.size != I2C_SMBUS_PROC_CALL) &&
- (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
- (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
- (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
- (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
+ if ((size != I2C_SMBUS_BYTE) &&
+ (size != I2C_SMBUS_QUICK) &&
+ (size != I2C_SMBUS_BYTE_DATA) &&
+ (size != I2C_SMBUS_WORD_DATA) &&
+ (size != I2C_SMBUS_PROC_CALL) &&
+ (size != I2C_SMBUS_BLOCK_DATA) &&
+ (size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
+ (size != I2C_SMBUS_I2C_BLOCK_DATA) &&
+ (size != I2C_SMBUS_BLOCK_PROC_CALL)) {
dev_dbg(&client->adapter->dev,
"size out of range (%x) in ioctl I2C_SMBUS.\n",
- data_arg.size);
+ size);
return -EINVAL;
}
/* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
so the check is valid if size==I2C_SMBUS_QUICK too. */
- if ((data_arg.read_write != I2C_SMBUS_READ) &&
- (data_arg.read_write != I2C_SMBUS_WRITE)) {
+ if ((read_write != I2C_SMBUS_READ) &&
+ (read_write != I2C_SMBUS_WRITE)) {
dev_dbg(&client->adapter->dev,
"read_write out of range (%x) in ioctl I2C_SMBUS.\n",
- data_arg.read_write);
+ read_write);
return -EINVAL;
}
/* Note that command values are always valid! */
- if ((data_arg.size == I2C_SMBUS_QUICK) ||
- ((data_arg.size == I2C_SMBUS_BYTE) &&
- (data_arg.read_write == I2C_SMBUS_WRITE)))
+ if ((size == I2C_SMBUS_QUICK) ||
+ ((size == I2C_SMBUS_BYTE) &&
+ (read_write == I2C_SMBUS_WRITE)))
/* These are special: we do not use data */
return i2c_smbus_xfer(client->adapter, client->addr,
- client->flags, data_arg.read_write,
- data_arg.command, data_arg.size, NULL);
+ client->flags, read_write,
+ command, size, NULL);
- if (data_arg.data == NULL) {
+ if (data == NULL) {
dev_dbg(&client->adapter->dev,
"data is NULL pointer in ioctl I2C_SMBUS.\n");
return -EINVAL;
}
- if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
- (data_arg.size == I2C_SMBUS_BYTE))
- datasize = sizeof(data_arg.data->byte);
- else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
- (data_arg.size == I2C_SMBUS_PROC_CALL))
- datasize = sizeof(data_arg.data->word);
+ if ((size == I2C_SMBUS_BYTE_DATA) ||
+ (size == I2C_SMBUS_BYTE))
+ datasize = sizeof(data->byte);
+ else if ((size == I2C_SMBUS_WORD_DATA) ||
+ (size == I2C_SMBUS_PROC_CALL))
+ datasize = sizeof(data->word);
else /* size == smbus block, i2c block, or block proc. call */
- datasize = sizeof(data_arg.data->block);
+ datasize = sizeof(data->block);
- if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
- (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
- (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
- (data_arg.read_write == I2C_SMBUS_WRITE)) {
- if (copy_from_user(&temp, data_arg.data, datasize))
+ if ((size == I2C_SMBUS_PROC_CALL) ||
+ (size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+ (size == I2C_SMBUS_I2C_BLOCK_DATA) ||
+ (read_write == I2C_SMBUS_WRITE)) {
+ if (copy_from_user(&temp, data, datasize))
return -EFAULT;
}
- if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
+ if (size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
/* Convert old I2C block commands to the new
convention. This preserves binary compatibility. */
- data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
- if (data_arg.read_write == I2C_SMBUS_READ)
+ size = I2C_SMBUS_I2C_BLOCK_DATA;
+ if (read_write == I2C_SMBUS_READ)
temp.block[0] = I2C_SMBUS_BLOCK_MAX;
}
res = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- data_arg.read_write, data_arg.command, data_arg.size, &temp);
- if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
- (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
- (data_arg.read_write == I2C_SMBUS_READ))) {
- if (copy_to_user(data_arg.data, &temp, datasize))
+ read_write, command, size, &temp);
+ if (!res && ((size == I2C_SMBUS_PROC_CALL) ||
+ (size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+ (read_write == I2C_SMBUS_READ))) {
+ if (copy_to_user(data, &temp, datasize))
return -EFAULT;
}
return res;
@@ -454,12 +434,39 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
funcs = i2c_get_functionality(client->adapter);
return put_user(funcs, (unsigned long __user *)arg);
- case I2C_RDWR:
- return i2cdev_ioctl_rdwr(client, arg);
+ case I2C_RDWR: {
+ struct i2c_rdwr_ioctl_data rdwr_arg;
+ struct i2c_msg *rdwr_pa;
+
+ if (copy_from_user(&rdwr_arg,
+ (struct i2c_rdwr_ioctl_data __user *)arg,
+ sizeof(rdwr_arg)))
+ return -EFAULT;
+
+ /* Put an arbitrary limit on the number of messages that can
+ * be sent at once */
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
- case I2C_SMBUS:
- return i2cdev_ioctl_smbus(client, arg);
+ rdwr_pa = memdup_user(rdwr_arg.msgs,
+ rdwr_arg.nmsgs * sizeof(struct i2c_msg));
+ if (IS_ERR(rdwr_pa))
+ return PTR_ERR(rdwr_pa);
+
+ return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
+ }
+ case I2C_SMBUS: {
+ struct i2c_smbus_ioctl_data data_arg;
+ if (copy_from_user(&data_arg,
+ (struct i2c_smbus_ioctl_data __user *) arg,
+ sizeof(struct i2c_smbus_ioctl_data)))
+ return -EFAULT;
+ return i2cdev_ioctl_smbus(client, data_arg.read_write,
+ data_arg.command,
+ data_arg.size,
+ data_arg.data);
+ }
case I2C_RETRIES:
client->adapter->retries = arg;
break;
@@ -480,6 +487,90 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+#ifdef CONFIG_COMPAT
+
+struct i2c_smbus_ioctl_data32 {
+ u8 read_write;
+ u8 command;
+ u32 size;
+ compat_caddr_t data; /* union i2c_smbus_data *data */
+};
+
+struct i2c_msg32 {
+ u16 addr;
+ u16 flags;
+ u16 len;
+ compat_caddr_t buf;
+};
+
+struct i2c_rdwr_ioctl_data32 {
+ compat_caddr_t msgs; /* struct i2c_msg __user *msgs */
+ u32 nmsgs;
+};
+
+static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = file->private_data;
+ unsigned long funcs;
+ switch (cmd) {
+ case I2C_FUNCS:
+ funcs = i2c_get_functionality(client->adapter);
+ return put_user(funcs, (compat_ulong_t __user *)arg);
+ case I2C_RDWR: {
+ struct i2c_rdwr_ioctl_data32 rdwr_arg;
+ struct i2c_msg32 *p;
+ struct i2c_msg *rdwr_pa;
+ int i;
+
+ if (copy_from_user(&rdwr_arg,
+ (struct i2c_rdwr_ioctl_data32 __user *)arg,
+ sizeof(rdwr_arg)))
+ return -EFAULT;
+
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+ rdwr_pa = kmalloc_array(rdwr_arg.nmsgs, sizeof(struct i2c_msg),
+ GFP_KERNEL);
+ if (!rdwr_pa)
+ return -ENOMEM;
+
+ p = compat_ptr(rdwr_arg.msgs);
+ for (i = 0; i < rdwr_arg.nmsgs; i++) {
+ struct i2c_msg32 umsg;
+ if (copy_from_user(&umsg, p + i, sizeof(umsg))) {
+ kfree(rdwr_pa);
+ return -EFAULT;
+ }
+ rdwr_pa[i] = (struct i2c_msg) {
+ .addr = umsg.addr,
+ .flags = umsg.flags,
+ .len = umsg.len,
+ .buf = compat_ptr(umsg.buf)
+ };
+ }
+
+ return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
+ }
+ case I2C_SMBUS: {
+ struct i2c_smbus_ioctl_data32 data32;
+ if (copy_from_user(&data32,
+ (void __user *) arg,
+ sizeof(data32)))
+ return -EFAULT;
+ return i2cdev_ioctl_smbus(client, data32.read_write,
+ data32.command,
+ data32.size,
+ compat_ptr(data32.data));
+ }
+ default:
+ return i2cdev_ioctl(file, cmd, arg);
+ }
+}
+#else
+#define compat_i2cdev_ioctl NULL
+#endif
+
static int i2cdev_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
@@ -527,6 +618,7 @@ static const struct file_operations i2cdev_fops = {
.read = i2cdev_read,
.write = i2cdev_write,
.unlocked_ioctl = i2cdev_ioctl,
+ .compat_ioctl = compat_i2cdev_ioctl,
.open = i2cdev_open,
.release = i2cdev_release,
};
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6ff0be8cbdc9..7c3ed7c9af77 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1333,8 +1333,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
struct scsi_request *req = scsi_req(rq);
- scsi_req_init(req);
- memset(req->cmd, 0, BLK_MAX_CDB);
+ q->initialize_rq_fn(rq);
if (rq_data_dir(rq) == READ)
req->cmd[0] = GPCMD_READ_10;
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index f5f2b62471da..859ddab9448f 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -22,7 +22,7 @@
#define DRV_NAME "ide-pnp"
/* Add your devices here :)) */
-static struct pnp_device_id idepnp_devices[] = {
+static const struct pnp_device_id idepnp_devices[] = {
/* Generic ESDI/IDE/ATA compatible hard disk controller */
{.id = "PNP0600", .driver_data = 0},
{.id = ""}
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 3576ec73ec23..9ad60421d360 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -1011,7 +1011,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
ddata->irq = platform_get_irq_byname(pdev, "adcdone");
- if (!ddata->irq)
+ if (ddata->irq < 0)
return -ENODEV;
error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 9c6932ffc0af..36047147ce7c 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel {
struct meson_sar_adc_data {
bool has_bl30_integration;
+ u32 bandgap_reg;
unsigned int resolution;
const char *name;
+ const struct regmap_config *regmap_config;
};
struct meson_sar_adc_priv {
@@ -242,13 +244,20 @@ struct meson_sar_adc_priv {
int calibscale;
};
-static const struct regmap_config meson_sar_adc_regmap_config = {
+static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
.max_register = MESON_SAR_ADC_REG13,
};
+static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MESON_SAR_ADC_DELTA_10,
+};
+
static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -600,7 +609,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
init.num_parents = 1;
priv->clk_gate.reg = base + MESON_SAR_ADC_REG3;
- priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN);
+ priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
priv->clk_gate.hw.init = &init;
priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
@@ -685,6 +694,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
return 0;
}
+static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
+{
+ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ u32 enable_mask;
+
+ if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
+ enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
+ else
+ enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
+
+ regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
+ on_off ? enable_mask : 0);
+}
+
static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -717,9 +740,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
- MESON_SAR_ADC_REG11_BANDGAP_EN,
- MESON_SAR_ADC_REG11_BANDGAP_EN);
+
+ meson_sar_adc_set_bandgap(indio_dev, true);
+
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
MESON_SAR_ADC_REG3_ADC_EN,
MESON_SAR_ADC_REG3_ADC_EN);
@@ -739,8 +762,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
err_adc_clk:
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
MESON_SAR_ADC_REG3_ADC_EN, 0);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
- MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+ meson_sar_adc_set_bandgap(indio_dev, false);
clk_disable_unprepare(priv->sana_clk);
err_sana_clk:
clk_disable_unprepare(priv->core_clk);
@@ -765,8 +787,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
MESON_SAR_ADC_REG3_ADC_EN, 0);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
- MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+
+ meson_sar_adc_set_bandgap(indio_dev, false);
clk_disable_unprepare(priv->sana_clk);
clk_disable_unprepare(priv->core_clk);
@@ -844,30 +866,40 @@ static const struct iio_info meson_sar_adc_iio_info = {
static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
.has_bl30_integration = false,
+ .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+ .regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
.name = "meson-meson8-saradc",
};
static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
.has_bl30_integration = false,
+ .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+ .regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
.name = "meson-meson8b-saradc",
};
static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
.has_bl30_integration = true,
+ .bandgap_reg = MESON_SAR_ADC_REG11,
+ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 10,
.name = "meson-gxbb-saradc",
};
static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
.has_bl30_integration = true,
+ .bandgap_reg = MESON_SAR_ADC_REG11,
+ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.name = "meson-gxl-saradc",
};
static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
.has_bl30_integration = true,
+ .bandgap_reg = MESON_SAR_ADC_REG11,
+ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.name = "meson-gxm-saradc",
};
@@ -945,7 +977,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
return ret;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &meson_sar_adc_regmap_config);
+ priv->data->regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index ea7adb638d99..2ba2ff5e59c4 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -175,9 +175,9 @@ static void ssp_wdt_work_func(struct work_struct *work)
data->timeout_cnt = 0;
}
-static void ssp_wdt_timer_func(unsigned long ptr)
+static void ssp_wdt_timer_func(struct timer_list *t)
{
- struct ssp_data *data = (struct ssp_data *)ptr;
+ struct ssp_data *data = from_timer(data, t, wdt_timer);
switch (data->fw_dl_state) {
case SSP_FW_DL_STATE_FAIL:
@@ -571,7 +571,7 @@ static int ssp_probe(struct spi_device *spi)
INIT_WORK(&data->work_wdt, ssp_wdt_work_func);
INIT_DELAYED_WORK(&data->work_refresh, ssp_refresh_task);
- setup_timer(&data->wdt_timer, ssp_wdt_timer_func, (unsigned long)data);
+ timer_setup(&data->wdt_timer, ssp_wdt_timer_func, 0);
ret = request_threaded_irq(data->spi->irq, NULL,
ssp_irq_thread_fn,
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 203ffb9cad6a..147a8c14235f 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
break;
case IIO_CHAN_INFO_SCALE:
- *val = 1; /* 0.0625 */
+ *val = 1000; /* 62.5 */
*val2 = 16;
ret = IIO_VAL_FRACTIONAL;
break;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 9c4cfd19b739..2f0998ebeed2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
* iio_format_value() - Formats a IIO value into its string representation
* @buf: The buffer to which the formatted value gets written
* which is assumed to be big enough (i.e. PAGE_SIZE).
- * @type: One of the IIO_VAL_... constants. This decides how the val
+ * @type: One of the IIO_VAL_* constants. This decides how the val
* and val2 parameters are formatted.
* @size: Number of IIO value entries contained in vals
* @vals: Pointer to the values, exact meaning depends on the
@@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
*
* Return: 0 by default, a negative number on failure or the
* total number of characters written for a type that belongs
- * to the IIO_VAL_... constant.
+ * to the IIO_VAL_* constant.
*/
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 53c5d653e780..df23dbcc030a 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -869,6 +869,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev)
static void sx9500_gpio_probe(struct i2c_client *client,
struct sx9500_data *data)
{
+ struct gpio_desc *gpiod_int;
struct device *dev;
if (!client)
@@ -876,6 +877,14 @@ static void sx9500_gpio_probe(struct i2c_client *client,
dev = &client->dev;
+ if (client->irq <= 0) {
+ gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN);
+ if (IS_ERR(gpiod_int))
+ dev_err(dev, "gpio get irq failed\n");
+ else
+ client->irq = gpiod_to_irq(gpiod_int);
+ }
+
data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
if (IS_ERR(data->gpiod_rst)) {
dev_warn(dev, "gpio get reset pin failed\n");
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 98ac46ed7214..cbf186522016 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,6 +1,6 @@
menuconfig INFINIBAND
tristate "InfiniBand support"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
depends on NET
depends on INET
depends on m || IPV6 != m
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1fdb473b5df7..6294a7001d33 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
id_priv->id.route.addr.dev_addr.net = get_net(net);
+ id_priv->seq_num &= 0x00ffffff;
return &id_priv->id;
}
@@ -4457,7 +4458,7 @@ out:
return skb->len;
}
-static const struct rdma_nl_cbs cma_cb_table[] = {
+static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
};
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 84fc32a2c8b3..30914f3baa5f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1146,7 +1146,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
[RDMA_NL_LS_OP_RESOLVE] = {
.doit = ib_nl_handle_resolve_resp,
.flags = RDMA_NL_ADMIN_PERM,
@@ -1253,5 +1253,5 @@ static void __exit ib_core_cleanup(void)
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
-module_init(ib_core_init);
+subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index e9e189ec7502..5d676cff41f4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
}
EXPORT_SYMBOL(iwcm_reject_msg);
-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 2fae850a3eff..9a05245a1acf 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -303,7 +303,7 @@ out: cb->args[0] = idx;
return skb->len;
}
-static const struct rdma_nl_cbs nldev_cb_table[] = {
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
.dump = nldev_get_dumpit,
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 23278ed5be45..feafdb961c48 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
+ u8 i = rdma_start_port(dev);
+ bool is_ib = false;
int ret;
+ while (i <= rdma_end_port(dev) && !is_ib)
+ is_ib = rdma_protocol_ib(dev, i++);
+
+ /* If this isn't an IB device don't create the security context */
+ if (!is_ib)
+ return 0;
+
qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
if (!qp->qp_sec)
return -ENOMEM;
@@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
+ /* Return if not IB */
+ if (!sec)
+ return;
+
mutex_lock(&sec->mutex);
/* Remove the QP from the lists so it won't get added to
@@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
int ret;
int i;
+ /* Return if not IB */
+ if (!sec)
+ return;
+
/* If a concurrent cache update is in progress this
* QP security could be marked for an error state
* transition. Wait for this to complete.
@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
int i;
+ /* Return if not IB */
+ if (!sec)
+ return;
+
/* If a concurrent cache update is occurring we must
* wait until this QP security structure is processed
* in the QP to error flow before destroying it because
@@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
{
int ret = 0;
struct ib_ports_pkeys *tmp_pps;
- struct ib_ports_pkeys *new_pps;
+ struct ib_ports_pkeys *new_pps = NULL;
struct ib_qp *real_qp = qp->real_qp;
bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
real_qp->qp_type == IB_QPT_GSI ||
@@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
(qp_attr_mask & IB_QP_ALT_PATH));
+ WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+ rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+ !real_qp->qp_sec),
+ "%s: QP security is not initialized for IB QP: %d\n",
+ __func__, real_qp->qp_num);
+
/* The port/pkey settings are maintained only for the real QP. Open
* handles on the real QP will be in the shared_qp_list. When
* enforcing security on the real QP all the shared QPs will be
* checked as well.
*/
- if (pps_change && !special_qp) {
+ if (pps_change && !special_qp && real_qp->qp_sec) {
mutex_lock(&real_qp->qp_sec->mutex);
new_pps = get_new_pps(real_qp,
qp_attr,
qp_attr_mask);
-
+ if (!new_pps) {
+ mutex_unlock(&real_qp->qp_sec->mutex);
+ return -ENOMEM;
+ }
/* Add this QP to the lists for the new port
* and pkey settings before checking for permission
* in case there is a concurrent cache update
@@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
qp_attr_mask,
udata);
- if (pps_change && !special_qp) {
+ if (new_pps) {
/* Clean up the lists and free the appropriate
* ports_pkeys structure.
*/
@@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
u16 pkey;
int ret;
+ if (!rdma_protocol_ib(dev, port_num))
+ return 0;
+
ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
if (ret)
return ret;
@@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
{
int ret;
+ if (!rdma_protocol_ib(agent->device, agent->port_num))
+ return 0;
+
ret = security_ib_alloc_security(&agent->security);
if (ret)
return ret;
@@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
+ if (!rdma_protocol_ib(agent->device, agent->port_num))
+ return;
+
security_ib_free_security(agent->security);
if (agent->lsm_nb_reg)
unregister_lsm_notifier(&agent->lsm_nb);
@@ -697,8 +736,14 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
- if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
- return -EACCES;
+ if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+ return 0;
+
+ if (map->agent.qp->qp_type == IB_QPT_SMI) {
+ if (!map->agent.smp_allowed)
+ return -EACCES;
+ return 0;
+ }
return ib_security_pkey_access(map->agent.device,
map->agent.port_num,
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 21e60b1e2ff4..130606c3b07c 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 16d55710b116..d0202bb176a4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}
+ if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+ !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
attr->qp_state = cmd->base.qp_state;
attr->cur_qp_state = cmd->base.cur_qp_state;
attr->path_mtu = cmd->base.path_mtu;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ea55e95cd2c5..b7bfc536e00f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,6 +395,11 @@ next_cqe:
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
+ if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+ WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+ return 0;
+ }
+
if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5ee7fe433136..38bddd02a943 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
complete_sq_drain_wr(qhp, wr);
return err;
@@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
complete_rq_drain_wr(qhp, wr);
return err;
@@ -1285,21 +1295,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
spin_unlock_irqrestore(&rchp->lock, flag);
if (schp == rchp) {
- if (t4_clear_cq_armed(&rchp->cq) &&
- (rq_flushed || sq_flushed)) {
+ if ((rq_flushed || sq_flushed) &&
+ t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
} else {
- if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+ if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
- if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+ if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 76157cc03eca..2e6e0c516041 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -71,13 +71,13 @@ static ssize_t hfi1_seq_read(
loff_t *ppos)
{
struct dentry *d = file->f_path.dentry;
- int srcu_idx;
ssize_t r;
- r = debugfs_use_file_start(d, &srcu_idx);
- if (likely(!r))
- r = seq_read(file, buf, size, ppos);
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(d);
+ if (unlikely(r))
+ return r;
+ r = seq_read(file, buf, size, ppos);
+ debugfs_file_put(d);
return r;
}
@@ -87,13 +87,13 @@ static loff_t hfi1_seq_lseek(
int whence)
{
struct dentry *d = file->f_path.dentry;
- int srcu_idx;
loff_t r;
- r = debugfs_use_file_start(d, &srcu_idx);
- if (likely(!r))
- r = seq_lseek(file, offset, whence);
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(d);
+ if (unlikely(r))
+ return r;
+ r = seq_lseek(file, offset, whence);
+ debugfs_file_put(d);
return r;
}
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index fd01a760259f..af5f7936f7e5 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_16b_header *hdr = &opa_hdr->opah;
struct ib_other_headers *ohdr;
- u32 bth0, bth1;
+ u32 bth0, bth1 = 0;
u16 len, pkey;
u8 becn = !!is_fecn;
u8 l4 = OPA_16B_L4_IB_LOCAL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 3e4c5253ab5c..a40ec939ece5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
{
int i;
struct device *dev = hr_dev->dev;
- u32 bits_per_long = BITS_PER_LONG;
if (buf->nbufs == 1) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
- if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
- vunmap(buf->direct.buf);
-
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
{
int i = 0;
dma_addr_t t;
- struct page **pages;
struct device *dev = hr_dev->dev;
- u32 bits_per_long = BITS_PER_LONG;
u32 page_size = 1 << page_shift;
u32 order;
@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
buf->page_list[i].map = t;
memset(buf->page_list[i].buf, 0, page_size);
}
- if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
- pages = kmalloc_array(buf->nbufs, sizeof(*pages),
- GFP_KERNEL);
- if (!pages)
- goto err_free;
-
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
-
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
- PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- } else {
- buf->direct.buf = NULL;
- }
}
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 01d3d695cbba..b154ce40cded 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -726,11 +726,9 @@ static inline struct hns_roce_qp
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
- u32 bits_per_long_val = BITS_PER_LONG;
u32 page_size = 1 << buf->page_shift;
- if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
- buf->nbufs == 1)
+ if (buf->nbufs == 1)
return (char *)(buf->direct.buf) + offset;
else
return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 8b733a66fae5..0eeabfbee192 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
+ memset(chunk->buf, 0, sizeof(chunk->buf));
list_add_tail(&chunk->list, &hem->chunk_list);
}
@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
if (!buf)
goto fail;
- sg_set_buf(mem, buf, PAGE_SIZE << order);
- WARN_ON(mem->offset);
+ chunk->buf[chunk->npages] = buf;
sg_dma_len(mem) = PAGE_SIZE << order;
++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(hr_dev->dev,
- chunk->mem[i].length,
- lowmem_page_address(sg_page(&chunk->mem[i])),
+ sg_dma_len(&chunk->mem[i]),
+ chunk->buf[i],
sg_dma_address(&chunk->mem[i]));
kfree(chunk);
}
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_chunk *chunk;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
- struct page *page = NULL;
+ void *addr = NULL;
unsigned long mhop_obj = obj;
unsigned long obj_per_chunk;
unsigned long idx_offset;
int offset, dma_offset;
+ int length;
int i, j;
u32 hem_idx = 0;
@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
list_for_each_entry(chunk, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
+ length = sg_dma_len(&chunk->mem[i]);
if (dma_handle && dma_offset >= 0) {
- if (sg_dma_len(&chunk->mem[i]) >
- (u32)dma_offset)
+ if (length > (u32)dma_offset)
*dma_handle = sg_dma_address(
&chunk->mem[i]) + dma_offset;
- dma_offset -= sg_dma_len(&chunk->mem[i]);
+ dma_offset -= length;
}
- if (chunk->mem[i].length > (u32)offset) {
- page = sg_page(&chunk->mem[i]);
+ if (length > (u32)offset) {
+ addr = chunk->buf[i] + offset;
goto out;
}
- offset -= chunk->mem[i].length;
+ offset -= length;
}
}
out:
mutex_unlock(&table->mutex);
- return page ? lowmem_page_address(page) + offset : NULL;
+ return addr;
}
EXPORT_SYMBOL_GPL(hns_roce_table_find);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index db66db12075e..e8850d59e780 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
int npages;
int nsg;
struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
+ void *buf[HNS_ROCE_HEM_CHUNK_LEN];
};
struct hns_roce_hem {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8f719c00467b..8e18445714a9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1126,9 +1126,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
{
struct hns_roce_v2_mpt_entry *mpt_entry;
struct scatterlist *sg;
+ u64 page_addr;
u64 *pages;
+ int i, j;
+ int len;
int entry;
- int i;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1186,14 +1188,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
i = 0;
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
- pages[i] = ((u64)sg_dma_address(sg)) >> 6;
-
- /* Record the first 2 entry directly to MTPT table */
- if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
- break;
- i++;
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (j = 0; j < len; ++j) {
+ page_addr = sg_dma_address(sg) +
+ (j << mr->umem->page_shift);
+ pages[i] = page_addr >> 6;
+
+ /* Record the first 2 entry directly to MTPT table */
+ if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+ goto found;
+ i++;
+ }
}
+found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 493d6ef3d2d5..77870f9e1736 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1043,7 +1043,7 @@ negotiate_done:
* i40iw_schedule_cm_timer
* @@cm_node: connection's node
* @sqbuf: buffer to send
- * @type: if it es send ot close
+ * @type: if it is send or close
* @send_retrans: if rexmits to be done
* @close_when_complete: is cm_node to be removed
*
@@ -1067,7 +1067,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send) {
- i40iw_free_sqbuf(vsi, (void *)sqbuf);
+ if (type != I40IW_TIMER_TYPE_CLOSE)
+ i40iw_free_sqbuf(vsi, (void *)sqbuf);
return -ENOMEM;
}
new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1082,7 +1083,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send->timetosend += (HZ / 10);
if (cm_node->close_entry) {
kfree(new_send);
- i40iw_free_sqbuf(vsi, (void *)sqbuf);
i40iw_pr_err("already close entry\n");
return -EINVAL;
}
@@ -2947,8 +2947,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
- loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
- i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
}
return cm_node;
}
@@ -3689,11 +3687,16 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_id->add_ref(cm_id);
i40iw_add_ref(&iwqp->ibqp);
- i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
-
attr.qp_state = IB_QPS_RTS;
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+ cm_node->accelerated = 1;
+ status =
+ i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+ if (status)
+ i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");
+
if (cm_node->loopbackpartner) {
cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
@@ -3704,7 +3707,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
}
- cm_node->accelerated = 1;
if (cm_node->accept_pend) {
atomic_dec(&cm_node->listener->pend_accepts_cnt);
cm_node->accept_pend = 0;
@@ -3864,6 +3866,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto err;
}
+ if (cm_node->loopbackpartner) {
+ cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
+ i40iw_create_event(cm_node->loopbackpartner,
+ I40IW_CM_EVENT_MPA_REQ);
+ }
+
i40iw_debug(cm_node->dev,
I40IW_DEBUG_CM,
"Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
@@ -4044,9 +4052,6 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
if (iwqp->page)
kunmap(iwqp->page);
- status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
- if (status)
- i40iw_pr_err("send cm event\n");
memset(&attr, 0, sizeof(attr));
attr.qp_state = IB_QPS_RTS;
@@ -4054,6 +4059,10 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
cm_node->accelerated = 1;
+ status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ 0);
+ if (status)
+ i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");
return;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d88c6cf47cf2..da9821a10e0d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
&cqp->sdbuf,
- 128,
+ I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
I40IW_SD_BUF_ALIGNMENT);
if (ret_code)
@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
}
/**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
*/
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+ u64 scratch, u32 *wqe_idx)
{
u64 *wqe = NULL;
- u32 wqe_idx;
enum i40iw_status_code ret_code;
if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -616,21 +617,33 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
cqp->sq_ring.size);
return NULL;
}
- I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+ I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
if (ret_code)
return NULL;
- if (!wqe_idx)
+ if (!*wqe_idx)
cqp->polarity = !cqp->polarity;
- wqe = cqp->sq_base[wqe_idx].elem;
- cqp->scratch_array[wqe_idx] = scratch;
+ wqe = cqp->sq_base[*wqe_idx].elem;
+ cqp->scratch_array[*wqe_idx] = scratch;
I40IW_CQP_INIT_WQE(wqe);
return wqe;
}
/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+ u32 wqe_idx;
+
+ return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+
+/**
* i40iw_sc_cqp_destroy - destroy cqp during close
* @cqp: struct for cqp hw
*/
@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
u64 *wqe;
int mem_entries, wqe_entries;
struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+ u64 offset;
+ u32 wqe_idx;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
if (!wqe)
return I40IW_ERR_RING_FULL;
@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
if (mem_entries) {
- memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
- data = sdbuf->pa;
+ offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+ memcpy((char *)sdbuf->va + offset, &info->entry[3],
+ mem_entries << 4);
+ data = (u64)sdbuf->pa + offset;
} else {
data = 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 65ec39e3746b..029083cb81d5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1114,7 +1114,7 @@
#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
#define I40IWQPC_ARPIDX_SHIFT 48
-#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
#define I40IWQPC_FLOWLABEL_SHIFT 0
#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
@@ -1526,7 +1526,7 @@ enum i40iw_alignment {
I40IW_AEQ_ALIGNMENT = 0x100,
I40IW_CEQ_ALIGNMENT = 0x100,
I40IW_CQ0_ALIGNMENT = 0x100,
- I40IW_SD_BUF_ALIGNMENT = 0x100
+ I40IW_SD_BUF_ALIGNMENT = 0x80
};
#define I40IW_WQE_SIZE_64 64
@@ -1534,6 +1534,8 @@ enum i40iw_alignment {
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
+#define I40IW_UPDATE_SD_BUF_SIZE 128
+
#define I40IW_CQE_QTYPE_RQ 0
#define I40IW_CQE_QTYPE_SQ 1
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 013049bcdb53..caf490ab24c8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+ ucmd->rx_hash_fields_mask);
+ return (-EOPNOTSUPP);
+ }
+
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
-
} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9beee9cef137..ee0ee1f9994b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -642,9 +642,9 @@ err:
return -ENOMEM;
}
-static void delay_time_func(unsigned long ctx)
+static void delay_time_func(struct timer_list *t)
{
- struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+ struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
dev->fill_delay = 0;
}
@@ -663,7 +663,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
- setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
+ timer_setup(&dev->delay_timer, delay_time_func, 0);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
INIT_LIST_HEAD(&ent->head);
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index f6474c24f193..ffb98eaaf1c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -130,9 +130,9 @@ static void handle_catas(struct mthca_dev *dev)
spin_unlock_irqrestore(&catas_lock, flags);
}
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
{
- struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+ struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
int i;
for (i = 0; i < dev->catas_err.size; ++i)
@@ -149,7 +149,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
{
phys_addr_t addr;
- init_timer(&dev->catas_err.timer);
+ timer_setup(&dev->catas_err.timer, poll_catas, 0);
dev->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, 0) +
@@ -164,8 +164,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
return;
}
- dev->catas_err.timer.data = (unsigned long) dev;
- dev->catas_err.timer.function = poll_catas;
dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
INIT_LIST_HEAD(&dev->catas_err.list);
add_timer(&dev->catas_err.timer);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index db46b7b53fb4..162475aeeedd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3819,7 +3819,7 @@ void nes_port_ibevent(struct nes_vnic *nesvnic)
if (!nesvnic->event_timer.function) {
ib_dispatch_event(&event);
nesvnic->last_dispatched_event = event.event;
- nesvnic->event_timer.function = (TIMER_FUNC_TYPE)nes_handle_delayed_event;
+ nesvnic->event_timer.function = nes_handle_delayed_event;
nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
add_timer(&nesvnic->event_timer);
} else {
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 5243ad30dfc0..85dfbba427f6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1667,8 +1667,9 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
}
if (!rcd->rcvegrbuf_phys) {
rcd->rcvegrbuf_phys =
- kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
- GFP_KERNEL, rcd->node_id);
+ kmalloc_array_node(chunk,
+ sizeof(rcd->rcvegrbuf_phys[0]),
+ GFP_KERNEL, rcd->node_id);
if (!rcd->rcvegrbuf_phys)
goto bail_rcvegrbuf;
}
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 410025a19729..9177df60742a 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -238,7 +238,7 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
rdi->qp_dev->qp_table =
- kmalloc_node(rdi->qp_dev->qp_table_size *
+ kmalloc_array_node(rdi->qp_dev->qp_table_size,
sizeof(*rdi->qp_dev->qp_table),
GFP_KERNEL, rdi->dparms.node);
if (!rdi->qp_dev->qp_table)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 87f4bd99cdf7..2c13123bfd69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
noio_flag = memalloc_noio_save();
p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
if (!p->tx_ring) {
+ memalloc_noio_restore(noio_flag);
ret = -ENOMEM;
goto err_tx;
}
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index cedc665364cd..73862a836062 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -202,9 +202,9 @@ void gameport_stop_polling(struct gameport *gameport)
}
EXPORT_SYMBOL(gameport_stop_polling);
-static void gameport_run_poll_handler(unsigned long d)
+static void gameport_run_poll_handler(struct timer_list *t)
{
- struct gameport *gameport = (struct gameport *)d;
+ struct gameport *gameport = from_timer(gameport, t, poll_timer);
gameport->poll_handler(gameport);
if (gameport->poll_cnt)
@@ -542,8 +542,7 @@ static void gameport_init_port(struct gameport *gameport)
INIT_LIST_HEAD(&gameport->node);
spin_lock_init(&gameport->timer_lock);
- setup_timer(&gameport->poll_timer, gameport_run_poll_handler,
- (unsigned long)gameport);
+ timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
}
/*
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 44916ef4a424..e30642db50d5 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2047,7 +2047,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
*/
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
- dev->timer.function = (TIMER_FUNC_TYPE)input_repeat_key;
+ dev->timer.function = input_repeat_key;
dev->rep[REP_DELAY] = delay;
dev->rep[REP_PERIOD] = period;
}
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index f4ad83eab67f..de0dd4756c84 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -364,9 +364,9 @@ static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
return 0;
}
-static void db9_timer(unsigned long private)
+static void db9_timer(struct timer_list *t)
{
- struct db9 *db9 = (void *) private;
+ struct db9 *db9 = from_timer(db9, t, timer);
struct parport *port = db9->pd->port;
struct input_dev *dev = db9->dev[0];
struct input_dev *dev2 = db9->dev[1];
@@ -609,7 +609,7 @@ static void db9_attach(struct parport *pp)
db9->pd = pd;
db9->mode = mode;
db9->parportno = pp->number;
- setup_timer(&db9->timer, db9_timer, (long)db9);
+ timer_setup(&db9->timer, db9_timer, 0);
for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index ca734ea97e53..2ffb2e8bdc3b 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -743,9 +743,9 @@ static void gc_psx_process_packet(struct gc *gc)
* gc_timer() initiates reads of console pads data.
*/
-static void gc_timer(unsigned long private)
+static void gc_timer(struct timer_list *t)
{
- struct gc *gc = (void *) private;
+ struct gc *gc = from_timer(gc, t, timer);
/*
* N64 pads - must be read first, any read confuses them for 200 us
@@ -974,7 +974,7 @@ static void gc_attach(struct parport *pp)
mutex_init(&gc->mutex);
gc->pd = pd;
gc->parportno = pp->number;
- setup_timer(&gc->timer, gc_timer, (long) gc);
+ timer_setup(&gc->timer, gc_timer, 0);
for (i = 0; i < n_pads && i < GC_MAX_DEVICES; i++) {
if (!pads[i])
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index a1fdc75a438d..e2685753e460 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -89,9 +89,9 @@ static struct tgfx {
* tgfx_timer() reads and analyzes TurboGraFX joystick data.
*/
-static void tgfx_timer(unsigned long private)
+static void tgfx_timer(struct timer_list *t)
{
- struct tgfx *tgfx = (void *) private;
+ struct tgfx *tgfx = from_timer(tgfx, t, timer);
struct input_dev *dev;
int data1, data2, i;
@@ -200,7 +200,7 @@ static void tgfx_attach(struct parport *pp)
mutex_init(&tgfx->sem);
tgfx->pd = pd;
tgfx->parportno = pp->number;
- setup_timer(&tgfx->timer, tgfx_timer, (long)tgfx);
+ timer_setup(&tgfx->timer, tgfx_timer, 0);
for (i = 0; i < n_devs; i++) {
if (n_buttons[i] < 1)
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index d3265b6b58b8..1173890f6719 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -102,7 +102,7 @@ static inline bool get_down(unsigned long data0, unsigned long data1)
!(data1 & S3C2410_ADCDAT0_UPDOWN));
}
-static void touch_timer_fire(unsigned long data)
+static void touch_timer_fire(struct timer_list *unused)
{
unsigned long data0;
unsigned long data1;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a0babdbf7146..4a2de34895ec 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2250,10 +2250,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
uint64_t tmp;
if (!sg_res) {
+ unsigned int pgoff = sg->offset & ~PAGE_MASK;
+
sg_res = aligned_nrpages(sg->offset, sg->length);
- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+ sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
sg->dma_length = sg->length;
- pteval = page_to_phys(sg_page(sg)) | prot;
+ pteval = (sg_phys(sg) - pgoff) | prot;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
@@ -3787,7 +3789,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
for_each_sg(sglist, sg, nelems, i) {
BUG_ON(!sg_page(sg));
- sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+ sg->dma_address = sg_phys(sg);
sg->dma_length = sg->length;
}
return nelems;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 466aaa8ba841..83fe2621effe 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -36,7 +36,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(unsigned long data);
+static void fq_flush_timeout(struct timer_list *t);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -107,7 +107,7 @@ int init_iova_flush_queue(struct iova_domain *iovad,
spin_lock_init(&fq->lock);
}
- setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+ timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
return 0;
@@ -519,9 +519,9 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
}
}
-static void fq_flush_timeout(unsigned long data)
+static void fq_flush_timeout(struct timer_list *t)
{
- struct iova_domain *iovad = (struct iova_domain *)data;
+ struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
int cpu;
atomic_set(&iovad->fq_timer_on, 0);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 53380bd72ea4..c70476b34a53 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -41,8 +41,15 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
+ select GENERIC_MSI_IRQ_DOMAIN
+ default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+ bool
+ depends on ARM_GIC_V3_ITS
depends on PCI
depends on PCI_MSI
+ default ARM_GIC_V3_ITS
config ARM_NVIC
bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index dae7282bfdef..d2df34a54d38 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -30,7 +30,8 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index dc8c1e3eafe7..667b9e14b032 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -19,62 +19,9 @@
#include <linux/of_irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
-#include <asm/exception.h>
-
-#define LOCAL_CONTROL 0x000
-#define LOCAL_PRESCALER 0x008
+#include <linux/irqchip/irq-bcm2836.h>
-/*
- * The low 2 bits identify the CPU that the GPU IRQ goes to, and the
- * next 2 bits identify the CPU that the GPU FIQ goes to.
- */
-#define LOCAL_GPU_ROUTING 0x00c
-/* When setting bits 0-3, enables PMU interrupts on that CPU. */
-#define LOCAL_PM_ROUTING_SET 0x010
-/* When setting bits 0-3, disables PMU interrupts on that CPU. */
-#define LOCAL_PM_ROUTING_CLR 0x014
-/*
- * The low 4 bits of this are the CPU's timer IRQ enables, and the
- * next 4 bits are the CPU's timer FIQ enables (which override the IRQ
- * bits).
- */
-#define LOCAL_TIMER_INT_CONTROL0 0x040
-/*
- * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and
- * the next 4 bits are the CPU's per-mailbox FIQ enables (which
- * override the IRQ bits).
- */
-#define LOCAL_MAILBOX_INT_CONTROL0 0x050
-/*
- * The CPU's interrupt status register. Bits are defined by the the
- * LOCAL_IRQ_* bits below.
- */
-#define LOCAL_IRQ_PENDING0 0x060
-/* Same status bits as above, but for FIQ. */
-#define LOCAL_FIQ_PENDING0 0x070
-/*
- * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and
- * these bits are organized by mailbox number and then CPU number. We
- * use mailbox 0 for IPIs. The mailbox's interrupt is raised while
- * any bit is set.
- */
-#define LOCAL_MAILBOX0_SET0 0x080
-#define LOCAL_MAILBOX3_SET0 0x08c
-/* Mailbox write-to-clear bits. */
-#define LOCAL_MAILBOX0_CLR0 0x0c0
-#define LOCAL_MAILBOX3_CLR0 0x0cc
-
-#define LOCAL_IRQ_CNTPSIRQ 0
-#define LOCAL_IRQ_CNTPNSIRQ 1
-#define LOCAL_IRQ_CNTHPIRQ 2
-#define LOCAL_IRQ_CNTVIRQ 3
-#define LOCAL_IRQ_MAILBOX0 4
-#define LOCAL_IRQ_MAILBOX1 5
-#define LOCAL_IRQ_MAILBOX2 6
-#define LOCAL_IRQ_MAILBOX3 7
-#define LOCAL_IRQ_GPU_FAST 8
-#define LOCAL_IRQ_PMU_FAST 9
-#define LAST_IRQ LOCAL_IRQ_PMU_FAST
+#include <asm/exception.h>
struct bcm2836_arm_irqchip_intc {
struct irq_domain *domain;
@@ -215,24 +162,6 @@ static int bcm2836_cpu_dying(unsigned int cpu)
cpu);
return 0;
}
-
-#ifdef CONFIG_ARM
-static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
-{
- unsigned long secondary_startup_phys =
- (unsigned long)virt_to_phys((void *)secondary_startup);
-
- writel(secondary_startup_phys,
- intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
-
- return 0;
-}
-
-static const struct smp_operations bcm2836_smp_ops __initconst = {
- .smp_boot_secondary = bcm2836_smp_boot_secondary,
-};
-#endif
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
@@ -249,10 +178,6 @@ bcm2836_arm_irqchip_smp_init(void)
bcm2836_cpu_dying);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
-
-#ifdef CONFIG_ARM
- smp_set_ops(&bcm2836_smp_ops);
-#endif
#endif
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b54b55597ffb..b56c3e23f0af 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1103,18 +1103,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
int nr_parts;
struct partition_affinity *parts;
- parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
if (!parts_node)
return;
nr_parts = of_get_child_count(parts_node);
if (!nr_parts)
- return;
+ goto out_put_node;
parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
if (WARN_ON(!parts))
- return;
+ goto out_put_node;
for_each_child_of_node(parts_node, child_part) {
struct partition_affinity *part;
@@ -1181,6 +1181,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
gic_data.ppi_descs[i] = desc;
}
+
+out_put_node:
+ of_node_put(parts_node);
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
@@ -1260,7 +1263,9 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
goto out_unmap_rdist;
gic_populate_ppi_partitions(node);
- gic_of_setup_kvm_info(node);
+
+ if (static_key_true(&supports_deactivate))
+ gic_of_setup_kvm_info(node);
return 0;
out_unmap_rdist:
@@ -1521,7 +1526,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
err = gic_validate_dist_version(acpi_data.dist_base);
if (err) {
- pr_err("No distributor detected at @%p, giving up",
+ pr_err("No distributor detected at @%p, giving up\n",
acpi_data.dist_base);
goto out_dist_unmap;
}
@@ -1549,7 +1554,9 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
goto out_fwhandle_free;
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
- gic_acpi_setup_kvm_info();
+
+ if (static_key_true(&supports_deactivate))
+ gic_acpi_setup_kvm_info();
return 0;
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index cd0bcc3b7e33..dba9d67cb9c1 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -177,6 +177,7 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
.map = map,
},
};
+ int ret;
/*
* The host will never see that interrupt firing again, so it
@@ -184,7 +185,11 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
*/
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
- return irq_set_vcpu_affinity(irq, &info);
+ ret = irq_set_vcpu_affinity(irq, &info);
+ if (ret)
+ irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ return ret;
}
int its_get_vlpi(int irq, struct its_vlpi_map *map)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index f641e8e2c78d..121af5cf688f 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1420,7 +1420,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (ret)
return;
- gic_set_kvm_info(&gic_v2_kvm_info);
+ if (static_key_true(&supports_deactivate))
+ gic_set_kvm_info(&gic_v2_kvm_info);
}
int __init
@@ -1652,7 +1653,8 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(NULL, gic_data[0].domain);
- gic_acpi_setup_kvm_info();
+ if (static_key_true(&supports_deactivate))
+ gic_acpi_setup_kvm_info();
return 0;
}
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 1f59998e03f8..e80263e16c4c 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -325,7 +325,7 @@ static int pdc_intc_probe(struct platform_device *pdev)
/* Ioremap the registers */
priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
- res_regs->end - res_regs->start);
+ resource_size(res_regs));
if (!priv->pdc_base)
return -EIO;
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index c25ce5af091a..ec0e6a8cdb75 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -156,7 +156,7 @@ static int s3c_irq_type(struct irq_data *data, unsigned int type)
irq_set_handler(data->irq, handle_level_irq);
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
@@ -204,7 +204,7 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index 1b6e2f7c59af..1927b2f36ff6 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -196,8 +196,8 @@ static int __init exiu_init(struct device_node *node,
}
data->base = of_iomap(node, 0);
- if (IS_ERR(data->base)) {
- err = PTR_ERR(data->base);
+ if (!data->base) {
+ err = -ENODEV;
goto out_free;
}
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 6aa3ea479214..f31265937439 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
size_t alloc_sz;
- u32 nregs;
+ int nregs;
int err;
nregs = count_registers(pdev);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 89dd1303a98a..49fef08858c5 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2235,9 +2235,9 @@ static void send_listen(capidrv_contr *card)
send_message(card, &cmdcmsg);
}
-static void listentimerfunc(unsigned long x)
+static void listentimerfunc(struct timer_list *t)
{
- capidrv_contr *card = (capidrv_contr *)x;
+ capidrv_contr *card = from_timer(card, t, listentimer);
if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
printk(KERN_ERR "%s: controller dead ??\n", card->name);
send_listen(card);
@@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
return -1;
}
card->owner = THIS_MODULE;
- setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
+ timer_setup(&card->listentimer, listentimerfunc, 0);
strcpy(card->name, id);
card->contrnr = contr;
card->nbchan = profp->nbchannel;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 6f423bc49d0d..5620fd2c6009 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -55,10 +55,10 @@ DEFINE_SPINLOCK(divert_lock);
/***************************/
/* timer callback function */
/***************************/
-static void deflect_timer_expire(ulong arg)
+static void deflect_timer_expire(struct timer_list *t)
{
unsigned long flags;
- struct call_struc *cs = (struct call_struc *) arg;
+ struct call_struc *cs = from_timer(cs, t, timer);
spin_lock_irqsave(&divert_lock, flags);
del_timer(&cs->timer); /* delete active timer */
@@ -157,7 +157,7 @@ int cf_command(int drvid, int mode,
/* allocate mem for information struct */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (-ENOMEM); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics.driver = drvid;
cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
@@ -450,8 +450,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
return (0); /* no external deflection needed */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (0); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire,
- (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics = *ic; /* copy incoming data */
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index c61049585cbd..0033d74a7291 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -78,7 +78,7 @@ static unsigned int um_idi_poll(struct file *file, poll_table *wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
-static void diva_um_timer_function(unsigned long data);
+static void diva_um_timer_function(struct timer_list *t);
/*
* proc entry
@@ -300,8 +300,7 @@ static int um_idi_open_adapter(struct file *file, int adapter_nr)
p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
init_waitqueue_head(&p_os->read_wait);
init_waitqueue_head(&p_os->close_wait);
- setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function,
- (unsigned long)p_os);
+ timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
p_os->aborted = 0;
p_os->adapter_nr = adapter_nr;
return (1);
@@ -457,9 +456,9 @@ void diva_os_wakeup_close(void *os_context)
}
static
-void diva_um_timer_function(unsigned long data)
+void diva_um_timer_function(struct timer_list *t)
{
- diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) data;
+ diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
p_os->aborted = 1;
wake_up_interruptible(&p_os->read_wait);
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 3cf07b8ced1c..4d85645c87f7 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -2855,7 +2855,7 @@ irq_notforus:
*/
static void
-hfcmulti_dbusy_timer(struct hfc_multi *hc)
+hfcmulti_dbusy_timer(struct timer_list *t)
{
}
@@ -3877,8 +3877,7 @@ hfcmulti_initmode(struct dchannel *dch)
if (hc->dnum[pt]) {
mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
-1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
}
for (i = 1; i <= 31; i++) {
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
@@ -3984,8 +3983,7 @@ hfcmulti_initmode(struct dchannel *dch)
hc->chan[i].slot_rx = -1;
hc->chan[i].conf = -1;
mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
hc->chan[i - 2].slot_tx = -1;
hc->chan[i - 2].slot_rx = -1;
hc->chan[i - 2].conf = -1;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e4ebbee863a1..34c93874af23 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -301,8 +301,9 @@ reset_hfcpci(struct hfc_pci *hc)
* Timer function called when kernel timer expires
*/
static void
-hfcpci_Timer(struct hfc_pci *hc)
+hfcpci_Timer(struct timer_list *t)
{
+ struct hfc_pci *hc = from_timer(hc, t, hw.timer);
hc->hw.timer.expires = jiffies + 75;
/* WD RESET */
/*
@@ -1241,7 +1242,7 @@ hfcpci_int(int intno, void *dev_id)
* timer callback for D-chan busy resolution. Currently no function
*/
static void
-hfcpci_dbusy_timer(struct hfc_pci *hc)
+hfcpci_dbusy_timer(struct timer_list *t)
{
}
@@ -1717,8 +1718,7 @@ static void
inithfcpci(struct hfc_pci *hc)
{
printk(KERN_DEBUG "inithfcpci: entered\n");
- setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer,
- (long)&hc->dch);
+ timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
hc->chanlimit = 2;
mode_hfcpci(&hc->bch[0], 1, -1);
mode_hfcpci(&hc->bch[1], 2, -1);
@@ -2043,7 +2043,7 @@ setup_hw(struct hfc_pci *hc)
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
- setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc);
+ timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
/* default PCM master */
test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
return 0;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 5b078591b6ee..b791688d0228 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1146,9 +1146,9 @@ mISDNisar_irq(struct isar_hw *isar)
EXPORT_SYMBOL(mISDNisar_irq);
static void
-ftimer_handler(unsigned long data)
+ftimer_handler(struct timer_list *t)
{
- struct isar_ch *ch = (struct isar_ch *)data;
+ struct isar_ch *ch = from_timer(ch, t, ftimer);
pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
@@ -1635,11 +1635,9 @@ init_isar(struct isar_hw *isar)
}
if (isar->version != 1)
return -EINVAL;
- setup_timer(&isar->ch[0].ftimer, &ftimer_handler,
- (long)&isar->ch[0]);
+ timer_setup(&isar->ch[0].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
- setup_timer(&isar->ch[1].ftimer, &ftimer_handler,
- (long)&isar->ch[1]);
+ timer_setup(&isar->ch[1].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
return 0;
}
diff --git a/drivers/isdn/hisax/asuscom.c b/drivers/isdn/hisax/asuscom.c
index 62f9c43e2377..74c871495e81 100644
--- a/drivers/isdn/hisax/asuscom.c
+++ b/drivers/isdn/hisax/asuscom.c
@@ -348,7 +348,7 @@ int setup_asuscom(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "AsusPnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index daf3742cdef6..a18b605fb4f2 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -805,7 +805,7 @@ static int avm_pnp_setup(struct IsdnCardState *cs)
cs->hw.avm.cfg_reg =
pnp_port_start(pnp_avm_d, 0);
cs->irq = pnp_irq(pnp_avm_d, 0);
- if (!cs->irq) {
+ if (cs->irq == -1) {
printk(KERN_ERR "FritzPnP:No IRQ\n");
return (0);
}
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 38bdd3f7b960..d23df7a7784d 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -1093,7 +1093,7 @@ static int setup_diva_isapnp(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Diva PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index b21c05820f44..0754c0743790 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -945,7 +945,7 @@ static int setup_elsa_isapnp(struct IsdnCard *card)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index d925f579bc80..4d3b4b2f2612 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1423,7 +1423,7 @@ int setup_hfcsx(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
index 380bbeda9c74..91b5219499ca 100644
--- a/drivers/isdn/hisax/hfcscard.c
+++ b/drivers/isdn/hisax/hfcscard.c
@@ -196,7 +196,7 @@ int setup_hfcs(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index e4f7573ba9bf..7a7137d8664b 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -940,6 +940,8 @@ static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
}
adapter->io = pnp_port_start(pdev, 0);
adapter->irq = pnp_irq(pdev, 0);
+ if (!adapter->io || adapter->irq == -1)
+ goto err_free;
printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at IO %#x irq %d\n",
(char *) dev_id->driver_data, adapter->io, adapter->irq);
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
index 1399ddd4f6cb..53e299be4304 100644
--- a/drivers/isdn/hisax/isurf.c
+++ b/drivers/isdn/hisax/isurf.c
@@ -238,7 +238,7 @@ int setup_isurf(struct IsdnCard *card)
cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
cs->irq = pnp_irq(pnp_d, 0);
- if (!cs->irq || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
+ if (cs->irq == -1 || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n",
cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/ix1_micro.c b/drivers/isdn/hisax/ix1_micro.c
index 7ae39f5e865d..bfb79f3f0a49 100644
--- a/drivers/isdn/hisax/ix1_micro.c
+++ b/drivers/isdn/hisax/ix1_micro.c
@@ -256,7 +256,7 @@ int setup_ix1micro(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "ITK PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
index e4c33cfe3ef4..dfbcd2eaa81a 100644
--- a/drivers/isdn/hisax/niccy.c
+++ b/drivers/isdn/hisax/niccy.c
@@ -261,7 +261,7 @@ int setup_niccy(struct IsdnCard *card)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[2] = pnp_port_start(pnp_d, 1);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1] ||
+ if (card->para[0] == -1 || !card->para[1] ||
!card->para[2]) {
printk(KERN_ERR "NiccyPnP:some resources are "
"missing %ld/%lx/%lx\n",
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index f16a47bcef48..c0b97b893495 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -558,7 +558,7 @@ static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/teles3.c b/drivers/isdn/hisax/teles3.c
index 38fb2c1a3f0f..1eef693f04f0 100644
--- a/drivers/isdn/hisax/teles3.c
+++ b/drivers/isdn/hisax/teles3.c
@@ -306,7 +306,7 @@ int setup_teles3(struct IsdnCard *card)
card->para[2] = pnp_port_start(pnp_d, 1);
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1] || !card->para[2]) {
+ if (card->para[0] == -1 || !card->para[1] || !card->para[2]) {
printk(KERN_ERR "Teles PnP:some resources are missing %ld/%lx/%lx\n",
card->para[0], card->para[1], card->para[2]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 38a5bb764c7b..8b03d618185e 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -231,7 +231,7 @@ static int isdn_timer_cnt2 = 0;
static int isdn_timer_cnt3 = 0;
static void
-isdn_timer_funct(ulong dummy)
+isdn_timer_funct(struct timer_list *unused)
{
int tf = dev->tflags;
if (tf & ISDN_TIMER_FAST) {
@@ -2294,8 +2294,7 @@ static int __init isdn_init(void)
printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
return -EIO;
}
- init_timer(&dev->timer);
- dev->timer.function = isdn_timer_funct;
+ timer_setup(&dev->timer, isdn_timer_funct, 0);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->timerlock);
#ifdef MODULE
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index f63a110b7bcb..c138f66f2659 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1509,9 +1509,9 @@ static int isdn_net_ioctl(struct net_device *dev,
/* called via cisco_timer.function */
static void
-isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
+isdn_net_ciscohdlck_slarp_send_keepalive(struct timer_list *t)
{
- isdn_net_local *lp = (isdn_net_local *) data;
+ isdn_net_local *lp = from_timer(lp, t, cisco_timer);
struct sk_buff *skb;
unsigned char *p;
unsigned long last_cisco_myseq = lp->cisco_myseq;
@@ -1615,9 +1615,8 @@ isdn_net_ciscohdlck_connected(isdn_net_local *lp)
/* send slarp request because interface/seq.no.s reset */
isdn_net_ciscohdlck_slarp_send_request(lp);
- init_timer(&lp->cisco_timer);
- lp->cisco_timer.data = (unsigned long) lp;
- lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
+ timer_setup(&lp->cisco_timer,
+ isdn_net_ciscohdlck_slarp_send_keepalive, 0);
lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
add_timer(&lp->cisco_timer);
}
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index cd2b3c69771a..e07aefb9151d 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -50,7 +50,7 @@ static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
unsigned char id);
-static void isdn_ppp_ccp_timer_callback(unsigned long closure);
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t);
static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
unsigned char id);
static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
@@ -2327,10 +2327,10 @@ static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
/* The timer callback function which is called when a ResetReq has timed out,
aka has never been answered by a ResetAck */
-static void isdn_ppp_ccp_timer_callback(unsigned long closure)
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t)
{
struct ippp_ccp_reset_state *rs =
- (struct ippp_ccp_reset_state *)closure;
+ from_timer(rs, t, timer);
if (!rs) {
printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
@@ -2376,8 +2376,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
rs->state = CCPResetIdle;
rs->is = is;
rs->id = id;
- setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback,
- (unsigned long)rs);
+ timer_setup(&rs->timer, isdn_ppp_ccp_timer_callback, 0);
is->reset->rs[id] = rs;
}
return rs;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index d30130c8d0f3..960f26348bb5 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -541,9 +541,9 @@ isdn_tty_senddown(modem_info *info)
* into the tty's buffer.
*/
static void
-isdn_tty_modem_do_ncarrier(unsigned long data)
+isdn_tty_modem_do_ncarrier(struct timer_list *t)
{
- modem_info *info = (modem_info *) data;
+ modem_info *info = from_timer(info, t, nc_timer);
isdn_tty_modem_result(RESULT_NO_CARRIER, info);
}
@@ -1812,8 +1812,7 @@ isdn_tty_modem_init(void)
info->isdn_channel = -1;
info->drv_index = -1;
info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
- setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier,
- (unsigned long)info);
+ timer_setup(&info->nc_timer, isdn_tty_modem_do_ncarrier, 0);
skb_queue_head_init(&info->xmit_queue);
#ifdef CONFIG_ISDN_AUDIO
skb_queue_head_init(&info->dtmf_queue);
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index ce90213a42fa..76516ee84e9a 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -270,9 +270,9 @@ static void pblk_write_kick(struct pblk *pblk)
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}
-void pblk_write_timer_fn(unsigned long data)
+void pblk_write_timer_fn(struct timer_list *t)
{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, wtimer);
/* kick the write thread every tick to flush outstanding data */
pblk_write_kick(pblk);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 00d5698d64a9..9c8e114c8a54 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -442,9 +442,9 @@ next_gc_group:
goto next_gc_group;
}
-static void pblk_gc_timer(unsigned long data)
+static void pblk_gc_timer(struct timer_list *t)
{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
pblk_gc_kick(pblk);
}
@@ -601,7 +601,7 @@ int pblk_gc_init(struct pblk *pblk)
goto fail_free_writer_kthread;
}
- setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
+ timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
gc->gc_active = 0;
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index f62112ba5482..695826a06b5d 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -866,7 +866,7 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index abae31fd434e..dacc71922260 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -158,9 +158,9 @@ int pblk_rl_max_io(struct pblk_rl *rl)
return rl->rb_max_io;
}
-static void pblk_rl_u_timer(unsigned long data)
+static void pblk_rl_u_timer(struct timer_list *t)
{
- struct pblk_rl *rl = (struct pblk_rl *)data;
+ struct pblk_rl *rl = from_timer(rl, t, u_timer);
/* Release user I/O state. Protect from GC */
smp_store_release(&rl->rb_user_active, 0);
@@ -202,7 +202,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
atomic_set(&rl->rb_gc_cnt, 0);
atomic_set(&rl->rb_space, -1);
- setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
+ timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
rl->rb_user_active = 0;
rl->rb_gc_active = 0;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 90961033a79f..59a64d461a5d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -797,7 +797,7 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
* pblk write thread
*/
int pblk_write_ts(void *data);
-void pblk_write_timer_fn(unsigned long data);
+void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
/*
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 267f01ae87e4..0993c14be860 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -267,9 +267,9 @@ static void rrpc_gc_kick(struct rrpc *rrpc)
/*
* timed GC every interval.
*/
-static void rrpc_gc_timer(unsigned long data)
+static void rrpc_gc_timer(struct timer_list *t)
{
- struct rrpc *rrpc = (struct rrpc *)data;
+ struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
rrpc_gc_kick(rrpc);
mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
@@ -1063,7 +1063,7 @@ static int rrpc_gc_init(struct rrpc *rrpc)
if (!rrpc->kgc_wq)
return -ENOMEM;
- setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
+ timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
return 0;
}
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a27d85232ce1..a0cc1bc6d884 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -490,7 +490,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
if (b == -1)
goto err;
- k->ptr[i] = PTR(ca->buckets[b].gen,
+ k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
bucket_to_sector(c, b),
ca->sb.nr_this_dev);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 11c5503d31dc..81e8dc3dbe5e 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.scan_objects = bch_mca_scan;
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
- register_shrinker(&c->shrink);
+
+ if (register_shrinker(&c->shrink))
+ pr_warn("bcache: %s: could not register shrinker",
+ __func__);
return 0;
}
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 41c238fc3733..f9d391711595 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
return false;
for (i = 0; i < KEY_PTRS(l); i++)
- if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
return false;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 02a98ddb592d..a87165c1d8e5 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -170,6 +170,11 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
* find a sequence of buckets with valid journal entries
*/
for (i = 0; i < ca->sb.njournal_buckets; i++) {
+ /*
+ * We must try the index l with ZERO first for
+ * correctness due to the scenario that the journal
+ * bucket is circular buffer which might have wrapped
+ */
l = (i * 2654435769U) % ca->sb.njournal_buckets;
if (test_bit(l, bitmap))
@@ -507,7 +512,7 @@ static void journal_reclaim(struct cache_set *c)
continue;
ja->cur_idx = next;
- k->ptr[n++] = PTR(0,
+ k->ptr[n++] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 3a7aed7282b2..643c3021624f 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -708,16 +708,15 @@ static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
- * If cache device is dirty (dc->has_dirty is non-zero), then
- * recovery a failed read request from cached device may get a
- * stale data back. So read failure recovery is only permitted
- * when cache device is clean.
+ * If read request hit dirty data (s->read_dirty_data is true),
+ * then recovery a failed read request from cached device may
+ * get a stale data back. So read failure recovery is only
+ * permitted when read request hit clean data in cache device,
+ * or when cache read race happened.
*/
- if (s->recoverable &&
- (dc && !atomic_read(&dc->has_dirty))) {
+ if (s->recoverable && !s->read_dirty_data) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index d0831d5bcc87..be119326297b 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -147,9 +147,9 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
}
}
-static void scale_accounting(unsigned long data)
+static void scale_accounting(struct timer_list *t)
{
- struct cache_accounting *acc = (struct cache_accounting *) data;
+ struct cache_accounting *acc = from_timer(acc, t, timer);
#define move_stat(name) do { \
unsigned t = atomic_xchg(&acc->collector.name, 0); \
@@ -234,9 +234,7 @@ void bch_cache_accounting_init(struct cache_accounting *acc,
kobject_init(&acc->day.kobj, &bch_stats_ktype);
closure_init(&acc->cl, parent);
- init_timer(&acc->timer);
+ timer_setup(&acc->timer, scale_accounting, 0);
acc->timer.expires = jiffies + accounting_delay;
- acc->timer.data = (unsigned long) acc;
- acc->timer.function = scale_accounting;
add_timer(&acc->timer);
}
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 33bb074d6941..c546b567f3b5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
buffers = c->minimum_buffers;
*limit_buffers = buffers;
- *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+ *threshold_buffers = mult_frac(buffers,
+ DM_BUFIO_WRITEBACK_PERCENT, 100);
}
/*
@@ -1610,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
int l;
struct dm_buffer *b, *tmp;
unsigned long freed = 0;
- unsigned long count = nr_to_scan;
+ unsigned long count = c->n_buffers[LIST_CLEAN] +
+ c->n_buffers[LIST_DIRTY];
unsigned long retain_target = get_retain_buffers(c);
for (l = 0; l < LIST_SIZE; l++) {
@@ -1646,8 +1648,11 @@ static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+ unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
+ READ_ONCE(c->n_buffers[LIST_DIRTY]);
+ unsigned long retain_target = get_retain_buffers(c);
- return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+ return (count < retain_target) ? 0 : (count - retain_target);
}
/*
@@ -1910,19 +1915,15 @@ static int __init dm_bufio_init(void)
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
- mem = (__u64)((totalram_pages - totalhigh_pages) *
- DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+ mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+ DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
if (mem > ULONG_MAX)
mem = ULONG_MAX;
#ifdef CONFIG_MMU
- /*
- * Get the size of vmalloc space the same way as VMALLOC_TOTAL
- * in fs/proc/internal.h
- */
- if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
- mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+ if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
+ mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif
dm_bufio_default_cache_size = mem;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index cf23a14f9c6a..47407e43b96a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
{
int r;
- r = dm_register_target(&cache_target);
- if (r) {
- DMERR("cache target registration failed: %d", r);
- return r;
- }
-
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
if (!migration_cache) {
dm_unregister_target(&cache_target);
return -ENOMEM;
}
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
+ return r;
+ }
+
return 0;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2209a9700acd..288386bfbfb5 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -44,9 +44,9 @@ struct dm_delay_info {
static DEFINE_MUTEX(delayed_bios_lock);
-static void handle_delayed_timer(unsigned long data)
+static void handle_delayed_timer(struct timer_list *t)
{
- struct delay_c *dc = (struct delay_c *)data;
+ struct delay_c *dc = from_timer(dc, t, delay_timer);
queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}
@@ -195,7 +195,7 @@ out:
goto bad_queue;
}
- setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
+ timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
INIT_LIST_HEAD(&dc->delayed_bios);
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index ba84b8d62cd0..73a5c198113a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1513,7 +1513,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->flush_supported = true;
ti->num_discard_bios = 1;
- ti->discards_supported = true;
era->callbacks.congested_fn = era_is_congested;
dm_table_add_target_callbacks(ti->table, &era->callbacks);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 61180783ef42..05c7bfd0c9d9 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1094,9 +1094,9 @@ static void sleep_on_endio_wait(struct dm_integrity_c *ic)
__remove_wait_queue(&ic->endio_wait, &wait);
}
-static void autocommit_fn(unsigned long data)
+static void autocommit_fn(struct timer_list *t)
{
- struct dm_integrity_c *ic = (struct dm_integrity_c *)data;
+ struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
if (likely(!dm_integrity_failed(ic)))
queue_work(ic->commit_wq, &ic->commit_work);
@@ -2942,7 +2942,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
ic->autocommit_msec = sync_msec;
- setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic);
+ timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
ic->io = dm_io_client_create();
if (IS_ERR(ic->io)) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 204606bef9e2..f7810cc869ac 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -458,6 +458,38 @@ do { \
} while (0)
/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
+ * the same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static bool __must_push_back(struct multipath *m, unsigned long flags)
+{
+ return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
+ dm_noflush_suspending(m->ti));
+}
+
+/*
+ * Following functions use READ_ONCE to get atomic access to
+ * all m->flags to avoid taking spinlock
+ */
+static bool must_push_back_rq(struct multipath *m)
+{
+ unsigned long flags = READ_ONCE(m->flags);
+ return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+ unsigned long flags = READ_ONCE(m->flags);
+ return __must_push_back(m, flags);
+}
+
+/*
* Map cloned requests (request-based multipath)
*/
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ if (must_push_back_rq(m))
return DM_MAPIO_DELAY_REQUEUE;
dm_report_EIO(m); /* Failed */
return DM_MAPIO_KILL;
@@ -499,8 +531,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
bool queue_dying = blk_queue_dying(q);
- DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
- PTR_ERR(clone), queue_dying ? " (path offline)" : "");
if (queue_dying) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
@@ -555,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
}
if (!pgpath) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ if (must_push_back_bio(m))
return DM_MAPIO_REQUEUE;
dm_report_EIO(m);
return DM_MAPIO_KILL;
@@ -653,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
(save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
(!save_old_value && queue_if_no_path));
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
- queue_if_no_path || dm_noflush_suspending(m->ti));
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -1488,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
fail_path(pgpath);
if (atomic_read(&m->nr_valid_paths) == 0 &&
- !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ !must_push_back_rq(m)) {
if (error == BLK_STS_IOERR)
dm_report_EIO(m);
/* complete with the original error */
@@ -1523,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- dm_report_EIO(m);
- *error = BLK_STS_IOERR;
+ if (must_push_back_bio(m)) {
+ r = DM_ENDIO_REQUEUE;
+ } else {
+ dm_report_EIO(m);
+ *error = BLK_STS_IOERR;
+ }
goto done;
}
@@ -1959,13 +1992,6 @@ static int __init dm_multipath_init(void)
{
int r;
- r = dm_register_target(&multipath_target);
- if (r < 0) {
- DMERR("request-based register failed %d", r);
- r = -EINVAL;
- goto bad_register_target;
- }
-
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
@@ -1987,13 +2013,20 @@ static int __init dm_multipath_init(void)
goto bad_alloc_kmpath_handlerd;
}
+ r = dm_register_target(&multipath_target);
+ if (r < 0) {
+ DMERR("request-based register failed %d", r);
+ r = -EINVAL;
+ goto bad_register_target;
+ }
+
return 0;
+bad_register_target:
+ destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
- dm_unregister_target(&multipath_target);
-bad_register_target:
return r;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 366c625b9591..6319d846e0ad 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2887,9 +2887,6 @@ static void configure_discard_support(struct raid_set *rs)
bool raid456;
struct dm_target *ti = rs->ti;
- /* Assume discards not supported until after checks below. */
- ti->discards_supported = false;
-
/*
* XXX: RAID level 4,5,6 require zeroing for safety.
*/
@@ -2914,9 +2911,6 @@ static void configure_discard_support(struct raid_set *rs)
}
}
- /* All RAID members properly support discards */
- ti->discards_supported = true;
-
/*
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index c0b82136b2d1..580c49cc8079 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -94,9 +94,9 @@ static void wakeup_mirrord(void *context)
queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
-static void delayed_wake_fn(unsigned long data)
+static void delayed_wake_fn(struct timer_list *t)
{
- struct mirror_set *ms = (struct mirror_set *) data;
+ struct mirror_set *ms = from_timer(ms, t, timer);
clear_bit(0, &ms->timer_pending);
wakeup_mirrord(ms);
@@ -108,8 +108,6 @@ static void delayed_wake(struct mirror_set *ms)
return;
ms->timer.expires = jiffies + HZ / 5;
- ms->timer.data = (unsigned long) ms;
- ms->timer.function = delayed_wake_fn;
add_timer(&ms->timer);
}
@@ -1133,7 +1131,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_free_context;
}
INIT_WORK(&ms->kmirrord_work, do_mirror);
- init_timer(&ms->timer);
+ timer_setup(&ms->timer, delayed_wake_fn, 0);
ms->timer_pending = 0;
INIT_WORK(&ms->trigger_event, trigger_event);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1113b42e1eda..a0613bd8ed00 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
return r;
}
- r = dm_register_target(&snapshot_target);
- if (r < 0) {
- DMERR("snapshot target register failed %d", r);
- goto bad_register_snapshot_target;
- }
-
- r = dm_register_target(&origin_target);
- if (r < 0) {
- DMERR("Origin target register failed %d", r);
- goto bad_register_origin_target;
- }
-
- r = dm_register_target(&merge_target);
- if (r < 0) {
- DMERR("Merge target register failed %d", r);
- goto bad_register_merge_target;
- }
-
r = init_origin_hash();
if (r) {
DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
goto bad_pending_cache;
}
+ r = dm_register_target(&snapshot_target);
+ if (r < 0) {
+ DMERR("snapshot target register failed %d", r);
+ goto bad_register_snapshot_target;
+ }
+
+ r = dm_register_target(&origin_target);
+ if (r < 0) {
+ DMERR("Origin target register failed %d", r);
+ goto bad_register_origin_target;
+ }
+
+ r = dm_register_target(&merge_target);
+ if (r < 0) {
+ DMERR("Merge target register failed %d", r);
+ goto bad_register_merge_target;
+ }
+
return 0;
-bad_pending_cache:
- kmem_cache_destroy(exception_cache);
-bad_exception_cache:
- exit_origin_hash();
-bad_origin_hash:
- dm_unregister_target(&merge_target);
bad_register_merge_target:
dm_unregister_target(&origin_target);
bad_register_origin_target:
dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
+ kmem_cache_destroy(pending_cache);
+bad_pending_cache:
+ kmem_cache_destroy(exception_cache);
+bad_exception_cache:
+ exit_origin_hash();
+bad_origin_hash:
dm_exception_store_exit();
return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 433bd3f3014c..aaffd0c0ee9a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
refcount_set(&dd->count, 1);
list_add(&dd->list, &t->devices);
+ goto out;
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
- refcount_inc(&dd->count);
}
-
+ refcount_inc(&dd->count);
+out:
*result = dd->dm_dev;
return 0;
}
@@ -1758,13 +1759,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
return true;
}
-
-static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && blk_queue_discard(q);
+ return q && !blk_queue_discard(q);
}
static bool dm_table_supports_discards(struct dm_table *t)
@@ -1772,28 +1772,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
struct dm_target *ti;
unsigned i;
- /*
- * Unless any target used by the table set discards_supported,
- * require at least one underlying device to support discards.
- * t->devices includes internal dm devices such as mirror logs
- * so we need to use iterate_devices here, which targets
- * supporting discard selectively must provide.
- */
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->num_discard_bios)
- continue;
-
- if (ti->discards_supported)
- return true;
+ return false;
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_discard_capable, NULL))
- return true;
+ /*
+ * Either the target provides discard support (as implied by setting
+ * 'discards_supported') or it relies on _all_ data devices having
+ * discard support.
+ */
+ if (!ti->discards_supported &&
+ (!ti->type->iterate_devices ||
+ ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
+ return false;
}
- return false;
+ return true;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1806,9 +1802,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
q->limits = *limits;
- if (!dm_table_supports_discards(t))
+ if (!dm_table_supports_discards(t)) {
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
- else
+ /* Must also clear discard limits... */
+ q->limits.max_discard_sectors = 0;
+ q->limits.max_hw_discard_sectors = 0;
+ q->limits.discard_granularity = 0;
+ q->limits.discard_alignment = 0;
+ q->limits.discard_misaligned = 0;
+ } else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89e5dff9b4cf..f91d771fff4b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
static int __init dm_thin_init(void)
{
- int r;
+ int r = -ENOMEM;
pool_table_init();
+ _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+ if (!_new_mapping_cache)
+ return r;
+
r = dm_register_target(&thin_target);
if (r)
- return r;
+ goto bad_new_mapping_cache;
r = dm_register_target(&pool_target);
if (r)
- goto bad_pool_target;
-
- r = -ENOMEM;
-
- _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
- if (!_new_mapping_cache)
- goto bad_new_mapping_cache;
+ goto bad_thin_target;
return 0;
-bad_new_mapping_cache:
- dm_unregister_target(&pool_target);
-bad_pool_target:
+bad_thin_target:
dm_unregister_target(&thin_target);
+bad_new_mapping_cache:
+ kmem_cache_destroy(_new_mapping_cache);
return r;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c3dc134b9fb5..4e4dee0ec2de 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -541,7 +541,7 @@ static void mddev_put(struct mddev *mddev)
bioset_free(sync_bs);
}
-static void md_safemode_timeout(unsigned long data);
+static void md_safemode_timeout(struct timer_list *t);
void mddev_init(struct mddev *mddev)
{
@@ -550,8 +550,7 @@ void mddev_init(struct mddev *mddev)
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
- setup_timer(&mddev->safemode_timer, md_safemode_timeout,
- (unsigned long) mddev);
+ timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
@@ -5404,9 +5403,9 @@ static int add_named_array(const char *val, const struct kernel_param *kp)
return -EINVAL;
}
-static void md_safemode_timeout(unsigned long data)
+static void md_safemode_timeout(struct timer_list *t)
{
- struct mddev *mddev = (struct mddev *) data;
+ struct mddev *mddev = from_timer(mddev, t, safemode_timer);
mddev->safemode = 1;
if (mddev->external)
@@ -7606,7 +7605,9 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
/* Still cleaning up */
resync = max_sectors;
- } else
+ } else if (resync > max_sectors)
+ resync = max_sectors;
+ else
resync -= atomic_read(&mddev->recovery_active);
if (resync == 0) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index cc9d337a1ed3..6df398e3a008 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -809,11 +809,15 @@ static void flush_pending_writes(struct r1conf *conf)
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
+ struct blk_plug plug;
struct bio *bio;
+
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+ blk_start_plug(&plug);
flush_bio_list(conf, bio);
+ blk_finish_plug(&plug);
} else
spin_unlock_irq(&conf->device_lock);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b9edbc747a95..c131835cf008 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -894,10 +894,13 @@ static void flush_pending_writes(struct r10conf *conf)
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
+ struct blk_plug plug;
struct bio *bio;
+
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+ blk_start_plug(&plug);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
bitmap_unplug(conf->mddev->bitmap);
@@ -918,6 +921,7 @@ static void flush_pending_writes(struct r10conf *conf)
generic_make_request(bio);
bio = next;
}
+ blk_finish_plug(&plug);
} else
spin_unlock_irq(&conf->device_lock);
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index f1c86d938502..39f31f07ffe9 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2577,31 +2577,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
struct r5conf *conf;
- int err;
if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
mode > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
- err = mddev_lock(mddev);
- if (err)
- return err;
conf = mddev->private;
- if (!conf || !conf->log) {
- mddev_unlock(mddev);
+ if (!conf || !conf->log)
return -ENODEV;
- }
if (raid5_calc_degraded(conf) > 0 &&
- mode == R5C_JOURNAL_MODE_WRITE_BACK) {
- mddev_unlock(mddev);
+ mode == R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
- }
mddev_suspend(mddev);
conf->log->r5c_journal_mode = mode;
mddev_resume(mddev);
- mddev_unlock(mddev);
pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
mdname(mddev), mode, r5c_journal_mode_str[mode]);
@@ -2614,6 +2605,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
{
int mode = ARRAY_SIZE(r5c_journal_mode_str);
size_t len = length;
+ int ret;
if (len < 2)
return -EINVAL;
@@ -2625,8 +2617,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
if (strlen(r5c_journal_mode_str[mode]) == len &&
!strncmp(page, r5c_journal_mode_str[mode], len))
break;
-
- return r5c_journal_mode_set(mddev, mode) ?: length;
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+ ret = r5c_journal_mode_set(mddev, mode);
+ mddev_unlock(mddev);
+ return ret ?: length;
}
struct md_sysfs_entry
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31dc25e2871a..98ce4272ace9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2677,13 +2677,13 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
pr_debug("raid456: error called\n");
spin_lock_irqsave(&conf->device_lock, flags);
+ set_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
- set_bit(Faulty, &rdev->flags);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f8a808d45034..98f88c43f62c 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -86,7 +86,7 @@ void cec_queue_event_fh(struct cec_fh *fh,
const struct cec_event *new_ev, u64 ts)
{
static const u8 max_events[CEC_NUM_EVENTS] = {
- 1, 1, 64, 64,
+ 1, 1, 64, 64, 8, 8,
};
struct cec_event_entry *entry;
unsigned int ev_idx = new_ev->event - 1;
@@ -170,6 +170,22 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
}
EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
+/* Notify userspace that the HPD pin changed state at the given time. */
+void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
+{
+ struct cec_event ev = {
+ .event = is_high ? CEC_EVENT_PIN_HPD_HIGH :
+ CEC_EVENT_PIN_HPD_LOW,
+ };
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
+ mutex_unlock(&adap->devnode.lock);
+}
+EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
+
/*
* Queue a new message for this filehandle.
*
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index a079f7fe018c..3dba3aa34a43 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -32,6 +32,7 @@
#include <media/cec-pin.h>
#include "cec-priv.h"
+#include "cec-pin-priv.h"
static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
@@ -529,7 +530,7 @@ static int cec_open(struct inode *inode, struct file *filp)
* Initial events that are automatically sent when the cec device is
* opened.
*/
- struct cec_event ev_state = {
+ struct cec_event ev = {
.event = CEC_EVENT_STATE_CHANGE,
.flags = CEC_EVENT_FL_INITIAL_STATE,
};
@@ -569,9 +570,19 @@ static int cec_open(struct inode *inode, struct file *filp)
filp->private_data = fh;
/* Queue up initial state events */
- ev_state.state_change.phys_addr = adap->phys_addr;
- ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
- cec_queue_event_fh(fh, &ev_state, 0);
+ ev.state_change.phys_addr = adap->phys_addr;
+ ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ cec_queue_event_fh(fh, &ev, 0);
+#ifdef CONFIG_CEC_PIN
+ if (adap->pin && adap->pin->ops->read_hpd) {
+ err = adap->pin->ops->read_hpd(adap);
+ if (err >= 0) {
+ ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
+ CEC_EVENT_PIN_HPD_LOW;
+ cec_queue_event_fh(fh, &ev, 0);
+ }
+ }
+#endif
list_add(&fh->list, &devnode->fhs);
mutex_unlock(&devnode->lock);
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 648136e552d5..5870da6a567f 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -112,10 +112,6 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
int minor;
int ret;
- /* Initialization */
- INIT_LIST_HEAD(&devnode->fhs);
- mutex_init(&devnode->lock);
-
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
@@ -242,6 +238,10 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
INIT_LIST_HEAD(&adap->wait_queue);
init_waitqueue_head(&adap->kthread_waitq);
+ /* adap->devnode initialization */
+ INIT_LIST_HEAD(&adap->devnode.fhs);
+ mutex_init(&adap->devnode.lock);
+
adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
if (IS_ERR(adap->kthread)) {
pr_err("cec-%s: kernel_thread() failed\n", name);
@@ -277,7 +277,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->input_id.version = 1;
adap->rc->driver_name = CEC_NAME;
adap->rc->allowed_protocols = RC_PROTO_BIT_CEC;
- adap->rc->enabled_protocols = RC_PROTO_BIT_CEC;
adap->rc->priv = adap;
adap->rc->map_name = RC_MAP_CEC;
adap->rc->timeout = MS_TO_NS(100);
diff --git a/drivers/media/cec/cec-pin-priv.h b/drivers/media/cec/cec-pin-priv.h
new file mode 100644
index 000000000000..7d0def199762
--- /dev/null
+++ b/drivers/media/cec/cec-pin-priv.h
@@ -0,0 +1,133 @@
+/*
+ * cec-pin-priv.h - internal cec-pin header
+ *
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LINUX_CEC_PIN_PRIV_H
+#define LINUX_CEC_PIN_PRIV_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <media/cec-pin.h>
+
+enum cec_pin_state {
+ /* CEC is off */
+ CEC_ST_OFF,
+ /* CEC is idle, waiting for Rx or Tx */
+ CEC_ST_IDLE,
+
+ /* Tx states */
+
+ /* Pending Tx, waiting for Signal Free Time to expire */
+ CEC_ST_TX_WAIT,
+ /* Low-drive was detected, wait for bus to go high */
+ CEC_ST_TX_WAIT_FOR_HIGH,
+ /* Drive CEC low for the start bit */
+ CEC_ST_TX_START_BIT_LOW,
+ /* Drive CEC high for the start bit */
+ CEC_ST_TX_START_BIT_HIGH,
+ /* Drive CEC low for the 0 bit */
+ CEC_ST_TX_DATA_BIT_0_LOW,
+ /* Drive CEC high for the 0 bit */
+ CEC_ST_TX_DATA_BIT_0_HIGH,
+ /* Drive CEC low for the 1 bit */
+ CEC_ST_TX_DATA_BIT_1_LOW,
+ /* Drive CEC high for the 1 bit */
+ CEC_ST_TX_DATA_BIT_1_HIGH,
+ /*
+ * Wait for start of sample time to check for Ack bit or first
+ * four initiator bits to check for Arbitration Lost.
+ */
+ CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE,
+ /* Wait for end of bit period after sampling */
+ CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE,
+
+ /* Rx states */
+
+ /* Start bit low detected */
+ CEC_ST_RX_START_BIT_LOW,
+ /* Start bit high detected */
+ CEC_ST_RX_START_BIT_HIGH,
+ /* Wait for bit sample time */
+ CEC_ST_RX_DATA_SAMPLE,
+ /* Wait for earliest end of bit period after sampling */
+ CEC_ST_RX_DATA_POST_SAMPLE,
+ /* Wait for CEC to go high (i.e. end of bit period */
+ CEC_ST_RX_DATA_HIGH,
+ /* Drive CEC low to send 0 Ack bit */
+ CEC_ST_RX_ACK_LOW,
+ /* End of 0 Ack time, wait for earliest end of bit period */
+ CEC_ST_RX_ACK_LOW_POST,
+ /* Wait for CEC to go high (i.e. end of bit period */
+ CEC_ST_RX_ACK_HIGH_POST,
+ /* Wait for earliest end of bit period and end of message */
+ CEC_ST_RX_ACK_FINISH,
+
+ /* Start low drive */
+ CEC_ST_LOW_DRIVE,
+ /* Monitor pin using interrupts */
+ CEC_ST_RX_IRQ,
+
+ /* Total number of pin states */
+ CEC_PIN_STATES
+};
+
+#define CEC_NUM_PIN_EVENTS 128
+
+#define CEC_PIN_IRQ_UNCHANGED 0
+#define CEC_PIN_IRQ_DISABLE 1
+#define CEC_PIN_IRQ_ENABLE 2
+
+struct cec_pin {
+ struct cec_adapter *adap;
+ const struct cec_pin_ops *ops;
+ struct task_struct *kthread;
+ wait_queue_head_t kthread_waitq;
+ struct hrtimer timer;
+ ktime_t ts;
+ unsigned int wait_usecs;
+ u16 la_mask;
+ bool enabled;
+ bool monitor_all;
+ bool rx_eom;
+ bool enable_irq_failed;
+ enum cec_pin_state state;
+ struct cec_msg tx_msg;
+ u32 tx_bit;
+ bool tx_nacked;
+ u32 tx_signal_free_time;
+ struct cec_msg rx_msg;
+ u32 rx_bit;
+
+ struct cec_msg work_rx_msg;
+ u8 work_tx_status;
+ ktime_t work_tx_ts;
+ atomic_t work_irq_change;
+ atomic_t work_pin_events;
+ unsigned int work_pin_events_wr;
+ unsigned int work_pin_events_rd;
+ ktime_t work_pin_ts[CEC_NUM_PIN_EVENTS];
+ bool work_pin_is_high[CEC_NUM_PIN_EVENTS];
+ ktime_t timer_ts;
+ u32 timer_cnt;
+ u32 timer_100ms_overruns;
+ u32 timer_300ms_overruns;
+ u32 timer_max_overrun;
+ u32 timer_sum_overrun;
+};
+
+#endif
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index c003b8eac617..b48dfe844118 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -20,6 +20,7 @@
#include <linux/sched/types.h>
#include <media/cec-pin.h>
+#include "cec-pin-priv.h"
/* All timings are in microseconds */
@@ -132,7 +133,7 @@ static void cec_pin_to_idle(struct cec_pin *pin)
pin->rx_msg.len = 0;
memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg));
pin->state = CEC_ST_IDLE;
- pin->ts = 0;
+ pin->ts = ns_to_ktime(0);
}
/*
@@ -426,7 +427,7 @@ static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts)
v = cec_pin_read(pin);
if (v && pin->rx_eom) {
pin->work_rx_msg = pin->rx_msg;
- pin->work_rx_msg.rx_ts = ts;
+ pin->work_rx_msg.rx_ts = ktime_to_ns(ts);
wake_up_interruptible(&pin->kthread_waitq);
pin->ts = ts;
pin->state = CEC_ST_RX_ACK_FINISH;
@@ -457,7 +458,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
s32 delta;
ts = ktime_get();
- if (pin->timer_ts) {
+ if (ktime_to_ns(pin->timer_ts)) {
delta = ktime_us_delta(ts, pin->timer_ts);
pin->timer_cnt++;
if (delta > 100 && pin->state != CEC_ST_IDLE) {
@@ -481,17 +482,19 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (pin->wait_usecs > 150) {
pin->wait_usecs -= 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, 100000);
+ hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
if (pin->wait_usecs > 100) {
pin->wait_usecs /= 2;
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
- hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(pin->wait_usecs * 1000));
return HRTIMER_RESTART;
}
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
- hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(pin->wait_usecs * 1000));
pin->wait_usecs = 0;
return HRTIMER_RESTART;
}
@@ -531,7 +534,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
pin->state = CEC_ST_RX_START_BIT_LOW;
break;
}
- if (pin->ts == 0)
+ if (ktime_to_ns(pin->ts) == 0)
pin->ts = ts;
if (pin->tx_msg.len) {
/*
@@ -572,12 +575,13 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (!adap->monitor_pin_cnt || states[pin->state].usecs <= 150) {
pin->wait_usecs = 0;
pin->timer_ts = ktime_add_us(ts, states[pin->state].usecs);
- hrtimer_forward_now(timer, states[pin->state].usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(states[pin->state].usecs * 1000));
return HRTIMER_RESTART;
}
pin->wait_usecs = states[pin->state].usecs - 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, 100000);
+ hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
@@ -596,7 +600,7 @@ static int cec_pin_thread_func(void *_adap)
if (pin->work_rx_msg.len) {
cec_received_msg_ts(adap, &pin->work_rx_msg,
- pin->work_rx_msg.rx_ts);
+ ns_to_ktime(pin->work_rx_msg.rx_ts));
pin->work_rx_msg.len = 0;
}
if (pin->work_tx_status) {
@@ -623,13 +627,15 @@ static int cec_pin_thread_func(void *_adap)
pin->ops->disable_irq(adap);
cec_pin_high(pin);
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
break;
case CEC_PIN_IRQ_ENABLE:
pin->enable_irq_failed = !pin->ops->enable_irq(adap);
if (pin->enable_irq_failed) {
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
}
break;
default:
@@ -653,7 +659,7 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
cec_pin_read(pin);
cec_pin_to_idle(pin);
pin->tx_msg.len = 0;
- pin->timer_ts = 0;
+ pin->timer_ts = ns_to_ktime(0);
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED);
pin->kthread = kthread_run(cec_pin_thread_func, adap,
"cec-pin");
@@ -661,7 +667,8 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
pr_err("cec-pin: kernel_thread() failed\n");
return PTR_ERR(pin->kthread);
}
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
} else {
if (pin->ops->disable_irq)
pin->ops->disable_irq(adap);
@@ -699,7 +706,8 @@ static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
pin->ops->disable_irq(adap);
cec_pin_high(pin);
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
}
return 0;
}
@@ -789,7 +797,7 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN,
CEC_MAX_LOG_ADDRS);
- if (PTR_ERR_OR_ZERO(adap)) {
+ if (IS_ERR(adap)) {
kfree(pin);
return adap;
}
diff --git a/drivers/media/common/cypress_firmware.c b/drivers/media/common/cypress_firmware.c
index 50e3f76d4847..8895158c1962 100644
--- a/drivers/media/common/cypress_firmware.c
+++ b/drivers/media/common/cypress_firmware.c
@@ -74,11 +74,9 @@ int cypress_load_firmware(struct usb_device *udev,
struct hexline *hx;
int ret, pos = 0;
- hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
- if (!hx) {
- dev_err(&udev->dev, "%s: kmalloc() failed\n", KBUILD_MODNAME);
+ hx = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!hx)
return -ENOMEM;
- }
/* stop the CPU */
hx->data[0] = 1;
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 930d2c94d5d3..8c87d6837c49 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -163,9 +163,9 @@ void saa7146_buffer_next(struct saa7146_dev *dev,
}
}
-void saa7146_buffer_timeout(unsigned long data)
+void saa7146_buffer_timeout(struct timer_list *t)
{
- struct saa7146_dmaqueue *q = (struct saa7146_dmaqueue*)data;
+ struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
struct saa7146_dev *dev = q->dev;
unsigned long flags;
@@ -559,7 +559,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
vbi->start[1] = 312;
vbi->count[1] = 16;
- init_timer(&vv->vbi_read_timeout);
+ timer_setup(&vv->vbi_read_timeout, NULL, 0);
vv->ov_fb.capability = V4L2_FBUF_CAP_LIST_CLIPPING;
vv->ov_fb.flags = V4L2_FBUF_FLAG_PRIMARY;
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index 371c6f8606de..e1d369b976ed 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -349,9 +349,10 @@ static void vbi_stop(struct saa7146_fh *fh, struct file *file)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static void vbi_read_timeout(unsigned long data)
+static void vbi_read_timeout(struct timer_list *t)
{
- struct file *file = (struct file*)data;
+ struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout);
+ struct file *file = vv->vbi_read_timeout_file;
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
@@ -366,8 +367,7 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
- setup_timer(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout,
- (unsigned long)(&vv->vbi_dmaq));
+ timer_setup(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout, 0);
vv->vbi_dmaq.dev = dev;
init_waitqueue_head(&vv->vbi_wq);
@@ -403,7 +403,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
file, &dev->v4l2_lock);
vv->vbi_read_timeout.function = vbi_read_timeout;
- vv->vbi_read_timeout.data = (unsigned long)file;
+ vv->vbi_read_timeout_file = file;
/* initialize the brs */
if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
@@ -489,7 +489,7 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
return ret;
}
-struct saa7146_use_ops saa7146_vbi_uops = {
+const struct saa7146_use_ops saa7146_vbi_uops = {
.init = vbi_init,
.open = vbi_open,
.release = vbi_close,
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index 37b4654dc21c..2b631eaa65b3 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -1201,8 +1201,7 @@ static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
INIT_LIST_HEAD(&vv->video_dmaq.queue);
- setup_timer(&vv->video_dmaq.timeout, saa7146_buffer_timeout,
- (unsigned long)(&vv->video_dmaq));
+ timer_setup(&vv->video_dmaq.timeout, saa7146_buffer_timeout, 0);
vv->video_dmaq.dev = dev;
/* set some default values */
@@ -1303,7 +1302,7 @@ out:
return ret;
}
-struct saa7146_use_ops saa7146_video_uops = {
+const struct saa7146_use_ops saa7146_video_uops = {
.init = video_init,
.open = video_open,
.release = video_close,
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index e7a0d7798d5b..c5c827e11b64 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -447,7 +447,7 @@ static struct smscore_registry_entry_t *smscore_find_registry(char *devpath)
return entry;
}
}
- entry = kmalloc(sizeof(struct smscore_registry_entry_t), GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
entry->mode = default_mode;
strcpy(entry->devpath, devpath);
@@ -521,13 +521,13 @@ static void list_add_locked(struct list_head *new, struct list_head *head,
spin_unlock_irqrestore(lock, flags);
}
-/**
+/*
* register a client callback that called when device plugged in/unplugged
* NOTE: if devices exist callback is called immediately for each device
*
* @param hotplug callback
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
int smscore_register_hotplug(hotplug_t hotplug)
{
@@ -536,9 +536,7 @@ int smscore_register_hotplug(hotplug_t hotplug)
int rc = 0;
kmutex_lock(&g_smscore_deviceslock);
-
- notifyee = kmalloc(sizeof(struct smscore_device_notifyee_t),
- GFP_KERNEL);
+ notifyee = kmalloc(sizeof(*notifyee), GFP_KERNEL);
if (notifyee) {
/* now notify callback about existing devices */
first = &g_smscore_devices;
@@ -564,7 +562,7 @@ int smscore_register_hotplug(hotplug_t hotplug)
}
EXPORT_SYMBOL_GPL(smscore_register_hotplug);
-/**
+/*
* unregister a client callback that called when device plugged in/unplugged
*
* @param hotplug callback
@@ -627,7 +625,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
{
struct smscore_buffer_t *cb;
- cb = kzalloc(sizeof(struct smscore_buffer_t), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
@@ -638,7 +636,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
return cb;
}
-/**
+/*
* creates coredev object for a device, prepares buffers,
* creates buffer mappings, notifies registered hotplugs about new device.
*
@@ -646,7 +644,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
* and handlers
* @param coredev pointer to a value that receives created coredev object
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
int smscore_register_device(struct smsdevice_params_t *params,
struct smscore_device_t **coredev,
@@ -655,7 +653,7 @@ int smscore_register_device(struct smsdevice_params_t *params,
struct smscore_device_t *dev;
u8 *buffer;
- dev = kzalloc(sizeof(struct smscore_device_t), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -751,7 +749,7 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
void *buffer, size_t size, struct completion *completion) {
int rc;
- if (completion == NULL)
+ if (!completion)
return -EINVAL;
init_completion(completion);
@@ -766,10 +764,10 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
0 : -ETIME;
}
-/**
+/*
* Starts & enables IR operations
*
- * @return 0 on success, < 0 on error.
+ * return: 0 on success, < 0 on error.
*/
static int smscore_init_ir(struct smscore_device_t *coredev)
{
@@ -814,13 +812,13 @@ static int smscore_init_ir(struct smscore_device_t *coredev)
return 0;
}
-/**
+/*
* configures device features according to board configuration structure.
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
static int smscore_configure_board(struct smscore_device_t *coredev)
{
@@ -863,13 +861,13 @@ static int smscore_configure_board(struct smscore_device_t *coredev)
return 0;
}
-/**
+/*
* sets initial device mode and notifies client hotplugs that device is ready
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
int smscore_start_device(struct smscore_device_t *coredev)
{
@@ -1089,7 +1087,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
},
};
-/**
+/*
* get firmware file name from one of the two mechanisms : sms_boards or
* smscore_fw_lkup.
* @param coredev pointer to a coredev object returned by
@@ -1098,7 +1096,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
* @param lookup if 1, always get the fw filename from smscore_fw_lkup
* table. if 0, try first to get from sms_boards
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
int mode)
@@ -1127,7 +1125,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
return fw[mode];
}
-/**
+/*
* loads specified firmware into a buffer and calls device loadfirmware_handler
*
* @param coredev pointer to a coredev object returned by
@@ -1135,7 +1133,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
* @param filename null-terminated string specifies firmware file name
* @param loadfirmware_handler device handler that loads firmware
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
int mode,
@@ -1153,8 +1151,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
}
pr_debug("Firmware name: %s\n", fw_filename);
- if (loadfirmware_handler == NULL && !(coredev->device_flags
- & SMS_DEVICE_FAMILY2))
+ if (!loadfirmware_handler &&
+ !(coredev->device_flags & SMS_DEVICE_FAMILY2))
return -EINVAL;
rc = request_firmware(&fw, fw_filename, coredev->device);
@@ -1184,14 +1182,14 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
return rc;
}
-/**
+/*
* notifies all clients registered with the device, notifies hotplugs,
* frees all buffers and coredev object
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
void smscore_unregister_device(struct smscore_device_t *coredev)
{
@@ -1284,14 +1282,14 @@ static int smscore_detect_mode(struct smscore_device_t *coredev)
return rc;
}
-/**
+/*
* send init device request and wait for response
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
* @param mode requested mode of operation
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
static int smscore_init_device(struct smscore_device_t *coredev, int mode)
{
@@ -1301,10 +1299,8 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
buffer = kmalloc(sizeof(struct sms_msg_data) +
SMS_DMA_ALIGNMENT, GFP_KERNEL | GFP_DMA);
- if (!buffer) {
- pr_err("Could not allocate buffer for init device message.\n");
+ if (!buffer)
return -ENOMEM;
- }
msg = (struct sms_msg_data *)SMS_ALIGN_ADDRESS(buffer);
SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_INIT_DEVICE_REQ,
@@ -1319,7 +1315,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
return rc;
}
-/**
+/*
* calls device handler to change mode of operation
* NOTE: stellar/usb may disconnect when changing mode
*
@@ -1327,7 +1323,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
* smscore_register_device
* @param mode requested mode of operation
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
{
@@ -1415,13 +1411,13 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
return rc;
}
-/**
+/*
* calls device handler to get current mode of operation
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
*
- * @return current mode
+ * return: current mode
*/
int smscore_get_device_mode(struct smscore_device_t *coredev)
{
@@ -1429,7 +1425,7 @@ int smscore_get_device_mode(struct smscore_device_t *coredev)
}
EXPORT_SYMBOL_GPL(smscore_get_device_mode);
-/**
+/*
* find client by response id & type within the clients list.
* return client handle or NULL.
*
@@ -1466,7 +1462,7 @@ found:
return client;
}
-/**
+/*
* find client by response id/type, call clients onresponse handler
* return buffer to pool on error
*
@@ -1619,13 +1615,13 @@ void smscore_onresponse(struct smscore_device_t *coredev,
}
EXPORT_SYMBOL_GPL(smscore_onresponse);
-/**
+/*
* return pointer to next free buffer descriptor from core pool
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
*
- * @return pointer to descriptor on success, NULL on error.
+ * return: pointer to descriptor on success, NULL on error.
*/
static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
@@ -1652,7 +1648,7 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
}
EXPORT_SYMBOL_GPL(smscore_getbuffer);
-/**
+/*
* return buffer descriptor to a pool
*
* @param coredev pointer to a coredev object returned by
@@ -1686,11 +1682,10 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
pr_err("The msg ID already registered to another client.\n");
return -EEXIST;
}
- listentry = kzalloc(sizeof(struct smscore_idlist_t), GFP_KERNEL);
- if (!listentry) {
- pr_err("Can't allocate memory for client id.\n");
+ listentry = kzalloc(sizeof(*listentry), GFP_KERNEL);
+ if (!listentry)
return -ENOMEM;
- }
+
listentry->id = id;
listentry->data_type = data_type;
list_add_locked(&listentry->entry, &client->idlist,
@@ -1698,7 +1693,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
return 0;
}
-/**
+/*
* creates smsclient object, check that id is taken by another client
*
* @param coredev pointer to a coredev object from clients hotplug
@@ -1710,7 +1705,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
* @param context client-specific context
* @param client pointer to a value that receives created smsclient object
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
int smscore_register_client(struct smscore_device_t *coredev,
struct smsclient_params_t *params,
@@ -1724,11 +1719,9 @@ int smscore_register_client(struct smscore_device_t *coredev,
return -EEXIST;
}
- newclient = kzalloc(sizeof(struct smscore_client_t), GFP_KERNEL);
- if (!newclient) {
- pr_err("Failed to allocate memory for client.\n");
+ newclient = kzalloc(sizeof(*newclient), GFP_KERNEL);
+ if (!newclient)
return -ENOMEM;
- }
INIT_LIST_HEAD(&newclient->idlist);
newclient->coredev = coredev;
@@ -1747,7 +1740,7 @@ int smscore_register_client(struct smscore_device_t *coredev,
}
EXPORT_SYMBOL_GPL(smscore_register_client);
-/**
+/*
* frees smsclient object and all subclients associated with it
*
* @param client pointer to smsclient object returned by
@@ -1778,7 +1771,7 @@ void smscore_unregister_client(struct smscore_client_t *client)
}
EXPORT_SYMBOL_GPL(smscore_unregister_client);
-/**
+/*
* verifies that source id is not taken by another client,
* calls device handler to send requests to the device
*
@@ -1787,7 +1780,7 @@ EXPORT_SYMBOL_GPL(smscore_unregister_client);
* @param buffer pointer to a request buffer
* @param size size (in bytes) of request buffer
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
int smsclient_sendrequest(struct smscore_client_t *client,
void *buffer, size_t size)
@@ -1796,7 +1789,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
int rc;
- if (client == NULL) {
+ if (!client) {
pr_err("Got NULL client\n");
return -EINVAL;
}
@@ -1804,7 +1797,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
coredev = client->coredev;
/* check that no other channel with same id exists */
- if (coredev == NULL) {
+ if (!coredev) {
pr_err("Got NULL coredev\n");
return -EINVAL;
}
@@ -1961,7 +1954,7 @@ int smscore_gpio_configure(struct smscore_device_t *coredev, u8 pin_num,
if (pin_num > MAX_GPIO_PIN_NUMBER)
return -EINVAL;
- if (p_gpio_config == NULL)
+ if (!p_gpio_config)
return -EINVAL;
total_len = sizeof(struct sms_msg_hdr) + (sizeof(u32) * 6);
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index a772976cfe26..f96968c11312 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -238,6 +238,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->color_enc = TGP_COLOR_ENC_RGB;
break;
case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
tpg->color_enc = TGP_COLOR_ENC_LUMA;
@@ -352,6 +354,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
tpg->twopixelsize[0] = 2 * 2;
@@ -1056,6 +1060,14 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_GREY:
buf[0][offset] = r_y_h;
break;
+ case V4L2_PIX_FMT_Y10:
+ buf[0][offset] = (r_y_h << 2) & 0xff;
+ buf[0][offset+1] = r_y_h >> 6;
+ break;
+ case V4L2_PIX_FMT_Y12:
+ buf[0][offset] = (r_y_h << 4) & 0xff;
+ buf[0][offset+1] = r_y_h >> 4;
+ break;
case V4L2_PIX_FMT_Y16:
/*
* Ideally both bytes should be set to r_y_h, but then you won't
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 18e4230865be..3ddd44e1ee77 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -329,9 +329,9 @@ static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
return 0;
}
-static void dvb_dmxdev_filter_timeout(unsigned long data)
+static void dvb_dmxdev_filter_timeout(struct timer_list *t)
{
- struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
+ struct dmxdev_filter *dmxdevfilter = from_timer(dmxdevfilter, t, timer);
dmxdevfilter->buffer.error = -ETIMEDOUT;
spin_lock_irq(&dmxdevfilter->dev->lock);
@@ -346,8 +346,6 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
del_timer(&dmxdevfilter->timer);
if (para->timeout) {
- dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
- dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
dmxdevfilter->timer.expires =
jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
add_timer(&dmxdevfilter->timer);
@@ -754,7 +752,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
- init_timer(&dmxdevfilter->timer);
+ timer_setup(&dmxdevfilter->timer, dvb_dmxdev_filter_timeout, 0);
dvbdev->users++;
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 054fd4eb6192..5e795f5f0f41 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -36,12 +36,33 @@
#include "demux.h"
#include "dvb_ringbuffer.h"
+/**
+ * enum dmxdev_type - type of demux filter type.
+ *
+ * @DMXDEV_TYPE_NONE: no filter set.
+ * @DMXDEV_TYPE_SEC: section filter.
+ * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter.
+ */
enum dmxdev_type {
DMXDEV_TYPE_NONE,
DMXDEV_TYPE_SEC,
DMXDEV_TYPE_PES,
};
+/**
+ * enum dmxdev_state - state machine for the dmxdev.
+ *
+ * @DMXDEV_STATE_FREE: indicates that the filter is freed.
+ * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMXDEV_STATE_SET: indicates that the filter parameters are set.
+ * @DMXDEV_STATE_GO: indicates that the filter is running.
+ * @DMXDEV_STATE_DONE: indicates that a packet was already filtered
+ * and the filter is now disabled.
+ * Set only if %DMX_ONESHOT. See
+ * &dmx_sct_filter_params.
+ * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition.
+ */
enum dmxdev_state {
DMXDEV_STATE_FREE,
DMXDEV_STATE_ALLOCATED,
@@ -51,12 +72,49 @@ enum dmxdev_state {
DMXDEV_STATE_TIMEDOUT
};
+/**
+ * struct dmxdev_feed - digital TV dmxdev feed
+ *
+ * @pid: Program ID to be filtered
+ * @ts: pointer to &struct dmx_ts_feed
+ * @next: &struct list_head pointing to the next feed.
+ */
+
struct dmxdev_feed {
u16 pid;
struct dmx_ts_feed *ts;
struct list_head next;
};
+/**
+ * struct dmxdev_filter - digital TV dmxdev filter
+ *
+ * @filter: a dmxdev filter. Currently used only for section filter:
+ * if the filter is Section, it contains a
+ * &struct dmx_section_filter @sec pointer.
+ * @feed: a dmxdev feed. Depending on the feed type, it can be:
+ * for TS feed: a &struct list_head @ts list of TS and PES
+ * feeds;
+ * for section feed: a &struct dmx_section_feed @sec pointer.
+ * @params: dmxdev filter parameters. Depending on the feed type, it
+ * can be:
+ * for section filter: a &struct dmx_sct_filter_params @sec
+ * embedded struct;
+ * for a TS filter: a &struct dmx_pes_filter_params @pes
+ * embedded struct.
+ * @type: type of the dmxdev filter, as defined by &enum dmxdev_type.
+ * @state: state of the dmxdev filter, as defined by &enum dmxdev_state.
+ * @dev: pointer to &struct dmxdev.
+ * @buffer: an embedded &struct dvb_ringbuffer buffer.
+ * @mutex: protects the access to &struct dmxdev_filter.
+ * @timer: &struct timer_list embedded timer, used to check for
+ * feed timeouts.
+ * Only for section filter.
+ * @todo: index for the @secheader.
+ * Only for section filter.
+ * @secheader: buffer cache to parse the section header.
+ * Only for section filter.
+ */
struct dmxdev_filter {
union {
struct dmx_section_filter *sec;
@@ -86,7 +144,23 @@ struct dmxdev_filter {
u8 secheader[3];
};
-
+/**
+ * struct dmxdev - Describes a digital TV demux device.
+ *
+ * @dvbdev: pointer to &struct dvb_device associated with
+ * the demux device node.
+ * @dvr_dvbdev: pointer to &struct dvb_device associated with
+ * the dvr device node.
+ * @filter: pointer to &struct dmxdev_filter.
+ * @demux: pointer to &struct dmx_demux.
+ * @filternum: number of filters.
+ * @capabilities: demux capabilities as defined by &enum dmx_demux_caps.
+ * @exit: flag to indicate that the demux is being released.
+ * @dvr_orig_fe: pointer to &struct dmx_frontend.
+ * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output.
+ * @mutex: protects the usage of this structure.
+ * @lock: protects access to &dmxdev->filter->data.
+ */
struct dmxdev {
struct dvb_device *dvbdev;
struct dvb_device *dvr_dvbdev;
@@ -108,8 +182,20 @@ struct dmxdev {
spinlock_t lock;
};
+/**
+ * dvb_dmxdev_init - initializes a digital TV demux and registers both demux
+ * and DVR devices.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ * @adap: pointer to &struct dvb_adapter.
+ */
+int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap);
-int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *);
+/**
+ * dvb_dmxdev_release - releases a digital TV demux and unregisters it.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ */
void dvb_dmxdev_release(struct dmxdev *dmxdev);
#endif /* _DMXDEV_H_ */
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 95b3723282f4..d48b61eb01f4 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -206,7 +206,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
* @hlen: Number of bytes in haystack.
* @needle: Buffer to find.
* @nlen: Number of bytes in needle.
- * @return Pointer into haystack needle was found at, or NULL if not found.
+ * return: Pointer into haystack needle was found at, or NULL if not found.
*/
static char *findstr(char *haystack, int hlen, char *needle, int nlen)
{
@@ -226,7 +226,7 @@ static char *findstr(char *haystack, int hlen, char *needle, int nlen)
/* ************************************************************************** */
/* EN50221 physical interface functions */
-/**
+/*
* dvb_ca_en50221_check_camstatus - Check CAM status.
*/
static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
@@ -275,9 +275,9 @@ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
* @ca: CA instance.
* @slot: Slot on interface.
* @waitfor: Flags to wait for.
- * @timeout_ms: Timeout in milliseconds.
+ * @timeout_hz: Timeout in milliseconds.
*
- * @return 0 on success, nonzero on error.
+ * return: 0 on success, nonzero on error.
*/
static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
u8 waitfor, int timeout_hz)
@@ -325,7 +325,7 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
* @ca: CA instance.
* @slot: Slot id.
*
- * @return 0 on success, nonzero on failure.
+ * return: 0 on success, nonzero on failure.
*/
static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
{
@@ -397,11 +397,11 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
* @ca: CA instance.
* @slot: Slot id.
* @address: Address to read from. Updated.
- * @tupleType: Tuple id byte. Updated.
- * @tupleLength: Tuple length. Updated.
+ * @tuple_type: Tuple id byte. Updated.
+ * @tuple_length: Tuple length. Updated.
* @tuple: Dest buffer for tuple (must be 256 bytes). Updated.
*
- * @return 0 on success, nonzero on error.
+ * return: 0 on success, nonzero on error.
*/
static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
int *address, int *tuple_type,
@@ -455,7 +455,7 @@ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
* @ca: CA instance.
* @slot: Slot id.
*
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
*/
static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
{
@@ -632,10 +632,11 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
* @ca: CA instance.
* @slot: Slot to read from.
* @ebuf: If non-NULL, the data will be written to this buffer. If NULL,
- * the data will be added into the buffering system as a normal fragment.
+ * the data will be added into the buffering system as a normal
+ * fragment.
* @ecount: Size of ebuf. Ignored if ebuf is NULL.
*
- * @return Number of bytes read, or < 0 on error
+ * return: Number of bytes read, or < 0 on error
*/
static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
u8 *ebuf, int ecount)
@@ -784,11 +785,11 @@ exit:
*
* @ca: CA instance.
* @slot: Slot to write to.
- * @ebuf: The data in this buffer is treated as a complete link-level packet to
- * be written.
- * @count: Size of ebuf.
+ * @buf: The data in this buffer is treated as a complete link-level packet to
+ * be written.
+ * @bytes_write: Size of ebuf.
*
- * @return Number of bytes written, or < 0 on error.
+ * return: Number of bytes written, or < 0 on error.
*/
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
u8 *buf, int bytes_write)
@@ -933,7 +934,7 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
/**
* dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
*
- * @ca: CA instance.
+ * @pubca: CA instance.
* @slot: Slot concerned.
* @change_type: One of the DVB_CA_CAMCHANGE_* values.
*/
@@ -963,7 +964,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
/**
* dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
*
- * @ca: CA instance.
+ * @pubca: CA instance.
* @slot: Slot concerned.
*/
void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -983,7 +984,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
/**
* dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred.
*
- * @ca: CA instance.
+ * @pubca: CA instance.
* @slot: Slot concerned.
*/
void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -1091,7 +1092,7 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
*
* @ca: CA instance.
* @slot: Slot to process.
- * @return: 0 .. no change
+ * return:: 0 .. no change
* 1 .. CAM state changed
*/
@@ -1296,7 +1297,7 @@ static void dvb_ca_en50221_thread_state_machine(struct dvb_ca_private *ca,
mutex_unlock(&sl->slot_lock);
}
-/**
+/*
* Kernel thread which monitors CA slots for CAM changes, and performs data
* transfers.
*/
@@ -1336,12 +1337,11 @@ static int dvb_ca_en50221_thread(void *data)
* Real ioctl implementation.
* NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
*
- * @inode: Inode concerned.
* @file: File concerned.
* @cmd: IOCTL command.
- * @arg: Associated argument.
+ * @parg: Associated argument.
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
static int dvb_ca_en50221_io_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
@@ -1420,12 +1420,11 @@ out_unlock:
/**
* Wrapper for ioctl implementation.
*
- * @inode: Inode concerned.
* @file: File concerned.
* @cmd: IOCTL command.
* @arg: Associated argument.
*
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
*/
static long dvb_ca_en50221_io_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
@@ -1441,7 +1440,7 @@ static long dvb_ca_en50221_io_ioctl(struct file *file,
* @count: Size of source buffer.
* @ppos: Position in file (ignored).
*
- * @return Number of bytes read, or <0 on error.
+ * return: Number of bytes read, or <0 on error.
*/
static ssize_t dvb_ca_en50221_io_write(struct file *file,
const char __user *buf, size_t count,
@@ -1536,7 +1535,7 @@ exit:
return status;
}
-/**
+/*
* Condition for waking up in dvb_ca_en50221_io_read_condition
*/
static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca,
@@ -1593,7 +1592,7 @@ nextslot:
* @count: Size of destination buffer.
* @ppos: Position in file (ignored).
*
- * @return Number of bytes read, or <0 on error.
+ * return: Number of bytes read, or <0 on error.
*/
static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -1702,7 +1701,7 @@ exit:
* @inode: Inode concerned.
* @file: File concerned.
*
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
*/
static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
{
@@ -1752,7 +1751,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
* @inode: Inode concerned.
* @file: File concerned.
*
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
*/
static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
{
@@ -1781,7 +1780,7 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
* @file: File concerned.
* @wait: poll wait table.
*
- * @return Standard poll mask.
+ * return: Standard poll mask.
*/
static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
{
@@ -1838,11 +1837,11 @@ static const struct dvb_device dvbdev_ca = {
* Initialise a new DVB CA EN50221 interface device.
*
* @dvb_adapter: DVB adapter to attach the new CA device to.
- * @ca: The dvb_ca instance.
+ * @pubca: The dvb_ca instance.
* @flags: Flags describing the CA device (DVB_CA_FLAG_*).
* @slot_count: Number of slots supported.
*
- * @return 0 on success, nonzero on failure
+ * return: 0 on success, nonzero on failure
*/
int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
struct dvb_ca_en50221 *pubca, int flags, int slot_count)
@@ -1929,8 +1928,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_init);
/**
* Release a DVB CA EN50221 interface device.
*
- * @ca_dev: The dvb_device_t instance for the CA device.
- * @ca: The associated dvb_ca instance.
+ * @pubca: The associated dvb_ca instance.
*/
void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
{
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 6628f80d184f..acade7543b82 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -223,10 +223,10 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
* when the second packet arrives.
*
* Fix:
- * when demux is started, let feed->pusi_seen = 0 to
+ * when demux is started, let feed->pusi_seen = false to
* prevent initial feeding of garbage from the end of
* previous section. When you for the first time see PUSI=1
- * then set feed->pusi_seen = 1
+ * then set feed->pusi_seen = true
*/
static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
const u8 *buf, u8 len)
@@ -318,10 +318,10 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
*/
#endif
/*
- * Discontinuity detected. Reset pusi_seen = 0 to
+ * Discontinuity detected. Reset pusi_seen to
* stop feeding of suspicious data until next PUSI=1 arrives
*/
- feed->pusi_seen = 0;
+ feed->pusi_seen = false;
dvb_dmx_swfilter_section_new(feed);
}
@@ -335,8 +335,8 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
dvb_dmx_swfilter_section_copy_dump(feed, before,
before_len);
- /* before start of new section, set pusi_seen = 1 */
- feed->pusi_seen = 1;
+ /* before start of new section, set pusi_seen */
+ feed->pusi_seen = true;
dvb_dmx_swfilter_section_new(feed);
dvb_dmx_swfilter_section_copy_dump(feed, after,
after_len);
@@ -367,6 +367,7 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
else
feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
}
+ /* Used only on full-featured devices */
if (feed->ts_type & TS_DECODER)
if (feed->demux->write_to_decoder)
feed->demux->write_to_decoder(feed, buf, 188);
@@ -898,14 +899,14 @@ static void prepare_secfilters(struct dvb_demux_feed *dvbdmxfeed)
return;
do {
sf = &f->filter;
- doneq = 0;
+ doneq = false;
for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
mode = sf->filter_mode[i];
mask = sf->filter_mask[i];
f->maskandmode[i] = mask & mode;
doneq |= f->maskandnotmode[i] = mask & ~mode;
}
- f->doneq = doneq ? 1 : 0;
+ f->doneq = doneq ? true : false;
} while ((f = f->next));
}
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 6f572ca8d339..cc048f09aa85 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -26,15 +26,33 @@
#include "demux.h"
-#define DMX_TYPE_TS 0
-#define DMX_TYPE_SEC 1
-#define DMX_TYPE_PES 2
+/**
+ * enum dvb_dmx_filter_type - type of demux feed.
+ *
+ * @DMX_TYPE_TS: feed is in TS mode.
+ * @DMX_TYPE_SEC: feed is in Section mode.
+ */
+enum dvb_dmx_filter_type {
+ DMX_TYPE_TS,
+ DMX_TYPE_SEC,
+};
-#define DMX_STATE_FREE 0
-#define DMX_STATE_ALLOCATED 1
-#define DMX_STATE_SET 2
-#define DMX_STATE_READY 3
-#define DMX_STATE_GO 4
+/**
+ * enum dvb_dmx_state - state machine for a demux filter.
+ *
+ * @DMX_STATE_FREE: indicates that the filter is freed.
+ * @DMX_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMX_STATE_READY: indicates that the filter is ready
+ * to be used.
+ * @DMX_STATE_GO: indicates that the filter is running.
+ */
+enum dvb_dmx_state {
+ DMX_STATE_FREE,
+ DMX_STATE_ALLOCATED,
+ DMX_STATE_READY,
+ DMX_STATE_GO,
+};
#define DVB_DEMUX_MASK_MAX 18
@@ -42,24 +60,66 @@
#define SPEED_PKTS_INTERVAL 50000
+/**
+ * struct dvb_demux_filter - Describes a DVB demux section filter.
+ *
+ * @filter: Section filter as defined by &struct dmx_section_filter.
+ * @maskandmode: logical ``and`` bit mask.
+ * @maskandnotmode: logical ``and not`` bit mask.
+ * @doneq: flag that indicates when a filter is ready.
+ * @next: pointer to the next section filter.
+ * @feed: &struct dvb_demux_feed pointer.
+ * @index: index of the used demux filter.
+ * @state: state of the filter as described by &enum dvb_dmx_state.
+ * @type: type of the filter as described
+ * by &enum dvb_dmx_filter_type.
+ */
+
struct dvb_demux_filter {
struct dmx_section_filter filter;
u8 maskandmode[DMX_MAX_FILTER_SIZE];
u8 maskandnotmode[DMX_MAX_FILTER_SIZE];
- int doneq;
+ bool doneq;
struct dvb_demux_filter *next;
struct dvb_demux_feed *feed;
int index;
- int state;
- int type;
+ enum dvb_dmx_state state;
+ enum dvb_dmx_filter_type type;
+ /* private: used only by av7110 */
u16 hw_handle;
- struct timer_list timer;
};
-#define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head)
-
+/**
+ * struct dvb_demux_feed - describes a DVB field
+ *
+ * @feed: a digital TV feed. It can either be a TS or a section feed:
+ * if the feed is TS, it contains &struct dvb_ts_feed @ts;
+ * if the feed is section, it contains
+ * &struct dmx_section_feed @sec.
+ * @cb: digital TV callbacks. depending on the feed type, it can be:
+ * if the feed is TS, it contains a dmx_ts_cb() @ts callback;
+ * if the feed is section, it contains a dmx_section_cb() @sec
+ * callback.
+ *
+ * @demux: pointer to &struct dvb_demux.
+ * @priv: private data that can optionally be used by a DVB driver.
+ * @type: type of the filter, as defined by &enum dvb_dmx_filter_type.
+ * @state: state of the filter as defined by &enum dvb_dmx_state.
+ * @pid: PID to be filtered.
+ * @timeout: feed timeout.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @ts_type: type of TS, as defined by &enum ts_filter_type.
+ * @pes_type: type of PES, as defined by &enum dmx_ts_pes.
+ * @cc: MPEG-TS packet continuity counter
+ * @pusi_seen: if true, indicates that a discontinuity was detected.
+ * it is used to prevent feeding of garbage from previous section.
+ * @peslen: length of the PES (Packet Elementary Stream).
+ * @list_head: head for the list of digital TV demux feeds.
+ * @index: a unique index for each feed. Can be used as hardware
+ * pid filter index.
+ */
struct dvb_demux_feed {
union {
struct dmx_ts_feed ts;
@@ -73,25 +133,63 @@ struct dvb_demux_feed {
struct dvb_demux *demux;
void *priv;
- int type;
- int state;
+ enum dvb_dmx_filter_type type;
+ enum dvb_dmx_state state;
u16 pid;
ktime_t timeout;
struct dvb_demux_filter *filter;
- int ts_type;
+ enum ts_filter_type ts_type;
enum dmx_ts_pes pes_type;
int cc;
- int pusi_seen; /* prevents feeding of garbage from previous section */
+ bool pusi_seen;
u16 peslen;
struct list_head list_head;
- unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
+ unsigned int index;
};
+/**
+ * struct dvb_demux - represents a digital TV demux
+ * @dmx: embedded &struct dmx_demux with demux capabilities
+ * and callbacks.
+ * @priv: private data that can optionally be used by
+ * a DVB driver.
+ * @filternum: maximum amount of DVB filters.
+ * @feednum: maximum amount of DVB feeds.
+ * @start_feed: callback routine to be called in order to start
+ * a DVB feed.
+ * @stop_feed: callback routine to be called in order to stop
+ * a DVB feed.
+ * @write_to_decoder: callback routine to be called if the feed is TS and
+ * it is routed to an A/V decoder, when a new TS packet
+ * is received.
+ * Used only on av7110-av.c.
+ * @check_crc32: callback routine to check CRC. If not initialized,
+ * dvb_demux will use an internal one.
+ * @memcopy: callback routine to memcopy received data.
+ * If not initialized, dvb_demux will default to memcpy().
+ * @users: counter for the number of demux opened file descriptors.
+ * Currently, it is limited to 10 users.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @feed: pointer to &struct dvb_demux_feed.
+ * @frontend_list: &struct list_head with frontends used by the demux.
+ * @pesfilter: array of &struct dvb_demux_feed with the PES types
+ * that will be filtered.
+ * @pids: list of filtered program IDs.
+ * @feed_list: &struct list_head with feeds.
+ * @tsbuf: temporary buffer used internally to store TS packets.
+ * @tsbufp: temporary buffer index used internally.
+ * @mutex: pointer to &struct mutex used to protect feed set
+ * logic.
+ * @lock: pointer to &spinlock_t, used to protect buffer handling.
+ * @cnt_storage: buffer used for TS/TEI continuity check.
+ * @speed_last_time: &ktime_t used for TS speed check.
+ * @speed_pkts_cnt: packets count used for TS speed check.
+ */
struct dvb_demux {
struct dmx_demux dmx;
void *priv;
@@ -115,8 +213,6 @@ struct dvb_demux {
struct dvb_demux_feed *pesfilter[DMX_PES_OTHER];
u16 pids[DMX_PES_OTHER];
- int playing;
- int recording;
#define DMX_MAX_PID 0x2000
struct list_head feed_list;
@@ -130,15 +226,119 @@ struct dvb_demux {
ktime_t speed_last_time; /* for TS speed check */
uint32_t speed_pkts_cnt; /* for TS speed check */
+
+ /* private: used only on av7110 */
+ int playing;
+ int recording;
};
-int dvb_dmx_init(struct dvb_demux *dvbdemux);
-void dvb_dmx_release(struct dvb_demux *dvbdemux);
-void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
+/**
+ * dvb_dmx_init - initialize a digital TV demux struct.
+ *
+ * @demux: &struct dvb_demux to be initialized.
+ *
+ * Before being able to register a digital TV demux struct, drivers
+ * should call this routine. On its typical usage, some fields should
+ * be initialized at the driver before calling it.
+ *
+ * A typical usecase is::
+ *
+ * dvb->demux.dmx.capabilities =
+ * DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ * DMX_MEMORY_BASED_FILTERING;
+ * dvb->demux.priv = dvb;
+ * dvb->demux.filternum = 256;
+ * dvb->demux.feednum = 256;
+ * dvb->demux.start_feed = driver_start_feed;
+ * dvb->demux.stop_feed = driver_stop_feed;
+ * ret = dvb_dmx_init(&dvb->demux);
+ * if (ret < 0)
+ * return ret;
+ */
+int dvb_dmx_init(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_release - releases a digital TV demux internal buffers.
+ *
+ * @demux: &struct dvb_demux to be released.
+ *
+ * The DVB core internally allocates data at @demux. This routine
+ * releases those data. Please notice that the struct itelf is not
+ * released, as it can be embedded on other structs.
+ */
+void dvb_dmx_release(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * The routine will discard a DVB packet that don't start with 0x47.
+ *
+ * Use this routine if the DVB demux fills MPEG-TS buffers that are
+ * already aligned.
+ *
+ * NOTE: The @buf size should have size equal to ``count * 188``.
+ */
+void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count);
+
+/**
+ * dvb_dmx_swfilter - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * If a DVB packet doesn't start with 0x47, it will seek for the first
+ * byte that starts with 0x47.
+ *
+ * Use this routine if the DVB demux fill buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: The @buf size should have size equal to ``count * 188``.
+ */
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
+
+/**
+ * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 204 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 204.
+ *
+ * If a DVB packet doesn't start with 0x47, it will seek for the first
+ * byte that starts with 0x47.
+ *
+ * Use this routine if the DVB demux fill buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: The @buf size should have size equal to ``count * 204``.
+ */
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
size_t count);
+
+/**
+ * dvb_dmx_swfilter_raw - make the raw data available to userspace without
+ * filtering
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data
+ * @count: number of packets to be passed. The actual size of each packet
+ * depends on the &dvb_demux->feed->cb.ts logic.
+ *
+ * Use it if the driver needs to deliver the raw payload to userspace without
+ * passing through the kernel demux. That is meant to support some
+ * delivery systems that aren't based on MPEG-TS.
+ *
+ * This function relies on &dvb_demux->feed->cb.ts to actually handle the
+ * buffer.
+ */
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
size_t count);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 9139d01ba7ed..2afaa8226342 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- if (!fepriv)
- return;
-
- dvb_free_device(fepriv->dvbdev);
+ if (fepriv)
+ dvb_free_device(fepriv->dvbdev);
dvb_frontend_invoke_release(fe, fe->ops.release);
- kfree(fepriv);
- fe->frontend_priv = NULL;
+ if (fepriv)
+ kfree(fepriv);
}
static void dvb_frontend_free(struct kref *ref)
@@ -371,11 +369,14 @@ static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepr
}
/**
- * Performs automatic twiddling of frontend parameters.
+ * dvb_frontend_swzigzag_autotune - Performs automatic twiddling of frontend
+ * parameters.
*
- * @param fe The frontend concerned.
- * @param check_wrapped Checks if an iteration has completed. DO NOT SET ON THE FIRST ATTEMPT
- * @returns Number of complete iterations that have been performed.
+ * @fe: The frontend concerned.
+ * @check_wrapped: Checks if an iteration has completed.
+ * DO NOT SET ON THE FIRST ATTEMPT.
+ *
+ * return: Number of complete iterations that have been performed.
*/
static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped)
{
@@ -951,8 +952,6 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
memset(c, 0, offsetof(struct dtv_frontend_properties, strength));
c->delivery_system = delsys;
- c->state = DTV_CLEAR;
-
dev_dbg(fe->dvb->device, "%s: Clearing cache for delivery system %d\n",
__func__, c->delivery_system);
@@ -1109,39 +1108,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
};
-static void dtv_property_dump(struct dvb_frontend *fe,
- bool is_set,
- struct dtv_property *tvp)
-{
- int i;
-
- if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
- dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n",
- __func__,
- is_set ? "SET" : "GET",
- tvp->cmd);
- return;
- }
-
- dev_dbg(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x (%s)\n", __func__,
- is_set ? "SET" : "GET",
- tvp->cmd,
- dtv_cmds[tvp->cmd].name);
-
- if (dtv_cmds[tvp->cmd].buffer) {
- dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n",
- __func__, tvp->u.buffer.len);
-
- for(i = 0; i < tvp->u.buffer.len; i++)
- dev_dbg(fe->dvb->device,
- "%s: tvp.u.buffer.data[0x%02x] = 0x%02x\n",
- __func__, i, tvp->u.buffer.data[i]);
- } else {
- dev_dbg(fe->dvb->device, "%s: tvp.u.data = 0x%08x\n", __func__,
- tvp->u.data);
- }
-}
-
/* Synchronise the legacy tuning parameters into the cache, so that demodulator
* drivers can use a single set_frontend tuning function, regardless of whether
* it's being used for the legacy or new API, reducing code and complexity.
@@ -1290,7 +1256,7 @@ dtv_property_legacy_params_sync(struct dvb_frontend *fe,
* dtv_get_frontend - calls a callback for retrieving DTV parameters
* @fe: struct dvb_frontend pointer
* @c: struct dtv_frontend_properties pointer (DVBv5 cache)
- * @p_out struct dvb_frontend_parameters pointer (DVBv3 FE struct)
+ * @p_out: struct dvb_frontend_parameters pointer (DVBv3 FE struct)
*
* This routine calls either the DVBv3 or DVBv5 get_frontend call.
* If c is not null, it will update the DVBv5 cache struct pointed by it.
@@ -1315,17 +1281,15 @@ static int dtv_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static int dvb_frontend_ioctl_legacy(struct file *file,
- unsigned int cmd, void *parg);
-static int dvb_frontend_ioctl_properties(struct file *file,
- unsigned int cmd, void *parg);
+static int dvb_frontend_handle_ioctl(struct file *file,
+ unsigned int cmd, void *parg);
static int dtv_property_process_get(struct dvb_frontend *fe,
const struct dtv_frontend_properties *c,
struct dtv_property *tvp,
struct file *file)
{
- int r, ncaps;
+ int ncaps;
switch(tvp->cmd) {
case DTV_ENUM_DELSYS:
@@ -1536,14 +1500,18 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
return -EINVAL;
}
- /* Allow the frontend to override outgoing properties */
- if (fe->ops.get_property) {
- r = fe->ops.get_property(fe, tvp);
- if (r < 0)
- return r;
- }
-
- dtv_property_dump(fe, false, tvp);
+ if (!dtv_cmds[tvp->cmd].buffer)
+ dev_dbg(fe->dvb->device,
+ "%s: GET cmd 0x%08x (%s) = 0x%08x\n",
+ __func__, tvp->cmd, dtv_cmds[tvp->cmd].name,
+ tvp->u.data);
+ else
+ dev_dbg(fe->dvb->device,
+ "%s: GET cmd 0x%08x (%s) len %d: %*ph\n",
+ __func__,
+ tvp->cmd, dtv_cmds[tvp->cmd].name,
+ tvp->u.buffer.len,
+ tvp->u.buffer.len, tvp->u.buffer.data);
return 0;
}
@@ -1766,23 +1734,36 @@ static int dvbv3_set_delivery_system(struct dvb_frontend *fe)
return emulate_delivery_system(fe, delsys);
}
+/**
+ * dtv_property_process_set - Sets a single DTV property
+ * @fe: Pointer to &struct dvb_frontend
+ * @file: Pointer to &struct file
+ * @cmd: Digital TV command
+ * @data: An unsigned 32-bits number
+ *
+ * This routine assigns the property
+ * value to the corresponding member of
+ * &struct dtv_frontend_properties
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
static int dtv_property_process_set(struct dvb_frontend *fe,
- struct dtv_property *tvp,
- struct file *file)
+ struct file *file,
+ u32 cmd, u32 data)
{
int r = 0;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- /* Allow the frontend to validate incoming properties */
- if (fe->ops.set_property) {
- r = fe->ops.set_property(fe, tvp);
- if (r < 0)
- return r;
- }
-
- dtv_property_dump(fe, true, tvp);
-
- switch(tvp->cmd) {
+ /** Dump DTV command name and value*/
+ if (!cmd || cmd > DTV_MAX_COMMAND)
+ dev_warn(fe->dvb->device, "%s: SET cmd 0x%08x undefined\n",
+ __func__, cmd);
+ else
+ dev_dbg(fe->dvb->device,
+ "%s: SET cmd 0x%08x (%s) to 0x%08x\n",
+ __func__, cmd, dtv_cmds[cmd].name, data);
+ switch (cmd) {
case DTV_CLEAR:
/*
* Reset a cache of data specific to the frontend here. This does
@@ -1791,144 +1772,144 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
dvb_frontend_clear_cache(fe);
break;
case DTV_TUNE:
- /* interpret the cache of data, build either a traditional frontend
- * tunerequest so we can pass validation in the FE_SET_FRONTEND
- * ioctl.
+ /*
+ * Use the cached Digital TV properties to tune the
+ * frontend
*/
- c->state = tvp->cmd;
- dev_dbg(fe->dvb->device, "%s: Finalised property cache\n",
- __func__);
+ dev_dbg(fe->dvb->device,
+ "%s: Setting the frontend from property cache\n",
+ __func__);
r = dtv_set_frontend(fe);
break;
case DTV_FREQUENCY:
- c->frequency = tvp->u.data;
+ c->frequency = data;
break;
case DTV_MODULATION:
- c->modulation = tvp->u.data;
+ c->modulation = data;
break;
case DTV_BANDWIDTH_HZ:
- c->bandwidth_hz = tvp->u.data;
+ c->bandwidth_hz = data;
break;
case DTV_INVERSION:
- c->inversion = tvp->u.data;
+ c->inversion = data;
break;
case DTV_SYMBOL_RATE:
- c->symbol_rate = tvp->u.data;
+ c->symbol_rate = data;
break;
case DTV_INNER_FEC:
- c->fec_inner = tvp->u.data;
+ c->fec_inner = data;
break;
case DTV_PILOT:
- c->pilot = tvp->u.data;
+ c->pilot = data;
break;
case DTV_ROLLOFF:
- c->rolloff = tvp->u.data;
+ c->rolloff = data;
break;
case DTV_DELIVERY_SYSTEM:
- r = dvbv5_set_delivery_system(fe, tvp->u.data);
+ r = dvbv5_set_delivery_system(fe, data);
break;
case DTV_VOLTAGE:
- c->voltage = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
+ c->voltage = data;
+ r = dvb_frontend_handle_ioctl(file, FE_SET_VOLTAGE,
(void *)c->voltage);
break;
case DTV_TONE:
- c->sectone = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
+ c->sectone = data;
+ r = dvb_frontend_handle_ioctl(file, FE_SET_TONE,
(void *)c->sectone);
break;
case DTV_CODE_RATE_HP:
- c->code_rate_HP = tvp->u.data;
+ c->code_rate_HP = data;
break;
case DTV_CODE_RATE_LP:
- c->code_rate_LP = tvp->u.data;
+ c->code_rate_LP = data;
break;
case DTV_GUARD_INTERVAL:
- c->guard_interval = tvp->u.data;
+ c->guard_interval = data;
break;
case DTV_TRANSMISSION_MODE:
- c->transmission_mode = tvp->u.data;
+ c->transmission_mode = data;
break;
case DTV_HIERARCHY:
- c->hierarchy = tvp->u.data;
+ c->hierarchy = data;
break;
case DTV_INTERLEAVING:
- c->interleaving = tvp->u.data;
+ c->interleaving = data;
break;
/* ISDB-T Support here */
case DTV_ISDBT_PARTIAL_RECEPTION:
- c->isdbt_partial_reception = tvp->u.data;
+ c->isdbt_partial_reception = data;
break;
case DTV_ISDBT_SOUND_BROADCASTING:
- c->isdbt_sb_mode = tvp->u.data;
+ c->isdbt_sb_mode = data;
break;
case DTV_ISDBT_SB_SUBCHANNEL_ID:
- c->isdbt_sb_subchannel = tvp->u.data;
+ c->isdbt_sb_subchannel = data;
break;
case DTV_ISDBT_SB_SEGMENT_IDX:
- c->isdbt_sb_segment_idx = tvp->u.data;
+ c->isdbt_sb_segment_idx = data;
break;
case DTV_ISDBT_SB_SEGMENT_COUNT:
- c->isdbt_sb_segment_count = tvp->u.data;
+ c->isdbt_sb_segment_count = data;
break;
case DTV_ISDBT_LAYER_ENABLED:
- c->isdbt_layer_enabled = tvp->u.data;
+ c->isdbt_layer_enabled = data;
break;
case DTV_ISDBT_LAYERA_FEC:
- c->layer[0].fec = tvp->u.data;
+ c->layer[0].fec = data;
break;
case DTV_ISDBT_LAYERA_MODULATION:
- c->layer[0].modulation = tvp->u.data;
+ c->layer[0].modulation = data;
break;
case DTV_ISDBT_LAYERA_SEGMENT_COUNT:
- c->layer[0].segment_count = tvp->u.data;
+ c->layer[0].segment_count = data;
break;
case DTV_ISDBT_LAYERA_TIME_INTERLEAVING:
- c->layer[0].interleaving = tvp->u.data;
+ c->layer[0].interleaving = data;
break;
case DTV_ISDBT_LAYERB_FEC:
- c->layer[1].fec = tvp->u.data;
+ c->layer[1].fec = data;
break;
case DTV_ISDBT_LAYERB_MODULATION:
- c->layer[1].modulation = tvp->u.data;
+ c->layer[1].modulation = data;
break;
case DTV_ISDBT_LAYERB_SEGMENT_COUNT:
- c->layer[1].segment_count = tvp->u.data;
+ c->layer[1].segment_count = data;
break;
case DTV_ISDBT_LAYERB_TIME_INTERLEAVING:
- c->layer[1].interleaving = tvp->u.data;
+ c->layer[1].interleaving = data;
break;
case DTV_ISDBT_LAYERC_FEC:
- c->layer[2].fec = tvp->u.data;
+ c->layer[2].fec = data;
break;
case DTV_ISDBT_LAYERC_MODULATION:
- c->layer[2].modulation = tvp->u.data;
+ c->layer[2].modulation = data;
break;
case DTV_ISDBT_LAYERC_SEGMENT_COUNT:
- c->layer[2].segment_count = tvp->u.data;
+ c->layer[2].segment_count = data;
break;
case DTV_ISDBT_LAYERC_TIME_INTERLEAVING:
- c->layer[2].interleaving = tvp->u.data;
+ c->layer[2].interleaving = data;
break;
/* Multistream support */
case DTV_STREAM_ID:
case DTV_DVBT2_PLP_ID_LEGACY:
- c->stream_id = tvp->u.data;
+ c->stream_id = data;
break;
/* ATSC-MH */
case DTV_ATSCMH_PARADE_ID:
- fe->dtv_property_cache.atscmh_parade_id = tvp->u.data;
+ fe->dtv_property_cache.atscmh_parade_id = data;
break;
case DTV_ATSCMH_RS_FRAME_ENSEMBLE:
- fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data;
+ fe->dtv_property_cache.atscmh_rs_frame_ensemble = data;
break;
case DTV_LNA:
- c->lna = tvp->u.data;
+ c->lna = data;
if (fe->ops.set_lna)
r = fe->ops.set_lna(fe);
if (r < 0)
@@ -1942,14 +1923,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
return r;
}
-static int dvb_frontend_ioctl(struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_frontend_ioctl(struct file *file, unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- int err = -EOPNOTSUPP;
+ int err;
dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
if (down_interruptible(&fepriv->sem))
@@ -1960,109 +1939,33 @@ static int dvb_frontend_ioctl(struct file *file,
return -ENODEV;
}
- if ((file->f_flags & O_ACCMODE) == O_RDONLY &&
- (_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT ||
- cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
+ /*
+ * If the frontend is opened in read-only mode, only the ioctls
+ * that don't interfere with the tune logic should be accepted.
+ * That allows an external application to monitor the DVB QoS and
+ * statistics parameters.
+ *
+ * That matches all _IOR() ioctls, except for two special cases:
+ * - FE_GET_EVENT is part of the tuning logic on a DVB application;
+ * - FE_DISEQC_RECV_SLAVE_REPLY is part of DiSEqC 2.0
+ * setup
+ * So, those two ioctls should also return -EPERM, as otherwise
+ * reading from them would interfere with a DVB tune application
+ */
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY
+ && (_IOC_DIR(cmd) != _IOC_READ
+ || cmd == FE_GET_EVENT
+ || cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
up(&fepriv->sem);
return -EPERM;
}
- if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
- err = dvb_frontend_ioctl_properties(file, cmd, parg);
- else {
- c->state = DTV_UNDEFINED;
- err = dvb_frontend_ioctl_legacy(file, cmd, parg);
- }
+ err = dvb_frontend_handle_ioctl(file, cmd, parg);
up(&fepriv->sem);
return err;
}
-static int dvb_frontend_ioctl_properties(struct file *file,
- unsigned int cmd, void *parg)
-{
- struct dvb_device *dvbdev = file->private_data;
- struct dvb_frontend *fe = dvbdev->priv;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = 0;
-
- struct dtv_properties *tvps = parg;
- struct dtv_property *tvp = NULL;
- int i;
-
- dev_dbg(fe->dvb->device, "%s:\n", __func__);
-
- if (cmd == FE_SET_PROPERTY) {
- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
- return -EINVAL;
-
- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
- if (IS_ERR(tvp))
- return PTR_ERR(tvp);
-
- for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_set(fe, tvp + i, file);
- if (err < 0)
- goto out;
- (tvp + i)->result = err;
- }
-
- if (c->state == DTV_TUNE)
- dev_dbg(fe->dvb->device, "%s: Property cache is full, tuning\n", __func__);
-
- } else if (cmd == FE_GET_PROPERTY) {
- struct dtv_frontend_properties getp = fe->dtv_property_cache;
-
- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
- return -EINVAL;
-
- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
- if (IS_ERR(tvp))
- return PTR_ERR(tvp);
-
- /*
- * Let's use our own copy of property cache, in order to
- * avoid mangling with DTV zigzag logic, as drivers might
- * return crap, if they don't check if the data is available
- * before updating the properties cache.
- */
- if (fepriv->state != FESTATE_IDLE) {
- err = dtv_get_frontend(fe, &getp, NULL);
- if (err < 0)
- goto out;
- }
- for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_get(fe, &getp, tvp + i, file);
- if (err < 0)
- goto out;
- (tvp + i)->result = err;
- }
-
- if (copy_to_user((void __user *)tvps->props, tvp,
- tvps->num * sizeof(struct dtv_property))) {
- err = -EFAULT;
- goto out;
- }
-
- } else
- err = -EOPNOTSUPP;
-
-out:
- kfree(tvp);
- return err;
-}
-
static int dtv_set_frontend(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
@@ -2200,16 +2103,102 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
}
-static int dvb_frontend_ioctl_legacy(struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_frontend_handle_ioctl(struct file *file,
+ unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = -EOPNOTSUPP;
+ int i, err;
+
+ dev_dbg(fe->dvb->device, "%s:\n", __func__);
switch (cmd) {
+ case FE_SET_PROPERTY: {
+ struct dtv_properties *tvps = parg;
+ struct dtv_property *tvp = NULL;
+
+ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
+ __func__, tvps->num);
+ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
+ __func__, tvps->props);
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+ for (i = 0; i < tvps->num; i++) {
+ err = dtv_property_process_set(fe, file,
+ (tvp + i)->cmd,
+ (tvp + i)->u.data);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+ kfree(tvp);
+ break;
+ }
+ case FE_GET_PROPERTY: {
+ struct dtv_properties *tvps = parg;
+ struct dtv_property *tvp = NULL;
+ struct dtv_frontend_properties getp = fe->dtv_property_cache;
+
+ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
+ __func__, tvps->num);
+ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
+ __func__, tvps->props);
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+ /*
+ * Let's use our own copy of property cache, in order to
+ * avoid mangling with DTV zigzag logic, as drivers might
+ * return crap, if they don't check if the data is available
+ * before updating the properties cache.
+ */
+ if (fepriv->state != FESTATE_IDLE) {
+ err = dtv_get_frontend(fe, &getp, NULL);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+ for (i = 0; i < tvps->num; i++) {
+ err = dtv_property_process_get(fe, &getp,
+ tvp + i, file);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+
+ if (copy_to_user((void __user *)tvps->props, tvp,
+ tvps->num * sizeof(struct dtv_property))) {
+ kfree(tvp);
+ return -EFAULT;
+ }
+ kfree(tvp);
+ break;
+ }
+
case FE_GET_INFO: {
struct dvb_frontend_info* info = parg;
@@ -2273,42 +2262,6 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
break;
}
- case FE_READ_BER:
- if (fe->ops.read_ber) {
- if (fepriv->thread)
- err = fe->ops.read_ber(fe, (__u32 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_SIGNAL_STRENGTH:
- if (fe->ops.read_signal_strength) {
- if (fepriv->thread)
- err = fe->ops.read_signal_strength(fe, (__u16 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_SNR:
- if (fe->ops.read_snr) {
- if (fepriv->thread)
- err = fe->ops.read_snr(fe, (__u16 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_UNCORRECTED_BLOCKS:
- if (fe->ops.read_ucblocks) {
- if (fepriv->thread)
- err = fe->ops.read_ucblocks(fe, (__u32 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
case FE_DISEQC_RESET_OVERLOAD:
if (fe->ops.diseqc_reset_overload) {
err = fe->ops.diseqc_reset_overload(fe);
@@ -2360,6 +2313,23 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
}
break;
+ case FE_DISEQC_RECV_SLAVE_REPLY:
+ if (fe->ops.diseqc_recv_slave_reply)
+ err = fe->ops.diseqc_recv_slave_reply(fe, parg);
+ break;
+
+ case FE_ENABLE_HIGH_LNB_VOLTAGE:
+ if (fe->ops.enable_high_lnb_voltage)
+ err = fe->ops.enable_high_lnb_voltage(fe, (long) parg);
+ break;
+
+ case FE_SET_FRONTEND_TUNE_MODE:
+ fepriv->tune_mode_flags = (unsigned long) parg;
+ err = 0;
+ break;
+
+ /* DEPRECATED dish control ioctls */
+
case FE_DISHNETWORK_SEND_LEGACY_CMD:
if (fe->ops.dishnetwork_send_legacy_command) {
err = fe->ops.dishnetwork_send_legacy_command(fe,
@@ -2425,16 +2395,46 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
}
break;
- case FE_DISEQC_RECV_SLAVE_REPLY:
- if (fe->ops.diseqc_recv_slave_reply)
- err = fe->ops.diseqc_recv_slave_reply(fe, (struct dvb_diseqc_slave_reply*) parg);
+ /* DEPRECATED statistics ioctls */
+
+ case FE_READ_BER:
+ if (fe->ops.read_ber) {
+ if (fepriv->thread)
+ err = fe->ops.read_ber(fe, parg);
+ else
+ err = -EAGAIN;
+ }
break;
- case FE_ENABLE_HIGH_LNB_VOLTAGE:
- if (fe->ops.enable_high_lnb_voltage)
- err = fe->ops.enable_high_lnb_voltage(fe, (long) parg);
+ case FE_READ_SIGNAL_STRENGTH:
+ if (fe->ops.read_signal_strength) {
+ if (fepriv->thread)
+ err = fe->ops.read_signal_strength(fe, parg);
+ else
+ err = -EAGAIN;
+ }
break;
+ case FE_READ_SNR:
+ if (fe->ops.read_snr) {
+ if (fepriv->thread)
+ err = fe->ops.read_snr(fe, parg);
+ else
+ err = -EAGAIN;
+ }
+ break;
+
+ case FE_READ_UNCORRECTED_BLOCKS:
+ if (fe->ops.read_ucblocks) {
+ if (fepriv->thread)
+ err = fe->ops.read_ucblocks(fe, parg);
+ else
+ err = -EAGAIN;
+ }
+ break;
+
+ /* DEPRECATED DVBv3 ioctls */
+
case FE_SET_FRONTEND:
err = dvbv3_set_delivery_system(fe);
if (err)
@@ -2461,11 +2461,10 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
err = dtv_get_frontend(fe, &getp, parg);
break;
}
- case FE_SET_FRONTEND_TUNE_MODE:
- fepriv->tune_mode_flags = (unsigned long) parg;
- err = 0;
- break;
- }
+
+ default:
+ return -ENOTSUPP;
+ } /* switch */
return err;
}
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 907a05bde162..ace0c2fb26c2 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -180,8 +180,8 @@ enum dvbfe_search {
/**
* struct dvb_tuner_ops - Tuner information and callbacks
*
- * @info: embedded struct dvb_tuner_info with tuner properties
- * @release: callback function called when frontend is dettached.
+ * @info: embedded &struct dvb_tuner_info with tuner properties
+ * @release: callback function called when frontend is detached.
* drivers should free any allocated memory.
* @init: callback function used to initialize the tuner device.
* @sleep: callback function used to put the tuner to sleep.
@@ -191,14 +191,14 @@ enum dvbfe_search {
* resuming from suspend.
* @set_params: callback function used to inform the tuner to tune
* into a digital TV channel. The properties to be used
- * are stored at @dvb_frontend.dtv_property_cache;. The
- * tuner demod can change the parameters to reflect the
- * changes needed for the channel to be tuned, and
+ * are stored at &struct dvb_frontend.dtv_property_cache.
+ * The tuner demod can change the parameters to reflect
+ * the changes needed for the channel to be tuned, and
* update statistics. This is the recommended way to set
* the tuner parameters and should be used on newer
* drivers.
* @set_analog_params: callback function used to tune into an analog TV
- * channel on hybrid tuners. It passes @analog_parameters;
+ * channel on hybrid tuners. It passes @analog_parameters
* to the driver.
* @set_config: callback function used to send some tuner-specific
* parameters.
@@ -207,9 +207,9 @@ enum dvbfe_search {
* @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband,
* should return 0.
* @get_status: returns the frontend lock status
- * @get_rf_strength: returns the RF signal strengh. Used mostly to support
+ * @get_rf_strength: returns the RF signal strength. Used mostly to support
* analog TV and radio. Digital TV should report, instead,
- * via DVBv5 API (@dvb_frontend.dtv_property_cache;).
+ * via DVBv5 API (&struct dvb_frontend.dtv_property_cache).
* @get_afc: Used only by analog TV core. Reports the frequency
* drift due to AFC.
* @calc_regs: callback function used to pass register data settings
@@ -217,7 +217,7 @@ enum dvbfe_search {
* @set_frequency: Set a new frequency. Shouldn't be used on newer drivers.
* @set_bandwidth: Set a new frequency. Shouldn't be used on newer drivers.
*
- * NOTE: frequencies used on get_frequency and set_frequency are in Hz for
+ * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for
* terrestrial/cable or kHz for satellite.
*
*/
@@ -283,14 +283,14 @@ struct analog_demod_info {
* @set_params: callback function used to inform the demod to set the
* demodulator parameters needed to decode an analog or
* radio channel. The properties are passed via
- * struct @analog_params;.
+ * &struct analog_params.
* @has_signal: returns 0xffff if has signal, or 0 if it doesn't.
* @get_afc: Used only by analog TV core. Reports the frequency
* drift due to AFC.
* @tuner_status: callback function that returns tuner status bits, e. g.
- * TUNER_STATUS_LOCKED and TUNER_STATUS_STEREO.
+ * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO.
* @standby: set the tuner to standby mode.
- * @release: callback function called when frontend is dettached.
+ * @release: callback function called when frontend is detached.
* drivers should free any allocated memory.
* @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
* mux support instead.
@@ -321,10 +321,10 @@ struct dtv_frontend_properties;
* struct dvb_frontend_ops - Demodulation information and callbacks for
* ditialt TV
*
- * @info: embedded struct dvb_tuner_info with tuner properties
+ * @info: embedded &struct dvb_tuner_info with tuner properties
* @delsys: Delivery systems supported by the frontend
* @detach: callback function called when frontend is detached.
- * drivers should clean up, but not yet free the struct
+ * drivers should clean up, but not yet free the &struct
* dvb_frontend allocation.
* @release: callback function called when frontend is ready to be
* freed.
@@ -338,57 +338,57 @@ struct dtv_frontend_properties;
* allow other drivers to write data into their registers.
* Should not be used on new drivers.
* @tune: callback function used by demod drivers that use
- * @DVBFE_ALGO_HW; to tune into a frequency.
+ * @DVBFE_ALGO_HW to tune into a frequency.
* @get_frontend_algo: returns the desired hardware algorithm.
* @set_frontend: callback function used to inform the demod to set the
* parameters for demodulating a digital TV channel.
- * The properties to be used are stored at
- * @dvb_frontend.dtv_property_cache;. The demod can change
+ * The properties to be used are stored at &struct
+ * dvb_frontend.dtv_property_cache. The demod can change
* the parameters to reflect the changes needed for the
* channel to be decoded, and update statistics.
* @get_tune_settings: callback function
* @get_frontend: callback function used to inform the parameters
* actuall in use. The properties to be used are stored at
- * @dvb_frontend.dtv_property_cache; and update
+ * &struct dvb_frontend.dtv_property_cache and update
* statistics. Please notice that it should not return
* an error code if the statistics are not available
* because the demog is not locked.
* @read_status: returns the locking status of the frontend.
* @read_ber: legacy callback function to return the bit error rate.
* Newer drivers should provide such info via DVBv5 API,
- * e. g. @set_frontend;/@get_frontend;, implementing this
+ * e. g. @set_frontend;/@get_frontend, implementing this
* callback only if DVBv3 API compatibility is wanted.
* @read_signal_strength: legacy callback function to return the signal
* strength. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @read_snr: legacy callback function to return the Signal/Noise
* rate. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @read_ucblocks: legacy callback function to return the Uncorrected Error
* Blocks. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @diseqc_reset_overload: callback function to implement the
- * FE_DISEQC_RESET_OVERLOAD ioctl (only Satellite)
+ * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite)
* @diseqc_send_master_cmd: callback function to implement the
- * FE_DISEQC_SEND_MASTER_CMD ioctl (only Satellite).
+ * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite).
* @diseqc_recv_slave_reply: callback function to implement the
- * FE_DISEQC_RECV_SLAVE_REPLY ioctl (only Satellite)
+ * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite)
* @diseqc_send_burst: callback function to implement the
- * FE_DISEQC_SEND_BURST ioctl (only Satellite).
+ * FE_DISEQC_SEND_BURST() ioctl (only Satellite).
* @set_tone: callback function to implement the
- * FE_SET_TONE ioctl (only Satellite).
+ * FE_SET_TONE() ioctl (only Satellite).
* @set_voltage: callback function to implement the
- * FE_SET_VOLTAGE ioctl (only Satellite).
+ * FE_SET_VOLTAGE() ioctl (only Satellite).
* @enable_high_lnb_voltage: callback function to implement the
- * FE_ENABLE_HIGH_LNB_VOLTAGE ioctl (only Satellite).
+ * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite).
* @dishnetwork_send_legacy_command: callback function to implement the
- * FE_DISHNETWORK_SEND_LEGACY_CMD ioctl (only Satellite).
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite).
* Drivers should not use this, except when the DVB
* core emulation fails to provide proper support (e.g.
* if @set_voltage takes more than 8ms to work), and
@@ -399,15 +399,10 @@ struct dtv_frontend_properties;
* @ts_bus_ctrl: callback function used to take control of the TS bus.
* @set_lna: callback function to power on/off/auto the LNA.
* @search: callback function used on some custom algo search algos.
- * @tuner_ops: pointer to struct dvb_tuner_ops
- * @analog_ops: pointer to struct analog_demod_ops
- * @set_property: callback function to allow the frontend to validade
- * incoming properties. Should not be used on new drivers.
- * @get_property: callback function to allow the frontend to override
- * outcoming properties. Should not be used on new drivers.
+ * @tuner_ops: pointer to &struct dvb_tuner_ops
+ * @analog_ops: pointer to &struct analog_demod_ops
*/
struct dvb_frontend_ops {
-
struct dvb_frontend_info info;
u8 delsys[MAX_DELSYS];
@@ -466,9 +461,6 @@ struct dvb_frontend_ops {
struct dvb_tuner_ops tuner_ops;
struct analog_demod_ops analog_ops;
-
- int (*set_property)(struct dvb_frontend* fe, struct dtv_property* tvp);
- int (*get_property)(struct dvb_frontend* fe, struct dtv_property* tvp);
};
#ifdef __DVB_CORE__
@@ -565,15 +557,15 @@ struct dtv_frontend_properties {
enum fe_sec_voltage voltage;
enum fe_sec_tone_mode sectone;
- enum fe_spectral_inversion inversion;
- enum fe_code_rate fec_inner;
+ enum fe_spectral_inversion inversion;
+ enum fe_code_rate fec_inner;
enum fe_transmit_mode transmission_mode;
u32 bandwidth_hz; /* 0 = AUTO */
enum fe_guard_interval guard_interval;
- enum fe_hierarchy hierarchy;
+ enum fe_hierarchy hierarchy;
u32 symbol_rate;
- enum fe_code_rate code_rate_HP;
- enum fe_code_rate code_rate_LP;
+ enum fe_code_rate code_rate_HP;
+ enum fe_code_rate code_rate_LP;
enum fe_pilot pilot;
enum fe_rolloff rolloff;
@@ -628,11 +620,6 @@ struct dtv_frontend_properties {
struct dtv_fe_stats post_bit_count;
struct dtv_fe_stats block_error;
struct dtv_fe_stats block_count;
-
- /* private: */
- /* Cache State */
- u32 state;
-
};
#define DVB_FE_NO_EXIT 0
@@ -643,16 +630,16 @@ struct dtv_frontend_properties {
/**
* struct dvb_frontend - Frontend structure to be used on drivers.
*
- * @refcount: refcount to keep track of struct dvb_frontend
+ * @refcount: refcount to keep track of &struct dvb_frontend
* references
- * @ops: embedded struct dvb_frontend_ops
- * @dvb: pointer to struct dvb_adapter
+ * @ops: embedded &struct dvb_frontend_ops
+ * @dvb: pointer to &struct dvb_adapter
* @demodulator_priv: demod private data
* @tuner_priv: tuner private data
* @frontend_priv: frontend private data
* @sec_priv: SEC private data
* @analog_demod_priv: Analog demod private data
- * @dtv_property_cache: embedded struct dtv_frontend_properties
+ * @dtv_property_cache: embedded &struct dtv_frontend_properties
* @callback: callback function used on some drivers to call
* either the tuner or the demodulator.
* @id: Frontend ID
@@ -681,8 +668,8 @@ struct dvb_frontend {
/**
* dvb_register_frontend() - Registers a DVB frontend at the adapter
*
- * @dvb: pointer to the dvb adapter
- * @fe: pointer to the frontend struct
+ * @dvb: pointer to &struct dvb_adapter
+ * @fe: pointer to &struct dvb_frontend
*
* Allocate and initialize the private data needed by the frontend core to
* manage the frontend and calls dvb_register_device() to register a new
@@ -695,7 +682,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
/**
* dvb_unregister_frontend() - Unregisters a DVB frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* Stops the frontend kthread, calls dvb_unregister_device() and frees the
* private frontend data allocated by dvb_register_frontend().
@@ -709,14 +696,14 @@ int dvb_unregister_frontend(struct dvb_frontend *fe);
/**
* dvb_frontend_detach() - Detaches and frees frontend specific data
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function should be called after dvb_unregister_frontend(). It
* calls the SEC, tuner and demod release functions:
* &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release,
* &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release.
*
- * If the driver is compiled with CONFIG_MEDIA_ATTACH, it also decreases
+ * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases
* the module reference count, needed to allow userspace to remove the
* previously used DVB frontend modules.
*/
@@ -725,7 +712,7 @@ void dvb_frontend_detach(struct dvb_frontend *fe);
/**
* dvb_frontend_suspend() - Suspends a Digital TV frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function prepares a Digital TV frontend to suspend.
*
@@ -743,7 +730,7 @@ int dvb_frontend_suspend(struct dvb_frontend *fe);
/**
* dvb_frontend_resume() - Resumes a Digital TV frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function resumes the usual operation of the tuner after resume.
*
@@ -764,7 +751,7 @@ int dvb_frontend_resume(struct dvb_frontend *fe);
/**
* dvb_frontend_reinitialise() - forces a reinitialisation at the frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\),
* and resets SEC tone and voltage (for Satellite systems).
@@ -779,16 +766,16 @@ void dvb_frontend_reinitialise(struct dvb_frontend *fe);
* dvb_frontend_sleep_until() - Sleep for the amount of time given by
* add_usec parameter
*
- * @waketime: pointer to a struct ktime_t
+ * @waketime: pointer to &struct ktime_t
* @add_usec: time to sleep, in microseconds
*
* This function is used to measure the time required for the
- * %FE_DISHNETWORK_SEND_LEGACY_CMD ioctl to work. It needs to be as precise
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise
* as possible, as it affects the detection of the dish tone command at the
* satellite subsystem.
*
* Its used internally by the DVB frontend core, in order to emulate
- * %FE_DISHNETWORK_SEND_LEGACY_CMD using the &dvb_frontend_ops.set_voltage\(\)
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\)
* callback.
*
* NOTE: it should not be used at the drivers, as the emulation for the
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 06b0dcc13695..c018e3c06d5d 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -125,7 +125,7 @@ struct dvb_net_priv {
};
-/**
+/*
* Determine the packet's protocol ID. The rule here is that we
* assume 802.3 if the type field is short enough to be a length.
* This is normal practice and works for any 'now in use' protocol.
@@ -155,7 +155,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
rawp = skb->data;
- /**
+ /*
* This is a magic hack to spot IPX packets. Older Novell breaks
* the protocol design and runs IPX over 802.3 without an 802.2 LLC
* layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
@@ -164,7 +164,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
if (*(unsigned short *)rawp == 0xFFFF)
return htons(ETH_P_802_3);
- /**
+ /*
* Real 802.2 LLC
*/
return htons(ETH_P_802_2);
@@ -215,7 +215,8 @@ static int ule_exthdr_padding(struct dvb_net_priv *p)
return 0;
}
-/** Handle ULE extension headers.
+/*
+ * Handle ULE extension headers.
* Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
* Returns: >= 0: nr. of bytes consumed by next extension header
* -1: Mandatory extension header that is not recognized or TEST SNDU; discard.
@@ -291,7 +292,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
}
-/** Prepare for a new ULE SNDU: reset the decoder state. */
+/* Prepare for a new ULE SNDU: reset the decoder state. */
static inline void reset_ule( struct dvb_net_priv *p )
{
p->ule_skb = NULL;
@@ -304,7 +305,7 @@ static inline void reset_ule( struct dvb_net_priv *p )
p->ule_bridged = 0;
}
-/**
+/*
* Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
* TS cells of a single PID.
*/
@@ -1005,7 +1006,7 @@ static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
{
struct net_device *dev = filter->priv;
- /**
+ /*
* we rely on the DVB API definition where exactly one complete
* section is delivered in buffer1
*/
diff --git a/drivers/media/dvb-core/dvb_net.h b/drivers/media/dvb-core/dvb_net.h
index e9b18aa03e02..1eae8bad7cc1 100644
--- a/drivers/media/dvb-core/dvb_net.h
+++ b/drivers/media/dvb-core/dvb_net.h
@@ -30,6 +30,22 @@
#ifdef CONFIG_DVB_NET
+/**
+ * struct dvb_net - describes a DVB network interface
+ *
+ * @dvbdev: pointer to &struct dvb_device.
+ * @device: array of pointers to &struct net_device.
+ * @state: array of integers to each net device. A value
+ * different than zero means that the interface is
+ * in usage.
+ * @exit: flag to indicate when the device is being removed.
+ * @demux: pointer to &struct dmx_demux.
+ * @ioctl_mutex: protect access to this struct.
+ *
+ * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
+ * devices.
+ */
+
struct dvb_net {
struct dvb_device *dvbdev;
struct net_device *device[DVB_NET_DEVICES_MAX];
@@ -39,8 +55,22 @@ struct dvb_net {
struct mutex ioctl_mutex;
};
-void dvb_net_release(struct dvb_net *);
-int dvb_net_init(struct dvb_adapter *, struct dvb_net *, struct dmx_demux *);
+/**
+ * dvb_net_init - nitializes a digital TV network device and registers it.
+ *
+ * @adap: pointer to &struct dvb_adapter.
+ * @dvbnet: pointer to &struct dvb_net.
+ * @dmxdemux: pointer to &struct dmx_demux.
+ */
+int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet,
+ struct dmx_demux *dmxdemux);
+
+/**
+ * dvb_net_release - releases a digital TV network device and unregisters it.
+ *
+ * @dvbnet: pointer to &struct dvb_net.
+ */
+void dvb_net_release(struct dvb_net *dvbnet);
#else
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 41aad0f99d73..060c60ddfcc3 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -51,8 +51,15 @@ static LIST_HEAD(dvb_adapter_list);
static DEFINE_MUTEX(dvbdev_register_lock);
static const char * const dnames[] = {
- "video", "audio", "sec", "frontend", "demux", "dvr", "ca",
- "net", "osd"
+ [DVB_DEVICE_VIDEO] = "video",
+ [DVB_DEVICE_AUDIO] = "audio",
+ [DVB_DEVICE_SEC] = "sec",
+ [DVB_DEVICE_FRONTEND] = "frontend",
+ [DVB_DEVICE_DEMUX] = "demux",
+ [DVB_DEVICE_DVR] = "dvr",
+ [DVB_DEVICE_CA] = "ca",
+ [DVB_DEVICE_NET] = "net",
+ [DVB_DEVICE_OSD] = "osd"
};
#ifdef CONFIG_DVB_DYNAMIC_MINORS
@@ -60,7 +67,22 @@ static const char * const dnames[] = {
#define DVB_MAX_IDS MAX_DVB_MINORS
#else
#define DVB_MAX_IDS 4
-#define nums2minor(num, type, id) ((num << 6) | (id << 4) | type)
+
+static const u8 minor_type[] = {
+ [DVB_DEVICE_VIDEO] = 0,
+ [DVB_DEVICE_AUDIO] = 1,
+ [DVB_DEVICE_SEC] = 2,
+ [DVB_DEVICE_FRONTEND] = 3,
+ [DVB_DEVICE_DEMUX] = 4,
+ [DVB_DEVICE_DVR] = 5,
+ [DVB_DEVICE_CA] = 6,
+ [DVB_DEVICE_NET] = 7,
+ [DVB_DEVICE_OSD] = 8,
+};
+
+#define nums2minor(num, type, id) \
+ (((num) << 6) | ((id) << 4) | minor_type[type])
+
#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64)
#endif
@@ -426,8 +448,8 @@ static int dvb_register_media_device(struct dvb_device *dvbdev,
}
int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
- const struct dvb_device *template, void *priv, int type,
- int demux_sink_pads)
+ const struct dvb_device *template, void *priv,
+ enum dvb_device_type type, int demux_sink_pads)
{
struct dvb_device *dvbdev;
struct file_operations *dvbdevfops;
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 49189392cf3b..bbc1c20c0529 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -35,15 +35,37 @@
#define DVB_UNSET (-1)
-#define DVB_DEVICE_VIDEO 0
-#define DVB_DEVICE_AUDIO 1
-#define DVB_DEVICE_SEC 2
-#define DVB_DEVICE_FRONTEND 3
-#define DVB_DEVICE_DEMUX 4
-#define DVB_DEVICE_DVR 5
-#define DVB_DEVICE_CA 6
-#define DVB_DEVICE_NET 7
-#define DVB_DEVICE_OSD 8
+/* List of DVB device types */
+
+/**
+ * enum dvb_device_type - type of the Digital TV device
+ *
+ * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI)
+ * @DVB_DEVICE_FRONTEND: Digital TV frontend.
+ * @DVB_DEVICE_DEMUX: Digital TV demux.
+ * @DVB_DEVICE_DVR: Digital TV digital video record (DVR).
+ * @DVB_DEVICE_CA: Digital TV Conditional Access (CA).
+ * @DVB_DEVICE_NET: Digital TV network.
+ *
+ * @DVB_DEVICE_VIDEO: Digital TV video decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_AUDIO: Digital TV audio decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD).
+ * Deprecated. Used only on av7110.
+ */
+enum dvb_device_type {
+ DVB_DEVICE_SEC,
+ DVB_DEVICE_FRONTEND,
+ DVB_DEVICE_DEMUX,
+ DVB_DEVICE_DVR,
+ DVB_DEVICE_CA,
+ DVB_DEVICE_NET,
+
+ DVB_DEVICE_VIDEO,
+ DVB_DEVICE_AUDIO,
+ DVB_DEVICE_OSD,
+};
#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \
static short adapter_nr[] = \
@@ -104,8 +126,7 @@ struct dvb_adapter {
* @list_head: List head with all DVB devices
* @fops: pointer to struct file_operations
* @adapter: pointer to the adapter that holds this device node
- * @type: type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
- * DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
+ * @type: type of the device, as defined by &enum dvb_device_type.
* @minor: devnode minor number. Major number is always DVB_MAJOR.
* @id: device ID number, inside the adapter
* @readers: Initialized by the caller. Each call to open() in Read Only mode
@@ -135,7 +156,7 @@ struct dvb_device {
struct list_head list_head;
const struct file_operations *fops;
struct dvb_adapter *adapter;
- int type;
+ enum dvb_device_type type;
int minor;
u32 id;
@@ -194,9 +215,7 @@ int dvb_unregister_adapter(struct dvb_adapter *adap);
* stored
* @template: Template used to create &pdvbdev;
* @priv: private data
- * @type: type of the device: %DVB_DEVICE_SEC, %DVB_DEVICE_FRONTEND,
- * %DVB_DEVICE_DEMUX, %DVB_DEVICE_DVR, %DVB_DEVICE_CA,
- * %DVB_DEVICE_NET
+ * @type: type of the device, as defined by &enum dvb_device_type.
* @demux_sink_pads: Number of demux outputs, to be used to create the TS
* outputs via the Media Controller.
*/
@@ -204,7 +223,7 @@ int dvb_register_device(struct dvb_adapter *adap,
struct dvb_device **pdvbdev,
const struct dvb_device *template,
void *priv,
- int type,
+ enum dvb_device_type type,
int demux_sink_pads);
/**
@@ -242,7 +261,7 @@ void dvb_unregister_device(struct dvb_device *dvbdev);
* dvb_create_media_graph - Creates media graph for the Digital TV part of the
* device.
*
- * @adap: pointer to struct dvb_adapter
+ * @adap: pointer to &struct dvb_adapter
* @create_rf_connector: if true, it creates the RF connector too
*
* This function checks all DVB-related functions at the media controller
@@ -255,12 +274,23 @@ void dvb_unregister_device(struct dvb_device *dvbdev);
__must_check int dvb_create_media_graph(struct dvb_adapter *adap,
bool create_rf_connector);
+/**
+ * dvb_register_media_controller - registers a media controller at DVB adapter
+ *
+ * @adap: pointer to &struct dvb_adapter
+ * @mdev: pointer to &struct media_device
+ */
static inline void dvb_register_media_controller(struct dvb_adapter *adap,
struct media_device *mdev)
{
adap->mdev = mdev;
}
+/**
+ * dvb_get_media_controller - gets the associated media controller
+ *
+ * @adap: pointer to &struct dvb_adapter
+ */
static inline struct media_device
*dvb_get_media_controller(struct dvb_adapter *adap)
{
@@ -277,20 +307,71 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
#define dvb_get_media_controller(a) NULL
#endif
-int dvb_generic_open (struct inode *inode, struct file *file);
-int dvb_generic_release (struct inode *inode, struct file *file);
-long dvb_generic_ioctl (struct file *file,
- unsigned int cmd, unsigned long arg);
+/**
+ * dvb_generic_open - Digital TV open function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid, and if the permissions are
+ * OK and increment negative use count.
+ */
+int dvb_generic_open(struct inode *inode, struct file *file);
-/* we don't mess with video_usercopy() any more,
-we simply define out own dvb_usercopy(), which will hopefully become
-generic_usercopy() someday... */
+/**
+ * dvb_generic_close - Digital TV close function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid, and if the permissions are
+ * OK and decrement negative use count.
+ */
+int dvb_generic_release(struct inode *inode, struct file *file);
+/**
+ * dvb_generic_ioctl - Digital TV close function, used by DVB devices
+ *
+ * @file: pointer to &struct file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ *
+ * Checks if a DVB devnode and struct dvbdev.kernel_ioctl is still valid.
+ * If so, calls dvb_usercopy().
+ */
+long dvb_generic_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+/**
+ * dvb_usercopy - copies data from/to userspace memory when an ioctl is
+ * issued.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ * @func: function that will actually handle the ioctl
+ *
+ * Ancillary function that uses ioctl direction and size to copy from
+ * userspace. Then, it calls @func, and, if needed, data is copied back
+ * to userspace.
+ */
int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
int (*func)(struct file *file, unsigned int cmd, void *arg));
/** generic DVB attach function. */
#ifdef CONFIG_MEDIA_ATTACH
+
+/**
+ * dvb_attach - attaches a DVB frontend into the DVB core.
+ *
+ * @FUNCTION: function on a frontend module to be called.
+ * @ARGS...: @FUNCTION arguments.
+ *
+ * This ancillary function loads a frontend module in runtime and runs
+ * the @FUNCTION function there, with @ARGS.
+ * As it increments symbol usage cont, at unregister, dvb_detach()
+ * should be called.
+ */
#define dvb_attach(FUNCTION, ARGS...) ({ \
void *__r = NULL; \
typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
@@ -304,6 +385,14 @@ int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
__r; \
})
+/**
+ * dvb_detach - detaches a DVB frontend loaded via dvb_attach()
+ *
+ * @FUNC: attach function
+ *
+ * Decrements usage count for a function previously called via dvb_attach().
+ */
+
#define dvb_detach(FUNC) symbol_put_addr(FUNC)
#else
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 2631d0e0a024..d17722eb4456 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -173,7 +173,7 @@ config DVB_STB6000
tristate "ST STB6000 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
- help
+ help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_STV0299
@@ -187,7 +187,7 @@ config DVB_STV6110
tristate "ST STV6110 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
- help
+ help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_STV0900
@@ -902,7 +902,7 @@ config DVB_HELENE
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ Say Y when you want to support this frontend.
comment "Tools to develop new frontends"
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index 353274524f1b..a290722c04fd 100644
--- a/drivers/media/dvb-frontends/af9013.h
+++ b/drivers/media/dvb-frontends/af9013.h
@@ -38,6 +38,13 @@
* @api_version: Firmware API version.
* @gpio: GPIOs.
* @get_dvb_frontend: Get DVB frontend callback.
+ *
+ * AF9013/5 GPIOs (mostly guessed):
+ * * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
+ * * demod#1-gpio#1 - xtal setting (?)
+ * * demod#1-gpio#3 - tuner#1
+ * * demod#2-gpio#0 - tuner#2
+ * * demod#2-gpio#1 - xtal setting (?)
*/
struct af9013_platform_data {
/*
@@ -89,16 +96,15 @@ struct af9013_platform_data {
#define AF9013_TS_PARALLEL AF9013_TS_MODE_PARALLEL
#define AF9013_TS_SERIAL AF9013_TS_MODE_SERIAL
-/*
- * AF9013/5 GPIOs (mostly guessed)
- * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
- * demod#1-gpio#1 - xtal setting (?)
- * demod#1-gpio#3 - tuner#1
- * demod#2-gpio#0 - tuner#2
- * demod#2-gpio#1 - xtal setting (?)
- */
-
#if IS_REACHABLE(CONFIG_DVB_AF9013)
+/**
+ * Attach an af9013 demod
+ *
+ * @config: pointer to &struct af9013_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *af9013_attach(const struct af9013_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 98d575f2744c..b1c84ee914f0 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -455,11 +455,10 @@ struct dvb_frontend *as102_attach(const char *name,
struct as102_state *state;
struct dvb_frontend *fe;
- state = kzalloc(sizeof(struct as102_state), GFP_KERNEL);
- if (state == NULL) {
- pr_err("%s: unable to allocate memory for state\n", __func__);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
return NULL;
- }
+
fe = &state->frontend;
fe->demodulator_priv = state;
state->ops = ops;
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
index dc61bf7d1b09..418c565baf83 100644
--- a/drivers/media/dvb-frontends/ascot2e.h
+++ b/drivers/media/dvb-frontends/ascot2e.h
@@ -41,6 +41,15 @@ struct ascot2e_config {
};
#if IS_REACHABLE(CONFIG_DVB_ASCOT2E)
+/**
+ * Attach an ascot2e tuner
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct ascot2e_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
const struct ascot2e_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 0118c2658cf7..ee1f704f81f2 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -552,13 +552,11 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
const struct cx24113_config *config, struct i2c_adapter *i2c)
{
/* allocate memory for the internal state */
- struct cx24113_state *state =
- kzalloc(sizeof(struct cx24113_state), GFP_KERNEL);
+ struct cx24113_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
int rc;
- if (state == NULL) {
- cx_err("Unable to kzalloc\n");
- goto error;
- }
+
+ if (!state)
+ return NULL;
/* setup the state */
state->config = config;
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index e105532bfba8..8fb3f095e21c 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -221,16 +221,13 @@ static int cx24116_writereg(struct cx24116_state *state, int reg, int data)
static int cx24116_writeregN(struct cx24116_state *state, int reg,
const u8 *data, u16 len)
{
- int ret = -EREMOTEIO;
+ int ret;
struct i2c_msg msg;
u8 *buf;
buf = kmalloc(len + 1, GFP_KERNEL);
- if (buf == NULL) {
- printk("Unable to kmalloc\n");
- ret = -ENOMEM;
- goto error;
- }
+ if (!buf)
+ return -ENOMEM;
*(buf) = reg;
memcpy(buf + 1, data, len);
@@ -251,7 +248,6 @@ static int cx24116_writeregN(struct cx24116_state *state, int reg,
ret = -EREMOTEIO;
}
-error:
kfree(buf);
return ret;
@@ -1121,15 +1117,15 @@ static const struct dvb_frontend_ops cx24116_ops;
struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
struct i2c_adapter *i2c)
{
- struct cx24116_state *state = NULL;
+ struct cx24116_state *state;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct cx24116_state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
- goto error1;
+ return NULL;
state->config = config;
state->i2c = i2c;
@@ -1138,8 +1134,9 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
ret = (cx24116_readreg(state, 0xFF) << 8) |
cx24116_readreg(state, 0xFE);
if (ret != 0x0501) {
+ kfree(state);
printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n");
- goto error2;
+ return NULL;
}
/* create dvb_frontend */
@@ -1147,9 +1144,6 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
-
-error2: kfree(state);
-error1: return NULL;
}
EXPORT_SYMBOL(cx24116_attach);
diff --git a/drivers/media/dvb-frontends/cxd2820r.h b/drivers/media/dvb-frontends/cxd2820r.h
index f3ff8f6eb3bb..a49400c0e28e 100644
--- a/drivers/media/dvb-frontends/cxd2820r.h
+++ b/drivers/media/dvb-frontends/cxd2820r.h
@@ -49,7 +49,6 @@
* @gpio_chip_base: GPIO.
* @get_dvb_frontend: Get DVB frontend.
*/
-
struct cxd2820r_platform_data {
u8 ts_mode;
bool ts_clk_inv;
@@ -62,6 +61,17 @@ struct cxd2820r_platform_data {
bool attach_in_use;
};
+/**
+ * struct cxd2820r_config - configuration for cxd2020r demod
+ *
+ * @i2c_address: Demodulator I2C address. Driver determines DVB-C slave I2C
+ * address automatically from master address.
+ * Default: none, must set. Values: 0x6c, 0x6d.
+ * @ts_mode: TS output mode. Default: none, must set. Values: FIXME?
+ * @ts_clock_inv: TS clock inverted. Default: 0. Values: 0, 1.
+ * @if_agc_polarity: Default: 0. Values: 0, 1
+ * @spec_inv: Spectrum inversion. Default: 0. Values: 0, 1.
+ */
struct cxd2820r_config {
/* Demodulator I2C address.
* Driver determines DVB-C slave I2C address automatically from master
@@ -98,6 +108,18 @@ struct cxd2820r_config {
#if IS_REACHABLE(CONFIG_DVB_CXD2820R)
+/**
+ * Attach a cxd2820r demod
+ *
+ * @config: pointer to &struct cxd2820r_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ * @gpio_chip_base: if zero, disables GPIO setting. Otherwise, if
+ * CONFIG_GPIOLIB is set dynamically allocate
+ * gpio base; if is not set, use its value to
+ * setup the GPIO pins.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *cxd2820r_attach(
const struct cxd2820r_config *config,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h b/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
index 5b5421f70388..2b3af247a1f1 100644
--- a/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
+++ b/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
@@ -52,7 +52,7 @@ struct i2c_device_addr {
};
-/**
+/*
* \def IS_I2C_10BIT( addr )
* \brief Determine if I2C address 'addr' is a 10 bits address or not.
* \param addr The I2C address.
@@ -67,7 +67,7 @@ struct i2c_device_addr {
Exported FUNCTIONS
------------------------------------------------------------------------------*/
-/**
+/*
* \fn drxbsp_i2c_init()
* \brief Initialize I2C communication module.
* \return drx_status_t Return status.
@@ -76,7 +76,7 @@ Exported FUNCTIONS
*/
drx_status_t drxbsp_i2c_init(void);
-/**
+/*
* \fn drxbsp_i2c_term()
* \brief Terminate I2C communication module.
* \return drx_status_t Return status.
@@ -85,7 +85,7 @@ Exported FUNCTIONS
*/
drx_status_t drxbsp_i2c_term(void);
-/**
+/*
* \fn drx_status_t drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
* u16 w_count,
* u8 *wData,
@@ -121,7 +121,7 @@ Exported FUNCTIONS
struct i2c_device_addr *r_dev_addr,
u16 r_count, u8 *r_data);
-/**
+/*
* \fn drxbsp_i2c_error_text()
* \brief Returns a human readable error.
* Counter part of numerical drx_i2c_error_g.
@@ -130,7 +130,7 @@ Exported FUNCTIONS
*/
char *drxbsp_i2c_error_text(void);
-/**
+/*
* \var drx_i2c_error_g;
* \brief I2C specific error codes, platform dependent.
*/
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index cd69e187ba7a..855685b6b386 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -46,7 +46,7 @@ struct i2c_device_addr {
void *user_data; /* User data pointer */
};
-/**
+/*
* \def IS_I2C_10BIT( addr )
* \brief Determine if I2C address 'addr' is a 10 bits address or not.
* \param addr The I2C address.
@@ -61,7 +61,7 @@ struct i2c_device_addr {
Exported FUNCTIONS
------------------------------------------------------------------------------*/
-/**
+/*
* \fn drxbsp_i2c_init()
* \brief Initialize I2C communication module.
* \return int Return status.
@@ -70,7 +70,7 @@ Exported FUNCTIONS
*/
int drxbsp_i2c_init(void);
-/**
+/*
* \fn drxbsp_i2c_term()
* \brief Terminate I2C communication module.
* \return int Return status.
@@ -79,7 +79,7 @@ int drxbsp_i2c_init(void);
*/
int drxbsp_i2c_term(void);
-/**
+/*
* \fn int drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
* u16 w_count,
* u8 * wData,
@@ -115,7 +115,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
struct i2c_device_addr *r_dev_addr,
u16 r_count, u8 *r_data);
-/**
+/*
* \fn drxbsp_i2c_error_text()
* \brief Returns a human readable error.
* Counter part of numerical drx_i2c_error_g.
@@ -124,7 +124,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
*/
char *drxbsp_i2c_error_text(void);
-/**
+/*
* \var drx_i2c_error_g;
* \brief I2C specific error codes, platform dependent.
*/
@@ -241,13 +241,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
struct i2c_device_addr *r_dev_addr,
u16 r_count, u8 *r_data);
-/**************
+/*************
*
* This section configures the DRX Data Access Protocols (DAPs).
*
**************/
-/**
+/*
* \def DRXDAP_SINGLE_MASTER
* \brief Enable I2C single or I2C multimaster mode on host.
*
@@ -262,7 +262,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
#define DRXDAP_SINGLE_MASTER 1
#endif
-/**
+/*
* \def DRXDAP_MAX_WCHUNKSIZE
* \brief Defines maximum chunksize of an i2c write action by host.
*
@@ -282,7 +282,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
#define DRXDAP_MAX_WCHUNKSIZE 60
#endif
-/**
+/*
* \def DRXDAP_MAX_RCHUNKSIZE
* \brief Defines maximum chunksize of an i2c read action by host.
*
@@ -297,13 +297,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
#define DRXDAP_MAX_RCHUNKSIZE 60
#endif
-/**************
+/*************
*
* This section describes drxdriver defines.
*
**************/
-/**
+/*
* \def DRX_UNKNOWN
* \brief Generic UNKNOWN value for DRX enumerated types.
*
@@ -313,7 +313,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
#define DRX_UNKNOWN (254)
#endif
-/**
+/*
* \def DRX_AUTO
* \brief Generic AUTO value for DRX enumerated types.
*
@@ -324,104 +324,104 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
#define DRX_AUTO (255)
#endif
-/**************
+/*************
*
* This section describes flag definitions for the device capbilities.
*
**************/
-/**
+/*
* \brief LNA capability flag
*
* Device has a Low Noise Amplifier
*
*/
#define DRX_CAPABILITY_HAS_LNA (1UL << 0)
-/**
+/*
* \brief OOB-RX capability flag
*
* Device has OOB-RX
*
*/
#define DRX_CAPABILITY_HAS_OOBRX (1UL << 1)
-/**
+/*
* \brief ATV capability flag
*
* Device has ATV
*
*/
#define DRX_CAPABILITY_HAS_ATV (1UL << 2)
-/**
+/*
* \brief DVB-T capability flag
*
* Device has DVB-T
*
*/
#define DRX_CAPABILITY_HAS_DVBT (1UL << 3)
-/**
+/*
* \brief ITU-B capability flag
*
* Device has ITU-B
*
*/
#define DRX_CAPABILITY_HAS_ITUB (1UL << 4)
-/**
+/*
* \brief Audio capability flag
*
* Device has Audio
*
*/
#define DRX_CAPABILITY_HAS_AUD (1UL << 5)
-/**
+/*
* \brief SAW switch capability flag
*
* Device has SAW switch
*
*/
#define DRX_CAPABILITY_HAS_SAWSW (1UL << 6)
-/**
+/*
* \brief GPIO1 capability flag
*
* Device has GPIO1
*
*/
#define DRX_CAPABILITY_HAS_GPIO1 (1UL << 7)
-/**
+/*
* \brief GPIO2 capability flag
*
* Device has GPIO2
*
*/
#define DRX_CAPABILITY_HAS_GPIO2 (1UL << 8)
-/**
+/*
* \brief IRQN capability flag
*
* Device has IRQN
*
*/
#define DRX_CAPABILITY_HAS_IRQN (1UL << 9)
-/**
+/*
* \brief 8VSB capability flag
*
* Device has 8VSB
*
*/
#define DRX_CAPABILITY_HAS_8VSB (1UL << 10)
-/**
+/*
* \brief SMA-TX capability flag
*
* Device has SMATX
*
*/
#define DRX_CAPABILITY_HAS_SMATX (1UL << 11)
-/**
+/*
* \brief SMA-RX capability flag
*
* Device has SMARX
*
*/
#define DRX_CAPABILITY_HAS_SMARX (1UL << 12)
-/**
+/*
* \brief ITU-A/C capability flag
*
* Device has ITU-A/C
@@ -439,7 +439,7 @@ MACROS
DRX_VERSIONSTRING_HELP(PATCH)
#define DRX_VERSIONSTRING_HELP(NUM) #NUM
-/**
+/*
* \brief Macro to create byte array elements from 16 bit integers.
* This macro is used to create byte arrays for block writes.
* Block writes speed up I2C traffic between host and demod.
@@ -449,7 +449,7 @@ MACROS
#define DRX_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
((u8)((((u16)x)>>8)&0xFF))
-/**
+/*
* \brief Macro to convert 16 bit register value to a s32
*/
#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \
@@ -461,191 +461,191 @@ MACROS
ENUM
-------------------------------------------------------------------------*/
-/**
+/*
* \enum enum drx_standard
* \brief Modulation standards.
*/
enum drx_standard {
- DRX_STANDARD_DVBT = 0, /**< Terrestrial DVB-T. */
- DRX_STANDARD_8VSB, /**< Terrestrial 8VSB. */
- DRX_STANDARD_NTSC, /**< Terrestrial\Cable analog NTSC. */
+ DRX_STANDARD_DVBT = 0, /*< Terrestrial DVB-T. */
+ DRX_STANDARD_8VSB, /*< Terrestrial 8VSB. */
+ DRX_STANDARD_NTSC, /*< Terrestrial\Cable analog NTSC. */
DRX_STANDARD_PAL_SECAM_BG,
- /**< Terrestrial analog PAL/SECAM B/G */
+ /*< Terrestrial analog PAL/SECAM B/G */
DRX_STANDARD_PAL_SECAM_DK,
- /**< Terrestrial analog PAL/SECAM D/K */
+ /*< Terrestrial analog PAL/SECAM D/K */
DRX_STANDARD_PAL_SECAM_I,
- /**< Terrestrial analog PAL/SECAM I */
+ /*< Terrestrial analog PAL/SECAM I */
DRX_STANDARD_PAL_SECAM_L,
- /**< Terrestrial analog PAL/SECAM L
+ /*< Terrestrial analog PAL/SECAM L
with negative modulation */
DRX_STANDARD_PAL_SECAM_LP,
- /**< Terrestrial analog PAL/SECAM L
+ /*< Terrestrial analog PAL/SECAM L
with positive modulation */
- DRX_STANDARD_ITU_A, /**< Cable ITU ANNEX A. */
- DRX_STANDARD_ITU_B, /**< Cable ITU ANNEX B. */
- DRX_STANDARD_ITU_C, /**< Cable ITU ANNEX C. */
- DRX_STANDARD_ITU_D, /**< Cable ITU ANNEX D. */
- DRX_STANDARD_FM, /**< Terrestrial\Cable FM radio */
- DRX_STANDARD_DTMB, /**< Terrestrial DTMB standard (China)*/
+ DRX_STANDARD_ITU_A, /*< Cable ITU ANNEX A. */
+ DRX_STANDARD_ITU_B, /*< Cable ITU ANNEX B. */
+ DRX_STANDARD_ITU_C, /*< Cable ITU ANNEX C. */
+ DRX_STANDARD_ITU_D, /*< Cable ITU ANNEX D. */
+ DRX_STANDARD_FM, /*< Terrestrial\Cable FM radio */
+ DRX_STANDARD_DTMB, /*< Terrestrial DTMB standard (China)*/
DRX_STANDARD_UNKNOWN = DRX_UNKNOWN,
- /**< Standard unknown. */
+ /*< Standard unknown. */
DRX_STANDARD_AUTO = DRX_AUTO
- /**< Autodetect standard. */
+ /*< Autodetect standard. */
};
-/**
+/*
* \enum enum drx_standard
* \brief Modulation sub-standards.
*/
enum drx_substandard {
- DRX_SUBSTANDARD_MAIN = 0, /**< Main subvariant of standard */
+ DRX_SUBSTANDARD_MAIN = 0, /*< Main subvariant of standard */
DRX_SUBSTANDARD_ATV_BG_SCANDINAVIA,
DRX_SUBSTANDARD_ATV_DK_POLAND,
DRX_SUBSTANDARD_ATV_DK_CHINA,
DRX_SUBSTANDARD_UNKNOWN = DRX_UNKNOWN,
- /**< Sub-standard unknown. */
+ /*< Sub-standard unknown. */
DRX_SUBSTANDARD_AUTO = DRX_AUTO
- /**< Auto (default) sub-standard */
+ /*< Auto (default) sub-standard */
};
-/**
+/*
* \enum enum drx_bandwidth
* \brief Channel bandwidth or channel spacing.
*/
enum drx_bandwidth {
- DRX_BANDWIDTH_8MHZ = 0, /**< Bandwidth 8 MHz. */
- DRX_BANDWIDTH_7MHZ, /**< Bandwidth 7 MHz. */
- DRX_BANDWIDTH_6MHZ, /**< Bandwidth 6 MHz. */
+ DRX_BANDWIDTH_8MHZ = 0, /*< Bandwidth 8 MHz. */
+ DRX_BANDWIDTH_7MHZ, /*< Bandwidth 7 MHz. */
+ DRX_BANDWIDTH_6MHZ, /*< Bandwidth 6 MHz. */
DRX_BANDWIDTH_UNKNOWN = DRX_UNKNOWN,
- /**< Bandwidth unknown. */
+ /*< Bandwidth unknown. */
DRX_BANDWIDTH_AUTO = DRX_AUTO
- /**< Auto Set Bandwidth */
+ /*< Auto Set Bandwidth */
};
-/**
+/*
* \enum enum drx_mirror
* \brief Indicate if channel spectrum is mirrored or not.
*/
enum drx_mirror {
- DRX_MIRROR_NO = 0, /**< Spectrum is not mirrored. */
- DRX_MIRROR_YES, /**< Spectrum is mirrored. */
+ DRX_MIRROR_NO = 0, /*< Spectrum is not mirrored. */
+ DRX_MIRROR_YES, /*< Spectrum is mirrored. */
DRX_MIRROR_UNKNOWN = DRX_UNKNOWN,
- /**< Unknown if spectrum is mirrored. */
+ /*< Unknown if spectrum is mirrored. */
DRX_MIRROR_AUTO = DRX_AUTO
- /**< Autodetect if spectrum is mirrored. */
+ /*< Autodetect if spectrum is mirrored. */
};
-/**
+/*
* \enum enum drx_modulation
* \brief Constellation type of the channel.
*/
enum drx_modulation {
- DRX_CONSTELLATION_BPSK = 0, /**< Modulation is BPSK. */
- DRX_CONSTELLATION_QPSK, /**< Constellation is QPSK. */
- DRX_CONSTELLATION_PSK8, /**< Constellation is PSK8. */
- DRX_CONSTELLATION_QAM16, /**< Constellation is QAM16. */
- DRX_CONSTELLATION_QAM32, /**< Constellation is QAM32. */
- DRX_CONSTELLATION_QAM64, /**< Constellation is QAM64. */
- DRX_CONSTELLATION_QAM128, /**< Constellation is QAM128. */
- DRX_CONSTELLATION_QAM256, /**< Constellation is QAM256. */
- DRX_CONSTELLATION_QAM512, /**< Constellation is QAM512. */
- DRX_CONSTELLATION_QAM1024, /**< Constellation is QAM1024. */
- DRX_CONSTELLATION_QPSK_NR, /**< Constellation is QPSK_NR */
+ DRX_CONSTELLATION_BPSK = 0, /*< Modulation is BPSK. */
+ DRX_CONSTELLATION_QPSK, /*< Constellation is QPSK. */
+ DRX_CONSTELLATION_PSK8, /*< Constellation is PSK8. */
+ DRX_CONSTELLATION_QAM16, /*< Constellation is QAM16. */
+ DRX_CONSTELLATION_QAM32, /*< Constellation is QAM32. */
+ DRX_CONSTELLATION_QAM64, /*< Constellation is QAM64. */
+ DRX_CONSTELLATION_QAM128, /*< Constellation is QAM128. */
+ DRX_CONSTELLATION_QAM256, /*< Constellation is QAM256. */
+ DRX_CONSTELLATION_QAM512, /*< Constellation is QAM512. */
+ DRX_CONSTELLATION_QAM1024, /*< Constellation is QAM1024. */
+ DRX_CONSTELLATION_QPSK_NR, /*< Constellation is QPSK_NR */
DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
- /**< Constellation unknown. */
+ /*< Constellation unknown. */
DRX_CONSTELLATION_AUTO = DRX_AUTO
- /**< Autodetect constellation. */
+ /*< Autodetect constellation. */
};
-/**
+/*
* \enum enum drx_hierarchy
* \brief Hierarchy of the channel.
*/
enum drx_hierarchy {
- DRX_HIERARCHY_NONE = 0, /**< None hierarchical channel. */
- DRX_HIERARCHY_ALPHA1, /**< Hierarchical channel, alpha=1. */
- DRX_HIERARCHY_ALPHA2, /**< Hierarchical channel, alpha=2. */
- DRX_HIERARCHY_ALPHA4, /**< Hierarchical channel, alpha=4. */
+ DRX_HIERARCHY_NONE = 0, /*< None hierarchical channel. */
+ DRX_HIERARCHY_ALPHA1, /*< Hierarchical channel, alpha=1. */
+ DRX_HIERARCHY_ALPHA2, /*< Hierarchical channel, alpha=2. */
+ DRX_HIERARCHY_ALPHA4, /*< Hierarchical channel, alpha=4. */
DRX_HIERARCHY_UNKNOWN = DRX_UNKNOWN,
- /**< Hierarchy unknown. */
+ /*< Hierarchy unknown. */
DRX_HIERARCHY_AUTO = DRX_AUTO
- /**< Autodetect hierarchy. */
+ /*< Autodetect hierarchy. */
};
-/**
+/*
* \enum enum drx_priority
* \brief Channel priority in case of hierarchical transmission.
*/
enum drx_priority {
- DRX_PRIORITY_LOW = 0, /**< Low priority channel. */
- DRX_PRIORITY_HIGH, /**< High priority channel. */
+ DRX_PRIORITY_LOW = 0, /*< Low priority channel. */
+ DRX_PRIORITY_HIGH, /*< High priority channel. */
DRX_PRIORITY_UNKNOWN = DRX_UNKNOWN
- /**< Priority unknown. */
+ /*< Priority unknown. */
};
-/**
+/*
* \enum enum drx_coderate
* \brief Channel priority in case of hierarchical transmission.
*/
enum drx_coderate {
- DRX_CODERATE_1DIV2 = 0, /**< Code rate 1/2nd. */
- DRX_CODERATE_2DIV3, /**< Code rate 2/3nd. */
- DRX_CODERATE_3DIV4, /**< Code rate 3/4nd. */
- DRX_CODERATE_5DIV6, /**< Code rate 5/6nd. */
- DRX_CODERATE_7DIV8, /**< Code rate 7/8nd. */
+ DRX_CODERATE_1DIV2 = 0, /*< Code rate 1/2nd. */
+ DRX_CODERATE_2DIV3, /*< Code rate 2/3nd. */
+ DRX_CODERATE_3DIV4, /*< Code rate 3/4nd. */
+ DRX_CODERATE_5DIV6, /*< Code rate 5/6nd. */
+ DRX_CODERATE_7DIV8, /*< Code rate 7/8nd. */
DRX_CODERATE_UNKNOWN = DRX_UNKNOWN,
- /**< Code rate unknown. */
+ /*< Code rate unknown. */
DRX_CODERATE_AUTO = DRX_AUTO
- /**< Autodetect code rate. */
+ /*< Autodetect code rate. */
};
-/**
+/*
* \enum enum drx_guard
* \brief Guard interval of a channel.
*/
enum drx_guard {
- DRX_GUARD_1DIV32 = 0, /**< Guard interval 1/32nd. */
- DRX_GUARD_1DIV16, /**< Guard interval 1/16th. */
- DRX_GUARD_1DIV8, /**< Guard interval 1/8th. */
- DRX_GUARD_1DIV4, /**< Guard interval 1/4th. */
+ DRX_GUARD_1DIV32 = 0, /*< Guard interval 1/32nd. */
+ DRX_GUARD_1DIV16, /*< Guard interval 1/16th. */
+ DRX_GUARD_1DIV8, /*< Guard interval 1/8th. */
+ DRX_GUARD_1DIV4, /*< Guard interval 1/4th. */
DRX_GUARD_UNKNOWN = DRX_UNKNOWN,
- /**< Guard interval unknown. */
+ /*< Guard interval unknown. */
DRX_GUARD_AUTO = DRX_AUTO
- /**< Autodetect guard interval. */
+ /*< Autodetect guard interval. */
};
-/**
+/*
* \enum enum drx_fft_mode
* \brief FFT mode.
*/
enum drx_fft_mode {
- DRX_FFTMODE_2K = 0, /**< 2K FFT mode. */
- DRX_FFTMODE_4K, /**< 4K FFT mode. */
- DRX_FFTMODE_8K, /**< 8K FFT mode. */
+ DRX_FFTMODE_2K = 0, /*< 2K FFT mode. */
+ DRX_FFTMODE_4K, /*< 4K FFT mode. */
+ DRX_FFTMODE_8K, /*< 8K FFT mode. */
DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
- /**< FFT mode unknown. */
+ /*< FFT mode unknown. */
DRX_FFTMODE_AUTO = DRX_AUTO
- /**< Autodetect FFT mode. */
+ /*< Autodetect FFT mode. */
};
-/**
+/*
* \enum enum drx_classification
* \brief Channel classification.
*/
enum drx_classification {
- DRX_CLASSIFICATION_GAUSS = 0, /**< Gaussion noise. */
- DRX_CLASSIFICATION_HVY_GAUSS, /**< Heavy Gaussion noise. */
- DRX_CLASSIFICATION_COCHANNEL, /**< Co-channel. */
- DRX_CLASSIFICATION_STATIC, /**< Static echo. */
- DRX_CLASSIFICATION_MOVING, /**< Moving echo. */
- DRX_CLASSIFICATION_ZERODB, /**< Zero dB echo. */
+ DRX_CLASSIFICATION_GAUSS = 0, /*< Gaussion noise. */
+ DRX_CLASSIFICATION_HVY_GAUSS, /*< Heavy Gaussion noise. */
+ DRX_CLASSIFICATION_COCHANNEL, /*< Co-channel. */
+ DRX_CLASSIFICATION_STATIC, /*< Static echo. */
+ DRX_CLASSIFICATION_MOVING, /*< Moving echo. */
+ DRX_CLASSIFICATION_ZERODB, /*< Zero dB echo. */
DRX_CLASSIFICATION_UNKNOWN = DRX_UNKNOWN,
- /**< Unknown classification */
+ /*< Unknown classification */
DRX_CLASSIFICATION_AUTO = DRX_AUTO
- /**< Autodetect classification. */
+ /*< Autodetect classification. */
};
-/**
+/*
* /enum enum drx_interleave_mode
* /brief Interleave modes
*/
@@ -673,80 +673,80 @@ enum drx_interleave_mode {
DRX_INTERLEAVEMODE_B52_M48,
DRX_INTERLEAVEMODE_B52_M0,
DRX_INTERLEAVEMODE_UNKNOWN = DRX_UNKNOWN,
- /**< Unknown interleave mode */
+ /*< Unknown interleave mode */
DRX_INTERLEAVEMODE_AUTO = DRX_AUTO
- /**< Autodetect interleave mode */
+ /*< Autodetect interleave mode */
};
-/**
+/*
* \enum enum drx_carrier_mode
* \brief Channel Carrier Mode.
*/
enum drx_carrier_mode {
- DRX_CARRIER_MULTI = 0, /**< Multi carrier mode */
- DRX_CARRIER_SINGLE, /**< Single carrier mode */
+ DRX_CARRIER_MULTI = 0, /*< Multi carrier mode */
+ DRX_CARRIER_SINGLE, /*< Single carrier mode */
DRX_CARRIER_UNKNOWN = DRX_UNKNOWN,
- /**< Carrier mode unknown. */
- DRX_CARRIER_AUTO = DRX_AUTO /**< Autodetect carrier mode */
+ /*< Carrier mode unknown. */
+ DRX_CARRIER_AUTO = DRX_AUTO /*< Autodetect carrier mode */
};
-/**
+/*
* \enum enum drx_frame_mode
* \brief Channel Frame Mode.
*/
enum drx_frame_mode {
- DRX_FRAMEMODE_420 = 0, /**< 420 with variable PN */
- DRX_FRAMEMODE_595, /**< 595 */
- DRX_FRAMEMODE_945, /**< 945 with variable PN */
+ DRX_FRAMEMODE_420 = 0, /*< 420 with variable PN */
+ DRX_FRAMEMODE_595, /*< 595 */
+ DRX_FRAMEMODE_945, /*< 945 with variable PN */
DRX_FRAMEMODE_420_FIXED_PN,
- /**< 420 with fixed PN */
+ /*< 420 with fixed PN */
DRX_FRAMEMODE_945_FIXED_PN,
- /**< 945 with fixed PN */
+ /*< 945 with fixed PN */
DRX_FRAMEMODE_UNKNOWN = DRX_UNKNOWN,
- /**< Frame mode unknown. */
+ /*< Frame mode unknown. */
DRX_FRAMEMODE_AUTO = DRX_AUTO
- /**< Autodetect frame mode */
+ /*< Autodetect frame mode */
};
-/**
+/*
* \enum enum drx_tps_frame
* \brief Frame number in current super-frame.
*/
enum drx_tps_frame {
- DRX_TPS_FRAME1 = 0, /**< TPS frame 1. */
- DRX_TPS_FRAME2, /**< TPS frame 2. */
- DRX_TPS_FRAME3, /**< TPS frame 3. */
- DRX_TPS_FRAME4, /**< TPS frame 4. */
+ DRX_TPS_FRAME1 = 0, /*< TPS frame 1. */
+ DRX_TPS_FRAME2, /*< TPS frame 2. */
+ DRX_TPS_FRAME3, /*< TPS frame 3. */
+ DRX_TPS_FRAME4, /*< TPS frame 4. */
DRX_TPS_FRAME_UNKNOWN = DRX_UNKNOWN
- /**< TPS frame unknown. */
+ /*< TPS frame unknown. */
};
-/**
+/*
* \enum enum drx_ldpc
* \brief TPS LDPC .
*/
enum drx_ldpc {
- DRX_LDPC_0_4 = 0, /**< LDPC 0.4 */
- DRX_LDPC_0_6, /**< LDPC 0.6 */
- DRX_LDPC_0_8, /**< LDPC 0.8 */
+ DRX_LDPC_0_4 = 0, /*< LDPC 0.4 */
+ DRX_LDPC_0_6, /*< LDPC 0.6 */
+ DRX_LDPC_0_8, /*< LDPC 0.8 */
DRX_LDPC_UNKNOWN = DRX_UNKNOWN,
- /**< LDPC unknown. */
- DRX_LDPC_AUTO = DRX_AUTO /**< Autodetect LDPC */
+ /*< LDPC unknown. */
+ DRX_LDPC_AUTO = DRX_AUTO /*< Autodetect LDPC */
};
-/**
+/*
* \enum enum drx_pilot_mode
* \brief Pilot modes in DTMB.
*/
enum drx_pilot_mode {
- DRX_PILOT_ON = 0, /**< Pilot On */
- DRX_PILOT_OFF, /**< Pilot Off */
+ DRX_PILOT_ON = 0, /*< Pilot On */
+ DRX_PILOT_OFF, /*< Pilot Off */
DRX_PILOT_UNKNOWN = DRX_UNKNOWN,
- /**< Pilot unknown. */
- DRX_PILOT_AUTO = DRX_AUTO /**< Autodetect Pilot */
+ /*< Pilot unknown. */
+ DRX_PILOT_AUTO = DRX_AUTO /*< Autodetect Pilot */
};
-/**
+/*
* enum drxu_code_action - indicate if firmware has to be uploaded or verified.
* @UCODE_UPLOAD: Upload the microcode image to device
* @UCODE_VERIFY: Compare microcode image with code on device
@@ -756,7 +756,7 @@ enum drxu_code_action {
UCODE_VERIFY
};
-/**
+/*
* \enum enum drx_lock_status * \brief Used to reflect current lock status of demodulator.
*
* The generic lock states have device dependent semantics.
@@ -801,7 +801,7 @@ enum drx_lock_status {
DRX_LOCKED
};
-/**
+/*
* \enum enum drx_uio* \brief Used to address a User IO (UIO).
*/
enum drx_uio {
@@ -840,7 +840,7 @@ enum drx_uio {
DRX_UIO_MAX = DRX_UIO32
};
-/**
+/*
* \enum enum drxuio_mode * \brief Used to configure the modus oprandi of a UIO.
*
* DRX_UIO_MODE_FIRMWARE is an old uio mode.
@@ -850,37 +850,37 @@ enum drx_uio {
*/
enum drxuio_mode {
DRX_UIO_MODE_DISABLE = 0x01,
- /**< not used, pin is configured as input */
+ /*< not used, pin is configured as input */
DRX_UIO_MODE_READWRITE = 0x02,
- /**< used for read/write by application */
+ /*< used for read/write by application */
DRX_UIO_MODE_FIRMWARE = 0x04,
- /**< controlled by firmware, function 0 */
+ /*< controlled by firmware, function 0 */
DRX_UIO_MODE_FIRMWARE0 = DRX_UIO_MODE_FIRMWARE,
- /**< same as above */
+ /*< same as above */
DRX_UIO_MODE_FIRMWARE1 = 0x08,
- /**< controlled by firmware, function 1 */
+ /*< controlled by firmware, function 1 */
DRX_UIO_MODE_FIRMWARE2 = 0x10,
- /**< controlled by firmware, function 2 */
+ /*< controlled by firmware, function 2 */
DRX_UIO_MODE_FIRMWARE3 = 0x20,
- /**< controlled by firmware, function 3 */
+ /*< controlled by firmware, function 3 */
DRX_UIO_MODE_FIRMWARE4 = 0x40,
- /**< controlled by firmware, function 4 */
+ /*< controlled by firmware, function 4 */
DRX_UIO_MODE_FIRMWARE5 = 0x80
- /**< controlled by firmware, function 5 */
+ /*< controlled by firmware, function 5 */
};
-/**
+/*
* \enum enum drxoob_downstream_standard * \brief Used to select OOB standard.
*
* Based on ANSI 55-1 and 55-2
*/
enum drxoob_downstream_standard {
DRX_OOB_MODE_A = 0,
- /**< ANSI 55-1 */
+ /*< ANSI 55-1 */
DRX_OOB_MODE_B_GRADE_A,
- /**< ANSI 55-2 A */
+ /*< ANSI 55-2 A */
DRX_OOB_MODE_B_GRADE_B
- /**< ANSI 55-2 B */
+ /*< ANSI 55-2 B */
};
/*-------------------------------------------------------------------------
@@ -924,7 +924,7 @@ STRUCTS
/*============================================================================*/
/*============================================================================*/
-/**
+/*
* struct drxu_code_info Parameters for microcode upload and verfiy.
*
* @mc_file: microcode file name
@@ -935,7 +935,7 @@ struct drxu_code_info {
char *mc_file;
};
-/**
+/*
* \struct drx_mc_version_rec_t
* \brief Microcode version record
* Version numbers are stored in BCD format, as usual:
@@ -963,7 +963,7 @@ struct drx_mc_version_rec {
/*========================================*/
-/**
+/*
* \struct drx_filter_info_t
* \brief Parameters for loading filter coefficients
*
@@ -971,18 +971,18 @@ struct drx_mc_version_rec {
*/
struct drx_filter_info {
u8 *data_re;
- /**< pointer to coefficients for RE */
+ /*< pointer to coefficients for RE */
u8 *data_im;
- /**< pointer to coefficients for IM */
+ /*< pointer to coefficients for IM */
u16 size_re;
- /**< size of coefficients for RE */
+ /*< size of coefficients for RE */
u16 size_im;
- /**< size of coefficients for IM */
+ /*< size of coefficients for IM */
};
/*========================================*/
-/**
+/*
* \struct struct drx_channel * \brief The set of parameters describing a single channel.
*
* Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL.
@@ -991,29 +991,29 @@ struct drx_filter_info {
*/
struct drx_channel {
s32 frequency;
- /**< frequency in kHz */
+ /*< frequency in kHz */
enum drx_bandwidth bandwidth;
- /**< bandwidth */
- enum drx_mirror mirror; /**< mirrored or not on RF */
+ /*< bandwidth */
+ enum drx_mirror mirror; /*< mirrored or not on RF */
enum drx_modulation constellation;
- /**< constellation */
+ /*< constellation */
enum drx_hierarchy hierarchy;
- /**< hierarchy */
- enum drx_priority priority; /**< priority */
- enum drx_coderate coderate; /**< coderate */
- enum drx_guard guard; /**< guard interval */
- enum drx_fft_mode fftmode; /**< fftmode */
+ /*< hierarchy */
+ enum drx_priority priority; /*< priority */
+ enum drx_coderate coderate; /*< coderate */
+ enum drx_guard guard; /*< guard interval */
+ enum drx_fft_mode fftmode; /*< fftmode */
enum drx_classification classification;
- /**< classification */
+ /*< classification */
u32 symbolrate;
- /**< symbolrate in symbols/sec */
+ /*< symbolrate in symbols/sec */
enum drx_interleave_mode interleavemode;
- /**< interleaveMode QAM */
- enum drx_ldpc ldpc; /**< ldpc */
- enum drx_carrier_mode carrier; /**< carrier */
+ /*< interleaveMode QAM */
+ enum drx_ldpc ldpc; /*< ldpc */
+ enum drx_carrier_mode carrier; /*< carrier */
enum drx_frame_mode framemode;
- /**< frame mode */
- enum drx_pilot_mode pilot; /**< pilot mode */
+ /*< frame mode */
+ enum drx_pilot_mode pilot; /*< pilot mode */
};
/*========================================*/
@@ -1027,74 +1027,74 @@ enum drx_cfg_sqi_speed {
/*========================================*/
-/**
+/*
* \struct struct drx_complex * A complex number.
*
* Used by DRX_CTRL_CONSTEL.
*/
struct drx_complex {
s16 im;
- /**< Imaginary part. */
+ /*< Imaginary part. */
s16 re;
- /**< Real part. */
+ /*< Real part. */
};
/*========================================*/
-/**
+/*
* \struct struct drx_frequency_plan * Array element of a frequency plan.
*
* Used by DRX_CTRL_SCAN_INIT.
*/
struct drx_frequency_plan {
s32 first;
- /**< First centre frequency in this band */
+ /*< First centre frequency in this band */
s32 last;
- /**< Last centre frequency in this band */
+ /*< Last centre frequency in this band */
s32 step;
- /**< Stepping frequency in this band */
+ /*< Stepping frequency in this band */
enum drx_bandwidth bandwidth;
- /**< Bandwidth within this frequency band */
+ /*< Bandwidth within this frequency band */
u16 ch_number;
- /**< First channel number in this band, or first
+ /*< First channel number in this band, or first
index in ch_names */
char **ch_names;
- /**< Optional list of channel names in this
+ /*< Optional list of channel names in this
band */
};
/*========================================*/
-/**
+/*
* \struct struct drx_scan_param * Parameters for channel scan.
*
* Used by DRX_CTRL_SCAN_INIT.
*/
struct drx_scan_param {
struct drx_frequency_plan *frequency_plan;
- /**< Frequency plan (array)*/
- u16 frequency_plan_size; /**< Number of bands */
- u32 num_tries; /**< Max channels tried */
- s32 skip; /**< Minimum frequency step to take
+ /*< Frequency plan (array)*/
+ u16 frequency_plan_size; /*< Number of bands */
+ u32 num_tries; /*< Max channels tried */
+ s32 skip; /*< Minimum frequency step to take
after a channel is found */
- void *ext_params; /**< Standard specific params */
+ void *ext_params; /*< Standard specific params */
};
/*========================================*/
-/**
+/*
* \brief Scan commands.
* Used by scanning algorithms.
*/
enum drx_scan_command {
- DRX_SCAN_COMMAND_INIT = 0,/**< Initialize scanning */
- DRX_SCAN_COMMAND_NEXT, /**< Next scan */
- DRX_SCAN_COMMAND_STOP /**< Stop scanning */
+ DRX_SCAN_COMMAND_INIT = 0,/*< Initialize scanning */
+ DRX_SCAN_COMMAND_NEXT, /*< Next scan */
+ DRX_SCAN_COMMAND_STOP /*< Stop scanning */
};
/*========================================*/
-/**
+/*
* \brief Inner scan function prototype.
*/
typedef int(*drx_scan_func_t) (void *scan_context,
@@ -1104,77 +1104,77 @@ typedef int(*drx_scan_func_t) (void *scan_context,
/*========================================*/
-/**
+/*
* \struct struct drxtps_info * TPS information, DVB-T specific.
*
* Used by DRX_CTRL_TPS_INFO.
*/
struct drxtps_info {
- enum drx_fft_mode fftmode; /**< Fft mode */
- enum drx_guard guard; /**< Guard interval */
+ enum drx_fft_mode fftmode; /*< Fft mode */
+ enum drx_guard guard; /*< Guard interval */
enum drx_modulation constellation;
- /**< Constellation */
+ /*< Constellation */
enum drx_hierarchy hierarchy;
- /**< Hierarchy */
+ /*< Hierarchy */
enum drx_coderate high_coderate;
- /**< High code rate */
+ /*< High code rate */
enum drx_coderate low_coderate;
- /**< Low cod rate */
- enum drx_tps_frame frame; /**< Tps frame */
- u8 length; /**< Length */
- u16 cell_id; /**< Cell id */
+ /*< Low cod rate */
+ enum drx_tps_frame frame; /*< Tps frame */
+ u8 length; /*< Length */
+ u16 cell_id; /*< Cell id */
};
/*========================================*/
-/**
+/*
* \brief Power mode of device.
*
* Used by DRX_CTRL_SET_POWER_MODE.
*/
enum drx_power_mode {
DRX_POWER_UP = 0,
- /**< Generic , Power Up Mode */
+ /*< Generic , Power Up Mode */
DRX_POWER_MODE_1,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_2,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_3,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_4,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_5,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_6,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_7,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_8,
- /**< Device specific , Power Up Mode */
+ /*< Device specific , Power Up Mode */
DRX_POWER_MODE_9,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_MODE_10,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_MODE_11,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_MODE_12,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_MODE_13,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_MODE_14,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_MODE_15,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_MODE_16,
- /**< Device specific , Power Down Mode */
+ /*< Device specific , Power Down Mode */
DRX_POWER_DOWN = 255
- /**< Generic , Power Down Mode */
+ /*< Generic , Power Down Mode */
};
/*========================================*/
-/**
+/*
* \enum enum drx_module * \brief Software module identification.
*
* Used by DRX_CTRL_VERSION.
@@ -1191,93 +1191,93 @@ typedef int(*drx_scan_func_t) (void *scan_context,
DRX_MODULE_UNKNOWN
};
-/**
+/*
* \enum struct drx_version * \brief Version information of one software module.
*
* Used by DRX_CTRL_VERSION.
*/
struct drx_version {
enum drx_module module_type;
- /**< Type identifier of the module */
+ /*< Type identifier of the module */
char *module_name;
- /**< Name or description of module */
- u16 v_major; /**< Major version number */
- u16 v_minor; /**< Minor version number */
- u16 v_patch; /**< Patch version number */
- char *v_string; /**< Version as text string */
+ /*< Name or description of module */
+ u16 v_major; /*< Major version number */
+ u16 v_minor; /*< Minor version number */
+ u16 v_patch; /*< Patch version number */
+ char *v_string; /*< Version as text string */
};
-/**
+/*
* \enum struct drx_version_list * \brief List element of NULL terminated, linked list for version information.
*
* Used by DRX_CTRL_VERSION.
*/
struct drx_version_list {
- struct drx_version *version;/**< Version information */
+ struct drx_version *version;/*< Version information */
struct drx_version_list *next;
- /**< Next list element */
+ /*< Next list element */
};
/*========================================*/
-/**
+/*
* \brief Parameters needed to confiugure a UIO.
*
* Used by DRX_CTRL_UIO_CFG.
*/
struct drxuio_cfg {
enum drx_uio uio;
- /**< UIO identifier */
+ /*< UIO identifier */
enum drxuio_mode mode;
- /**< UIO operational mode */
+ /*< UIO operational mode */
};
/*========================================*/
-/**
+/*
* \brief Parameters needed to read from or write to a UIO.
*
* Used by DRX_CTRL_UIO_READ and DRX_CTRL_UIO_WRITE.
*/
struct drxuio_data {
enum drx_uio uio;
- /**< UIO identifier */
+ /*< UIO identifier */
bool value;
- /**< UIO value (true=1, false=0) */
+ /*< UIO value (true=1, false=0) */
};
/*========================================*/
-/**
+/*
* \brief Parameters needed to configure OOB.
*
* Used by DRX_CTRL_SET_OOB.
*/
struct drxoob {
- s32 frequency; /**< Frequency in kHz */
+ s32 frequency; /*< Frequency in kHz */
enum drxoob_downstream_standard standard;
- /**< OOB standard */
- bool spectrum_inverted; /**< If true, then spectrum
+ /*< OOB standard */
+ bool spectrum_inverted; /*< If true, then spectrum
is inverted */
};
/*========================================*/
-/**
+/*
* \brief Metrics from OOB.
*
* Used by DRX_CTRL_GET_OOB.
*/
struct drxoob_status {
- s32 frequency; /**< Frequency in Khz */
- enum drx_lock_status lock; /**< Lock status */
- u32 mer; /**< MER */
- s32 symbol_rate_offset; /**< Symbolrate offset in ppm */
+ s32 frequency; /*< Frequency in Khz */
+ enum drx_lock_status lock; /*< Lock status */
+ u32 mer; /*< MER */
+ s32 symbol_rate_offset; /*< Symbolrate offset in ppm */
};
/*========================================*/
-/**
+/*
* \brief Device dependent configuration data.
*
* Used by DRX_CTRL_SET_CFG and DRX_CTRL_GET_CFG.
@@ -1285,14 +1285,14 @@ struct drx_version_list {
*/
struct drx_cfg {
u32 cfg_type;
- /**< Function identifier */
+ /*< Function identifier */
void *cfg_data;
- /**< Function data */
+ /*< Function data */
};
/*========================================*/
-/**
+/*
* /struct DRXMpegStartWidth_t
* MStart width [nr MCLK cycles] for serial MPEG output.
*/
@@ -1303,7 +1303,7 @@ struct drx_version_list {
};
/* CTRL CFG MPEG output */
-/**
+/*
* \struct struct drx_cfg_mpeg_output * \brief Configuration parameters for MPEG output control.
*
* Used by DRX_CFG_MPEG_OUTPUT, in combination with DRX_CTRL_SET_CFG and
@@ -1311,29 +1311,29 @@ struct drx_version_list {
*/
struct drx_cfg_mpeg_output {
- bool enable_mpeg_output;/**< If true, enable MPEG output */
- bool insert_rs_byte; /**< If true, insert RS byte */
- bool enable_parallel; /**< If true, parallel out otherwise
+ bool enable_mpeg_output;/*< If true, enable MPEG output */
+ bool insert_rs_byte; /*< If true, insert RS byte */
+ bool enable_parallel; /*< If true, parallel out otherwise
serial */
- bool invert_data; /**< If true, invert DATA signals */
- bool invert_err; /**< If true, invert ERR signal */
- bool invert_str; /**< If true, invert STR signals */
- bool invert_val; /**< If true, invert VAL signals */
- bool invert_clk; /**< If true, invert CLK signals */
- bool static_clk; /**< If true, static MPEG clockrate
+ bool invert_data; /*< If true, invert DATA signals */
+ bool invert_err; /*< If true, invert ERR signal */
+ bool invert_str; /*< If true, invert STR signals */
+ bool invert_val; /*< If true, invert VAL signals */
+ bool invert_clk; /*< If true, invert CLK signals */
+ bool static_clk; /*< If true, static MPEG clockrate
will be used, otherwise clockrate
will adapt to the bitrate of the
TS */
- u32 bitrate; /**< Maximum bitrate in b/s in case
+ u32 bitrate; /*< Maximum bitrate in b/s in case
static clockrate is selected */
enum drxmpeg_str_width width_str;
- /**< MPEG start width */
+ /*< MPEG start width */
};
/*========================================*/
-/**
+/*
* \struct struct drxi2c_data * \brief Data for I2C via 2nd or 3rd or etc I2C port.
*
* Used by DRX_CTRL_I2C_READWRITE.
@@ -1341,187 +1341,187 @@ struct drx_version_list {
*
*/
struct drxi2c_data {
- u16 port_nr; /**< I2C port number */
+ u16 port_nr; /*< I2C port number */
struct i2c_device_addr *w_dev_addr;
- /**< Write device address */
- u16 w_count; /**< Size of write data in bytes */
- u8 *wData; /**< Pointer to write data */
+ /*< Write device address */
+ u16 w_count; /*< Size of write data in bytes */
+ u8 *wData; /*< Pointer to write data */
struct i2c_device_addr *r_dev_addr;
- /**< Read device address */
- u16 r_count; /**< Size of data to read in bytes */
- u8 *r_data; /**< Pointer to read buffer */
+ /*< Read device address */
+ u16 r_count; /*< Size of data to read in bytes */
+ u8 *r_data; /*< Pointer to read buffer */
};
/*========================================*/
-/**
+/*
* \enum enum drx_aud_standard * \brief Audio standard identifier.
*
* Used by DRX_CTRL_SET_AUD.
*/
enum drx_aud_standard {
- DRX_AUD_STANDARD_BTSC, /**< set BTSC standard (USA) */
- DRX_AUD_STANDARD_A2, /**< set A2-Korea FM Stereo */
- DRX_AUD_STANDARD_EIAJ, /**< set to Japanese FM Stereo */
- DRX_AUD_STANDARD_FM_STEREO,/**< set to FM-Stereo Radio */
- DRX_AUD_STANDARD_M_MONO, /**< for 4.5 MHz mono detected */
- DRX_AUD_STANDARD_D_K_MONO, /**< for 6.5 MHz mono detected */
- DRX_AUD_STANDARD_BG_FM, /**< set BG_FM standard */
- DRX_AUD_STANDARD_D_K1, /**< set D_K1 standard */
- DRX_AUD_STANDARD_D_K2, /**< set D_K2 standard */
- DRX_AUD_STANDARD_D_K3, /**< set D_K3 standard */
+ DRX_AUD_STANDARD_BTSC, /*< set BTSC standard (USA) */
+ DRX_AUD_STANDARD_A2, /*< set A2-Korea FM Stereo */
+ DRX_AUD_STANDARD_EIAJ, /*< set to Japanese FM Stereo */
+ DRX_AUD_STANDARD_FM_STEREO,/*< set to FM-Stereo Radio */
+ DRX_AUD_STANDARD_M_MONO, /*< for 4.5 MHz mono detected */
+ DRX_AUD_STANDARD_D_K_MONO, /*< for 6.5 MHz mono detected */
+ DRX_AUD_STANDARD_BG_FM, /*< set BG_FM standard */
+ DRX_AUD_STANDARD_D_K1, /*< set D_K1 standard */
+ DRX_AUD_STANDARD_D_K2, /*< set D_K2 standard */
+ DRX_AUD_STANDARD_D_K3, /*< set D_K3 standard */
DRX_AUD_STANDARD_BG_NICAM_FM,
- /**< set BG_NICAM_FM standard */
+ /*< set BG_NICAM_FM standard */
DRX_AUD_STANDARD_L_NICAM_AM,
- /**< set L_NICAM_AM standard */
+ /*< set L_NICAM_AM standard */
DRX_AUD_STANDARD_I_NICAM_FM,
- /**< set I_NICAM_FM standard */
+ /*< set I_NICAM_FM standard */
DRX_AUD_STANDARD_D_K_NICAM_FM,
- /**< set D_K_NICAM_FM standard */
- DRX_AUD_STANDARD_NOT_READY,/**< used to detect audio standard */
+ /*< set D_K_NICAM_FM standard */
+ DRX_AUD_STANDARD_NOT_READY,/*< used to detect audio standard */
DRX_AUD_STANDARD_AUTO = DRX_AUTO,
- /**< Automatic Standard Detection */
+ /*< Automatic Standard Detection */
DRX_AUD_STANDARD_UNKNOWN = DRX_UNKNOWN
- /**< used as auto and for readback */
+ /*< used as auto and for readback */
};
/* CTRL_AUD_GET_STATUS - struct drx_aud_status */
-/**
+/*
* \enum enum drx_aud_nicam_status * \brief Status of NICAM carrier.
*/
enum drx_aud_nicam_status {
DRX_AUD_NICAM_DETECTED = 0,
- /**< NICAM carrier detected */
+ /*< NICAM carrier detected */
DRX_AUD_NICAM_NOT_DETECTED,
- /**< NICAM carrier not detected */
- DRX_AUD_NICAM_BAD /**< NICAM carrier bad quality */
+ /*< NICAM carrier not detected */
+ DRX_AUD_NICAM_BAD /*< NICAM carrier bad quality */
};
-/**
+/*
* \struct struct drx_aud_status * \brief Audio status characteristics.
*/
struct drx_aud_status {
- bool stereo; /**< stereo detection */
- bool carrier_a; /**< carrier A detected */
- bool carrier_b; /**< carrier B detected */
- bool sap; /**< sap / bilingual detection */
- bool rds; /**< RDS data array present */
+ bool stereo; /*< stereo detection */
+ bool carrier_a; /*< carrier A detected */
+ bool carrier_b; /*< carrier B detected */
+ bool sap; /*< sap / bilingual detection */
+ bool rds; /*< RDS data array present */
enum drx_aud_nicam_status nicam_status;
- /**< status of NICAM carrier */
- s8 fm_ident; /**< FM Identification value */
+ /*< status of NICAM carrier */
+ s8 fm_ident; /*< FM Identification value */
};
/* CTRL_AUD_READ_RDS - DRXRDSdata_t */
-/**
+/*
* \struct DRXRDSdata_t
* \brief Raw RDS data array.
*/
struct drx_cfg_aud_rds {
- bool valid; /**< RDS data validation */
- u16 data[18]; /**< data from one RDS data array */
+ bool valid; /*< RDS data validation */
+ u16 data[18]; /*< data from one RDS data array */
};
/* DRX_CFG_AUD_VOLUME - struct drx_cfg_aud_volume - set/get */
-/**
+/*
* \enum DRXAudAVCDecayTime_t
* \brief Automatic volume control configuration.
*/
enum drx_aud_avc_mode {
- DRX_AUD_AVC_OFF, /**< Automatic volume control off */
- DRX_AUD_AVC_DECAYTIME_8S, /**< level volume in 8 seconds */
- DRX_AUD_AVC_DECAYTIME_4S, /**< level volume in 4 seconds */
- DRX_AUD_AVC_DECAYTIME_2S, /**< level volume in 2 seconds */
- DRX_AUD_AVC_DECAYTIME_20MS/**< level volume in 20 millisec */
+ DRX_AUD_AVC_OFF, /*< Automatic volume control off */
+ DRX_AUD_AVC_DECAYTIME_8S, /*< level volume in 8 seconds */
+ DRX_AUD_AVC_DECAYTIME_4S, /*< level volume in 4 seconds */
+ DRX_AUD_AVC_DECAYTIME_2S, /*< level volume in 2 seconds */
+ DRX_AUD_AVC_DECAYTIME_20MS/*< level volume in 20 millisec */
};
-/**
+/*
* /enum DRXAudMaxAVCGain_t
* /brief Automatic volume control max gain in audio baseband.
*/
enum drx_aud_avc_max_gain {
- DRX_AUD_AVC_MAX_GAIN_0DB, /**< maximum AVC gain 0 dB */
- DRX_AUD_AVC_MAX_GAIN_6DB, /**< maximum AVC gain 6 dB */
- DRX_AUD_AVC_MAX_GAIN_12DB /**< maximum AVC gain 12 dB */
+ DRX_AUD_AVC_MAX_GAIN_0DB, /*< maximum AVC gain 0 dB */
+ DRX_AUD_AVC_MAX_GAIN_6DB, /*< maximum AVC gain 6 dB */
+ DRX_AUD_AVC_MAX_GAIN_12DB /*< maximum AVC gain 12 dB */
};
-/**
+/*
* /enum DRXAudMaxAVCAtten_t
* /brief Automatic volume control max attenuation in audio baseband.
*/
enum drx_aud_avc_max_atten {
DRX_AUD_AVC_MAX_ATTEN_12DB,
- /**< maximum AVC attenuation 12 dB */
+ /*< maximum AVC attenuation 12 dB */
DRX_AUD_AVC_MAX_ATTEN_18DB,
- /**< maximum AVC attenuation 18 dB */
- DRX_AUD_AVC_MAX_ATTEN_24DB/**< maximum AVC attenuation 24 dB */
+ /*< maximum AVC attenuation 18 dB */
+ DRX_AUD_AVC_MAX_ATTEN_24DB/*< maximum AVC attenuation 24 dB */
};
-/**
+/*
* \struct struct drx_cfg_aud_volume * \brief Audio volume configuration.
*/
struct drx_cfg_aud_volume {
- bool mute; /**< mute overrides volume setting */
- s16 volume; /**< volume, range -114 to 12 dB */
- enum drx_aud_avc_mode avc_mode; /**< AVC auto volume control mode */
- u16 avc_ref_level; /**< AVC reference level */
+ bool mute; /*< mute overrides volume setting */
+ s16 volume; /*< volume, range -114 to 12 dB */
+ enum drx_aud_avc_mode avc_mode; /*< AVC auto volume control mode */
+ u16 avc_ref_level; /*< AVC reference level */
enum drx_aud_avc_max_gain avc_max_gain;
- /**< AVC max gain selection */
+ /*< AVC max gain selection */
enum drx_aud_avc_max_atten avc_max_atten;
- /**< AVC max attenuation selection */
- s16 strength_left; /**< quasi-peak, left speaker */
- s16 strength_right; /**< quasi-peak, right speaker */
+ /*< AVC max attenuation selection */
+ s16 strength_left; /*< quasi-peak, left speaker */
+ s16 strength_right; /*< quasi-peak, right speaker */
};
/* DRX_CFG_I2S_OUTPUT - struct drx_cfg_i2s_output - set/get */
-/**
+/*
* \enum enum drxi2s_mode * \brief I2S output mode.
*/
enum drxi2s_mode {
- DRX_I2S_MODE_MASTER, /**< I2S is in master mode */
- DRX_I2S_MODE_SLAVE /**< I2S is in slave mode */
+ DRX_I2S_MODE_MASTER, /*< I2S is in master mode */
+ DRX_I2S_MODE_SLAVE /*< I2S is in slave mode */
};
-/**
+/*
* \enum enum drxi2s_word_length * \brief Width of I2S data.
*/
enum drxi2s_word_length {
- DRX_I2S_WORDLENGTH_32 = 0,/**< I2S data is 32 bit wide */
- DRX_I2S_WORDLENGTH_16 = 1 /**< I2S data is 16 bit wide */
+ DRX_I2S_WORDLENGTH_32 = 0,/*< I2S data is 32 bit wide */
+ DRX_I2S_WORDLENGTH_16 = 1 /*< I2S data is 16 bit wide */
};
-/**
+/*
* \enum enum drxi2s_format * \brief Data wordstrobe alignment for I2S.
*/
enum drxi2s_format {
DRX_I2S_FORMAT_WS_WITH_DATA,
- /**< I2S data and wordstrobe are aligned */
+ /*< I2S data and wordstrobe are aligned */
DRX_I2S_FORMAT_WS_ADVANCED
- /**< I2S data one cycle after wordstrobe */
+ /*< I2S data one cycle after wordstrobe */
};
-/**
+/*
* \enum enum drxi2s_polarity * \brief Polarity of I2S data.
*/
enum drxi2s_polarity {
- DRX_I2S_POLARITY_RIGHT,/**< wordstrobe - right high, left low */
- DRX_I2S_POLARITY_LEFT /**< wordstrobe - right low, left high */
+ DRX_I2S_POLARITY_RIGHT,/*< wordstrobe - right high, left low */
+ DRX_I2S_POLARITY_LEFT /*< wordstrobe - right low, left high */
};
-/**
+/*
* \struct struct drx_cfg_i2s_output * \brief I2S output configuration.
*/
struct drx_cfg_i2s_output {
- bool output_enable; /**< I2S output enable */
- u32 frequency; /**< range from 8000-48000 Hz */
- enum drxi2s_mode mode; /**< I2S mode, master or slave */
+ bool output_enable; /*< I2S output enable */
+ u32 frequency; /*< range from 8000-48000 Hz */
+ enum drxi2s_mode mode; /*< I2S mode, master or slave */
enum drxi2s_word_length word_length;
- /**< I2S wordlength, 16 or 32 bits */
- enum drxi2s_polarity polarity;/**< I2S wordstrobe polarity */
- enum drxi2s_format format; /**< I2S wordstrobe delay to data */
+ /*< I2S wordlength, 16 or 32 bits */
+ enum drxi2s_polarity polarity;/*< I2S wordstrobe polarity */
+ enum drxi2s_format format; /*< I2S wordstrobe delay to data */
};
/* ------------------------------expert interface-----------------------------*/
-/**
+/*
* /enum enum drx_aud_fm_deemphasis * setting for FM-Deemphasis in audio demodulator.
*
*/
@@ -1531,7 +1531,7 @@ struct drx_version_list {
DRX_AUD_FM_DEEMPH_OFF
};
-/**
+/*
* /enum DRXAudDeviation_t
* setting for deviation mode in audio demodulator.
*
@@ -1541,7 +1541,7 @@ struct drx_version_list {
DRX_AUD_DEVIATION_HIGH
};
-/**
+/*
* /enum enum drx_no_carrier_option * setting for carrier, mute/noise.
*
*/
@@ -1550,7 +1550,7 @@ struct drx_version_list {
DRX_NO_CARRIER_NOISE
};
-/**
+/*
* \enum DRXAudAutoSound_t
* \brief Automatic Sound
*/
@@ -1560,7 +1560,7 @@ struct drx_version_list {
DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_OFF
};
-/**
+/*
* \enum DRXAudASSThres_t
* \brief Automatic Sound Select Thresholds
*/
@@ -1570,7 +1570,7 @@ struct drx_version_list {
u16 nicam; /* Nicam Threshold for ASS configuration */
};
-/**
+/*
* \struct struct drx_aud_carrier * \brief Carrier detection related parameters
*/
struct drx_aud_carrier {
@@ -1580,7 +1580,7 @@ struct drx_version_list {
s32 dco; /* frequency adjustment (A) */
};
-/**
+/*
* \struct struct drx_cfg_aud_carriers * \brief combining carrier A & B to one struct
*/
struct drx_cfg_aud_carriers {
@@ -1588,7 +1588,7 @@ struct drx_version_list {
struct drx_aud_carrier b;
};
-/**
+/*
* /enum enum drx_aud_i2s_src * Selection of audio source
*/
enum drx_aud_i2s_src {
@@ -1597,19 +1597,19 @@ struct drx_version_list {
DRX_AUD_SRC_STEREO_OR_A,
DRX_AUD_SRC_STEREO_OR_B};
-/**
+/*
* \enum enum drx_aud_i2s_matrix * \brief Used for selecting I2S output.
*/
enum drx_aud_i2s_matrix {
DRX_AUD_I2S_MATRIX_A_MONO,
- /**< A sound only, stereo or mono */
+ /*< A sound only, stereo or mono */
DRX_AUD_I2S_MATRIX_B_MONO,
- /**< B sound only, stereo or mono */
+ /*< B sound only, stereo or mono */
DRX_AUD_I2S_MATRIX_STEREO,
- /**< A+B sound, transparant */
- DRX_AUD_I2S_MATRIX_MONO /**< A+B mixed to mono sum, (L+R)/2 */};
+ /*< A+B sound, transparant */
+ DRX_AUD_I2S_MATRIX_MONO /*< A+B mixed to mono sum, (L+R)/2 */};
-/**
+/*
* /enum enum drx_aud_fm_matrix * setting for FM-Matrix in audio demodulator.
*
*/
@@ -1620,7 +1620,7 @@ struct drx_version_list {
DRX_AUD_FM_MATRIX_SOUND_A,
DRX_AUD_FM_MATRIX_SOUND_B};
-/**
+/*
* \struct DRXAudMatrices_t
* \brief Mixer settings
*/
@@ -1630,22 +1630,22 @@ struct drx_cfg_aud_mixer {
enum drx_aud_fm_matrix matrix_fm;
};
-/**
+/*
* \enum DRXI2SVidSync_t
* \brief Audio/video synchronization, interacts with I2S mode.
* AUTO_1 and AUTO_2 are for automatic video standard detection with preference
* for NTSC or Monochrome, because the frequencies are too close (59.94 & 60 Hz)
*/
enum drx_cfg_aud_av_sync {
- DRX_AUD_AVSYNC_OFF,/**< audio/video synchronization is off */
+ DRX_AUD_AVSYNC_OFF,/*< audio/video synchronization is off */
DRX_AUD_AVSYNC_NTSC,
- /**< it is an NTSC system */
+ /*< it is an NTSC system */
DRX_AUD_AVSYNC_MONOCHROME,
- /**< it is a MONOCHROME system */
+ /*< it is a MONOCHROME system */
DRX_AUD_AVSYNC_PAL_SECAM
- /**< it is a PAL/SECAM system */};
+ /*< it is a PAL/SECAM system */};
-/**
+/*
* \struct struct drx_cfg_aud_prescale * \brief Prescalers
*/
struct drx_cfg_aud_prescale {
@@ -1653,7 +1653,7 @@ struct drx_cfg_aud_prescale {
s16 nicam_gain;
};
-/**
+/*
* \struct struct drx_aud_beep * \brief Beep
*/
struct drx_aud_beep {
@@ -1662,14 +1662,14 @@ struct drx_aud_beep {
bool mute;
};
-/**
+/*
* \enum enum drx_aud_btsc_detect * \brief BTSC detetcion mode
*/
enum drx_aud_btsc_detect {
DRX_BTSC_STEREO,
DRX_BTSC_MONO_AND_SAP};
-/**
+/*
* \struct struct drx_aud_data * \brief Audio data structure
*/
struct drx_aud_data {
@@ -1692,7 +1692,7 @@ struct drx_aud_data {
bool rds_data_present;
};
-/**
+/*
* \enum enum drx_qam_lock_range * \brief QAM lock range mode
*/
enum drx_qam_lock_range {
@@ -1782,7 +1782,7 @@ struct drx_aud_data {
u32 wdata, /* data to write */
u32 *rdata); /* data to read */
-/**
+/*
* \struct struct drx_access_func * \brief Interface to an access protocol.
*/
struct drx_access_func {
@@ -1811,85 +1811,85 @@ struct drx_reg_dump {
/*============================================================================*/
/*============================================================================*/
-/**
+/*
* \struct struct drx_common_attr * \brief Set of common attributes, shared by all DRX devices.
*/
struct drx_common_attr {
/* Microcode (firmware) attributes */
- char *microcode_file; /**< microcode filename */
+ char *microcode_file; /*< microcode filename */
bool verify_microcode;
- /**< Use microcode verify or not. */
+ /*< Use microcode verify or not. */
struct drx_mc_version_rec mcversion;
- /**< Version record of microcode from file */
+ /*< Version record of microcode from file */
/* Clocks and tuner attributes */
s32 intermediate_freq;
- /**< IF,if tuner instance not used. (kHz)*/
+ /*< IF,if tuner instance not used. (kHz)*/
s32 sys_clock_freq;
- /**< Systemclock frequency. (kHz) */
+ /*< Systemclock frequency. (kHz) */
s32 osc_clock_freq;
- /**< Oscillator clock frequency. (kHz) */
+ /*< Oscillator clock frequency. (kHz) */
s16 osc_clock_deviation;
- /**< Oscillator clock deviation. (ppm) */
+ /*< Oscillator clock deviation. (ppm) */
bool mirror_freq_spect;
- /**< Mirror IF frequency spectrum or not.*/
+ /*< Mirror IF frequency spectrum or not.*/
/* Initial MPEG output attributes */
struct drx_cfg_mpeg_output mpeg_cfg;
- /**< MPEG configuration */
+ /*< MPEG configuration */
- bool is_opened; /**< if true instance is already opened. */
+ bool is_opened; /*< if true instance is already opened. */
/* Channel scan */
struct drx_scan_param *scan_param;
- /**< scan parameters */
+ /*< scan parameters */
u16 scan_freq_plan_index;
- /**< next index in freq plan */
+ /*< next index in freq plan */
s32 scan_next_frequency;
- /**< next freq to scan */
- bool scan_ready; /**< scan ready flag */
- u32 scan_max_channels;/**< number of channels in freqplan */
+ /*< next freq to scan */
+ bool scan_ready; /*< scan ready flag */
+ u32 scan_max_channels;/*< number of channels in freqplan */
u32 scan_channels_scanned;
- /**< number of channels scanned */
+ /*< number of channels scanned */
/* Channel scan - inner loop: demod related */
drx_scan_func_t scan_function;
- /**< function to check channel */
+ /*< function to check channel */
/* Channel scan - inner loop: SYSObj related */
- void *scan_context; /**< Context Pointer of SYSObj */
+ void *scan_context; /*< Context Pointer of SYSObj */
/* Channel scan - parameters for default DTV scan function in core driver */
u16 scan_demod_lock_timeout;
- /**< millisecs to wait for lock */
+ /*< millisecs to wait for lock */
enum drx_lock_status scan_desired_lock;
- /**< lock requirement for channel found */
+ /*< lock requirement for channel found */
/* scan_active can be used by SetChannel to decide how to program the tuner,
fast or slow (but stable). Usually fast during scan. */
- bool scan_active; /**< true when scan routines are active */
+ bool scan_active; /*< true when scan routines are active */
/* Power management */
enum drx_power_mode current_power_mode;
- /**< current power management mode */
+ /*< current power management mode */
/* Tuner */
- u8 tuner_port_nr; /**< nr of I2C port to wich tuner is */
+ u8 tuner_port_nr; /*< nr of I2C port to wich tuner is */
s32 tuner_min_freq_rf;
- /**< minimum RF input frequency, in kHz */
+ /*< minimum RF input frequency, in kHz */
s32 tuner_max_freq_rf;
- /**< maximum RF input frequency, in kHz */
- bool tuner_rf_agc_pol; /**< if true invert RF AGC polarity */
- bool tuner_if_agc_pol; /**< if true invert IF AGC polarity */
- bool tuner_slow_mode; /**< if true invert IF AGC polarity */
+ /*< maximum RF input frequency, in kHz */
+ bool tuner_rf_agc_pol; /*< if true invert RF AGC polarity */
+ bool tuner_if_agc_pol; /*< if true invert IF AGC polarity */
+ bool tuner_slow_mode; /*< if true invert IF AGC polarity */
struct drx_channel current_channel;
- /**< current channel parameters */
+ /*< current channel parameters */
enum drx_standard current_standard;
- /**< current standard selection */
+ /*< current standard selection */
enum drx_standard prev_standard;
- /**< previous standard selection */
+ /*< previous standard selection */
enum drx_standard di_cache_standard;
- /**< standard in DI cache if available */
- bool use_bootloader; /**< use bootloader in open */
- u32 capabilities; /**< capabilities flags */
- u32 product_id; /**< product ID inc. metal fix number */};
+ /*< standard in DI cache if available */
+ bool use_bootloader; /*< use bootloader in open */
+ u32 capabilities; /*< capabilities flags */
+ u32 product_id; /*< product ID inc. metal fix number */};
/*
* Generic functions for DRX devices.
@@ -1897,16 +1897,16 @@ struct drx_reg_dump {
struct drx_demod_instance;
-/**
+/*
* \struct struct drx_demod_instance * \brief Top structure of demodulator instance.
*/
struct drx_demod_instance {
- /**< data access protocol functions */
+ /*< data access protocol functions */
struct i2c_device_addr *my_i2c_dev_addr;
- /**< i2c address and device identifier */
+ /*< i2c address and device identifier */
struct drx_common_attr *my_common_attr;
- /**< common DRX attributes */
- void *my_ext_attr; /**< device specific attributes */
+ /*< common DRX attributes */
+ void *my_ext_attr; /*< device specific attributes */
/* generic demodulator data */
struct i2c_adapter *i2c;
@@ -2195,7 +2195,7 @@ Conversion from enum values to human readable form.
Access macros
-------------------------------------------------------------------------*/
-/**
+/*
* \brief Create a compilable reference to the microcode attribute
* \param d pointer to demod instance
*
@@ -2229,7 +2229,7 @@ Access macros
#define DRX_ATTR_I2CDEVID(d) ((d)->my_i2c_dev_addr->i2c_dev_id)
#define DRX_ISMCVERTYPE(x) ((x) == AUX_VER_RECORD)
-/**************************/
+/*************************/
/* Macros with device-specific handling are converted to CFG functions */
@@ -2285,7 +2285,7 @@ Access macros
#define DRX_GET_QAM_LOCKRANGE(d, x) DRX_ACCESSMACRO_GET((d), (x), \
DRX_XS_CFG_QAM_LOCKRANGE, enum drx_qam_lock_range, DRX_UNKNOWN)
-/**
+/*
* \brief Macro to check if std is an ATV standard
* \retval true std is an ATV standard
* \retval false std is an ATV standard
@@ -2298,7 +2298,7 @@ Access macros
((std) == DRX_STANDARD_NTSC) || \
((std) == DRX_STANDARD_FM))
-/**
+/*
* \brief Macro to check if std is an QAM standard
* \retval true std is an QAM standards
* \retval false std is an QAM standards
@@ -2308,14 +2308,14 @@ Access macros
((std) == DRX_STANDARD_ITU_C) || \
((std) == DRX_STANDARD_ITU_D))
-/**
+/*
* \brief Macro to check if std is VSB standard
* \retval true std is VSB standard
* \retval false std is not VSB standard
*/
#define DRX_ISVSBSTD(std) ((std) == DRX_STANDARD_8VSB)
-/**
+/*
* \brief Macro to check if std is DVBT standard
* \retval true std is DVBT standard
* \retval false std is not DVBT standard
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 499ccff557bf..8cbd8cc21059 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -73,7 +73,7 @@ INCLUDE FILES
#define DRX39XX_MAIN_FIRMWARE "dvb-fe-drxj-mc-1.0.8.fw"
-/**
+/*
* \brief Maximum u32 value.
*/
#ifndef MAX_U32
@@ -100,8 +100,8 @@ INCLUDE FILES
#ifndef OOB_DRX_DRIVE_STRENGTH
#define OOB_DRX_DRIVE_STRENGTH 0x02
#endif
-/**** START DJCOMBO patches to DRXJ registermap constants *********************/
-/**** registermap 200706071303 from drxj **************************************/
+/*** START DJCOMBO patches to DRXJ registermap constants *********************/
+/*** registermap 200706071303 from drxj **************************************/
#define ATV_TOP_CR_AMP_TH_FM 0x0
#define ATV_TOP_CR_AMP_TH_L 0xA
#define ATV_TOP_CR_AMP_TH_LP 0xA
@@ -188,7 +188,7 @@ INCLUDE FILES
#define IQM_RC_ADJ_SEL_B_OFF 0x0
#define IQM_RC_ADJ_SEL_B_QAM 0x1
#define IQM_RC_ADJ_SEL_B_VSB 0x2
-/**** END DJCOMBO patches to DRXJ registermap *********************************/
+/*** END DJCOMBO patches to DRXJ registermap *********************************/
#include "drx_driver_version.h"
@@ -208,25 +208,25 @@ DEFINES
#define DRXJ_WAKE_UP_KEY (demod->my_i2c_dev_addr->i2c_addr)
#endif
-/**
+/*
* \def DRXJ_DEF_I2C_ADDR
* \brief Default I2C address of a demodulator instance.
*/
#define DRXJ_DEF_I2C_ADDR (0x52)
-/**
+/*
* \def DRXJ_DEF_DEMOD_DEV_ID
* \brief Default device identifier of a demodultor instance.
*/
#define DRXJ_DEF_DEMOD_DEV_ID (1)
-/**
+/*
* \def DRXJ_SCAN_TIMEOUT
* \brief Timeout value for waiting on demod lock during channel scan (millisec).
*/
#define DRXJ_SCAN_TIMEOUT 1000
-/**
+/*
* \def HI_I2C_DELAY
* \brief HI timing delay for I2C timing (in nano seconds)
*
@@ -234,7 +234,7 @@ DEFINES
*/
#define HI_I2C_DELAY 42
-/**
+/*
* \def HI_I2C_BRIDGE_DELAY
* \brief HI timing delay for I2C timing (in nano seconds)
*
@@ -242,13 +242,13 @@ DEFINES
*/
#define HI_I2C_BRIDGE_DELAY 750
-/**
+/*
* \brief Time Window for MER and SER Measurement in Units of Segment duration.
*/
#define VSB_TOP_MEASUREMENT_PERIOD 64
#define SYMBOLS_PER_SEGMENT 832
-/**
+/*
* \brief bit rate and segment rate constants used for SER and BER.
*/
/* values taken from the QAM microcode */
@@ -260,21 +260,21 @@ DEFINES
#define DRXJ_QAM_SL_SIG_POWER_QAM64 43008
#define DRXJ_QAM_SL_SIG_POWER_QAM128 20992
#define DRXJ_QAM_SL_SIG_POWER_QAM256 43520
-/**
+/*
* \brief Min supported symbolrates.
*/
#ifndef DRXJ_QAM_SYMBOLRATE_MIN
#define DRXJ_QAM_SYMBOLRATE_MIN (520000)
#endif
-/**
+/*
* \brief Max supported symbolrates.
*/
#ifndef DRXJ_QAM_SYMBOLRATE_MAX
#define DRXJ_QAM_SYMBOLRATE_MAX (7233000)
#endif
-/**
+/*
* \def DRXJ_QAM_MAX_WAITTIME
* \brief Maximal wait time for QAM auto constellation in ms
*/
@@ -290,7 +290,7 @@ DEFINES
#define DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME 200
#endif
-/**
+/*
* \def SCU status and results
* \brief SCU
*/
@@ -299,7 +299,7 @@ DEFINES
#define FEC_RS_MEASUREMENT_PERIOD 12894 /* 1 sec */
#define FEC_RS_MEASUREMENT_PRESCALE 1 /* n sec */
-/**
+/*
* \def DRX_AUD_MAX_DEVIATION
* \brief Needed for calculation of prescale feature in AUD
*/
@@ -307,14 +307,14 @@ DEFINES
#define DRXJ_AUD_MAX_FM_DEVIATION 100 /* kHz */
#endif
-/**
+/*
* \brief Needed for calculation of NICAM prescale feature in AUD
*/
#ifndef DRXJ_AUD_MAX_NICAM_PRESCALE
#define DRXJ_AUD_MAX_NICAM_PRESCALE (9) /* dB */
#endif
-/**
+/*
* \brief Needed for calculation of NICAM prescale feature in AUD
*/
#ifndef DRXJ_AUD_MAX_WAITTIME
@@ -371,21 +371,21 @@ DEFINES
/*============================================================================*/
/*=== GLOBAL VARIABLEs =======================================================*/
/*============================================================================*/
-/**
+/*
*/
-/**
+/*
* \brief Temporary register definitions.
* (register definitions that are not yet available in register master)
*/
-/******************************************************************************/
+/*****************************************************************************/
/* Audio block 0x103 is write only. To avoid shadowing in driver accessing */
/* RAM adresses directly. This must be READ ONLY to avoid problems. */
/* Writing to the interface adresses is more than only writing the RAM */
/* locations */
-/******************************************************************************/
-/**
+/*****************************************************************************/
+/*
* \brief RAM location of MODUS registers
*/
#define AUD_DEM_RAM_MODUS_HI__A 0x10204A3
@@ -394,13 +394,13 @@ DEFINES
#define AUD_DEM_RAM_MODUS_LO__A 0x10204A4
#define AUD_DEM_RAM_MODUS_LO__M 0x0FFF
-/**
+/*
* \brief RAM location of I2S config registers
*/
#define AUD_DEM_RAM_I2S_CONFIG1__A 0x10204B1
#define AUD_DEM_RAM_I2S_CONFIG2__A 0x10204B2
-/**
+/*
* \brief RAM location of DCO config registers
*/
#define AUD_DEM_RAM_DCO_B_HI__A 0x1020461
@@ -408,20 +408,20 @@ DEFINES
#define AUD_DEM_RAM_DCO_A_HI__A 0x1020463
#define AUD_DEM_RAM_DCO_A_LO__A 0x1020464
-/**
+/*
* \brief RAM location of Threshold registers
*/
#define AUD_DEM_RAM_NICAM_THRSHLD__A 0x102045A
#define AUD_DEM_RAM_A2_THRSHLD__A 0x10204BB
#define AUD_DEM_RAM_BTSC_THRSHLD__A 0x10204A6
-/**
+/*
* \brief RAM location of Carrier Threshold registers
*/
#define AUD_DEM_RAM_CM_A_THRSHLD__A 0x10204AF
#define AUD_DEM_RAM_CM_B_THRSHLD__A 0x10204B0
-/**
+/*
* \brief FM Matrix register fix
*/
#ifdef AUD_DEM_WR_FM_MATRIX__A
@@ -430,7 +430,7 @@ DEFINES
#define AUD_DEM_WR_FM_MATRIX__A 0x105006F
/*============================================================================*/
-/**
+/*
* \brief Defines required for audio
*/
#define AUD_VOLUME_ZERO_DB 115
@@ -443,14 +443,14 @@ DEFINES
#define AUD_I2S_FREQUENCY_MIN 12000UL
#define AUD_RDS_ARRAY_SIZE 18
-/**
+/*
* \brief Needed for calculation of prescale feature in AUD
*/
#ifndef DRX_AUD_MAX_FM_DEVIATION
#define DRX_AUD_MAX_FM_DEVIATION (100) /* kHz */
#endif
-/**
+/*
* \brief Needed for calculation of NICAM prescale feature in AUD
*/
#ifndef DRX_AUD_MAX_NICAM_PRESCALE
@@ -478,7 +478,7 @@ DEFINES
/*=== REGISTER ACCESS MACROS =================================================*/
/*============================================================================*/
-/**
+/*
* This macro is used to create byte arrays for block writes.
* Block writes speed up I2C traffic between host and demod.
* The macro takes care of the required byte order in a 16 bits word.
@@ -486,7 +486,7 @@ DEFINES
*/
#define DRXJ_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
((u8)((((u16)x)>>8)&0xFF))
-/**
+/*
* This macro is used to convert byte array to 16 bit register value for block read.
* Block read speed up I2C traffic between host and demod.
* The macro takes care of the required byte order in a 16 bits word.
@@ -501,7 +501,7 @@ DEFINES
/*=== HI COMMAND RELATED DEFINES =============================================*/
/*============================================================================*/
-/**
+/*
* \brief General maximum number of retries for ucode command interfaces
*/
#define DRXJ_MAX_RETRIES (100)
@@ -807,7 +807,7 @@ static struct drxj_data drxj_data_g = {
},
};
-/**
+/*
* \var drxj_default_addr_g
* \brief Default I2C address and device identifier.
*/
@@ -816,7 +816,7 @@ static struct i2c_device_addr drxj_default_addr_g = {
DRXJ_DEF_DEMOD_DEV_ID /* device id */
};
-/**
+/*
* \var drxj_default_comm_attr_g
* \brief Default common attributes of a drxj demodulator instance.
*/
@@ -887,7 +887,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = {
0 /* mfx */
};
-/**
+/*
* \var drxj_default_demod_g
* \brief Default drxj demodulator instance.
*/
@@ -897,7 +897,7 @@ static struct drx_demod_instance drxj_default_demod_g = {
&drxj_data_g /* demod device specific attributes */
};
-/**
+/*
* \brief Default audio data structure for DRK demodulator instance.
*
* This structure is DRXK specific.
@@ -997,7 +997,7 @@ struct drxj_hi_cmd {
/*=== MICROCODE RELATED STRUCTURES ===========================================*/
/*============================================================================*/
-/**
+/*
* struct drxu_code_block_hdr - Structure of the microcode block headers
*
* @addr: Destination address of the data in this block
@@ -1086,7 +1086,7 @@ static u32 frac28(u32 N, u32 D)
return Q1;
}
-/**
+/*
* \fn u32 log1_times100( u32 x)
* \brief Compute: 100*log10(x)
* \param x 32 bits
@@ -1198,7 +1198,7 @@ static u32 log1_times100(u32 x)
}
-/**
+/*
* \fn u32 frac_times1e6( u16 N, u32 D)
* \brief Compute: (N/D) * 1000000.
* \param N nominator 16-bits.
@@ -1235,7 +1235,7 @@ static u32 frac_times1e6(u32 N, u32 D)
/*============================================================================*/
-/**
+/*
* \brief Values for NICAM prescaler gain. Computed from dB to integer
* and rounded. For calc used formula: 16*10^(prescaleGain[dB]/20).
*
@@ -1280,7 +1280,7 @@ static const u16 nicam_presc_table_val[43] = {
#define DRXJ_DAP_AUDTRIF_TIMEOUT 80 /* millisec */
/*============================================================================*/
-/**
+/*
* \fn bool is_handled_by_aud_tr_if( u32 addr )
* \brief Check if this address is handled by the audio token ring interface.
* \param addr
@@ -1386,7 +1386,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
/*============================================================================*/
-/******************************
+/*****************************
*
* int drxdap_fasi_read_block (
* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1498,7 +1498,7 @@ static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
}
-/******************************
+/*****************************
*
* int drxdap_fasi_read_reg16 (
* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1531,7 +1531,7 @@ static int drxdap_fasi_read_reg16(struct i2c_device_addr *dev_addr,
return rc;
}
-/******************************
+/*****************************
*
* int drxdap_fasi_read_reg32 (
* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1566,7 +1566,7 @@ static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr,
return rc;
}
-/******************************
+/*****************************
*
* int drxdap_fasi_write_block (
* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1705,7 +1705,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
return first_err;
}
-/******************************
+/*****************************
*
* int drxdap_fasi_write_reg16 (
* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1734,7 +1734,7 @@ static int drxdap_fasi_write_reg16(struct i2c_device_addr *dev_addr,
return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
}
-/******************************
+/*****************************
*
* int drxdap_fasi_read_modify_write_reg16 (
* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1778,7 +1778,7 @@ static int drxdap_fasi_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
return rc;
}
-/******************************
+/*****************************
*
* int drxdap_fasi_write_reg32 (
* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1811,7 +1811,7 @@ static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
/*============================================================================*/
-/**
+/*
* \fn int drxj_dap_rm_write_reg16short
* \brief Read modify write 16 bits audio register using short format only.
* \param dev_addr
@@ -1890,7 +1890,7 @@ static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
/*============================================================================*/
-/**
+/*
* \fn int drxj_dap_read_aud_reg16
* \brief Read 16 bits audio register
* \param dev_addr
@@ -1997,7 +1997,7 @@ static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
}
/*============================================================================*/
-/**
+/*
* \fn int drxj_dap_write_aud_reg16
* \brief Write 16 bits audio register
* \param dev_addr
@@ -2086,7 +2086,7 @@ static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr,
#define DRXJ_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
#define DRXJ_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
-/**
+/*
* \fn int drxj_dap_atomic_read_write_block()
* \brief Basic access routine for atomic read or write access
* \param dev_addr pointer to i2c dev address
@@ -2168,7 +2168,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int drxj_dap_atomic_read_reg32()
* \brief Atomic read of 32 bits words
*/
@@ -2215,7 +2215,7 @@ int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr,
/*============================================================================*/
/*============================================================================*/
-/**
+/*
* \fn int hi_cfg_command()
* \brief Configure HI with settings stored in the demod structure.
* \param demod Demodulator.
@@ -2258,7 +2258,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int hi_command()
* \brief Configure HI with settings stored in the demod structure.
* \param dev_addr I2C address.
@@ -2369,7 +2369,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int init_hi( const struct drx_demod_instance *demod )
* \brief Initialise and configurate HI.
* \param demod pointer to demod data.
@@ -2450,7 +2450,7 @@ rw_error:
/*============================================================================*/
/*============================================================================*/
-/**
+/*
* \fn int get_device_capabilities()
* \brief Get and store device capabilities.
* \param demod Pointer to demodulator instance.
@@ -2656,7 +2656,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int power_up_device()
* \brief Power up device.
* \param demod Pointer to demodulator instance.
@@ -2710,7 +2710,7 @@ static int power_up_device(struct drx_demod_instance *demod)
/*----------------------------------------------------------------------------*/
/* MPEG Output Configuration Functions - begin */
/*----------------------------------------------------------------------------*/
-/**
+/*
* \fn int ctrl_set_cfg_mpeg_output()
* \brief Set MPEG output configuration of the device.
* \param devmod Pointer to demodulator instance.
@@ -3356,7 +3356,7 @@ rw_error:
/* miscellaneous configurations - begin */
/*----------------------------------------------------------------------------*/
-/**
+/*
* \fn int set_mpegtei_handling()
* \brief Activate MPEG TEI handling settings.
* \param devmod Pointer to demodulator instance.
@@ -3429,7 +3429,7 @@ rw_error:
}
/*----------------------------------------------------------------------------*/
-/**
+/*
* \fn int bit_reverse_mpeg_output()
* \brief Set MPEG output bit-endian settings.
* \param devmod Pointer to demodulator instance.
@@ -3472,7 +3472,7 @@ rw_error:
}
/*----------------------------------------------------------------------------*/
-/**
+/*
* \fn int set_mpeg_start_width()
* \brief Set MPEG start width.
* \param devmod Pointer to demodulator instance.
@@ -3522,7 +3522,7 @@ rw_error:
/*----------------------------------------------------------------------------*/
/* UIO Configuration Functions - begin */
/*----------------------------------------------------------------------------*/
-/**
+/*
* \fn int ctrl_set_uio_cfg()
* \brief Configure modus oprandi UIO.
* \param demod Pointer to demodulator instance.
@@ -3659,7 +3659,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int ctrl_uio_write()
* \brief Write to a UIO.
* \param demod Pointer to demodulator instance.
@@ -3868,7 +3868,7 @@ rw_error:
/*----------------------------------------------------------------------------*/
/* I2C Bridge Functions - begin */
/*----------------------------------------------------------------------------*/
-/**
+/*
* \fn int ctrl_i2c_bridge()
* \brief Open or close the I2C switch to tuner.
* \param demod Pointer to demodulator instance.
@@ -3903,7 +3903,7 @@ ctrl_i2c_bridge(struct drx_demod_instance *demod, bool *bridge_closed)
/*----------------------------------------------------------------------------*/
/* Smart antenna Functions - begin */
/*----------------------------------------------------------------------------*/
-/**
+/*
* \fn int smart_ant_init()
* \brief Initialize Smart Antenna.
* \param pointer to struct drx_demod_instance.
@@ -4116,7 +4116,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int DRXJ_DAP_SCUAtomicReadWriteBlock()
* \brief Basic access routine for SCU atomic read or write access
* \param dev_addr pointer to i2c dev address
@@ -4188,7 +4188,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int DRXJ_DAP_AtomicReadReg16()
* \brief Atomic read of 16 bits words
*/
@@ -4216,7 +4216,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
}
/*============================================================================*/
-/**
+/*
* \fn int drxj_dap_scu_atomic_write_reg16()
* \brief Atomic read of 16 bits words
*/
@@ -4237,7 +4237,7 @@ int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr,
}
/* -------------------------------------------------------------------------- */
-/**
+/*
* \brief Measure result of ADC synchronisation
* \param demod demod instance
* \param count (returned) count
@@ -4297,7 +4297,7 @@ rw_error:
return rc;
}
-/**
+/*
* \brief Synchronize analog and digital clock domains
* \param demod demod instance
* \return int.
@@ -4365,7 +4365,7 @@ rw_error:
/*== 8VSB & QAM COMMON DATAPATH FUNCTIONS ==*/
/*============================================================================*/
/*============================================================================*/
-/**
+/*
* \fn int init_agc ()
* \brief Initialize AGC for all standards.
* \param demod instance of demodulator.
@@ -4741,7 +4741,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int set_frequency ()
* \brief Set frequency shift.
* \param demod instance of demodulator.
@@ -4839,7 +4839,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int get_acc_pkt_err()
* \brief Retrieve signal strength for VSB and QAM.
* \param demod Pointer to demod instance
@@ -4891,7 +4891,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int set_agc_rf ()
* \brief Configure RF AGC
* \param demod instance of demodulator.
@@ -5105,7 +5105,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int set_agc_if ()
* \brief Configure If AGC
* \param demod instance of demodulator.
@@ -5334,7 +5334,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int set_iqm_af ()
* \brief Configure IQM AF registers
* \param demod instance of demodulator.
@@ -5380,7 +5380,7 @@ rw_error:
/*============================================================================*/
/*============================================================================*/
-/**
+/*
* \fn int power_down_vsb ()
* \brief Powr down QAM related blocks.
* \param demod instance of demodulator.
@@ -5478,7 +5478,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int set_vsb_leak_n_gain ()
* \brief Set ATSC demod.
* \param demod instance of demodulator.
@@ -5694,7 +5694,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int set_vsb()
* \brief Set 8VSB demod.
* \param demod instance of demodulator.
@@ -6200,7 +6200,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn static short get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr, u16 *PckErrs)
* \brief Get the values of packet error in 8VSB mode
* \return Error code
@@ -6239,7 +6239,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn static short GetVSBBer(struct i2c_device_addr *dev_addr, u32 *ber)
* \brief Get the values of ber in VSB mode
* \return Error code
@@ -6284,7 +6284,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn static short get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber)
* \brief Get the values of ber in VSB mode
* \return Error code
@@ -6306,7 +6306,7 @@ static int get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr,
return 0;
}
-/**
+/*
* \fn static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
* \brief Get the values of MER
* \return Error code
@@ -6340,7 +6340,7 @@ rw_error:
/*============================================================================*/
/*============================================================================*/
-/**
+/*
* \fn int power_down_qam ()
* \brief Powr down QAM related blocks.
* \param demod instance of demodulator.
@@ -6444,7 +6444,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int set_qam_measurement ()
* \brief Setup of the QAM Measuremnt intervals for signal quality
* \param demod instance of demod.
@@ -6656,7 +6656,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int set_qam16 ()
* \brief QAM16 specific setup
* \param demod instance of demod.
@@ -6891,7 +6891,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int set_qam32 ()
* \brief QAM32 specific setup
* \param demod instance of demod.
@@ -7126,7 +7126,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int set_qam64 ()
* \brief QAM64 specific setup
* \param demod instance of demod.
@@ -7362,7 +7362,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int set_qam128 ()
* \brief QAM128 specific setup
* \param demod: instance of demod.
@@ -7597,7 +7597,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int set_qam256 ()
* \brief QAM256 specific setup
* \param demod: instance of demod.
@@ -7835,7 +7835,7 @@ rw_error:
#define QAM_SET_OP_CONSTELLATION 0x2
#define QAM_SET_OP_SPECTRUM 0X4
-/**
+/*
* \fn int set_qam ()
* \brief Set QAM demod.
* \param demod: instance of demod.
@@ -8845,7 +8845,7 @@ rw_error:
#define DEMOD_LOCKED 0x1
#define SYNC_FLIPPED 0x2
#define SPEC_MIRRORED 0x4
-/**
+/*
* \fn int qam64auto ()
* \brief auto do sync pattern switching and mirroring.
* \param demod: instance of demod.
@@ -8993,7 +8993,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int qam256auto ()
* \brief auto do sync pattern switching and mirroring.
* \param demod: instance of demod.
@@ -9077,7 +9077,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int set_qam_channel ()
* \brief Set QAM channel according to the requested constellation.
* \param demod: instance of demod.
@@ -9284,7 +9284,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn static short get_qamrs_err_count(struct i2c_device_addr *dev_addr)
* \brief Get RS error count in QAM mode (used for post RS BER calculation)
* \return Error code
@@ -9355,7 +9355,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int get_sig_strength()
* \brief Retrieve signal strength for VSB and QAM.
* \param demod Pointer to demod instance
@@ -9435,7 +9435,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int ctrl_get_qam_sig_quality()
* \brief Retrieve QAM signal quality from device.
* \param devmod Pointer to demodulator instance.
@@ -9721,7 +9721,7 @@ rw_error:
*/
/* -------------------------------------------------------------------------- */
-/**
+/*
* \fn int power_down_atv ()
* \brief Power down ATV.
* \param demod instance of demodulator
@@ -9822,7 +9822,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \brief Power up AUD.
* \param demod instance of demodulator
* \return int.
@@ -9850,7 +9850,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int set_orx_nsu_aox()
* \brief Configure OrxNsuAox for OOB
* \param demod instance of demodulator.
@@ -9884,7 +9884,7 @@ rw_error:
return rc;
}
-/**
+/*
* \fn int ctrl_set_oob()
* \brief Set OOB channel to be used.
* \param demod instance of demodulator
@@ -9986,9 +9986,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
20;
}
- /*********/
+ /********/
/* Stop */
- /*********/
+ /********/
rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
@@ -10004,9 +10004,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
pr_err("error %d\n", rc);
goto rw_error;
}
- /*********/
+ /********/
/* Reset */
- /*********/
+ /********/
scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
| SCU_RAM_COMMAND_CMD_DEMOD_RESET;
scu_cmd.parameter_len = 0;
@@ -10017,9 +10017,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
pr_err("error %d\n", rc);
goto rw_error;
}
- /***********/
+ /**********/
/* SET_ENV */
- /***********/
+ /**********/
/* set frequency, spectrum inversion and data rate */
scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
| SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV;
@@ -10376,9 +10376,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
pr_err("error %d\n", rc);
goto rw_error;
}
- /*********/
+ /********/
/* Start */
- /*********/
+ /********/
scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
| SCU_RAM_COMMAND_CMD_DEMOD_START;
scu_cmd.parameter_len = 0;
@@ -10419,7 +10419,7 @@ rw_error:
/*=============================================================================
===== ctrl_set_channel() ==========================================================
===========================================================================*/
-/**
+/*
* \fn int ctrl_set_channel()
* \brief Select a new transmission channel.
* \param demod instance of demod.
@@ -10652,7 +10652,7 @@ rw_error:
===== SigQuality() ==========================================================
===========================================================================*/
-/**
+/*
* \fn int ctrl_sig_quality()
* \brief Retrieve signal quality form device.
* \param devmod Pointer to demodulator instance.
@@ -10768,7 +10768,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int ctrl_lock_status()
* \brief Retrieve lock status .
* \param dev_addr Pointer to demodulator device address.
@@ -10856,7 +10856,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int ctrl_set_standard()
* \brief Set modulation standard to be used.
* \param standard Modulation standard.
@@ -11012,7 +11012,7 @@ static void drxj_reset_mode(struct drxj_data *ext_attr)
ext_attr->vsb_pre_saw_cfg.use_pre_saw = true;
}
-/**
+/*
* \fn int ctrl_power_mode()
* \brief Set the power mode of the device to the specified power mode
* \param demod Pointer to demodulator instance.
@@ -11171,7 +11171,7 @@ rw_error:
/*== CTRL Set/Get Config related functions ===================================*/
/*============================================================================*/
-/**
+/*
* \fn int ctrl_set_cfg_pre_saw()
* \brief Set Pre-saw reference.
* \param demod demod instance
@@ -11234,7 +11234,7 @@ rw_error:
/*============================================================================*/
-/**
+/*
* \fn int ctrl_set_cfg_afe_gain()
* \brief Set AFE Gain.
* \param demod demod instance
@@ -11324,7 +11324,7 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
enum drxu_code_action action);
static int drxj_set_lna_state(struct drx_demod_instance *demod, bool state);
-/**
+/*
* \fn drxj_open()
* \brief Open the demod instance, configure device, configure drxdriver
* \return Status_t Return status.
@@ -11543,7 +11543,7 @@ rw_error:
}
/*============================================================================*/
-/**
+/*
* \fn drxj_close()
* \brief Close the demod instance, power down the device
* \return Status_t Return status.
@@ -11594,7 +11594,7 @@ rw_error:
* Microcode related functions
*/
-/**
+/*
* drx_u_code_compute_crc - Compute CRC of block of microcode data.
* @block_data: Pointer to microcode data.
* @nr_words: Size of microcode block (number of 16 bits words).
@@ -11622,7 +11622,7 @@ static u16 drx_u_code_compute_crc(u8 *block_data, u16 nr_words)
return (u16)(crc_word >> 16);
}
-/**
+/*
* drx_check_firmware - checks if the loaded firmware is valid
*
* @demod: demod structure
@@ -11708,7 +11708,7 @@ eof:
return -EINVAL;
}
-/**
+/*
* drx_ctrl_u_code - Handle microcode upload or verify.
* @dev_addr: Address of device.
* @mc_info: Pointer to information about microcode data.
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.h b/drivers/media/dvb-frontends/drx39xyj/drxj.h
index 6c5b8f78f9f6..d3ee1c23bb2f 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.h
@@ -69,15 +69,15 @@ TYPEDEFS
struct drxjscu_cmd {
u16 command;
- /**< Command number */
+ /*< Command number */
u16 parameter_len;
- /**< Data length in byte */
+ /*< Data length in byte */
u16 result_len;
- /**< result length in byte */
+ /*< result length in byte */
u16 *parameter;
- /**< General purpous param */
+ /*< General purpous param */
u16 *result;
- /**< General purpous param */};
+ /*< General purpous param */};
/*============================================================================*/
/*============================================================================*/
@@ -130,7 +130,7 @@ TYPEDEFS
DRXJ_CFG_MAX /* dummy, never to be used */};
-/**
+/*
* /struct enum drxj_cfg_smart_ant_io * smart antenna i/o.
*/
enum drxj_cfg_smart_ant_io {
@@ -138,7 +138,7 @@ enum drxj_cfg_smart_ant_io {
DRXJ_SMT_ANT_INPUT
};
-/**
+/*
* /struct struct drxj_cfg_smart_ant * Set smart antenna.
*/
struct drxj_cfg_smart_ant {
@@ -146,7 +146,7 @@ enum drxj_cfg_smart_ant_io {
u16 ctrl_data;
};
-/**
+/*
* /struct DRXJAGCSTATUS_t
* AGC status information from the DRXJ-IQM-AF.
*/
@@ -158,7 +158,7 @@ struct drxj_agc_status {
/* DRXJ_CFG_AGC_RF, DRXJ_CFG_AGC_IF */
-/**
+/*
* /struct enum drxj_agc_ctrl_mode * Available AGCs modes in the DRXJ.
*/
enum drxj_agc_ctrl_mode {
@@ -166,7 +166,7 @@ struct drxj_agc_status {
DRX_AGC_CTRL_USER,
DRX_AGC_CTRL_OFF};
-/**
+/*
* /struct struct drxj_cfg_agc * Generic interface for all AGCs present on the DRXJ.
*/
struct drxj_cfg_agc {
@@ -182,7 +182,7 @@ struct drxj_agc_status {
/* DRXJ_CFG_PRE_SAW */
-/**
+/*
* /struct struct drxj_cfg_pre_saw * Interface to configure pre SAW sense.
*/
struct drxj_cfg_pre_saw {
@@ -192,14 +192,14 @@ struct drxj_agc_status {
/* DRXJ_CFG_AFE_GAIN */
-/**
+/*
* /struct struct drxj_cfg_afe_gain * Interface to configure gain of AFE (LNA + PGA).
*/
struct drxj_cfg_afe_gain {
enum drx_standard standard; /* standard to which these settings apply */
u16 gain; /* gain in 0.1 dB steps, DRXJ range 140 .. 335 */};
-/**
+/*
* /struct drxjrs_errors
* Available failure information in DRXJ_FEC_RS.
*
@@ -208,25 +208,25 @@ struct drxj_agc_status {
*/
struct drxjrs_errors {
u16 nr_bit_errors;
- /**< no of pre RS bit errors */
+ /*< no of pre RS bit errors */
u16 nr_symbol_errors;
- /**< no of pre RS symbol errors */
+ /*< no of pre RS symbol errors */
u16 nr_packet_errors;
- /**< no of pre RS packet errors */
+ /*< no of pre RS packet errors */
u16 nr_failures;
- /**< no of post RS failures to decode */
+ /*< no of post RS failures to decode */
u16 nr_snc_par_fail_count;
- /**< no of post RS bit erros */
+ /*< no of post RS bit erros */
};
-/**
+/*
* /struct struct drxj_cfg_vsb_misc * symbol error rate
*/
struct drxj_cfg_vsb_misc {
u32 symb_error;
- /**< symbol error rate sps */};
+ /*< symbol error rate sps */};
-/**
+/*
* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
*
*/
@@ -234,7 +234,7 @@ struct drxj_agc_status {
DRXJ_MPEG_START_WIDTH_1CLKCYC,
DRXJ_MPEG_START_WIDTH_8CLKCYC};
-/**
+/*
* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
*
*/
@@ -247,20 +247,20 @@ struct drxj_agc_status {
DRXJ_MPEGOUTPUT_CLOCK_RATE_25313K,
DRXJ_MPEGOUTPUT_CLOCK_RATE_21696K};
-/**
+/*
* /struct DRXJCfgMisc_t
* Change TEI bit of MPEG output
* reverse MPEG output bit order
* set MPEG output clock rate
*/
struct drxj_cfg_mpeg_output_misc {
- bool disable_tei_handling; /**< if true pass (not change) TEI bit */
- bool bit_reverse_mpeg_outout; /**< if true, parallel: msb on MD0; serial: lsb out first */
+ bool disable_tei_handling; /*< if true pass (not change) TEI bit */
+ bool bit_reverse_mpeg_outout; /*< if true, parallel: msb on MD0; serial: lsb out first */
enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
- /**< set MPEG output clock rate that overwirtes the derived one from symbol rate */
- enum drxj_mpeg_start_width mpeg_start_width; /**< set MPEG output start width */};
+ /*< set MPEG output clock rate that overwirtes the derived one from symbol rate */
+ enum drxj_mpeg_start_width mpeg_start_width; /*< set MPEG output start width */};
-/**
+/*
* /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
*/
enum drxj_xtal_freq {
@@ -269,21 +269,21 @@ struct drxj_agc_status {
DRXJ_XTAL_FREQ_20P25MHZ,
DRXJ_XTAL_FREQ_4MHZ};
-/**
+/*
* /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
*/
enum drxji2c_speed {
DRXJ_I2C_SPEED_400KBPS,
DRXJ_I2C_SPEED_100KBPS};
-/**
+/*
* /struct struct drxj_cfg_hw_cfg * Get hw configuration, such as crystal reference frequency, I2C speed, etc...
*/
struct drxj_cfg_hw_cfg {
enum drxj_xtal_freq xtal_freq;
- /**< crystal reference frequency */
+ /*< crystal reference frequency */
enum drxji2c_speed i2c_speed;
- /**< 100 or 400 kbps */};
+ /*< 100 or 400 kbps */};
/*
* DRXJ_CFG_ATV_MISC
@@ -352,7 +352,7 @@ struct drxj_cfg_oob_misc {
* DRXJ_CFG_ATV_OUTPUT
*/
-/**
+/*
* /enum DRXJAttenuation_t
* Attenuation setting for SIF AGC.
*
@@ -363,7 +363,7 @@ struct drxj_cfg_oob_misc {
DRXJ_SIF_ATTENUATION_6DB,
DRXJ_SIF_ATTENUATION_9DB};
-/**
+/*
* /struct struct drxj_cfg_atv_output * SIF attenuation setting.
*
*/
@@ -398,7 +398,7 @@ struct drxj_cfg_atv_output {
/*============================================================================*/
/*========================================*/
-/**
+/*
* /struct struct drxj_data * DRXJ specific attributes.
*
* Global data container for DRXJ specific data.
@@ -406,93 +406,93 @@ struct drxj_cfg_atv_output {
*/
struct drxj_data {
/* device capabilties (determined during drx_open()) */
- bool has_lna; /**< true if LNA (aka PGA) present */
- bool has_oob; /**< true if OOB supported */
- bool has_ntsc; /**< true if NTSC supported */
- bool has_btsc; /**< true if BTSC supported */
- bool has_smatx; /**< true if mat_tx is available */
- bool has_smarx; /**< true if mat_rx is available */
- bool has_gpio; /**< true if GPIO is available */
- bool has_irqn; /**< true if IRQN is available */
+ bool has_lna; /*< true if LNA (aka PGA) present */
+ bool has_oob; /*< true if OOB supported */
+ bool has_ntsc; /*< true if NTSC supported */
+ bool has_btsc; /*< true if BTSC supported */
+ bool has_smatx; /*< true if mat_tx is available */
+ bool has_smarx; /*< true if mat_rx is available */
+ bool has_gpio; /*< true if GPIO is available */
+ bool has_irqn; /*< true if IRQN is available */
/* A1/A2/A... */
- u8 mfx; /**< metal fix */
+ u8 mfx; /*< metal fix */
/* tuner settings */
- bool mirror_freq_spect_oob;/**< tuner inversion (true = tuner mirrors the signal */
+ bool mirror_freq_spect_oob;/*< tuner inversion (true = tuner mirrors the signal */
/* standard/channel settings */
- enum drx_standard standard; /**< current standard information */
+ enum drx_standard standard; /*< current standard information */
enum drx_modulation constellation;
- /**< current constellation */
- s32 frequency; /**< center signal frequency in KHz */
+ /*< current constellation */
+ s32 frequency; /*< center signal frequency in KHz */
enum drx_bandwidth curr_bandwidth;
- /**< current channel bandwidth */
- enum drx_mirror mirror; /**< current channel mirror */
+ /*< current channel bandwidth */
+ enum drx_mirror mirror; /*< current channel mirror */
/* signal quality information */
- u32 fec_bits_desired; /**< BER accounting period */
- u16 fec_vd_plen; /**< no of trellis symbols: VD SER measurement period */
- u16 qam_vd_prescale; /**< Viterbi Measurement Prescale */
- u16 qam_vd_period; /**< Viterbi Measurement period */
- u16 fec_rs_plen; /**< defines RS BER measurement period */
- u16 fec_rs_prescale; /**< ReedSolomon Measurement Prescale */
- u16 fec_rs_period; /**< ReedSolomon Measurement period */
- bool reset_pkt_err_acc; /**< Set a flag to reset accumulated packet error */
- u16 pkt_err_acc_start; /**< Set a flag to reset accumulated packet error */
+ u32 fec_bits_desired; /*< BER accounting period */
+ u16 fec_vd_plen; /*< no of trellis symbols: VD SER measurement period */
+ u16 qam_vd_prescale; /*< Viterbi Measurement Prescale */
+ u16 qam_vd_period; /*< Viterbi Measurement period */
+ u16 fec_rs_plen; /*< defines RS BER measurement period */
+ u16 fec_rs_prescale; /*< ReedSolomon Measurement Prescale */
+ u16 fec_rs_period; /*< ReedSolomon Measurement period */
+ bool reset_pkt_err_acc; /*< Set a flag to reset accumulated packet error */
+ u16 pkt_err_acc_start; /*< Set a flag to reset accumulated packet error */
/* HI configuration */
- u16 hi_cfg_timing_div; /**< HI Configure() parameter 2 */
- u16 hi_cfg_bridge_delay; /**< HI Configure() parameter 3 */
- u16 hi_cfg_wake_up_key; /**< HI Configure() parameter 4 */
- u16 hi_cfg_ctrl; /**< HI Configure() parameter 5 */
- u16 hi_cfg_transmit; /**< HI Configure() parameter 6 */
+ u16 hi_cfg_timing_div; /*< HI Configure() parameter 2 */
+ u16 hi_cfg_bridge_delay; /*< HI Configure() parameter 3 */
+ u16 hi_cfg_wake_up_key; /*< HI Configure() parameter 4 */
+ u16 hi_cfg_ctrl; /*< HI Configure() parameter 5 */
+ u16 hi_cfg_transmit; /*< HI Configure() parameter 6 */
/* UIO configuration */
- enum drxuio_mode uio_sma_rx_mode;/**< current mode of SmaRx pin */
- enum drxuio_mode uio_sma_tx_mode;/**< current mode of SmaTx pin */
- enum drxuio_mode uio_gpio_mode; /**< current mode of ASEL pin */
- enum drxuio_mode uio_irqn_mode; /**< current mode of IRQN pin */
+ enum drxuio_mode uio_sma_rx_mode;/*< current mode of SmaRx pin */
+ enum drxuio_mode uio_sma_tx_mode;/*< current mode of SmaTx pin */
+ enum drxuio_mode uio_gpio_mode; /*< current mode of ASEL pin */
+ enum drxuio_mode uio_irqn_mode; /*< current mode of IRQN pin */
/* IQM fs frequecy shift and inversion */
- u32 iqm_fs_rate_ofs; /**< frequency shifter setting after setchannel */
- bool pos_image; /**< Ture: positive image */
+ u32 iqm_fs_rate_ofs; /*< frequency shifter setting after setchannel */
+ bool pos_image; /*< Ture: positive image */
/* IQM RC frequecy shift */
- u32 iqm_rc_rate_ofs; /**< frequency shifter setting after setchannel */
+ u32 iqm_rc_rate_ofs; /*< frequency shifter setting after setchannel */
/* ATV configuration */
- u32 atv_cfg_changed_flags; /**< flag: flags cfg changes */
- s16 atv_top_equ0[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU0__A */
- s16 atv_top_equ1[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU1__A */
- s16 atv_top_equ2[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU2__A */
- s16 atv_top_equ3[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU3__A */
- bool phase_correction_bypass;/**< flag: true=bypass */
- s16 atv_top_vid_peak; /**< shadow of ATV_TOP_VID_PEAK__A */
- u16 atv_top_noise_th; /**< shadow of ATV_TOP_NOISE_TH__A */
- bool enable_cvbs_output; /**< flag CVBS ouput enable */
- bool enable_sif_output; /**< flag SIF ouput enable */
+ u32 atv_cfg_changed_flags; /*< flag: flags cfg changes */
+ s16 atv_top_equ0[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU0__A */
+ s16 atv_top_equ1[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU1__A */
+ s16 atv_top_equ2[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU2__A */
+ s16 atv_top_equ3[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU3__A */
+ bool phase_correction_bypass;/*< flag: true=bypass */
+ s16 atv_top_vid_peak; /*< shadow of ATV_TOP_VID_PEAK__A */
+ u16 atv_top_noise_th; /*< shadow of ATV_TOP_NOISE_TH__A */
+ bool enable_cvbs_output; /*< flag CVBS ouput enable */
+ bool enable_sif_output; /*< flag SIF ouput enable */
enum drxjsif_attenuation sif_attenuation;
- /**< current SIF att setting */
+ /*< current SIF att setting */
/* Agc configuration for QAM and VSB */
- struct drxj_cfg_agc qam_rf_agc_cfg; /**< qam RF AGC config */
- struct drxj_cfg_agc qam_if_agc_cfg; /**< qam IF AGC config */
- struct drxj_cfg_agc vsb_rf_agc_cfg; /**< vsb RF AGC config */
- struct drxj_cfg_agc vsb_if_agc_cfg; /**< vsb IF AGC config */
+ struct drxj_cfg_agc qam_rf_agc_cfg; /*< qam RF AGC config */
+ struct drxj_cfg_agc qam_if_agc_cfg; /*< qam IF AGC config */
+ struct drxj_cfg_agc vsb_rf_agc_cfg; /*< vsb RF AGC config */
+ struct drxj_cfg_agc vsb_if_agc_cfg; /*< vsb IF AGC config */
/* PGA gain configuration for QAM and VSB */
- u16 qam_pga_cfg; /**< qam PGA config */
- u16 vsb_pga_cfg; /**< vsb PGA config */
+ u16 qam_pga_cfg; /*< qam PGA config */
+ u16 vsb_pga_cfg; /*< vsb PGA config */
/* Pre SAW configuration for QAM and VSB */
struct drxj_cfg_pre_saw qam_pre_saw_cfg;
- /**< qam pre SAW config */
+ /*< qam pre SAW config */
struct drxj_cfg_pre_saw vsb_pre_saw_cfg;
- /**< qam pre SAW config */
+ /*< qam pre SAW config */
/* Version information */
- char v_text[2][12]; /**< allocated text versions */
- struct drx_version v_version[2]; /**< allocated versions structs */
+ char v_text[2][12]; /*< allocated text versions */
+ struct drx_version v_version[2]; /*< allocated versions structs */
struct drx_version_list v_list_elements[2];
- /**< allocated version list */
+ /*< allocated version list */
/* smart antenna configuration */
bool smart_ant_inverted;
@@ -502,25 +502,25 @@ struct drxj_cfg_atv_output {
bool oob_power_on;
/* MPEG static bitrate setting */
- u32 mpeg_ts_static_bitrate; /**< bitrate static MPEG output */
- bool disable_te_ihandling; /**< MPEG TS TEI handling */
- bool bit_reverse_mpeg_outout;/**< MPEG output bit order */
+ u32 mpeg_ts_static_bitrate; /*< bitrate static MPEG output */
+ bool disable_te_ihandling; /*< MPEG TS TEI handling */
+ bool bit_reverse_mpeg_outout;/*< MPEG output bit order */
enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
- /**< MPEG output clock rate */
+ /*< MPEG output clock rate */
enum drxj_mpeg_start_width mpeg_start_width;
- /**< MPEG Start width */
+ /*< MPEG Start width */
/* Pre SAW & Agc configuration for ATV */
struct drxj_cfg_pre_saw atv_pre_saw_cfg;
- /**< atv pre SAW config */
- struct drxj_cfg_agc atv_rf_agc_cfg; /**< atv RF AGC config */
- struct drxj_cfg_agc atv_if_agc_cfg; /**< atv IF AGC config */
- u16 atv_pga_cfg; /**< atv pga config */
+ /*< atv pre SAW config */
+ struct drxj_cfg_agc atv_rf_agc_cfg; /*< atv RF AGC config */
+ struct drxj_cfg_agc atv_if_agc_cfg; /*< atv IF AGC config */
+ u16 atv_pga_cfg; /*< atv pga config */
u32 curr_symbol_rate;
/* pin-safe mode */
- bool pdr_safe_mode; /**< PDR safe mode activated */
+ bool pdr_safe_mode; /*< PDR safe mode activated */
u16 pdr_safe_restore_val_gpio;
u16 pdr_safe_restore_val_v_sync;
u16 pdr_safe_restore_val_sma_rx;
@@ -531,12 +531,12 @@ struct drxj_cfg_atv_output {
enum drxj_cfg_oob_lo_power oob_lo_pow;
struct drx_aud_data aud_data;
- /**< audio storage */};
+ /*< audio storage */};
/*-------------------------------------------------------------------------
Access MACROS
-------------------------------------------------------------------------*/
-/**
+/*
* \brief Compilable references to attributes
* \param d pointer to demod instance
*
@@ -554,7 +554,7 @@ Access MACROS
DEFINES
-------------------------------------------------------------------------*/
-/**
+/*
* \def DRXJ_NTSC_CARRIER_FREQ_OFFSET
* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
*
@@ -569,7 +569,7 @@ DEFINES
*/
#define DRXJ_NTSC_CARRIER_FREQ_OFFSET ((s32)(1750))
-/**
+/*
* \def DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET
* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
*
@@ -585,7 +585,7 @@ DEFINES
*/
#define DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET ((s32)(2375))
-/**
+/*
* \def DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET
* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
*
@@ -601,7 +601,7 @@ DEFINES
*/
#define DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET ((s32)(2775))
-/**
+/*
* \def DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET
* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
*
@@ -616,7 +616,7 @@ DEFINES
*/
#define DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET ((s32)(-3255))
-/**
+/*
* \def DRXJ_FM_CARRIER_FREQ_OFFSET
* \brief Offset from sound carrier to centre frequency in kHz, in RF domain
*
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 7d04400b18dd..0696bc62dcc9 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -328,7 +328,7 @@ static int WriteTable(struct drxd_state *state, u8 * pTable)
{
int status = 0;
- if (pTable == NULL)
+ if (!pTable)
return 0;
while (!status) {
@@ -640,7 +640,7 @@ static int SetCfgIfAgc(struct drxd_state *state, struct SCfgAgc *cfg)
const u16 maxRur = 8;
static const u16 slowIncrDecLUT[] = {
3, 4, 4, 5, 6 };
- const u16 fastIncrDecLUT[] = {
+ static const u16 fastIncrDecLUT[] = {
14, 15, 15, 16,
17, 18, 18, 19,
20, 21, 22, 23,
@@ -909,9 +909,8 @@ static int load_firmware(struct drxd_state *state, const char *fw_name)
}
state->microcode = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (state->microcode == NULL) {
+ if (!state->microcode) {
release_firmware(fw);
- printk(KERN_ERR "drxd: firmware load failure: no memory\n");
return -ENOMEM;
}
@@ -2630,7 +2629,7 @@ static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
break;
/* Apply I2c address patch to B1 */
- if (!state->type_A && state->m_HiI2cPatch != NULL) {
+ if (!state->type_A && state->m_HiI2cPatch) {
status = WriteTable(state, state->m_HiI2cPatch);
if (status < 0)
break;
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index eb9bdc9f59c4..b16fedbb53a3 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -20,17 +20,18 @@
* @antenna_dvbt: GPIO bit for changing antenna to DVB-C. A value of 1
* means that 1=DVBC, 0 = DVBT. Zero means the opposite.
* @mpeg_out_clk_strength: DRXK Mpeg output clock drive strength.
+ * @chunk_size: maximum size for I2C messages
* @microcode_name: Name of the firmware file with the microcode
* @qam_demod_parameter_count: The number of parameters used for the command
* to set the demodulator parameters. All
* firmwares are using the 2-parameter commmand.
- * An exception is the "drxk_a3.mc" firmware,
+ * An exception is the ``drxk_a3.mc`` firmware,
* which uses the 4-parameter command.
* A value of 0 (default) or lower indicates that
* the correct number of parameters will be
* automatically detected.
*
- * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
+ * On the ``*_gpio`` vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
* UIO-3.
*/
struct drxk_config {
@@ -52,6 +53,14 @@ struct drxk_config {
};
#if IS_REACHABLE(CONFIG_DVB_DRXK)
+/**
+ * Attach a drxk demod
+ *
+ * @config: pointer to &struct drxk_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 48a8aad47a74..f59ac2e91c59 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -207,9 +207,9 @@ static inline u32 log10times100(u32 value)
return (100L * intlog10(value)) >> 24;
}
-/****************************************************************************/
+/***************************************************************************/
/* I2C **********************************************************************/
-/****************************************************************************/
+/***************************************************************************/
static int drxk_i2c_lock(struct drxk_state *state)
{
@@ -3444,7 +3444,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Activate DVBT specific presets
* \param demod instance of demodulator.
* \return DRXStatus_t.
@@ -3484,7 +3484,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Initialize channelswitch-independent settings for DVBT.
* \param demod instance of demodulator.
* \return DRXStatus_t.
@@ -3696,7 +3696,7 @@ error:
}
/*============================================================================*/
-/**
+/*
* \brief start dvbt demodulating for channel.
* \param demod instance of demodulator.
* \return DRXStatus_t.
@@ -3732,7 +3732,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Set up dvbt demodulator for channel.
* \param demod instance of demodulator.
* \return DRXStatus_t.
@@ -4086,7 +4086,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Retrieve lock status .
* \param demod Pointer to demodulator instance.
* \param lockStat Pointer to lock status structure.
@@ -4148,7 +4148,7 @@ static int power_up_qam(struct drxk_state *state)
}
-/** Power Down QAM */
+/* Power Down QAM */
static int power_down_qam(struct drxk_state *state)
{
u16 data = 0;
@@ -4186,7 +4186,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Setup of the QAM Measurement intervals for signal quality
* \param demod instance of demod.
* \param modulation current modulation.
@@ -4461,7 +4461,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief QAM32 specific setup
* \param demod instance of demod.
* \return DRXStatus_t.
@@ -4657,7 +4657,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief QAM64 specific setup
* \param demod instance of demod.
* \return DRXStatus_t.
@@ -4852,7 +4852,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief QAM128 specific setup
* \param demod: instance of demod.
* \return DRXStatus_t.
@@ -5049,7 +5049,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief QAM256 specific setup
* \param demod: instance of demod.
* \return DRXStatus_t.
@@ -5244,7 +5244,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Reset QAM block.
* \param demod: instance of demod.
* \param channel: pointer to channel data.
@@ -5272,7 +5272,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Set QAM symbolrate.
* \param demod: instance of demod.
* \param channel: pointer to channel data.
@@ -5341,7 +5341,7 @@ error:
/*============================================================================*/
-/**
+/*
* \brief Get QAM lock status.
* \param demod: instance of demod.
* \param channel: pointer to channel data.
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 0b17a45c5640..bd4f8278c906 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -277,10 +277,8 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
u8 *buf;
buf = kmalloc(33, GFP_KERNEL);
- if (buf == NULL) {
- printk(KERN_ERR "Unable to kmalloc\n");
+ if (!buf)
return -ENOMEM;
- }
*(buf) = reg;
@@ -835,17 +833,15 @@ static const struct dvb_frontend_ops ds3000_ops;
struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
struct i2c_adapter *i2c)
{
- struct ds3000_state *state = NULL;
+ struct ds3000_state *state;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct ds3000_state), GFP_KERNEL);
- if (state == NULL) {
- printk(KERN_ERR "Unable to kmalloc\n");
- goto error2;
- }
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
state->config = config;
state->i2c = i2c;
@@ -854,8 +850,9 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
/* check if the demod is present */
ret = ds3000_readreg(state, 0x00) & 0xfe;
if (ret != 0xe0) {
+ kfree(state);
printk(KERN_ERR "Invalid probe, probably not a DS3000\n");
- goto error3;
+ return NULL;
}
printk(KERN_INFO "DS3000 chip version: %d.%d attached.\n",
@@ -873,11 +870,6 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
*/
ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
return &state->frontend;
-
-error3:
- kfree(state);
-error2:
- return NULL;
}
EXPORT_SYMBOL(ds3000_attach);
diff --git a/drivers/media/dvb-frontends/dvb-pll.h b/drivers/media/dvb-frontends/dvb-pll.h
index 6aaa9c6bff9c..212e0730f154 100644
--- a/drivers/media/dvb-frontends/dvb-pll.h
+++ b/drivers/media/dvb-frontends/dvb-pll.h
@@ -30,16 +30,17 @@
#define DVB_PLL_TDEE4 18
#define DVB_PLL_THOMSON_DTT7520X 19
+#if IS_REACHABLE(CONFIG_DVB_PLL)
/**
* Attach a dvb-pll to the supplied frontend structure.
*
- * @param fe Frontend to attach to.
- * @param pll_addr i2c address of the PLL (if used).
- * @param i2c i2c adapter to use (set to NULL if not used).
- * @param pll_desc_id dvb_pll_desc to use.
- * @return Frontend pointer on success, NULL on failure
+ * @fe: Frontend to attach to.
+ * @pll_addr: i2c address of the PLL (if used).
+ * @i2c: i2c adapter to use (set to NULL if not used).
+ * @pll_desc_id: dvb_pll_desc to use.
+ *
+ * return: Frontend pointer on success, NULL on failure
*/
-#if IS_REACHABLE(CONFIG_DVB_PLL)
extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
int pll_addr,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
index 333615491d9e..c9fc81c7e4e7 100644
--- a/drivers/media/dvb-frontends/helene.h
+++ b/drivers/media/dvb-frontends/helene.h
@@ -38,6 +38,7 @@ enum helene_xtal {
* @set_tuner_priv: Callback function private context
* @set_tuner_callback: Callback function that notifies the parent driver
* which tuner is active now
+ * @xtal: Cristal frequency as described by &enum helene_xtal
*/
struct helene_config {
u8 i2c_address;
@@ -48,9 +49,31 @@ struct helene_config {
};
#if IS_REACHABLE(CONFIG_DVB_HELENE)
+/**
+ * Attach a helene tuner (terrestrial and cable standards)
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct helene_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
const struct helene_config *config,
struct i2c_adapter *i2c);
+
+/**
+ * Attach a helene tuner (satellite standards)
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct helene_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
+extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+ const struct helene_config *config,
+ struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
const struct helene_config *config,
@@ -59,13 +82,6 @@ static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-#endif
-
-#if IS_REACHABLE(CONFIG_DVB_HELENE)
-extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
- const struct helene_config *config,
- struct i2c_adapter *i2c);
-#else
static inline struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
const struct helene_config *config,
struct i2c_adapter *i2c)
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
index 672a556df71a..9157fd037e2f 100644
--- a/drivers/media/dvb-frontends/horus3a.h
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -41,6 +41,15 @@ struct horus3a_config {
};
#if IS_REACHABLE(CONFIG_DVB_HORUS3A)
+/**
+ * Attach a horus3a tuner
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct helene_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
const struct horus3a_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 534b24fa2b95..965012ad5c59 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -1,4 +1,4 @@
-/**
+/*
* Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
*
* Copyright (C) 2010 Malcolm Priestley
@@ -36,7 +36,7 @@ struct ix2505v_state {
u32 frequency;
};
-/**
+/*
* Data read format of the Sharp IX2505V B0017
*
* byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1
@@ -99,7 +99,7 @@ static void ix2505v_release(struct dvb_frontend *fe)
}
-/**
+/*
* Data write format of the Sharp IX2505V B0017
*
* byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h
index 0b0a431c74f6..49ed93e754ed 100644
--- a/drivers/media/dvb-frontends/ix2505v.h
+++ b/drivers/media/dvb-frontends/ix2505v.h
@@ -20,31 +20,33 @@
#include "dvb_frontend.h"
/**
- * Attach a ix2505v tuner to the supplied frontend structure.
+ * struct ix2505v_config - ix2505 attachment configuration
*
- * @param fe Frontend to attach to.
- * @param config ix2505v_config structure
- * @return FE pointer on success, NULL on failure.
+ * @tuner_address: tuner address
+ * @tuner_gain: Baseband AMP gain control 0/1=0dB(default) 2=-2bB 3=-4dB
+ * @tuner_chargepump: Charge pump output +/- 0=120 1=260 2=555 3=1200(default)
+ * @min_delay_ms: delay after tune
+ * @tuner_write_only: disables reads
*/
-
struct ix2505v_config {
u8 tuner_address;
-
- /*Baseband AMP gain control 0/1=0dB(default) 2=-2bB 3=-4dB */
u8 tuner_gain;
-
- /*Charge pump output +/- 0=120 1=260 2=555 3=1200(default) */
u8 tuner_chargepump;
-
- /* delay after tune */
int min_delay_ms;
-
- /* disables reads*/
u8 tuner_write_only;
};
#if IS_REACHABLE(CONFIG_DVB_IX2505V)
+/**
+ * Attach a ix2505v tuner to the supplied frontend structure.
+ *
+ * @fe: Frontend to attach to.
+ * @config: pointer to &struct ix2505v_config
+ * @i2c: pointer to &struct i2c_adapter.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
const struct ix2505v_config *config, struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 68923c84679a..e5a6c1766664 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -517,7 +517,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config,
state->i2c = i2c;
state->first = 1;
- /**
+ /*
* the L64781 won't show up before we send the reset_and_configure()
* broadcast. If nothing responds there is no L64781 on the bus...
*/
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 5798079add10..9854096839ae 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1048,16 +1048,6 @@ fail:
return ret;
}
-static int lg216x_get_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
- return (DTV_ATSCMH_FIC_VER == tvp->cmd) ?
- lg216x_get_frontend(fe, c) : 0;
-}
-
-
static int lg2160_set_frontend(struct dvb_frontend *fe)
{
struct lg216x_state *state = fe->demodulator_priv;
@@ -1368,8 +1358,6 @@ static const struct dvb_frontend_ops lg2160_ops = {
.init = lg216x_init,
.sleep = lg216x_sleep,
#endif
- .get_property = lg216x_get_property,
-
.set_frontend = lg2160_set_frontend,
.get_frontend = lg216x_get_frontend,
.get_tune_settings = lg216x_get_tune_settings,
@@ -1396,8 +1384,6 @@ static const struct dvb_frontend_ops lg2161_ops = {
.init = lg216x_init,
.sleep = lg216x_sleep,
#endif
- .get_property = lg216x_get_property,
-
.set_frontend = lg2160_set_frontend,
.get_frontend = lg216x_get_frontend,
.get_tune_settings = lg216x_get_tune_settings,
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index c9b1eb38444e..724e9aac0f11 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/div64.h>
+#include <linux/kernel.h>
#include <linux/dvb/frontend.h>
#include "dvb_math.h"
#include "lgdt3306a.h"
@@ -2072,7 +2073,7 @@ static const short regtab[] = {
0x30aa, /* MPEGLOCK */
};
-#define numDumpRegs (sizeof(regtab)/sizeof(regtab[0]))
+#define numDumpRegs (ARRAY_SIZE(regtab))
static u8 regval1[numDumpRegs] = {0, };
static u8 regval2[numDumpRegs] = {0, };
diff --git a/drivers/media/dvb-frontends/m88ds3103.h b/drivers/media/dvb-frontends/m88ds3103.h
index 04b355a005fb..1a8964a2265d 100644
--- a/drivers/media/dvb-frontends/m88ds3103.h
+++ b/drivers/media/dvb-frontends/m88ds3103.h
@@ -25,6 +25,34 @@
*/
/**
+ * enum m88ds3103_ts_mode - TS connection mode
+ * @M88DS3103_TS_SERIAL: TS output pin D0, normal
+ * @M88DS3103_TS_SERIAL_D7: TS output pin D7
+ * @M88DS3103_TS_PARALLEL: TS Parallel mode
+ * @M88DS3103_TS_CI: TS CI Mode
+ */
+enum m88ds3103_ts_mode {
+ M88DS3103_TS_SERIAL,
+ M88DS3103_TS_SERIAL_D7,
+ M88DS3103_TS_PARALLEL,
+ M88DS3103_TS_CI
+};
+
+/**
+ * enum m88ds3103_clock_out
+ * @M88DS3103_CLOCK_OUT_DISABLED: Clock output is disabled
+ * @M88DS3103_CLOCK_OUT_ENABLED: Clock output is enabled with crystal
+ * clock.
+ * @M88DS3103_CLOCK_OUT_ENABLED_DIV2: Clock output is enabled with half
+ * crystal clock.
+ */
+enum m88ds3103_clock_out {
+ M88DS3103_CLOCK_OUT_DISABLED,
+ M88DS3103_CLOCK_OUT_ENABLED,
+ M88DS3103_CLOCK_OUT_ENABLED_DIV2
+};
+
+/**
* struct m88ds3103_platform_data - Platform data for the m88ds3103 driver
* @clk: Clock frequency.
* @i2c_wr_max: Max bytes I2C adapter can write at once.
@@ -44,24 +72,16 @@
* @get_dvb_frontend: Get DVB frontend.
* @get_i2c_adapter: Get I2C adapter.
*/
-
struct m88ds3103_platform_data {
u32 clk;
u16 i2c_wr_max;
-#define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */
-#define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
-#define M88DS3103_TS_PARALLEL 2 /* TS Parallel mode */
-#define M88DS3103_TS_CI 3 /* TS CI Mode */
- u8 ts_mode:2;
+ enum m88ds3103_ts_mode ts_mode;
u32 ts_clk;
+ enum m88ds3103_clock_out clk_out;
u8 ts_clk_pol:1;
u8 spec_inv:1;
u8 agc;
u8 agc_inv:1;
-#define M88DS3103_CLOCK_OUT_DISABLED 0
-#define M88DS3103_CLOCK_OUT_ENABLED 1
-#define M88DS3103_CLOCK_OUT_ENABLED_DIV2 2
- u8 clk_out:2;
u8 envelope_mode:1;
u8 lnb_hv_pol:1;
u8 lnb_en_pol:1;
@@ -73,105 +93,60 @@ struct m88ds3103_platform_data {
u8 attach_in_use:1;
};
-/*
- * Do not add new m88ds3103_attach() users! Use I2C bindings instead.
+/**
+ * struct m88ds3103_config - m88ds3102 configuration
+ *
+ * @i2c_addr: I2C address. Default: none, must set. Example: 0x68, ...
+ * @clock: Device's clock. Default: none, must set. Example: 27000000
+ * @i2c_wr_max: Max bytes I2C provider is asked to write at once.
+ * Default: none, must set. Example: 33, 65, ...
+ * @ts_mode: TS output mode, as defined by &enum m88ds3103_ts_mode.
+ * Default: M88DS3103_TS_SERIAL.
+ * @ts_clk: TS clk in KHz. Default: 0.
+ * @ts_clk_pol: TS clk polarity.Default: 0.
+ * 1-active at falling edge; 0-active at rising edge.
+ * @spec_inv: Spectrum inversion. Default: 0.
+ * @agc_inv: AGC polarity. Default: 0.
+ * @clock_out: Clock output, as defined by &enum m88ds3103_clock_out.
+ * Default: M88DS3103_CLOCK_OUT_DISABLED.
+ * @envelope_mode: DiSEqC envelope mode. Default: 0.
+ * @agc: AGC configuration. Default: none, must set.
+ * @lnb_hv_pol: LNB H/V pin polarity. Default: 0. Values:
+ * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18;
+ * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
+ * @lnb_en_pol: LNB enable pin polarity. Default: 0. Values:
+ * 1: pin high to enable, pin low to disable;
+ * 0: pin high to disable, pin low to enable.
*/
struct m88ds3103_config {
- /*
- * I2C address
- * Default: none, must set
- * 0x68, ...
- */
u8 i2c_addr;
-
- /*
- * clock
- * Default: none, must set
- * 27000000
- */
u32 clock;
-
- /*
- * max bytes I2C provider is asked to write at once
- * Default: none, must set
- * 33, 65, ...
- */
u16 i2c_wr_max;
-
- /*
- * TS output mode
- * Default: M88DS3103_TS_SERIAL
- */
-#define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */
-#define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
-#define M88DS3103_TS_PARALLEL 2 /* TS Parallel mode */
-#define M88DS3103_TS_CI 3 /* TS CI Mode */
u8 ts_mode;
-
- /*
- * TS clk in KHz
- * Default: 0.
- */
u32 ts_clk;
-
- /*
- * TS clk polarity.
- * Default: 0. 1-active at falling edge; 0-active at rising edge.
- */
u8 ts_clk_pol:1;
-
- /*
- * spectrum inversion
- * Default: 0
- */
u8 spec_inv:1;
-
- /*
- * AGC polarity
- * Default: 0
- */
u8 agc_inv:1;
-
- /*
- * clock output
- * Default: M88DS3103_CLOCK_OUT_DISABLED
- */
-#define M88DS3103_CLOCK_OUT_DISABLED 0
-#define M88DS3103_CLOCK_OUT_ENABLED 1
-#define M88DS3103_CLOCK_OUT_ENABLED_DIV2 2
u8 clock_out;
-
- /*
- * DiSEqC envelope mode
- * Default: 0
- */
u8 envelope_mode:1;
-
- /*
- * AGC configuration
- * Default: none, must set
- */
u8 agc;
-
- /*
- * LNB H/V pin polarity
- * Default: 0.
- * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18.
- * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
- */
u8 lnb_hv_pol:1;
-
- /*
- * LNB enable pin polarity
- * Default: 0.
- * 1: pin high to enable, pin low to disable.
- * 0: pin high to disable, pin low to enable.
- */
u8 lnb_en_pol:1;
};
#if defined(CONFIG_DVB_M88DS3103) || \
(defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
+/**
+ * Attach a m88ds3103 demod
+ *
+ * @config: pointer to &struct m88ds3103_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ * @tuner_i2c: on success, returns the I2C adapter associated with
+ * m88ds3103 tuner.
+ *
+ * return: FE pointer on success, NULL on failure.
+ * Note: Do not add new m88ds3103_attach() users! Use I2C bindings instead.
+ */
extern struct dvb_frontend *m88ds3103_attach(
const struct m88ds3103_config *config,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index e8ac8c3e2ec0..bdaf9d235fed 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2071,12 +2071,9 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
dev_dbg(&i2c->dev, "%s called.\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL);
- if (state == NULL) {
- dev_err(&i2c->dev,
- "%s: unable to allocate memory for state\n", __func__);
- goto error;
- }
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
/* setup the state */
state->config = config;
@@ -2089,22 +2086,16 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
/* Check if it is a mb86a20s frontend */
rev = mb86a20s_readreg(state, 0);
-
- if (rev == 0x13) {
- dev_info(&i2c->dev,
- "Detected a Fujitsu mb86a20s frontend\n");
- } else {
+ if (rev != 0x13) {
+ kfree(state);
dev_dbg(&i2c->dev,
"Frontend revision %d is unknown - aborting.\n",
rev);
- goto error;
+ return NULL;
}
+ dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
return &state->frontend;
-
-error:
- kfree(state);
- return NULL;
}
EXPORT_SYMBOL(mb86a20s_attach);
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h
index dfb02db2126c..05c9725d1c5f 100644
--- a/drivers/media/dvb-frontends/mb86a20s.h
+++ b/drivers/media/dvb-frontends/mb86a20s.h
@@ -26,7 +26,6 @@
* @demod_address: the demodulator's i2c address
* @is_serial: if true, TS is serial. Otherwise, TS is parallel
*/
-
struct mb86a20s_config {
u32 fclk;
u8 demod_address;
@@ -34,9 +33,17 @@ struct mb86a20s_config {
};
#if IS_REACHABLE(CONFIG_DVB_MB86A20S)
+/**
+ * Attach a mb86a20s demod
+ *
+ * @config: pointer to &struct mb86a20s_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
struct i2c_adapter *i2c);
-extern struct i2c_adapter *mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *);
+
#else
static inline struct dvb_frontend *mb86a20s_attach(
const struct mb86a20s_config *config, struct i2c_adapter *i2c)
@@ -44,12 +51,6 @@ static inline struct dvb_frontend *mb86a20s_attach(
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-static inline struct i2c_adapter *
- mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *fe)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
#endif
#endif /* MB86A20S */
diff --git a/drivers/media/dvb-frontends/mn88472.h b/drivers/media/dvb-frontends/mn88472.h
index 323632523876..8cd5ef61903b 100644
--- a/drivers/media/dvb-frontends/mn88472.h
+++ b/drivers/media/dvb-frontends/mn88472.h
@@ -19,21 +19,21 @@
#include <linux/dvb/frontend.h>
+/* Define old names for backward compatibility */
+#define VARIABLE_TS_CLOCK MN88472_TS_CLK_VARIABLE
+#define FIXED_TS_CLOCK MN88472_TS_CLK_FIXED
+#define SERIAL_TS_MODE MN88472_TS_MODE_SERIAL
+#define PARALLEL_TS_MODE MN88472_TS_MODE_PARALLEL
+
/**
* struct mn88472_config - Platform data for the mn88472 driver
* @xtal: Clock frequency.
* @ts_mode: TS mode.
* @ts_clock: TS clock config.
* @i2c_wr_max: Max number of bytes driver writes to I2C at once.
- * @get_dvb_frontend: Get DVB frontend.
+ * @fe: pointer to a frontend pointer
+ * @get_dvb_frontend: Get DVB frontend callback.
*/
-
-/* Define old names for backward compatibility */
-#define VARIABLE_TS_CLOCK MN88472_TS_CLK_VARIABLE
-#define FIXED_TS_CLOCK MN88472_TS_CLK_FIXED
-#define SERIAL_TS_MODE MN88472_TS_MODE_SERIAL
-#define PARALLEL_TS_MODE MN88472_TS_MODE_PARALLEL
-
struct mn88472_config {
unsigned int xtal;
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 676c96c216c3..53064e11f5f1 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -43,7 +43,7 @@
#define BYTE2(v) ((v >> 16) & 0xff)
#define BYTE3(v) ((v >> 24) & 0xff)
-LIST_HEAD(mxllist);
+static LIST_HEAD(mxllist);
struct mxl_base {
struct list_head mxllist;
diff --git a/drivers/media/dvb-frontends/rtl2830.h b/drivers/media/dvb-frontends/rtl2830.h
index 0cde151e6608..458ac94e8a8b 100644
--- a/drivers/media/dvb-frontends/rtl2830.h
+++ b/drivers/media/dvb-frontends/rtl2830.h
@@ -32,7 +32,6 @@
* @pid_filter: Set PID to PID filter.
* @pid_filter_ctrl: Control PID filter.
*/
-
struct rtl2830_platform_data {
u32 clk;
bool spec_inv;
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 03c0de039fa9..6a124ff71c2b 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -35,7 +35,6 @@
* @pid_filter: Set PID to PID filter.
* @pid_filter_ctrl: Control PID filter.
*/
-
struct rtl2832_platform_data {
u32 clk;
/*
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.h b/drivers/media/dvb-frontends/rtl2832_sdr.h
index d8fc7e7212e3..8f88c2fb8627 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.h
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.h
@@ -33,15 +33,11 @@
* struct rtl2832_sdr_platform_data - Platform data for the rtl2832_sdr driver
* @clk: Clock frequency (4000000, 16000000, 25000000, 28800000).
* @tuner: Used tuner model.
- * @i2c_client: rtl2832 demod driver I2C client.
- * @bulk_read: rtl2832 driver private I/O interface.
- * @bulk_write: rtl2832 driver private I/O interface.
- * @update_bits: rtl2832 driver private I/O interface.
+ * @regmap: pointer to &struct regmap.
* @dvb_frontend: rtl2832 DVB frontend.
* @v4l2_subdev: Tuner v4l2 controls.
* @dvb_usb_device: DVB USB interface for USB streaming.
*/
-
struct rtl2832_sdr_platform_data {
u32 clk;
/*
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 172fc367ccaa..41d9c513b7e8 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -696,7 +696,6 @@ static int si2168_probe(struct i2c_client *client,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 43d47dfcc7b8..53e66c232d3c 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -357,14 +357,14 @@ static int sp2_exit(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- if (client == NULL)
+ if (!client)
return 0;
s = i2c_get_clientdata(client);
- if (s == NULL)
+ if (!s)
return 0;
- if (s->ca.data == NULL)
+ if (!s->ca.data)
return 0;
dvb_ca_en50221_release(&s->ca);
@@ -381,10 +381,9 @@ static int sp2_probe(struct i2c_client *client,
dev_dbg(&client->dev, "\n");
- s = kzalloc(sizeof(struct sp2), GFP_KERNEL);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 7c511c3cd4ca..d2c402b52c6e 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -57,7 +57,7 @@ static int sp887x_writereg (struct sp887x_state* state, u16 reg, u16 data)
int ret;
if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1) {
- /**
+ /*
* in case of soft reset we ignore ACK errors...
*/
if (!(reg == 0xf1a && data == 0x000 &&
@@ -130,7 +130,7 @@ static void sp887x_setup_agc (struct sp887x_state* state)
#define BLOCKSIZE 30
#define FW_SIZE 0x4000
-/**
+/*
* load firmware and setup MPEG interface...
*/
static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware *fw)
@@ -279,7 +279,7 @@ static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
return 0;
}
-/**
+/*
* estimates division of two 24bit numbers,
* derived from the ves1820/stv0299 driver code
*/
diff --git a/drivers/media/dvb-frontends/stb6000.h b/drivers/media/dvb-frontends/stb6000.h
index 78e75dfc317f..e94a3d5facf6 100644
--- a/drivers/media/dvb-frontends/stb6000.h
+++ b/drivers/media/dvb-frontends/stb6000.h
@@ -26,15 +26,16 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
+#if IS_REACHABLE(CONFIG_DVB_STB6000)
/**
* Attach a stb6000 tuner to the supplied frontend structure.
*
- * @param fe Frontend to attach to.
- * @param addr i2c address of the tuner.
- * @param i2c i2c adapter to use.
- * @return FE pointer on success, NULL on failure.
+ * @fe: Frontend to attach to.
+ * @addr: i2c address of the tuner.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
*/
-#if IS_REACHABLE(CONFIG_DVB_STB6000)
extern struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index 45cbc898ad25..67f91814b9f7 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -447,12 +447,6 @@ static int stv0288_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int stv0288_set_property(struct dvb_frontend *fe, struct dtv_property *p)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
static int stv0288_set_frontend(struct dvb_frontend *fe)
{
struct stv0288_state *state = fe->demodulator_priv;
@@ -567,7 +561,6 @@ static const struct dvb_frontend_ops stv0288_ops = {
.set_tone = stv0288_set_tone,
.set_voltage = stv0288_set_voltage,
- .set_property = stv0288_set_property,
.set_frontend = stv0288_set_frontend,
};
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index b36b21a13201..b1f3d675d316 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -368,7 +368,7 @@ static int stv0299_set_voltage(struct dvb_frontend *fe,
reg0x08 = stv0299_readreg (state, 0x08);
reg0x0c = stv0299_readreg (state, 0x0c);
- /**
+ /*
* H/V switching over OP0, OP1 and OP2 are LNB power enable bits
*/
reg0x0c &= 0x0f;
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index e4fd9c1b0560..6aad0efa3174 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -258,11 +258,9 @@ static int stv6110_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency)
{
struct stv6110_priv *priv = fe->tuner_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 ret = 0x04;
u32 divider, ref, p, presc, i, result_freq, vco_freq;
s32 p_calc, p_calc_opt = 1000, r_div, r_div_opt = 0, p_val;
- s32 srate;
dprintk("%s, freq=%d kHz, mclk=%d Hz\n", __func__,
frequency, priv->mclk);
@@ -273,13 +271,6 @@ static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency)
((((priv->mclk / 1000000) - 16) & 0x1f) << 3);
/* BB_GAIN = db/2 */
- if (fe->ops.set_property && fe->ops.get_property) {
- srate = c->symbol_rate;
- dprintk("%s: Get Frontend parameters: srate=%d\n",
- __func__, srate);
- } else
- srate = 15000000;
-
priv->regs[RSTV6110_CTRL2] &= ~0x0f;
priv->regs[RSTV6110_CTRL2] |= (priv->gain & 0x0f);
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index 8f184026ee11..da1a87bc1603 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -38,7 +38,6 @@
* @tuner_i2c_addr: CX24118A tuner I2C address (0x14, 0x54, ...).
* @get_dvb_frontend: Get DVB frontend.
*/
-
struct tda10071_platform_data {
u32 clk;
u16 i2c_wr_max;
diff --git a/drivers/media/dvb-frontends/tda826x.h b/drivers/media/dvb-frontends/tda826x.h
index 81abe1aebe9f..6a7bed12e741 100644
--- a/drivers/media/dvb-frontends/tda826x.h
+++ b/drivers/media/dvb-frontends/tda826x.h
@@ -29,11 +29,12 @@
/**
* Attach a tda826x tuner to the supplied frontend structure.
*
- * @param fe Frontend to attach to.
- * @param addr i2c address of the tuner.
- * @param i2c i2c adapter to use.
- * @param has_loopthrough Set to 1 if the card has a loopthrough RF connector.
- * @return FE pointer on success, NULL on failure.
+ * @fe: Frontend to attach to.
+ * @addr: i2c address of the tuner.
+ * @i2c: i2c adapter to use.
+ * @has_loopthrough: Set to 1 if the card has a loopthrough RF connector.
+ *
+ * return: FE pointer on success, NULL on failure.
*/
#if IS_REACHABLE(CONFIG_DVB_TDA826X)
extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 18e6d4c5be21..1d41abd47f04 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -1,4 +1,4 @@
-/**
+/*
* Driver for Infineon tua6100 pll.
*
* (c) 2006 Andrew de Quincey
diff --git a/drivers/media/dvb-frontends/tua6100.h b/drivers/media/dvb-frontends/tua6100.h
index 9f15cbdfdeca..6c098a894ea6 100644
--- a/drivers/media/dvb-frontends/tua6100.h
+++ b/drivers/media/dvb-frontends/tua6100.h
@@ -1,4 +1,4 @@
-/**
+/*
* Driver for Infineon tua6100 PLL.
*
* (c) 2006 Andrew de Quincey
diff --git a/drivers/media/dvb-frontends/zd1301_demod.h b/drivers/media/dvb-frontends/zd1301_demod.h
index ceb2e05e873c..6cd8f6f9c415 100644
--- a/drivers/media/dvb-frontends/zd1301_demod.h
+++ b/drivers/media/dvb-frontends/zd1301_demod.h
@@ -27,7 +27,6 @@
* @reg_read: Register read callback.
* @reg_write: Register write callback.
*/
-
struct zd1301_demod_platform_data {
void *reg_priv;
int (*reg_read)(void *, u16, u8 *);
@@ -41,8 +40,7 @@ struct zd1301_demod_platform_data {
*
* Return: Pointer to DVB frontend which given platform device owns.
*/
-
-struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
+struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *pdev);
/**
* zd1301_demod_get_i2c_adapter() - Get pointer to I2C adapter
@@ -50,11 +48,16 @@ struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
*
* Return: Pointer to I2C adapter which given platform device owns.
*/
-
-struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *);
+struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *pdev);
#else
+/**
+ * zd1301_demod_get_dvb_frontend() - Attach a zd1301 frontend
+ * @dev: Pointer to platform device
+ *
+ * Return: Pointer to %struct dvb_frontend or NULL if attach fails.
+ */
static inline struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *dev)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 062282739ce5..89dd65ae88ad 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -1,4 +1,4 @@
-/**
+/*
* Driver for Zarlink zl10036 DVB-S silicon tuner
*
* Copyright (C) 2006 Tino Reichardt
@@ -157,7 +157,7 @@ static int zl10036_sleep(struct dvb_frontend *fe)
return ret;
}
-/**
+/*
* register map of the ZL10036/ZL10038
*
* reg[default] content
@@ -219,7 +219,7 @@ static int zl10036_set_bandwidth(struct zl10036_state *state, u32 fbw)
if (fbw <= 28820) {
br = _BR_MAXIMUM;
} else {
- /**
+ /*
* f(bw)=34,6MHz f(xtal)=10.111MHz
* br = (10111/34600) * 63 * 1/K = 14;
*/
@@ -315,7 +315,7 @@ static int zl10036_set_params(struct dvb_frontend *fe)
|| (frequency > fe->ops.info.frequency_max))
return -EINVAL;
- /**
+ /*
* alpha = 1.35 for dvb-s
* fBW = (alpha*symbolrate)/(2*0.8)
* 1.35 / (2*0.8) = 27 / 32
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index 88751adfecf7..ec90ca927739 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -20,20 +20,20 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-/**
- * Attach a zl10036 tuner to the supplied frontend structure.
- *
- * @param fe Frontend to attach to.
- * @param config zl10036_config structure
- * @return FE pointer on success, NULL on failure.
- */
-
struct zl10036_config {
u8 tuner_address;
int rf_loop_enable;
};
#if IS_REACHABLE(CONFIG_DVB_ZL10036)
+/**
+ * Attach a zl10036 tuner to the supplied frontend structure.
+ *
+ * @fe: Frontend to attach to.
+ * @config: zl10036_config structure.
+ * @i2c: pointer to struct i2c_adapter.
+ * return: FE pointer on success, NULL on failure.
+ */
extern struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe,
const struct zl10036_config *config, struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 94153895fcd4..cb5d7ff82915 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -354,6 +354,14 @@ config VIDEO_TC358743
To compile this driver as a module, choose M here: the
module will be called tc358743.
+config VIDEO_TC358743_CEC
+ bool "Enable Toshiba TC358743 CEC support"
+ depends on VIDEO_TC358743
+ select CEC_CORE
+ ---help---
+ When selected the tc358743 will support the optional
+ HDMI CEC feature.
+
config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
@@ -547,6 +555,14 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+config VIDEO_IMX274
+ tristate "Sony IMX274 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a V4L2 sensor-level driver for the Sony IMX274
+ CMOS image sensor.
+
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -660,6 +676,7 @@ config VIDEO_OV13858
tristate "OmniVision OV13858 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
---help---
This is a Video4Linux2 sensor-level driver for the OmniVision
OV13858 camera.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index f104650d6000..548a9efce966 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -93,5 +93,6 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_SDR_MAX2175) += max2175.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 3df28f2f9b38..6fb818a775db 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1328,7 +1328,7 @@ static int adv7180_probe(struct i2c_client *client,
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
ret = adv7180_init_controls(state);
if (ret)
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index b33ccfc08708..4aa8e45b5cd3 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -217,6 +217,7 @@ static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int afe_std;
int ret;
mutex_lock(&state->mutex);
@@ -235,8 +236,12 @@ static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
/* Read detected standard */
ret = adv748x_afe_status(afe, NULL, std);
+ afe_std = adv748x_afe_std(afe->curr_norm);
+ if (afe_std < 0)
+ goto unlock;
+
/* Restore original state */
- adv748x_afe_set_video_standard(state, afe->curr_norm);
+ adv748x_afe_set_video_standard(state, afe_std);
unlock:
mutex_unlock(&state->mutex);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f289b8aca1da..c786cd125417 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1948,7 +1948,7 @@ static int adv76xx_set_format(struct v4l2_subdev *sd,
return -EINVAL;
info = adv76xx_format_info(state, format->format.code);
- if (info == NULL)
+ if (!info)
info = adv76xx_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
adv76xx_fill_format(state, &format->format);
@@ -2256,7 +2256,7 @@ static int adv76xx_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
return 0;
}
- if (data == NULL)
+ if (!data)
return -ENODATA;
if (edid->start_block >= state->edid.blocks)
@@ -3316,10 +3316,8 @@ static int adv76xx_probe(struct i2c_client *client,
client->addr << 1);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
- if (!state) {
- v4l_err(client, "Could not allocate adv76xx_state memory!\n");
+ if (!state)
return -ENOMEM;
- }
state->i2c_clients[ADV76XX_PAGE_IO] = client;
@@ -3482,7 +3480,7 @@ static int adv76xx_probe(struct i2c_client *client,
state->i2c_clients[i] =
adv76xx_dummy_client(sd, state->pdata.i2c_addresses[i],
0xf2 + i);
- if (state->i2c_clients[i] == NULL) {
+ if (!state->i2c_clients[i]) {
err = -ENOMEM;
v4l2_err(sd, "failed to create i2c client %u\n", i);
goto err_i2c;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 65f34e7e146f..136aa80a834b 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3467,11 +3467,9 @@ static int adv7842_probe(struct i2c_client *client,
return -ENODEV;
}
- state = devm_kzalloc(&client->dev, sizeof(struct adv7842_state), GFP_KERNEL);
- if (!state) {
- v4l_err(client, "Could not allocate adv7842_state memory!\n");
+ state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
return -ENOMEM;
- }
/* platform data */
state->pdata = *pdata;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 39f51daa7558..f38bf819d805 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -1745,7 +1745,7 @@ static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- v4l2_std_id stds[] = {
+ static const v4l2_std_id stds[] = {
/* 0000 */ V4L2_STD_UNKNOWN,
/* 0001 */ V4L2_STD_NTSC_M,
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 95af4fc99cd0..ed01e8bd4331 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -21,6 +21,11 @@
#define DW9714_NAME "dw9714"
#define DW9714_MAX_FOCUS_POS 1023
/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position
+ */
+#define DW9714_FOCUS_STEPS 1
+/*
* This acts as the minimum granularity of lens movement.
* Keep this value power of 2, so the control steps can be
* uniformly adjusted for gradual lens movement, with desired
@@ -137,7 +142,7 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
v4l2_ctrl_handler_init(hdl, 1);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
- 0, DW9714_MAX_FOCUS_POS, DW9714_CTRL_STEPS, 0);
+ 0, DW9714_MAX_FOCUS_POS, DW9714_FOCUS_STEPS, 0);
if (hdl->error)
dev_err(&client->dev, "%s fail error: 0x%x\n",
diff --git a/drivers/media/i2c/et8ek8/Kconfig b/drivers/media/i2c/et8ek8/Kconfig
index 14399365ad7f..9fe409e95666 100644
--- a/drivers/media/i2c/et8ek8/Kconfig
+++ b/drivers/media/i2c/et8ek8/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_ET8EK8
tristate "ET8EK8 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
---help---
This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
It is used for example in Nokia N900 (RX-51).
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index c14f0fd6ded3..e9eff9039ef5 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1453,7 +1453,7 @@ static int et8ek8_probe(struct i2c_client *client,
goto err_mutex;
}
- ret = v4l2_async_register_subdev(&sensor->subdev);
+ ret = v4l2_async_register_subdev_sensor_common(&sensor->subdev);
if (ret < 0)
goto err_entity;
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
new file mode 100644
index 000000000000..2f71af2f90bf
--- /dev/null
+++ b/drivers/media/i2c/imx274.c
@@ -0,0 +1,1810 @@
+/*
+ * imx274.c - IMX274 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2017, Leopard Imaging, Inc.
+ *
+ * Leon Luo <leonl@leopardimaging.com>
+ * Edwin Zou <edwinz@leopardimaging.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * See "SHR, SVR Setting" in datasheet
+ */
+#define IMX274_DEFAULT_FRAME_LENGTH (4550)
+#define IMX274_MAX_FRAME_LENGTH (0x000fffff)
+
+/*
+ * See "Frame Rate Adjustment" in datasheet
+ */
+#define IMX274_PIXCLK_CONST1 (72000000)
+#define IMX274_PIXCLK_CONST2 (1000000)
+
+/*
+ * The input gain is shifted by IMX274_GAIN_SHIFT to get
+ * decimal number. The real gain is
+ * (float)input_gain_value / (1 << IMX274_GAIN_SHIFT)
+ */
+#define IMX274_GAIN_SHIFT (8)
+#define IMX274_GAIN_SHIFT_MASK ((1 << IMX274_GAIN_SHIFT) - 1)
+
+/*
+ * See "Analog Gain" and "Digital Gain" in datasheet
+ * min gain is 1X
+ * max gain is calculated based on IMX274_GAIN_REG_MAX
+ */
+#define IMX274_GAIN_REG_MAX (1957)
+#define IMX274_MIN_GAIN (0x01 << IMX274_GAIN_SHIFT)
+#define IMX274_MAX_ANALOG_GAIN ((2048 << IMX274_GAIN_SHIFT)\
+ / (2048 - IMX274_GAIN_REG_MAX))
+#define IMX274_MAX_DIGITAL_GAIN (8)
+#define IMX274_DEF_GAIN (20 << IMX274_GAIN_SHIFT)
+#define IMX274_GAIN_CONST (2048) /* for gain formula */
+
+/*
+ * 1 line time in us = (HMAX / 72), minimal is 4 lines
+ */
+#define IMX274_MIN_EXPOSURE_TIME (4 * 260 / 72)
+
+#define IMX274_DEFAULT_MODE IMX274_MODE_3840X2160
+#define IMX274_MAX_WIDTH (3840)
+#define IMX274_MAX_HEIGHT (2160)
+#define IMX274_MAX_FRAME_RATE (120)
+#define IMX274_MIN_FRAME_RATE (5)
+#define IMX274_DEF_FRAME_RATE (60)
+
+/*
+ * register SHR is limited to (SVR value + 1) x VMAX value - 4
+ */
+#define IMX274_SHR_LIMIT_CONST (4)
+
+/*
+ * Constants for sensor reset delay
+ */
+#define IMX274_RESET_DELAY1 (2000)
+#define IMX274_RESET_DELAY2 (2200)
+
+/*
+ * shift and mask constants
+ */
+#define IMX274_SHIFT_8_BITS (8)
+#define IMX274_SHIFT_16_BITS (16)
+#define IMX274_MASK_LSB_2_BITS (0x03)
+#define IMX274_MASK_LSB_3_BITS (0x07)
+#define IMX274_MASK_LSB_4_BITS (0x0f)
+#define IMX274_MASK_LSB_8_BITS (0x00ff)
+
+#define DRIVER_NAME "IMX274"
+
+/*
+ * IMX274 register definitions
+ */
+#define IMX274_FRAME_LENGTH_ADDR_1 0x30FA /* VMAX, MSB */
+#define IMX274_FRAME_LENGTH_ADDR_2 0x30F9 /* VMAX */
+#define IMX274_FRAME_LENGTH_ADDR_3 0x30F8 /* VMAX, LSB */
+#define IMX274_SVR_REG_MSB 0x300F /* SVR */
+#define IMX274_SVR_REG_LSB 0x300E /* SVR */
+#define IMX274_HMAX_REG_MSB 0x30F7 /* HMAX */
+#define IMX274_HMAX_REG_LSB 0x30F6 /* HMAX */
+#define IMX274_COARSE_TIME_ADDR_MSB 0x300D /* SHR */
+#define IMX274_COARSE_TIME_ADDR_LSB 0x300C /* SHR */
+#define IMX274_ANALOG_GAIN_ADDR_LSB 0x300A /* ANALOG GAIN LSB */
+#define IMX274_ANALOG_GAIN_ADDR_MSB 0x300B /* ANALOG GAIN MSB */
+#define IMX274_DIGITAL_GAIN_REG 0x3012 /* Digital Gain */
+#define IMX274_VFLIP_REG 0x301A /* VERTICAL FLIP */
+#define IMX274_TEST_PATTERN_REG 0x303D /* TEST PATTERN */
+#define IMX274_STANDBY_REG 0x3000 /* STANDBY */
+
+#define IMX274_TABLE_WAIT_MS 0
+#define IMX274_TABLE_END 1
+
+/*
+ * imx274 I2C operation related structure
+ */
+struct reg_8 {
+ u16 addr;
+ u8 val;
+};
+
+static const struct regmap_config imx274_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+enum imx274_mode {
+ IMX274_MODE_3840X2160,
+ IMX274_MODE_1920X1080,
+ IMX274_MODE_1280X720,
+
+ IMX274_MODE_START_STREAM_1,
+ IMX274_MODE_START_STREAM_2,
+ IMX274_MODE_START_STREAM_3,
+ IMX274_MODE_START_STREAM_4,
+ IMX274_MODE_STOP_STREAM
+};
+
+/*
+ * imx274 format related structure
+ */
+struct imx274_frmfmt {
+ u32 mbus_code;
+ enum v4l2_colorspace colorspace;
+ struct v4l2_frmsize_discrete size;
+ enum imx274_mode mode;
+};
+
+/*
+ * imx274 test pattern related structure
+ */
+enum {
+ TEST_PATTERN_DISABLED = 0,
+ TEST_PATTERN_ALL_000H,
+ TEST_PATTERN_ALL_FFFH,
+ TEST_PATTERN_ALL_555H,
+ TEST_PATTERN_ALL_AAAH,
+ TEST_PATTERN_VSP_5AH, /* VERTICAL STRIPE PATTERN 555H/AAAH */
+ TEST_PATTERN_VSP_A5H, /* VERTICAL STRIPE PATTERN AAAH/555H */
+ TEST_PATTERN_VSP_05H, /* VERTICAL STRIPE PATTERN 000H/555H */
+ TEST_PATTERN_VSP_50H, /* VERTICAL STRIPE PATTERN 555H/000H */
+ TEST_PATTERN_VSP_0FH, /* VERTICAL STRIPE PATTERN 000H/FFFH */
+ TEST_PATTERN_VSP_F0H, /* VERTICAL STRIPE PATTERN FFFH/000H */
+ TEST_PATTERN_H_COLOR_BARS,
+ TEST_PATTERN_V_COLOR_BARS,
+};
+
+static const char * const tp_qmenu[] = {
+ "Disabled",
+ "All 000h Pattern",
+ "All FFFh Pattern",
+ "All 555h Pattern",
+ "All AAAh Pattern",
+ "Vertical Stripe (555h / AAAh)",
+ "Vertical Stripe (AAAh / 555h)",
+ "Vertical Stripe (000h / 555h)",
+ "Vertical Stripe (555h / 000h)",
+ "Vertical Stripe (000h / FFFh)",
+ "Vertical Stripe (FFFh / 000h)",
+ "Horizontal Color Bars",
+ "Vertical Color Bars",
+};
+
+/*
+ * All-pixel scan mode (10-bit)
+ * imx274 mode1(refer to datasheet) register configuration with
+ * 3840x2160 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode1_3840x2160_raw10[] = {
+ {0x3004, 0x01},
+ {0x3005, 0x01},
+ {0x3006, 0x00},
+ {0x3007, 0x02},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x01},
+ {0x30F6, 0x07}, /* HMAX, 263 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30dd, 0x01}, /* crop to 2160 */
+ {0x30de, 0x06},
+ {0x30df, 0x00},
+ {0x30e0, 0x12},
+ {0x30e1, 0x00},
+ {0x3037, 0x01}, /* to crop to 3840 */
+ {0x3038, 0x0c},
+ {0x3039, 0x00},
+ {0x303a, 0x0c},
+ {0x303b, 0x0f},
+
+ {0x30EE, 0x01},
+ {0x3130, 0x86},
+ {0x3131, 0x08},
+ {0x3132, 0x7E},
+ {0x3133, 0x08},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x16},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x1F},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x1A},
+ {0x366C, 0x19},
+ {0x366D, 0x17},
+ {0x3A41, 0x08},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * Horizontal/vertical 2/2-line binning
+ * (Horizontal and vertical weightedbinning, 10-bit)
+ * imx274 mode3(refer to datasheet) register configuration with
+ * 1920x1080 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode3_1920x1080_raw10[] = {
+ {0x3004, 0x02},
+ {0x3005, 0x21},
+ {0x3006, 0x00},
+ {0x3007, 0x11},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x02},
+
+ {0x30F6, 0x04}, /* HMAX, 260 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30dd, 0x01}, /* to crop to 1920x1080 */
+ {0x30de, 0x05},
+ {0x30df, 0x00},
+ {0x30e0, 0x04},
+ {0x30e1, 0x00},
+ {0x3037, 0x01},
+ {0x3038, 0x0c},
+ {0x3039, 0x00},
+ {0x303a, 0x0c},
+ {0x303b, 0x0f},
+
+ {0x30EE, 0x01},
+ {0x3130, 0x4E},
+ {0x3131, 0x04},
+ {0x3132, 0x46},
+ {0x3133, 0x04},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x1A},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x00},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x1A},
+ {0x366C, 0x19},
+ {0x366D, 0x17},
+ {0x3A41, 0x08},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * Vertical 2/3 subsampling binning horizontal 3 binning
+ * imx274 mode5(refer to datasheet) register configuration with
+ * 1280x720 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode5_1280x720_raw10[] = {
+ {0x3004, 0x03},
+ {0x3005, 0x31},
+ {0x3006, 0x00},
+ {0x3007, 0x09},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x03},
+
+ {0x30F6, 0x04}, /* HMAX, 260 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30DD, 0x01},
+ {0x30DE, 0x07},
+ {0x30DF, 0x00},
+ {0x40E0, 0x04},
+ {0x30E1, 0x00},
+ {0x3030, 0xD4},
+ {0x3031, 0x02},
+ {0x3032, 0xD0},
+ {0x3033, 0x02},
+
+ {0x30EE, 0x01},
+ {0x3130, 0xE2},
+ {0x3131, 0x02},
+ {0x3132, 0xDE},
+ {0x3133, 0x02},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x1B},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x00},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x19},
+ {0x366C, 0x17},
+ {0x366D, 0x17},
+ {0x3A41, 0x04},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 first step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_1[] = {
+ {IMX274_STANDBY_REG, 0x12},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 second step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_2[] = {
+ {0x3120, 0xF0}, /* clock settings */
+ {0x3121, 0x00}, /* clock settings */
+ {0x3122, 0x02}, /* clock settings */
+ {0x3129, 0x9C}, /* clock settings */
+ {0x312A, 0x02}, /* clock settings */
+ {0x312D, 0x02}, /* clock settings */
+
+ {0x310B, 0x00},
+
+ /* PLSTMG */
+ {0x304C, 0x00}, /* PLSTMG01 */
+ {0x304D, 0x03},
+ {0x331C, 0x1A},
+ {0x331D, 0x00},
+ {0x3502, 0x02},
+ {0x3529, 0x0E},
+ {0x352A, 0x0E},
+ {0x352B, 0x0E},
+ {0x3538, 0x0E},
+ {0x3539, 0x0E},
+ {0x3553, 0x00},
+ {0x357D, 0x05},
+ {0x357F, 0x05},
+ {0x3581, 0x04},
+ {0x3583, 0x76},
+ {0x3587, 0x01},
+ {0x35BB, 0x0E},
+ {0x35BC, 0x0E},
+ {0x35BD, 0x0E},
+ {0x35BE, 0x0E},
+ {0x35BF, 0x0E},
+ {0x366E, 0x00},
+ {0x366F, 0x00},
+ {0x3670, 0x00},
+ {0x3671, 0x00},
+
+ /* PSMIPI */
+ {0x3304, 0x32}, /* PSMIPI1 */
+ {0x3305, 0x00},
+ {0x3306, 0x32},
+ {0x3307, 0x00},
+ {0x3590, 0x32},
+ {0x3591, 0x00},
+ {0x3686, 0x32},
+ {0x3687, 0x00},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 third step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_3[] = {
+ {IMX274_STANDBY_REG, 0x00},
+ {0x303E, 0x02}, /* SYS_MODE = 2 */
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 forth step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_4[] = {
+ {0x30F4, 0x00},
+ {0x3018, 0xA2}, /* XHS VHS OUTUPT */
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 register configuration for stoping stream
+ */
+static const struct reg_8 imx274_stop[] = {
+ {IMX274_STANDBY_REG, 0x01},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 disable test pattern register configuration
+ */
+static const struct reg_8 imx274_tp_disabled[] = {
+ {0x303C, 0x00},
+ {0x377F, 0x00},
+ {0x3781, 0x00},
+ {0x370B, 0x00},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 test pattern register configuration
+ * reg 0x303D defines the test pattern modes
+ */
+static const struct reg_8 imx274_tp_regs[] = {
+ {0x303C, 0x11},
+ {0x370E, 0x01},
+ {0x377F, 0x01},
+ {0x3781, 0x01},
+ {0x370B, 0x11},
+ {IMX274_TABLE_END, 0x00}
+};
+
+static const struct reg_8 *mode_table[] = {
+ [IMX274_MODE_3840X2160] = imx274_mode1_3840x2160_raw10,
+ [IMX274_MODE_1920X1080] = imx274_mode3_1920x1080_raw10,
+ [IMX274_MODE_1280X720] = imx274_mode5_1280x720_raw10,
+
+ [IMX274_MODE_START_STREAM_1] = imx274_start_1,
+ [IMX274_MODE_START_STREAM_2] = imx274_start_2,
+ [IMX274_MODE_START_STREAM_3] = imx274_start_3,
+ [IMX274_MODE_START_STREAM_4] = imx274_start_4,
+ [IMX274_MODE_STOP_STREAM] = imx274_stop,
+};
+
+/*
+ * imx274 format related structure
+ */
+static const struct imx274_frmfmt imx274_formats[] = {
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {3840, 2160},
+ IMX274_MODE_3840X2160},
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {1920, 1080},
+ IMX274_MODE_1920X1080},
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {1280, 720},
+ IMX274_MODE_1280X720},
+};
+
+/*
+ * minimal frame length for each mode
+ * refer to datasheet section "Frame Rate Adjustment (CSI-2)"
+ */
+static const int min_frame_len[] = {
+ 4550, /* mode 1, 4K */
+ 2310, /* mode 3, 1080p */
+ 2310 /* mode 5, 720p */
+};
+
+/*
+ * minimal numbers of SHR register
+ * refer to datasheet table "Shutter Setting (CSI-2)"
+ */
+static const int min_SHR[] = {
+ 12, /* mode 1, 4K */
+ 8, /* mode 3, 1080p */
+ 8 /* mode 5, 720p */
+};
+
+static const int max_frame_rate[] = {
+ 60, /* mode 1 , 4K */
+ 120, /* mode 3, 1080p */
+ 120 /* mode 5, 720p */
+};
+
+/*
+ * Number of clocks per internal offset period
+ * a constant based on mode
+ * refer to section "Integration Time in Each Readout Drive Mode (CSI-2)"
+ * in the datasheet
+ * for the implemented 3 modes, it happens to be the same number
+ */
+static const int nocpiop[] = {
+ 112, /* mode 1 , 4K */
+ 112, /* mode 3, 1080p */
+ 112 /* mode 5, 720p */
+};
+
+/*
+ * struct imx274_ctrls - imx274 ctrl structure
+ * @handler: V4L2 ctrl handler structure
+ * @exposure: Pointer to expsure ctrl structure
+ * @gain: Pointer to gain ctrl structure
+ * @vflip: Pointer to vflip ctrl structure
+ * @test_pattern: Pointer to test pattern ctrl structure
+ */
+struct imx274_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *test_pattern;
+};
+
+/*
+ * struct stim274 - imx274 device structure
+ * @sd: V4L2 subdevice structure
+ * @pd: Media pad structure
+ * @client: Pointer to I2C client
+ * @ctrls: imx274 control structure
+ * @format: V4L2 media bus frame format structure
+ * @frame_rate: V4L2 frame rate structure
+ * @regmap: Pointer to regmap structure
+ * @reset_gpio: Pointer to reset gpio
+ * @lock: Mutex structure
+ * @mode_index: Resolution mode index
+ */
+struct stimx274 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct i2c_client *client;
+ struct imx274_ctrls ctrls;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_fract frame_interval;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct mutex lock; /* mutex lock for operations */
+ u32 mode_index;
+};
+
+/*
+ * Function declaration
+ */
+static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl);
+static int imx274_set_exposure(struct stimx274 *priv, int val);
+static int imx274_set_vflip(struct stimx274 *priv, int val);
+static int imx274_set_test_pattern(struct stimx274 *priv, int val);
+static int imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval);
+
+static inline void msleep_range(unsigned int delay_base)
+{
+ usleep_range(delay_base * 1000, delay_base * 1000 + 500);
+}
+
+/*
+ * v4l2_ctrl and v4l2_subdev related operations
+ */
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler,
+ struct stimx274, ctrls.handler)->sd;
+}
+
+static inline struct stimx274 *to_imx274(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct stimx274, sd);
+}
+
+/*
+ * imx274_regmap_util_write_table_8 - Function for writing register table
+ * @regmap: Pointer to device reg map structure
+ * @table: Table containing register values
+ * @wait_ms_addr: Flag for performing delay
+ * @end_addr: Flag for incating end of table
+ *
+ * This is used to write register table into sensor's reg map.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_regmap_util_write_table_8(struct regmap *regmap,
+ const struct reg_8 table[],
+ u16 wait_ms_addr, u16 end_addr)
+{
+ int err;
+ const struct reg_8 *next;
+ u8 val;
+
+ int range_start = -1;
+ int range_count = 0;
+ u8 range_vals[16];
+ int max_range_vals = ARRAY_SIZE(range_vals);
+
+ for (next = table;; next++) {
+ if ((next->addr != range_start + range_count) ||
+ (next->addr == end_addr) ||
+ (next->addr == wait_ms_addr) ||
+ (range_count == max_range_vals)) {
+ if (range_count == 1)
+ err = regmap_write(regmap,
+ range_start, range_vals[0]);
+ else if (range_count > 1)
+ err = regmap_bulk_write(regmap, range_start,
+ &range_vals[0],
+ range_count);
+
+ if (err)
+ return err;
+
+ range_start = -1;
+ range_count = 0;
+
+ /* Handle special address values */
+ if (next->addr == end_addr)
+ break;
+
+ if (next->addr == wait_ms_addr) {
+ msleep_range(next->val);
+ continue;
+ }
+ }
+
+ val = next->val;
+
+ if (range_start == -1)
+ range_start = next->addr;
+
+ range_vals[range_count++] = val;
+ }
+ return 0;
+}
+
+static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
+{
+ int err;
+
+ err = regmap_read(priv->regmap, addr, (unsigned int *)val);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c read failed, addr = %x\n", __func__, addr);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x\n", __func__,
+ addr, *val);
+ return err;
+}
+
+static inline int imx274_write_reg(struct stimx274 *priv, u16 addr, u8 val)
+{
+ int err;
+
+ err = regmap_write(priv->regmap, addr, val);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c write failed, %x = %x\n", __func__,
+ addr, val);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x\n", __func__,
+ addr, val);
+ return err;
+}
+
+static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
+{
+ return imx274_regmap_util_write_table_8(priv->regmap,
+ table, IMX274_TABLE_WAIT_MS, IMX274_TABLE_END);
+}
+
+/*
+ * imx274_mode_regs - Function for set mode registers per mode index
+ * @priv: Pointer to device structure
+ * @mode: Mode index value
+ *
+ * This is used to start steam per mode index.
+ * mode = 0, start stream for sensor Mode 1: 4K/raw10
+ * mode = 1, start stream for sensor Mode 3: 1080p/raw10
+ * mode = 2, start stream for sensor Mode 5: 720p/raw10
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_mode_regs(struct stimx274 *priv, int mode)
+{
+ int err = 0;
+
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_1]);
+ if (err)
+ return err;
+
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_2]);
+ if (err)
+ return err;
+
+ err = imx274_write_table(priv, mode_table[mode]);
+
+ return err;
+}
+
+/*
+ * imx274_start_stream - Function for starting stream per mode index
+ * @priv: Pointer to device structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_start_stream(struct stimx274 *priv)
+{
+ int err = 0;
+
+ /*
+ * Refer to "Standby Cancel Sequence when using CSI-2" in
+ * imx274 datasheet, it should wait 10ms or more here.
+ * give it 1 extra ms for margin
+ */
+ msleep_range(11);
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_3]);
+ if (err)
+ return err;
+
+ /*
+ * Refer to "Standby Cancel Sequence when using CSI-2" in
+ * imx274 datasheet, it should wait 7ms or more here.
+ * give it 1 extra ms for margin
+ */
+ msleep_range(8);
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_4]);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * imx274_reset - Function called to reset the sensor
+ * @priv: Pointer to device structure
+ * @rst: Input value for determining the sensor's end state after reset
+ *
+ * Set the senor in reset and then
+ * if rst = 0, keep it in reset;
+ * if rst = 1, bring it out of reset.
+ *
+ */
+static void imx274_reset(struct stimx274 *priv, int rst)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(IMX274_RESET_DELAY1, IMX274_RESET_DELAY2);
+ gpiod_set_value_cansleep(priv->reset_gpio, !!rst);
+ usleep_range(IMX274_RESET_DELAY1, IMX274_RESET_DELAY2);
+}
+
+/**
+ * imx274_s_ctrl - This is used to set the imx274 V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the imx274 sensor.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct stimx274 *imx274 = to_imx274(sd);
+ int ret = -EINVAL;
+
+ dev_dbg(&imx274->client->dev,
+ "%s : s_ctrl: %s, value: %d\n", __func__,
+ ctrl->name, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_EXPOSURE\n", __func__);
+ ret = imx274_set_exposure(imx274, ctrl->val);
+ break;
+
+ case V4L2_CID_GAIN:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_GAIN\n", __func__);
+ ret = imx274_set_gain(imx274, ctrl);
+ break;
+
+ case V4L2_CID_VFLIP:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_VFLIP\n", __func__);
+ ret = imx274_set_vflip(imx274, ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_TEST_PATTERN\n", __func__);
+ ret = imx274_set_test_pattern(imx274, ctrl->val);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * imx274_get_fmt - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int imx274_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ mutex_lock(&imx274->lock);
+ fmt->format = imx274->format;
+ mutex_unlock(&imx274->lock);
+ return 0;
+}
+
+/**
+ * imx274_set_fmt - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @format: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct stimx274 *imx274 = to_imx274(sd);
+ struct i2c_client *client = imx274->client;
+ int index;
+
+ dev_dbg(&client->dev,
+ "%s: width = %d height = %d code = %d mbus_code = %d\n",
+ __func__, fmt->width, fmt->height, fmt->code,
+ imx274_formats[imx274->mode_index].mbus_code);
+
+ mutex_lock(&imx274->lock);
+
+ for (index = 0; index < ARRAY_SIZE(imx274_formats); index++) {
+ if (imx274_formats[index].size.width == fmt->width &&
+ imx274_formats[index].size.height == fmt->height)
+ break;
+ }
+
+ if (index >= ARRAY_SIZE(imx274_formats)) {
+ /* default to first format */
+ index = 0;
+ }
+
+ imx274->mode_index = index;
+
+ if (fmt->width > IMX274_MAX_WIDTH)
+ fmt->width = IMX274_MAX_WIDTH;
+ if (fmt->height > IMX274_MAX_HEIGHT)
+ fmt->height = IMX274_MAX_HEIGHT;
+ fmt->width = fmt->width & (~IMX274_MASK_LSB_2_BITS);
+ fmt->height = fmt->height & (~IMX274_MASK_LSB_2_BITS);
+ fmt->field = V4L2_FIELD_NONE;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ cfg->try_fmt = *fmt;
+ else
+ imx274->format = *fmt;
+
+ mutex_unlock(&imx274->lock);
+ return 0;
+}
+
+/**
+ * imx274_g_frame_interval - Get the frame interval
+ * @sd: Pointer to V4L2 Sub device structure
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to get the frame interval.
+ *
+ * Return: 0 on success
+ */
+static int imx274_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ fi->interval = imx274->frame_interval;
+ dev_dbg(&imx274->client->dev, "%s frame rate = %d / %d\n",
+ __func__, imx274->frame_interval.numerator,
+ imx274->frame_interval.denominator);
+
+ return 0;
+}
+
+/**
+ * imx274_s_frame_interval - Set the frame interval
+ * @sd: Pointer to V4L2 Sub device structure
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to set the frame intervavl.
+ *
+ * Return: 0 on success
+ */
+static int imx274_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ struct v4l2_ctrl *ctrl = imx274->ctrls.exposure;
+ int min, max, def;
+ int ret;
+
+ mutex_lock(&imx274->lock);
+ ret = imx274_set_frame_interval(imx274, fi->interval);
+
+ if (!ret) {
+ /*
+ * exposure time range is decided by frame interval
+ * need to update it after frame interal changes
+ */
+ min = IMX274_MIN_EXPOSURE_TIME;
+ max = fi->interval.numerator * 1000000
+ / fi->interval.denominator;
+ def = max;
+ if (__v4l2_ctrl_modify_range(ctrl, min, max, 1, def)) {
+ dev_err(&imx274->client->dev,
+ "Exposure ctrl range update failed\n");
+ goto unlock;
+ }
+
+ /* update exposure time accordingly */
+ imx274_set_exposure(imx274, imx274->ctrls.exposure->val);
+
+ dev_dbg(&imx274->client->dev, "set frame interval to %uus\n",
+ fi->interval.numerator * 1000000
+ / fi->interval.denominator);
+ }
+
+unlock:
+ mutex_unlock(&imx274->lock);
+
+ return ret;
+}
+
+/**
+ * imx274_load_default - load default control values
+ * @priv: Pointer to device structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_load_default(struct stimx274 *priv)
+{
+ int ret;
+
+ /* load default control values */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+ priv->ctrls.exposure->val = 1000000 / IMX274_DEF_FRAME_RATE;
+ priv->ctrls.gain->val = IMX274_DEF_GAIN;
+ priv->ctrls.vflip->val = 0;
+ priv->ctrls.test_pattern->val = TEST_PATTERN_DISABLED;
+
+ /* update frame rate */
+ ret = imx274_set_frame_interval(priv,
+ priv->frame_interval);
+ if (ret)
+ return ret;
+
+ /* update exposure time */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.exposure, priv->ctrls.exposure->val);
+ if (ret)
+ return ret;
+
+ /* update gain */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.gain, priv->ctrls.gain->val);
+ if (ret)
+ return ret;
+
+ /* update vflip */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.vflip, priv->ctrls.vflip->val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * imx274_s_stream - It is used to start/stop the streaming.
+ * @sd: V4L2 Sub device
+ * @on: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * imx274 sensor.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ int ret = 0;
+
+ dev_dbg(&imx274->client->dev, "%s : %s, mode index = %d\n", __func__,
+ on ? "Stream Start" : "Stream Stop", imx274->mode_index);
+
+ mutex_lock(&imx274->lock);
+
+ if (on) {
+ /* load mode registers */
+ ret = imx274_mode_regs(imx274, imx274->mode_index);
+ if (ret)
+ goto fail;
+
+ /*
+ * update frame rate & expsoure. if the last mode is different,
+ * HMAX could be changed. As the result, frame rate & exposure
+ * are changed.
+ * gain is not affected.
+ */
+ ret = imx274_set_frame_interval(imx274,
+ imx274->frame_interval);
+ if (ret)
+ goto fail;
+
+ /* update exposure time */
+ ret = __v4l2_ctrl_s_ctrl(imx274->ctrls.exposure,
+ imx274->ctrls.exposure->val);
+ if (ret)
+ goto fail;
+
+ /* start stream */
+ ret = imx274_start_stream(imx274);
+ if (ret)
+ goto fail;
+ } else {
+ /* stop stream */
+ ret = imx274_write_table(imx274,
+ mode_table[IMX274_MODE_STOP_STREAM]);
+ if (ret)
+ goto fail;
+ }
+
+ mutex_unlock(&imx274->lock);
+ dev_dbg(&imx274->client->dev,
+ "%s : Done: mode = %d\n", __func__, imx274->mode_index);
+ return 0;
+
+fail:
+ mutex_unlock(&imx274->lock);
+ dev_err(&imx274->client->dev, "s_stream failed\n");
+ return ret;
+}
+
+/*
+ * imx274_get_frame_length - Function for obtaining current frame length
+ * @priv: Pointer to device structure
+ * @val: Pointer to obainted value
+ *
+ * frame_length = vmax x (svr + 1), in unit of hmax.
+ *
+ * Return: 0 on success
+ */
+static int imx274_get_frame_length(struct stimx274 *priv, u32 *val)
+{
+ int err;
+ u16 svr;
+ u32 vmax;
+ u8 reg_val[3];
+
+ /* svr */
+ err = imx274_read_reg(priv, IMX274_SVR_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_SVR_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+
+ svr = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+
+ /* vmax */
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_3, &reg_val[0]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_2, &reg_val[1]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_1, &reg_val[2]);
+ if (err)
+ goto fail;
+
+ vmax = ((reg_val[2] & IMX274_MASK_LSB_3_BITS) << IMX274_SHIFT_16_BITS)
+ + (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+
+ *val = vmax * (svr + 1);
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+static int imx274_clamp_coarse_time(struct stimx274 *priv, u32 *val,
+ u32 *frame_length)
+{
+ int err;
+
+ err = imx274_get_frame_length(priv, frame_length);
+ if (err)
+ return err;
+
+ if (*frame_length < min_frame_len[priv->mode_index])
+ *frame_length = min_frame_len[priv->mode_index];
+
+ *val = *frame_length - *val; /* convert to raw shr */
+ if (*val > *frame_length - IMX274_SHR_LIMIT_CONST)
+ *val = *frame_length - IMX274_SHR_LIMIT_CONST;
+ else if (*val < min_SHR[priv->mode_index])
+ *val = min_SHR[priv->mode_index];
+
+ return 0;
+}
+
+/*
+ * imx274_set_digital gain - Function called when setting digital gain
+ * @priv: Pointer to device structure
+ * @dgain: Value of digital gain.
+ *
+ * Digital gain has only 4 steps: 1x, 2x, 4x, and 8x
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_digital_gain(struct stimx274 *priv, u32 dgain)
+{
+ u8 reg_val;
+
+ reg_val = ffs(dgain);
+
+ if (reg_val)
+ reg_val--;
+
+ reg_val = clamp(reg_val, (u8)0, (u8)3);
+
+ return imx274_write_reg(priv, IMX274_DIGITAL_GAIN_REG,
+ reg_val & IMX274_MASK_LSB_4_BITS);
+}
+
+static inline void imx274_calculate_gain_regs(struct reg_8 regs[2], u16 gain)
+{
+ regs->addr = IMX274_ANALOG_GAIN_ADDR_MSB;
+ regs->val = (gain >> IMX274_SHIFT_8_BITS) & IMX274_MASK_LSB_3_BITS;
+
+ (regs + 1)->addr = IMX274_ANALOG_GAIN_ADDR_LSB;
+ (regs + 1)->val = (gain) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_gain - Function called when setting gain
+ * @priv: Pointer to device structure
+ * @val: Value of gain. the real value = val << IMX274_GAIN_SHIFT;
+ * @ctrl: v4l2 control pointer
+ *
+ * Set the gain based on input value.
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl)
+{
+ struct reg_8 reg_list[2];
+ int err;
+ u32 gain, analog_gain, digital_gain, gain_reg;
+ int i;
+
+ gain = (u32)(ctrl->val);
+
+ dev_dbg(&priv->client->dev,
+ "%s : input gain = %d.%d\n", __func__,
+ gain >> IMX274_GAIN_SHIFT,
+ ((gain & IMX274_GAIN_SHIFT_MASK) * 100) >> IMX274_GAIN_SHIFT);
+
+ if (gain > IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN)
+ gain = IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN;
+ else if (gain < IMX274_MIN_GAIN)
+ gain = IMX274_MIN_GAIN;
+
+ if (gain <= IMX274_MAX_ANALOG_GAIN)
+ digital_gain = 1;
+ else if (gain <= IMX274_MAX_ANALOG_GAIN * 2)
+ digital_gain = 2;
+ else if (gain <= IMX274_MAX_ANALOG_GAIN * 4)
+ digital_gain = 4;
+ else
+ digital_gain = IMX274_MAX_DIGITAL_GAIN;
+
+ analog_gain = gain / digital_gain;
+
+ dev_dbg(&priv->client->dev,
+ "%s : digital gain = %d, analog gain = %d.%d\n",
+ __func__, digital_gain, analog_gain >> IMX274_GAIN_SHIFT,
+ ((analog_gain & IMX274_GAIN_SHIFT_MASK) * 100)
+ >> IMX274_GAIN_SHIFT);
+
+ err = imx274_set_digital_gain(priv, digital_gain);
+ if (err)
+ goto fail;
+
+ /* convert to register value, refer to imx274 datasheet */
+ gain_reg = (u32)IMX274_GAIN_CONST -
+ (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT) / analog_gain;
+ if (gain_reg > IMX274_GAIN_REG_MAX)
+ gain_reg = IMX274_GAIN_REG_MAX;
+
+ imx274_calculate_gain_regs(reg_list, (u16)gain_reg);
+
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ if (IMX274_GAIN_CONST - gain_reg == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ /* convert register value back to gain value */
+ ctrl->val = (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT)
+ / (IMX274_GAIN_CONST - gain_reg) * digital_gain;
+
+ dev_dbg(&priv->client->dev,
+ "%s : GAIN control success, gain_reg = %d, new gain = %d\n",
+ __func__, gain_reg, ctrl->val);
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+static inline void imx274_calculate_coarse_time_regs(struct reg_8 regs[2],
+ u32 coarse_time)
+{
+ regs->addr = IMX274_COARSE_TIME_ADDR_MSB;
+ regs->val = (coarse_time >> IMX274_SHIFT_8_BITS)
+ & IMX274_MASK_LSB_8_BITS;
+ (regs + 1)->addr = IMX274_COARSE_TIME_ADDR_LSB;
+ (regs + 1)->val = (coarse_time) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_coarse_time - Function called when setting SHR value
+ * @priv: Pointer to device structure
+ * @val: Value for exposure time in number of line_length, or [HMAX]
+ *
+ * Set SHR value based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_coarse_time(struct stimx274 *priv, u32 *val)
+{
+ struct reg_8 reg_list[2];
+ int err;
+ u32 coarse_time, frame_length;
+ int i;
+
+ coarse_time = *val;
+
+ /* convert exposure_time to appropriate SHR value */
+ err = imx274_clamp_coarse_time(priv, &coarse_time, &frame_length);
+ if (err)
+ goto fail;
+
+ /* prepare SHR registers */
+ imx274_calculate_coarse_time_regs(reg_list, coarse_time);
+
+ /* write to SHR registers */
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ *val = frame_length - coarse_time;
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+/*
+ * imx274_set_exposure - Function called when setting exposure time
+ * @priv: Pointer to device structure
+ * @val: Variable for exposure time, in the unit of micro-second
+ *
+ * Set exposure time based on input value.
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_exposure(struct stimx274 *priv, int val)
+{
+ int err;
+ u16 hmax;
+ u8 reg_val[2];
+ u32 coarse_time; /* exposure time in unit of line (HMAX)*/
+
+ dev_dbg(&priv->client->dev,
+ "%s : EXPOSURE control input = %d\n", __func__, val);
+
+ /* step 1: convert input exposure_time (val) into number of 1[HMAX] */
+
+ /* obtain HMAX value */
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ hmax = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ if (hmax == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ coarse_time = (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2 * val
+ - nocpiop[priv->mode_index]) / hmax;
+
+ /* step 2: convert exposure_time into SHR value */
+
+ /* set SHR */
+ err = imx274_set_coarse_time(priv, &coarse_time);
+ if (err)
+ goto fail;
+
+ priv->ctrls.exposure->val =
+ (coarse_time * hmax + nocpiop[priv->mode_index])
+ / (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2);
+
+ dev_dbg(&priv->client->dev,
+ "%s : EXPOSURE control success\n", __func__);
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+
+ return err;
+}
+
+/*
+ * imx274_set_vflip - Function called when setting vertical flip
+ * @priv: Pointer to device structure
+ * @val: Value for vflip setting
+ *
+ * Set vertical flip based on input value.
+ * val = 0: normal, no vertical flip
+ * val = 1: vertical flip enabled
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_vflip(struct stimx274 *priv, int val)
+{
+ int err;
+
+ err = imx274_write_reg(priv, IMX274_VFLIP_REG, val);
+ if (err) {
+ dev_err(&priv->client->dev, "VFILP control error\n");
+ return err;
+ }
+
+ dev_dbg(&priv->client->dev,
+ "%s : VFLIP control success\n", __func__);
+
+ return 0;
+}
+
+/*
+ * imx274_set_test_pattern - Function called when setting test pattern
+ * @priv: Pointer to device structure
+ * @val: Variable for test pattern
+ *
+ * Set to different test patterns based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_test_pattern(struct stimx274 *priv, int val)
+{
+ int err = 0;
+
+ if (val == TEST_PATTERN_DISABLED) {
+ err = imx274_write_table(priv, imx274_tp_disabled);
+ } else if (val <= TEST_PATTERN_V_COLOR_BARS) {
+ err = imx274_write_reg(priv, IMX274_TEST_PATTERN_REG, val - 1);
+ if (!err)
+ err = imx274_write_table(priv, imx274_tp_regs);
+ } else {
+ err = -EINVAL;
+ }
+
+ if (!err)
+ dev_dbg(&priv->client->dev,
+ "%s : TEST PATTERN control success\n", __func__);
+ else
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+
+ return err;
+}
+
+static inline void imx274_calculate_frame_length_regs(struct reg_8 regs[3],
+ u32 frame_length)
+{
+ regs->addr = IMX274_FRAME_LENGTH_ADDR_1;
+ regs->val = (frame_length >> IMX274_SHIFT_16_BITS)
+ & IMX274_MASK_LSB_4_BITS;
+ (regs + 1)->addr = IMX274_FRAME_LENGTH_ADDR_2;
+ (regs + 1)->val = (frame_length >> IMX274_SHIFT_8_BITS)
+ & IMX274_MASK_LSB_8_BITS;
+ (regs + 2)->addr = IMX274_FRAME_LENGTH_ADDR_3;
+ (regs + 2)->val = (frame_length) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_frame_length - Function called when setting frame length
+ * @priv: Pointer to device structure
+ * @val: Variable for frame length (= VMAX, i.e. vertical drive period length)
+ *
+ * Set frame length based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_frame_length(struct stimx274 *priv, u32 val)
+{
+ struct reg_8 reg_list[3];
+ int err;
+ u32 frame_length;
+ int i;
+
+ dev_dbg(&priv->client->dev, "%s : input length = %d\n",
+ __func__, val);
+
+ frame_length = (u32)val;
+
+ imx274_calculate_frame_length_regs(reg_list, frame_length);
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+/*
+ * imx274_set_frame_interval - Function called when setting frame interval
+ * @priv: Pointer to device structure
+ * @frame_interval: Variable for frame interval
+ *
+ * Change frame interval by updating VMAX value
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval)
+{
+ int err;
+ u32 frame_length, req_frame_rate;
+ u16 svr;
+ u16 hmax;
+ u8 reg_val[2];
+
+ dev_dbg(&priv->client->dev, "%s: input frame interval = %d / %d",
+ __func__, frame_interval.numerator,
+ frame_interval.denominator);
+
+ if (frame_interval.numerator == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ req_frame_rate = (u32)(frame_interval.denominator
+ / frame_interval.numerator);
+
+ /* boundary check */
+ if (req_frame_rate > max_frame_rate[priv->mode_index]) {
+ frame_interval.numerator = 1;
+ frame_interval.denominator =
+ max_frame_rate[priv->mode_index];
+ } else if (req_frame_rate < IMX274_MIN_FRAME_RATE) {
+ frame_interval.numerator = 1;
+ frame_interval.denominator = IMX274_MIN_FRAME_RATE;
+ }
+
+ /*
+ * VMAX = 1/frame_rate x 72M / (SVR+1) / HMAX
+ * frame_length (i.e. VMAX) = (frame_interval) x 72M /(SVR+1) / HMAX
+ */
+
+ /* SVR */
+ err = imx274_read_reg(priv, IMX274_SVR_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_SVR_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ svr = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ dev_dbg(&priv->client->dev,
+ "%s : register SVR = %d\n", __func__, svr);
+
+ /* HMAX */
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ hmax = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ dev_dbg(&priv->client->dev,
+ "%s : register HMAX = %d\n", __func__, hmax);
+
+ if (hmax == 0 || frame_interval.denominator == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ frame_length = IMX274_PIXCLK_CONST1 / (svr + 1) / hmax
+ * frame_interval.numerator
+ / frame_interval.denominator;
+
+ err = imx274_set_frame_length(priv, frame_length);
+ if (err)
+ goto fail;
+
+ priv->frame_interval = frame_interval;
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+static const struct v4l2_subdev_pad_ops imx274_pad_ops = {
+ .get_fmt = imx274_get_fmt,
+ .set_fmt = imx274_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops imx274_video_ops = {
+ .g_frame_interval = imx274_g_frame_interval,
+ .s_frame_interval = imx274_s_frame_interval,
+ .s_stream = imx274_s_stream,
+};
+
+static const struct v4l2_subdev_ops imx274_subdev_ops = {
+ .pad = &imx274_pad_ops,
+ .video = &imx274_video_ops,
+};
+
+static const struct v4l2_ctrl_ops imx274_ctrl_ops = {
+ .s_ctrl = imx274_s_ctrl,
+};
+
+static const struct of_device_id imx274_of_id_table[] = {
+ { .compatible = "sony,imx274" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx274_of_id_table);
+
+static const struct i2c_device_id imx274_id[] = {
+ { "IMX274", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, imx274_id);
+
+static int imx274_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct v4l2_subdev *sd;
+ struct stimx274 *imx274;
+ int ret;
+
+ /* initialize imx274 */
+ imx274 = devm_kzalloc(&client->dev, sizeof(*imx274), GFP_KERNEL);
+ if (!imx274)
+ return -ENOMEM;
+
+ mutex_init(&imx274->lock);
+
+ /* initialize regmap */
+ imx274->regmap = devm_regmap_init_i2c(client, &imx274_regmap_config);
+ if (IS_ERR(imx274->regmap)) {
+ dev_err(&client->dev,
+ "regmap init failed: %ld\n", PTR_ERR(imx274->regmap));
+ ret = -ENODEV;
+ goto err_regmap;
+ }
+
+ /* initialize subdevice */
+ imx274->client = client;
+ sd = &imx274->sd;
+ v4l2_i2c_subdev_init(sd, client, &imx274_subdev_ops);
+ strlcpy(sd->name, DRIVER_NAME, sizeof(sd->name));
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* initialize subdev media pad */
+ imx274->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sd->entity, 1, &imx274->pad);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : media entity init Failed %d\n", __func__, ret);
+ goto err_regmap;
+ }
+
+ /* initialize sensor reset gpio */
+ imx274->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(imx274->reset_gpio)) {
+ if (PTR_ERR(imx274->reset_gpio) != -EPROBE_DEFER)
+ dev_err(&client->dev, "Reset GPIO not setup in DT");
+ ret = PTR_ERR(imx274->reset_gpio);
+ goto err_me;
+ }
+
+ /* pull sensor out of reset */
+ imx274_reset(imx274, 1);
+
+ /* initialize controls */
+ ret = v4l2_ctrl_handler_init(&imx274->ctrls.handler, 2);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : ctrl handler init Failed\n", __func__);
+ goto err_me;
+ }
+
+ imx274->ctrls.handler.lock = &imx274->lock;
+
+ /* add new controls */
+ imx274->ctrls.test_pattern = v4l2_ctrl_new_std_menu_items(
+ &imx274->ctrls.handler, &imx274_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(tp_qmenu) - 1, 0, 0, tp_qmenu);
+
+ imx274->ctrls.gain = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_GAIN, IMX274_MIN_GAIN,
+ IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN, 1,
+ IMX274_DEF_GAIN);
+
+ imx274->ctrls.exposure = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_EXPOSURE, IMX274_MIN_EXPOSURE_TIME,
+ 1000000 / IMX274_DEF_FRAME_RATE, 1,
+ IMX274_MIN_EXPOSURE_TIME);
+
+ imx274->ctrls.vflip = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ imx274->sd.ctrl_handler = &imx274->ctrls.handler;
+ if (imx274->ctrls.handler.error) {
+ ret = imx274->ctrls.handler.error;
+ goto err_ctrls;
+ }
+
+ /* setup default controls */
+ ret = v4l2_ctrl_handler_setup(&imx274->ctrls.handler);
+ if (ret) {
+ dev_err(&client->dev,
+ "Error %d setup default controls\n", ret);
+ goto err_ctrls;
+ }
+
+ /* initialize format */
+ imx274->mode_index = IMX274_MODE_3840X2160;
+ imx274->format.width = imx274_formats[0].size.width;
+ imx274->format.height = imx274_formats[0].size.height;
+ imx274->format.field = V4L2_FIELD_NONE;
+ imx274->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ imx274->format.colorspace = V4L2_COLORSPACE_SRGB;
+ imx274->frame_interval.numerator = 1;
+ imx274->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+
+ /* load default control values */
+ ret = imx274_load_default(imx274);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s : imx274_load_default failed %d\n",
+ __func__, ret);
+ goto err_ctrls;
+ }
+
+ /* register subdevice */
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : v4l2_async_register_subdev failed %d\n",
+ __func__, ret);
+ goto err_ctrls;
+ }
+
+ dev_info(&client->dev, "imx274 : imx274 probe success !\n");
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(&imx274->ctrls.handler);
+err_me:
+ media_entity_cleanup(&sd->entity);
+err_regmap:
+ mutex_destroy(&imx274->lock);
+ return ret;
+}
+
+static int imx274_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ /* stop stream */
+ imx274_write_table(imx274, mode_table[IMX274_MODE_STOP_STREAM]);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&imx274->ctrls.handler);
+ media_entity_cleanup(&sd->entity);
+ mutex_destroy(&imx274->lock);
+ return 0;
+}
+
+static struct i2c_driver imx274_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = imx274_of_id_table,
+ },
+ .probe = imx274_probe,
+ .remove = imx274_remove,
+ .id_table = imx274_id,
+};
+
+module_i2c_driver(imx274_i2c_driver);
+
+MODULE_AUTHOR("Leon Luo <leonl@leopardimaging.com>");
+MODULE_DESCRIPTION("IMX274 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index a374e2a0ac3d..8b5f7d0435e4 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -460,7 +460,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
*/
rc->map_name = ir->ir_codes;
rc->allowed_protocols = rc_proto;
- rc->enabled_protocols = rc_proto;
if (!rc->driver_name)
rc->driver_name = MODULE_NAME;
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 251a2aaf98c3..b600e03aa94b 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -50,6 +50,7 @@ enum led_enable {
/**
* struct lm3560_flash
*
+ * @dev: pointer to &struct device
* @pdata: platform data
* @regmap: reg. map for i2c
* @lock: muxtex for serial access.
diff --git a/drivers/media/i2c/m5mols/m5mols_capture.c b/drivers/media/i2c/m5mols/m5mols_capture.c
index a0cd6dc32eb0..0fb457f57995 100644
--- a/drivers/media/i2c/m5mols/m5mols_capture.c
+++ b/drivers/media/i2c/m5mols/m5mols_capture.c
@@ -33,6 +33,10 @@
/**
* m5mols_read_rational - I2C read of a rational number
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @addr_num: numerator register
+ * @addr_den: denominator register
+ * @val: place to store the division result
*
* Read numerator and denominator from registers @addr_num and @addr_den
* respectively and return the division result in @val.
@@ -53,6 +57,7 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
/**
* m5mols_capture_info - Gather captured image information
+ * @info: M-5MOLS driver data structure
*
* For now it gathers only EXIF information and file size.
*/
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index c2218c0a9e6f..82eab7c2bc8c 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -126,6 +126,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
/**
* m5mols_do_scenemode() - Change current scenemode
+ * @info: M-5MOLS driver data structure
* @mode: Desired mode of the scenemode
*
* WARNING: The execution order is important. Do not change the order.
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 463534d44756..12e79f9e32d5 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -114,7 +114,8 @@ static const struct m5mols_resolution m5mols_reg_res[] = {
/**
* m5mols_swap_byte - an byte array to integer conversion function
- * @size: size in bytes of I2C packet defined in the M-5MOLS datasheet
+ * @data: byte array
+ * @length: size in bytes of I2C packet defined in the M-5MOLS datasheet
*
* Convert I2C data byte array with performing any required byte
* reordering to assure proper values for each data type, regardless
@@ -132,8 +133,9 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
/**
* m5mols_read - I2C read function
- * @reg: combination of size, category and command for the I2C packet
+ * @sd: sub-device, as pointed by struct v4l2_subdev
* @size: desired size of I2C packet
+ * @reg: combination of size, category and command for the I2C packet
* @val: read value
*
* Returns 0 on success, or else negative errno.
@@ -232,6 +234,7 @@ int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
/**
* m5mols_write - I2C command write function
+ * @sd: sub-device, as pointed by struct v4l2_subdev
* @reg: combination of size, category and command for the I2C packet
* @val: value to write
*
@@ -284,6 +287,7 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
/**
* m5mols_busy_wait - Busy waiting with I2C register polling
+ * @sd: sub-device, as pointed by struct v4l2_subdev
* @reg: the I2C_REG() address of an 8-bit status register to check
* @value: expected status register value
* @mask: bit mask for the read status register value
@@ -316,6 +320,8 @@ int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
/**
* m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @reg: combination of size, category and command for the I2C packet
*
* Before writing desired interrupt value the INT_FACTOR register should
* be read to clear pending interrupts.
@@ -349,6 +355,8 @@ int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 irq_mask, u32 timeout)
/**
* m5mols_reg_mode - Write the mode and check busy status
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @mode: the required operation mode
*
* It always accompanies a little delay changing the M-5MOLS mode, so it is
* needed checking current busy status to guarantee right mode.
@@ -364,6 +372,7 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
/**
* m5mols_set_mode - set the M-5MOLS controller mode
+ * @info: M-5MOLS driver data structure
* @mode: the required operation mode
*
* The commands of M-5MOLS are grouped into specific modes. Each functionality
@@ -421,6 +430,7 @@ int m5mols_set_mode(struct m5mols_info *info, u8 mode)
/**
* m5mols_get_version - retrieve full revisions information of M-5MOLS
+ * @sd: sub-device, as pointed by struct v4l2_subdev
*
* The version information includes revisions of hardware and firmware,
* AutoFocus alghorithm version and the version string.
@@ -489,6 +499,7 @@ static enum m5mols_restype __find_restype(u32 code)
/**
* __find_resolution - Lookup preset and type of M-5MOLS's resolution
+ * @sd: sub-device, as pointed by struct v4l2_subdev
* @mf: pixel format to find/negotiate the resolution preset for
* @type: M-5MOLS resolution type
* @resolution: M-5MOLS resolution preset register value
@@ -662,6 +673,7 @@ static const struct v4l2_subdev_pad_ops m5mols_pad_ops = {
/**
* m5mols_restore_controls - Apply current control values to the registers
+ * @info: M-5MOLS driver data structure
*
* m5mols_do_scenemode() handles all parameters for which there is yet no
* individual control. It should be replaced at some point by setting each
@@ -686,6 +698,7 @@ int m5mols_restore_controls(struct m5mols_info *info)
/**
* m5mols_start_monitor - Start the monitor mode
+ * @info: M-5MOLS driver data structure
*
* Before applying the controls setup the resolution and frame rate
* in PARAMETER mode, and then switch over to MONITOR mode.
@@ -789,6 +802,7 @@ int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
/**
* m5mols_fw_start - M-5MOLS internal ARM controller initialization
+ * @sd: sub-device, as pointed by struct v4l2_subdev
*
* Execute the M-5MOLS internal ARM controller initialization sequence.
* This function should be called after the supply voltage has been
@@ -844,6 +858,8 @@ static int m5mols_auto_focus_stop(struct m5mols_info *info)
/**
* m5mols_s_power - Main sensor power control function
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @on: if true, powers on the device; powers off otherwise.
*
* To prevent breaking the lens when the sensor is powered off the Soft-Landing
* algorithm is called where available. The Soft-Landing algorithm availability
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index bf0e821a2b93..2f1966bdc473 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1345,7 +1345,7 @@ static int max2175_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(sd, client, &max2175_ops);
ctx->client = client;
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
/* Controls */
hdl = &ctx->ctrl_hdl;
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 99b992e46702..b1665d97e0fd 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -945,7 +945,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
- return -EPROBE_DEFER;
+ return PTR_ERR(mt9m111->clk);
/* Default HIGHPOWER context */
mt9m111->ctx = &context_b;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index af7af0d14c69..bf7d06f3f21a 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -104,7 +104,6 @@ struct ov13858_reg_list {
/* Link frequency config */
struct ov13858_link_freq_config {
- u32 pixel_rate;
u32 pixels_per_line;
/* PLL registers for this link frequency */
@@ -238,11 +237,11 @@ static const struct ov13858_reg mode_4224x3136_regs[] = {
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0x00},
+ {0x3803, 0x08},
{0x3804, 0x10},
{0x3805, 0x9f},
{0x3806, 0x0c},
- {0x3807, 0x5f},
+ {0x3807, 0x57},
{0x3808, 0x10},
{0x3809, 0x80},
{0x380a, 0x0c},
@@ -948,6 +947,18 @@ static const char * const ov13858_test_pattern_menu[] = {
#define OV13858_LINK_FREQ_INDEX_0 0
#define OV13858_LINK_FREQ_INDEX_1 1
+/*
+ * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 4; bits per pixel => 10
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= 2 * 4;
+ do_div(f, 10);
+
+ return f;
+}
+
/* Menu items for LINK_FREQ V4L2 control */
static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
OV13858_LINK_FREQ_540MHZ,
@@ -958,8 +969,6 @@ static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
static const struct ov13858_link_freq_config
link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = {
{
- /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- .pixel_rate = (OV13858_LINK_FREQ_540MHZ * 2 * 4) / 10,
.pixels_per_line = OV13858_PPL_540MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps),
@@ -967,8 +976,6 @@ static const struct ov13858_link_freq_config
}
},
{
- /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- .pixel_rate = (OV13858_LINK_FREQ_270MHZ * 2 * 4) / 10,
.pixels_per_line = OV13858_PPL_270MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps),
@@ -1385,6 +1392,8 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
s32 vblank_def;
s32 vblank_min;
s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
mutex_lock(&ov13858->mutex);
@@ -1400,9 +1409,10 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
} else {
ov13858->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov13858->link_freq, mode->link_freq_index);
- __v4l2_ctrl_s_ctrl_int64(
- ov13858->pixel_rate,
- link_freq_configs[mode->link_freq_index].pixel_rate);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(ov13858->pixel_rate, pixel_rate);
+
/* Update limits and set FPS to default */
vblank_def = ov13858->cur_mode->vts_def -
ov13858->cur_mode->height;
@@ -1617,6 +1627,10 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
s64 exposure_max;
s64 vblank_def;
s64 vblank_min;
+ s64 hblank;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ const struct ov13858_mode *mode;
int ret;
ctrl_hdlr = &ov13858->ctrl_handler;
@@ -1634,29 +1648,30 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
link_freq_menu_items);
ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
/* By default, PIXEL_RATE is read only */
ov13858->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops,
- V4L2_CID_PIXEL_RATE, 0,
- link_freq_configs[0].pixel_rate, 1,
- link_freq_configs[0].pixel_rate);
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
- vblank_def = ov13858->cur_mode->vts_def - ov13858->cur_mode->height;
- vblank_min = ov13858->cur_mode->vts_min - ov13858->cur_mode->height;
+ mode = ov13858->cur_mode;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
ov13858->vblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK,
- vblank_min,
- OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
+ vblank_min, OV13858_VTS_MAX - mode->height, 1,
vblank_def);
+ hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
+ mode->width;
ov13858->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width,
- 1,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width);
+ hblank, hblank, 1, hblank);
ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- exposure_max = ov13858->cur_mode->vts_def - 8;
+ exposure_max = mode->vts_def - 8;
ov13858->exposure = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops,
V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN,
@@ -1746,7 +1761,7 @@ static int ov13858_probe(struct i2c_client *client,
goto error_handler_free;
}
- ret = v4l2_async_register_subdev(&ov13858->sd);
+ ret = v4l2_async_register_subdev_sensor_common(&ov13858->sd);
if (ret < 0)
goto error_media_entity;
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index e6d0c1f64f0b..518868388d65 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -685,7 +685,7 @@ static int ov2640_mask_set(struct i2c_client *client,
static int ov2640_reset(struct i2c_client *client)
{
int ret;
- const struct regval_list reset_seq[] = {
+ static const struct regval_list reset_seq[] = {
{BANK_SEL, BANK_SEL_SENS},
{COM7, COM7_SRST},
ENDMARKER,
@@ -1097,18 +1097,17 @@ static int ov2640_probe(struct i2c_client *client,
return -EIO;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov2640_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&adapter->dev,
- "Failed to allocate memory for private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
if (client->dev.of_node) {
priv->clk = devm_clk_get(&client->dev, "xvclk");
if (IS_ERR(priv->clk))
- return -EPROBE_DEFER;
- clk_prepare_enable(priv->clk);
+ return PTR_ERR(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
}
ret = ov2640_probe_dt(client, priv);
@@ -1116,7 +1115,7 @@ static int ov2640_probe(struct i2c_client *client,
goto err_clk;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
- priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_ctrl_handler_init(&priv->hdl, 2);
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 39a2269c0bee..c89ed6609738 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2271,7 +2271,7 @@ static int ov5640_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
- sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 95ce90fdb876..da39c49de503 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -35,9 +35,18 @@
#define SENSOR_NAME "ov5647"
-#define OV5647_SW_RESET 0x0103
-#define OV5647_REG_CHIPID_H 0x300A
-#define OV5647_REG_CHIPID_L 0x300B
+#define MIPI_CTRL00_CLOCK_LANE_GATE BIT(5)
+#define MIPI_CTRL00_BUS_IDLE BIT(2)
+#define MIPI_CTRL00_CLOCK_LANE_DISABLE BIT(0)
+
+#define OV5647_SW_STANDBY 0x0100
+#define OV5647_SW_RESET 0x0103
+#define OV5647_REG_CHIPID_H 0x300A
+#define OV5647_REG_CHIPID_L 0x300B
+#define OV5640_REG_PAD_OUT 0x300D
+#define OV5647_REG_FRAME_OFF_NUMBER 0x4202
+#define OV5647_REG_MIPI_CTRL00 0x4800
+#define OV5647_REG_MIPI_CTRL14 0x4814
#define REG_TERM 0xfffe
#define VAL_TERM 0xfe
@@ -241,34 +250,43 @@ static int ov5647_set_virtual_channel(struct v4l2_subdev *sd, int channel)
u8 channel_id;
int ret;
- ret = ov5647_read(sd, 0x4814, &channel_id);
+ ret = ov5647_read(sd, OV5647_REG_MIPI_CTRL14, &channel_id);
if (ret < 0)
return ret;
channel_id &= ~(3 << 6);
- return ov5647_write(sd, 0x4814, channel_id | (channel << 6));
+ return ov5647_write(sd, OV5647_REG_MIPI_CTRL14, channel_id | (channel << 6));
}
static int ov5647_stream_on(struct v4l2_subdev *sd)
{
int ret;
- ret = ov5647_write(sd, 0x4202, 0x00);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_BUS_IDLE);
if (ret < 0)
return ret;
- return ov5647_write(sd, 0x300D, 0x00);
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x00);
}
static int ov5647_stream_off(struct v4l2_subdev *sd)
{
int ret;
- ret = ov5647_write(sd, 0x4202, 0x0f);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_CLOCK_LANE_GATE
+ | MIPI_CTRL00_BUS_IDLE | MIPI_CTRL00_CLOCK_LANE_DISABLE);
if (ret < 0)
return ret;
- return ov5647_write(sd, 0x300D, 0x01);
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x0f);
+ if (ret < 0)
+ return ret;
+
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x01);
}
static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
@@ -276,7 +294,7 @@ static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
int ret;
u8 rdval;
- ret = ov5647_read(sd, 0x0100, &rdval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
if (ret < 0)
return ret;
@@ -285,7 +303,7 @@ static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
else
rdval |= 0x01;
- return ov5647_write(sd, 0x0100, rdval);
+ return ov5647_write(sd, OV5647_SW_STANDBY, rdval);
}
static int __sensor_init(struct v4l2_subdev *sd)
@@ -294,7 +312,7 @@ static int __sensor_init(struct v4l2_subdev *sd)
u8 resetval, rdval;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- ret = ov5647_read(sd, 0x0100, &rdval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
if (ret < 0)
return ret;
@@ -309,18 +327,21 @@ static int __sensor_init(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = ov5647_read(sd, 0x0100, &resetval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &resetval);
if (ret < 0)
return ret;
if (!(resetval & 0x01)) {
dev_err(&client->dev, "Device was in SW standby");
- ret = ov5647_write(sd, 0x0100, 0x01);
+ ret = ov5647_write(sd, OV5647_SW_STANDBY, 0x01);
if (ret < 0)
return ret;
}
- return ov5647_write(sd, 0x4800, 0x04);
+ /*
+ * stream off to make the clock lane into LP-11 state.
+ */
+ return ov5647_stream_off(sd);
}
static int ov5647_sensor_power(struct v4l2_subdev *sd, int on)
@@ -407,8 +428,8 @@ static int ov5647_sensor_set_register(struct v4l2_subdev *sd,
}
#endif
-/**
- * @short Subdev core operations registration
+/*
+ * Subdev core operations registration
*/
static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
.s_power = ov5647_sensor_power,
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 6f7a1d6d2200..9f9196568eb8 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -390,7 +390,10 @@ static const struct ov5670_reg mode_2592x1944_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_1296x972_regs[] = {
@@ -653,7 +656,10 @@ static const struct ov5670_reg mode_1296x972_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_648x486_regs[] = {
@@ -916,7 +922,10 @@ static const struct ov5670_reg mode_648x486_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_2560x1440_regs[] = {
@@ -1178,7 +1187,10 @@ static const struct ov5670_reg mode_2560x1440_regs[] = {
{0x5791, 0x06},
{0x5792, 0x00},
{0x5793, 0x52},
- {0x5794, 0xa3}
+ {0x5794, 0xa3},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_1280x720_regs[] = {
@@ -1441,7 +1453,10 @@ static const struct ov5670_reg mode_1280x720_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_640x360_regs[] = {
@@ -1704,7 +1719,10 @@ static const struct ov5670_reg mode_640x360_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const char * const ov5670_test_pattern_menu[] = {
@@ -2323,8 +2341,6 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
return ret;
}
- ov5670->streaming = true;
-
return 0;
}
@@ -2338,8 +2354,6 @@ static int ov5670_stop_streaming(struct ov5670 *ov5670)
if (ret)
dev_err(&client->dev, "%s failed to set stream\n", __func__);
- ov5670->streaming = false;
-
/* Return success even if it was an error, as there is nothing the
* caller can do about it.
*/
@@ -2370,6 +2384,7 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
ret = ov5670_stop_streaming(ov5670);
pm_runtime_put(&client->dev);
}
+ ov5670->streaming = enable;
goto unlock_and_return;
error:
@@ -2514,7 +2529,7 @@ static int ov5670_probe(struct i2c_client *client)
}
/* Async register for subdev */
- ret = v4l2_async_register_subdev(&ov5670->sd);
+ ret = v4l2_async_register_subdev_sensor_common(&ov5670->sd);
if (ret < 0) {
err_msg = "v4l2_async_register_subdev() error";
goto error_entity_cleanup;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 768f2950ea36..8975d16b2b24 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -951,11 +951,8 @@ static int ov6650_probe(struct i2c_client *client,
int ret;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev,
- "Failed to allocate memory for private data!\n");
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 13);
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e88549f0e704..950a0acf85fb 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -213,6 +213,9 @@ struct ov7670_devtype {
struct ov7670_format_struct; /* coming later */
struct ov7670_info {
struct v4l2_subdev sd;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
struct v4l2_ctrl_handler hdl;
struct {
/* gain cluster */
@@ -229,6 +232,7 @@ struct ov7670_info {
struct v4l2_ctrl *saturation;
struct v4l2_ctrl *hue;
};
+ struct v4l2_mbus_framefmt format;
struct ov7670_format_struct *fmt; /* Current format */
struct clk *clk;
struct gpio_desc *resetb_gpio;
@@ -972,6 +976,9 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
fmt->width = wsize->width;
fmt->height = wsize->height;
fmt->colorspace = ov7670_formats[index].colorspace;
+
+ info->format = *fmt;
+
return 0;
}
@@ -985,6 +992,9 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *mbus_fmt;
+#endif
unsigned char com7;
int ret;
@@ -995,8 +1005,13 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
if (ret)
return ret;
- cfg->try_fmt = format->format;
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ *mbus_fmt = format->format;
return 0;
+#else
+ return -ENOTTY;
+#endif
}
ret = ov7670_try_fmt_internal(sd, &format->format, &ovfmt, &wsize);
@@ -1038,6 +1053,30 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
return 0;
}
+static int ov7670_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov7670_info *info = to_state(sd);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *mbus_fmt;
+#endif
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *mbus_fmt;
+ return 0;
+#else
+ return -ENOTTY;
+#endif
+ } else {
+ format->format = info->format;
+ }
+
+ return 0;
+}
+
/*
* Implement G/S_PARM. There is a "high quality" mode we could try
* to do someday; for now, we just do the frame rate tweak.
@@ -1505,6 +1544,46 @@ static int ov7670_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regis
}
#endif
+static int ov7670_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ if (info->pwdn_gpio)
+ gpiod_set_value(info->pwdn_gpio, !on);
+ if (on && info->resetb_gpio) {
+ gpiod_set_value(info->resetb_gpio, 1);
+ usleep_range(500, 1000);
+ gpiod_set_value(info->resetb_gpio, 0);
+ usleep_range(3000, 5000);
+ }
+
+ return 0;
+}
+
+static void ov7670_get_default_format(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ format->width = info->devtype->win_sizes[0].width;
+ format->height = info->devtype->win_sizes[0].height;
+ format->colorspace = info->fmt->colorspace;
+ format->code = info->fmt->mbus_code;
+ format->field = V4L2_FIELD_NONE;
+}
+
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ ov7670_get_default_format(sd, format);
+
+ return 0;
+}
+#endif
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops ov7670_core_ops = {
@@ -1525,6 +1604,7 @@ static const struct v4l2_subdev_pad_ops ov7670_pad_ops = {
.enum_frame_interval = ov7670_enum_frame_interval,
.enum_frame_size = ov7670_enum_frame_size,
.enum_mbus_code = ov7670_enum_mbus_code,
+ .get_fmt = ov7670_get_fmt,
.set_fmt = ov7670_set_fmt,
};
@@ -1534,6 +1614,12 @@ static const struct v4l2_subdev_ops ov7670_ops = {
.pad = &ov7670_pad_ops,
};
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+static const struct v4l2_subdev_internal_ops ov7670_subdev_internal_ops = {
+ .open = ov7670_open,
+};
+#endif
+
/* ----------------------------------------------------------------------- */
static const struct ov7670_devtype ov7670_devdata[] = {
@@ -1586,6 +1672,11 @@ static int ov7670_probe(struct i2c_client *client,
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ sd->internal_ops = &ov7670_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+#endif
+
info->clock_speed = 30; /* default: a guess */
if (client->dev.platform_data) {
struct ov7670_config *config = client->dev.platform_data;
@@ -1619,29 +1710,34 @@ static int ov7670_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = ov7670_init_gpio(client, info);
- if (ret)
- goto clk_disable;
-
info->clock_speed = clk_get_rate(info->clk) / 1000000;
if (info->clock_speed < 10 || info->clock_speed > 48) {
ret = -EINVAL;
goto clk_disable;
}
+ ret = ov7670_init_gpio(client, info);
+ if (ret)
+ goto clk_disable;
+
+ ov7670_s_power(sd, 1);
+
/* Make sure it's an ov7670 */
ret = ov7670_detect(sd);
if (ret) {
v4l_dbg(1, debug, client,
"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
client->addr << 1, client->adapter->name);
- goto clk_disable;
+ goto power_off;
}
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
info->devtype = &ov7670_devdata[id->driver_data];
info->fmt = &ov7670_formats[0];
+
+ ov7670_get_default_format(sd, &info->format);
+
info->clkrc = 0;
/* Set default frame rate to 30 fps */
@@ -1688,16 +1784,31 @@ static int ov7670_probe(struct i2c_client *client,
v4l2_ctrl_auto_cluster(2, &info->auto_exposure,
V4L2_EXPOSURE_MANUAL, false);
v4l2_ctrl_cluster(2, &info->saturation);
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ info->pad.flags = MEDIA_PAD_FL_SOURCE;
+ info->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&info->sd.entity, 1, &info->pad);
+ if (ret < 0)
+ goto hdl_free;
+#endif
+
v4l2_ctrl_handler_setup(&info->hdl);
ret = v4l2_async_register_subdev(&info->sd);
if (ret < 0)
- goto hdl_free;
+ goto entity_cleanup;
return 0;
+entity_cleanup:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&info->sd.entity);
+#endif
hdl_free:
v4l2_ctrl_handler_free(&info->hdl);
+power_off:
+ ov7670_s_power(sd, 0);
clk_disable:
clk_disable_unprepare(info->clk);
return ret;
@@ -1712,6 +1823,10 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
clk_disable_unprepare(info->clk);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&info->sd.entity);
+#endif
+ ov7670_s_power(sd, 0);
return 0;
}
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 6ffb460e8589..69433e1e2533 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -985,7 +985,6 @@ static const struct v4l2_ctrl_ops ov965x_ctrl_ops = {
static const char * const test_pattern_menu[] = {
"Disabled",
"Color bars",
- NULL
};
static int ov965x_initialize_controls(struct ov965x *ov965x)
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 67dcca76f981..2e140272794b 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -53,6 +53,9 @@ enum {
* @gpio_reset: GPIO connected to the sensor's reset pin
* @lock: mutex protecting the structure's members below
* @format: media bus format at the sensor's source pad
+ * @clock: pointer to &struct clk.
+ * @clock_frequency: clock frequency
+ * @power_count: stores state if device is powered
*/
struct s5k6a3 {
struct device *dev;
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 9fd254a8e20d..13c10b5e2b45 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
/**
* s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
*
* Configure the internal ISP PLL for the required output frequency.
* Locking: called with s5k6aa.lock mutex held.
@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
/**
* s5k6aa_configure_video_bus - configure the video output interface
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
* @bus_type: video bus type: parallel or MIPI-CSI
* @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
*
@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
/**
* s5k6aa_set_prev_config - write user preview register set
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
+ * @preset: s5kaa preset to be applied
*
* Configure output resolution and color fromat, pixel clock
* frequency range, device frame rate type and frame period range.
@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
/**
* s5k6aa_initialize_isp - basic ISP MCU initialization
+ * @sd: pointer to V4L2 sub-device descriptor
*
* Configure AHB addresses for registers read/write; configure PLLs for
* required output pixel clock. The ISP power supply needs to be already
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 700f433261d0..e6b717b83b18 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1239,6 +1239,10 @@ static int smiapp_power_on(struct device *dev)
sleep = SMIAPP_RESET_DELAY(sensor->hwcfg->ext_clk);
usleep_range(sleep, sleep);
+ mutex_lock(&sensor->mutex);
+
+ sensor->active = true;
+
/*
* Failures to respond to the address change command have been noticed.
* Those failures seem to be caused by the sensor requiring a longer
@@ -1313,7 +1317,7 @@ static int smiapp_power_on(struct device *dev)
rval = smiapp_write(sensor, SMIAPP_REG_U8_DPHY_CTRL,
SMIAPP_DPHY_CTRL_UI);
if (rval < 0)
- return rval;
+ goto out_cci_addr_fail;
rval = smiapp_call_quirk(sensor, post_poweron);
if (rval) {
@@ -1321,28 +1325,28 @@ static int smiapp_power_on(struct device *dev)
goto out_cci_addr_fail;
}
- /* Are we still initialising...? If yes, return here. */
- if (!sensor->pixel_array)
- return 0;
+ /* Are we still initialising...? If not, proceed with control setup. */
+ if (sensor->pixel_array) {
+ rval = __v4l2_ctrl_handler_setup(
+ &sensor->pixel_array->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
- rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
+ rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
- rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
+ rval = smiapp_update_mode(sensor);
+ if (rval < 0)
+ goto out_cci_addr_fail;
+ }
- mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
mutex_unlock(&sensor->mutex);
- if (rval < 0)
- goto out_cci_addr_fail;
return 0;
out_cci_addr_fail:
-
+ mutex_unlock(&sensor->mutex);
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
@@ -1360,6 +1364,8 @@ static int smiapp_power_off(struct device *dev)
struct smiapp_sensor *sensor =
container_of(ssd, struct smiapp_sensor, ssds[0]);
+ mutex_lock(&sensor->mutex);
+
/*
* Currently power/clock to lens are enable/disabled separately
* but they are essentially the same signals. So if the sensor is
@@ -1372,6 +1378,10 @@ static int smiapp_power_off(struct device *dev)
SMIAPP_REG_U8_SOFTWARE_RESET,
SMIAPP_SOFTWARE_RESET);
+ sensor->active = false;
+
+ mutex_unlock(&sensor->mutex);
+
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
usleep_range(5000, 5000);
@@ -1381,29 +1391,6 @@ static int smiapp_power_off(struct device *dev)
return 0;
}
-static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
-{
- int rval;
-
- if (!on) {
- pm_runtime_mark_last_busy(subdev->dev);
- pm_runtime_put_autosuspend(subdev->dev);
-
- return 0;
- }
-
- rval = pm_runtime_get_sync(subdev->dev);
- if (rval >= 0)
- return 0;
-
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(subdev->dev);
-
- pm_runtime_put(subdev->dev);
-
- return rval;
-}
-
/* -----------------------------------------------------------------------------
* Video stream management
*/
@@ -1560,19 +1547,31 @@ out:
static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
if (sensor->streaming == enable)
return 0;
if (enable) {
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put(&client->dev);
+ return rval;
+ }
+
sensor->streaming = true;
+
rval = smiapp_start_streaming(sensor);
if (rval < 0)
sensor->streaming = false;
} else {
rval = smiapp_stop_streaming(sensor);
sensor->streaming = false;
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
}
return rval;
@@ -2650,7 +2649,6 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
struct smiapp_sensor *sensor = ssd->sensor;
unsigned int i;
- int rval;
mutex_lock(&sensor->mutex);
@@ -2677,22 +2675,6 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&sensor->mutex);
- rval = pm_runtime_get_sync(sd->dev);
- if (rval >= 0)
- return 0;
-
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(sd->dev);
- pm_runtime_put(sd->dev);
-
- return rval;
-}
-
-static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- pm_runtime_mark_last_busy(sd->dev);
- pm_runtime_put_autosuspend(sd->dev);
-
return 0;
}
@@ -2700,10 +2682,6 @@ static const struct v4l2_subdev_video_ops smiapp_video_ops = {
.s_stream = smiapp_set_stream,
};
-static const struct v4l2_subdev_core_ops smiapp_core_ops = {
- .s_power = smiapp_set_power,
-};
-
static const struct v4l2_subdev_pad_ops smiapp_pad_ops = {
.enum_mbus_code = smiapp_enum_mbus_code,
.get_fmt = smiapp_get_format,
@@ -2718,7 +2696,6 @@ static const struct v4l2_subdev_sensor_ops smiapp_sensor_ops = {
};
static const struct v4l2_subdev_ops smiapp_ops = {
- .core = &smiapp_core_ops,
.video = &smiapp_video_ops,
.pad = &smiapp_pad_ops,
.sensor = &smiapp_sensor_ops,
@@ -2732,12 +2709,10 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
.registered = smiapp_registered,
.unregistered = smiapp_unregistered,
.open = smiapp_open,
- .close = smiapp_close,
};
static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
.open = smiapp_open,
- .close = smiapp_close,
};
/* -----------------------------------------------------------------------------
@@ -2829,12 +2804,10 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
/* NVM size is not mandatory */
fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
- rval = fwnode_property_read_u32(fwnode, "clock-frequency",
+ rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
- if (rval) {
- dev_warn(dev, "can't get clock-frequency\n");
- goto out_err;
- }
+ if (rval)
+ dev_info(dev, "can't get clock-frequency\n");
dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
@@ -2894,18 +2867,46 @@ static int smiapp_probe(struct i2c_client *client,
}
sensor->ext_clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
+ if (PTR_ERR(sensor->ext_clk) == -ENOENT) {
+ dev_info(&client->dev, "no clock defined, continuing...\n");
+ sensor->ext_clk = NULL;
+ } else if (IS_ERR(sensor->ext_clk)) {
dev_err(&client->dev, "could not get clock (%ld)\n",
PTR_ERR(sensor->ext_clk));
return -EPROBE_DEFER;
}
- rval = clk_set_rate(sensor->ext_clk, sensor->hwcfg->ext_clk);
- if (rval < 0) {
- dev_err(&client->dev,
- "unable to set clock freq to %u\n",
+ if (sensor->ext_clk) {
+ if (sensor->hwcfg->ext_clk) {
+ unsigned long rate;
+
+ rval = clk_set_rate(sensor->ext_clk,
+ sensor->hwcfg->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock freq to %u\n",
+ sensor->hwcfg->ext_clk);
+ return rval;
+ }
+
+ rate = clk_get_rate(sensor->ext_clk);
+ if (rate != sensor->hwcfg->ext_clk) {
+ dev_err(&client->dev,
+ "can't set clock freq, asked for %u but got %lu\n",
+ sensor->hwcfg->ext_clk, rate);
+ return rval;
+ }
+ } else {
+ sensor->hwcfg->ext_clk = clk_get_rate(sensor->ext_clk);
+ dev_dbg(&client->dev, "obtained clock freq %u\n",
+ sensor->hwcfg->ext_clk);
+ }
+ } else if (sensor->hwcfg->ext_clk) {
+ dev_dbg(&client->dev, "assuming clock freq %u\n",
sensor->hwcfg->ext_clk);
- return rval;
+ } else {
+ dev_err(&client->dev, "unable to obtain clock freq\n");
+ return -EINVAL;
}
sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
@@ -3092,7 +3093,7 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;
- rval = v4l2_async_register_subdev(&sensor->src->sd);
+ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
if (rval < 0)
goto out_media_entity_cleanup;
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index d6779e35d36f..145653dc81da 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -231,6 +231,9 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
len != SMIAPP_REG_32BIT) || flags)
return -EINVAL;
+ if (!sensor->active)
+ return 0;
+
msg.addr = client->addr;
msg.flags = 0; /* Write */
msg.len = 2 + len;
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index f74d695018b9..e6a5ab402d7f 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -206,6 +206,7 @@ struct smiapp_sensor {
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
u8 frame_skip;
+ bool active; /* is the sensor powered on? */
u16 embedded_start; /* embedded data start line */
u16 embedded_end;
u16 image_start; /* image data start line */
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index 0146d1f7aacb..c63948989688 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -335,8 +335,8 @@ static void ov9640_res_roundup(u32 *width, u32 *height)
{
int i;
enum { QQCIF, QQVGA, QCIF, QVGA, CIF, VGA, SXGA };
- int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
- int res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
+ static const int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
+ static const int res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
for (i = 0; i < ARRAY_SIZE(res_x); i++) {
if (res_x[i] >= *width && res_y[i] >= *height) {
@@ -675,12 +675,9 @@ static int ov9640_probe(struct i2c_client *client,
return -EINVAL;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov9640_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev,
- "Failed to allocate memory for private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9640_subdev_ops);
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index cc07b7ae5407..755de2289c39 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -935,11 +935,9 @@ static int ov9740_probe(struct i2c_client *client,
return -EINVAL;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov9740_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev, "Failed to allocate private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9740_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 13);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index e6f5c363ccab..2b8181469b93 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -39,6 +39,7 @@
#include <linux/workqueue.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/hdmi.h>
+#include <media/cec.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -63,6 +64,7 @@ MODULE_LICENSE("GPL");
#define I2C_MAX_XFER_SIZE (EDID_BLOCK_SIZE + 2)
+#define POLL_INTERVAL_CEC_MS 10
#define POLL_INTERVAL_MS 1000
static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
@@ -106,6 +108,8 @@ struct tc358743_state {
u8 csi_lanes_in_use;
struct gpio_desc *reset_gpio;
+
+ struct cec_adapter *cec_adap;
};
static void tc358743_enable_interrupts(struct v4l2_subdev *sd,
@@ -595,6 +599,7 @@ static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
struct tc358743_platform_data *pdata = &state->pdata;
u32 sys_freq;
u32 lockdet_ref;
+ u32 cec_freq;
u16 fh_min;
u16 fh_max;
@@ -626,6 +631,15 @@ static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
i2c_wr8_and_or(sd, NCO_F0_MOD, ~MASK_NCO_F0_MOD,
(pdata->refclk_hz == 27000000) ?
MASK_NCO_F0_MOD_27MHZ : 0x0);
+
+ /*
+ * Trial and error suggests that the default register value
+ * of 656 is for a 42 MHz reference clock. Use that to derive
+ * a new value based on the actual reference clock.
+ */
+ cec_freq = (656 * sys_freq) / 4200;
+ i2c_wr16(sd, CECHCLK, cec_freq);
+ i2c_wr16(sd, CECLCLK, cec_freq);
}
static void tc358743_set_csi_color_space(struct v4l2_subdev *sd)
@@ -814,11 +828,17 @@ static void tc358743_initial_setup(struct v4l2_subdev *sd)
struct tc358743_state *state = to_state(sd);
struct tc358743_platform_data *pdata = &state->pdata;
- /* CEC and IR are not supported by this driver */
- i2c_wr16_and_or(sd, SYSCTL, ~(MASK_CECRST | MASK_IRRST),
- (MASK_CECRST | MASK_IRRST));
+ /*
+ * IR is not supported by this driver.
+ * CEC is only enabled if needed.
+ */
+ i2c_wr16_and_or(sd, SYSCTL, ~(MASK_IRRST | MASK_CECRST),
+ (MASK_IRRST | MASK_CECRST));
tc358743_reset(sd, MASK_CTXRST | MASK_HDMIRST);
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ tc358743_reset(sd, MASK_CECRST);
+#endif
tc358743_sleep_mode(sd, false);
i2c_wr16(sd, FIFOCTL, pdata->fifo_level);
@@ -842,6 +862,133 @@ static void tc358743_initial_setup(struct v4l2_subdev *sd)
i2c_wr8(sd, VOUT_SET3, MASK_VOUT_EXTCNT);
}
+/* --------------- CEC --------------- */
+
+#ifdef CONFIG_VIDEO_TC358743_CEC
+static int tc358743_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+
+ i2c_wr32(sd, CECIMSK, enable ? MASK_CECTIM | MASK_CECRIM : 0);
+ i2c_wr32(sd, CECICLR, MASK_CECTICLR | MASK_CECRICLR);
+ i2c_wr32(sd, CECEN, enable);
+ if (enable)
+ i2c_wr32(sd, CECREN, MASK_CECREN);
+ return 0;
+}
+
+static int tc358743_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ u32 reg;
+
+ reg = i2c_rd32(sd, CECRCTL1);
+ if (enable)
+ reg |= MASK_CECOTH;
+ else
+ reg &= ~MASK_CECOTH;
+ i2c_wr32(sd, CECRCTL1, reg);
+ return 0;
+}
+
+static int tc358743_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int la = 0;
+
+ if (log_addr != CEC_LOG_ADDR_INVALID) {
+ la = i2c_rd32(sd, CECADD);
+ la |= 1 << log_addr;
+ }
+ i2c_wr32(sd, CECADD, la);
+ return 0;
+}
+
+static int tc358743_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int i;
+
+ i2c_wr32(sd, CECTCTL,
+ (cec_msg_is_broadcast(msg) ? MASK_CECBRD : 0) |
+ (signal_free_time - 1));
+ for (i = 0; i < msg->len; i++)
+ i2c_wr32(sd, CECTBUF1 + i * 4,
+ msg->msg[i] | ((i == msg->len - 1) ? MASK_CECTEOM : 0));
+ i2c_wr32(sd, CECTEN, MASK_CECTEN);
+ return 0;
+}
+
+static const struct cec_adap_ops tc358743_cec_adap_ops = {
+ .adap_enable = tc358743_cec_adap_enable,
+ .adap_log_addr = tc358743_cec_adap_log_addr,
+ .adap_transmit = tc358743_cec_adap_transmit,
+ .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
+};
+
+static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+ bool *handled)
+{
+ struct tc358743_state *state = to_state(sd);
+ unsigned int cec_rxint, cec_txint;
+ unsigned int clr = 0;
+
+ cec_rxint = i2c_rd32(sd, CECRSTAT);
+ cec_txint = i2c_rd32(sd, CECTSTAT);
+
+ if (intstatus & MASK_CEC_RINT)
+ clr |= MASK_CECRICLR;
+ if (intstatus & MASK_CEC_TINT)
+ clr |= MASK_CECTICLR;
+ i2c_wr32(sd, CECICLR, clr);
+
+ if ((intstatus & MASK_CEC_TINT) && cec_txint) {
+ if (cec_txint & MASK_CECTIEND)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_OK);
+ else if (cec_txint & MASK_CECTIAL)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_ARB_LOST);
+ else if (cec_txint & MASK_CECTIACK)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_NACK);
+ else if (cec_txint & MASK_CECTIUR) {
+ /*
+ * Not sure when this bit is set. Treat
+ * it as an error for now.
+ */
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_ERROR);
+ }
+ *handled = true;
+ }
+ if ((intstatus & MASK_CEC_RINT) &&
+ (cec_rxint & MASK_CECRIEND)) {
+ struct cec_msg msg = {};
+ unsigned int i;
+ unsigned int v;
+
+ v = i2c_rd32(sd, CECRCTR);
+ msg.len = v & 0x1f;
+ for (i = 0; i < msg.len; i++) {
+ v = i2c_rd32(sd, CECRBUF1 + i * 4);
+ msg.msg[i] = v & 0xff;
+ }
+ cec_received_msg(state->cec_adap, &msg);
+ *handled = true;
+ }
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+}
+
+#endif
+
/* --------------- IRQ --------------- */
static void tc358743_format_change(struct v4l2_subdev *sd)
@@ -1296,6 +1443,15 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
intstatus &= ~MASK_HDMI_INT;
}
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
+ tc358743_cec_isr(sd, intstatus, handled);
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+ intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
+ }
+#endif
+
if (intstatus & MASK_CSI_INT) {
u32 csi_int = i2c_rd32(sd, CSI_INT);
@@ -1325,13 +1481,18 @@ static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static void tc358743_irq_poll_timer(unsigned long arg)
+static void tc358743_irq_poll_timer(struct timer_list *t)
{
- struct tc358743_state *state = (struct tc358743_state *)arg;
+ struct tc358743_state *state = from_timer(state, t, timer);
+ unsigned int msecs;
schedule_work(&state->work_i2c_poll);
-
- mod_timer(&state->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
+ /*
+ * If CEC is present, then we need to poll more frequently,
+ * otherwise we will miss CEC messages.
+ */
+ msecs = state->cec_adap ? POLL_INTERVAL_CEC_MS : POLL_INTERVAL_MS;
+ mod_timer(&state->timer, jiffies + msecs_to_jiffies(msecs));
}
static void tc358743_work_i2c_poll(struct work_struct *work)
@@ -1488,7 +1649,7 @@ static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
{
enable_stream(sd, enable);
if (!enable) {
- /* Put all lanes in PL-11 state (STOPSTATE) */
+ /* Put all lanes in LP-11 state (STOPSTATE) */
tc358743_set_csi(sd);
}
@@ -1621,6 +1782,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
{
struct tc358743_state *state = to_state(sd);
u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+ u16 pa;
+ int err;
int i;
v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
@@ -1638,6 +1801,12 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
edid->blocks = EDID_NUM_BLOCKS_MAX;
return -E2BIG;
}
+ pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
+ err = cec_phys_addr_validate(pa, &pa, NULL);
+ if (err)
+ return err;
+
+ cec_phys_addr_invalidate(state->cec_adap);
tc358743_disable_edid(sd);
@@ -1654,6 +1823,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
state->edid_blocks_written = edid->blocks;
+ cec_s_phys_addr(state->cec_adap, pa, false);
+
if (tx_5v_power_present(sd))
tc358743_enable_edid(sd);
@@ -1770,6 +1941,11 @@ static int tc358743_probe_of(struct tc358743_state *state)
goto free_endpoint;
}
+ if (endpoint->bus.mipi_csi2.num_data_lanes > 4) {
+ dev_err(dev, "invalid number of lanes\n");
+ goto free_endpoint;
+ }
+
state->bus = endpoint->bus.mipi_csi2;
ret = clk_prepare_enable(refclk);
@@ -1867,6 +2043,7 @@ static int tc358743_probe(struct i2c_client *client,
struct tc358743_state *state;
struct tc358743_platform_data *pdata = client->dev.platform_data;
struct v4l2_subdev *sd;
+ u16 irq_mask = MASK_HDMI_MSK | MASK_CSI_MSK;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -1929,6 +2106,7 @@ static int tc358743_probe(struct i2c_client *client,
}
state->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err < 0)
goto err_hdl;
@@ -1945,6 +2123,17 @@ static int tc358743_probe(struct i2c_client *client,
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
tc358743_delayed_work_enable_hotplug);
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ state->cec_adap = cec_allocate_adapter(&tc358743_cec_adap_ops,
+ state, dev_name(&client->dev),
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL, CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(state->cec_adap)) {
+ err = PTR_ERR(state->cec_adap);
+ goto err_hdl;
+ }
+ irq_mask |= MASK_CEC_RMSK | MASK_CEC_TMSK;
+#endif
+
tc358743_initial_setup(sd);
tc358743_s_dv_timings(sd, &default_timing);
@@ -1964,15 +2153,22 @@ static int tc358743_probe(struct i2c_client *client,
} else {
INIT_WORK(&state->work_i2c_poll,
tc358743_work_i2c_poll);
- state->timer.data = (unsigned long)state;
- state->timer.function = tc358743_irq_poll_timer;
+ timer_setup(&state->timer, tc358743_irq_poll_timer, 0);
state->timer.expires = jiffies +
msecs_to_jiffies(POLL_INTERVAL_MS);
add_timer(&state->timer);
}
+ err = cec_register_adapter(state->cec_adap, &client->dev);
+ if (err < 0) {
+ pr_err("%s: failed to register the cec device\n", __func__);
+ cec_delete_adapter(state->cec_adap);
+ state->cec_adap = NULL;
+ goto err_work_queues;
+ }
+
tc358743_enable_interrupts(sd, tx_5v_power_present(sd));
- i2c_wr16(sd, INTMASK, ~(MASK_HDMI_MSK | MASK_CSI_MSK) & 0xffff);
+ i2c_wr16(sd, INTMASK, ~irq_mask);
err = v4l2_ctrl_handler_setup(sd->ctrl_handler);
if (err)
@@ -1984,6 +2180,7 @@ static int tc358743_probe(struct i2c_client *client,
return 0;
err_work_queues:
+ cec_unregister_adapter(state->cec_adap);
if (!state->i2c_client->irq)
flush_work(&state->work_i2c_poll);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
@@ -2004,6 +2201,7 @@ static int tc358743_remove(struct i2c_client *client)
flush_work(&state->work_i2c_poll);
}
cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cec_unregister_adapter(state->cec_adap);
v4l2_async_unregister_subdev(sd);
v4l2_device_unregister_subdev(sd);
mutex_destroy(&state->confctl_mutex);
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
index 657ef50f215f..227b46471793 100644
--- a/drivers/media/i2c/tc358743_regs.h
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -193,8 +193,98 @@
#define CSI_START 0x0518
#define MASK_STRT 0x00000001
-#define CECEN 0x0600
-#define MASK_CECEN 0x0001
+/* *** CEC (32 bit) *** */
+#define CECHCLK 0x0028 /* 16 bits */
+#define MASK_CECHCLK (0x7ff << 0)
+
+#define CECLCLK 0x002a /* 16 bits */
+#define MASK_CECLCLK (0x7ff << 0)
+
+#define CECEN 0x0600
+#define MASK_CECEN 0x0001
+
+#define CECADD 0x0604
+#define CECRST 0x0608
+#define MASK_CECRESET 0x0001
+
+#define CECREN 0x060c
+#define MASK_CECREN 0x0001
+
+#define CECRCTL1 0x0614
+#define MASK_CECACKDIS (1 << 24)
+#define MASK_CECHNC (3 << 20)
+#define MASK_CECLNC (7 << 16)
+#define MASK_CECMIN (7 << 12)
+#define MASK_CECMAX (7 << 8)
+#define MASK_CECDAT (7 << 4)
+#define MASK_CECTOUT (3 << 2)
+#define MASK_CECRIHLD (1 << 1)
+#define MASK_CECOTH (1 << 0)
+
+#define CECRCTL2 0x0618
+#define MASK_CECSWAV3 (7 << 12)
+#define MASK_CECSWAV2 (7 << 8)
+#define MASK_CECSWAV1 (7 << 4)
+#define MASK_CECSWAV0 (7 << 0)
+
+#define CECRCTL3 0x061c
+#define MASK_CECWAV3 (7 << 20)
+#define MASK_CECWAV2 (7 << 16)
+#define MASK_CECWAV1 (7 << 12)
+#define MASK_CECWAV0 (7 << 8)
+#define MASK_CECACKEI (1 << 4)
+#define MASK_CECMINEI (1 << 3)
+#define MASK_CECMAXEI (1 << 2)
+#define MASK_CECRSTEI (1 << 1)
+#define MASK_CECWAVEI (1 << 0)
+
+#define CECTEN 0x0620
+#define MASK_CECTBUSY (1 << 1)
+#define MASK_CECTEN (1 << 0)
+
+#define CECTCTL 0x0628
+#define MASK_CECSTRS (7 << 20)
+#define MASK_CECSPRD (7 << 16)
+#define MASK_CECDTRS (7 << 12)
+#define MASK_CECDPRD (15 << 8)
+#define MASK_CECBRD (1 << 4)
+#define MASK_CECFREE (15 << 0)
+
+#define CECRSTAT 0x062c
+#define MASK_CECRIWA (1 << 6)
+#define MASK_CECRIOR (1 << 5)
+#define MASK_CECRIACK (1 << 4)
+#define MASK_CECRIMIN (1 << 3)
+#define MASK_CECRIMAX (1 << 2)
+#define MASK_CECRISTA (1 << 1)
+#define MASK_CECRIEND (1 << 0)
+
+#define CECTSTAT 0x0630
+#define MASK_CECTIUR (1 << 4)
+#define MASK_CECTIACK (1 << 3)
+#define MASK_CECTIAL (1 << 2)
+#define MASK_CECTIEND (1 << 1)
+
+#define CECRBUF1 0x0634
+#define MASK_CECRACK (1 << 9)
+#define MASK_CECEOM (1 << 8)
+#define MASK_CECRBYTE (0xff << 0)
+
+#define CECTBUF1 0x0674
+#define MASK_CECTEOM (1 << 8)
+#define MASK_CECTBYTE (0xff << 0)
+
+#define CECRCTR 0x06b4
+#define MASK_CECRCTR (0x1f << 0)
+
+#define CECIMSK 0x06c0
+#define MASK_CECTIM (1 << 1)
+#define MASK_CECRIM (1 << 0)
+
+#define CECICLR 0x06cc
+#define MASK_CECTICLR (1 << 1)
+#define MASK_CECRICLR (1 << 0)
+
#define HDMI_INT0 0x8500
#define MASK_I_KEY 0x80
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index ce86534450ac..16a1e08ce06c 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -300,9 +300,9 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
* if available, ...
*/
-static void chip_thread_wake(unsigned long data)
+static void chip_thread_wake(struct timer_list *t)
{
- struct CHIPSTATE *chip = (struct CHIPSTATE*)data;
+ struct CHIPSTATE *chip = from_timer(chip, t, wt);
wake_up_process(chip->thread);
}
@@ -1995,7 +1995,7 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_ctrl_handler_setup(&chip->hdl);
chip->thread = NULL;
- init_timer(&chip->wt);
+ timer_setup(&chip->wt, chip_thread_wake, 0);
if (desc->flags & CHIP_NEED_CHECKMODE) {
if (!desc->getrxsubchans || !desc->setaudmode) {
/* This shouldn't be happen. Warn user, but keep working
@@ -2005,8 +2005,6 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
return 0;
}
/* start async thread */
- chip->wt.function = chip_thread_wake;
- chip->wt.data = (unsigned long)chip;
chip->thread = kthread_run(chip_thread, chip, "%s",
client->name);
if (IS_ERR(chip->thread)) {
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index ad2df998f9c5..d575b3e7e835 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -86,6 +86,7 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
/**
* struct tvp514x_decoder - TVP5146/47 decoder object
* @sd: Subdevice Slave handle
+ * @hdl: embedded &struct v4l2_ctrl_handler
* @tvp514x_regs: copy of hw's regs with preset values.
* @pdata: Board specific
* @ver: Chip version
@@ -98,6 +99,9 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
* @std_list: Standards list
* @input: Input routing at chip level
* @output: Output routing at chip level
+ * @pad: subdev media pad associated with the decoder
+ * @format: media bus frame format
+ * @int_seq: driver's register init sequence
*/
struct tvp514x_decoder {
struct v4l2_subdev sd;
@@ -211,7 +215,7 @@ static struct tvp514x_reg tvp514x_reg_list_default[] = {
{TOK_TERM, 0, 0},
};
-/**
+/*
* List of image formats supported by TVP5146/47 decoder
* Currently we are using 8 bit mode only, but can be
* extended to 10/20 bit mode.
@@ -226,7 +230,7 @@ static const struct v4l2_fmtdesc tvp514x_fmt_list[] = {
},
};
-/**
+/*
* Supported standards -
*
* Currently supports two standards only, need to add support for rest of the
@@ -931,7 +935,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
* tvp514x_set_pad_format() - V4L2 decoder interface handler for set pad format
* @sd: pointer to standard V4L2 sub-device structure
* @cfg: pad configuration
- * @format: pointer to v4l2_subdev_format structure
+ * @fmt: pointer to v4l2_subdev_format structure
*
* Set pad format for the output pad
*/
@@ -1199,7 +1203,7 @@ static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
{TOK_TERM, 0, 0},
};
-/**
+/*
* I2C Device Table -
*
* name - Name of the actual device/chip.
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 2ace0410d277..f7c6d64e6031 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -214,12 +214,20 @@ void media_gobj_destroy(struct media_gobj *gobj)
gobj->mdev = NULL;
}
+/*
+ * TODO: Get rid of this.
+ */
+#define MEDIA_ENTITY_MAX_PADS 512
+
int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
struct media_pad *pads)
{
struct media_device *mdev = entity->graph_obj.mdev;
unsigned int i;
+ if (num_pads >= MEDIA_ENTITY_MAX_PADS)
+ return -E2BIG;
+
entity->num_pads = num_pads;
entity->pads = pads;
@@ -280,11 +288,6 @@ static struct media_entity *stack_pop(struct media_graph *graph)
#define link_top(en) ((en)->stack[(en)->top].link)
#define stack_top(en) ((en)->stack[(en)->top].entity)
-/*
- * TODO: Get rid of this.
- */
-#define MEDIA_ENTITY_MAX_PADS 512
-
/**
* media_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
diff --git a/drivers/media/pci/b2c2/Kconfig b/drivers/media/pci/b2c2/Kconfig
index 58761a21caa0..7b818d445f39 100644
--- a/drivers/media/pci/b2c2/Kconfig
+++ b/drivers/media/pci/b2c2/Kconfig
@@ -11,5 +11,5 @@ config DVB_B2C2_FLEXCOP_PCI_DEBUG
depends on DVB_B2C2_FLEXCOP_PCI
select DVB_B2C2_FLEXCOP_DEBUG
help
- Say Y if you want to enable the module option to control debug messages
- of all B2C2 FlexCop drivers.
+ Say Y if you want to enable the module option to control debug messages
+ of all B2C2 FlexCop drivers.
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 227086a2e99c..b366a7e1d976 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3652,9 +3652,9 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
wake_up(&wakeup->vb.done);
}
-static void bttv_irq_timeout(unsigned long data)
+static void bttv_irq_timeout(struct timer_list *t)
{
- struct bttv *btv = (struct bttv *)data;
+ struct bttv *btv = from_timer(btv, t, timeout);
struct bttv_buffer_set old,new;
struct bttv_buffer *ovbi;
struct bttv_buffer *item;
@@ -4043,7 +4043,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
INIT_LIST_HEAD(&btv->capture);
INIT_LIST_HEAD(&btv->vcapture);
- setup_timer(&btv->timeout, bttv_irq_timeout, (unsigned long)btv);
+ timer_setup(&btv->timeout, bttv_irq_timeout, 0);
btv->i2c_rc = -1;
btv->tuner_type = UNSET;
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 73d655d073d6..ac7674700685 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -133,10 +133,10 @@ void bttv_input_irq(struct bttv *btv)
ir_handle_key(btv);
}
-static void bttv_input_timer(unsigned long data)
+static void bttv_input_timer(struct timer_list *t)
{
- struct bttv *btv = (struct bttv*)data;
- struct bttv_ir *ir = btv->remote;
+ struct bttv_ir *ir = from_timer(ir, t, timer);
+ struct bttv *btv = ir->btv;
if (btv->c.type == BTTV_BOARD_ENLTV_FM_2)
ir_enltv_handle_key(btv);
@@ -189,9 +189,9 @@ static u32 bttv_rc5_decode(unsigned int code)
return rc5;
}
-static void bttv_rc5_timer_end(unsigned long data)
+static void bttv_rc5_timer_end(struct timer_list *t)
{
- struct bttv_ir *ir = (struct bttv_ir *)data;
+ struct bttv_ir *ir = from_timer(ir, t, timer);
ktime_t tv;
u32 gap, rc5, scancode;
u8 toggle, command, system;
@@ -296,15 +296,15 @@ static int bttv_rc5_irq(struct bttv *btv)
/* ---------------------------------------------------------------------- */
-static void bttv_ir_start(struct bttv *btv, struct bttv_ir *ir)
+static void bttv_ir_start(struct bttv_ir *ir)
{
if (ir->polling) {
- setup_timer(&ir->timer, bttv_input_timer, (unsigned long)btv);
+ timer_setup(&ir->timer, bttv_input_timer, 0);
ir->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&ir->timer);
} else if (ir->rc5_gpio) {
/* set timer_end for code completion */
- setup_timer(&ir->timer, bttv_rc5_timer_end, (unsigned long)ir);
+ timer_setup(&ir->timer, bttv_rc5_timer_end, 0);
ir->shift_by = 1;
ir->rc5_remote_gap = ir_rc5_remote_gap;
}
@@ -531,6 +531,7 @@ int bttv_input_init(struct bttv *btv)
/* init input device */
ir->dev = rc;
+ ir->btv = btv;
snprintf(ir->name, sizeof(ir->name), "bttv IR (card=%d)",
btv->c.type);
@@ -553,7 +554,7 @@ int bttv_input_init(struct bttv *btv)
rc->driver_name = MODULE_NAME;
btv->remote = ir;
- bttv_ir_start(btv, ir);
+ bttv_ir_start(ir);
/* all done */
err = rc_register_device(rc);
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index e77129c92fa0..67c6583f1d79 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -233,7 +233,7 @@ static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer
bttv_dma_free(q,fh->btv,buf);
}
-struct videobuf_queue_ops bttv_vbi_qops = {
+const struct videobuf_queue_ops bttv_vbi_qops = {
.buf_setup = vbi_buffer_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 9efc4559fa8e..cb1b5e611130 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -122,6 +122,7 @@ struct bttv_format {
struct bttv_ir {
struct rc_dev *dev;
+ struct bttv *btv;
struct timer_list timer;
char name[32];
@@ -281,7 +282,7 @@ int bttv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
-extern struct videobuf_queue_ops bttv_vbi_qops;
+extern const struct videobuf_queue_ops bttv_vbi_qops;
/* ---------------------------------------------------------- */
/* bttv-gpio.c */
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 98b6cb9505d1..3f16cf3f6d74 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -738,9 +738,6 @@ static int cobalt_probe(struct pci_dev *pci_dev,
goto err_i2c;
}
- retval = v4l2_device_register_subdev_nodes(&cobalt->v4l2_dev);
- if (retval)
- goto err_i2c;
retval = cobalt_nodes_register(cobalt);
if (retval) {
cobalt_err("Error %d registering device nodes\n", retval);
@@ -767,8 +764,6 @@ err_pci:
err_wq:
destroy_workqueue(cobalt->irq_work_queues);
err:
- if (retval == 0)
- retval = -ENODEV;
cobalt_err("error %d on initialization\n", retval);
v4l2_device_unregister(&cobalt->v4l2_dev);
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 8654710464cc..8f314ca320c7 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -255,7 +255,7 @@ static void request_module_async(struct work_struct *work)
request_module("cx18-alsa");
/* Initialize cx18-alsa for this instance of the cx18 device */
- if (cx18_ext_init != NULL)
+ if (cx18_ext_init)
cx18_ext_init(dev);
}
@@ -291,11 +291,11 @@ int cx18_msleep_timeout(unsigned int msecs, int intr)
/* Release ioremapped memory */
static void cx18_iounmap(struct cx18 *cx)
{
- if (cx == NULL)
+ if (!cx)
return;
/* Release io memory */
- if (cx->enc_mem != NULL) {
+ if (cx->enc_mem) {
CX18_DEBUG_INFO("releasing enc_mem\n");
iounmap(cx->enc_mem);
cx->enc_mem = NULL;
@@ -649,15 +649,15 @@ static void cx18_process_options(struct cx18 *cx)
CX18_INFO("User specified %s card\n", cx->card->name);
else if (cx->options.cardtype != 0)
CX18_ERR("Unknown user specified type, trying to autodetect card\n");
- if (cx->card == NULL) {
+ if (!cx->card) {
if (cx->pci_dev->subsystem_vendor == CX18_PCI_ID_HAUPPAUGE) {
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
CX18_INFO("Autodetected Hauppauge card\n");
}
}
- if (cx->card == NULL) {
+ if (!cx->card) {
for (i = 0; (cx->card = cx18_get_card(i)); i++) {
- if (cx->card->pci_list == NULL)
+ if (!cx->card->pci_list)
continue;
for (j = 0; cx->card->pci_list[j].device; j++) {
if (cx->pci_dev->device !=
@@ -676,7 +676,7 @@ static void cx18_process_options(struct cx18 *cx)
}
done:
- if (cx->card == NULL) {
+ if (!cx->card) {
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
CX18_ERR("Unknown card: vendor/device: [%04x:%04x]\n",
cx->pci_dev->vendor, cx->pci_dev->device);
@@ -698,7 +698,7 @@ static int cx18_create_in_workq(struct cx18 *cx)
snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
cx->v4l2_dev.name);
cx->in_work_queue = alloc_ordered_workqueue("%s", 0, cx->in_workq_name);
- if (cx->in_work_queue == NULL) {
+ if (!cx->in_work_queue) {
CX18_ERR("Unable to create incoming mailbox handler thread\n");
return -ENOMEM;
}
@@ -909,12 +909,10 @@ static int cx18_probe(struct pci_dev *pci_dev,
return -ENOMEM;
}
- cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
- if (cx == NULL) {
- printk(KERN_ERR "cx18: cannot manage card %d, out of memory\n",
- i);
+ cx = kzalloc(sizeof(*cx), GFP_ATOMIC);
+ if (!cx)
return -ENOMEM;
- }
+
cx->pci_dev = pci_dev;
cx->instance = i;
@@ -1256,7 +1254,7 @@ static void cx18_cancel_out_work_orders(struct cx18 *cx)
{
int i;
for (i = 0; i < CX18_MAX_STREAMS; i++)
- if (&cx->streams[i].video_dev != NULL)
+ if (&cx->streams[i].video_dev)
cancel_work_sync(&cx->streams[i].out_work_order);
}
@@ -1301,7 +1299,7 @@ static void cx18_remove(struct pci_dev *pci_dev)
pci_disable_device(cx->pci_dev);
- if (cx->vbi.sliced_mpeg_data[0] != NULL)
+ if (cx->vbi.sliced_mpeg_data[0])
for (i = 0; i < CX18_VBI_FRAMES; i++)
kfree(cx->vbi.sliced_mpeg_data[i]);
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 98467b2089fa..4f9c2395941b 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -684,9 +684,9 @@ int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
}
-void cx18_vb_timeout(unsigned long data)
+void cx18_vb_timeout(struct timer_list *t)
{
- struct cx18_stream *s = (struct cx18_stream *)data;
+ struct cx18_stream *s = from_timer(s, t, vb_timeout);
struct cx18_videobuf_buffer *buf;
unsigned long flags;
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 58b00b433708..37ef34e866cb 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -29,7 +29,7 @@ void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
void cx18_unmute(struct cx18 *cx);
int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma);
-void cx18_vb_timeout(unsigned long data);
+void cx18_vb_timeout(struct timer_list *t);
/* Shared with cx18-alsa module */
int cx18_claim_stream(struct cx18_open_id *id, int type);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 8385411af641..f35f78d66985 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -282,7 +282,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
INIT_WORK(&s->out_work_order, cx18_out_work_handler);
INIT_LIST_HEAD(&s->vb_capture);
- setup_timer(&s->vb_timeout, cx18_vb_timeout, (unsigned long)s);
+ timer_setup(&s->vb_timeout, cx18_vb_timeout, 0);
spin_lock_init(&s->vb_lock);
if (type == CX18_ENC_STREAM_TYPE_YUV) {
spin_lock_init(&s->vbuf_q_lock);
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 78a8836d03e4..28eab9c518c5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1323,7 +1323,7 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
static void tbs_card_init(struct cx23885_dev *dev)
{
int i;
- const u8 buf[] = {
+ static const u8 buf[] = {
0xe0, 0x06, 0x66, 0x33, 0x65,
0x01, 0x17, 0x06, 0xde};
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 0f21467ae88e..ef863492c0ac 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -270,7 +270,7 @@ static const struct i2c_adapter cx23885_i2c_adap_template = {
.algo = &cx23885_i2c_algo_template,
};
-static struct i2c_client cx23885_i2c_client_template = {
+static const struct i2c_client cx23885_i2c_client_template = {
.name = "cx23885 internal",
};
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 369e545cac04..70f9f13bded3 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -254,7 +254,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
}
-struct vb2_ops cx23885_vbi_qops = {
+const struct vb2_ops cx23885_vbi_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index cb714ab60d69..6aab713e0476 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -591,7 +591,7 @@ int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm);
extern int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f);
extern void cx23885_vbi_timeout(unsigned long data);
-extern struct vb2_ops cx23885_vbi_qops;
+extern const struct vb2_ops cx23885_vbi_qops;
extern int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status);
/* cx23885-i2c.c */
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 000049d3c71b..31479a41f359 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -291,7 +291,7 @@ static const struct i2c_adapter cx25821_i2c_adap_template = {
.algo = &cx25821_i2c_algo_template,
};
-static struct i2c_client cx25821_i2c_client_template = {
+static const struct i2c_client cx25821_i2c_client_template = {
.name = "cx25821 internal",
};
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index e02449bf2041..4e9953e61a12 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -593,11 +593,11 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_proto *protocol,
void cx88_i2c_init_ir(struct cx88_core *core)
{
struct i2c_board_info info;
- const unsigned short default_addr_list[] = {
+ static const unsigned short default_addr_list[] = {
0x18, 0x6b, 0x71,
I2C_CLIENT_END
};
- const unsigned short pvr2000_addr_list[] = {
+ static const unsigned short pvr2000_addr_list[] = {
0x18, 0x1a,
I2C_CLIENT_END
};
diff --git a/drivers/media/pci/ddbridge/ddbridge-io.h b/drivers/media/pci/ddbridge/ddbridge-io.h
index a4c6bbe09168..b3646c04f1a7 100644
--- a/drivers/media/pci/ddbridge/ddbridge-io.h
+++ b/drivers/media/pci/ddbridge/ddbridge-io.h
@@ -47,12 +47,12 @@ static inline void ddbwritel(struct ddb *dev, u32 val, u32 adr)
static inline void ddbcpyto(struct ddb *dev, u32 adr, void *src, long count)
{
- return memcpy_toio(dev->regs + adr, src, count);
+ memcpy_toio(dev->regs + adr, src, count);
}
static inline void ddbcpyfrom(struct ddb *dev, void *dst, u32 adr, long count)
{
- return memcpy_fromio(dst, dev->regs + adr, count);
+ memcpy_fromio(dst, dev->regs + adr, count);
}
static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr)
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 54dcac4b2229..6b2ffdc96961 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -770,8 +770,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
init_waitqueue_head(&itv->event_waitq);
init_waitqueue_head(&itv->vsync_waitq);
init_waitqueue_head(&itv->dma_waitq);
- setup_timer(&itv->dma_timer, ivtv_unfinished_dma,
- (unsigned long)itv);
+ timer_setup(&itv->dma_timer, ivtv_unfinished_dma, 0);
itv->cur_dma_stream = -1;
itv->cur_pio_stream = -1;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 5a35e366f4c0..893962ac85de 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -700,7 +700,7 @@ static const struct i2c_algo_bit_data ivtv_i2c_algo_template = {
.timeout = IVTV_ALGO_BIT_TIMEOUT * HZ, /* jiffies */
};
-static struct i2c_client ivtv_i2c_client_template = {
+static const struct i2c_client ivtv_i2c_client_template = {
.name = "ivtv internal",
};
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 6efe1f71262c..63b09bf73bf0 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1074,9 +1074,9 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
-void ivtv_unfinished_dma(unsigned long arg)
+void ivtv_unfinished_dma(struct timer_list *t)
{
- struct ivtv *itv = (struct ivtv *)arg;
+ struct ivtv *itv = from_timer(itv, t, dma_timer);
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
return;
diff --git a/drivers/media/pci/ivtv/ivtv-irq.h b/drivers/media/pci/ivtv/ivtv-irq.h
index 1e84433737cc..bcab5f07d37f 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.h
+++ b/drivers/media/pci/ivtv/ivtv-irq.h
@@ -48,6 +48,6 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id);
void ivtv_irq_work_handler(struct kthread_work *work);
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock);
-void ivtv_unfinished_dma(unsigned long arg);
+void ivtv_unfinished_dma(struct timer_list *t);
#endif
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 11e987860b23..ed855e3df558 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -72,7 +72,7 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(mantis == NULL)) {
+ if (unlikely(!mantis)) {
dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
return IRQ_NONE;
}
@@ -161,11 +161,10 @@ static int hopper_pci_probe(struct pci_dev *pdev,
struct mantis_pci_drvdata *drvdata;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
- int err = 0;
+ int err;
- mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
- if (mantis == NULL) {
- printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ mantis = kzalloc(sizeof(*mantis), GFP_KERNEL);
+ if (!mantis) {
err = -ENOMEM;
goto fail0;
}
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index adc980d33711..4ce8a90d69dc 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -171,13 +171,11 @@ static int mantis_pci_probe(struct pci_dev *pdev,
struct mantis_pci_drvdata *drvdata;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
- int err = 0;
+ int err;
- mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
- if (mantis == NULL) {
- printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ mantis = kzalloc(sizeof(*mantis), GFP_KERNEL);
+ if (!mantis)
return -ENOMEM;
- }
drvdata = (void *)pci_id->driver_data;
mantis->num = devs;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 49e047e4a81e..23999a8cef37 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1626,35 +1626,31 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
meye.mchip_dev = pcidev;
meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
- if (!meye.grab_temp) {
- v4l2_err(v4l2_dev, "grab buffer allocation failed\n");
+ if (!meye.grab_temp)
goto outvmalloc;
- }
spin_lock_init(&meye.grabq_lock);
if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
- GFP_KERNEL)) {
- v4l2_err(v4l2_dev, "fifo allocation failed\n");
+ GFP_KERNEL))
goto outkfifoalloc1;
- }
+
spin_lock_init(&meye.doneq_lock);
if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
- GFP_KERNEL)) {
- v4l2_err(v4l2_dev, "fifo allocation failed\n");
+ GFP_KERNEL))
goto outkfifoalloc2;
- }
meye.vdev = meye_template;
meye.vdev.v4l2_dev = &meye.v4l2_dev;
- ret = -EIO;
- if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
+ ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1);
+ if (ret) {
v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
v4l2_err(v4l2_dev, "meye: did you enable the camera in sonypi using the module options ?\n");
goto outsonypienable;
}
- if ((ret = pci_enable_device(meye.mchip_dev))) {
+ ret = pci_enable_device(meye.mchip_dev);
+ if (ret) {
v4l2_err(v4l2_dev, "meye: pci_enable_device failed\n");
goto outenabledev;
}
diff --git a/drivers/media/pci/netup_unidvb/Kconfig b/drivers/media/pci/netup_unidvb/Kconfig
index 0ad37714c7fd..b663154d0cc4 100644
--- a/drivers/media/pci/netup_unidvb/Kconfig
+++ b/drivers/media/pci/netup_unidvb/Kconfig
@@ -1,8 +1,8 @@
config DVB_NETUP_UNIDVB
tristate "NetUP Universal DVB card support"
depends on DVB_CORE && VIDEO_DEV && PCI && I2C && SPI_MASTER
- select VIDEOBUF2_DVB
- select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DVB
+ select VIDEOBUF2_VMALLOC
select DVB_HORUS3A if MEDIA_SUBDRV_AUTOSELECT
select DVB_ASCOT2E if MEDIA_SUBDRV_AUTOSELECT
select DVB_HELENE if MEDIA_SUBDRV_AUTOSELECT
@@ -10,8 +10,8 @@ config DVB_NETUP_UNIDVB
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
---help---
Support for NetUP PCI express Universal DVB card.
- help
- Say Y when you want to support NetUP Dual Universal DVB card
- Card can receive two independent streams in following standards:
+
+ Say Y when you want to support NetUP Dual Universal DVB card.
+ Card can receive two independent streams in following standards:
DVB-S/S2, T/T2, C/C2
- Two CI slots available for CAM modules.
+ Two CI slots available for CAM modules.
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60e6cd5b3a03..509d69e6ca4a 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -82,11 +82,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
* @start_addr_lo: DMA ring buffer start address, lower part
* @start_addr_hi: DMA ring buffer start address, higher part
* @size: DMA ring buffer size register
- Bits [0-7]: DMA packet size, 188 bytes
- Bits [16-23]: packets count in block, 128 packets
- Bits [24-31]: blocks count, 8 blocks
+ * * Bits [0-7]: DMA packet size, 188 bytes
+ * * Bits [16-23]: packets count in block, 128 packets
+ * * Bits [24-31]: blocks count, 8 blocks
* @timeout: DMA timeout in units of 8ns
- For example, value of 375000000 equals to 3 sec
+ * For example, value of 375000000 equals to 3 sec
* @curr_addr_lo: Current ring buffer head address, lower part
* @curr_addr_hi: Current ring buffer head address, higher part
* @stat_pkt_received: Statistic register, not tested
@@ -638,9 +638,9 @@ static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
spin_unlock_irqrestore(&dma->lock, flags);
}
-static void netup_unidvb_dma_timeout(unsigned long data)
+static void netup_unidvb_dma_timeout(struct timer_list *t)
{
- struct netup_dma *dma = (struct netup_dma *)data;
+ struct netup_dma *dma = from_timer(dma, t, timeout);
struct netup_unidvb_dev *ndev = dma->ndev;
dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
@@ -664,8 +664,7 @@ static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
spin_lock_init(&dma->lock);
INIT_WORK(&dma->work, netup_unidvb_dma_worker);
INIT_LIST_HEAD(&dma->free_buffers);
- setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
- (unsigned long)dma);
+ timer_setup(&dma->timeout, netup_unidvb_dma_timeout, 0);
dma->ring_buffer_size = ndev->dma_size / 2;
dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 7976c5a12ca8..9e76de2411ae 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -338,9 +338,9 @@ void saa7134_buffer_next(struct saa7134_dev *dev,
}
}
-void saa7134_buffer_timeout(unsigned long data)
+void saa7134_buffer_timeout(struct timer_list *t)
{
- struct saa7134_dmaqueue *q = (struct saa7134_dmaqueue *)data;
+ struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
struct saa7134_dev *dev = q->dev;
unsigned long flags;
@@ -378,7 +378,7 @@ void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
}
}
spin_unlock_irqrestore(&dev->slock, flags);
- saa7134_buffer_timeout((unsigned long)q); /* also calls del_timer(&q->timeout) */
+ saa7134_buffer_timeout(&q->timeout); /* also calls del_timer(&q->timeout) */
}
EXPORT_SYMBOL_GPL(saa7134_stop_streaming);
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 8f2ed632840f..cf1e526de56a 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -345,7 +345,7 @@ static const struct i2c_adapter saa7134_adap_template = {
.algo = &saa7134_algo,
};
-static struct i2c_client saa7134_client_template = {
+static const struct i2c_client saa7134_client_template = {
.name = "saa7134 internal",
};
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 9337e4615519..2d5abeddc079 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -447,10 +447,10 @@ void saa7134_input_irq(struct saa7134_dev *dev)
}
}
-static void saa7134_input_timer(unsigned long data)
+static void saa7134_input_timer(struct timer_list *t)
{
- struct saa7134_dev *dev = (struct saa7134_dev *)data;
- struct saa7134_card_ir *ir = dev->remote;
+ struct saa7134_card_ir *ir = from_timer(ir, t, timer);
+ struct saa7134_dev *dev = ir->dev->priv;
build_key(dev);
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
@@ -507,8 +507,7 @@ static int __saa7134_ir_start(void *priv)
ir->running = true;
if (ir->polling) {
- setup_timer(&ir->timer, saa7134_input_timer,
- (unsigned long)dev);
+ timer_setup(&ir->timer, saa7134_input_timer, 0);
ir->timer.expires = jiffies + HZ;
add_timer(&ir->timer);
}
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 7414878af9e0..2be703617e29 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -223,8 +223,7 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
dev->ts.nr_packets = ts_nr_packets;
INIT_LIST_HEAD(&dev->ts_q.queue);
- setup_timer(&dev->ts_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->ts_q));
+ timer_setup(&dev->ts_q.timeout, saa7134_buffer_timeout, 0);
dev->ts_q.dev = dev;
dev->ts_q.need_two = 1;
dev->ts_started = 0;
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index bcad9b2d9bb3..57bea543c39b 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -165,7 +165,7 @@ static int buffer_init(struct vb2_buffer *vb2)
return 0;
}
-struct vb2_ops saa7134_vbi_qops = {
+const struct vb2_ops saa7134_vbi_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
@@ -181,8 +181,7 @@ struct vb2_ops saa7134_vbi_qops = {
int saa7134_vbi_init1(struct saa7134_dev *dev)
{
INIT_LIST_HEAD(&dev->vbi_q.queue);
- setup_timer(&dev->vbi_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->vbi_q));
+ timer_setup(&dev->vbi_q.timeout, saa7134_buffer_timeout, 0);
dev->vbi_q.dev = dev;
if (vbibufs < 2)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 51d42bbf969e..82d2a24644e4 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2145,8 +2145,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
dev->automute = 0;
INIT_LIST_HEAD(&dev->video_q.queue);
- setup_timer(&dev->video_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->video_q));
+ timer_setup(&dev->video_q.timeout, saa7134_buffer_timeout, 0);
dev->video_q.dev = dev;
dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
dev->width = 720;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 816b5282d671..39c36e6aefbe 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -773,7 +773,7 @@ int saa7134_buffer_queue(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
void saa7134_buffer_finish(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
unsigned int state);
void saa7134_buffer_next(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
-void saa7134_buffer_timeout(unsigned long data);
+void saa7134_buffer_timeout(struct timer_list *t);
void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
int saa7134_set_dmabits(struct saa7134_dev *dev);
@@ -870,7 +870,7 @@ int saa7134_ts_stop(struct saa7134_dev *dev);
/* ----------------------------------------------------------- */
/* saa7134-vbi.c */
-extern struct vb2_ops saa7134_vbi_qops;
+extern const struct vb2_ops saa7134_vbi_qops;
extern struct video_device saa7134_vbi_template;
int saa7134_vbi_init1(struct saa7134_dev *dev);
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index f708cab01fef..d31a2d4494d1 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -260,11 +260,10 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
DEB_EE("\n");
- hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
- if (NULL == hexium) {
- pr_err("not enough kernel memory in hexium_attach()\n");
+ hexium = kzalloc(sizeof(*hexium), GFP_KERNEL);
+ if (!hexium)
return -ENOMEM;
- }
+
dev->ext_priv = hexium;
/* enable i2c-port pins */
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index 01f01580c7ca..043318aa19e2 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -219,11 +219,9 @@ static int hexium_probe(struct saa7146_dev *dev)
return -EFAULT;
}
- hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
- if (NULL == hexium) {
- pr_err("hexium_probe: not enough kernel memory\n");
+ hexium = kzalloc(sizeof(*hexium), GFP_KERNEL);
+ if (!hexium)
return -ENOMEM;
- }
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
@@ -268,7 +266,9 @@ static int hexium_probe(struct saa7146_dev *dev)
/* check if this is an old hexium Orion card by looking at
a saa7110 at address 0x4e */
- if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
+ err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ,
+ 0x00, I2C_SMBUS_BYTE_DATA, &data);
+ if (err == 0) {
pr_info("device is a Hexium HV-PCI6/Orion (old)\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index a0d2129c6ca9..c83b2e914dcb 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -98,11 +98,9 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
goto ret;
}
- buf = kzalloc(sizeof(struct saa7164_buffer), GFP_KERNEL);
- if (!buf) {
- log_warn("%s() SAA_ERR_NO_RESOURCES\n", __func__);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
goto ret;
- }
buf->idx = -1;
buf->port = port;
@@ -283,7 +281,7 @@ struct saa7164_user_buffer *saa7164_buffer_alloc_user(struct saa7164_dev *dev,
{
struct saa7164_user_buffer *buf;
- buf = kzalloc(sizeof(struct saa7164_user_buffer), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return NULL;
diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c
index 4bcde7c79dc3..6d13cbb9d010 100644
--- a/drivers/media/pci/saa7164/saa7164-i2c.c
+++ b/drivers/media/pci/saa7164/saa7164-i2c.c
@@ -84,7 +84,7 @@ static const struct i2c_adapter saa7164_i2c_adap_template = {
.algo = &saa7164_i2c_algo_template,
};
-static struct i2c_client saa7164_i2c_client_template = {
+static const struct i2c_client saa7164_i2c_client_template = {
.name = "saa7164 internal",
};
diff --git a/drivers/media/pci/solo6x10/solo6x10-enc.c b/drivers/media/pci/solo6x10/solo6x10-enc.c
index d28211bb9674..58d6b5131dd0 100644
--- a/drivers/media/pci/solo6x10/solo6x10-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-enc.c
@@ -175,7 +175,7 @@ out:
return 0;
}
-/**
+/*
* Set channel Quality Profile (0-3).
*/
void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch,
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index eb5a9eae7c8e..dd199bfc1d45 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -404,6 +404,7 @@ static const struct v4l2_file_operations vip_fops = {
* vidioc_querycap - return capabilities of device
* @file: descriptor of device
* @cap: contains return values
+ * @priv: unused
*
* the capabilities of the device are returned
*
@@ -429,6 +430,7 @@ static int vidioc_querycap(struct file *file, void *priv,
* vidioc_s_std - set video standard
* @file: descriptor of device
* @std: contains standard to be set
+ * @priv: unused
*
* the video standard is set
*
@@ -466,6 +468,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
/**
* vidioc_g_std - get video standard
* @file: descriptor of device
+ * @priv: unused
* @std: contains return values
*
* the current video standard is returned
@@ -483,6 +486,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
/**
* vidioc_querystd - get possible video standards
* @file: descriptor of device
+ * @priv: unused
* @std: contains return values
*
* all possible video standards are returned
@@ -512,6 +516,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
/**
* vidioc_s_input - set input line
* @file: descriptor of device
+ * @priv: unused
* @i: new input line number
*
* the current active input line is set
@@ -538,6 +543,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
/**
* vidioc_g_input - return input line
* @file: descriptor of device
+ * @priv: unused
* @i: returned input line number
*
* the current active input line is returned
@@ -554,6 +560,8 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
/**
* vidioc_enum_fmt_vid_cap - return video capture format
+ * @file: descriptor of device
+ * @priv: unused
* @f: returned format information
*
* returns name and format of video capture
@@ -577,6 +585,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
/**
* vidioc_try_fmt_vid_cap - set video capture format
* @file: descriptor of device
+ * @priv: unused
* @f: new format
*
* new video format is set which includes width and
@@ -639,6 +648,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
/**
* vidioc_s_fmt_vid_cap - set current video format parameters
* @file: descriptor of device
+ * @priv: unused
* @f: returned format information
*
* set new capture format
@@ -706,6 +716,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
/**
* vidioc_g_fmt_vid_cap - get current video format parameters
* @file: descriptor of device
+ * @priv: unused
* @f: contains format information
*
* returns current video format parameters
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index f46947d8adf8..6d415bdeef18 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -347,9 +347,9 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
static inline void print_time(char *s)
{
#ifdef DEBUG_TIMING
- struct timeval tv;
- do_gettimeofday(&tv);
- printk("%s: %d.%d\n", s, (int)tv.tv_sec, (int)tv.tv_usec);
+ struct timespec64 ts;
+ ktime_get_real_ts64(&ts);
+ printk("%s: %lld.%09ld\n", s, (s64)ts.tv_sec, ts.tv_nsec);
#endif
}
@@ -1224,7 +1224,7 @@ static int budget_start_feed(struct dvb_demux_feed *feed)
dprintk(2, "av7110: %p\n", budget);
spin_lock(&budget->feedlock1);
- feed->pusi_seen = 0; /* have a clean section start */
+ feed->pusi_seen = false; /* have a clean section start */
status = start_ts_capture(budget);
spin_unlock(&budget->feedlock1);
return status;
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 97499b2af714..b3dc45b91101 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -330,7 +330,7 @@ static int budget_start_feed(struct dvb_demux_feed *feed)
return -EINVAL;
spin_lock(&budget->feedlock);
- feed->pusi_seen = 0; /* have a clean section start */
+ feed->pusi_seen = false; /* have a clean section start */
if (budget->feeding++ == 0)
status = start_ts_capture(budget);
spin_unlock(&budget->feedlock);
diff --git a/drivers/media/pci/tw68/tw68-risc.c b/drivers/media/pci/tw68/tw68-risc.c
index 7439db212a69..82ff9c9494f3 100644
--- a/drivers/media/pci/tw68/tw68-risc.c
+++ b/drivers/media/pci/tw68/tw68-risc.c
@@ -29,14 +29,15 @@
#include "tw68.h"
/**
- * @rp pointer to current risc program position
- * @sglist pointer to "scatter-gather list" of buffer pointers
- * @offset offset to target memory buffer
- * @sync_line 0 -> no sync, 1 -> odd sync, 2 -> even sync
- * @bpl number of bytes per scan line
- * @padding number of bytes of padding to add
- * @lines number of lines in field
- * @jump insert a jump at the start
+ * tw68_risc_field
+ * @rp: pointer to current risc program position
+ * @sglist: pointer to "scatter-gather list" of buffer pointers
+ * @offset: offset to target memory buffer
+ * @sync_line: 0 -> no sync, 1 -> odd sync, 2 -> even sync
+ * @bpl: number of bytes per scan line
+ * @padding: number of bytes of padding to add
+ * @lines: number of lines in field
+ * @jump: insert a jump at the start
*/
static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
@@ -120,18 +121,18 @@ static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
* memory for the dma controller "program" and then fills in that
* memory with the appropriate "instructions".
*
- * @pci_dev structure with info about the pci
+ * @pci: structure with info about the pci
* slot which our device is in.
- * @risc structure with info about the memory
+ * @buf: structure with info about the memory
* used for our controller program.
- * @sglist scatter-gather list entry
- * @top_offset offset within the risc program area for the
+ * @sglist: scatter-gather list entry
+ * @top_offset: offset within the risc program area for the
* first odd frame line
- * @bottom_offset offset within the risc program area for the
+ * @bottom_offset: offset within the risc program area for the
* first even frame line
- * @bpl number of data bytes per scan line
- * @padding number of extra bytes to add at end of line
- * @lines number of scan lines
+ * @bpl: number of data bytes per scan line
+ * @padding: number of extra bytes to add at end of line
+ * @lines: number of scan lines
*/
int tw68_risc_buffer(struct pci_dev *pci,
struct tw68_buf *buf,
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index b762e5f0ba1d..7fb3f07bf022 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -126,9 +126,9 @@ void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
* channels "too fast" which makes some TW686x devices very
* angry and freeze the CPU (see note 1).
*/
-static void tw686x_dma_delay(unsigned long data)
+static void tw686x_dma_delay(struct timer_list *t)
{
- struct tw686x_dev *dev = (struct tw686x_dev *)data;
+ struct tw686x_dev *dev = from_timer(dev, t, dma_delay_timer);
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
@@ -325,8 +325,7 @@ static int tw686x_probe(struct pci_dev *pci_dev,
goto iounmap;
}
- setup_timer(&dev->dma_delay_timer,
- tw686x_dma_delay, (unsigned long) dev);
+ timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);
/*
* This must be set right before initializing v4l2_dev.
diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 81cba177cd90..0cdb7d34926d 100644
--- a/drivers/media/pci/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -37,7 +37,7 @@ extern int zr36067_debug;
/* Anybody who uses more than four? */
#define BUZ_MAX 4
-extern struct video_device zoran_template;
+extern const struct video_device zoran_template;
extern int zoran_check_jpg_settings(struct zoran *zr,
struct zoran_jpg_settings *settings,
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index a11cb501c550..d07840072337 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2839,7 +2839,7 @@ static const struct v4l2_file_operations zoran_fops = {
.poll = zoran_poll,
};
-struct video_device zoran_template = {
+const struct video_device zoran_template = {
.name = ZORAN_NAME,
.fops = &zoran_fops,
.ioctl_ops = &zoran_ioctl_ops,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 3c4f7fa7b9d8..fd0c99859d6f 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -458,6 +458,21 @@ config VIDEO_RENESAS_VSP1
To compile this driver as a module, choose M here: the module
will be called vsp1.
+config VIDEO_ROCKCHIP_RGA
+ tristate "Rockchip Raster 2d Graphic Acceleration Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a v4l2 driver for Rockchip SOC RGA 2d graphics accelerator.
+ Rockchip RGA is a separate 2D raster graphic acceleration unit.
+ It accelerates 2D graphics operations, such as point/line drawing,
+ image scaling, rotation, BitBLT, alpha blending and image blur/sharpness.
+
+ To compile this driver as a module choose m here.
+
config VIDEO_TI_VPE
tristate "TI VPE (Video Processing Engine) driver"
depends on VIDEO_DEV && VIDEO_V4L2
@@ -553,6 +568,16 @@ config VIDEO_MESON_AO_CEC
This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
+
+config CEC_GPIO
+ tristate "Generic GPIO-based CEC driver"
+ depends on PREEMPT
+ select CEC_CORE
+ select CEC_PIN
+ select GPIOLIB
+ ---help---
+ This is a generic GPIO-based CEC driver.
+ The CEC bus is present in the HDMI connector and enables communication
between compatible devices.
config VIDEO_SAMSUNG_S5P_CEC
@@ -589,6 +614,17 @@ config VIDEO_STM32_HDMI_CEC
CEC bus is present in the HDMI connector and enables communication
between compatible devices.
+config VIDEO_TEGRA_HDMI_CEC
+ tristate "Tegra HDMI CEC driver"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for the Tegra HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
endif #CEC_PLATFORM_DRIVERS
menuconfig SDR_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 327f80a6f82c..003b0bb2cddf 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_VIDEO_CODA) += coda/
obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
+obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
obj-$(CONFIG_VIDEO_MUX) += video-mux.o
@@ -47,6 +49,8 @@ obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += sti/cec/
obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra-cec/
+
obj-y += stm32/
obj-y += blackfin/
@@ -63,6 +67,8 @@ obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip/rga/
+
obj-y += omap/
obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index dfcc484cab89..0997c640191d 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2417,6 +2417,11 @@ static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
return vpfe_probe_complete(vpfe);
}
+static const struct v4l2_async_notifier_operations vpfe_async_ops = {
+ .bound = vpfe_async_bound,
+ .complete = vpfe_async_complete,
+};
+
static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
@@ -2590,8 +2595,7 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe->notifier.subdevs = vpfe->cfg->asd;
vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
- vpfe->notifier.bound = vpfe_async_bound;
- vpfe->notifier.complete = vpfe_async_complete;
+ vpfe->notifier.ops = &vpfe_async_ops;
ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
&vpfe->notifier);
if (ret) {
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index e6cef966dcbf..2aadc19235ea 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -43,6 +43,7 @@
/* ISC Clock Status Register */
#define ISC_CLKSR 0x00000020
+#define ISC_CLKSR_SIP BIT(31)
#define ISC_CLK(n) BIT(n)
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index d7103c5f92c3..13f1c1c797b0 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -65,6 +65,7 @@ struct isc_clk {
struct clk_hw hw;
struct clk *clk;
struct regmap *regmap;
+ spinlock_t lock;
u8 id;
u8 parent_id;
u32 div;
@@ -82,41 +83,69 @@ struct isc_subdev_entity {
struct v4l2_subdev *sd;
struct v4l2_async_subdev *asd;
struct v4l2_async_notifier notifier;
- struct v4l2_subdev_pad_config *config;
u32 pfe_cfg0;
struct list_head list;
};
+/* Indicate the format is generated by the sensor */
+#define FMT_FLAG_FROM_SENSOR BIT(0)
+/* Indicate the format is produced by ISC itself */
+#define FMT_FLAG_FROM_CONTROLLER BIT(1)
+/* Indicate a Raw Bayer format */
+#define FMT_FLAG_RAW_FORMAT BIT(2)
+
+#define FMT_FLAG_RAW_FROM_SENSOR (FMT_FLAG_FROM_SENSOR | \
+ FMT_FLAG_RAW_FORMAT)
+
/*
* struct isc_format - ISC media bus format information
* @fourcc: Fourcc code for this format
* @mbus_code: V4L2 media bus format code.
+ * flags: Indicate format from sensor or converted by controller
* @bpp: Bits per pixel (when stored in memory)
- * @reg_bps: reg value for bits per sample
* (when transferred over a bus)
- * @pipeline: pipeline switch
* @sd_support: Subdev supports this format
* @isc_support: ISC can convert raw format to this format
*/
+
struct isc_format {
u32 fourcc;
u32 mbus_code;
+ u32 flags;
u8 bpp;
- u32 reg_bps;
- u32 reg_bay_cfg;
- u32 reg_rlp_mode;
- u32 reg_dcfg_imode;
- u32 reg_dctrl_dview;
-
- u32 pipeline;
-
bool sd_support;
bool isc_support;
};
+/* Pipeline bitmap */
+#define WB_ENABLE BIT(0)
+#define CFA_ENABLE BIT(1)
+#define CC_ENABLE BIT(2)
+#define GAM_ENABLE BIT(3)
+#define GAM_BENABLE BIT(4)
+#define GAM_GENABLE BIT(5)
+#define GAM_RENABLE BIT(6)
+#define CSC_ENABLE BIT(7)
+#define CBC_ENABLE BIT(8)
+#define SUB422_ENABLE BIT(9)
+#define SUB420_ENABLE BIT(10)
+
+#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
+
+struct fmt_config {
+ u32 fourcc;
+
+ u32 pfe_cfg0_bps;
+ u32 cfa_baycfg;
+ u32 rlp_cfg_mode;
+ u32 dcfg_imode;
+ u32 dctrl_dview;
+
+ u32 bits_pipeline;
+};
#define HIST_ENTRIES 512
#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
@@ -181,80 +210,320 @@ struct isc_device {
struct list_head subdev_entities;
};
-#define RAW_FMT_IND_START 0
-#define RAW_FMT_IND_END 11
-#define ISC_FMT_IND_START 12
-#define ISC_FMT_IND_END 14
-
-static struct isc_format isc_formats[] = {
- { V4L2_PIX_FMT_SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_YUV420, 0x0, 12,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC420P, ISC_DCTRL_DVIEW_PLANAR, 0x7fb,
- false, false },
- { V4L2_PIX_FMT_YUV422P, 0x0, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC422P, ISC_DCTRL_DVIEW_PLANAR, 0x3fb,
- false, false },
- { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_RGB565_2X8_LE, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_RGB565,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x7b,
- false, false },
-
- { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_YUYV8_2X8, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
+static struct isc_format formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .mbus_code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .mbus_code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 16,
+ },
+};
+
+struct fmt_config fmt_configs_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC420P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB420_ENABLE | SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC422P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED32,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
};
#define GAMMA_MAX 2
@@ -307,31 +576,80 @@ module_param(sensor_preferred, uint, 0644);
MODULE_PARM_DESC(sensor_preferred,
"Sensor is preferred to output the specified format (1-on 0-off), default 1");
+static int isc_wait_clk_stable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long timeout = jiffies + usecs_to_jiffies(1000);
+ unsigned int status;
+
+ while (time_before(jiffies, timeout)) {
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (!(status & ISC_CLKSR_SIP))
+ return 0;
+
+ usleep_range(10, 250);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int isc_clk_prepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
+ return isc_wait_clk_stable(hw);
+}
+
+static void isc_clk_unprepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ isc_wait_clk_stable(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+}
+
static int isc_clk_enable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
struct regmap *regmap = isc_clk->regmap;
+ unsigned long flags;
+ unsigned int status;
dev_dbg(isc_clk->dev, "ISC CLK: %s, div = %d, parent id = %d\n",
__func__, isc_clk->div, isc_clk->parent_id);
+ spin_lock_irqsave(&isc_clk->lock, flags);
regmap_update_bits(regmap, ISC_CLKCFG,
ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
(isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
(isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
- return 0;
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (status & ISC_CLK(id))
+ return 0;
+ else
+ return -EINVAL;
}
static void isc_clk_disable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
+ unsigned long flags;
+ spin_lock_irqsave(&isc_clk->lock, flags);
regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
}
static int isc_clk_is_enabled(struct clk_hw *hw)
@@ -339,8 +657,14 @@ static int isc_clk_is_enabled(struct clk_hw *hw)
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 status;
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+
return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}
@@ -447,6 +771,8 @@ static int isc_clk_set_rate(struct clk_hw *hw,
}
static const struct clk_ops isc_clk_ops = {
+ .prepare = isc_clk_prepare,
+ .unprepare = isc_clk_unprepare,
.enable = isc_clk_enable,
.disable = isc_clk_disable,
.is_enabled = isc_clk_is_enabled,
@@ -492,6 +818,7 @@ static int isc_clk_register(struct isc_device *isc, unsigned int id)
isc_clk->regmap = regmap;
isc_clk->id = id;
isc_clk->dev = isc->dev;
+ spin_lock_init(&isc_clk->lock);
isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
if (IS_ERR(isc_clk->clk)) {
@@ -575,11 +902,27 @@ static inline bool sensor_is_preferred(const struct isc_format *isc_fmt)
!isc_fmt->isc_support;
}
+static struct fmt_config *get_fmt_config(u32 fourcc)
+{
+ struct fmt_config *config;
+ int i;
+
+ config = &fmt_configs_list[0];
+ for (i = 0; i < ARRAY_SIZE(fmt_configs_list); i++) {
+ if (config->fourcc == fourcc)
+ return config;
+
+ config++;
+ }
+ return NULL;
+}
+
static void isc_start_dma(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
struct v4l2_pix_format *pixfmt = &isc->fmt.fmt.pix;
u32 sizeimage = pixfmt->sizeimage;
+ struct fmt_config *config = get_fmt_config(isc->current_fmt->fourcc);
u32 dctrl_dview;
dma_addr_t addr0;
@@ -602,7 +945,7 @@ static void isc_start_dma(struct isc_device *isc)
if (sensor_is_preferred(isc->current_fmt))
dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
else
- dctrl_dview = isc->current_fmt->reg_dctrl_dview;
+ dctrl_dview = config->dctrl_dview;
regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
@@ -612,6 +955,7 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
u32 val, bay_cfg;
const u32 *gamma;
unsigned int i;
@@ -625,7 +969,7 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
if (!pipeline)
return;
- bay_cfg = isc->raw_fmt->reg_bay_cfg;
+ bay_cfg = config->cfa_baycfg;
regmap_write(regmap, ISC_WB_CFG, bay_cfg);
regmap_write(regmap, ISC_WB_O_RGR, 0x0);
@@ -678,11 +1022,13 @@ static void isc_set_histogram(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
if (ctrls->awb && (ctrls->hist_stat != HIST_ENABLED)) {
- regmap_write(regmap, ISC_HIS_CFG, ISC_HIS_CFG_MODE_R |
- (isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
- ISC_HIS_CFG_RAR);
+ regmap_write(regmap, ISC_HIS_CFG,
+ ISC_HIS_CFG_MODE_R |
+ (config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
+ ISC_HIS_CFG_RAR);
regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
ctrls->hist_id = ISC_HIS_CFG_MODE_R;
@@ -699,8 +1045,10 @@ static void isc_set_histogram(struct isc_device *isc)
}
static inline void isc_get_param(const struct isc_format *fmt,
- u32 *rlp_mode, u32 *dcfg)
+ u32 *rlp_mode, u32 *dcfg)
{
+ struct fmt_config *config = get_fmt_config(fmt->fourcc);
+
*dcfg = ISC_DCFG_YMBSIZE_BEATS8;
switch (fmt->fourcc) {
@@ -712,8 +1060,8 @@ static inline void isc_get_param(const struct isc_format *fmt,
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
- *rlp_mode = fmt->reg_rlp_mode;
- *dcfg |= fmt->reg_dcfg_imode;
+ *rlp_mode = config->rlp_cfg_mode;
+ *dcfg |= config->dcfg_imode;
break;
default:
*rlp_mode = ISC_RLP_CFG_MODE_DAT8;
@@ -726,20 +1074,22 @@ static int isc_configure(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
const struct isc_format *current_fmt = isc->current_fmt;
+ struct fmt_config *curfmt_config = get_fmt_config(current_fmt->fourcc);
+ struct fmt_config *rawfmt_config = get_fmt_config(isc->raw_fmt->fourcc);
struct isc_subdev_entity *subdev = isc->current_subdev;
u32 pfe_cfg0, rlp_mode, dcfg, mask, pipeline;
if (sensor_is_preferred(current_fmt)) {
- pfe_cfg0 = current_fmt->reg_bps;
+ pfe_cfg0 = curfmt_config->pfe_cfg0_bps;
pipeline = 0x0;
isc_get_param(current_fmt, &rlp_mode, &dcfg);
isc->ctrls.hist_stat = HIST_INIT;
} else {
- pfe_cfg0 = isc->raw_fmt->reg_bps;
- pipeline = current_fmt->pipeline;
- rlp_mode = current_fmt->reg_rlp_mode;
- dcfg = current_fmt->reg_dcfg_imode | ISC_DCFG_YMBSIZE_BEATS8 |
- ISC_DCFG_CMBSIZE_BEATS8;
+ pfe_cfg0 = rawfmt_config->pfe_cfg0_bps;
+ pipeline = curfmt_config->bits_pipeline;
+ rlp_mode = curfmt_config->rlp_cfg_mode;
+ dcfg = curfmt_config->dcfg_imode |
+ ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
}
pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
@@ -941,6 +1291,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
{
struct isc_format *isc_fmt;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -971,7 +1322,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
- isc->current_subdev->config, &format);
+ &pad_cfg, &format);
if (ret < 0)
return ret;
@@ -1323,6 +1674,7 @@ static void isc_awb_work(struct work_struct *w)
struct isc_device *isc =
container_of(w, struct isc_device, awb_work);
struct regmap *regmap = isc->regmap;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
struct isc_ctrls *ctrls = &isc->ctrls;
u32 hist_id = ctrls->hist_id;
u32 baysel;
@@ -1340,7 +1692,7 @@ static void isc_awb_work(struct work_struct *w)
}
ctrls->hist_id = hist_id;
- baysel = isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+ baysel = config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
pm_runtime_get_sync(isc->dev);
@@ -1436,17 +1788,15 @@ static void isc_async_unbind(struct v4l2_async_notifier *notifier,
struct isc_device, v4l2_dev);
cancel_work_sync(&isc->awb_work);
video_unregister_device(&isc->video_dev);
- if (isc->current_subdev->config)
- v4l2_subdev_free_pad_config(isc->current_subdev->config);
v4l2_ctrl_handler_free(&isc->ctrls.handler);
}
static struct isc_format *find_format_by_code(unsigned int code, int *index)
{
- struct isc_format *fmt = &isc_formats[0];
+ struct isc_format *fmt = &formats_list[0];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ for (i = 0; i < ARRAY_SIZE(formats_list); i++) {
if (fmt->mbus_code == code) {
*index = i;
return fmt;
@@ -1463,37 +1813,36 @@ static int isc_formats_init(struct isc_device *isc)
struct isc_format *fmt;
struct v4l2_subdev *subdev = isc->current_subdev->sd;
unsigned int num_fmts, i, j;
+ u32 list_size = ARRAY_SIZE(formats_list);
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- fmt = &isc_formats[0];
- for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
- fmt->isc_support = false;
- fmt->sd_support = false;
-
- fmt++;
- }
-
while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &mbus_code)) {
mbus_code.index++;
+
fmt = find_format_by_code(mbus_code.code, &i);
- if (!fmt)
+ if ((!fmt) || (!(fmt->flags & FMT_FLAG_FROM_SENSOR)))
continue;
fmt->sd_support = true;
- if (i <= RAW_FMT_IND_END) {
- for (j = ISC_FMT_IND_START; j <= ISC_FMT_IND_END; j++)
- isc_formats[j].isc_support = true;
-
+ if (fmt->flags & FMT_FLAG_RAW_FORMAT)
isc->raw_fmt = fmt;
- }
}
- fmt = &isc_formats[0];
- for (i = 0, num_fmts = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ fmt = &formats_list[0];
+ for (i = 0; i < list_size; i++) {
+ if (fmt->flags & FMT_FLAG_FROM_CONTROLLER)
+ fmt->isc_support = true;
+
+ fmt++;
+ }
+
+ fmt = &formats_list[0];
+ num_fmts = 0;
+ for (i = 0; i < list_size; i++) {
if (fmt->isc_support || fmt->sd_support)
num_fmts++;
@@ -1505,15 +1854,13 @@ static int isc_formats_init(struct isc_device *isc)
isc->num_user_formats = num_fmts;
isc->user_formats = devm_kcalloc(isc->dev,
- num_fmts, sizeof(struct isc_format *),
+ num_fmts, sizeof(*isc->user_formats),
GFP_KERNEL);
- if (!isc->user_formats) {
- v4l2_err(&isc->v4l2_dev, "could not allocate memory\n");
+ if (!isc->user_formats)
return -ENOMEM;
- }
- fmt = &isc_formats[0];
- for (i = 0, j = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ fmt = &formats_list[0];
+ for (i = 0, j = 0; i < list_size; i++) {
if (fmt->isc_support || fmt->sd_support)
isc->user_formats[j++] = fmt;
@@ -1550,7 +1897,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
- struct isc_subdev_entity *sd_entity;
struct video_device *vdev = &isc->video_dev;
struct vb2_queue *q = &isc->vb2_vidq;
int ret;
@@ -1563,8 +1909,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
isc->current_subdev = container_of(notifier,
struct isc_subdev_entity, notifier);
- sd_entity = isc->current_subdev;
-
mutex_init(&isc->lock);
init_completion(&isc->comp);
@@ -1591,10 +1935,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
INIT_LIST_HEAD(&isc->dma_queue);
spin_lock_init(&isc->dma_queue_lock);
- sd_entity->config = v4l2_subdev_alloc_pad_config(sd_entity->sd);
- if (sd_entity->config == NULL)
- return -ENOMEM;
-
ret = isc_formats_init(isc);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
@@ -1639,6 +1979,12 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations isc_async_ops = {
+ .bound = isc_async_bound,
+ .unbind = isc_async_unbind,
+ .complete = isc_async_complete,
+};
+
static void isc_subdev_cleanup(struct isc_device *isc)
{
struct isc_subdev_entity *subdev_entity;
@@ -1716,7 +2062,7 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity = devm_kzalloc(dev,
sizeof(*subdev_entity), GFP_KERNEL);
- if (subdev_entity == NULL) {
+ if (!subdev_entity) {
of_node_put(rem);
ret = -ENOMEM;
break;
@@ -1724,7 +2070,7 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity->asd = devm_kzalloc(dev,
sizeof(*subdev_entity->asd), GFP_KERNEL);
- if (subdev_entity->asd == NULL) {
+ if (!subdev_entity->asd) {
of_node_put(rem);
ret = -ENOMEM;
break;
@@ -1815,25 +2161,37 @@ static int atmel_isc_probe(struct platform_device *pdev)
return ret;
}
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
ret = isc_clk_init(isc);
if (ret) {
dev_err(dev, "failed to init isc clock: %d\n", ret);
- goto clean_isc_clk;
+ goto unprepare_hclk;
}
isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
/* ispck should be greater or equal to hclock */
ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
if (ret) {
dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto clean_isc_clk;
+ goto unprepare_clk;
}
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
- goto clean_isc_clk;
+ goto unprepare_clk;
}
ret = isc_parse_dt(dev, isc);
@@ -1851,9 +2209,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
subdev_entity->notifier.subdevs = &subdev_entity->asd;
subdev_entity->notifier.num_subdevs = 1;
- subdev_entity->notifier.bound = isc_async_bound;
- subdev_entity->notifier.unbind = isc_async_unbind;
- subdev_entity->notifier.complete = isc_async_complete;
+ subdev_entity->notifier.ops = &isc_async_ops;
ret = v4l2_async_notifier_register(&isc->v4l2_dev,
&subdev_entity->notifier);
@@ -1866,7 +2222,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
break;
}
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ pm_request_idle(dev);
return 0;
@@ -1876,7 +2234,11 @@ cleanup_subdev:
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-clean_isc_clk:
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
isc_clk_cleanup(isc);
return ret;
@@ -1887,6 +2249,8 @@ static int atmel_isc_remove(struct platform_device *pdev)
struct isc_device *isc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
isc_subdev_cleanup(isc);
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 891fa2505efa..e900995143a3 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -411,7 +411,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&isi->irqlock, flags);
list_add_tail(&buf->list, &isi->video_buffer_list);
- if (isi->active == NULL) {
+ if (!isi->active) {
isi->active = buf;
if (vb2_is_streaming(vb->vb2_queue))
start_dma(isi, buf);
@@ -1038,10 +1038,8 @@ static int isi_formats_init(struct atmel_isi *isi)
isi->user_formats = devm_kcalloc(isi->dev,
num_fmts, sizeof(struct isi_format *),
GFP_KERNEL);
- if (!isi->user_formats) {
- dev_err(isi->dev, "could not allocate memory\n");
+ if (!isi->user_formats)
return -ENOMEM;
- }
memcpy(isi->user_formats, isi_fmts,
num_fmts * sizeof(struct isi_format *));
@@ -1105,6 +1103,12 @@ static int isi_graph_notify_bound(struct v4l2_async_notifier *notifier,
return 0;
}
+static const struct v4l2_async_notifier_operations isi_graph_notify_ops = {
+ .bound = isi_graph_notify_bound,
+ .unbind = isi_graph_notify_unbind,
+ .complete = isi_graph_notify_complete,
+};
+
static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
{
struct device_node *ep = NULL;
@@ -1143,7 +1147,7 @@ static int isi_graph_init(struct atmel_isi *isi)
/* Register the subdevices notifier. */
subdevs = devm_kzalloc(isi->dev, sizeof(*subdevs), GFP_KERNEL);
- if (subdevs == NULL) {
+ if (!subdevs) {
of_node_put(isi->entity.node);
return -ENOMEM;
}
@@ -1152,9 +1156,7 @@ static int isi_graph_init(struct atmel_isi *isi)
isi->notifier.subdevs = subdevs;
isi->notifier.num_subdevs = 1;
- isi->notifier.bound = isi_graph_notify_bound;
- isi->notifier.unbind = isi_graph_notify_unbind;
- isi->notifier.complete = isi_graph_notify_complete;
+ isi->notifier.ops = &isi_graph_notify_ops;
ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
if (ret < 0) {
@@ -1176,10 +1178,8 @@ static int atmel_isi_probe(struct platform_device *pdev)
int ret, i;
isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
- if (!isi) {
- dev_err(&pdev->dev, "Can't allocate interface!\n");
+ if (!isi)
return -ENOMEM;
- }
isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
if (IS_ERR(isi->pclk))
@@ -1204,7 +1204,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
return ret;
isi->vdev = video_device_alloc();
- if (isi->vdev == NULL) {
+ if (!isi->vdev) {
ret = -ENOMEM;
goto err_vdev_alloc;
}
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index 37169054b828..478eb2f7d723 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -338,7 +338,6 @@ struct ppi_if *ppi_create_instance(struct platform_device *pdev,
ppi = kzalloc(sizeof(*ppi), GFP_KERNEL);
if (!ppi) {
peripheral_free_list(info->pin_req);
- dev_err(&pdev->dev, "unable to allocate memory for ppi handle\n");
return NULL;
}
ppi->ops = &ppi_ops;
diff --git a/drivers/media/platform/cec-gpio/Makefile b/drivers/media/platform/cec-gpio/Makefile
new file mode 100644
index 000000000000..e82b258afa55
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CEC_GPIO) += cec-gpio.o
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
new file mode 100644
index 000000000000..5debdf08fbe7
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <media/cec-pin.h>
+
+struct cec_gpio {
+ struct cec_adapter *adap;
+ struct device *dev;
+
+ struct gpio_desc *cec_gpio;
+ int cec_irq;
+ bool cec_is_low;
+ bool cec_have_irq;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+ bool hpd_is_high;
+ ktime_t hpd_ts;
+};
+
+static bool cec_gpio_read(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return false;
+ return gpiod_get_value(cec->cec_gpio);
+}
+
+static void cec_gpio_high(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->cec_is_low)
+ return;
+ cec->cec_is_low = false;
+ gpiod_set_value(cec->cec_gpio, 1);
+}
+
+static void cec_gpio_low(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return;
+ if (WARN_ON_ONCE(cec->cec_have_irq))
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+ cec->cec_is_low = true;
+ gpiod_set_value(cec->cec_gpio, 0);
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+ bool is_high = gpiod_get_value(cec->hpd_gpio);
+
+ if (is_high == cec->hpd_is_high)
+ return IRQ_HANDLED;
+ cec->hpd_ts = ktime_get();
+ cec->hpd_is_high = is_high;
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_pin_changed(cec->adap, gpiod_get_value(cec->cec_gpio));
+ return IRQ_HANDLED;
+}
+
+static bool cec_gpio_enable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ return true;
+
+ if (request_irq(cec->cec_irq, cec_gpio_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ adap->name, cec))
+ return false;
+ cec->cec_have_irq = true;
+ return true;
+}
+
+static void cec_gpio_disable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+}
+
+static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ seq_printf(file, "mode: %s\n", cec->cec_is_low ? "low-drive" : "read");
+ if (cec->cec_have_irq)
+ seq_printf(file, "using irq: %d\n", cec->cec_irq);
+ if (cec->hpd_gpio)
+ seq_printf(file, "hpd: %s\n",
+ cec->hpd_is_high ? "high" : "low");
+}
+
+static int cec_gpio_read_hpd(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->hpd_gpio)
+ return -ENOTTY;
+ return gpiod_get_value(cec->hpd_gpio);
+}
+
+static void cec_gpio_free(struct cec_adapter *adap)
+{
+ cec_gpio_disable_irq(adap);
+}
+
+static const struct cec_pin_ops cec_gpio_pin_ops = {
+ .read = cec_gpio_read,
+ .low = cec_gpio_low,
+ .high = cec_gpio_high,
+ .enable_irq = cec_gpio_enable_irq,
+ .disable_irq = cec_gpio_disable_irq,
+ .status = cec_gpio_status,
+ .free = cec_gpio_free,
+ .read_hpd = cec_gpio_read_hpd,
+};
+
+static int cec_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cec_gpio *cec;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ cec->cec_gpio = devm_gpiod_get(dev, "cec", GPIOD_IN);
+ if (IS_ERR(cec->cec_gpio))
+ return PTR_ERR(cec->cec_gpio);
+ cec->cec_irq = gpiod_to_irq(cec->cec_gpio);
+
+ cec->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(cec->hpd_gpio))
+ return PTR_ERR(cec->hpd_gpio);
+
+ cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
+ cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
+ CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ if (IS_ERR(cec->adap))
+ return PTR_ERR(cec->adap);
+
+ if (cec->hpd_gpio) {
+ cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
+ ret = devm_request_threaded_irq(dev, cec->hpd_irq,
+ cec_hpd_gpio_irq_handler,
+ cec_hpd_gpio_irq_handler_thread,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "hpd-gpio", cec);
+ if (ret)
+ return ret;
+ }
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cec);
+ return 0;
+}
+
+static int cec_gpio_remove(struct platform_device *pdev)
+{
+ struct cec_gpio *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ return 0;
+}
+
+static const struct of_device_id cec_gpio_match[] = {
+ {
+ .compatible = "cec-gpio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cec_gpio_match);
+
+static struct platform_driver cec_gpio_pdrv = {
+ .probe = cec_gpio_probe,
+ .remove = cec_gpio_remove,
+ .driver = {
+ .name = "cec-gpio",
+ .of_match_table = cec_gpio_match,
+ },
+};
+
+module_platform_driver(cec_gpio_pdrv);
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CEC GPIO driver");
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 291c40933935..bfc4ecf6f068 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -417,6 +417,10 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
dev->devtype->product != CODA_DX6)
size += ysize / 4;
name = kasprintf(GFP_KERNEL, "fb%d", i);
+ if (!name) {
+ coda_free_framebuffers(ctx);
+ return -ENOMEM;
+ }
ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i],
size, name);
kfree(name);
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index f1b521045d64..3482178cbf01 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -82,8 +82,8 @@ struct ccdc_hw_device {
};
/* Used by CCDC module to register & unregister with vpfe capture driver */
-int vpfe_register_ccdc_device(struct ccdc_hw_device *dev);
-void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev);
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev);
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev);
#endif
#endif
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 6d492dc4c3a9..89cb3094d7e6 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -841,7 +841,7 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
return 0;
}
-static struct ccdc_hw_device ccdc_hw_dev = {
+static const struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM355 CCDC",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 3b2d8a9317b8..5fa0a1f32536 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -776,7 +776,7 @@ static void ccdc_restore_context(void)
regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
}
-static struct ccdc_hw_device ccdc_hw_dev = {
+static const struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM6446 CCDC",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 5813b49391ed..d5ff58494c1e 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1000,7 +1000,7 @@ static int isif_close(struct device *device)
return 0;
}
-static struct ccdc_hw_device isif_hw_dev = {
+static const struct ccdc_hw_device isif_hw_dev = {
.name = "ISIF",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 13d027031ff0..6aabd21fe69f 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -122,7 +122,7 @@ static irqreturn_t venc_isr(int irq, void *arg)
int fid;
int i;
- if ((NULL == arg) || (NULL == disp_dev->dev[0]))
+ if (!arg || !disp_dev->dev[0])
return IRQ_HANDLED;
if (venc_is_second_field(disp_dev))
@@ -337,10 +337,10 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
} else {
- if (layer->cur_frm != NULL)
+ if (layer->cur_frm)
vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
- if (layer->next_frm != NULL)
+ if (layer->next_frm)
vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -947,7 +947,7 @@ static int vpbe_display_s_std(struct file *file, void *priv,
if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- if (NULL != vpbe_dev->ops.s_std) {
+ if (vpbe_dev->ops.s_std) {
ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -1000,8 +1000,7 @@ static int vpbe_display_enum_output(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
/* Enumerate outputs */
-
- if (NULL == vpbe_dev->ops.enum_outputs)
+ if (!vpbe_dev->ops.enum_outputs)
return -EINVAL;
ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
@@ -1030,7 +1029,7 @@ static int vpbe_display_s_output(struct file *file, void *priv,
if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- if (NULL == vpbe_dev->ops.set_output)
+ if (!vpbe_dev->ops.set_output)
return -EINVAL;
ret = vpbe_dev->ops.set_output(vpbe_dev, i);
@@ -1077,7 +1076,7 @@ vpbe_display_enum_dv_timings(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
/* Enumerate outputs */
- if (NULL == vpbe_dev->ops.enum_dv_timings)
+ if (!vpbe_dev->ops.enum_dv_timings)
return -EINVAL;
ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings);
@@ -1292,7 +1291,7 @@ static int vpbe_device_get(struct device *dev, void *data)
if (strcmp("vpbe_controller", pdev->name) == 0)
vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
- if (strstr(pdev->name, "vpbe-osd") != NULL)
+ if (strstr(pdev->name, "vpbe-osd"))
vpbe_disp->osd_device = platform_get_drvdata(pdev);
return 0;
@@ -1305,15 +1304,10 @@ static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
struct video_device *vbd = NULL;
/* Allocate memory for four plane display objects */
-
- disp_dev->dev[i] =
- kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);
-
- /* If memory allocation fails, return error */
- if (!disp_dev->dev[i]) {
- printk(KERN_ERR "ran out of memory\n");
+ disp_dev->dev[i] = kzalloc(sizeof(*disp_dev->dev[i]), GFP_KERNEL);
+ if (!disp_dev->dev[i])
return -ENOMEM;
- }
+
spin_lock_init(&disp_dev->dev[i]->irqlock);
mutex_init(&disp_dev->dev[i]->opslock);
@@ -1397,8 +1391,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
printk(KERN_DEBUG "vpbe_display_probe\n");
/* Allocate memory for vpbe_display */
- disp_dev = devm_kzalloc(&pdev->dev, sizeof(struct vpbe_display),
- GFP_KERNEL);
+ disp_dev = devm_kzalloc(&pdev->dev, sizeof(*disp_dev), GFP_KERNEL);
if (!disp_dev)
return -ENOMEM;
@@ -1414,7 +1407,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
/* Initialize the vpbe display controller */
- if (NULL != disp_dev->vpbe_dev->ops.initialize) {
+ if (disp_dev->vpbe_dev->ops.initialize) {
err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
disp_dev->vpbe_dev);
if (err) {
@@ -1482,7 +1475,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
probe_out:
for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
/* Unregister video device */
- if (disp_dev->dev[k] != NULL) {
+ if (disp_dev->dev[k]) {
video_unregister_device(&disp_dev->dev[k]->video_dev);
kfree(disp_dev->dev[k]);
}
@@ -1504,7 +1497,7 @@ static int vpbe_display_remove(struct platform_device *pdev)
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
/* deinitialize the vpbe display controller */
- if (NULL != vpbe_dev->ops.deinitialize)
+ if (vpbe_dev->ops.deinitialize)
vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
/* un-register device */
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 6792da16d9c7..7b3f6f8e3dc8 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -115,7 +115,7 @@ static struct vpfe_config_params config_params = {
};
/* ccdc device registered */
-static struct ccdc_hw_device *ccdc_dev;
+static const struct ccdc_hw_device *ccdc_dev;
/* lock for accessing ccdc information */
static DEFINE_MUTEX(ccdc_lock);
/* ccdc configuration */
@@ -203,7 +203,7 @@ static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
* vpfe_register_ccdc_device. CCDC module calls this to
* register with vpfe capture
*/
-int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
{
int ret = 0;
printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
@@ -259,7 +259,7 @@ EXPORT_SYMBOL(vpfe_register_ccdc_device);
* vpfe_unregister_ccdc_device. CCDC module calls this to
* unregister with vpfe capture
*/
-void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev)
{
if (!dev) {
printk(KERN_ERR "invalid ccdc device ptr\n");
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 07e89a4985a6..16352e2263d2 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -47,8 +47,9 @@ EXPORT_SYMBOL_GPL(vpif_lock);
void __iomem *vpif_base;
EXPORT_SYMBOL_GPL(vpif_base);
-/**
+/*
* vpif_ch_params: video standard configuration parameters for vpif
+ *
* The table must include all presets from supported subdevices.
*/
const struct vpif_channel_config_params vpif_ch_params[] = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 0ef36cec21d1..fca4dc829f73 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -109,7 +109,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* @vq: vb2_queue ptr
* @nbuffers: ptr to number of buffers requested by application
* @nplanes:: contains number of distinct video planes needed to hold a frame
- * @sizes[]: contains the size (in bytes) of each plane.
+ * @sizes: contains the size (in bytes) of each plane.
* @alloc_devs: ptr to allocation context
*
* This callback function is called when reqbuf() is called to adjust
@@ -167,7 +167,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
/**
* vpif_start_streaming : Starts the DMA engine for streaming
- * @vb: ptr to vb2_buffer
+ * @vq: ptr to vb2_buffer
* @count: number of buffers
*/
static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -629,7 +629,7 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
/**
* vpif_get_default_field() - Get default field type based on interface
- * @vpif_params - ptr to vpif params
+ * @iface: ptr to vpif interface
*/
static inline enum v4l2_field vpif_get_default_field(
struct vpif_interface *iface)
@@ -640,8 +640,8 @@ static inline enum v4l2_field vpif_get_default_field(
/**
* vpif_config_addr() - function to configure buffer address in vpif
- * @ch - channel ptr
- * @muxmode - channel mux mode
+ * @ch: channel ptr
+ * @muxmode: channel mux mode
*/
static void vpif_config_addr(struct channel_obj *ch, int muxmode)
{
@@ -661,9 +661,9 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
/**
* vpif_input_to_subdev() - Maps input to sub device
- * @vpif_cfg - global config ptr
- * @chan_cfg - channel config ptr
- * @input_index - Given input index from application
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @input_index: Given input index from application
*
* lookup the sub device information for a given input index.
* we report all the inputs to application. inputs table also
@@ -699,9 +699,9 @@ static int vpif_input_to_subdev(
/**
* vpif_set_input() - Select an input
- * @vpif_cfg - global config ptr
- * @ch - channel
- * @_index - Given input index from application
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given input index from application
*
* Select the given input.
*/
@@ -792,7 +792,7 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
* vpif_g_std() - get STD handler
* @file: file ptr
* @priv: file handle
- * @std_id: ptr to std id
+ * @std: ptr to std id
*/
static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
@@ -933,7 +933,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
* vpif_enum_fmt_vid_cap() - ENUM_FMT handler
* @file: file ptr
* @priv: file handle
- * @index: input index
+ * @fmt: ptr to V4L2 format descriptor
*/
static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
@@ -1500,6 +1500,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev)
{
@@ -1691,8 +1696,7 @@ static __init int vpif_probe(struct platform_device *pdev)
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
@@ -1741,6 +1745,7 @@ static int vpif_remove(struct platform_device *device)
#ifdef CONFIG_PM_SLEEP
/**
* vpif_suspend: vpif device suspend
+ * @dev: pointer to &struct device
*/
static int vpif_suspend(struct device *dev)
{
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 56fe4e5b396e..7be636237acf 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -102,7 +102,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* @vq: vb2_queue ptr
* @nbuffers: ptr to number of buffers requested by application
* @nplanes:: contains number of distinct video planes needed to hold a frame
- * @sizes[]: contains the size (in bytes) of each plane.
+ * @sizes: contains the size (in bytes) of each plane.
* @alloc_devs: ptr to allocation context
*
* This callback function is called when reqbuf() is called to adjust
@@ -158,7 +158,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
/**
* vpif_start_streaming : Starts the DMA engine for streaming
- * @vb: ptr to vb2_buffer
+ * @vq: ptr to vb2_buffer
* @count: number of buffers
*/
static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -766,9 +766,9 @@ static int vpif_enum_output(struct file *file, void *fh,
/**
* vpif_output_to_subdev() - Maps output to sub device
- * @vpif_cfg - global config ptr
- * @chan_cfg - channel config ptr
- * @index - Given output index from application
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @index: Given output index from application
*
* lookup the sub device information for a given output index.
* we report all the output to application. output table also
@@ -802,9 +802,9 @@ vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
/**
* vpif_set_output() - Select an output
- * @vpif_cfg - global config ptr
- * @ch - channel
- * @index - Given output index from application
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given output index from application
*
* Select the given output.
*/
@@ -1232,6 +1232,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
/*
* vpif_probe: This function creates device entries by register itself to the
* V4L2 driver and initializes fields of each channel objects
@@ -1313,8 +1318,7 @@ static __init int vpif_probe(struct platform_device *pdev)
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 43801509dabb..17854a379243 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -958,6 +958,51 @@ static struct gsc_pix_max gsc_v_100_max = {
.target_rot_en_h = 2016,
};
+static struct gsc_pix_max gsc_v_5250_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2016,
+ .real_rot_en_h = 2016,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5420_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2048,
+ .real_rot_en_h = 2048,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5433_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2047,
+ .real_rot_en_h = 2047,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
static struct gsc_pix_min gsc_v_100_min = {
.org_w = 64,
.org_h = 32,
@@ -992,6 +1037,45 @@ static struct gsc_variant gsc_v_100_variant = {
.local_sc_down = 2,
};
+static struct gsc_variant gsc_v_5250_variant = {
+ .pix_max = &gsc_v_5250_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5420_variant = {
+ .pix_max = &gsc_v_5420_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5433_variant = {
+ .pix_max = &gsc_v_5433_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
static struct gsc_driverdata gsc_v_100_drvdata = {
.variant = {
[0] = &gsc_v_100_variant,
@@ -1004,11 +1088,33 @@ static struct gsc_driverdata gsc_v_100_drvdata = {
.num_clocks = 1,
};
+static struct gsc_driverdata gsc_v_5250_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5250_variant,
+ [1] = &gsc_v_5250_variant,
+ [2] = &gsc_v_5250_variant,
+ [3] = &gsc_v_5250_variant,
+ },
+ .num_entities = 4,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_v_5420_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5420_variant,
+ [1] = &gsc_v_5420_variant,
+ },
+ .num_entities = 2,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
static struct gsc_driverdata gsc_5433_drvdata = {
.variant = {
- [0] = &gsc_v_100_variant,
- [1] = &gsc_v_100_variant,
- [2] = &gsc_v_100_variant,
+ [0] = &gsc_v_5433_variant,
+ [1] = &gsc_v_5433_variant,
+ [2] = &gsc_v_5433_variant,
},
.num_entities = 3,
.clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
@@ -1017,13 +1123,21 @@ static struct gsc_driverdata gsc_5433_drvdata = {
static const struct of_device_id exynos_gsc_match[] = {
{
- .compatible = "samsung,exynos5-gsc",
- .data = &gsc_v_100_drvdata,
+ .compatible = "samsung,exynos5250-gsc",
+ .data = &gsc_v_5250_drvdata,
+ },
+ {
+ .compatible = "samsung,exynos5420-gsc",
+ .data = &gsc_v_5420_drvdata,
},
{
.compatible = "samsung,exynos5433-gsc",
.data = &gsc_5433_drvdata,
},
+ {
+ .compatible = "samsung,exynos5-gsc",
+ .data = &gsc_v_100_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, exynos_gsc_match);
@@ -1045,6 +1159,9 @@ static int gsc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ if (drv_data == &gsc_v_100_drvdata)
+ dev_info(dev, "compatible 'exynos5-gsc' is deprecated\n");
+
gsc->id = ret;
if (gsc->id >= drv_data->num_entities) {
dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index c480efb755f5..46a7d242a1a5 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -76,7 +76,7 @@ config VIDEO_EXYNOS4_ISP_DMA_CAPTURE
depends on VIDEO_EXYNOS4_FIMC_IS
select VIDEO_EXYNOS4_IS_COMMON
default y
- help
+ help
This option enables an additional video device node exposing a V4L2
video capture interface for the FIMC-IS ISP raw (Bayer) capture DMA.
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 948fe01f6c96..ed9302caa004 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -146,6 +146,7 @@ static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
/**
* fimc_capture_config_update - apply the camera interface configuration
+ * @ctx: FIMC capture context
*
* To be called from within the interrupt handler with fimc.slock
* spinlock held. It updates the camera pixel crop, rotation and
@@ -858,6 +859,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
* fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters
* @sensor: pointer to the sensor subdev
* @plane_fmt: provides plane sizes corresponding to the frame layout entries
+ * @num_planes: number of planes
* @try: true to set the frame parameters, false to query only
*
* This function is used by this driver only for compressed/blob data formats.
@@ -1101,6 +1103,7 @@ static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
/**
* fimc_pipeline_validate - check for formats inconsistencies
* between source and sink pad of each link
+ * @fimc: the FIMC device this context applies to
*
* Return 0 if all formats match or -EPIPE otherwise.
*/
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index d4656d5175d7..0ef583cfc424 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -60,6 +60,7 @@ static void __setup_sensor_notification(struct fimc_md *fmd,
/**
* fimc_pipeline_prepare - update pipeline information with subdevice pointers
+ * @p: fimc pipeline
* @me: media entity terminating the pipeline
*
* Caller holds the graph mutex.
@@ -151,8 +152,8 @@ static int __subdev_set_power(struct v4l2_subdev *sd, int on)
/**
* fimc_pipeline_s_power - change power state of all pipeline subdevs
- * @fimc: fimc device terminating the pipeline
- * @state: true to power on, false to power off
+ * @p: fimc device terminating the pipeline
+ * @on: true to power on, false to power off
*
* Needs to be called with the graph mutex held.
*/
@@ -219,6 +220,7 @@ static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
/**
* __fimc_pipeline_open - update the pipeline information, enable power
* of all pipeline subdevs and the sensor clock
+ * @ep: fimc device terminating the pipeline
* @me: media entity to start graph walk with
* @prepare: true to walk the current pipeline and acquire all subdevs
*
@@ -252,7 +254,7 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
/**
* __fimc_pipeline_close - disable the sensor clock and pipeline power
- * @fimc: fimc device terminating the pipeline
+ * @ep: fimc device terminating the pipeline
*
* Disable power of all subdevs and turn the external sensor clock off.
*/
@@ -281,7 +283,7 @@ static int __fimc_pipeline_close(struct exynos_media_pipeline *ep)
/**
* __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
- * @pipeline: video pipeline structure
+ * @ep: video pipeline structure
* @on: passed as the s_stream() callback argument
*/
static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
@@ -902,6 +904,7 @@ static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
/**
* fimc_md_create_links - create default links between registered entities
+ * @fmd: fimc media device
*
* Parallel interface sensor entities are connected directly to FIMC capture
* entities. The sensors using MIPI CSIS bus are connected through immutable
@@ -1405,6 +1408,11 @@ unlock:
return media_device_register(&fmd->media_dev);
}
+static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
+ .bound = subdev_notifier_bound,
+ .complete = subdev_notifier_complete,
+};
+
static int fimc_md_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1479,8 +1487,7 @@ static int fimc_md_probe(struct platform_device *pdev)
if (fmd->num_sensors > 0) {
fmd->subdev_notifier.subdevs = fmd->async_subdevs;
fmd->subdev_notifier.num_subdevs = fmd->num_sensors;
- fmd->subdev_notifier.bound = subdev_notifier_bound;
- fmd->subdev_notifier.complete = subdev_notifier_complete;
+ fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 560aadabcb11..cba46a656338 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -189,7 +189,7 @@ struct csis_drvdata {
* @irq: requested s5p-mipi-csis irq number
* @interrupt_mask: interrupt mask of the all used interrupts
* @flags: the state variable for power and streaming control
- * @clock_frequency: device bus clock frequency
+ * @clk_frequency: device bus clock frequency
* @hs_settle: HS-RX settle time
* @num_lanes: number of MIPI-CSI data lanes used
* @max_num_lanes: maximum number of MIPI-CSI data lanes supported
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index fb43025df573..dba21215dc84 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -339,9 +339,9 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
}
}
-static void viu_vid_timeout(unsigned long data)
+static void viu_vid_timeout(struct timer_list *t)
{
- struct viu_dev *dev = (struct viu_dev *)data;
+ struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
struct viu_buf *buf;
struct viu_dmaqueue *vidq = &dev->vidq;
@@ -1466,8 +1466,7 @@ static int viu_of_probe(struct platform_device *op)
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
- setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout,
- (unsigned long)viu_dev);
+ timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
viu_dev->std = V4L2_STD_NTSC_M;
viu_dev->first = 1;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index b7731b18ecae..aa3ce41898bc 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -59,6 +59,7 @@ struct h264_fb {
* @read_idx : read index
* @write_idx : write index
* @count : buffer count in list
+ * @reserved : for 8 bytes alignment
*/
struct h264_ring_fb_list {
struct h264_fb fb_list[H264_MAX_FB_NUM];
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index b9fad6a48879..3e84a761db3a 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -155,7 +155,6 @@ struct vdec_vp8_vpu_inst {
* @reg_base : HW register base address
* @frm_cnt : decode frame count
* @ctx : V4L2 context
- * @dev : platform device
* @vpu : VPU instance for decoder
* @vsi : VPU share information
*/
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 4eb3be37ba14..6cf31b366aad 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -34,7 +34,7 @@ static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
#define H264_FILLER_MARKER_SIZE ARRAY_SIZE(h264_filler_marker)
#define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098
-/**
+/*
* enum venc_h264_vpu_work_buf - h264 encoder buffer index
*/
enum venc_h264_vpu_work_buf {
@@ -50,7 +50,7 @@ enum venc_h264_vpu_work_buf {
VENC_H264_VPU_WORK_BUF_MAX,
};
-/**
+/*
* enum venc_h264_bs_mode - for bs_mode argument in h264_enc_vpu_encode
*/
enum venc_h264_bs_mode {
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index acb639c4abd2..957420dd60de 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -34,7 +34,7 @@
/* This ac_tag is vp8 frame tag. */
#define MAX_AC_TAG_SIZE 10
-/**
+/*
* enum venc_vp8_vpu_work_buf - vp8 encoder buffer index
*/
enum venc_vp8_vpu_work_buf {
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index 853d598937f6..1ff6a93262b7 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -181,6 +181,7 @@ struct share_obj {
* @extmem: VPU extended memory information
* @reg: VPU TCM and configuration registers
* @run: VPU initialization status
+ * @wdt: VPU watchdog workqueue
* @ipi_desc: VPU IPI descriptor
* @recv_buf: VPU DTCM share buffer for receiving. The
* receive buffer is only accessed in interrupt context.
@@ -194,7 +195,7 @@ struct share_obj {
* suppose a client is using VPU to decode VP8.
* If the other client wants to encode VP8,
* it has to wait until VP8 decode completes.
- * @wdt_refcnt WDT reference count to make sure the watchdog can be
+ * @wdt_refcnt: WDT reference count to make sure the watchdog can be
* disabled if no other client is using VPU service
* @ack_wq: The wait queue for each codec and mdp. When sleeping
* processes wake up, they will check the condition
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 4d29860d27b4..6f1b0c799e58 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1004,11 +1004,12 @@ static int omap_vout_open(struct file *file)
struct omap_vout_device *vout = NULL;
vout = video_drvdata(file);
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
if (vout == NULL)
return -ENODEV;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
/* for now, we only support single open */
if (vout->opened)
return -EBUSY;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1a428fe9f070..b7ff3842afc0 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1669,8 +1669,8 @@ static int isp_link_entity(
break;
}
if (i == entity->num_pads) {
- dev_err(isp->dev, "%s: no source pad in external entity\n",
- __func__);
+ dev_err(isp->dev, "%s: no source pad in external entity %s\n",
+ __func__, entity->name);
return -EINVAL;
}
@@ -2001,6 +2001,7 @@ static int isp_remove(struct platform_device *pdev)
__omap3isp_put(isp, false);
media_entity_enum_cleanup(&isp->crashed);
+ v4l2_async_notifier_cleanup(&isp->notifier);
return 0;
}
@@ -2011,44 +2012,41 @@ enum isp_of_phy {
ISP_OF_PHY_CSIPHY2,
};
-static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
- struct isp_async_subdev *isd)
+static int isp_fwnode_parse(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
+ struct isp_async_subdev *isd =
+ container_of(asd, struct isp_async_subdev, asd);
struct isp_bus_cfg *buscfg = &isd->bus;
- struct v4l2_fwnode_endpoint vep;
- unsigned int i;
- int ret;
bool csi1 = false;
-
- ret = v4l2_fwnode_endpoint_parse(fwnode, &vep);
- if (ret)
- return ret;
+ unsigned int i;
dev_dbg(dev, "parsing endpoint %pOF, interface %u\n",
- to_of_node(fwnode), vep.base.port);
+ to_of_node(vep->base.local_fwnode), vep->base.port);
- switch (vep.base.port) {
+ switch (vep->base.port) {
case ISP_OF_PHY_PARALLEL:
buscfg->interface = ISP_INTERFACE_PARALLEL;
buscfg->bus.parallel.data_lane_shift =
- vep.bus.parallel.data_shift;
+ vep->bus.parallel.data_shift;
buscfg->bus.parallel.clk_pol =
- !!(vep.bus.parallel.flags
+ !!(vep->bus.parallel.flags
& V4L2_MBUS_PCLK_SAMPLE_FALLING);
buscfg->bus.parallel.hs_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
buscfg->bus.parallel.vs_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
buscfg->bus.parallel.fld_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
buscfg->bus.parallel.data_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
- buscfg->bus.parallel.bt656 = vep.bus_type == V4L2_MBUS_BT656;
+ !!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
+ buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
break;
case ISP_OF_PHY_CSIPHY1:
case ISP_OF_PHY_CSIPHY2:
- switch (vep.bus_type) {
+ switch (vep->bus_type) {
case V4L2_MBUS_CCP2:
case V4L2_MBUS_CSI1:
dev_dbg(dev, "CSI-1/CCP-2 configuration\n");
@@ -2060,11 +2058,11 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
break;
default:
dev_err(dev, "unsupported bus type %u\n",
- vep.bus_type);
+ vep->bus_type);
return -EINVAL;
}
- switch (vep.base.port) {
+ switch (vep->base.port) {
case ISP_OF_PHY_CSIPHY1:
if (csi1)
buscfg->interface = ISP_INTERFACE_CCP2B_PHY1;
@@ -2080,47 +2078,47 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
}
if (csi1) {
buscfg->bus.ccp2.lanecfg.clk.pos =
- vep.bus.mipi_csi1.clock_lane;
+ vep->bus.mipi_csi1.clock_lane;
buscfg->bus.ccp2.lanecfg.clk.pol =
- vep.bus.mipi_csi1.lane_polarity[0];
+ vep->bus.mipi_csi1.lane_polarity[0];
dev_dbg(dev, "clock lane polarity %u, pos %u\n",
buscfg->bus.ccp2.lanecfg.clk.pol,
buscfg->bus.ccp2.lanecfg.clk.pos);
buscfg->bus.ccp2.lanecfg.data[0].pos =
- vep.bus.mipi_csi1.data_lane;
+ vep->bus.mipi_csi1.data_lane;
buscfg->bus.ccp2.lanecfg.data[0].pol =
- vep.bus.mipi_csi1.lane_polarity[1];
+ vep->bus.mipi_csi1.lane_polarity[1];
dev_dbg(dev, "data lane polarity %u, pos %u\n",
buscfg->bus.ccp2.lanecfg.data[0].pol,
buscfg->bus.ccp2.lanecfg.data[0].pos);
buscfg->bus.ccp2.strobe_clk_pol =
- vep.bus.mipi_csi1.clock_inv;
- buscfg->bus.ccp2.phy_layer = vep.bus.mipi_csi1.strobe;
+ vep->bus.mipi_csi1.clock_inv;
+ buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
buscfg->bus.ccp2.ccp2_mode =
- vep.bus_type == V4L2_MBUS_CCP2;
+ vep->bus_type == V4L2_MBUS_CCP2;
buscfg->bus.ccp2.vp_clk_pol = 1;
buscfg->bus.ccp2.crc = 1;
} else {
buscfg->bus.csi2.lanecfg.clk.pos =
- vep.bus.mipi_csi2.clock_lane;
+ vep->bus.mipi_csi2.clock_lane;
buscfg->bus.csi2.lanecfg.clk.pol =
- vep.bus.mipi_csi2.lane_polarities[0];
+ vep->bus.mipi_csi2.lane_polarities[0];
dev_dbg(dev, "clock lane polarity %u, pos %u\n",
buscfg->bus.csi2.lanecfg.clk.pol,
buscfg->bus.csi2.lanecfg.clk.pos);
buscfg->bus.csi2.num_data_lanes =
- vep.bus.mipi_csi2.num_data_lanes;
+ vep->bus.mipi_csi2.num_data_lanes;
for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
buscfg->bus.csi2.lanecfg.data[i].pos =
- vep.bus.mipi_csi2.data_lanes[i];
+ vep->bus.mipi_csi2.data_lanes[i];
buscfg->bus.csi2.lanecfg.data[i].pol =
- vep.bus.mipi_csi2.lane_polarities[i + 1];
+ vep->bus.mipi_csi2.lane_polarities[i + 1];
dev_dbg(dev,
"data lane %u polarity %u, pos %u\n", i,
buscfg->bus.csi2.lanecfg.data[i].pol,
@@ -2137,57 +2135,13 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
default:
dev_warn(dev, "%pOF: invalid interface %u\n",
- to_of_node(fwnode), vep.base.port);
+ to_of_node(vep->base.local_fwnode), vep->base.port);
return -EINVAL;
}
return 0;
}
-static int isp_fwnodes_parse(struct device *dev,
- struct v4l2_async_notifier *notifier)
-{
- struct fwnode_handle *fwnode = NULL;
-
- notifier->subdevs = devm_kcalloc(
- dev, ISP_MAX_SUBDEVS, sizeof(*notifier->subdevs), GFP_KERNEL);
- if (!notifier->subdevs)
- return -ENOMEM;
-
- while (notifier->num_subdevs < ISP_MAX_SUBDEVS &&
- (fwnode = fwnode_graph_get_next_endpoint(
- of_fwnode_handle(dev->of_node), fwnode))) {
- struct isp_async_subdev *isd;
-
- isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
- if (!isd)
- goto error;
-
- if (isp_fwnode_parse(dev, fwnode, isd)) {
- devm_kfree(dev, isd);
- continue;
- }
-
- notifier->subdevs[notifier->num_subdevs] = &isd->asd;
-
- isd->asd.match.fwnode.fwnode =
- fwnode_graph_get_remote_port_parent(fwnode);
- if (!isd->asd.match.fwnode.fwnode) {
- dev_warn(dev, "bad remote port parent\n");
- goto error;
- }
-
- isd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- notifier->num_subdevs++;
- }
-
- return notifier->num_subdevs;
-
-error:
- fwnode_handle_put(fwnode);
- return -EINVAL;
-}
-
static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
{
struct isp_device *isp = container_of(async, struct isp_device,
@@ -2201,7 +2155,7 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return ret;
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
- if (!sd->asd)
+ if (sd->notifier != &isp->notifier)
continue;
ret = isp_link_entity(isp, &sd->entity,
@@ -2217,6 +2171,10 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return media_device_register(&isp->media_dev);
}
+static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
+ .complete = isp_subdev_notifier_complete,
+};
+
/*
* isp_probe - Probe ISP platform device
* @pdev: Pointer to ISP platform device
@@ -2256,15 +2214,17 @@ static int isp_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = isp_fwnodes_parse(&pdev->dev, &isp->notifier);
- if (ret < 0)
- return ret;
-
isp->autoidle = autoidle;
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(
+ &pdev->dev, &isp->notifier, sizeof(struct isp_async_subdev),
+ isp_fwnode_parse);
+ if (ret < 0)
+ goto error;
+
isp->dev = &pdev->dev;
isp->ref_count = 0;
@@ -2385,7 +2345,7 @@ static int isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error_register_entities;
- isp->notifier.complete = isp_subdev_notifier_complete;
+ isp->notifier.ops = &isp_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
if (ret)
@@ -2406,6 +2366,7 @@ error_isp:
isp_xclk_cleanup(isp);
__omap3isp_put(isp, false);
error:
+ v4l2_async_notifier_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
return ret;
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index e528df6efc09..8b9043db94b3 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -220,14 +220,11 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
-
-#define ISP_MAX_SUBDEVS 8
- struct v4l2_subdev *subdevs[ISP_MAX_SUBDEVS];
};
struct isp_async_subdev {
- struct isp_bus_cfg bus;
struct v4l2_async_subdev asd;
+ struct isp_bus_cfg bus;
};
#define v4l2_subdev_to_bus_cfg(sd) \
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index edca993c2b1f..295f34ad1080 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -235,6 +235,7 @@ enum pxa_mbus_layout {
* stored in memory in the following way:
* @packing: Type of sample-packing, that has to be used
* @order: Sample order when storing in memory
+ * @layout: Planes layout in memory
* @bits_per_sample: How many bits the bridge has to sample
*/
struct pxa_mbus_pixelfmt {
@@ -852,10 +853,10 @@ static void pxa_camera_dma_irq_v(void *data)
/**
* pxa_init_dma_channel - init dma descriptors
* @pcdev: pxa camera device
- * @vb: videobuffer2 buffer
- * @dma: dma video buffer
+ * @buf: pxa camera buffer
* @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
- * @cibr: camera Receive Buffer Register
+ * @sg: dma scatter list
+ * @sglen: dma scatter list length
*
* Prepares the pxa dma descriptors to transfer one camera channel.
*
@@ -1010,6 +1011,8 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
/**
* pxa_camera_check_link_miss - check missed DMA linking
* @pcdev: camera device
+ * @last_submitted: an opaque DMA cookie for last submitted
+ * @last_issued: an opaque DMA cookie for last issued
*
* The DMA chaining is done with DMA running. This means a tiny temporal window
* remains, where a buffer is queued on the chain, while the chain is already
@@ -2221,6 +2224,11 @@ static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
mutex_unlock(&pcdev->mlock);
}
+static const struct v4l2_async_notifier_operations pxa_camera_sensor_ops = {
+ .bound = pxa_camera_sensor_bound,
+ .unbind = pxa_camera_sensor_unbind,
+};
+
/*
* Driver probe, remove, suspend and resume operations
*/
@@ -2489,8 +2497,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->asds[0] = &pcdev->asd;
pcdev->notifier.subdevs = pcdev->asds;
pcdev->notifier.num_subdevs = 1;
- pcdev->notifier.bound = pxa_camera_sensor_bound;
- pcdev->notifier.unbind = pxa_camera_sensor_unbind;
+ pcdev->notifier.ops = &pxa_camera_sensor_ops;
if (!of_have_populated_dt())
pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b22d2dfcd3c2..55232a912950 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -622,6 +622,9 @@ static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ } else {
+ /* On current devices output->wm_num is always <= 2 */
+ break;
}
if (output->wm_idx[i] % 2 == 1)
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.c b/drivers/media/platform/qcom/camss-8x16/camss-video.c
index cf4219e871bd..ffaa2849e0c1 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-video.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-video.c
@@ -21,7 +21,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
-#include <media/videobuf-core.h>
#include <media/videobuf2-dma-sg.h>
#include "camss-video.h"
diff --git a/drivers/media/platform/qcom/camss-8x16/camss.c b/drivers/media/platform/qcom/camss-8x16/camss.c
index a3760b5dd1d1..390a42c17b66 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss.c
@@ -601,6 +601,11 @@ static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
return media_device_register(&camss->media_dev);
}
+static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = {
+ .bound = camss_subdev_notifier_bound,
+ .complete = camss_subdev_notifier_complete,
+};
+
static const struct media_device_ops camss_media_ops = {
.link_notify = v4l2_pipeline_link_notify,
};
@@ -655,8 +660,7 @@ static int camss_probe(struct platform_device *pdev)
goto err_register_entities;
if (camss->notifier.num_subdevs) {
- camss->notifier.bound = camss_subdev_notifier_bound;
- camss->notifier.complete = camss_subdev_notifier_complete;
+ camss->notifier.ops = &camss_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&camss->v4l2_dev,
&camss->notifier);
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index cba092bcb76d..a0fe80df0cbd 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -194,7 +194,6 @@ struct venus_buffer {
* @fh: a holder of v4l file handle structure
* @streamon_cap: stream on flag for capture queue
* @streamon_out: stream on flag for output queue
- * @cmd_stop: a flag to signal encoder/decoder commands
* @width: current capture width
* @height: current capture height
* @out_width: current output width
@@ -258,7 +257,6 @@ struct venus_inst {
} controls;
struct v4l2_fh fh;
unsigned int streamon_cap, streamon_out;
- bool cmd_stop;
u32 width;
u32 height;
u32 out_width;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 9b2a401a4891..0ce9559a2924 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
mutex_lock(&inst->lock);
- if (inst->cmd_stop) {
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
- inst->cmd_stop = false;
- goto unlock;
- }
-
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
if (!(inst->streamon_out & inst->streamon_cap))
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index c09490876516..1baf78d3c02d 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -88,12 +88,6 @@ unlock:
return ret;
}
-static int core_deinit_wait_atomic_t(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
int hfi_core_deinit(struct venus_core *core, bool blocking)
{
int ret = 0, empty;
@@ -112,7 +106,7 @@ int hfi_core_deinit(struct venus_core *core, bool blocking)
if (!empty) {
mutex_unlock(&core->lock);
- wait_on_atomic_t(&core->insts_count, core_deinit_wait_atomic_t,
+ wait_on_atomic_t(&core->insts_count, atomic_t_wait,
TASK_UNINTERRUPTIBLE);
mutex_lock(&core->lock);
}
@@ -484,6 +478,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(hfi_session_process_buf);
irqreturn_t hfi_isr_thread(int irq, void *dev_id)
{
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 1caae8feaa36..734ce11b0ed0 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
desc->attrs = DMA_ATTR_WRITE_COMBINE;
desc->size = ALIGN(size, SZ_4K);
- desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL,
+ desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
desc->attrs);
if (!desc->kva)
return -ENOMEM;
@@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret)
return ret;
- hdev->ifaceq_table.kva = desc.kva;
- hdev->ifaceq_table.da = desc.da;
- hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE;
- offset = hdev->ifaceq_table.size;
+ hdev->ifaceq_table = desc;
+ offset = IFACEQ_TABLE_SIZE;
for (i = 0; i < IFACEQ_NUM; i++) {
queue = &hdev->queues[i];
@@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret) {
hdev->sfr.da = 0;
} else {
- hdev->sfr.da = desc.da;
- hdev->sfr.kva = desc.kva;
- hdev->sfr.size = ALIGNED_SFR_SIZE;
+ hdev->sfr = desc;
sfr = hdev->sfr.kva;
sfr->buf_size = ALIGNED_SFR_SIZE;
}
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index da611a5eb670..c9e9576bb08a 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
static int
vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
- if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
+ }
return 0;
}
@@ -479,6 +485,7 @@ static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
+ struct hfi_frame_data fdata = {0};
int ret;
ret = vdec_try_decoder_cmd(file, fh, cmd);
@@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
return ret;
mutex_lock(&inst->lock);
- inst->cmd_stop = true;
- mutex_unlock(&inst->lock);
- hfi_session_flush(inst);
+ /*
+ * Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on decoder
+ * input to signal EOS.
+ */
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ fdata.buffer_type = HFI_BUFFER_INPUT;
+ fdata.flags |= HFI_BUFFERFLAG_EOS;
+ fdata.device_addr = 0xdeadbeef;
- return 0;
+ ret = hfi_session_process_buf(inst, &fdata);
+
+unlock:
+ mutex_unlock(&inst->lock);
+ return ret;
}
static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
@@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
inst->reconfig = false;
inst->sequence_cap = 0;
inst->sequence_out = 0;
- inst->cmd_stop = false;
ret = vdec_init_session(inst);
if (ret)
@@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
- if (inst->cmd_stop) {
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- inst->cmd_stop = false;
- }
-
if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 6f123a387cf9..3fcf0e9b7b29 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
if (!vbuf)
return;
- vb = &vbuf->vb2_buf;
- vb->planes[0].bytesused = bytesused;
- vb->planes[0].data_offset = data_offset;
-
vbuf->flags = flags;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ vb = &vbuf->vb2_buf;
+ vb2_set_plane_payload(vb, 0, bytesused + data_offset);
+ vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
} else {
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 142de447aaaa..108d776f3265 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>
#include "rcar-vin.h"
@@ -77,14 +78,14 @@ static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
int ret;
/* Verify subdevices mbus format */
- if (!rvin_mbus_supported(&vin->digital)) {
+ if (!rvin_mbus_supported(vin->digital)) {
vin_err(vin, "Unsupported media bus format for %s\n",
- vin->digital.subdev->name);
+ vin->digital->subdev->name);
return -EINVAL;
}
vin_dbg(vin, "Found media bus format for %s: %d\n",
- vin->digital.subdev->name, vin->digital.code);
+ vin->digital->subdev->name, vin->digital->code);
ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
if (ret < 0) {
@@ -103,7 +104,7 @@ static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
rvin_v4l2_remove(vin);
- vin->digital.subdev = NULL;
+ vin->digital->subdev = NULL;
}
static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
@@ -120,117 +121,75 @@ static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
if (ret < 0)
return ret;
- vin->digital.source_pad = ret;
+ vin->digital->source_pad = ret;
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
- vin->digital.sink_pad = ret < 0 ? 0 : ret;
+ vin->digital->sink_pad = ret < 0 ? 0 : ret;
- vin->digital.subdev = subdev;
+ vin->digital->subdev = subdev;
vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
- subdev->name, vin->digital.source_pad,
- vin->digital.sink_pad);
+ subdev->name, vin->digital->source_pad,
+ vin->digital->sink_pad);
return 0;
}
+static const struct v4l2_async_notifier_operations rvin_digital_notify_ops = {
+ .bound = rvin_digital_notify_bound,
+ .unbind = rvin_digital_notify_unbind,
+ .complete = rvin_digital_notify_complete,
+};
+
-static int rvin_digitial_parse_v4l2(struct rvin_dev *vin,
- struct device_node *ep,
- struct v4l2_mbus_config *mbus_cfg)
+static int rvin_digital_parse_v4l2(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
- struct v4l2_fwnode_endpoint v4l2_ep;
- int ret;
+ struct rvin_dev *vin = dev_get_drvdata(dev);
+ struct rvin_graph_entity *rvge =
+ container_of(asd, struct rvin_graph_entity, asd);
- ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep);
- if (ret) {
- vin_err(vin, "Could not parse v4l2 endpoint\n");
- return -EINVAL;
- }
+ if (vep->base.port || vep->base.id)
+ return -ENOTCONN;
- mbus_cfg->type = v4l2_ep.bus_type;
+ rvge->mbus_cfg.type = vep->bus_type;
- switch (mbus_cfg->type) {
+ switch (rvge->mbus_cfg.type) {
case V4L2_MBUS_PARALLEL:
vin_dbg(vin, "Found PARALLEL media bus\n");
- mbus_cfg->flags = v4l2_ep.bus.parallel.flags;
+ rvge->mbus_cfg.flags = vep->bus.parallel.flags;
break;
case V4L2_MBUS_BT656:
vin_dbg(vin, "Found BT656 media bus\n");
- mbus_cfg->flags = 0;
+ rvge->mbus_cfg.flags = 0;
break;
default:
vin_err(vin, "Unknown media bus type\n");
return -EINVAL;
}
- return 0;
-}
-
-static int rvin_digital_graph_parse(struct rvin_dev *vin)
-{
- struct device_node *ep, *np;
- int ret;
-
- vin->digital.asd.match.fwnode.fwnode = NULL;
- vin->digital.subdev = NULL;
-
- /*
- * Port 0 id 0 is local digital input, try to get it.
- * Not all instances can or will have this, that is OK
- */
- ep = of_graph_get_endpoint_by_regs(vin->dev->of_node, 0, 0);
- if (!ep)
- return 0;
-
- np = of_graph_get_remote_port_parent(ep);
- if (!np) {
- vin_err(vin, "No remote parent for digital input\n");
- of_node_put(ep);
- return -EINVAL;
- }
- of_node_put(np);
-
- ret = rvin_digitial_parse_v4l2(vin, ep, &vin->digital.mbus_cfg);
- of_node_put(ep);
- if (ret)
- return ret;
-
- vin->digital.asd.match.fwnode.fwnode = of_fwnode_handle(np);
- vin->digital.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ vin->digital = rvge;
return 0;
}
static int rvin_digital_graph_init(struct rvin_dev *vin)
{
- struct v4l2_async_subdev **subdevs = NULL;
int ret;
- ret = rvin_digital_graph_parse(vin);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(
+ vin->dev, &vin->notifier,
+ sizeof(struct rvin_graph_entity), rvin_digital_parse_v4l2);
if (ret)
return ret;
- if (!vin->digital.asd.match.fwnode.fwnode) {
- vin_dbg(vin, "No digital subdevice found\n");
+ if (!vin->digital)
return -ENODEV;
- }
-
- /* Register the subdevices notifier. */
- subdevs = devm_kzalloc(vin->dev, sizeof(*subdevs), GFP_KERNEL);
- if (subdevs == NULL)
- return -ENOMEM;
-
- subdevs[0] = &vin->digital.asd;
vin_dbg(vin, "Found digital subdevice %pOF\n",
- to_of_node(subdevs[0]->match.fwnode.fwnode));
-
- vin->notifier.num_subdevs = 1;
- vin->notifier.subdevs = subdevs;
- vin->notifier.bound = rvin_digital_notify_bound;
- vin->notifier.unbind = rvin_digital_notify_unbind;
- vin->notifier.complete = rvin_digital_notify_complete;
+ to_of_node(vin->digital->asd.match.fwnode.fwnode));
+ vin->notifier.ops = &rvin_digital_notify_ops;
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
@@ -290,6 +249,8 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (ret)
return ret;
+ platform_set_drvdata(pdev, vin);
+
ret = rvin_digital_graph_init(vin);
if (ret < 0)
goto error;
@@ -297,11 +258,10 @@ static int rcar_vin_probe(struct platform_device *pdev)
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
- platform_set_drvdata(pdev, vin);
-
return 0;
error:
rvin_dma_remove(vin);
+ v4l2_async_notifier_cleanup(&vin->notifier);
return ret;
}
@@ -313,6 +273,7 @@ static int rcar_vin_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
v4l2_async_notifier_unregister(&vin->notifier);
+ v4l2_async_notifier_cleanup(&vin->notifier);
rvin_dma_remove(vin);
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index b136844499f6..23fdff7a7370 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -183,7 +183,7 @@ static int rvin_setup(struct rvin_dev *vin)
/*
* Input interface
*/
- switch (vin->digital.code) {
+ switch (vin->digital->code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
/* BT.601/BT.1358 16bit YCbCr422 */
vnmc |= VNMC_INF_YUV16;
@@ -191,7 +191,7 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
- vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital->mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
input_is_yuv = true;
break;
@@ -200,7 +200,7 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
- vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital->mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
input_is_yuv = true;
break;
@@ -212,11 +212,11 @@ static int rvin_setup(struct rvin_dev *vin)
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
/* Hsync Signal Polarity Select */
- if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ if (!(vin->digital->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_HPS;
/* Vsync Signal Polarity Select */
- if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ if (!(vin->digital->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_VPS;
/*
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index dd37ea811680..b479b882da12 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -111,7 +111,7 @@ static int rvin_reset_format(struct rvin_dev *vin)
struct v4l2_mbus_framefmt *mf = &fmt.format;
int ret;
- fmt.pad = vin->digital.source_pad;
+ fmt.pad = vin->digital->source_pad;
ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
if (ret)
@@ -172,13 +172,13 @@ static int __rvin_try_format_source(struct rvin_dev *vin,
sd = vin_to_source(vin);
- v4l2_fill_mbus_format(&format.format, pix, vin->digital.code);
+ v4l2_fill_mbus_format(&format.format, pix, vin->digital->code);
pad_cfg = v4l2_subdev_alloc_pad_config(sd);
if (pad_cfg == NULL)
return -ENOMEM;
- format.pad = vin->digital.source_pad;
+ format.pad = vin->digital->source_pad;
field = pix->field;
@@ -555,7 +555,7 @@ static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
if (timings->pad)
return -EINVAL;
- timings->pad = vin->digital.sink_pad;
+ timings->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
@@ -607,7 +607,7 @@ static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
if (cap->pad)
return -EINVAL;
- cap->pad = vin->digital.sink_pad;
+ cap->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
@@ -625,7 +625,7 @@ static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital.sink_pad;
+ edid->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, get_edid, edid);
@@ -643,7 +643,7 @@ static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital.sink_pad;
+ edid->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, set_edid, edid);
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index 9bfb5a7c4dc4..5382078143fb 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -126,7 +126,7 @@ struct rvin_dev {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
- struct rvin_graph_entity digital;
+ struct rvin_graph_entity *digital;
struct mutex lock;
struct vb2_queue queue;
@@ -145,7 +145,7 @@ struct rvin_dev {
struct v4l2_rect compose;
};
-#define vin_to_source(vin) vin->digital.subdev
+#define vin_to_source(vin) ((vin)->digital->subdev)
/* Debug */
#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 522364ff0d5d..63c94f4028a7 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -630,7 +630,7 @@ static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
{
unsigned int i;
u32 ctr;
- int ret;
+ int ret = -EINVAL;
/*
* When both internal channels are enabled, they can be synchronized
@@ -1185,6 +1185,12 @@ error:
return ret;
}
+static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
+ .bound = rcar_drif_notify_bound,
+ .unbind = rcar_drif_notify_unbind,
+ .complete = rcar_drif_notify_complete,
+};
+
/* Read endpoint properties */
static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
struct fwnode_handle *fwnode)
@@ -1347,9 +1353,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
if (ret)
goto error;
- sdr->notifier.bound = rcar_drif_notify_bound;
- sdr->notifier.unbind = rcar_drif_notify_unbind;
- sdr->notifier.complete = rcar_drif_notify_complete;
+ sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 3245bc45f4a0..b13dec3081e5 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -1132,7 +1132,7 @@ static int fdp1_device_process(struct fdp1_ctx *ctx)
* mem2mem callbacks
*/
-/**
+/*
* job_ready() - check whether an instance is ready to be scheduled to run
*/
static int fdp1_m2m_job_ready(void *priv)
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 070bac36d766..f6092ae45912 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -257,7 +257,7 @@ struct jpu_fmt {
};
/**
- * jpu_q_data - parameters of one queue
+ * struct jpu_q_data - parameters of one queue
* @fmtinfo: driver-specific format of this queue
* @format: multiplanar format of this queue
* @sequence: sequence number
@@ -269,7 +269,7 @@ struct jpu_q_data {
};
/**
- * jpu_ctx - the device context data
+ * struct jpu_ctx - the device context data
* @jpu: JPEG IP device for this context
* @encoder: compression (encode) operation or decompression (decode)
* @compr_quality: destination image quality in compression (encode) mode
diff --git a/drivers/media/platform/rockchip/rga/Makefile b/drivers/media/platform/rockchip/rga/Makefile
new file mode 100644
index 000000000000..92fe25490ccd
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/Makefile
@@ -0,0 +1,3 @@
+rockchip-rga-objs := rga.o rga-hw.o rga-buf.o
+
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip-rga.o
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
new file mode 100644
index 000000000000..49cacc7a48d1
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int
+rga_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rga_frame *f = rga_get_frame(ctx, vq->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ if (*nplanes)
+ return sizes[0] < f->size ? -EINVAL : 0;
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+
+ return 0;
+}
+
+static int rga_buf_prepare(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ vb2_set_plane_payload(vb, 0, f->size);
+
+ return 0;
+}
+
+static void rga_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(rga->dev);
+
+ if (!ret)
+ return 0;
+
+ for (i = 0; i < q->num_buffers; ++i) {
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+
+ return ret;
+}
+
+static void rga_buf_stop_streaming(struct vb2_queue *q)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
+ pm_runtime_put(rga->dev);
+}
+
+const struct vb2_ops rga_qops = {
+ .queue_setup = rga_queue_setup,
+ .buf_prepare = rga_buf_prepare,
+ .buf_queue = rga_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = rga_buf_start_streaming,
+ .stop_streaming = rga_buf_stop_streaming,
+};
+
+/* RGA MMU is a 1-Level MMU, so it can't be used through the IOMMU API.
+ * We use it more like a scatter-gather list.
+ */
+void rga_buf_map(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rockchip_rga *rga = ctx->rga;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ unsigned int *pages;
+ unsigned int address, len, i, p;
+ unsigned int mapped_size = 0;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pages = rga->src_mmu_pages;
+ else
+ pages = rga->dst_mmu_pages;
+
+ /* Create local MMU table for RGA */
+ sgt = vb2_plane_cookie(vb, 0);
+
+ for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;
+ address = sg_phys(sgl);
+
+ for (p = 0; p < len; p++) {
+ dma_addr_t phys = address + (p << PAGE_SHIFT);
+
+ pages[mapped_size + p] = phys;
+ }
+
+ mapped_size += len;
+ }
+
+ /* sync local MMU table for RGA */
+ dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
+ 8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
new file mode 100644
index 000000000000..96d1b1b3fe8e
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+enum e_rga_start_pos {
+ LT = 0,
+ LB = 1,
+ RT = 2,
+ RB = 3,
+};
+
+struct rga_addr_offset {
+ unsigned int y_off;
+ unsigned int u_off;
+ unsigned int v_off;
+};
+
+struct rga_corners_addr_offset {
+ struct rga_addr_offset left_top;
+ struct rga_addr_offset right_top;
+ struct rga_addr_offset left_bottom;
+ struct rga_addr_offset right_bottom;
+};
+
+static unsigned int rga_get_scaling(unsigned int src, unsigned int dst)
+{
+ /*
+ * The rga hw scaling factor is a normalized inverse of the
+ * scaling factor.
+ * For example: When source width is 100 and destination width is 200
+ * (scaling of 2x), then the hw factor is NC * 100 / 200.
+ * The normalization factor (NC) is 2^16 = 0x10000.
+ */
+
+ return (src > dst) ? ((dst << 16) / src) : ((src << 16) / dst);
+}
+
+static struct rga_corners_addr_offset
+rga_get_addr_offset(struct rga_frame *frm, unsigned int x, unsigned int y,
+ unsigned int w, unsigned int h)
+{
+ struct rga_corners_addr_offset offsets;
+ struct rga_addr_offset *lt, *lb, *rt, *rb;
+ unsigned int x_div = 0,
+ y_div = 0, uv_stride = 0, pixel_width = 0, uv_factor = 0;
+
+ lt = &offsets.left_top;
+ lb = &offsets.left_bottom;
+ rt = &offsets.right_top;
+ rb = &offsets.right_bottom;
+
+ x_div = frm->fmt->x_div;
+ y_div = frm->fmt->y_div;
+ uv_factor = frm->fmt->uv_factor;
+ uv_stride = frm->stride / x_div;
+ pixel_width = frm->stride / frm->width;
+
+ lt->y_off = y * frm->stride + x * pixel_width;
+ lt->u_off =
+ frm->width * frm->height + (y / y_div) * uv_stride + x / x_div;
+ lt->v_off = lt->u_off + frm->width * frm->height / uv_factor;
+
+ lb->y_off = lt->y_off + (h - 1) * frm->stride;
+ lb->u_off = lt->u_off + (h / y_div - 1) * uv_stride;
+ lb->v_off = lt->v_off + (h / y_div - 1) * uv_stride;
+
+ rt->y_off = lt->y_off + (w - 1) * pixel_width;
+ rt->u_off = lt->u_off + w / x_div - 1;
+ rt->v_off = lt->v_off + w / x_div - 1;
+
+ rb->y_off = lb->y_off + (w - 1) * pixel_width;
+ rb->u_off = lb->u_off + w / x_div - 1;
+ rb->v_off = lb->v_off + w / x_div - 1;
+
+ return offsets;
+}
+
+static struct rga_addr_offset *rga_lookup_draw_pos(struct
+ rga_corners_addr_offset
+ * offsets, u32 rotate_mode,
+ u32 mirr_mode)
+{
+ static enum e_rga_start_pos rot_mir_point_matrix[4][4] = {
+ {
+ LT, RT, LB, RB,
+ },
+ {
+ RT, LT, RB, LB,
+ },
+ {
+ RB, LB, RT, LT,
+ },
+ {
+ LB, RB, LT, RT,
+ },
+ };
+
+ if (!offsets)
+ return NULL;
+
+ switch (rot_mir_point_matrix[rotate_mode][mirr_mode]) {
+ case LT:
+ return &offsets->left_top;
+ case LB:
+ return &offsets->left_bottom;
+ case RT:
+ return &offsets->right_top;
+ case RB:
+ return &offsets->right_bottom;
+ }
+
+ return NULL;
+}
+
+static void rga_cmd_set_src_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7;
+}
+
+static void rga_cmd_set_src1_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 4;
+}
+
+static void rga_cmd_set_dst_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 8;
+}
+
+static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int scale_dst_w, scale_dst_h;
+ unsigned int src_h, src_w, src_x, src_y, dst_h, dst_w, dst_x, dst_y;
+ union rga_src_info src_info;
+ union rga_dst_info dst_info;
+ union rga_src_x_factor x_factor;
+ union rga_src_y_factor y_factor;
+ union rga_src_vir_info src_vir_info;
+ union rga_src_act_info src_act_info;
+ union rga_dst_vir_info dst_vir_info;
+ union rga_dst_act_info dst_act_info;
+
+ struct rga_addr_offset *dst_offset;
+ struct rga_corners_addr_offset offsets;
+ struct rga_corners_addr_offset src_offsets;
+
+ src_h = ctx->in.crop.height;
+ src_w = ctx->in.crop.width;
+ src_x = ctx->in.crop.left;
+ src_y = ctx->in.crop.top;
+ dst_h = ctx->out.crop.height;
+ dst_w = ctx->out.crop.width;
+ dst_x = ctx->out.crop.left;
+ dst_y = ctx->out.crop.top;
+
+ src_info.val = dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_info.val = dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2];
+ x_factor.val = dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ y_factor.val = dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ src_vir_info.val = dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ src_act_info.val = dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_vir_info.val = dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_act_info.val = dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+
+ src_info.data.format = ctx->in.fmt->hw_format;
+ src_info.data.swap = ctx->in.fmt->color_swap;
+ dst_info.data.format = ctx->out.fmt->hw_format;
+ dst_info.data.swap = ctx->out.fmt->color_swap;
+
+ if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+ if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
+ switch (ctx->in.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ src_info.data.csc_mode =
+ RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ src_info.data.csc_mode =
+ RGA_SRC_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+ }
+
+ if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+ switch (ctx->out.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ dst_info.data.csc_mode = RGA_DST_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+
+ if (ctx->vflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_X;
+
+ if (ctx->hflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_Y;
+
+ switch (ctx->rotate) {
+ case 90:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_90_DEGREE;
+ break;
+ case 180:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_180_DEGREE;
+ break;
+ case 270:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_270_DEGREE;
+ break;
+ default:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_0_DEGREE;
+ break;
+ }
+
+ /*
+ * Cacluate the up/down scaling mode/factor.
+ *
+ * RGA used to scale the picture first, and then rotate second,
+ * so we need to swap the w/h when rotate degree is 90/270.
+ */
+ if (src_info.data.rot_mode == RGA_SRC_ROT_MODE_90_DEGREE ||
+ src_info.data.rot_mode == RGA_SRC_ROT_MODE_270_DEGREE) {
+ if (rga->version.major == 0 || rga->version.minor == 0) {
+ if (dst_w == src_h)
+ src_h -= 8;
+ if (abs(src_w - dst_h) < 16)
+ src_w -= 16;
+ }
+
+ scale_dst_h = dst_w;
+ scale_dst_w = dst_h;
+ } else {
+ scale_dst_w = dst_w;
+ scale_dst_h = dst_h;
+ }
+
+ if (src_w == scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_NO;
+ x_factor.val = 0;
+ } else if (src_w > scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_DOWN;
+ x_factor.data.down_scale_factor =
+ rga_get_scaling(src_w, scale_dst_w) + 1;
+ } else {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_UP;
+ x_factor.data.up_scale_factor =
+ rga_get_scaling(src_w - 1, scale_dst_w - 1);
+ }
+
+ if (src_h == scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_NO;
+ y_factor.val = 0;
+ } else if (src_h > scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_DOWN;
+ y_factor.data.down_scale_factor =
+ rga_get_scaling(src_h, scale_dst_h) + 1;
+ } else {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_UP;
+ y_factor.data.up_scale_factor =
+ rga_get_scaling(src_h - 1, scale_dst_h - 1);
+ }
+
+ /*
+ * Cacluate the framebuffer virtual strides and active size,
+ * note that the step of vir_stride / vir_width is 4 byte words
+ */
+ src_vir_info.data.vir_stride = ctx->in.stride >> 2;
+ src_vir_info.data.vir_width = ctx->in.stride >> 2;
+
+ src_act_info.data.act_height = src_h - 1;
+ src_act_info.data.act_width = src_w - 1;
+
+ dst_vir_info.data.vir_stride = ctx->out.stride >> 2;
+ dst_act_info.data.act_height = dst_h - 1;
+ dst_act_info.data.act_width = dst_w - 1;
+
+ /*
+ * Cacluate the source framebuffer base address with offset pixel.
+ */
+ src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y,
+ src_w, src_h);
+
+ /*
+ * Configure the dest framebuffer base address with pixel offset.
+ */
+ offsets = rga_get_addr_offset(&ctx->out, dst_x, dst_y, dst_w, dst_h);
+ dst_offset = rga_lookup_draw_pos(&offsets, src_info.data.rot_mode,
+ src_info.data.mir_mode);
+
+ dest[(RGA_SRC_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.y_off;
+ dest[(RGA_SRC_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.u_off;
+ dest[(RGA_SRC_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.v_off;
+
+ dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2] = x_factor.val;
+ dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2] = y_factor.val;
+ dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = src_vir_info.val;
+ dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = src_act_info.val;
+
+ dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2] = src_info.val;
+
+ dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->y_off;
+ dest[(RGA_DST_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->u_off;
+ dest[(RGA_DST_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->v_off;
+
+ dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = dst_vir_info.val;
+ dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = dst_act_info.val;
+
+ dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2] = dst_info.val;
+}
+
+static void rga_cmd_set_mode(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ union rga_mode_ctrl mode;
+ union rga_alpha_ctrl0 alpha_ctrl0;
+ union rga_alpha_ctrl1 alpha_ctrl1;
+
+ mode.val = 0;
+ alpha_ctrl0.val = 0;
+ alpha_ctrl1.val = 0;
+
+ mode.data.gradient_sat = 1;
+ mode.data.render = RGA_MODE_RENDER_BITBLT;
+ mode.data.bitblt = RGA_MODE_BITBLT_MODE_SRC_TO_DST;
+
+ /* disable alpha blending */
+ dest[(RGA_ALPHA_CTRL0 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl0.val;
+ dest[(RGA_ALPHA_CTRL1 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl1.val;
+
+ dest[(RGA_MODE_CTRL - RGA_MODE_BASE_REG) >> 2] = mode.val;
+}
+
+static void rga_cmd_set(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ memset(rga->cmdbuf_virt, 0, RGA_CMDBUF_SIZE * 4);
+
+ rga_cmd_set_src_addr(ctx, rga->src_mmu_pages);
+ /*
+ * Due to hardware bug,
+ * src1 mmu also should be configured when using alpha blending.
+ */
+ rga_cmd_set_src1_addr(ctx, rga->dst_mmu_pages);
+
+ rga_cmd_set_dst_addr(ctx, rga->dst_mmu_pages);
+ rga_cmd_set_mode(ctx);
+
+ rga_cmd_set_trans_info(ctx);
+
+ rga_write(rga, RGA_CMD_BASE, rga->cmdbuf_phy);
+
+ /* sync CMD buf for RGA */
+ dma_sync_single_for_device(rga->dev, rga->cmdbuf_phy,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+void rga_hw_start(struct rockchip_rga *rga)
+{
+ struct rga_ctx *ctx = rga->curr;
+
+ rga_cmd_set(ctx);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x00);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x22);
+
+ rga_write(rga, RGA_INT, 0x600);
+
+ rga_write(rga, RGA_CMD_CTRL, 0x1);
+}
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
new file mode 100644
index 000000000000..ca3c204abe42
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_HW_H__
+#define __RGA_HW_H__
+
+#define RGA_CMDBUF_SIZE 0x20
+
+/* Hardware limits */
+#define MAX_WIDTH 8192
+#define MAX_HEIGHT 8192
+
+#define MIN_WIDTH 34
+#define MIN_HEIGHT 34
+
+#define DEFAULT_WIDTH 100
+#define DEFAULT_HEIGHT 100
+
+#define RGA_TIMEOUT 500
+
+/* Registers address */
+#define RGA_SYS_CTRL 0x0000
+#define RGA_CMD_CTRL 0x0004
+#define RGA_CMD_BASE 0x0008
+#define RGA_INT 0x0010
+#define RGA_MMU_CTRL0 0x0014
+#define RGA_VERSION_INFO 0x0028
+
+#define RGA_MODE_BASE_REG 0x0100
+#define RGA_MODE_MAX_REG 0x017C
+
+#define RGA_MODE_CTRL 0x0100
+#define RGA_SRC_INFO 0x0104
+#define RGA_SRC_Y_RGB_BASE_ADDR 0x0108
+#define RGA_SRC_CB_BASE_ADDR 0x010c
+#define RGA_SRC_CR_BASE_ADDR 0x0110
+#define RGA_SRC1_RGB_BASE_ADDR 0x0114
+#define RGA_SRC_VIR_INFO 0x0118
+#define RGA_SRC_ACT_INFO 0x011c
+#define RGA_SRC_X_FACTOR 0x0120
+#define RGA_SRC_Y_FACTOR 0x0124
+#define RGA_SRC_BG_COLOR 0x0128
+#define RGA_SRC_FG_COLOR 0x012c
+#define RGA_SRC_TR_COLOR0 0x0130
+#define RGA_SRC_TR_COLOR1 0x0134
+
+#define RGA_DST_INFO 0x0138
+#define RGA_DST_Y_RGB_BASE_ADDR 0x013c
+#define RGA_DST_CB_BASE_ADDR 0x0140
+#define RGA_DST_CR_BASE_ADDR 0x0144
+#define RGA_DST_VIR_INFO 0x0148
+#define RGA_DST_ACT_INFO 0x014c
+
+#define RGA_ALPHA_CTRL0 0x0150
+#define RGA_ALPHA_CTRL1 0x0154
+#define RGA_FADING_CTRL 0x0158
+#define RGA_PAT_CON 0x015c
+#define RGA_ROP_CON0 0x0160
+#define RGA_ROP_CON1 0x0164
+#define RGA_MASK_BASE 0x0168
+
+#define RGA_MMU_CTRL1 0x016C
+#define RGA_MMU_SRC_BASE 0x0170
+#define RGA_MMU_SRC1_BASE 0x0174
+#define RGA_MMU_DST_BASE 0x0178
+
+/* Registers value */
+#define RGA_MODE_RENDER_BITBLT 0
+#define RGA_MODE_RENDER_COLOR_PALETTE 1
+#define RGA_MODE_RENDER_RECTANGLE_FILL 2
+#define RGA_MODE_RENDER_UPDATE_PALETTE_LUT_RAM 3
+
+#define RGA_MODE_BITBLT_MODE_SRC_TO_DST 0
+#define RGA_MODE_BITBLT_MODE_SRC_SRC1_TO_DST 1
+
+#define RGA_MODE_CF_ROP4_SOLID 0
+#define RGA_MODE_CF_ROP4_PATTERN 1
+
+#define RGA_COLOR_FMT_ABGR8888 0
+#define RGA_COLOR_FMT_XBGR8888 1
+#define RGA_COLOR_FMT_RGB888 2
+#define RGA_COLOR_FMT_BGR565 4
+#define RGA_COLOR_FMT_ABGR1555 5
+#define RGA_COLOR_FMT_ABGR4444 6
+#define RGA_COLOR_FMT_YUV422SP 8
+#define RGA_COLOR_FMT_YUV422P 9
+#define RGA_COLOR_FMT_YUV420SP 10
+#define RGA_COLOR_FMT_YUV420P 11
+/* SRC_COLOR Palette */
+#define RGA_COLOR_FMT_CP_1BPP 12
+#define RGA_COLOR_FMT_CP_2BPP 13
+#define RGA_COLOR_FMT_CP_4BPP 14
+#define RGA_COLOR_FMT_CP_8BPP 15
+#define RGA_COLOR_FMT_MASK 15
+
+#define RGA_COLOR_NONE_SWAP 0
+#define RGA_COLOR_RB_SWAP 1
+#define RGA_COLOR_ALPHA_SWAP 2
+#define RGA_COLOR_UV_SWAP 4
+
+#define RGA_SRC_CSC_MODE_BYPASS 0
+#define RGA_SRC_CSC_MODE_BT601_R0 1
+#define RGA_SRC_CSC_MODE_BT601_R1 2
+#define RGA_SRC_CSC_MODE_BT709_R0 3
+#define RGA_SRC_CSC_MODE_BT709_R1 4
+
+#define RGA_SRC_ROT_MODE_0_DEGREE 0
+#define RGA_SRC_ROT_MODE_90_DEGREE 1
+#define RGA_SRC_ROT_MODE_180_DEGREE 2
+#define RGA_SRC_ROT_MODE_270_DEGREE 3
+
+#define RGA_SRC_MIRR_MODE_NO 0
+#define RGA_SRC_MIRR_MODE_X 1
+#define RGA_SRC_MIRR_MODE_Y 2
+#define RGA_SRC_MIRR_MODE_X_Y 3
+
+#define RGA_SRC_HSCL_MODE_NO 0
+#define RGA_SRC_HSCL_MODE_DOWN 1
+#define RGA_SRC_HSCL_MODE_UP 2
+
+#define RGA_SRC_VSCL_MODE_NO 0
+#define RGA_SRC_VSCL_MODE_DOWN 1
+#define RGA_SRC_VSCL_MODE_UP 2
+
+#define RGA_SRC_TRANS_ENABLE_R 1
+#define RGA_SRC_TRANS_ENABLE_G 2
+#define RGA_SRC_TRANS_ENABLE_B 4
+#define RGA_SRC_TRANS_ENABLE_A 8
+
+#define RGA_SRC_BIC_COE_SELEC_CATROM 0
+#define RGA_SRC_BIC_COE_SELEC_MITCHELL 1
+#define RGA_SRC_BIC_COE_SELEC_HERMITE 2
+#define RGA_SRC_BIC_COE_SELEC_BSPLINE 3
+
+#define RGA_DST_DITHER_MODE_888_TO_666 0
+#define RGA_DST_DITHER_MODE_888_TO_565 1
+#define RGA_DST_DITHER_MODE_888_TO_555 2
+#define RGA_DST_DITHER_MODE_888_TO_444 3
+
+#define RGA_DST_CSC_MODE_BYPASS 0
+#define RGA_DST_CSC_MODE_BT601_R0 1
+#define RGA_DST_CSC_MODE_BT601_R1 2
+#define RGA_DST_CSC_MODE_BT709_R0 3
+
+#define RGA_ALPHA_ROP_MODE_2 0
+#define RGA_ALPHA_ROP_MODE_3 1
+#define RGA_ALPHA_ROP_MODE_4 2
+
+#define RGA_ALPHA_SELECT_ALPHA 0
+#define RGA_ALPHA_SELECT_ROP 1
+
+#define RGA_ALPHA_MASK_BIG_ENDIAN 0
+#define RGA_ALPHA_MASK_LITTLE_ENDIAN 1
+
+#define RGA_ALPHA_NORMAL 0
+#define RGA_ALPHA_REVERSE 1
+
+#define RGA_ALPHA_BLEND_GLOBAL 0
+#define RGA_ALPHA_BLEND_NORMAL 1
+#define RGA_ALPHA_BLEND_MULTIPLY 2
+
+#define RGA_ALPHA_CAL_CUT 0
+#define RGA_ALPHA_CAL_NORMAL 1
+
+#define RGA_ALPHA_FACTOR_ZERO 0
+#define RGA_ALPHA_FACTOR_ONE 1
+#define RGA_ALPHA_FACTOR_OTHER 2
+#define RGA_ALPHA_FACTOR_OTHER_REVERSE 3
+#define RGA_ALPHA_FACTOR_SELF 4
+
+#define RGA_ALPHA_COLOR_NORMAL 0
+#define RGA_ALPHA_COLOR_MULTIPLY_CAL 1
+
+/* Registers union */
+union rga_mode_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:2] */
+ unsigned int render:3;
+ /* [3:6] */
+ unsigned int bitblt:1;
+ unsigned int cf_rop4_pat:1;
+ unsigned int alpha_zero_key:1;
+ unsigned int gradient_sat:1;
+ /* [7:31] */
+ unsigned int reserved:25;
+ } data;
+};
+
+union rga_src_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:7] */
+ unsigned int swap:3;
+ unsigned int cp_endian:1;
+ /* [8:17] */
+ unsigned int csc_mode:2;
+ unsigned int rot_mode:2;
+ unsigned int mir_mode:2;
+ unsigned int hscl_mode:2;
+ unsigned int vscl_mode:2;
+ /* [18:22] */
+ unsigned int trans_mode:1;
+ unsigned int trans_enable:4;
+ /* [23:25] */
+ unsigned int dither_up_en:1;
+ unsigned int bic_coe_sel:2;
+ /* [26:31] */
+ unsigned int reserved:6;
+ } data;
+};
+
+union rga_src_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_width:15;
+ unsigned int reserved:1;
+ /* [16:25] */
+ unsigned int vir_stride:10;
+ /* [26:31] */
+ unsigned int reserved1:6;
+ } data;
+};
+
+union rga_src_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:13;
+ unsigned int reserved:3;
+ /* [16:31] */
+ unsigned int act_height:13;
+ unsigned int reserved1:3;
+ } data;
+};
+
+union rga_src_x_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+union rga_src_y_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+/* Alpha / Red / Green / Blue */
+union rga_src_cp_gr_color {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int gradient_x:16;
+ /* [16:31] */
+ unsigned int gradient_y:16;
+ } data;
+};
+
+union rga_src_transparency_color0 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmin:8;
+ /* [8:15] */
+ unsigned int trans_gmin:8;
+ /* [16:23] */
+ unsigned int trans_bmin:8;
+ /* [24:31] */
+ unsigned int trans_amin:8;
+ } data;
+};
+
+union rga_src_transparency_color1 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmax:8;
+ /* [8:15] */
+ unsigned int trans_gmax:8;
+ /* [16:23] */
+ unsigned int trans_bmax:8;
+ /* [24:31] */
+ unsigned int trans_amax:8;
+ } data;
+};
+
+union rga_dst_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:6] */
+ unsigned int swap:3;
+ /* [7:9] */
+ unsigned int src1_format:3;
+ /* [10:11] */
+ unsigned int src1_swap:2;
+ /* [12:15] */
+ unsigned int dither_up_en:1;
+ unsigned int dither_down_en:1;
+ unsigned int dither_down_mode:2;
+ /* [16:18] */
+ unsigned int csc_mode:2;
+ unsigned int csc_clip:1;
+ /* [19:31] */
+ unsigned int reserved:13;
+ } data;
+};
+
+union rga_dst_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_stride:15;
+ unsigned int reserved:1;
+ /* [16:31] */
+ unsigned int src1_vir_stride:15;
+ unsigned int reserved1:1;
+ } data;
+};
+
+union rga_dst_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:12;
+ unsigned int reserved:4;
+ /* [16:31] */
+ unsigned int act_height:12;
+ unsigned int reserved1:4;
+ } data;
+};
+
+union rga_alpha_ctrl0 {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int rop_en:1;
+ unsigned int rop_select:1;
+ unsigned int rop_mode:2;
+ /* [4:11] */
+ unsigned int src_fading_val:8;
+ /* [12:20] */
+ unsigned int dst_fading_val:8;
+ unsigned int mask_endian:1;
+ /* [21:31] */
+ unsigned int reserved:11;
+ } data;
+};
+
+union rga_alpha_ctrl1 {
+ unsigned int val;
+ struct {
+ /* [0:1] */
+ unsigned int dst_color_m0:1;
+ unsigned int src_color_m0:1;
+ /* [2:7] */
+ unsigned int dst_factor_m0:3;
+ unsigned int src_factor_m0:3;
+ /* [8:9] */
+ unsigned int dst_alpha_cal_m0:1;
+ unsigned int src_alpha_cal_m0:1;
+ /* [10:13] */
+ unsigned int dst_blend_m0:2;
+ unsigned int src_blend_m0:2;
+ /* [14:15] */
+ unsigned int dst_alpha_m0:1;
+ unsigned int src_alpha_m0:1;
+ /* [16:21] */
+ unsigned int dst_factor_m1:3;
+ unsigned int src_factor_m1:3;
+ /* [22:23] */
+ unsigned int dst_alpha_cal_m1:1;
+ unsigned int src_alpha_cal_m1:1;
+ /* [24:27] */
+ unsigned int dst_blend_m1:2;
+ unsigned int src_blend_m1:2;
+ /* [28:29] */
+ unsigned int dst_alpha_m1:1;
+ unsigned int src_alpha_m1:1;
+ /* [30:31] */
+ unsigned int reserved:2;
+ } data;
+};
+
+union rga_fading_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int fading_offset_r:8;
+ /* [8:15] */
+ unsigned int fading_offset_g:8;
+ /* [16:23] */
+ unsigned int fading_offset_b:8;
+ /* [24:31] */
+ unsigned int fading_en:1;
+ unsigned int reserved:7;
+ } data;
+};
+
+union rga_pat_con {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int width:8;
+ /* [8:15] */
+ unsigned int height:8;
+ /* [16:23] */
+ unsigned int offset_x:8;
+ /* [24:31] */
+ unsigned int offset_y:8;
+ } data;
+};
+
+#endif
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
new file mode 100644
index 000000000000..89296de9cf4a
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -0,0 +1,1010 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static void job_abort(void *prv)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+
+ if (!rga->curr) /* No job currently running */
+ return;
+
+ wait_event_timeout(rga->irq_queue,
+ !rga->curr, msecs_to_jiffies(RGA_TIMEOUT));
+}
+
+static void device_run(void *prv)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_buffer *src, *dst;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rga->ctrl_lock, flags);
+
+ rga->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ rga_buf_map(src);
+ rga_buf_map(dst);
+
+ rga_hw_start(rga);
+
+ spin_unlock_irqrestore(&rga->ctrl_lock, flags);
+}
+
+static irqreturn_t rga_isr(int irq, void *prv)
+{
+ struct rockchip_rga *rga = prv;
+ int intr;
+
+ intr = rga_read(rga, RGA_INT) & 0xf;
+
+ rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);
+
+ if (intr & 0x04) {
+ struct vb2_v4l2_buffer *src, *dst;
+ struct rga_ctx *ctx = rga->curr;
+
+ WARN_ON(!ctx);
+
+ rga->curr = NULL;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ WARN_ON(!src);
+ WARN_ON(!dst);
+
+ dst->timecode = src->timecode;
+ dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(rga->m2m_dev, ctx->fh.m2m_ctx);
+
+ wake_up(&rga->irq_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct v4l2_m2m_ops rga_m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct rga_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &rga_qops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->rga->mutex;
+ src_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &rga_qops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->rga->mutex;
+ dst_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int rga_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rga_ctx *ctx = container_of(ctrl->handler, struct rga_ctx,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->rga->ctrl_lock, flags);
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ ctx->rotate = ctrl->val;
+ break;
+ case V4L2_CID_BG_COLOR:
+ ctx->fill_color = ctrl->val;
+ break;
+ }
+ spin_unlock_irqrestore(&ctx->rga->ctrl_lock, flags);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rga_ctrl_ops = {
+ .s_ctrl = rga_s_ctrl,
+};
+
+static int rga_setup_ctrls(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_BG_COLOR, 0, 0xffffffff, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_err(&rga->v4l2_dev, "%s failed\n", __func__);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ return 0;
+}
+
+struct rga_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR4444,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR1555,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_BGR565,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422P,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct rga_fmt *rga_fmt_find(struct v4l2_format *f)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct rga_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .crop.left = 0,
+ .crop.top = 0,
+ .crop.width = DEFAULT_WIDTH,
+ .crop.height = DEFAULT_HEIGHT,
+ .fmt = &formats[0],
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->in;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->out;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int rga_open(struct file *file)
+{
+ struct rockchip_rga *rga = video_drvdata(file);
+ struct rga_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->rga = rga;
+ /* Set default formats */
+ ctx->in = def_frame;
+ ctx->out = def_frame;
+
+ if (mutex_lock_interruptible(&rga->mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rga->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ mutex_unlock(&rga->mutex);
+ kfree(ctx);
+ return ret;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ rga_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static int rga_release(struct file *file)
+{
+ struct rga_ctx *ctx =
+ container_of(file->private_data, struct rga_ctx, fh);
+ struct rockchip_rga *rga = ctx->rga;
+
+ mutex_lock(&rga->mutex);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations rga_fops = {
+ .owner = THIS_MODULE,
+ .open = rga_open,
+ .release = rga_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int
+vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, RGA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "rockchip-rga", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:rga", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ struct rga_fmt *fmt;
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &formats[f->index];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = frm->stride;
+ f->fmt.pix.sizeimage = frm->size;
+ f->fmt.pix.colorspace = frm->colorspace;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_fmt *fmt;
+
+ fmt = rga_fmt_find(f);
+ if (!fmt) {
+ fmt = &formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < MIN_WIDTH)
+ f->fmt.pix.width = MIN_WIDTH;
+ if (f->fmt.pix.height < MIN_HEIGHT)
+ f->fmt.pix.height = MIN_HEIGHT;
+
+ if (fmt->hw_format >= RGA_COLOR_FMT_YUV422SP)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * (f->fmt.pix.width * fmt->depth) >> 3;
+
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+ struct rga_fmt *fmt;
+ int ret = 0;
+
+ /* Adjust all values accordingly to the hardware capabilities
+ * and chosen format.
+ */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&rga->v4l2_dev, "queue (%d) bust\n", f->type);
+ return -EBUSY;
+ }
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+ fmt = rga_fmt_find(f);
+ if (!fmt)
+ return -EINVAL;
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ frm->fmt = fmt;
+ frm->stride = f->fmt.pix.bytesperline;
+ frm->colorspace = f->fmt.pix.colorspace;
+
+ /* Reset crop settings */
+ frm->crop.left = 0;
+ frm->crop.top = 0;
+ frm->crop.width = frm->width;
+ frm->crop.height = frm->height;
+
+ return 0;
+}
+
+static int vidioc_g_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rga_frame *f;
+ bool use_frame = false;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_frame) {
+ s->r = f->crop;
+ } else {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct rga_frame *f;
+ int ret = 0;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * COMPOSE target is only valid for capture buffer type, return
+ * error for output buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * CROP target is only valid for output buffer type, return
+ * error for capture buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ /*
+ * bound and default crop/compose targets are invalid targets to
+ * try/set
+ */
+ default:
+ return -EINVAL;
+ }
+
+ if (s->r.top < 0 || s->r.left < 0) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev,
+ "doesn't support negative values for top & left.\n");
+ return -EINVAL;
+ }
+
+ if (s->r.left + s->r.width > f->width ||
+ s->r.top + s->r.height > f->height ||
+ s->r.width < MIN_WIDTH || s->r.height < MIN_HEIGHT) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev, "unsupported crop value.\n");
+ return -EINVAL;
+ }
+
+ f->crop = s->r;
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops rga_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+};
+
+static struct video_device rga_videodev = {
+ .name = "rockchip-rga",
+ .fops = &rga_fops,
+ .ioctl_ops = &rga_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static int rga_enable_clocks(struct rockchip_rga *rga)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rga->sclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rga->aclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
+ goto err_disable_sclk;
+ }
+
+ ret = clk_prepare_enable(rga->hclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
+ goto err_disable_aclk;
+ }
+
+ return 0;
+
+err_disable_sclk:
+ clk_disable_unprepare(rga->sclk);
+err_disable_aclk:
+ clk_disable_unprepare(rga->aclk);
+
+ return ret;
+}
+
+static void rga_disable_clocks(struct rockchip_rga *rga)
+{
+ clk_disable_unprepare(rga->sclk);
+ clk_disable_unprepare(rga->hclk);
+ clk_disable_unprepare(rga->aclk);
+}
+
+static int rga_parse_dt(struct rockchip_rga *rga)
+{
+ struct reset_control *core_rst, *axi_rst, *ahb_rst;
+
+ core_rst = devm_reset_control_get(rga->dev, "core");
+ if (IS_ERR(core_rst)) {
+ dev_err(rga->dev, "failed to get core reset controller\n");
+ return PTR_ERR(core_rst);
+ }
+
+ axi_rst = devm_reset_control_get(rga->dev, "axi");
+ if (IS_ERR(axi_rst)) {
+ dev_err(rga->dev, "failed to get axi reset controller\n");
+ return PTR_ERR(axi_rst);
+ }
+
+ ahb_rst = devm_reset_control_get(rga->dev, "ahb");
+ if (IS_ERR(ahb_rst)) {
+ dev_err(rga->dev, "failed to get ahb reset controller\n");
+ return PTR_ERR(ahb_rst);
+ }
+
+ reset_control_assert(core_rst);
+ udelay(1);
+ reset_control_deassert(core_rst);
+
+ reset_control_assert(axi_rst);
+ udelay(1);
+ reset_control_deassert(axi_rst);
+
+ reset_control_assert(ahb_rst);
+ udelay(1);
+ reset_control_deassert(ahb_rst);
+
+ rga->sclk = devm_clk_get(rga->dev, "sclk");
+ if (IS_ERR(rga->sclk)) {
+ dev_err(rga->dev, "failed to get sclk clock\n");
+ return PTR_ERR(rga->sclk);
+ }
+
+ rga->aclk = devm_clk_get(rga->dev, "aclk");
+ if (IS_ERR(rga->aclk)) {
+ dev_err(rga->dev, "failed to get aclk clock\n");
+ return PTR_ERR(rga->aclk);
+ }
+
+ rga->hclk = devm_clk_get(rga->dev, "hclk");
+ if (IS_ERR(rga->hclk)) {
+ dev_err(rga->dev, "failed to get hclk clock\n");
+ return PTR_ERR(rga->hclk);
+ }
+
+ return 0;
+}
+
+static int rga_probe(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret = 0;
+ int irq;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
+ if (!rga)
+ return -ENOMEM;
+
+ rga->dev = &pdev->dev;
+ spin_lock_init(&rga->ctrl_lock);
+ mutex_init(&rga->mutex);
+
+ init_waitqueue_head(&rga->irq_queue);
+
+ ret = rga_parse_dt(rga);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+
+ pm_runtime_enable(rga->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ rga->regs = devm_ioremap_resource(rga->dev, res);
+ if (IS_ERR(rga->regs)) {
+ ret = PTR_ERR(rga->regs);
+ goto err_put_clk;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(rga->dev, "failed to get irq\n");
+ ret = irq;
+ goto err_put_clk;
+ }
+
+ ret = devm_request_irq(rga->dev, irq, rga_isr, 0,
+ dev_name(rga->dev), rga);
+ if (ret < 0) {
+ dev_err(rga->dev, "failed to request irq\n");
+ goto err_put_clk;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &rga->v4l2_dev);
+ if (ret)
+ goto err_put_clk;
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&rga->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+ *vfd = rga_videodev;
+ vfd->lock = &rga->mutex;
+ vfd->v4l2_dev = &rga->v4l2_dev;
+
+ video_set_drvdata(vfd, rga);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", rga_videodev.name);
+ rga->vfd = vfd;
+
+ platform_set_drvdata(pdev, rga);
+ rga->m2m_dev = v4l2_m2m_init(&rga_m2m_ops);
+ if (IS_ERR(rga->m2m_dev)) {
+ v4l2_err(&rga->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(rga->m2m_dev);
+ goto unreg_video_dev;
+ }
+
+ pm_runtime_get_sync(rga->dev);
+
+ rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
+ rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
+
+ v4l2_info(&rga->v4l2_dev, "HW Version: 0x%02x.%02x\n",
+ rga->version.major, rga->version.minor);
+
+ pm_runtime_put(rga->dev);
+
+ /* Create CMD buffer */
+ rga->cmdbuf_virt = dma_alloc_attrs(rga->dev, RGA_CMDBUF_SIZE,
+ &rga->cmdbuf_phy, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
+ rga->src_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+ rga->dst_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+
+ def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+ def_frame.size = def_frame.stride * def_frame.height;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ return 0;
+
+rel_vdev:
+ video_device_release(vfd);
+unreg_video_dev:
+ video_unregister_device(rga->vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&rga->v4l2_dev);
+err_put_clk:
+ pm_runtime_disable(rga->dev);
+
+ return ret;
+}
+
+static int rga_remove(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga = platform_get_drvdata(pdev);
+
+ dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, &rga->cmdbuf_virt,
+ rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
+
+ free_pages((unsigned long)rga->src_mmu_pages, 3);
+ free_pages((unsigned long)rga->dst_mmu_pages, 3);
+
+ v4l2_info(&rga->v4l2_dev, "Removing\n");
+
+ v4l2_m2m_release(rga->m2m_dev);
+ video_unregister_device(rga->vfd);
+ v4l2_device_unregister(&rga->v4l2_dev);
+
+ pm_runtime_disable(rga->dev);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_suspend(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ rga_disable_clocks(rga);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_resume(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ return rga_enable_clocks(rga);
+}
+
+static const struct dev_pm_ops rga_pm = {
+ SET_RUNTIME_PM_OPS(rga_runtime_suspend,
+ rga_runtime_resume, NULL)
+};
+
+static const struct of_device_id rockchip_rga_match[] = {
+ {
+ .compatible = "rockchip,rk3288-rga",
+ },
+ {
+ .compatible = "rockchip,rk3399-rga",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_rga_match);
+
+static struct platform_driver rga_pdrv = {
+ .probe = rga_probe,
+ .remove = rga_remove,
+ .driver = {
+ .name = RGA_NAME,
+ .pm = &rga_pm,
+ .of_match_table = rockchip_rga_match,
+ },
+};
+
+module_platform_driver(rga_pdrv);
+
+MODULE_AUTHOR("Jacob Chen <jacob-chen@iotwrt.com>");
+MODULE_DESCRIPTION("Rockchip Raster 2d Graphic Acceleration Unit");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
new file mode 100644
index 000000000000..5d43e7ea88af
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_H__
+#define __RGA_H__
+
+#include <linux/platform_device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define RGA_NAME "rockchip-rga"
+
+struct rga_fmt {
+ u32 fourcc;
+ int depth;
+ u8 uv_factor;
+ u8 y_div;
+ u8 x_div;
+ u8 color_swap;
+ u8 hw_format;
+};
+
+struct rga_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ u32 colorspace;
+
+ /* Crop */
+ struct v4l2_rect crop;
+
+ /* Image format */
+ struct rga_fmt *fmt;
+
+ /* Variables that can calculated once and reused */
+ u32 stride;
+ u32 size;
+};
+
+struct rockchip_rga_version {
+ u32 major;
+ u32 minor;
+};
+
+struct rga_ctx {
+ struct v4l2_fh fh;
+ struct rockchip_rga *rga;
+ struct rga_frame in;
+ struct rga_frame out;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Control values */
+ u32 op;
+ u32 hflip;
+ u32 vflip;
+ u32 rotate;
+ u32 fill_color;
+};
+
+struct rockchip_rga {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+
+ struct device *dev;
+ struct regmap *grf;
+ void __iomem *regs;
+ struct clk *sclk;
+ struct clk *aclk;
+ struct clk *hclk;
+ struct rockchip_rga_version version;
+
+ /* vfd lock */
+ struct mutex mutex;
+ /* ctrl parm lock */
+ spinlock_t ctrl_lock;
+
+ wait_queue_head_t irq_queue;
+
+ struct rga_ctx *curr;
+ dma_addr_t cmdbuf_phy;
+ void *cmdbuf_virt;
+ unsigned int *src_mmu_pages;
+ unsigned int *dst_mmu_pages;
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type);
+
+/* RGA Buffers Manage */
+extern const struct vb2_ops rga_qops;
+void rga_buf_map(struct vb2_buffer *vb);
+
+/* RGA Hardware */
+static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
+{
+ writel(value, rga->regs + reg);
+};
+
+static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
+{
+ return readl(rga->regs + reg);
+};
+
+static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
+{
+ u32 temp = rga_read(rga, reg) & ~(mask);
+
+ temp |= val & mask;
+ rga_write(rga, reg, temp);
+};
+
+void rga_hw_start(struct rockchip_rga *rga);
+
+#endif
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index c4ab63986c8f..79bc0ef6bb41 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -103,6 +103,7 @@ static const struct camif_fmt camif_formats[] = {
/**
* s3c_camif_find_format() - lookup camif color format by fourcc or an index
+ * @vp: video path (DMA) description (codec/preview)
* @pixelformat: fourcc to match, ignored if null
* @index: index to the camif_formats array, ignored if negative
*/
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 1afde5021ca6..bc68dbbcaec1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -145,9 +145,9 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
}
}
-static void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(struct timer_list *t)
{
- struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+ struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
if (test_bit(0, &dev->hw_lock))
atomic_inc(&dev->watchdog_cnt);
@@ -470,7 +470,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
{
mfc_err("Interrupt Error: %08x\n", err);
- if (ctx != NULL) {
+ if (ctx) {
/* Error recovery is dependent on the state of context */
switch (ctx->state) {
case MFCINST_RES_CHANGE_INIT:
@@ -508,7 +508,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
{
struct s5p_mfc_dev *dev;
- if (ctx == NULL)
+ if (!ctx)
return;
dev = ctx->dev;
if (ctx->c_ops->post_seq_start) {
@@ -562,7 +562,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_buf *src_buf;
struct s5p_mfc_dev *dev;
- if (ctx == NULL)
+ if (!ctx)
return;
dev = ctx->dev;
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
@@ -1043,12 +1043,9 @@ end:
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
- struct s5p_mfc_dev *dev = ctx->dev;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
int ret;
- if (mutex_lock_interruptible(&dev->mfc_mutex))
- return -ERESTARTSYS;
if (offset < DST_QUEUE_OFF_BASE) {
mfc_debug(2, "mmaping source\n");
ret = vb2_mmap(&ctx->vq_src, vma);
@@ -1057,7 +1054,6 @@ static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
ret = vb2_mmap(&ctx->vq_dst, vma);
}
- mutex_unlock(&dev->mfc_mutex);
return ret;
}
@@ -1083,7 +1079,7 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
struct device *child;
int ret;
- child = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL);
+ child = devm_kzalloc(dev, sizeof(*child), GFP_KERNEL);
if (!child)
return NULL;
@@ -1270,10 +1266,8 @@ static int s5p_mfc_probe(struct platform_device *pdev)
pr_debug("%s++\n", __func__);
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&pdev->dev, "Not enough memory for MFC device\n");
+ if (!dev)
return -ENOMEM;
- }
spin_lock_init(&dev->irqlock);
spin_lock_init(&dev->condlock);
@@ -1291,7 +1285,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
return PTR_ERR(dev->regs_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
+ if (!res) {
dev_err(&pdev->dev, "failed to get irq resource\n");
return -ENOENT;
}
@@ -1320,9 +1314,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
dev->hw_lock = 0;
INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
atomic_set(&dev->watchdog_cnt, 0);
- init_timer(&dev->watchdog_timer);
- dev->watchdog_timer.data = (unsigned long)dev;
- dev->watchdog_timer.function = s5p_mfc_watchdog;
+ timer_setup(&dev->watchdog_timer, s5p_mfc_watchdog, 0);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 15a562af13c7..dedc1b024f6f 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -267,7 +267,7 @@ static void sh_veu_process(struct sh_veu_dev *veu,
sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
}
-/**
+/*
* sh_veu_device_run() - prepares and starts the device
*
* This will be called by the framework when it decides to schedule a particular
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 1f3c450c7a69..916ff68b73d4 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -1391,6 +1391,12 @@ static int soc_camera_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations soc_camera_async_ops = {
+ .bound = soc_camera_async_bound,
+ .unbind = soc_camera_async_unbind,
+ .complete = soc_camera_async_complete,
+};
+
static int scan_async_group(struct soc_camera_host *ici,
struct v4l2_async_subdev **asd, unsigned int size)
{
@@ -1437,9 +1443,7 @@ static int scan_async_group(struct soc_camera_host *ici,
sasc->notifier.subdevs = asd;
sasc->notifier.num_subdevs = size;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
@@ -1537,9 +1541,7 @@ static int soc_of_bind(struct soc_camera_host *ici,
sasc->notifier.subdevs = &info->subdev;
sasc->notifier.num_subdevs = 1;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index 0116097c0c0f..270ec613c27c 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -306,16 +306,17 @@ update_cache:
}
/**
- * @icd - soc-camera device
- * @rect - camera cropping window
- * @subrect - part of rect, sent to the user
- * @mf - in- / output camera output window
- * @width - on input: max host input width
- * on output: user width, mapped back to input
- * @height - on input: max host input height
- * on output: user height, mapped back to input
- * @host_can_scale - host can scale this pixel format
- * @shift - shift, used for scaling
+ * soc_camera_client_scale
+ * @icd: soc-camera device
+ * @rect: camera cropping window
+ * @subrect: part of rect, sent to the user
+ * @mf: in- / output camera output window
+ * @width: on input: max host input width;
+ * on output: user width, mapped back to input
+ * @height: on input: max host input height;
+ * on output: user height, mapped back to input
+ * @host_can_scale: host can scale this pixel format
+ * @shift: shift, used for scaling
*/
int soc_camera_client_scale(struct soc_camera_device *icd,
struct v4l2_rect *rect, struct v4l2_rect *subrect,
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 939da6da7644..7e9ed9c7b3e1 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -723,7 +723,7 @@ static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct bdisp_ctx *ctx = fh_to_ctx(fh);
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_pix_format *pix;
struct bdisp_frame *frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame)) {
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 59280ac31937..a0acee7671b1 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -61,9 +61,9 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei);
#define FIFO_LEN 1024
-static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
{
- struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+ struct c8sectpfei *fei = from_timer(fei, t, timer);
struct channel_info *channel;
int chan_num;
@@ -865,8 +865,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
/* Setup timer interrupt */
- setup_timer(&fei->timer, c8sectpfe_timer_interrupt,
- (unsigned long)fei);
+ timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
mutex_init(&fei->lock);
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
index e6f247a983c7..17f1eb0ba957 100644
--- a/drivers/media/platform/sti/hva/hva-h264.c
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -134,7 +134,7 @@ enum hva_h264_sei_payload_type {
SEI_FRAME_PACKING_ARRANGEMENT = 45
};
-/**
+/*
* stereo Video Info struct
*/
struct hva_h264_stereo_video_sei {
@@ -146,7 +146,9 @@ struct hva_h264_stereo_video_sei {
u8 right_view_self_contained_flag;
};
-/**
+/*
+ * struct hva_h264_td
+ *
* @frame_width: width in pixels of the buffer containing the input frame
* @frame_height: height in pixels of the buffer containing the input frame
* @frame_num: the parameter to be written in the slice header
@@ -352,7 +354,9 @@ struct hva_h264_td {
u32 addr_brc_in_out_parameter;
};
-/**
+/*
+ * struct hva_h264_slice_po
+ *
* @ slice_size: slice size
* @ slice_start_time: start time
* @ slice_stop_time: stop time
@@ -365,7 +369,9 @@ struct hva_h264_slice_po {
u32 slice_num;
};
-/**
+/*
+ * struct hva_h264_po
+ *
* @ bitstream_size: bitstream size
* @ dct_bitstream_size: dtc bitstream size
* @ stuffing_bits: number of stuffing bits inserted by the encoder
@@ -391,7 +397,9 @@ struct hva_h264_task {
struct hva_h264_po po;
};
-/**
+/*
+ * struct hva_h264_ctx
+ *
* @seq_info: sequence information buffer
* @ref_frame: reference frame buffer
* @rec_frame: reconstructed frame buffer
@@ -999,7 +1007,6 @@ static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
{
struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
- struct hva_buffer *tmp_frame;
u32 stuffing_bytes = 0;
int ret = 0;
@@ -1023,9 +1030,7 @@ static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
&stream->bytesused);
/* switch reference & reconstructed frame */
- tmp_frame = ctx->ref_frame;
- ctx->ref_frame = ctx->rec_frame;
- ctx->rec_frame = tmp_frame;
+ swap(ctx->ref_frame, ctx->rec_frame);
return 0;
err:
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 35ba6f211b79..ac4c450a6c7d 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1495,6 +1495,12 @@ static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
return 0;
}
+static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
+ .bound = dcmi_graph_notify_bound,
+ .unbind = dcmi_graph_notify_unbind,
+ .complete = dcmi_graph_notify_complete,
+};
+
static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
{
struct device_node *ep = NULL;
@@ -1542,9 +1548,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
dcmi->notifier.subdevs = subdevs;
dcmi->notifier.num_subdevs = 1;
- dcmi->notifier.bound = dcmi_graph_notify_bound;
- dcmi->notifier.unbind = dcmi_graph_notify_unbind;
- dcmi->notifier.complete = dcmi_graph_notify_complete;
+ dcmi->notifier.ops = &dcmi_graph_notify_ops;
ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
diff --git a/drivers/media/platform/tegra-cec/Makefile b/drivers/media/platform/tegra-cec/Makefile
new file mode 100644
index 000000000000..f3d81127589f
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra_cec.o
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
new file mode 100644
index 000000000000..807c94c70049
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -0,0 +1,495 @@
+/*
+ * Tegra CEC implementation
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/clk/tegra.h>
+
+#include <media/cec-notifier.h>
+
+#include "tegra_cec.h"
+
+#define TEGRA_CEC_NAME "tegra-cec"
+
+struct tegra_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *cec_base;
+ struct cec_notifier *notifier;
+ int tegra_cec_irq;
+ bool rx_done;
+ bool tx_done;
+ int tx_status;
+ u8 rx_buf[CEC_MAX_MSG_SIZE];
+ u8 rx_buf_cnt;
+ u32 tx_buf[CEC_MAX_MSG_SIZE];
+ u8 tx_buf_cur;
+ u8 tx_buf_cnt;
+};
+
+static inline u32 cec_read(struct tegra_cec *cec, u32 reg)
+{
+ return readl(cec->cec_base + reg);
+}
+
+static inline void cec_write(struct tegra_cec *cec, u32 reg, u32 val)
+{
+ writel(val, cec->cec_base + reg);
+}
+
+static void tegra_cec_error_recovery(struct tegra_cec *cec)
+{
+ u32 hw_ctrl;
+
+ hw_ctrl = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, hw_ctrl);
+}
+
+static irqreturn_t tegra_cec_irq_thread_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+
+ if (cec->tx_done) {
+ cec_transmit_attempt_done(cec->adap, cec->tx_status);
+ cec->tx_done = false;
+ }
+ if (cec->rx_done) {
+ struct cec_msg msg = {};
+
+ msg.len = cec->rx_buf_cnt;
+ memcpy(msg.msg, cec->rx_buf, msg.len);
+ cec_received_msg(cec->adap, &msg);
+ cec->rx_done = false;
+ cec->rx_buf_cnt = 0;
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+ u32 status, mask;
+
+ status = cec_read(cec, TEGRA_CEC_INT_STAT);
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+
+ status &= mask;
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN) {
+ dev_err(dev, "TX underrun, interrupt timing issue!\n");
+
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if ((status & TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED) ||
+ (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)) {
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ if (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)
+ cec->tx_status = CEC_TX_STATUS_LOW_DRIVE;
+ else
+ cec->tx_status = CEC_TX_STATUS_ARB_LOST;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED);
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) {
+ tegra_cec_error_recovery(cec);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ } else {
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_OK;
+ }
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD)
+ dev_warn(dev, "TX NAKed on the fly!\n");
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY) {
+ if (cec->tx_buf_cur == cec->tx_buf_cnt) {
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+ } else {
+ cec_write(cec, TEGRA_CEC_TX_REGISTER,
+ cec->tx_buf[cec->tx_buf_cur++]);
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY);
+ }
+ }
+
+ if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
+ TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
+ TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED));
+ } else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
+ u32 v;
+
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_RX_REGISTER_FULL);
+ v = cec_read(cec, TEGRA_CEC_RX_REGISTER);
+ if (cec->rx_buf_cnt < CEC_MAX_MSG_SIZE)
+ cec->rx_buf[cec->rx_buf_cnt++] = v & 0xff;
+ if (v & TEGRA_CEC_RX_REGISTER_EOM) {
+ cec->rx_done = true;
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+
+ cec->rx_buf_cnt = 0;
+ cec->tx_buf_cnt = 0;
+ cec->tx_buf_cur = 0;
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_MASK, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_SW_CONTROL, 0);
+
+ if (!enable)
+ return 0;
+
+ cec_write(cec, TEGRA_CEC_INPUT_FILTER, (1U << 31) | 0x20);
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_0,
+ (0x7a << TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT) |
+ (0x6d << TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT) |
+ (0x93 << TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT) |
+ (0x86 << TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_1,
+ (0x35 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT) |
+ (0x21 << TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT) |
+ (0x56 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT) |
+ (0x40 << TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_2,
+ (0x50 << TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_0,
+ (0x74 << TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT) |
+ (0x8d << TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT) |
+ (0x08 << TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT) |
+ (0x71 << TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_1,
+ (0x2f << TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT) |
+ (0x13 << TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT) |
+ (0x4b << TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT) |
+ (0x21 << TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_2,
+ (0x07 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT) |
+ (0x05 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT) |
+ (0x03 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN |
+ TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD |
+ TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED |
+ TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
+ TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
+ TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN);
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
+ return 0;
+}
+
+static int tegra_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 state = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ state &= ~TEGRA_CEC_HWCTRL_RX_LADDR_MASK;
+ else
+ state |= TEGRA_CEC_HWCTRL_RX_LADDR((1 << logical_addr));
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, state);
+ return 0;
+}
+
+static int tegra_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 reg = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
+ if (enable)
+ reg |= TEGRA_CEC_HWCTRL_RX_SNOOP;
+ else
+ reg &= ~TEGRA_CEC_HWCTRL_RX_SNOOP;
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, reg);
+ return 0;
+}
+
+static int tegra_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time_ms, struct cec_msg *msg)
+{
+ bool retry_xfer = signal_free_time_ms == CEC_SIGNAL_FREE_TIME_RETRY;
+ struct tegra_cec *cec = adap->priv;
+ unsigned int i;
+ u32 mode = 0;
+ u32 mask;
+
+ if (cec_msg_is_broadcast(msg))
+ mode = TEGRA_CEC_TX_REG_BCAST;
+
+ cec->tx_buf_cur = 0;
+ cec->tx_buf_cnt = msg->len;
+
+ for (i = 0; i < msg->len; i++) {
+ cec->tx_buf[i] = mode | msg->msg[i];
+ if (i == 0)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_START_BIT;
+ if (i == msg->len - 1)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_EOM;
+ if (i == 0 && retry_xfer)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_RETRY;
+ }
+
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask | TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ return 0;
+}
+
+static const struct cec_adap_ops tegra_cec_ops = {
+ .adap_enable = tegra_cec_adap_enable,
+ .adap_log_addr = tegra_cec_adap_log_addr,
+ .adap_transmit = tegra_cec_adap_transmit,
+ .adap_monitor_all_enable = tegra_cec_adap_monitor_all_enable,
+};
+
+static int tegra_cec_probe(struct platform_device *pdev)
+{
+ struct platform_device *hdmi_dev;
+ struct device_node *np;
+ struct tegra_cec *cec;
+ struct resource *res;
+ int ret = 0;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
+
+ if (!cec)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Unable to allocate resources for device\n");
+ return -EBUSY;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+ dev_err(&pdev->dev,
+ "Unable to request mem region for device\n");
+ return -EBUSY;
+ }
+
+ cec->tegra_cec_irq = platform_get_irq(pdev, 0);
+
+ if (cec->tegra_cec_irq <= 0)
+ return -EBUSY;
+
+ cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+
+ if (!cec->cec_base) {
+ dev_err(&pdev->dev, "Unable to grab IOs for device\n");
+ return -EBUSY;
+ }
+
+ cec->clk = devm_clk_get(&pdev->dev, "cec");
+
+ if (IS_ERR_OR_NULL(cec->clk)) {
+ dev_err(&pdev->dev, "Can't get clock for CEC\n");
+ return -ENOENT;
+ }
+
+ clk_prepare_enable(cec->clk);
+
+ /* set context info. */
+ cec->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, cec);
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->tegra_cec_irq,
+ tegra_cec_irq_handler, tegra_cec_irq_thread_handler,
+ 0, "cec_irq", &pdev->dev);
+
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to request interrupt for device\n");
+ goto clk_error;
+ }
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto clk_error;
+ }
+
+ cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME,
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap)) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Couldn't create cec adapter\n");
+ goto cec_error;
+ }
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register device\n");
+ goto cec_error;
+ }
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ return 0;
+
+cec_error:
+ if (cec->notifier)
+ cec_notifier_put(cec->notifier);
+ cec_delete_adapter(cec->adap);
+clk_error:
+ clk_disable_unprepare(cec->clk);
+ return ret;
+}
+
+static int tegra_cec_remove(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_cec_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ dev_notice(&pdev->dev, "suspended\n");
+ return 0;
+}
+
+static int tegra_cec_resume(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ dev_notice(&pdev->dev, "Resuming\n");
+
+ clk_prepare_enable(cec->clk);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id tegra_cec_of_match[] = {
+ { .compatible = "nvidia,tegra114-cec", },
+ { .compatible = "nvidia,tegra124-cec", },
+ { .compatible = "nvidia,tegra210-cec", },
+ {},
+};
+
+static struct platform_driver tegra_cec_driver = {
+ .driver = {
+ .name = TEGRA_CEC_NAME,
+ .of_match_table = of_match_ptr(tegra_cec_of_match),
+ },
+ .probe = tegra_cec_probe,
+ .remove = tegra_cec_remove,
+
+#ifdef CONFIG_PM
+ .suspend = tegra_cec_suspend,
+ .resume = tegra_cec_resume,
+#endif
+};
+
+module_platform_driver(tegra_cec_driver);
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.h b/drivers/media/platform/tegra-cec/tegra_cec.h
new file mode 100644
index 000000000000..e301513daa87
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.h
@@ -0,0 +1,127 @@
+/*
+ * Tegra CEC register definitions
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TEGRA_CEC_H
+#define TEGRA_CEC_H
+
+/* CEC registers */
+#define TEGRA_CEC_SW_CONTROL 0x000
+#define TEGRA_CEC_HW_CONTROL 0x004
+#define TEGRA_CEC_INPUT_FILTER 0x008
+#define TEGRA_CEC_TX_REGISTER 0x010
+#define TEGRA_CEC_RX_REGISTER 0x014
+#define TEGRA_CEC_RX_TIMING_0 0x018
+#define TEGRA_CEC_RX_TIMING_1 0x01c
+#define TEGRA_CEC_RX_TIMING_2 0x020
+#define TEGRA_CEC_TX_TIMING_0 0x024
+#define TEGRA_CEC_TX_TIMING_1 0x028
+#define TEGRA_CEC_TX_TIMING_2 0x02c
+#define TEGRA_CEC_INT_STAT 0x030
+#define TEGRA_CEC_INT_MASK 0x034
+#define TEGRA_CEC_HW_DEBUG_RX 0x038
+#define TEGRA_CEC_HW_DEBUG_TX 0x03c
+
+#define TEGRA_CEC_HWCTRL_RX_LADDR_MASK 0x7fff
+#define TEGRA_CEC_HWCTRL_RX_LADDR(x) \
+ ((x) & TEGRA_CEC_HWCTRL_RX_LADDR_MASK)
+#define TEGRA_CEC_HWCTRL_RX_SNOOP (1 << 15)
+#define TEGRA_CEC_HWCTRL_RX_NAK_MODE (1 << 16)
+#define TEGRA_CEC_HWCTRL_TX_NAK_MODE (1 << 24)
+#define TEGRA_CEC_HWCTRL_FAST_SIM_MODE (1 << 30)
+#define TEGRA_CEC_HWCTRL_TX_RX_MODE (1 << 31)
+
+#define TEGRA_CEC_INPUT_FILTER_MODE (1 << 31)
+#define TEGRA_CEC_INPUT_FILTER_FIFO_LENGTH_SHIFT 0
+
+#define TEGRA_CEC_TX_REG_DATA_SHIFT 0
+#define TEGRA_CEC_TX_REG_EOM (1 << 8)
+#define TEGRA_CEC_TX_REG_BCAST (1 << 12)
+#define TEGRA_CEC_TX_REG_START_BIT (1 << 16)
+#define TEGRA_CEC_TX_REG_RETRY (1 << 17)
+
+#define TEGRA_CEC_RX_REGISTER_SHIFT 0
+#define TEGRA_CEC_RX_REGISTER_EOM (1 << 8)
+#define TEGRA_CEC_RX_REGISTER_ACK (1 << 9)
+
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT 0
+
+#define TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT 8
+#define TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT 16
+#define TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT 8
+#define TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT 16
+#define TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT 0
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT 4
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT 8
+
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_HW_DEBUG_TX_DURATION_COUNT_SHIFT 0
+#define TEGRA_CEC_HW_DEBUG_TX_TXBIT_COUNT_SHIFT 17
+#define TEGRA_CEC_HW_DEBUG_TX_STATE_SHIFT 21
+#define TEGRA_CEC_HW_DEBUG_TX_FORCELOOUT (1 << 25)
+#define TEGRA_CEC_HW_DEBUG_TX_TXDATABIT_SAMPLE_TIMER (1 << 26)
+
+#endif /* TEGRA_CEC_H */
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 42e383a48ffe..8b586c864524 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1522,6 +1522,11 @@ static int cal_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations cal_async_ops = {
+ .bound = cal_async_bound,
+ .complete = cal_async_complete,
+};
+
static int cal_complete_ctx(struct cal_ctx *ctx)
{
struct video_device *vfd;
@@ -1736,8 +1741,7 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
ctx->asd_list[0] = asd;
ctx->notifier.subdevs = ctx->asd_list;
ctx->notifier.num_subdevs = 1;
- ctx->notifier.bound = cal_async_bound;
- ctx->notifier.complete = cal_async_complete;
+ ctx->notifier.ops = &cal_async_ops;
ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
&ctx->notifier);
if (ret) {
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 45bd10544189..e395aa85c8ad 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -926,7 +926,7 @@ static struct vpe_ctx *file2ctx(struct file *file)
* mem2mem callbacks
*/
-/**
+/*
* job_ready() - check whether an instance is ready to be scheduled to run
*/
static int job_ready(void *priv)
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index b01fba020d5f..065483e62db4 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -343,7 +343,7 @@ static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
* mem2mem callbacks
*/
-/**
+/*
* job_ready() - check whether an instance is ready to be scheduled to run
*/
static int job_ready(void *priv)
@@ -388,9 +388,9 @@ static void device_run(void *priv)
schedule_irq(dev, ctx->transtime);
}
-static void device_isr(unsigned long priv)
+static void device_isr(struct timer_list *t)
{
- struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
+ struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
struct vim2m_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
@@ -1024,7 +1024,7 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
- setup_timer(&dev->timer, device_isr, (long)dev);
+ timer_setup(&dev->timer, device_isr, 0);
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 51c0eee61ca6..fe088a953860 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -267,11 +267,12 @@ static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
PLATFORM_DEVID_AUTO,
&pdata,
sizeof(pdata));
- if (!vimc->subdevs[i]) {
+ if (IS_ERR(vimc->subdevs[i])) {
+ match = ERR_CAST(vimc->subdevs[i]);
while (--i >= 0)
platform_device_unregister(vimc->subdevs[i]);
- return ERR_PTR(-ENOMEM);
+ return match;
}
component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index f0f423c7ca41..a651527d80db 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -189,6 +189,22 @@ struct vivid_fmt vivid_formats[] = {
.buffers = 1,
},
{
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_Y16,
.vdownsampling = { 1 },
.bit_depth = { 16 },
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 8b5cbb6b7a70..4257451f1bd8 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -70,6 +70,7 @@ struct vsp1_dl_body {
* @dma: DMA address for the header
* @body0: first display list body
* @fragments: list of extra display list bodies
+ * @has_chain: if true, indicates that there's a partition chain
* @chain: entry in the display list partition chain
*/
struct vsp1_dl_list {
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index ebfdf334d99c..d881cf09876d 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -351,6 +351,11 @@ static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
return -EINVAL;
}
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+ .bound = xvip_graph_notify_bound,
+ .complete = xvip_graph_notify_complete,
+};
+
static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
struct device_node *node)
{
@@ -548,8 +553,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
xdev->notifier.subdevs = subdevs;
xdev->notifier.num_subdevs = num_subdevs;
- xdev->notifier.bound = xvip_graph_notify_bound;
- xdev->notifier.complete = xvip_graph_notify_complete;
+ xdev->notifier.ops = &xvip_graph_notify_ops;
ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
if (ret < 0) {
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 6888b7db449d..7575e5370a49 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -281,9 +281,9 @@ static bool cadet_has_rds_data(struct cadet *dev)
}
-static void cadet_handler(unsigned long data)
+static void cadet_handler(struct timer_list *t)
{
- struct cadet *dev = (void *)data;
+ struct cadet *dev = from_timer(dev, t, readtimer);
/* Service the RDS fifo */
if (mutex_trylock(&dev->lock)) {
@@ -309,7 +309,6 @@ static void cadet_handler(unsigned long data)
/*
* Clean up and exit
*/
- setup_timer(&dev->readtimer, cadet_handler, data);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
@@ -318,7 +317,7 @@ static void cadet_start_rds(struct cadet *dev)
{
dev->rdsstat = 1;
outb(0x80, dev->io); /* Select RDS fifo */
- setup_timer(&dev->readtimer, cadet_handler, (unsigned long)dev);
+ timer_setup(&dev->readtimer, cadet_handler, 0);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 3c0a22a54113..70a2c86774ce 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -254,7 +254,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct raremono_device *radio = video_drvdata(file);
- u32 freq = f->frequency;
+ u32 freq;
unsigned band;
if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 271f725b17e8..540ac887a63c 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -158,7 +158,7 @@ enum si476x_ctrl_idx {
};
static struct v4l2_ctrl_config si476x_ctrls[] = {
- /**
+ /*
* SI476X during its station seeking(or tuning) process uses several
* parameters to detrmine if "the station" is valid:
*
@@ -197,7 +197,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
.step = 2,
},
- /**
+ /*
* #V4L2_CID_SI476X_HARMONICS_COUNT -- number of harmonics
* built-in power-line noise supression filter is to reject
* during AM-mode operation.
@@ -213,7 +213,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
.step = 1,
},
- /**
+ /*
* #V4L2_CID_SI476X_DIVERSITY_MODE -- configuration which
* two tuners working in diversity mode are to work in.
*
@@ -237,7 +237,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
.max = ARRAY_SIZE(phase_diversity_modes) - 1,
},
- /**
+ /*
* #V4L2_CID_SI476X_INTERCHIP_LINK -- inter-chip link in
* diversity mode indicator. Allows user to determine if two
* chips working in diversity mode have established a link
@@ -296,11 +296,15 @@ struct si476x_radio_ops {
/**
* struct si476x_radio - radio device
*
- * @core: Pointer to underlying core device
+ * @v4l2dev: Pointer to V4L2 device created by V4L2 subsystem
* @videodev: Pointer to video device created by V4L2 subsystem
+ * @ctrl_handler: V4L2 controls handler
+ * @core: Pointer to underlying core device
* @ops: Vtable of functions. See struct si476x_radio_ops for details
- * @kref: Reference counter
- * @core_lock: An r/w semaphore to brebvent the deletion of underlying
+ * @debugfs: pointer to &strucd dentry for debugfs
+ * @audmode: audio mode, as defined for the rxsubchans field
+ * at videodev2.h
+ *
* core structure is the radio device is being used
*/
struct si476x_radio {
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 903fcd5e99c0..3cbdc085c65d 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1330,7 +1330,7 @@ static int wl1273_fm_vidioc_s_input(struct file *file, void *priv,
/**
* wl1273_fm_set_tx_power() - Set the transmission power value.
- * @core: A pointer to the device struct.
+ * @radio: A pointer to the device struct.
* @power: The new power value.
*/
static int wl1273_fm_set_tx_power(struct wl1273_device *radio, u16 power)
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index cd76facc22f5..c89a7d5b8c55 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -749,7 +749,7 @@ static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
/*
* si470x_viddev_template - video device interface
*/
-struct video_device si470x_viddev_template = {
+const struct video_device si470x_viddev_template = {
.fops = &si470x_fops,
.name = DRIVER_NAME,
.release = video_device_release_empty,
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 7d2defd9d399..eb7b834a0ae5 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -209,7 +209,7 @@ struct si470x_device {
/**************************************************************************
* Common Functions
**************************************************************************/
-extern struct video_device si470x_viddev_template;
+extern const struct video_device si470x_viddev_template;
extern const struct v4l2_ctrl_ops si470x_ctrl_ops;
int si470x_get_register(struct si470x_device *radio, int regnr);
int si470x_set_register(struct si470x_device *radio, int regnr);
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index c9e349b169c4..2add222ea346 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -7,11 +7,11 @@ config RADIO_WL128X
depends on VIDEO_V4L2 && RFKILL && TTY && TI_ST
depends on GPIOLIB || COMPILE_TEST
help
- Choose Y here if you have this FM radio chip.
+ Choose Y here if you have this FM radio chip.
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/video4linux/API.html>.
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux 2 API. Information on
+ this API and pointers to "v4l2" programs may be found at
+ <file:Documentation/video4linux/API.html>.
endmenu
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index ab3428bf63fe..800d69c3f80b 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -543,13 +543,13 @@ static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
* interrupt process. Therefore reset stage index to re-enable default
* interrupts. So that next interrupt will be processed as usual.
*/
-static void int_timeout_handler(unsigned long data)
+static void int_timeout_handler(struct timer_list *t)
{
struct fmdev *fmdev;
struct fm_irq *fmirq;
fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
- fmdev = (struct fmdev *)data;
+ fmdev = from_timer(fmdev, t, irq_info.timer);
fmirq = &fmdev->irq_info;
fmirq->retry++;
@@ -1550,8 +1550,7 @@ int fmc_prepare(struct fmdev *fmdev)
atomic_set(&fmdev->tx_cnt, 1);
fmdev->resp_comp = NULL;
- setup_timer(&fmdev->irq_info.timer, &int_timeout_handler,
- (unsigned long)fmdev);
+ timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
/*TODO: add FM_STIC_EVENT later */
fmdev->irq_info.mask = FM_MAL_EVENT;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index d9ce8ff55d0c..afb3456d4e20 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -2,7 +2,6 @@
menuconfig RC_CORE
tristate "Remote Controller support"
depends on INPUT
- default y
---help---
Enable support for Remote Controllers on Linux. This is
needed in order to support several video capture adapters,
@@ -179,6 +178,7 @@ config IR_ENE
config IR_HIX5HD2
tristate "Hisilicon hix5hd2 IR remote control"
depends on RC_CORE
+ depends on OF || COMPILE_TEST
help
Say Y here if you want to use hisilicon hix5hd2 remote control.
To compile this driver as a module, choose M here: the module will be
@@ -286,6 +286,7 @@ config IR_REDRAT3
config IR_SPI
tristate "SPI connected IR LED"
depends on SPI && LIRC
+ depends on OF || COMPILE_TEST
---help---
Say Y if you want to use an IR LED connected through SPI bus.
@@ -393,6 +394,7 @@ config RC_LOOPBACK
config IR_GPIO_CIR
tristate "GPIO IR remote control"
depends on RC_CORE
+ depends on (OF && GPIOLIB) || COMPILE_TEST
---help---
Say Y if you want to use GPIO based IR Receiver.
@@ -403,6 +405,7 @@ config IR_GPIO_TX
tristate "GPIO IR Bit Banging Transmitter"
depends on RC_CORE
depends on LIRC
+ depends on (OF && GPIOLIB) || COMPILE_TEST
---help---
Say Y if you want to a GPIO based IR transmitter. This is a
bit banging driver.
@@ -415,6 +418,7 @@ config IR_PWM_TX
depends on RC_CORE
depends on LIRC
depends on PWM
+ depends on OF || COMPILE_TEST
---help---
Say Y if you want to use a PWM based IR transmitter. This is
more power efficient than the bit banging gpio driver.
@@ -469,6 +473,16 @@ config IR_SIR
To compile this driver as a module, choose M here: the module will
be called sir-ir.
+config IR_TANGO
+ tristate "Sigma Designs SMP86xx IR decoder"
+ depends on RC_CORE
+ depends on ARCH_TANGO || COMPILE_TEST
+ ---help---
+ Adds support for the HW IR decoder embedded on Sigma Designs
+ Tango-based systems (SMP86xx, SMP87xx).
+ The HW decoder supports NEC, RC-5, RC-6 IR protocols.
+ When compiled as a module, look for tango-ir.
+
config IR_ZX
tristate "ZTE ZX IR remote control"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index ab3d5a135453..10026477a677 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -45,3 +45,4 @@ obj-$(CONFIG_IR_SERIAL) += serial_ir.o
obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_IR_ZX) += zx-irdec.o
+obj-$(CONFIG_IR_TANGO) += tango-ir.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index d0871d60a723..8e82610ffaad 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -198,7 +198,7 @@ static const struct ati_receiver_type type_firefly = {
.default_keymap = RC_MAP_SNAPSTREAM_FIREFLY
};
-static struct usb_device_id ati_remote_table[] = {
+static const struct usb_device_id ati_remote_table[] = {
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_ati
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index af7ba23e16e1..71b8c9bbf6c4 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -670,9 +670,9 @@ exit:
}
/* timer to simulate tx done interrupt */
-static void ene_tx_irqsim(unsigned long data)
+static void ene_tx_irqsim(struct timer_list *t)
{
- struct ene_device *dev = (struct ene_device *)data;
+ struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
@@ -1045,8 +1045,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
- setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
- (long unsigned int)dev);
+ timer_setup(&dev->tx_sim_timer, ene_tx_irqsim, 0);
pr_warn("Simulation of TX activated\n");
}
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 7248b3662285..3d99b51384ac 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -14,119 +14,64 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <media/rc-core.h>
-#include <linux/platform_data/media/gpio-ir-recv.h>
-#define GPIO_IR_DRIVER_NAME "gpio-rc-recv"
#define GPIO_IR_DEVICE_NAME "gpio_ir_recv"
struct gpio_rc_dev {
struct rc_dev *rcdev;
- int gpio_nr;
- bool active_low;
+ struct gpio_desc *gpiod;
+ int irq;
};
-#ifdef CONFIG_OF
-/*
- * Translate OpenFirmware node properties into platform_data
- */
-static int gpio_ir_recv_get_devtree_pdata(struct device *dev,
- struct gpio_ir_recv_platform_data *pdata)
-{
- struct device_node *np = dev->of_node;
- enum of_gpio_flags flags;
- int gpio;
-
- gpio = of_get_gpio_flags(np, 0, &flags);
- if (gpio < 0) {
- if (gpio != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio flags (%d)\n", gpio);
- return gpio;
- }
-
- pdata->gpio_nr = gpio;
- pdata->active_low = (flags & OF_GPIO_ACTIVE_LOW);
- /* probe() takes care of map_name == NULL or allowed_protos == 0 */
- pdata->map_name = of_get_property(np, "linux,rc-map-name", NULL);
- pdata->allowed_protos = 0;
-
- return 0;
-}
-
-static const struct of_device_id gpio_ir_recv_of_match[] = {
- { .compatible = "gpio-ir-receiver", },
- { },
-};
-MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
-
-#else /* !CONFIG_OF */
-
-#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)
-
-#endif
-
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
+ int val;
struct gpio_rc_dev *gpio_dev = dev_id;
- int gval;
- int rc = 0;
-
- gval = gpio_get_value(gpio_dev->gpio_nr);
-
- if (gval < 0)
- goto err_get_value;
-
- if (gpio_dev->active_low)
- gval = !gval;
- rc = ir_raw_event_store_edge(gpio_dev->rcdev, gval == 1);
- if (rc < 0)
- goto err_get_value;
+ val = gpiod_get_value(gpio_dev->gpiod);
+ if (val >= 0)
+ ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
-err_get_value:
return IRQ_HANDLED;
}
static int gpio_ir_recv_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
- const struct gpio_ir_recv_platform_data *pdata =
- pdev->dev.platform_data;
int rc;
- if (pdev->dev.of_node) {
- struct gpio_ir_recv_platform_data *dtpdata =
- devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
- if (!dtpdata)
- return -ENOMEM;
- rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata);
- if (rc)
- return rc;
- pdata = dtpdata;
- }
-
- if (!pdata)
- return -EINVAL;
+ if (!np)
+ return -ENODEV;
- if (pdata->gpio_nr < 0)
- return -EINVAL;
-
- gpio_dev = kzalloc(sizeof(struct gpio_rc_dev), GFP_KERNEL);
+ gpio_dev = devm_kzalloc(dev, sizeof(*gpio_dev), GFP_KERNEL);
if (!gpio_dev)
return -ENOMEM;
- rcdev = rc_allocate_device(RC_DRIVER_IR_RAW);
- if (!rcdev) {
- rc = -ENOMEM;
- goto err_allocate_device;
+ gpio_dev->gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);
+ if (IS_ERR(gpio_dev->gpiod)) {
+ rc = PTR_ERR(gpio_dev->gpiod);
+ /* Just try again if this happens */
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio (%d)\n", rc);
+ return rc;
}
+ gpio_dev->irq = gpiod_to_irq(gpio_dev->gpiod);
+ if (gpio_dev->irq < 0)
+ return gpio_dev->irq;
+
+ rcdev = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
rcdev->priv = gpio_dev;
rcdev->device_name = GPIO_IR_DEVICE_NAME;
@@ -135,92 +80,52 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->input_id.vendor = 0x0001;
rcdev->input_id.product = 0x0001;
rcdev->input_id.version = 0x0100;
- rcdev->dev.parent = &pdev->dev;
- rcdev->driver_name = GPIO_IR_DRIVER_NAME;
+ rcdev->dev.parent = dev;
+ rcdev->driver_name = KBUILD_MODNAME;
rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
- if (pdata->allowed_protos)
- rcdev->allowed_protocols = pdata->allowed_protos;
- else
- rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
+ rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ rcdev->map_name = of_get_property(np, "linux,rc-map-name", NULL);
+ if (!rcdev->map_name)
+ rcdev->map_name = RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
- gpio_dev->gpio_nr = pdata->gpio_nr;
- gpio_dev->active_low = pdata->active_low;
-
- rc = gpio_request(pdata->gpio_nr, "gpio-ir-recv");
- if (rc < 0)
- goto err_gpio_request;
- rc = gpio_direction_input(pdata->gpio_nr);
- if (rc < 0)
- goto err_gpio_direction_input;
- rc = rc_register_device(rcdev);
+ rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {
- dev_err(&pdev->dev, "failed to register rc device\n");
- goto err_register_rc_device;
+ dev_err(dev, "failed to register rc device (%d)\n", rc);
+ return rc;
}
platform_set_drvdata(pdev, gpio_dev);
- rc = request_any_context_irq(gpio_to_irq(pdata->gpio_nr),
- gpio_ir_recv_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "gpio-ir-recv-irq", gpio_dev);
- if (rc < 0)
- goto err_request_irq;
-
- return 0;
-
-err_request_irq:
- rc_unregister_device(rcdev);
- rcdev = NULL;
-err_register_rc_device:
-err_gpio_direction_input:
- gpio_free(pdata->gpio_nr);
-err_gpio_request:
- rc_free_device(rcdev);
-err_allocate_device:
- kfree(gpio_dev);
- return rc;
-}
-
-static int gpio_ir_recv_remove(struct platform_device *pdev)
-{
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
-
- free_irq(gpio_to_irq(gpio_dev->gpio_nr), gpio_dev);
- rc_unregister_device(gpio_dev->rcdev);
- gpio_free(gpio_dev->gpio_nr);
- kfree(gpio_dev);
- return 0;
+ return devm_request_irq(dev, gpio_dev->irq, gpio_ir_recv_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "gpio-ir-recv-irq", gpio_dev);
}
#ifdef CONFIG_PM
static int gpio_ir_recv_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- enable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
+ enable_irq_wake(gpio_dev->irq);
else
- disable_irq(gpio_to_irq(gpio_dev->gpio_nr));
+ disable_irq(gpio_dev->irq);
return 0;
}
static int gpio_ir_recv_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- disable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
+ disable_irq_wake(gpio_dev->irq);
else
- enable_irq(gpio_to_irq(gpio_dev->gpio_nr));
+ enable_irq(gpio_dev->irq);
return 0;
}
@@ -231,11 +136,16 @@ static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
};
#endif
+static const struct of_device_id gpio_ir_recv_of_match[] = {
+ { .compatible = "gpio-ir-receiver", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
+
static struct platform_driver gpio_ir_recv_driver = {
.probe = gpio_ir_recv_probe,
- .remove = gpio_ir_recv_remove,
.driver = {
- .name = GPIO_IR_DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index a5ea86be8f44..f563ddd7f739 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -137,9 +137,9 @@ static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
dev_err(ir->dev, "submit urb failed: %d", ret);
}
-static void igorplugusb_timer(unsigned long data)
+static void igorplugusb_timer(struct timer_list *t)
{
- struct igorplugusb *ir = (struct igorplugusb *)data;
+ struct igorplugusb *ir = from_timer(ir, t, timer);
igorplugusb_cmd(ir, GET_INFRACODE);
}
@@ -174,7 +174,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
ir->dev = &intf->dev;
- setup_timer(&ir->timer, igorplugusb_timer, (unsigned long)ir);
+ timer_setup(&ir->timer, igorplugusb_timer, 0);
ir->request.bRequest = GET_INFRACODE;
ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
@@ -245,7 +245,7 @@ static void igorplugusb_disconnect(struct usb_interface *intf)
usb_free_urb(ir->urb);
}
-static struct usb_device_id igorplugusb_table[] = {
+static const struct usb_device_id igorplugusb_table[] = {
/* Igor Plug USB (Atmel's Manufact. ID) */
{ USB_DEVICE(0x03eb, 0x0002) },
/* Fit PC2 Infrared Adapter */
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 03fe080278df..bcbabeeab12a 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -92,10 +92,9 @@ static int img_ir_probe(struct platform_device *pdev)
/* Private driver data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "cannot allocate device data\n");
+ if (!priv)
return -ENOMEM;
- }
+
platform_set_drvdata(pdev, priv);
priv->dev = &pdev->dev;
spin_lock_init(&priv->lock);
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 82fdf4cc0824..ec4ded84cd17 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -339,7 +339,7 @@ static void img_ir_decoder_preprocess(struct img_ir_decoder *decoder)
/**
* img_ir_decoder_convert() - Generate internal timings in decoder.
* @decoder: Decoder to be converted to internal timings.
- * @timings: Timing register values.
+ * @reg_timings: Timing register values.
* @clock_hz: IR clock rate in Hz.
*
* Fills out the repeat timings and timing register values for a specific clock
@@ -867,9 +867,9 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
}
/* timer function to end waiting for repeat. */
-static void img_ir_end_timer(unsigned long arg)
+static void img_ir_end_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, hw.end_timer);
spin_lock_irq(&priv->lock);
img_ir_end_repeat(priv);
@@ -881,9 +881,9 @@ static void img_ir_end_timer(unsigned long arg)
* cleared when invalid interrupts were generated due to a quirk in the
* img-ir decoder.
*/
-static void img_ir_suspend_timer(unsigned long arg)
+static void img_ir_suspend_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, hw.suspend_timer);
spin_lock_irq(&priv->lock);
/*
@@ -1055,9 +1055,8 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
img_ir_probe_hw_caps(priv);
/* Set up the end timer */
- setup_timer(&hw->end_timer, img_ir_end_timer, (unsigned long)priv);
- setup_timer(&hw->suspend_timer, img_ir_suspend_timer,
- (unsigned long)priv);
+ timer_setup(&hw->end_timer, img_ir_end_timer, 0);
+ timer_setup(&hw->suspend_timer, img_ir_suspend_timer, 0);
/* Register a clock notifier */
if (!IS_ERR(priv->clk)) {
diff --git a/drivers/media/rc/img-ir/img-ir-raw.c b/drivers/media/rc/img-ir/img-ir-raw.c
index 64714efc1145..6e545680d3b6 100644
--- a/drivers/media/rc/img-ir/img-ir-raw.c
+++ b/drivers/media/rc/img-ir/img-ir-raw.c
@@ -67,9 +67,9 @@ void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status)
* order to be assured of the final space. If there are no edges for a certain
* time we use this timer to emit a final sample to satisfy them.
*/
-static void img_ir_echo_timer(unsigned long arg)
+static void img_ir_echo_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, raw.timer);
spin_lock_irq(&priv->lock);
@@ -107,7 +107,7 @@ int img_ir_probe_raw(struct img_ir_priv *priv)
int error;
/* Set up the echo timer */
- setup_timer(&raw->timer, img_ir_echo_timer, (unsigned long)priv);
+ timer_setup(&raw->timer, img_ir_echo_timer, 0);
/* Allocate raw decoder */
raw->rdev = rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 7b3f31cc63d2..eb943e862515 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -346,7 +346,7 @@ static const struct imon_usb_dev_descr imon_ir_raw = {
* devices use the SoundGraph vendor ID (0x15c2). This driver only supports
* the ffdc and later devices, which do onboard decoding.
*/
-static struct usb_device_id imon_usb_id_table[] = {
+static const struct usb_device_id imon_usb_id_table[] = {
/*
* Several devices with this same device ID, all use iMON_PAD.inf
* SoundGraph iMON PAD (IR & VFD)
@@ -492,7 +492,7 @@ static void free_imon_context(struct imon_context *ictx)
dev_dbg(dev, "%s: iMON context freed\n", __func__);
}
-/**
+/*
* Called when the Display device (e.g. /dev/lcd0)
* is opened by the application.
*/
@@ -542,7 +542,7 @@ exit:
return retval;
}
-/**
+/*
* Called when the display device (e.g. /dev/lcd0)
* is closed by the application.
*/
@@ -575,7 +575,7 @@ static int display_close(struct inode *inode, struct file *file)
return retval;
}
-/**
+/*
* Sends a packet to the device -- this function must be called with
* ictx->lock held, or its unlock/lock sequence while waiting for tx
* to complete can/will lead to a deadlock.
@@ -602,8 +602,7 @@ static int send_packet(struct imon_context *ictx)
ictx->tx_urb->actual_length = 0;
} else {
/* fill request into kmalloc'ed space: */
- control_req = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
+ control_req = kmalloc(sizeof(*control_req), GFP_KERNEL);
if (control_req == NULL)
return -ENOMEM;
@@ -665,7 +664,7 @@ static int send_packet(struct imon_context *ictx)
return retval;
}
-/**
+/*
* Sends an associate packet to the iMON 2.4G.
*
* This might not be such a good idea, since it has an id collision with
@@ -695,7 +694,7 @@ static int send_associate_24g(struct imon_context *ictx)
return retval;
}
-/**
+/*
* Sends packets to setup and show clock on iMON display
*
* Arguments: year - last 2 digits of year, month - 1..12,
@@ -782,7 +781,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
return retval;
}
-/**
+/*
* These are the sysfs functions to handle the association on the iMON 2.4G LT.
*/
static ssize_t show_associate_remote(struct device *d,
@@ -824,7 +823,7 @@ static ssize_t store_associate_remote(struct device *d,
return count;
}
-/**
+/*
* sysfs functions to control internal imon clock
*/
static ssize_t show_imon_clock(struct device *d,
@@ -924,7 +923,7 @@ static const struct attribute_group imon_rf_attr_group = {
.attrs = imon_rf_sysfs_entries
};
-/**
+/*
* Writes data to the VFD. The iMON VFD is 2x16 characters
* and requires data in 5 consecutive USB interrupt packets,
* each packet but the last carrying 7 bytes.
@@ -943,7 +942,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
int seq;
int retval = 0;
struct imon_context *ictx;
- const unsigned char vfd_packet6[] = {
+ static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
ictx = file->private_data;
@@ -1009,7 +1008,7 @@ exit:
return (!retval) ? n_bytes : retval;
}
-/**
+/*
* Writes data to the LCD. The iMON OEM LCD screen expects 8-byte
* packets. We accept data as 16 hexadecimal digits, followed by a
* newline (to make it easy to drive the device from a command-line
@@ -1067,7 +1066,7 @@ exit:
return (!retval) ? n_bytes : retval;
}
-/**
+/*
* Callback function for USB core API: transmit data
*/
static void usb_tx_callback(struct urb *urb)
@@ -1088,12 +1087,12 @@ static void usb_tx_callback(struct urb *urb)
complete(&ictx->tx.finished);
}
-/**
+/*
* report touchscreen input
*/
-static void imon_touch_display_timeout(unsigned long data)
+static void imon_touch_display_timeout(struct timer_list *t)
{
- struct imon_context *ictx = (struct imon_context *)data;
+ struct imon_context *ictx = from_timer(ictx, t, ttimer);
if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
return;
@@ -1104,7 +1103,7 @@ static void imon_touch_display_timeout(unsigned long data)
input_sync(ictx->touch);
}
-/**
+/*
* iMON IR receivers support two different signal sets -- those used by
* the iMON remotes, and those used by the Windows MCE remotes (which is
* really just RC-6), but only one or the other at a time, as the signals
@@ -1192,7 +1191,7 @@ static inline int tv2int(const struct timeval *a, const struct timeval *b)
return sec;
}
-/**
+/*
* The directional pad behaves a bit differently, depending on whether this is
* one of the older ffdc devices or a newer device. Newer devices appear to
* have a higher resolution matrix for more precise mouse movement, but it
@@ -1544,7 +1543,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
}
}
-/**
+/*
* figure out if these is a press or a release. We don't actually
* care about repeats, as those will be auto-generated within the IR
* subsystem for repeating scancodes.
@@ -1593,10 +1592,10 @@ static int imon_parse_press_type(struct imon_context *ictx,
return press_type;
}
-/**
+/*
* Process the incoming packet
*/
-/**
+/*
* Convert bit count to time duration (in us) and submit
* the value to lirc_dev.
*/
@@ -1609,7 +1608,7 @@ static void submit_data(struct imon_context *context)
ir_raw_event_store_with_filter(context->rdev, &ev);
}
-/**
+/*
* Process the incoming packet
*/
static void imon_incoming_ir_raw(struct imon_context *context,
@@ -1832,7 +1831,7 @@ not_input_data:
}
}
-/**
+/*
* Callback function for USB core API: receive data
*/
static void usb_rx_callback_intf0(struct urb *urb)
@@ -2047,8 +2046,8 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
{
struct rc_dev *rdev;
int ret;
- const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x88 };
+ static const unsigned char fp_packet[] = {
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88 };
rdev = rc_allocate_device(ictx->dev_descr->flags & IMON_IR_RAW ?
RC_DRIVER_IR_RAW : RC_DRIVER_SCANCODE);
@@ -2310,11 +2309,10 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
struct usb_host_interface *iface_desc;
int ret = -ENOMEM;
- ictx = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
- if (!ictx) {
- dev_err(dev, "%s: kzalloc failed for context", __func__);
+ ictx = kzalloc(sizeof(*ictx), GFP_KERNEL);
+ if (!ictx)
goto exit;
- }
+
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb)
goto rx_urb_alloc_failed;
@@ -2413,8 +2411,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
mutex_lock(&ictx->lock);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
- setup_timer(&ictx->ttimer, imon_touch_display_timeout,
- (unsigned long)ictx);
+ timer_setup(&ictx->ttimer, imon_touch_display_timeout, 0);
}
ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
@@ -2488,7 +2485,7 @@ static void imon_init_display(struct imon_context *ictx,
}
-/**
+/*
* Callback function for USB core API: Probe
*/
static int imon_probe(struct usb_interface *interface,
@@ -2517,6 +2514,11 @@ static int imon_probe(struct usb_interface *interface,
mutex_lock(&driver_lock);
first_if = usb_ifnum_to_if(usbdev, 0);
+ if (!first_if) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
first_if_ctx = usb_get_intfdata(first_if);
if (ifnum == 0) {
@@ -2581,7 +2583,7 @@ fail:
return ret;
}
-/**
+/*
* Callback function for USB core API: disconnect
*/
static void imon_disconnect(struct usb_interface *interface)
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index e2bd68c42edf..22c8aee3df4f 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -39,7 +39,7 @@ enum jvc_state {
/**
* ir_jvc_decode() - Decode one JVC pulse or space
* @dev: the struct rc_dev descriptor of the device
- * @duration: the struct ir_raw_event descriptor of the pulse/space
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index d2223c04e9ad..4fd4521693d9 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -25,8 +25,8 @@
/**
* ir_lirc_decode() - Send raw IR data to lirc_dev to be relayed to the
* lircd userspace daemon for decoding.
- * @input_dev: the struct rc_dev descriptor of the device
- * @duration: the struct ir_raw_event descriptor of the pulse/space
+ * @dev: the struct rc_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the lirc interfaces aren't wired up.
*/
@@ -35,7 +35,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
+ if (!dev->raw->lirc.ldev || !dev->raw->lirc.ldev->buf)
return -EINVAL;
/* Packet start */
@@ -84,8 +84,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
(u64)LIRC_VALUE_MASK);
gap_sample = LIRC_SPACE(lirc->gap_duration);
- lirc_buffer_write(dev->raw->lirc.drv->rbuf,
- (unsigned char *) &gap_sample);
+ lirc_buffer_write(dev->raw->lirc.ldev->buf,
+ (unsigned char *)&gap_sample);
lirc->gap = false;
}
@@ -95,9 +95,9 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
TO_US(ev.duration), TO_STR(ev.pulse));
}
- lirc_buffer_write(dev->raw->lirc.drv->rbuf,
+ lirc_buffer_write(dev->raw->lirc.ldev->buf,
(unsigned char *) &sample);
- wake_up(&dev->raw->lirc.drv->rbuf->wait_poll);
+ wake_up(&dev->raw->lirc.ldev->buf->wait_poll);
return 0;
}
@@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (!dev->max_timeout)
return -ENOTTY;
+ /* Check for multiply overflow */
+ if (val > U32_MAX / 1000)
+ return -EINVAL;
+
tmp = val * 1000;
- if (tmp < dev->min_timeout ||
- tmp > dev->max_timeout)
- return -EINVAL;
+ if (tmp < dev->min_timeout || tmp > dev->max_timeout)
+ return -EINVAL;
if (dev->s_timeout)
ret = dev->s_timeout(dev, tmp);
@@ -343,12 +346,12 @@ static const struct file_operations lirc_fops = {
static int ir_lirc_register(struct rc_dev *dev)
{
- struct lirc_driver *drv;
+ struct lirc_dev *ldev;
int rc = -ENOMEM;
unsigned long features = 0;
- drv = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!drv)
+ ldev = lirc_allocate_device();
+ if (!ldev)
return rc;
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
@@ -380,32 +383,29 @@ static int ir_lirc_register(struct rc_dev *dev)
if (dev->max_timeout)
features |= LIRC_CAN_SET_REC_TIMEOUT;
- snprintf(drv->name, sizeof(drv->name), "ir-lirc-codec (%s)",
+ snprintf(ldev->name, sizeof(ldev->name), "ir-lirc-codec (%s)",
dev->driver_name);
- drv->minor = -1;
- drv->features = features;
- drv->data = &dev->raw->lirc;
- drv->rbuf = NULL;
- drv->code_length = sizeof(struct ir_raw_event) * 8;
- drv->chunk_size = sizeof(int);
- drv->buffer_size = LIRCBUF_SIZE;
- drv->fops = &lirc_fops;
- drv->dev = &dev->dev;
- drv->rdev = dev;
- drv->owner = THIS_MODULE;
-
- drv->minor = lirc_register_driver(drv);
- if (drv->minor < 0) {
- rc = -ENODEV;
+ ldev->features = features;
+ ldev->data = &dev->raw->lirc;
+ ldev->buf = NULL;
+ ldev->code_length = sizeof(struct ir_raw_event) * 8;
+ ldev->chunk_size = sizeof(int);
+ ldev->buffer_size = LIRCBUF_SIZE;
+ ldev->fops = &lirc_fops;
+ ldev->dev.parent = &dev->dev;
+ ldev->rdev = dev;
+ ldev->owner = THIS_MODULE;
+
+ rc = lirc_register_device(ldev);
+ if (rc < 0)
goto out;
- }
- dev->raw->lirc.drv = drv;
+ dev->raw->lirc.ldev = ldev;
dev->raw->lirc.dev = dev;
return 0;
out:
- kfree(drv);
+ lirc_free_device(ldev);
return rc;
}
@@ -413,9 +413,8 @@ static int ir_lirc_unregister(struct rc_dev *dev)
{
struct lirc_codec *lirc = &dev->raw->lirc;
- lirc_unregister_driver(lirc->drv->minor);
- kfree(lirc->drv);
- lirc->drv = NULL;
+ lirc_unregister_device(lirc->ldev);
+ lirc->ldev = NULL;
return 0;
}
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 7c572a643656..69d6264d54e6 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -115,9 +115,9 @@ static unsigned char kbd_keycodes[256] = {
KEY_RESERVED
};
-static void mce_kbd_rx_timeout(unsigned long data)
+static void mce_kbd_rx_timeout(struct timer_list *t)
{
- struct mce_kbd_dec *mce_kbd = (struct mce_kbd_dec *)data;
+ struct mce_kbd_dec *mce_kbd = from_timer(mce_kbd, t, rx_timeout);
int i;
unsigned char maskcode;
@@ -389,8 +389,7 @@ static int ir_mce_kbd_register(struct rc_dev *dev)
set_bit(EV_MSC, idev->evbit);
set_bit(MSC_SCAN, idev->mscbit);
- setup_timer(&mce_kbd->rx_timeout, mce_kbd_rx_timeout,
- (unsigned long)mce_kbd);
+ timer_setup(&mce_kbd->rx_timeout, mce_kbd_rx_timeout, 0);
input_set_drvdata(idev, mce_kbd);
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 817c18f2ddd1..6880c190dcd2 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -41,7 +41,7 @@ enum nec_state {
/**
* ir_nec_decode() - Decode one NEC pulse or space
* @dev: the struct rc_dev descriptor of the device
- * @duration: the struct ir_raw_event descriptor of the pulse/space
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
@@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
data->state = STATE_BIT_PULSE;
return 0;
} else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
- rc_repeat(dev);
- IR_dprintk(1, "Repeat last key\n");
data->state = STATE_TRAILER_PULSE;
return 0;
}
@@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
break;
- address = bitrev8((data->bits >> 24) & 0xff);
- not_address = bitrev8((data->bits >> 16) & 0xff);
- command = bitrev8((data->bits >> 8) & 0xff);
- not_command = bitrev8((data->bits >> 0) & 0xff);
+ if (data->count == NEC_NBITS) {
+ address = bitrev8((data->bits >> 24) & 0xff);
+ not_address = bitrev8((data->bits >> 16) & 0xff);
+ command = bitrev8((data->bits >> 8) & 0xff);
+ not_command = bitrev8((data->bits >> 0) & 0xff);
+
+ scancode = ir_nec_bytes_to_scancode(address,
+ not_address,
+ command,
+ not_command,
+ &rc_proto);
- scancode = ir_nec_bytes_to_scancode(address, not_address,
- command, not_command,
- &rc_proto);
+ if (data->is_nec_x)
+ data->necx_repeat = true;
- if (data->is_nec_x)
- data->necx_repeat = true;
+ rc_keydown(dev, rc_proto, scancode, 0);
+ } else {
+ rc_repeat(dev);
+ }
- rc_keydown(dev, rc_proto, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
}
@@ -178,7 +183,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
* ir_nec_scancode_to_raw() - encode an NEC scancode ready for modulation.
* @protocol: specific protocol to use
* @scancode: a single NEC scancode.
- * @raw: raw data to be modulated.
*/
static u32 ir_nec_scancode_to_raw(enum rc_proto protocol, u32 scancode)
{
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 758c60956850..d94e07b02f3b 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -48,7 +48,7 @@ enum sanyo_state {
/**
* ir_sanyo_decode() - Decode one SANYO pulse or space
* @dev: the struct rc_dev descriptor of the device
- * @duration: the struct ir_raw_event descriptor of the pulse/space
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index 129b558acc92..7140dd6160ee 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -39,7 +39,7 @@ enum sharp_state {
/**
* ir_sharp_decode() - Decode one Sharp pulse or space
* @dev: the struct rc_dev descriptor of the device
- * @duration: the struct ir_raw_event descriptor of the pulse/space
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c
index 6f464be1c8d7..712bc6d76e92 100644
--- a/drivers/media/rc/ir-xmp-decoder.c
+++ b/drivers/media/rc/ir-xmp-decoder.c
@@ -35,7 +35,7 @@ enum xmp_state {
/**
* ir_xmp_decode() - Decode one XMP pulse or space
* @dev: the struct rc_dev descriptor of the device
- * @duration: the struct ir_raw_event descriptor of the pulse/space
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 2d0b26bf2051..50b319355edf 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-alink-dtu-m.o \
rc-anysee.o \
rc-apac-viewcomp.o \
+ rc-astrometa-t2hybrid.o \
rc-asus-pc39.o \
rc-asus-ps3-100.o \
rc-ati-tv-wonder-hd-600.o \
@@ -48,6 +49,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-geekbox.o \
rc-genius-tvgo-a11mce.o \
rc-gotview7135.o \
+ rc-hisi-poplar.o \
+ rc-hisi-tv-demo.o \
rc-imon-mce.o \
rc-imon-pad.o \
rc-iodata-bctv7e.o \
@@ -89,6 +92,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-reddo.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
+ rc-tango.o \
rc-tbs-nec.o \
rc-technisat-ts35.o \
rc-technisat-usb2.o \
diff --git a/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
new file mode 100644
index 000000000000..51690960fec4
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
@@ -0,0 +1,70 @@
+/*
+ * Keytable for the Astrometa T2hybrid remote controller
+ *
+ * Copyright (C) 2017 Oleh Kravchenko <oleg@kaa.org.ua>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table t2hybrid[] = {
+ { 0x4d, KEY_POWER2 },
+ { 0x54, KEY_VIDEO }, /* Source */
+ { 0x16, KEY_MUTE },
+
+ { 0x4c, KEY_RECORD },
+ { 0x05, KEY_CHANNELUP },
+ { 0x0c, KEY_TIME}, /* Timeshift */
+
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x40, KEY_ZOOM }, /* Fullscreen */
+ { 0x1e, KEY_VOLUMEUP },
+
+ { 0x12, KEY_0 },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x1c, KEY_AGAIN }, /* Recall */
+
+ { 0x09, KEY_1 },
+ { 0x1d, KEY_2 },
+ { 0x1f, KEY_3 },
+
+ { 0x0d, KEY_4 },
+ { 0x19, KEY_5 },
+ { 0x1b, KEY_6 },
+
+ { 0x11, KEY_7 },
+ { 0x15, KEY_8 },
+ { 0x17, KEY_9 },
+};
+
+static struct rc_map_list t2hybrid_map = {
+ .map = {
+ .scan = t2hybrid,
+ .size = ARRAY_SIZE(t2hybrid),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_ASTROMETA_T2HYBRID,
+ }
+};
+
+static int __init init_rc_map_t2hybrid(void)
+{
+ return rc_map_register(&t2hybrid_map);
+}
+
+static void __exit exit_rc_map_t2hybrid(void)
+{
+ rc_map_unregister(&t2hybrid_map);
+}
+
+module_init(init_rc_map_t2hybrid)
+module_exit(exit_rc_map_t2hybrid)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index 9882e2cde975..6d5a73b7ccec 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -43,7 +43,8 @@ static struct rc_map_table avermedia_m135a[] = {
{ 0x0213, KEY_RIGHT }, /* -> or L */
{ 0x0212, KEY_LEFT }, /* <- or R */
- { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */
+ { 0x0215, KEY_MENU },
+ { 0x0217, KEY_CAMERA }, /* Capturar Imagem or Snapshot */
{ 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */
{ 0x0303, KEY_CHANNELUP },
diff --git a/drivers/media/rc/keymaps/rc-hisi-poplar.c b/drivers/media/rc/keymaps/rc-hisi-poplar.c
new file mode 100644
index 000000000000..78728bc7f63a
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-hisi-poplar.c
@@ -0,0 +1,69 @@
+/*
+ * Keytable for remote controller of HiSilicon poplar board.
+ *
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table hisi_poplar_keymap[] = {
+ { 0x0000b292, KEY_1},
+ { 0x0000b293, KEY_2},
+ { 0x0000b2cc, KEY_3},
+ { 0x0000b28e, KEY_4},
+ { 0x0000b28f, KEY_5},
+ { 0x0000b2c8, KEY_6},
+ { 0x0000b28a, KEY_7},
+ { 0x0000b28b, KEY_8},
+ { 0x0000b2c4, KEY_9},
+ { 0x0000b287, KEY_0},
+ { 0x0000b282, KEY_HOMEPAGE},
+ { 0x0000b2ca, KEY_UP},
+ { 0x0000b299, KEY_LEFT},
+ { 0x0000b2c1, KEY_RIGHT},
+ { 0x0000b2d2, KEY_DOWN},
+ { 0x0000b2c5, KEY_DELETE},
+ { 0x0000b29c, KEY_MUTE},
+ { 0x0000b281, KEY_VOLUMEDOWN},
+ { 0x0000b280, KEY_VOLUMEUP},
+ { 0x0000b2dc, KEY_POWER},
+ { 0x0000b29a, KEY_MENU},
+ { 0x0000b28d, KEY_SETUP},
+ { 0x0000b2c5, KEY_BACK},
+ { 0x0000b295, KEY_PLAYPAUSE},
+ { 0x0000b2ce, KEY_ENTER},
+ { 0x0000b285, KEY_CHANNELUP},
+ { 0x0000b286, KEY_CHANNELDOWN},
+ { 0x0000b2da, KEY_NUMERIC_STAR},
+ { 0x0000b2d0, KEY_NUMERIC_POUND},
+};
+
+static struct rc_map_list hisi_poplar_map = {
+ .map = {
+ .scan = hisi_poplar_keymap,
+ .size = ARRAY_SIZE(hisi_poplar_keymap),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_HISI_POPLAR,
+ }
+};
+
+static int __init init_rc_map_hisi_poplar(void)
+{
+ return rc_map_register(&hisi_poplar_map);
+}
+
+static void __exit exit_rc_map_hisi_poplar(void)
+{
+ rc_map_unregister(&hisi_poplar_map);
+}
+
+module_init(init_rc_map_hisi_poplar)
+module_exit(exit_rc_map_hisi_poplar)
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/keymaps/rc-hisi-tv-demo.c b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
new file mode 100644
index 000000000000..4816e3a4a18d
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
@@ -0,0 +1,81 @@
+/*
+ * Keytable for remote controller of HiSilicon tv demo board.
+ *
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table hisi_tv_demo_keymap[] = {
+ { 0x00000092, KEY_1},
+ { 0x00000093, KEY_2},
+ { 0x000000cc, KEY_3},
+ { 0x0000009f, KEY_4},
+ { 0x0000008e, KEY_5},
+ { 0x0000008f, KEY_6},
+ { 0x000000c8, KEY_7},
+ { 0x00000094, KEY_8},
+ { 0x0000008a, KEY_9},
+ { 0x0000008b, KEY_0},
+ { 0x000000ce, KEY_ENTER},
+ { 0x000000ca, KEY_UP},
+ { 0x00000099, KEY_LEFT},
+ { 0x00000084, KEY_PAGEUP},
+ { 0x000000c1, KEY_RIGHT},
+ { 0x000000d2, KEY_DOWN},
+ { 0x00000089, KEY_PAGEDOWN},
+ { 0x000000d1, KEY_MUTE},
+ { 0x00000098, KEY_VOLUMEDOWN},
+ { 0x00000090, KEY_VOLUMEUP},
+ { 0x0000009c, KEY_POWER},
+ { 0x000000d6, KEY_STOP},
+ { 0x00000097, KEY_MENU},
+ { 0x000000cb, KEY_BACK},
+ { 0x000000da, KEY_PLAYPAUSE},
+ { 0x00000080, KEY_INFO},
+ { 0x000000c3, KEY_REWIND},
+ { 0x00000087, KEY_HOMEPAGE},
+ { 0x000000d0, KEY_FASTFORWARD},
+ { 0x000000c4, KEY_SOUND},
+ { 0x00000082, BTN_1},
+ { 0x000000c7, BTN_2},
+ { 0x00000086, KEY_PROGRAM},
+ { 0x000000d9, KEY_SUBTITLE},
+ { 0x00000085, KEY_ZOOM},
+ { 0x0000009b, KEY_RED},
+ { 0x0000009a, KEY_GREEN},
+ { 0x000000c0, KEY_YELLOW},
+ { 0x000000c2, KEY_BLUE},
+ { 0x0000009d, KEY_CHANNELDOWN},
+ { 0x000000cf, KEY_CHANNELUP},
+};
+
+static struct rc_map_list hisi_tv_demo_map = {
+ .map = {
+ .scan = hisi_tv_demo_keymap,
+ .size = ARRAY_SIZE(hisi_tv_demo_keymap),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_HISI_TV_DEMO,
+ }
+};
+
+static int __init init_rc_map_hisi_tv_demo(void)
+{
+ return rc_map_register(&hisi_tv_demo_map);
+}
+
+static void __exit exit_rc_map_hisi_tv_demo(void)
+{
+ rc_map_unregister(&hisi_tv_demo_map);
+}
+
+module_init(init_rc_map_hisi_tv_demo)
+module_exit(exit_rc_map_hisi_tv_demo)
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/keymaps/rc-tango.c b/drivers/media/rc/keymaps/rc-tango.c
new file mode 100644
index 000000000000..1c6e8875d46f
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-tango.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 Sigma Designs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table tango_table[] = {
+ { 0x4cb4a, KEY_POWER },
+ { 0x4cb48, KEY_FILE },
+ { 0x4cb0f, KEY_SETUP },
+ { 0x4cb4d, KEY_SUSPEND },
+ { 0x4cb4e, KEY_VOLUMEUP },
+ { 0x4cb44, KEY_EJECTCD },
+ { 0x4cb13, KEY_TV },
+ { 0x4cb51, KEY_MUTE },
+ { 0x4cb52, KEY_VOLUMEDOWN },
+
+ { 0x4cb41, KEY_1 },
+ { 0x4cb03, KEY_2 },
+ { 0x4cb42, KEY_3 },
+ { 0x4cb45, KEY_4 },
+ { 0x4cb07, KEY_5 },
+ { 0x4cb46, KEY_6 },
+ { 0x4cb55, KEY_7 },
+ { 0x4cb17, KEY_8 },
+ { 0x4cb56, KEY_9 },
+ { 0x4cb1b, KEY_0 },
+ { 0x4cb59, KEY_DELETE },
+ { 0x4cb5a, KEY_CAPSLOCK },
+
+ { 0x4cb47, KEY_BACK },
+ { 0x4cb05, KEY_SWITCHVIDEOMODE },
+ { 0x4cb06, KEY_UP },
+ { 0x4cb43, KEY_LEFT },
+ { 0x4cb01, KEY_RIGHT },
+ { 0x4cb0a, KEY_DOWN },
+ { 0x4cb02, KEY_ENTER },
+ { 0x4cb4b, KEY_INFO },
+ { 0x4cb09, KEY_HOME },
+
+ { 0x4cb53, KEY_MENU },
+ { 0x4cb12, KEY_PREVIOUS },
+ { 0x4cb50, KEY_PLAY },
+ { 0x4cb11, KEY_NEXT },
+ { 0x4cb4f, KEY_TITLE },
+ { 0x4cb0e, KEY_REWIND },
+ { 0x4cb4c, KEY_STOP },
+ { 0x4cb0d, KEY_FORWARD },
+ { 0x4cb57, KEY_MEDIA_REPEAT },
+ { 0x4cb16, KEY_ANGLE },
+ { 0x4cb54, KEY_PAUSE },
+ { 0x4cb15, KEY_SLOW },
+ { 0x4cb5b, KEY_TIME },
+ { 0x4cb1a, KEY_AUDIO },
+ { 0x4cb58, KEY_SUBTITLE },
+ { 0x4cb19, KEY_ZOOM },
+
+ { 0x4cb5f, KEY_RED },
+ { 0x4cb1e, KEY_GREEN },
+ { 0x4cb5c, KEY_YELLOW },
+ { 0x4cb1d, KEY_BLUE },
+};
+
+static struct rc_map_list tango_map = {
+ .map = {
+ .scan = tango_table,
+ .size = ARRAY_SIZE(tango_table),
+ .rc_proto = RC_PROTO_NECX,
+ .name = RC_MAP_TANGO,
+ }
+};
+
+static int __init init_rc_map_tango(void)
+{
+ return rc_map_register(&tango_map);
+}
+
+static void __exit exit_rc_map_tango(void)
+{
+ rc_map_unregister(&tango_map);
+}
+
+module_init(init_rc_map_tango)
+module_exit(exit_rc_map_tango)
+
+MODULE_AUTHOR("Sigma Designs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/keymaps/rc-twinhan1027.c b/drivers/media/rc/keymaps/rc-twinhan1027.c
index 2275b37c61d2..78bb3143a1a8 100644
--- a/drivers/media/rc/keymaps/rc-twinhan1027.c
+++ b/drivers/media/rc/keymaps/rc-twinhan1027.c
@@ -66,7 +66,7 @@ static struct rc_map_list twinhan_vp1027_map = {
.map = {
.scan = twinhan_vp1027,
.size = ARRAY_SIZE(twinhan_vp1027),
- .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
+ .rc_proto = RC_PROTO_NEC,
.name = RC_MAP_TWINHAN_VP1027_DVBS,
}
};
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 9080e39ea391..e16d1138ca48 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -24,96 +24,91 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
#include <media/rc-core.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
-#define NOPLUG -1
#define LOGHEAD "lirc_dev (%s[%d]): "
static dev_t lirc_base_dev;
-struct irctl {
- struct lirc_driver d;
- int attached;
- int open;
+/* Used to keep track of allocated lirc devices */
+#define LIRC_MAX_DEVICES 256
+static DEFINE_IDA(lirc_ida);
- struct mutex irctl_lock;
- struct lirc_buffer *buf;
- bool buf_internal;
- unsigned int chunk_size;
-
- struct device dev;
- struct cdev cdev;
-};
+/* Only used for sysfs but defined to void otherwise */
+static struct class *lirc_class;
-static DEFINE_MUTEX(lirc_dev_lock);
+static void lirc_release_device(struct device *ld)
+{
+ struct lirc_dev *d = container_of(ld, struct lirc_dev, dev);
-static struct irctl *irctls[MAX_IRCTL_DEVICES];
+ put_device(d->dev.parent);
-/* Only used for sysfs but defined to void otherwise */
-static struct class *lirc_class;
+ if (d->buf_internal) {
+ lirc_buffer_free(d->buf);
+ kfree(d->buf);
+ d->buf = NULL;
+ }
+ kfree(d);
+ module_put(THIS_MODULE);
+}
-static void lirc_release(struct device *ld)
+static int lirc_allocate_buffer(struct lirc_dev *d)
{
- struct irctl *ir = container_of(ld, struct irctl, dev);
+ int err;
- put_device(ir->dev.parent);
+ if (d->buf) {
+ d->buf_internal = false;
+ return 0;
+ }
- if (ir->buf_internal) {
- lirc_buffer_free(ir->buf);
- kfree(ir->buf);
+ d->buf = kmalloc(sizeof(*d->buf), GFP_KERNEL);
+ if (!d->buf)
+ return -ENOMEM;
+
+ err = lirc_buffer_init(d->buf, d->chunk_size, d->buffer_size);
+ if (err) {
+ kfree(d->buf);
+ d->buf = NULL;
+ return err;
}
- mutex_lock(&lirc_dev_lock);
- irctls[ir->d.minor] = NULL;
- mutex_unlock(&lirc_dev_lock);
- kfree(ir);
+ d->buf_internal = true;
+ return 0;
}
-static int lirc_allocate_buffer(struct irctl *ir)
+struct lirc_dev *
+lirc_allocate_device(void)
{
- int err = 0;
- int bytes_in_key;
- unsigned int chunk_size;
- unsigned int buffer_size;
- struct lirc_driver *d = &ir->d;
-
- bytes_in_key = BITS_TO_LONGS(d->code_length) +
- (d->code_length % 8 ? 1 : 0);
- buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key;
- chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key;
-
- if (d->rbuf) {
- ir->buf = d->rbuf;
- ir->buf_internal = false;
- } else {
- ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!ir->buf) {
- err = -ENOMEM;
- goto out;
- }
+ struct lirc_dev *d;
- err = lirc_buffer_init(ir->buf, chunk_size, buffer_size);
- if (err) {
- kfree(ir->buf);
- ir->buf = NULL;
- goto out;
- }
-
- ir->buf_internal = true;
- d->rbuf = ir->buf;
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (d) {
+ mutex_init(&d->mutex);
+ device_initialize(&d->dev);
+ d->dev.class = lirc_class;
+ d->dev.release = lirc_release_device;
+ __module_get(THIS_MODULE);
}
- ir->chunk_size = ir->buf->chunk_size;
-out:
- return err;
+ return d;
}
+EXPORT_SYMBOL(lirc_allocate_device);
-int lirc_register_driver(struct lirc_driver *d)
+void lirc_free_device(struct lirc_dev *d)
+{
+ if (!d)
+ return;
+
+ put_device(&d->dev);
+}
+EXPORT_SYMBOL(lirc_free_device);
+
+int lirc_register_device(struct lirc_dev *d)
{
- struct irctl *ir;
int minor;
int err;
@@ -122,8 +117,8 @@ int lirc_register_driver(struct lirc_driver *d)
return -EBADRQC;
}
- if (!d->dev) {
- pr_err("dev pointer not filled in!\n");
+ if (!d->dev.parent) {
+ pr_err("dev parent pointer not filled in!\n");
return -EINVAL;
}
@@ -132,226 +127,146 @@ int lirc_register_driver(struct lirc_driver *d)
return -EINVAL;
}
- if (d->minor >= MAX_IRCTL_DEVICES) {
- dev_err(d->dev, "minor must be between 0 and %d!\n",
- MAX_IRCTL_DEVICES - 1);
- return -EBADRQC;
+ if (!d->buf && d->chunk_size < 1) {
+ pr_err("chunk_size must be set!\n");
+ return -EINVAL;
}
- if (d->code_length < 1 || d->code_length > (BUFLEN * 8)) {
- dev_err(d->dev, "code length must be less than %d bits\n",
- BUFLEN * 8);
- return -EBADRQC;
+ if (!d->buf && d->buffer_size < 1) {
+ pr_err("buffer_size must be set!\n");
+ return -EINVAL;
}
- if (!d->rbuf && !(d->fops && d->fops->read &&
- d->fops->poll && d->fops->unlocked_ioctl)) {
- dev_err(d->dev, "undefined read, poll, ioctl\n");
+ if (d->code_length < 1 || d->code_length > (BUFLEN * 8)) {
+ dev_err(&d->dev, "code length must be less than %d bits\n",
+ BUFLEN * 8);
return -EBADRQC;
}
- mutex_lock(&lirc_dev_lock);
-
- minor = d->minor;
-
- if (minor < 0) {
- /* find first free slot for driver */
- for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++)
- if (!irctls[minor])
- break;
- if (minor == MAX_IRCTL_DEVICES) {
- dev_err(d->dev, "no free slots for drivers!\n");
- err = -ENOMEM;
- goto out_lock;
- }
- } else if (irctls[minor]) {
- dev_err(d->dev, "minor (%d) just registered!\n", minor);
- err = -EBUSY;
- goto out_lock;
- }
-
- ir = kzalloc(sizeof(struct irctl), GFP_KERNEL);
- if (!ir) {
- err = -ENOMEM;
- goto out_lock;
+ if (!d->buf && !(d->fops && d->fops->read &&
+ d->fops->poll && d->fops->unlocked_ioctl)) {
+ dev_err(&d->dev, "undefined read, poll, ioctl\n");
+ return -EBADRQC;
}
- mutex_init(&ir->irctl_lock);
- irctls[minor] = ir;
- d->minor = minor;
-
/* some safety check 8-) */
- d->name[sizeof(d->name)-1] = '\0';
+ d->name[sizeof(d->name) - 1] = '\0';
if (d->features == 0)
d->features = LIRC_CAN_REC_LIRCCODE;
- ir->d = *d;
-
if (LIRC_CAN_REC(d->features)) {
- err = lirc_allocate_buffer(irctls[minor]);
- if (err) {
- kfree(ir);
- goto out_lock;
- }
- d->rbuf = ir->buf;
+ err = lirc_allocate_buffer(d);
+ if (err)
+ return err;
}
- device_initialize(&ir->dev);
- ir->dev.devt = MKDEV(MAJOR(lirc_base_dev), ir->d.minor);
- ir->dev.class = lirc_class;
- ir->dev.parent = d->dev;
- ir->dev.release = lirc_release;
- dev_set_name(&ir->dev, "lirc%d", ir->d.minor);
+ minor = ida_simple_get(&lirc_ida, 0, LIRC_MAX_DEVICES, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
- cdev_init(&ir->cdev, d->fops);
- ir->cdev.owner = ir->d.owner;
- ir->cdev.kobj.parent = &ir->dev.kobj;
-
- err = cdev_add(&ir->cdev, ir->dev.devt, 1);
- if (err)
- goto out_free_dev;
-
- ir->attached = 1;
-
- err = device_add(&ir->dev);
- if (err)
- goto out_cdev;
-
- mutex_unlock(&lirc_dev_lock);
+ d->minor = minor;
+ d->dev.devt = MKDEV(MAJOR(lirc_base_dev), d->minor);
+ dev_set_name(&d->dev, "lirc%d", d->minor);
- get_device(ir->dev.parent);
+ cdev_init(&d->cdev, d->fops);
+ d->cdev.owner = d->owner;
+ d->attached = true;
- dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n",
- ir->d.name, ir->d.minor);
+ err = cdev_device_add(&d->cdev, &d->dev);
+ if (err) {
+ ida_simple_remove(&lirc_ida, minor);
+ return err;
+ }
- return minor;
+ get_device(d->dev.parent);
-out_cdev:
- cdev_del(&ir->cdev);
-out_free_dev:
- put_device(&ir->dev);
-out_lock:
- mutex_unlock(&lirc_dev_lock);
+ dev_info(&d->dev, "lirc_dev: driver %s registered at minor = %d\n",
+ d->name, d->minor);
- return err;
+ return 0;
}
-EXPORT_SYMBOL(lirc_register_driver);
+EXPORT_SYMBOL(lirc_register_device);
-int lirc_unregister_driver(int minor)
+void lirc_unregister_device(struct lirc_dev *d)
{
- struct irctl *ir;
+ if (!d)
+ return;
- if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
- pr_err("minor (%d) must be between 0 and %d!\n",
- minor, MAX_IRCTL_DEVICES - 1);
- return -EBADRQC;
- }
-
- ir = irctls[minor];
- if (!ir) {
- pr_err("failed to get irctl\n");
- return -ENOENT;
- }
+ dev_dbg(&d->dev, "lirc_dev: driver %s unregistered from minor = %d\n",
+ d->name, d->minor);
- mutex_lock(&lirc_dev_lock);
+ mutex_lock(&d->mutex);
- if (ir->d.minor != minor) {
- dev_err(ir->d.dev, "lirc_dev: minor %d device not registered\n",
- minor);
- mutex_unlock(&lirc_dev_lock);
- return -ENOENT;
+ d->attached = false;
+ if (d->open) {
+ dev_dbg(&d->dev, LOGHEAD "releasing opened driver\n",
+ d->name, d->minor);
+ wake_up_interruptible(&d->buf->wait_poll);
}
- dev_dbg(ir->d.dev, "lirc_dev: driver %s unregistered from minor = %d\n",
- ir->d.name, ir->d.minor);
-
- ir->attached = 0;
- if (ir->open) {
- dev_dbg(ir->d.dev, LOGHEAD "releasing opened driver\n",
- ir->d.name, ir->d.minor);
- wake_up_interruptible(&ir->buf->wait_poll);
- }
+ mutex_unlock(&d->mutex);
- mutex_unlock(&lirc_dev_lock);
-
- device_del(&ir->dev);
- cdev_del(&ir->cdev);
- put_device(&ir->dev);
-
- return 0;
+ cdev_device_del(&d->cdev, &d->dev);
+ ida_simple_remove(&lirc_ida, d->minor);
+ put_device(&d->dev);
}
-EXPORT_SYMBOL(lirc_unregister_driver);
+EXPORT_SYMBOL(lirc_unregister_device);
int lirc_dev_fop_open(struct inode *inode, struct file *file)
{
- struct irctl *ir;
- int retval = 0;
-
- if (iminor(inode) >= MAX_IRCTL_DEVICES) {
- pr_err("open result for %d is -ENODEV\n", iminor(inode));
- return -ENODEV;
- }
-
- if (mutex_lock_interruptible(&lirc_dev_lock))
- return -ERESTARTSYS;
-
- ir = irctls[iminor(inode)];
- mutex_unlock(&lirc_dev_lock);
+ struct lirc_dev *d = container_of(inode->i_cdev, struct lirc_dev, cdev);
+ int retval;
- if (!ir) {
- retval = -ENODEV;
- goto error;
- }
+ dev_dbg(&d->dev, LOGHEAD "open called\n", d->name, d->minor);
- dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor);
+ retval = mutex_lock_interruptible(&d->mutex);
+ if (retval)
+ return retval;
- if (ir->d.minor == NOPLUG) {
+ if (!d->attached) {
retval = -ENODEV;
- goto error;
+ goto out;
}
- if (ir->open) {
+ if (d->open) {
retval = -EBUSY;
- goto error;
+ goto out;
}
- if (ir->d.rdev) {
- retval = rc_open(ir->d.rdev);
+ if (d->rdev) {
+ retval = rc_open(d->rdev);
if (retval)
- goto error;
+ goto out;
}
- if (ir->buf)
- lirc_buffer_clear(ir->buf);
+ if (d->buf)
+ lirc_buffer_clear(d->buf);
- ir->open++;
+ d->open++;
-error:
+ lirc_init_pdata(inode, file);
nonseekable_open(inode, file);
+ mutex_unlock(&d->mutex);
+
+ return 0;
+out:
+ mutex_unlock(&d->mutex);
return retval;
}
EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
- struct irctl *ir = irctls[iminor(inode)];
- int ret;
-
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return -EINVAL;
- }
+ struct lirc_dev *d = file->private_data;
- ret = mutex_lock_killable(&lirc_dev_lock);
- WARN_ON(ret);
+ mutex_lock(&d->mutex);
- rc_close(ir->d.rdev);
+ rc_close(d->rdev);
+ d->open--;
- ir->open--;
- if (!ret)
- mutex_unlock(&lirc_dev_lock);
+ mutex_unlock(&d->mutex);
return 0;
}
@@ -359,29 +274,24 @@ EXPORT_SYMBOL(lirc_dev_fop_close);
unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ struct lirc_dev *d = file->private_data;
unsigned int ret;
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return POLLERR;
- }
-
- if (!ir->attached)
+ if (!d->attached)
return POLLHUP | POLLERR;
- if (ir->buf) {
- poll_wait(file, &ir->buf->wait_poll, wait);
+ if (d->buf) {
+ poll_wait(file, &d->buf->wait_poll, wait);
- if (lirc_buffer_empty(ir->buf))
+ if (lirc_buffer_empty(d->buf))
ret = 0;
else
ret = POLLIN | POLLRDNORM;
- } else
+ } else {
ret = POLLERR;
+ }
- dev_dbg(ir->d.dev, LOGHEAD "poll result = %d\n",
- ir->d.name, ir->d.minor, ret);
+ dev_dbg(&d->dev, LOGHEAD "poll result = %d\n", d->name, d->minor, ret);
return ret;
}
@@ -389,48 +299,44 @@ EXPORT_SYMBOL(lirc_dev_fop_poll);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct lirc_dev *d = file->private_data;
__u32 mode;
- int result = 0;
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ int result;
- if (!ir) {
- pr_err("no irctl found!\n");
- return -ENODEV;
- }
+ dev_dbg(&d->dev, LOGHEAD "ioctl called (0x%x)\n",
+ d->name, d->minor, cmd);
- dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n",
- ir->d.name, ir->d.minor, cmd);
+ result = mutex_lock_interruptible(&d->mutex);
+ if (result)
+ return result;
- if (ir->d.minor == NOPLUG || !ir->attached) {
- dev_err(ir->d.dev, LOGHEAD "ioctl result = -ENODEV\n",
- ir->d.name, ir->d.minor);
- return -ENODEV;
+ if (!d->attached) {
+ result = -ENODEV;
+ goto out;
}
- mutex_lock(&ir->irctl_lock);
-
switch (cmd) {
case LIRC_GET_FEATURES:
- result = put_user(ir->d.features, (__u32 __user *)arg);
+ result = put_user(d->features, (__u32 __user *)arg);
break;
case LIRC_GET_REC_MODE:
- if (!LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(d->features)) {
result = -ENOTTY;
break;
}
result = put_user(LIRC_REC2MODE
- (ir->d.features & LIRC_CAN_REC_MASK),
+ (d->features & LIRC_CAN_REC_MASK),
(__u32 __user *)arg);
break;
case LIRC_SET_REC_MODE:
- if (!LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(d->features)) {
result = -ENOTTY;
break;
}
result = get_user(mode, (__u32 __user *)arg);
- if (!result && !(LIRC_MODE2REC(mode) & ir->d.features))
+ if (!result && !(LIRC_MODE2REC(mode) & d->features))
result = -EINVAL;
/*
* FIXME: We should actually set the mode somehow but
@@ -438,32 +344,14 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
break;
case LIRC_GET_LENGTH:
- result = put_user(ir->d.code_length, (__u32 __user *)arg);
- break;
- case LIRC_GET_MIN_TIMEOUT:
- if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
- ir->d.min_timeout == 0) {
- result = -ENOTTY;
- break;
- }
-
- result = put_user(ir->d.min_timeout, (__u32 __user *)arg);
- break;
- case LIRC_GET_MAX_TIMEOUT:
- if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
- ir->d.max_timeout == 0) {
- result = -ENOTTY;
- break;
- }
-
- result = put_user(ir->d.max_timeout, (__u32 __user *)arg);
+ result = put_user(d->code_length, (__u32 __user *)arg);
break;
default:
result = -ENOTTY;
}
- mutex_unlock(&ir->irctl_lock);
-
+out:
+ mutex_unlock(&d->mutex);
return result;
}
EXPORT_SYMBOL(lirc_dev_fop_ioctl);
@@ -473,35 +361,34 @@ ssize_t lirc_dev_fop_read(struct file *file,
size_t length,
loff_t *ppos)
{
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ struct lirc_dev *d = file->private_data;
unsigned char *buf;
- int ret = 0, written = 0;
+ int ret, written = 0;
DECLARE_WAITQUEUE(wait, current);
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return -ENODEV;
- }
-
- if (!LIRC_CAN_REC(ir->d.features))
- return -EINVAL;
-
- dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor);
-
- buf = kzalloc(ir->chunk_size, GFP_KERNEL);
+ buf = kzalloc(d->buf->chunk_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (mutex_lock_interruptible(&ir->irctl_lock)) {
- ret = -ERESTARTSYS;
- goto out_unlocked;
+ dev_dbg(&d->dev, LOGHEAD "read called\n", d->name, d->minor);
+
+ ret = mutex_lock_interruptible(&d->mutex);
+ if (ret) {
+ kfree(buf);
+ return ret;
}
- if (!ir->attached) {
+
+ if (!d->attached) {
ret = -ENODEV;
goto out_locked;
}
- if (length % ir->chunk_size) {
+ if (!LIRC_CAN_REC(d->features)) {
+ ret = -EINVAL;
+ goto out_locked;
+ }
+
+ if (length % d->buf->chunk_size) {
ret = -EINVAL;
goto out_locked;
}
@@ -511,14 +398,14 @@ ssize_t lirc_dev_fop_read(struct file *file,
* to avoid losing scan code (in case when queue is awaken somewhere
* between while condition checking and scheduling)
*/
- add_wait_queue(&ir->buf->wait_poll, &wait);
+ add_wait_queue(&d->buf->wait_poll, &wait);
/*
* while we didn't provide 'length' bytes, device is opened in blocking
* mode and 'copy_to_user' is happy, wait for data.
*/
while (written < length && ret == 0) {
- if (lirc_buffer_empty(ir->buf)) {
+ if (lirc_buffer_empty(d->buf)) {
/* According to the read(2) man page, 'written' can be
* returned as less than 'length', instead of blocking
* again, returning -EWOULDBLOCK, or returning
@@ -535,36 +422,36 @@ ssize_t lirc_dev_fop_read(struct file *file,
break;
}
- mutex_unlock(&ir->irctl_lock);
+ mutex_unlock(&d->mutex);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
set_current_state(TASK_RUNNING);
- if (mutex_lock_interruptible(&ir->irctl_lock)) {
- ret = -ERESTARTSYS;
- remove_wait_queue(&ir->buf->wait_poll, &wait);
+ ret = mutex_lock_interruptible(&d->mutex);
+ if (ret) {
+ remove_wait_queue(&d->buf->wait_poll, &wait);
goto out_unlocked;
}
- if (!ir->attached) {
+ if (!d->attached) {
ret = -ENODEV;
goto out_locked;
}
} else {
- lirc_buffer_read(ir->buf, buf);
+ lirc_buffer_read(d->buf, buf);
ret = copy_to_user((void __user *)buffer+written, buf,
- ir->buf->chunk_size);
+ d->buf->chunk_size);
if (!ret)
- written += ir->buf->chunk_size;
+ written += d->buf->chunk_size;
else
ret = -EFAULT;
}
}
- remove_wait_queue(&ir->buf->wait_poll, &wait);
+ remove_wait_queue(&d->buf->wait_poll, &wait);
out_locked:
- mutex_unlock(&ir->irctl_lock);
+ mutex_unlock(&d->mutex);
out_unlocked:
kfree(buf);
@@ -573,9 +460,19 @@ out_unlocked:
}
EXPORT_SYMBOL(lirc_dev_fop_read);
+void lirc_init_pdata(struct inode *inode, struct file *file)
+{
+ struct lirc_dev *d = container_of(inode->i_cdev, struct lirc_dev, cdev);
+
+ file->private_data = d;
+}
+EXPORT_SYMBOL(lirc_init_pdata);
+
void *lirc_get_pdata(struct file *file)
{
- return irctls[iminor(file_inode(file))]->d.data;
+ struct lirc_dev *d = file->private_data;
+
+ return d->data;
}
EXPORT_SYMBOL(lirc_get_pdata);
@@ -590,7 +487,7 @@ static int __init lirc_dev_init(void)
return PTR_ERR(lirc_class);
}
- retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES,
+ retval = alloc_chrdev_region(&lirc_base_dev, 0, LIRC_MAX_DEVICES,
"BaseRemoteCtl");
if (retval) {
class_destroy(lirc_class);
@@ -607,7 +504,7 @@ static int __init lirc_dev_init(void)
static void __exit lirc_dev_exit(void)
{
class_destroy(lirc_class);
- unregister_chrdev_region(lirc_base_dev, MAX_IRCTL_DEVICES);
+ unregister_chrdev_region(lirc_base_dev, LIRC_MAX_DEVICES);
pr_info("module unloaded\n");
}
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index bf7aaff3aa37..a9187b0b46a1 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -188,6 +188,8 @@ enum mceusb_model_type {
TIVO_KIT,
MCE_GEN2_NO_TX,
HAUPPAUGE_CX_HYBRID_TV,
+ EVROMEDIA_FULL_HYBRID_FULLHD,
+ ASTROMETA_T2HYBRID,
};
struct mceusb_model {
@@ -247,9 +249,19 @@ static const struct mceusb_model mceusb_model[] = {
.mce_gen2 = 1,
.rc_map = RC_MAP_TIVO,
},
+ [EVROMEDIA_FULL_HYBRID_FULLHD] = {
+ .name = "Evromedia USB Full Hybrid Full HD",
+ .no_tx = 1,
+ .rc_map = RC_MAP_MSI_DIGIVOX_III,
+ },
+ [ASTROMETA_T2HYBRID] = {
+ .name = "Astrometa T2Hybrid",
+ .no_tx = 1,
+ .rc_map = RC_MAP_ASTROMETA_T2HYBRID,
+ }
};
-static struct usb_device_id mceusb_dev_table[] = {
+static const struct usb_device_id mceusb_dev_table[] = {
/* Original Microsoft MCE IR Transceiver (often HP-branded) */
{ USB_DEVICE(VENDOR_MICROSOFT, 0x006d),
.driver_info = MCE_GEN1 },
@@ -398,6 +410,12 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Adaptec / HP eHome Receiver */
{ USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
+ /* Evromedia USB Full Hybrid Full HD */
+ { USB_DEVICE(0x1b80, 0xd3b2),
+ .driver_info = EVROMEDIA_FULL_HYBRID_FULLHD },
+ /* Astrometa T2hybrid */
+ { USB_DEVICE(0x15f4, 0x0135),
+ .driver_info = ASTROMETA_T2HYBRID },
/* Terminating entry */
{ }
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 7da9c96cb058..ae4dd0c27731 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -106,7 +106,7 @@ struct ir_raw_event_ctrl {
} mce_kbd;
struct lirc_codec {
struct rc_dev *dev;
- struct lirc_driver *drv;
+ struct lirc_dev *ldev;
int carrier_low;
ktime_t gap_start;
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 503bc425a187..d78483a504c9 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
/**
* ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
* @dev: the struct rc_dev device descriptor
- * @type: the type of the event that has occurred
+ * @ev: the event that has occurred
*
* This routine (which may be called from an interrupt context) works
* in similar manner to ir_raw_event_store_edge.
@@ -471,9 +471,10 @@ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
-static void edge_handle(unsigned long arg)
+static void edge_handle(struct timer_list *t)
{
- struct rc_dev *dev = (struct rc_dev *)arg;
+ struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
+ struct rc_dev *dev = raw->dev;
ktime_t interval = ktime_sub(ktime_get(), dev->raw->last_event);
if (ktime_to_ns(interval) >= dev->timeout) {
@@ -513,8 +514,7 @@ int ir_raw_event_prepare(struct rc_dev *dev)
dev->raw->dev = dev;
dev->change_protocol = change_protocol;
- setup_timer(&dev->raw->edge_handle, edge_handle,
- (unsigned long)dev);
+ timer_setup(&dev->raw->edge_handle, edge_handle, 0);
INIT_KFIFO(dev->raw->kfifo);
return 0;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 981cccd6b988..c144b77eac98 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/rc-core.h>
+#include <linux/bsearch.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
@@ -38,41 +39,41 @@ static const struct {
[RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 },
[RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 },
[RC_PROTO_RC5] = { .name = "rc-5",
- .scancode_bits = 0x1f7f, .repeat_period = 164 },
+ .scancode_bits = 0x1f7f, .repeat_period = 250 },
[RC_PROTO_RC5X_20] = { .name = "rc-5x-20",
- .scancode_bits = 0x1f7f3f, .repeat_period = 164 },
+ .scancode_bits = 0x1f7f3f, .repeat_period = 250 },
[RC_PROTO_RC5_SZ] = { .name = "rc-5-sz",
- .scancode_bits = 0x2fff, .repeat_period = 164 },
+ .scancode_bits = 0x2fff, .repeat_period = 250 },
[RC_PROTO_JVC] = { .name = "jvc",
.scancode_bits = 0xffff, .repeat_period = 250 },
[RC_PROTO_SONY12] = { .name = "sony-12",
- .scancode_bits = 0x1f007f, .repeat_period = 100 },
+ .scancode_bits = 0x1f007f, .repeat_period = 250 },
[RC_PROTO_SONY15] = { .name = "sony-15",
- .scancode_bits = 0xff007f, .repeat_period = 100 },
+ .scancode_bits = 0xff007f, .repeat_period = 250 },
[RC_PROTO_SONY20] = { .name = "sony-20",
- .scancode_bits = 0x1fff7f, .repeat_period = 100 },
+ .scancode_bits = 0x1fff7f, .repeat_period = 250 },
[RC_PROTO_NEC] = { .name = "nec",
- .scancode_bits = 0xffff, .repeat_period = 160 },
+ .scancode_bits = 0xffff, .repeat_period = 250 },
[RC_PROTO_NECX] = { .name = "nec-x",
- .scancode_bits = 0xffffff, .repeat_period = 160 },
+ .scancode_bits = 0xffffff, .repeat_period = 250 },
[RC_PROTO_NEC32] = { .name = "nec-32",
- .scancode_bits = 0xffffffff, .repeat_period = 160 },
+ .scancode_bits = 0xffffffff, .repeat_period = 250 },
[RC_PROTO_SANYO] = { .name = "sanyo",
.scancode_bits = 0x1fffff, .repeat_period = 250 },
[RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd",
- .scancode_bits = 0xffff, .repeat_period = 150 },
+ .scancode_bits = 0xffff, .repeat_period = 250 },
[RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse",
- .scancode_bits = 0x1fffff, .repeat_period = 150 },
+ .scancode_bits = 0x1fffff, .repeat_period = 250 },
[RC_PROTO_RC6_0] = { .name = "rc-6-0",
- .scancode_bits = 0xffff, .repeat_period = 164 },
+ .scancode_bits = 0xffff, .repeat_period = 250 },
[RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20",
- .scancode_bits = 0xfffff, .repeat_period = 164 },
+ .scancode_bits = 0xfffff, .repeat_period = 250 },
[RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24",
- .scancode_bits = 0xffffff, .repeat_period = 164 },
+ .scancode_bits = 0xffffff, .repeat_period = 250 },
[RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32",
- .scancode_bits = 0xffffffff, .repeat_period = 164 },
+ .scancode_bits = 0xffffffff, .repeat_period = 250 },
[RC_PROTO_RC6_MCE] = { .name = "rc-6-mce",
- .scancode_bits = 0xffff7fff, .repeat_period = 164 },
+ .scancode_bits = 0xffff7fff, .repeat_period = 250 },
[RC_PROTO_SHARP] = { .name = "sharp",
.scancode_bits = 0x1fff, .repeat_period = 250 },
[RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 },
@@ -169,10 +170,11 @@ static struct rc_map_list empty_map = {
* @name: name to assign to the table
* @rc_proto: ir type to assign to the new table
* @size: initial size of the table
- * @return: zero on success or a negative error code
*
* This routine will initialize the rc_map and will allocate
* memory to hold at least the specified number of elements.
+ *
+ * return: zero on success or a negative error code
*/
static int ir_create_table(struct rc_map *rc_map,
const char *name, u64 rc_proto, size_t size)
@@ -215,10 +217,11 @@ static void ir_free_table(struct rc_map *rc_map)
* ir_resize_table() - resizes a scancode table if necessary
* @rc_map: the rc_map to resize
* @gfp_flags: gfp flags to use when allocating memory
- * @return: zero on success or a negative error code
*
* This routine will shrink the rc_map if it has lots of
* unused entries and grow it if it is full.
+ *
+ * return: zero on success or a negative error code
*/
static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
{
@@ -264,11 +267,13 @@ static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
* @dev: the struct rc_dev device descriptor
* @rc_map: scancode table to be adjusted
* @index: index of the mapping that needs to be updated
- * @keycode: the desired keycode
- * @return: previous keycode assigned to the mapping
+ * @new_keycode: the desired keycode
*
* This routine is used to update scancode->keycode mapping at given
* position.
+ *
+ * return: previous keycode assigned to the mapping
+ *
*/
static unsigned int ir_update_mapping(struct rc_dev *dev,
struct rc_map *rc_map,
@@ -319,12 +324,13 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
* @scancode: the desired scancode
* @resize: controls whether we allowed to resize the table to
* accommodate not yet present scancodes
- * @return: index of the mapping containing scancode in question
- * or -1U in case of failure.
*
* This routine is used to locate given scancode in rc_map.
* If scancode is not yet present the routine will allocate a new slot
* for it.
+ *
+ * return: index of the mapping containing scancode in question
+ * or -1U in case of failure.
*/
static unsigned int ir_establish_scancode(struct rc_dev *dev,
struct rc_map *rc_map,
@@ -374,11 +380,12 @@ static unsigned int ir_establish_scancode(struct rc_dev *dev,
/**
* ir_setkeycode() - set a keycode in the scancode->keycode table
* @idev: the struct input_dev device descriptor
- * @scancode: the desired scancode
- * @keycode: result
- * @return: -EINVAL if the keycode could not be inserted, otherwise zero.
+ * @ke: Input keymap entry
+ * @old_keycode: result
*
* This routine is used to handle evdev EVIOCSKEY ioctl.
+ *
+ * return: -EINVAL if the keycode could not be inserted, otherwise zero.
*/
static int ir_setkeycode(struct input_dev *idev,
const struct input_keymap_entry *ke,
@@ -421,11 +428,11 @@ out:
/**
* ir_setkeytable() - sets several entries in the scancode->keycode table
* @dev: the struct rc_dev device descriptor
- * @to: the struct rc_map to copy entries to
* @from: the struct rc_map to copy entries from
- * @return: -ENOMEM if all keycodes could not be inserted, otherwise zero.
*
* This routine is used to handle table initialization.
+ *
+ * return: -ENOMEM if all keycodes could not be inserted, otherwise zero.
*/
static int ir_setkeytable(struct rc_dev *dev,
const struct rc_map *from)
@@ -439,9 +446,6 @@ static int ir_setkeytable(struct rc_dev *dev,
if (rc)
return rc;
- IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
- rc_map->size, rc_map->alloc);
-
for (i = 0; i < from->size; i++) {
index = ir_establish_scancode(dev, rc_map,
from->scan[i].scancode, false);
@@ -460,43 +464,49 @@ static int ir_setkeytable(struct rc_dev *dev,
return rc;
}
+static int rc_map_cmp(const void *key, const void *elt)
+{
+ const unsigned int *scancode = key;
+ const struct rc_map_table *e = elt;
+
+ if (*scancode < e->scancode)
+ return -1;
+ else if (*scancode > e->scancode)
+ return 1;
+ return 0;
+}
+
/**
* ir_lookup_by_scancode() - locate mapping by scancode
* @rc_map: the struct rc_map to search
* @scancode: scancode to look for in the table
- * @return: index in the table, -1U if not found
*
* This routine performs binary search in RC keykeymap table for
* given scancode.
+ *
+ * return: index in the table, -1U if not found
*/
static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
unsigned int scancode)
{
- int start = 0;
- int end = rc_map->len - 1;
- int mid;
-
- while (start <= end) {
- mid = (start + end) / 2;
- if (rc_map->scan[mid].scancode < scancode)
- start = mid + 1;
- else if (rc_map->scan[mid].scancode > scancode)
- end = mid - 1;
- else
- return mid;
- }
+ struct rc_map_table *res;
- return -1U;
+ res = bsearch(&scancode, rc_map->scan, rc_map->len,
+ sizeof(struct rc_map_table), rc_map_cmp);
+ if (!res)
+ return -1U;
+ else
+ return res - rc_map->scan;
}
/**
* ir_getkeycode() - get a keycode from the scancode->keycode table
* @idev: the struct input_dev device descriptor
- * @scancode: the desired scancode
- * @keycode: used to return the keycode, if found, or KEY_RESERVED
- * @return: always returns zero.
+ * @ke: Input keymap entry
*
* This routine is used to handle evdev EVIOCGKEY ioctl.
+ *
+ * return: always returns zero.
*/
static int ir_getkeycode(struct input_dev *idev,
struct input_keymap_entry *ke)
@@ -553,11 +563,12 @@ out:
* rc_g_keycode_from_table() - gets the keycode that corresponds to a scancode
* @dev: the struct rc_dev descriptor of the device
* @scancode: the scancode to look for
- * @return: the corresponding keycode, or KEY_RESERVED
*
* This routine is used by drivers which need to convert a scancode to a
* keycode. Normally it should not be used since drivers should have no
* interest in keycodes.
+ *
+ * return: the corresponding keycode, or KEY_RESERVED
*/
u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode)
{
@@ -622,14 +633,15 @@ EXPORT_SYMBOL_GPL(rc_keyup);
/**
* ir_timer_keyup() - generates a keyup event after a timeout
- * @cookie: a pointer to the struct rc_dev for the device
+ *
+ * @t: a pointer to the struct timer_list
*
* This routine will generate a keyup event some time after a keydown event
* is generated when no further activity has been detected.
*/
-static void ir_timer_keyup(unsigned long cookie)
+static void ir_timer_keyup(struct timer_list *t)
{
- struct rc_dev *dev = (struct rc_dev *)cookie;
+ struct rc_dev *dev = from_timer(dev, t, timer_keyup);
unsigned long flags;
/*
@@ -777,7 +789,8 @@ EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
* provides sensible defaults
* @dev: the struct rc_dev descriptor of the device
* @filter: the scancode and mask
- * @return: 0 or -EINVAL if the filter is not valid
+ *
+ * return: 0 or -EINVAL if the filter is not valid
*/
static int rc_validate_filter(struct rc_dev *dev,
struct rc_scancode_filter *filter)
@@ -1480,6 +1493,8 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
if (dev->driver_name)
ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
+ if (dev->device_name)
+ ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
return 0;
}
@@ -1487,7 +1502,10 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
/*
* Static device attribute struct with the sysfs attributes for IR's
*/
-static DEVICE_ATTR(protocols, 0644, show_protocols, store_protocols);
+static struct device_attribute dev_attr_ro_protocols =
+__ATTR(protocols, 0444, show_protocols, NULL);
+static struct device_attribute dev_attr_rw_protocols =
+__ATTR(protocols, 0644, show_protocols, store_protocols);
static DEVICE_ATTR(wakeup_protocols, 0644, show_wakeup_protocols,
store_wakeup_protocols);
static RC_FILTER_ATTR(filter, S_IRUGO|S_IWUSR,
@@ -1499,13 +1517,22 @@ static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_WAKEUP, true);
-static struct attribute *rc_dev_protocol_attrs[] = {
- &dev_attr_protocols.attr,
+static struct attribute *rc_dev_rw_protocol_attrs[] = {
+ &dev_attr_rw_protocols.attr,
NULL,
};
-static const struct attribute_group rc_dev_protocol_attr_grp = {
- .attrs = rc_dev_protocol_attrs,
+static const struct attribute_group rc_dev_rw_protocol_attr_grp = {
+ .attrs = rc_dev_rw_protocol_attrs,
+};
+
+static struct attribute *rc_dev_ro_protocol_attrs[] = {
+ &dev_attr_ro_protocols.attr,
+ NULL,
+};
+
+static const struct attribute_group rc_dev_ro_protocol_attr_grp = {
+ .attrs = rc_dev_ro_protocol_attrs,
};
static struct attribute *rc_dev_filter_attrs[] = {
@@ -1529,7 +1556,7 @@ static const struct attribute_group rc_dev_wakeup_filter_attr_grp = {
.attrs = rc_dev_wakeup_filter_attrs,
};
-static struct device_type rc_dev_type = {
+static const struct device_type rc_dev_type = {
.release = rc_dev_release,
.uevent = rc_dev_uevent,
};
@@ -1553,8 +1580,7 @@ struct rc_dev *rc_allocate_device(enum rc_driver_type type)
dev->input_dev->setkeycode = ir_setkeycode;
input_set_drvdata(dev->input_dev, dev);
- setup_timer(&dev->timer_keyup, ir_timer_keyup,
- (unsigned long)dev);
+ timer_setup(&dev->timer_keyup, ir_timer_keyup, 0);
spin_lock_init(&dev->rc_map.lock);
spin_lock_init(&dev->keylock);
@@ -1638,6 +1664,9 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
rc_proto = BIT_ULL(rc_map->rc_proto);
+ if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
+ dev->enabled_protocols = dev->allowed_protocols;
+
if (dev->change_protocol) {
rc = dev->change_protocol(dev, &rc_proto);
if (rc < 0)
@@ -1729,8 +1758,10 @@ int rc_register_device(struct rc_dev *dev)
dev_set_drvdata(&dev->dev, dev);
dev->dev.groups = dev->sysfs_groups;
- if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
- dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+ if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
+ dev->sysfs_groups[attr++] = &rc_dev_ro_protocol_attr_grp;
+ else if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
+ dev->sysfs_groups[attr++] = &rc_dev_rw_protocol_attr_grp;
if (dev->s_filter)
dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
if (dev->s_wakeup_filter)
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 6784cb9fc4e7..6bfc24885b5c 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -186,7 +186,7 @@ struct redrat3_error {
} __packed;
/* table of devices that work with this driver */
-static struct usb_device_id redrat3_dev_table[] = {
+static const struct usb_device_id redrat3_dev_table[] = {
/* Original version of the RedRat3 */
{USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3USB_PRODUCT_ID)},
/* Second Version/release of the RedRat3 - RetRat3-II */
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 8b66926bc16a..8bf5637b3a69 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -470,7 +470,7 @@ static int hardware_init_port(void)
return 0;
}
-static void serial_ir_timeout(unsigned long arg)
+static void serial_ir_timeout(struct timer_list *unused)
{
DEFINE_IR_RAW_EVENT(ev);
@@ -540,8 +540,7 @@ static int serial_ir_probe(struct platform_device *dev)
serial_ir.rcdev = rcdev;
- setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
- (unsigned long)&serial_ir);
+ timer_setup(&serial_ir.timeout_timer, serial_ir_timeout, 0);
result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
share_irq ? IRQF_SHARED : 0,
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index bc906fb128d5..9ee2c9196b4d 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val);
static irqreturn_t sir_interrupt(int irq, void *dev_id);
static void send_space(unsigned long len);
static void send_pulse(unsigned long len);
-static void init_hardware(void);
+static int init_hardware(void);
static void drop_hardware(void);
/* Initialisation */
@@ -120,7 +120,7 @@ static void add_read_queue(int flag, unsigned long val)
}
/* SECTION: Hardware */
-static void sir_timeout(unsigned long data)
+static void sir_timeout(struct timer_list *unused)
{
/*
* if last received signal was a pulse, but receiving stopped
@@ -263,11 +263,36 @@ static void send_pulse(unsigned long len)
}
}
-static void init_hardware(void)
+static int init_hardware(void)
{
+ u8 scratch, scratch2, scratch3;
unsigned long flags;
spin_lock_irqsave(&hardware_lock, flags);
+
+ /*
+ * This is a simple port existence test, borrowed from the autoconfig
+ * function in drivers/tty/serial/8250/8250_port.c
+ */
+ scratch = sinp(UART_IER);
+ soutp(UART_IER, 0);
+#ifdef __i386__
+ outb(0xff, 0x080);
+#endif
+ scratch2 = sinp(UART_IER) & 0x0f;
+ soutp(UART_IER, 0x0f);
+#ifdef __i386__
+ outb(0x00, 0x080);
+#endif
+ scratch3 = sinp(UART_IER) & 0x0f;
+ soutp(UART_IER, scratch);
+ if (scratch2 != 0 || scratch3 != 0x0f) {
+ /* we fail, there's nothing here */
+ spin_unlock_irqrestore(&hardware_lock, flags);
+ pr_err("port existence test failed, cannot continue\n");
+ return -ENODEV;
+ }
+
/* reset UART */
outb(0, io + UART_MCR);
outb(0, io + UART_IER);
@@ -285,6 +310,8 @@ static void init_hardware(void)
/* turn on UART */
outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
spin_unlock_irqrestore(&hardware_lock, flags);
+
+ return 0;
}
static void drop_hardware(void)
@@ -321,7 +348,7 @@ static int sir_ir_probe(struct platform_device *dev)
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->dev.parent = &sir_ir_dev->dev;
- setup_timer(&timerlist, sir_timeout, 0);
+ timer_setup(&timerlist, sir_timeout, 0);
/* get I/O port access and IRQ line */
if (!devm_request_region(&sir_ir_dev->dev, io, 8, KBUILD_MODNAME)) {
@@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev)
pr_err("IRQ %d already in use.\n", irq);
return retval;
}
+
+ retval = init_hardware();
+ if (retval) {
+ del_timer_sync(&timerlist);
+ return retval;
+ }
+
pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
if (retval < 0)
return retval;
- init_hardware();
-
return 0;
}
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index a8e39c635f34..d2efd7b2c3bc 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -49,7 +49,7 @@ struct st_rc_device {
#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */
#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */
-/**
+/*
* IRQ set: Enable full FIFO 1 -> bit 3;
* Enable overrun IRQ 1 -> bit 2;
* Enable last symbol IRQ 1 -> bit 1:
@@ -72,7 +72,7 @@ static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
ir_raw_event_store(rdev, &ev);
}
-/**
+/*
* RX graphical example to better understand the difference between ST IR block
* output and standard definition used by LIRC (and most of the world!)
*
@@ -317,7 +317,7 @@ static int st_rc_probe(struct platform_device *pdev)
device_init_wakeup(dev, true);
dev_pm_set_wake_irq(dev, rc_dev->irq);
- /**
+ /*
* for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW
* lircd expects a long space first before a signal train to sync.
*/
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index f03a174ddf9d..c9a70fda88a8 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -43,7 +43,7 @@
#define USB_STREAMZAP_PRODUCT_ID 0x0000
/* table of devices that work with this driver */
-static struct usb_device_id streamzap_table[] = {
+static const struct usb_device_id streamzap_table[] = {
/* Streamzap Remote Control */
{ USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) },
/* Terminating entry */
@@ -191,7 +191,7 @@ static void sz_push_half_space(struct streamzap_ir *sz,
sz_push_full_space(sz, value & SZ_SPACE_MASK);
}
-/**
+/*
* streamzap_callback - usb IRQ handler callback
*
* This procedure is invoked on reception of data from
@@ -321,7 +321,7 @@ out:
return NULL;
}
-/**
+/*
* streamzap_probe
*
* Called by usb-core to associated with a candidate device
@@ -450,7 +450,7 @@ free_sz:
return retval;
}
-/**
+/*
* streamzap_disconnect
*
* Called by the usb core when the device is removed from the system.
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
new file mode 100644
index 000000000000..9d4c17230c3a
--- /dev/null
+++ b/drivers/media/rc/tango-ir.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <media/rc-core.h>
+
+#define DRIVER_NAME "tango-ir"
+
+#define IR_NEC_CTRL 0x00
+#define IR_NEC_DATA 0x04
+#define IR_CTRL 0x08
+#define IR_RC5_CLK_DIV 0x0c
+#define IR_RC5_DATA 0x10
+#define IR_INT 0x14
+
+#define NEC_TIME_BASE 560
+#define RC5_TIME_BASE 1778
+
+#define RC6_CTRL 0x00
+#define RC6_CLKDIV 0x04
+#define RC6_DATA0 0x08
+#define RC6_DATA1 0x0c
+#define RC6_DATA2 0x10
+#define RC6_DATA3 0x14
+#define RC6_DATA4 0x18
+
+#define RC6_CARRIER 36000
+#define RC6_TIME_BASE 16
+
+#define NEC_CAP(n) ((n) << 24)
+#define GPIO_SEL(n) ((n) << 16)
+#define DISABLE_NEC (BIT(4) | BIT(8))
+#define ENABLE_RC5 (BIT(0) | BIT(9))
+#define ENABLE_RC6 (BIT(0) | BIT(7))
+#define ACK_IR_INT (BIT(0) | BIT(1))
+#define ACK_RC6_INT (BIT(31))
+
+#define NEC_ANY (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32)
+
+struct tango_ir {
+ void __iomem *rc5_base;
+ void __iomem *rc6_base;
+ struct rc_dev *rc;
+ struct clk *clk;
+};
+
+static void tango_ir_handle_nec(struct tango_ir *ir)
+{
+ u32 v, code;
+ enum rc_proto proto;
+
+ v = readl_relaxed(ir->rc5_base + IR_NEC_DATA);
+ if (!v) {
+ rc_repeat(ir->rc);
+ return;
+ }
+
+ code = ir_nec_bytes_to_scancode(v, v >> 8, v >> 16, v >> 24, &proto);
+ rc_keydown(ir->rc, proto, code, 0);
+}
+
+static void tango_ir_handle_rc5(struct tango_ir *ir)
+{
+ u32 data, field, toggle, addr, cmd, code;
+
+ data = readl_relaxed(ir->rc5_base + IR_RC5_DATA);
+ if (data & BIT(31))
+ return;
+
+ field = data >> 12 & 1;
+ toggle = data >> 11 & 1;
+ addr = data >> 6 & 0x1f;
+ cmd = (data & 0x3f) | (field ^ 1) << 6;
+
+ code = RC_SCANCODE_RC5(addr, cmd);
+ rc_keydown(ir->rc, RC_PROTO_RC5, code, toggle);
+}
+
+static void tango_ir_handle_rc6(struct tango_ir *ir)
+{
+ u32 data0, data1, toggle, mode, addr, cmd, code;
+
+ data0 = readl_relaxed(ir->rc6_base + RC6_DATA0);
+ data1 = readl_relaxed(ir->rc6_base + RC6_DATA1);
+
+ mode = data0 >> 1 & 7;
+ if (mode != 0)
+ return;
+
+ toggle = data0 & 1;
+ addr = data0 >> 16;
+ cmd = data1;
+
+ code = RC_SCANCODE_RC6_0(addr, cmd);
+ rc_keydown(ir->rc, RC_PROTO_RC6_0, code, toggle);
+}
+
+static irqreturn_t tango_ir_irq(int irq, void *dev_id)
+{
+ struct tango_ir *ir = dev_id;
+ unsigned int rc5_stat;
+ unsigned int rc6_stat;
+
+ rc5_stat = readl_relaxed(ir->rc5_base + IR_INT);
+ writel_relaxed(rc5_stat, ir->rc5_base + IR_INT);
+
+ rc6_stat = readl_relaxed(ir->rc6_base + RC6_CTRL);
+ writel_relaxed(rc6_stat, ir->rc6_base + RC6_CTRL);
+
+ if (!(rc5_stat & 3) && !(rc6_stat & BIT(31)))
+ return IRQ_NONE;
+
+ if (rc5_stat & BIT(0))
+ tango_ir_handle_rc5(ir);
+
+ if (rc5_stat & BIT(1))
+ tango_ir_handle_nec(ir);
+
+ if (rc6_stat & BIT(31))
+ tango_ir_handle_rc6(ir);
+
+ return IRQ_HANDLED;
+}
+
+static int tango_change_protocol(struct rc_dev *dev, u64 *rc_type)
+{
+ struct tango_ir *ir = dev->priv;
+ u32 rc5_ctrl = DISABLE_NEC;
+ u32 rc6_ctrl = 0;
+
+ if (*rc_type & NEC_ANY)
+ rc5_ctrl = 0;
+
+ if (*rc_type & RC_PROTO_BIT_RC5)
+ rc5_ctrl |= ENABLE_RC5;
+
+ if (*rc_type & RC_PROTO_BIT_RC6_0)
+ rc6_ctrl = ENABLE_RC6;
+
+ writel_relaxed(rc5_ctrl, ir->rc5_base + IR_CTRL);
+ writel_relaxed(rc6_ctrl, ir->rc6_base + RC6_CTRL);
+
+ return 0;
+}
+
+static int tango_ir_probe(struct platform_device *pdev)
+{
+ const char *map_name = RC_MAP_TANGO;
+ struct device *dev = &pdev->dev;
+ struct rc_dev *rc;
+ struct tango_ir *ir;
+ struct resource *rc5_res;
+ struct resource *rc6_res;
+ u64 clkrate, clkdiv;
+ int irq, err;
+ u32 val;
+
+ rc5_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rc5_res)
+ return -EINVAL;
+
+ rc6_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rc6_res)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ ir = devm_kzalloc(dev, sizeof(*ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ ir->rc5_base = devm_ioremap_resource(dev, rc5_res);
+ if (IS_ERR(ir->rc5_base))
+ return PTR_ERR(ir->rc5_base);
+
+ ir->rc6_base = devm_ioremap_resource(dev, rc6_res);
+ if (IS_ERR(ir->rc6_base))
+ return PTR_ERR(ir->rc6_base);
+
+ ir->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ir->clk))
+ return PTR_ERR(ir->clk);
+
+ rc = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE);
+ if (!rc)
+ return -ENOMEM;
+
+ of_property_read_string(dev->of_node, "linux,rc-map-name", &map_name);
+
+ rc->device_name = DRIVER_NAME;
+ rc->driver_name = DRIVER_NAME;
+ rc->input_phys = DRIVER_NAME "/input0";
+ rc->map_name = map_name;
+ rc->allowed_protocols = NEC_ANY | RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_0;
+ rc->change_protocol = tango_change_protocol;
+ rc->priv = ir;
+ ir->rc = rc;
+
+ err = clk_prepare_enable(ir->clk);
+ if (err)
+ return err;
+
+ clkrate = clk_get_rate(ir->clk);
+
+ clkdiv = clkrate * NEC_TIME_BASE;
+ do_div(clkdiv, 1000000);
+
+ val = NEC_CAP(31) | GPIO_SEL(12) | clkdiv;
+ writel_relaxed(val, ir->rc5_base + IR_NEC_CTRL);
+
+ clkdiv = clkrate * RC5_TIME_BASE;
+ do_div(clkdiv, 1000000);
+
+ writel_relaxed(DISABLE_NEC, ir->rc5_base + IR_CTRL);
+ writel_relaxed(clkdiv, ir->rc5_base + IR_RC5_CLK_DIV);
+ writel_relaxed(ACK_IR_INT, ir->rc5_base + IR_INT);
+
+ clkdiv = clkrate * RC6_TIME_BASE;
+ do_div(clkdiv, RC6_CARRIER);
+
+ writel_relaxed(ACK_RC6_INT, ir->rc6_base + RC6_CTRL);
+ writel_relaxed((clkdiv >> 2) << 18 | clkdiv, ir->rc6_base + RC6_CLKDIV);
+
+ err = devm_request_irq(dev, irq, tango_ir_irq, IRQF_SHARED,
+ dev_name(dev), ir);
+ if (err)
+ goto err_clk;
+
+ err = devm_rc_register_device(dev, rc);
+ if (err)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, ir);
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(ir->clk);
+ return err;
+}
+
+static int tango_ir_remove(struct platform_device *pdev)
+{
+ struct tango_ir *ir = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ir->clk);
+ return 0;
+}
+
+static const struct of_device_id tango_ir_dt_ids[] = {
+ { .compatible = "sigma,smp8642-ir" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tango_ir_dt_ids);
+
+static struct platform_driver tango_ir_driver = {
+ .probe = tango_ir_probe,
+ .remove = tango_ir_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = tango_ir_dt_ids,
+ },
+};
+module_platform_driver(tango_ir_driver);
+
+MODULE_DESCRIPTION("SMP86xx IR decoder driver");
+MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 8b39d8dc97a0..5c87c5c6a455 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1397,9 +1397,9 @@ static u32 MT2063_Round_fLO(u32 f_LO, u32 f_LO_Step, u32 f_ref)
* risk of overflow. It accurately calculates
* f_ref * num / denom to within 1 HZ with fixed math.
*
- * @num : Fractional portion of the multiplier
+ * @f_ref: SRO frequency.
+ * @num: Fractional portion of the multiplier
* @denom: denominator portion of the ratio
- * @f_Ref: SRO frequency.
*
* This calculation handles f_ref as two separate 14-bit fields.
* Therefore, a maximum value of 2^28-1 may safely be used for f_ref.
@@ -1464,8 +1464,6 @@ static u32 MT2063_CalcLO1Mult(u32 *Div,
* @f_LO: desired LO frequency.
* @f_LO_Step: Minimum step size for the LO (in Hz).
* @f_Ref: SRO frequency.
- * @f_Avoid: Range of PLL frequencies to avoid near
- * integer multiples of f_Ref (in Hz).
*
* Returns: Recalculated LO frequency.
*/
diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
index 5a28ce3a1d49..38dbc128340d 100644
--- a/drivers/media/usb/as102/as102_fw.c
+++ b/drivers/media/usb/as102/as102_fw.c
@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
unsigned char *cmd,
const struct firmware *firmware) {
- struct as10x_fw_pkt_t fw_pkt;
+ struct as10x_fw_pkt_t *fw_pkt;
int total_read_bytes = 0, errno = 0;
unsigned char addr_has_changed = 0;
+ fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
+ if (!fw_pkt)
+ return -ENOMEM;
+
+
for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
int read_bytes = 0, data_len = 0;
/* parse intel hex line */
read_bytes = parse_hex_line(
(u8 *) (firmware->data + total_read_bytes),
- fw_pkt.raw.address,
- fw_pkt.raw.data,
+ fw_pkt->raw.address,
+ fw_pkt->raw.data,
&data_len,
&addr_has_changed);
@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
/* detect the end of file */
total_read_bytes += read_bytes;
if (total_read_bytes == firmware->size) {
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x03;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x03;
/* send EOF command */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt, 2, 0);
+ fw_pkt, 2, 0);
if (errno < 0)
goto error;
} else {
if (!addr_has_changed) {
/* prepare command to send */
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x01;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x01;
- data_len += sizeof(fw_pkt.u.request);
- data_len += sizeof(fw_pkt.raw.address);
+ data_len += sizeof(fw_pkt->u.request);
+ data_len += sizeof(fw_pkt->raw.address);
/* send cmd to device */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt,
+ fw_pkt,
data_len,
0);
if (errno < 0)
@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
}
}
error:
+ kfree(fw_pkt);
return (errno == 0) ? total_read_bytes : errno;
}
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 34dc7e062471..d9093a3c57c5 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -105,9 +105,9 @@ static struct tda18271_config hauppauge_woodbury_tunerconfig = {
static void au0828_restart_dvb_streaming(struct work_struct *work);
-static void au0828_bulk_timeout(unsigned long data)
+static void au0828_bulk_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
dprintk(1, "%s called\n", __func__);
dev->bulk_timeout_running = 0;
@@ -648,9 +648,7 @@ int au0828_dvb_register(struct au0828_dev *dev)
return ret;
}
- dev->bulk_timeout.function = au0828_bulk_timeout;
- dev->bulk_timeout.data = (unsigned long) dev;
- init_timer(&dev->bulk_timeout);
+ timer_setup(&dev->bulk_timeout, au0828_bulk_timeout, 0);
return 0;
}
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index ef7d1b830ca3..1b8ec5d9e7ab 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -342,7 +342,7 @@ static const struct i2c_adapter au0828_i2c_adap_template = {
.algo = &au0828_i2c_algo_template,
};
-static struct i2c_client au0828_i2c_client_template = {
+static const struct i2c_client au0828_i2c_client_template = {
.name = "au0828 internal",
};
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 7996eb83a54e..af68afe085b5 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -269,7 +269,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
static int au0828_probe_i2c_ir(struct au0828_dev *dev)
{
int i = 0;
- const unsigned short addr_list[] = {
+ static const unsigned short addr_list[] = {
0x47, I2C_CLIENT_END
};
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index e0930ce59b8d..9dd6bdb7304f 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -79,7 +79,7 @@ vbi_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-struct vb2_ops au0828_vbi_qops = {
+const struct vb2_ops au0828_vbi_qops = {
.queue_setup = vbi_queue_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 9342402b92f7..a240153821e0 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -67,10 +67,10 @@ static inline void print_err_status(struct au0828_dev *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -954,9 +954,9 @@ int au0828_analog_unregister(struct au0828_dev *dev)
/* This function ensures that video frames continue to be delivered even if
the ITU-656 input isn't receiving any data (thereby preventing applications
such as tvtime from hanging) */
-static void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
struct au0828_dmaqueue *dma_q = &dev->vidq;
struct au0828_buffer *buf;
unsigned char *vid_data;
@@ -978,9 +978,9 @@ static void au0828_vid_buffer_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
struct au0828_dmaqueue *dma_q = &dev->vbiq;
struct au0828_buffer *buf;
unsigned char *vbi_data;
@@ -1953,10 +1953,8 @@ int au0828_analog_register(struct au0828_dev *dev,
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vbiq.active);
- setup_timer(&dev->vid_timeout, au0828_vid_buffer_timeout,
- (unsigned long)dev);
- setup_timer(&dev->vbi_timeout, au0828_vbi_buffer_timeout,
- (unsigned long)dev);
+ timer_setup(&dev->vid_timeout, au0828_vid_buffer_timeout, 0);
+ timer_setup(&dev->vbi_timeout, au0828_vbi_buffer_timeout, 0);
dev->width = NTSC_STD_W;
dev->height = NTSC_STD_H;
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 05e445fe0b77..f6f37e8ef51d 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -358,7 +358,7 @@ void au0828_dvb_suspend(struct au0828_dev *dev);
void au0828_dvb_resume(struct au0828_dev *dev);
/* au0828-vbi.c */
-extern struct vb2_ops au0828_vbi_qops;
+extern const struct vb2_ops au0828_vbi_qops;
#define dprintk(level, fmt, arg...)\
do { if (au0828_debug & level)\
diff --git a/drivers/media/usb/b2c2/Kconfig b/drivers/media/usb/b2c2/Kconfig
index 17d35833980c..a620ae42dfc8 100644
--- a/drivers/media/usb/b2c2/Kconfig
+++ b/drivers/media/usb/b2c2/Kconfig
@@ -10,6 +10,6 @@ config DVB_B2C2_FLEXCOP_USB_DEBUG
bool "Enable debug for the B2C2 FlexCop drivers"
depends on DVB_B2C2_FLEXCOP_USB
select DVB_B2C2_FLEXCOP_DEBUG
- help
- Say Y if you want to enable the module option to control debug messages
- of all B2C2 FlexCop drivers.
+ help
+ Say Y if you want to enable the module option to control debug messages
+ of all B2C2 FlexCop drivers.
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e0daa9b6c2a0..54d9d0cb326f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -847,7 +847,7 @@ struct cx231xx_board cx231xx_boards[] = {
.demod_addr = 0x64, /* 0xc8 >> 1 */
.demod_i2c_master = I2C_1_MUX_3,
.has_dvb = 1,
- .ir_i2c_master = I2C_0,
+ .decoder = CX231XX_AVDECODER,
.norm = V4L2_STD_PAL,
.output_mode = OUT_MODE_VIP11,
.tuner_addr = 0x60, /* 0xc0 >> 1 */
@@ -872,6 +872,7 @@ struct cx231xx_board cx231xx_boards[] = {
.name = "Astrometa T2hybrid",
.tuner_type = TUNER_ABSENT,
.has_dvb = 1,
+ .decoder = CX231XX_AVDECODER,
.output_mode = OUT_MODE_VIP11,
.agc_analog_digital_select_gpio = 0x01,
.ctl_pin_status_mask = 0xffffffc4,
@@ -1684,7 +1685,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
nr = dev->devno;
assoc_desc = udev->actconfig->intf_assoc[0];
- if (assoc_desc->bFirstInterface != ifnum) {
+ if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
dev_err(d, "Not found matching IAD interface\n");
retval = -ENODEV;
goto err_if;
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index c18bb33e060e..54abc1a7c8e1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -179,10 +179,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index 76e901920f6f..d3bfe8e23b1f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -43,10 +43,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -285,7 +285,7 @@ static void vbi_buffer_release(struct videobuf_queue *vq,
free_buffer(vq, buf);
}
-struct videobuf_queue_ops cx231xx_vbi_qops = {
+const struct videobuf_queue_ops cx231xx_vbi_qops = {
.buf_setup = vbi_buffer_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.h b/drivers/media/usb/cx231xx/cx231xx-vbi.h
index 16c7d20a22a4..b33d2bdb621c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.h
@@ -22,7 +22,7 @@
#ifndef _CX231XX_VBI_H
#define _CX231XX_VBI_H
-extern struct videobuf_queue_ops cx231xx_vbi_qops;
+extern const struct videobuf_queue_ops cx231xx_vbi_qops;
#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
#define NTSC_VBI_END_LINE 21
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 179b8481a870..226059fc672b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -199,10 +199,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 096bb75a24e5..2bf3bd81280a 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -628,8 +628,7 @@ static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
}
ret = dvb_usbv2_device_power_ctrl(d, 0);
- if (ret < 0)
- goto err;
+
err:
if (!adap->suspend_resume_active) {
adap->active_fe = -1;
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 0eb33e043079..a221bb8a12b4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -516,7 +516,6 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
data required to program */
block_len = (msg->len / 8);
left_over_len = (msg->len % 8);
- index = 0;
mxl_i2c("block_len %d, left_over_len %d",
block_len, left_over_len);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 95a7b9123f8e..c76e78f9638a 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1598,7 +1598,7 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
struct rtl28xxu_dev *dev = d->priv;
u8 buf[5];
u32 rc_code;
- struct rtl28xxu_reg_val rc_nec_tab[] = {
+ static const struct rtl28xxu_reg_val rc_nec_tab[] = {
{ 0x3033, 0x80 },
{ 0x3020, 0x43 },
{ 0x3021, 0x16 },
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 7ba975bea96a..540886b3bb29 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -37,48 +37,9 @@ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_pr
return 0;
}
-static struct rc_map_table rc_map_a800_table[] = {
- { 0x0201, KEY_MODE }, /* SOURCE */
- { 0x0200, KEY_POWER2 }, /* POWER */
- { 0x0205, KEY_1 }, /* 1 */
- { 0x0206, KEY_2 }, /* 2 */
- { 0x0207, KEY_3 }, /* 3 */
- { 0x0209, KEY_4 }, /* 4 */
- { 0x020a, KEY_5 }, /* 5 */
- { 0x020b, KEY_6 }, /* 6 */
- { 0x020d, KEY_7 }, /* 7 */
- { 0x020e, KEY_8 }, /* 8 */
- { 0x020f, KEY_9 }, /* 9 */
- { 0x0212, KEY_LEFT }, /* L / DISPLAY */
- { 0x0211, KEY_0 }, /* 0 */
- { 0x0213, KEY_RIGHT }, /* R / CH RTN */
- { 0x0217, KEY_CAMERA }, /* SNAP SHOT */
- { 0x0210, KEY_LAST }, /* 16-CH PREV */
- { 0x021e, KEY_VOLUMEDOWN }, /* VOL DOWN */
- { 0x020c, KEY_ZOOM }, /* FULL SCREEN */
- { 0x021f, KEY_VOLUMEUP }, /* VOL UP */
- { 0x0214, KEY_MUTE }, /* MUTE */
- { 0x0208, KEY_AUDIO }, /* AUDIO */
- { 0x0219, KEY_RECORD }, /* RECORD */
- { 0x0218, KEY_PLAY }, /* PLAY */
- { 0x021b, KEY_STOP }, /* STOP */
- { 0x021a, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */
- { 0x021d, KEY_BACK }, /* << / RED */
- { 0x021c, KEY_FORWARD }, /* >> / YELLOW */
- { 0x0203, KEY_TEXT }, /* TELETEXT */
- { 0x0204, KEY_EPG }, /* EPG */
- { 0x0215, KEY_MENU }, /* MENU */
-
- { 0x0303, KEY_CHANNELUP }, /* CH UP */
- { 0x0302, KEY_CHANNELDOWN }, /* CH DOWN */
- { 0x0301, KEY_FIRST }, /* |<< / GREEN */
- { 0x0300, KEY_LAST }, /* >>| / BLUE */
-
-};
-
-static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int a800_rc_query(struct dvb_usb_device *d)
{
- int ret;
+ int ret = 0;
u8 *key = kmalloc(5, GFP_KERNEL);
if (!key)
return -ENOMEM;
@@ -90,11 +51,12 @@ static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
goto out;
}
- /* call the universal NEC remote processor, to find out the key's state and event */
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
- if (key[0] != 0)
- deb_rc("key: %*ph\n", 5, key);
- ret = 0;
+ /* Note that extended nec and nec32 are dropped */
+ if (key[0] == 1)
+ rc_keydown(d->rc_dev, RC_PROTO_NEC,
+ RC_SCANCODE_NEC(key[1], key[3]), 0);
+ else if (key[0] == 2)
+ rc_repeat(d->rc_dev);
out:
kfree(key);
return ret;
@@ -157,11 +119,12 @@ static struct dvb_usb_device_properties a800_properties = {
.power_ctrl = a800_power_ctrl,
.identify_state = a800_identify_state,
- .rc.legacy = {
- .rc_interval = DEFAULT_RC_INTERVAL,
- .rc_map_table = rc_map_a800_table,
- .rc_map_size = ARRAY_SIZE(rc_map_a800_table),
- .rc_query = a800_rc_query,
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_AVERMEDIA_M135A,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = a800_rc_query,
+ .allowed_protos = RC_PROTO_BIT_NEC,
},
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index f9772ad0a2a5..5a2f81311fb7 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -26,7 +26,7 @@
#include "cinergyT2.h"
-/**
+/*
* convert linux-dvb frontend parameter set into TPS.
* See ETSI ETS-300744, section 4.6.2, table 9 for details.
*
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 6020170fe99a..366b05529915 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -291,7 +291,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -325,7 +325,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -478,7 +478,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap)
&stk7700ph_dib7700_xc3028_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7070p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7770p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1677,10 +1677,10 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
return -EINVAL;
}
- /** Update PLL if needed ratio **/
+ /* Update PLL if needed ratio */
state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, 0);
- /** Get optimize PLL ratio to remove spurious **/
+ /* Get optimize PLL ratio to remove spurious */
pll_ratio = dib8090_compute_pll_parameters(fe);
if (pll_ratio == 17)
timf = 21387946;
@@ -1691,7 +1691,7 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
else
timf = 18179756;
- /** Update ratio **/
+ /* Update ratio */
state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, pll_ratio);
state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, timf);
@@ -3056,7 +3056,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
@@ -3109,7 +3109,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
/* initialize IC 0 */
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3139,7 +3139,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3214,7 +3214,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap)
1, 0x10, &tfe7790p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
@@ -3309,7 +3309,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3357,7 +3357,7 @@ static int novatd_sleep_override(struct dvb_frontend* fe)
return state->sleep(fe);
}
-/**
+/*
* novatd_frontend_attach - Nova-TD specific attach
*
* Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
@@ -3384,7 +3384,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -3620,7 +3620,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
/* Demodulator not found for some reason? */
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 8207e6900656..bcacb0f22028 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
{
- u8 wbuf[1] = { offs };
- return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
+ u8 *buf;
+ int rc;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = offs;
+
+ rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
+ *val = buf[1];
+ kfree(buf);
+
+ return rc;
}
EXPORT_SYMBOL(dibusb_read_eeprom_byte);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
index 701c10835482..65e2c9e2cdc9 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
@@ -280,10 +280,11 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d)
dev->change_protocol = d->props.rc.core.change_protocol;
dev->allowed_protocols = d->props.rc.core.allowed_protos;
usb_to_input_id(d->udev, &dev->input_id);
- dev->device_name = "IR-receiver inside an USB DVB receiver";
+ dev->device_name = d->desc->name;
dev->input_phys = d->rc_phys;
dev->dev.parent = &d->udev->dev;
dev->priv = d;
+ dev->scancode_mask = d->props.rc.core.scancode_mask;
err = rc_register_device(dev);
if (err < 0) {
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 6c7c4637530f..e71fc86b4fb2 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -208,6 +208,7 @@ struct dvb_rc {
int (*rc_query) (struct dvb_usb_device *d);
int rc_interval;
bool bulk_mode; /* uses bulk mode */
+ u32 scancode_mask;
};
/**
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 0251a4e91d47..b6046e0e07f6 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -261,28 +261,6 @@ static int jdvbt90502_read_signal_strength(struct dvb_frontend *fe,
return 0;
}
-
-/* filter out un-supported properties to notify users */
-static int jdvbt90502_set_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- int r = 0;
-
- switch (tvp->cmd) {
- case DTV_DELIVERY_SYSTEM:
- if (tvp->u.data != SYS_ISDBT)
- r = -EINVAL;
- break;
- case DTV_CLEAR:
- case DTV_TUNE:
- case DTV_FREQUENCY:
- break;
- default:
- r = -EINVAL;
- }
- return r;
-}
-
static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
@@ -319,7 +297,7 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
}
-/**
+/*
* (reg, val) commad list to initialize this module.
* captured on a Windows box.
*/
@@ -457,8 +435,6 @@ static const struct dvb_frontend_ops jdvbt90502_ops = {
.init = jdvbt90502_init,
.write = _jdvbt90502_write,
- .set_property = jdvbt90502_set_property,
-
.set_frontend = jdvbt90502_set_frontend,
.read_status = jdvbt90502_read_status,
diff --git a/drivers/media/usb/dvb-usb/friio.c b/drivers/media/usb/dvb-usb/friio.c
index 62abe6c43a32..16875945e662 100644
--- a/drivers/media/usb/dvb-usb/friio.c
+++ b/drivers/media/usb/dvb-usb/friio.c
@@ -21,7 +21,7 @@ MODULE_PARM_DESC(debug,
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-/**
+/*
* Indirect I2C access to the PLL via FE.
* whole I2C protocol data to the PLL is sent via the FE's I2C register.
* This is done by a control msg to the FE with the I2C data accompanied, and
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 13340af0d39c..2527b88beb87 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -97,82 +97,22 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
return vp7045_usb_op(d,SET_TUNER_POWER,&v,1,NULL,0,150);
}
-/* remote control stuff */
-
-/* The keymapping struct. Somehow this should be loaded to the driver, but
- * currently it is hardcoded. */
-static struct rc_map_table rc_map_vp7045_table[] = {
- { 0x0016, KEY_POWER },
- { 0x0010, KEY_MUTE },
- { 0x0003, KEY_1 },
- { 0x0001, KEY_2 },
- { 0x0006, KEY_3 },
- { 0x0009, KEY_4 },
- { 0x001d, KEY_5 },
- { 0x001f, KEY_6 },
- { 0x000d, KEY_7 },
- { 0x0019, KEY_8 },
- { 0x001b, KEY_9 },
- { 0x0015, KEY_0 },
- { 0x0005, KEY_CHANNELUP },
- { 0x0002, KEY_CHANNELDOWN },
- { 0x001e, KEY_VOLUMEUP },
- { 0x000a, KEY_VOLUMEDOWN },
- { 0x0011, KEY_RECORD },
- { 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */
- { 0x0014, KEY_PLAY },
- { 0x001a, KEY_STOP },
- { 0x0040, KEY_REWIND },
- { 0x0012, KEY_FASTFORWARD },
- { 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */
- { 0x004c, KEY_PAUSE },
- { 0x004d, KEY_SCREEN }, /* Full screen mode. */
- { 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */
- { 0x000c, KEY_CANCEL }, /* Cancel */
- { 0x001c, KEY_EPG }, /* EPG */
- { 0x0000, KEY_TAB }, /* Tab */
- { 0x0048, KEY_INFO }, /* Preview */
- { 0x0004, KEY_LIST }, /* RecordList */
- { 0x000f, KEY_TEXT }, /* Teletext */
- { 0x0041, KEY_PREVIOUSSONG },
- { 0x0042, KEY_NEXTSONG },
- { 0x004b, KEY_UP },
- { 0x0051, KEY_DOWN },
- { 0x004e, KEY_LEFT },
- { 0x0052, KEY_RIGHT },
- { 0x004f, KEY_ENTER },
- { 0x0013, KEY_CANCEL },
- { 0x004a, KEY_CLEAR },
- { 0x0054, KEY_PRINT }, /* Capture */
- { 0x0043, KEY_SUBTITLE }, /* Subtitle/CC */
- { 0x0008, KEY_VIDEO }, /* A/V */
- { 0x0007, KEY_SLEEP }, /* Hibernate */
- { 0x0045, KEY_ZOOM }, /* Zoom+ */
- { 0x0018, KEY_RED},
- { 0x0053, KEY_GREEN},
- { 0x005e, KEY_YELLOW},
- { 0x005f, KEY_BLUE}
-};
-
-static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int vp7045_rc_query(struct dvb_usb_device *d)
{
u8 key;
- int i;
vp7045_usb_op(d,RC_VAL_READ,NULL,0,&key,1,20);
deb_rc("remote query key: %x %d\n",key,key);
- if (key == 0x44) {
- *state = REMOTE_NO_KEY_PRESSED;
- return 0;
+ if (key != 0x44) {
+ /*
+ * The 8 bit address isn't available, but since the remote uses
+ * address 0 we'll use that. nec repeats are ignored too, even
+ * though the remote sends them.
+ */
+ rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(0, key), 0);
}
- for (i = 0; i < ARRAY_SIZE(rc_map_vp7045_table); i++)
- if (rc5_data(&rc_map_vp7045_table[i]) == key) {
- *state = REMOTE_KEY_PRESSED;
- *event = rc_map_vp7045_table[i].keycode;
- break;
- }
return 0;
}
@@ -265,11 +205,13 @@ static struct dvb_usb_device_properties vp7045_properties = {
.power_ctrl = vp7045_power_ctrl,
.read_mac_address = vp7045_read_mac_addr,
- .rc.legacy = {
- .rc_interval = 400,
- .rc_map_table = rc_map_vp7045_table,
- .rc_map_size = ARRAY_SIZE(rc_map_vp7045_table),
- .rc_query = vp7045_rc_query,
+ .rc.core = {
+ .rc_interval = 400,
+ .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = vp7045_rc_query,
+ .allowed_protos = RC_PROTO_BIT_NEC,
+ .scancode_mask = 0xff,
},
.num_device_descs = 2,
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 4a7db623fe29..9950a740e04e 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -112,10 +112,10 @@ static inline void print_err_status(struct em28xx *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 66c5012a628a..9bf49d666e5a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -882,7 +882,7 @@ static const struct i2c_adapter em28xx_adap_template = {
.algo = &em28xx_algo,
};
-static struct i2c_client em28xx_client_template = {
+static const struct i2c_client em28xx_client_template = {
.name = "em28xx internal",
};
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
index 8dfcb56bf4b3..9c411aac3878 100644
--- a/drivers/media/usb/em28xx/em28xx-v4l.h
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -16,4 +16,4 @@
int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
void em28xx_stop_vbi_streaming(struct vb2_queue *vq);
-extern struct vb2_ops em28xx_vbi_qops;
+extern const struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 0bac552bbe87..f5123651ef30 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -93,7 +93,7 @@ vbi_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-struct vb2_ops em28xx_vbi_qops = {
+const struct vb2_ops em28xx_vbi_qops = {
.queue_setup = vbi_queue_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 8d253a5df0a9..a2ba2d905952 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -557,10 +557,10 @@ static inline void print_err_status(struct em28xx *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index 3fd94fe7e1eb..d214a21acff7 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -204,11 +204,11 @@ config USB_GSPCA_SE401
tristate "SE401 USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
help
- Say Y here if you want support for cameras based on the
- Endpoints (formerly known as AOX) se401 chip.
+ Say Y here if you want support for cameras based on the
+ Endpoints (formerly known as AOX) se401 chip.
- To compile this driver as a module, choose M here: the
- module will be called gspca_se401.
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_se401.
config USB_GSPCA_SN9C2028
tristate "SONIX Dual-Mode USB Camera Driver"
@@ -224,11 +224,11 @@ config USB_GSPCA_SN9C20X
tristate "SN9C20X USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
help
- Say Y here if you want support for cameras based on the
- sn9c20x chips (SN9C201 and SN9C202).
+ Say Y here if you want support for cameras based on the
+ sn9c20x chips (SN9C201 and SN9C202).
- To compile this driver as a module, choose M here: the
- module will be called gspca_sn9c20x.
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_sn9c20x.
config USB_GSPCA_SONIXB
tristate "SONIX Bayer USB Camera Driver"
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 0f141762abf1..961343873fd0 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1075,7 +1075,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
/* give an index to each format */
index = 0;
- j = 0;
for (i = gspca_dev->cam.nmodes; --i >= 0; ) {
fmt_tb[index] = gspca_dev->cam.cam_mode[i].pixelformat;
j = 0;
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index cdb79c5f0c38..1b30434b72ef 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -1,4 +1,4 @@
-/**
+/*
* OV519 driver
*
* Copyright (C) 2008-2011 Jean-François Moine <moinejf@free.fr>
@@ -2865,7 +2865,7 @@ static void sd_reset_snapshot(struct gspca_dev *gspca_dev)
static void ov51x_upload_quan_tables(struct sd *sd)
{
- const unsigned char yQuanTable511[] = {
+ static const unsigned char yQuanTable511[] = {
0, 1, 1, 2, 2, 3, 3, 4,
1, 1, 1, 2, 2, 3, 4, 4,
1, 1, 2, 2, 3, 4, 4, 4,
@@ -2876,7 +2876,7 @@ static void ov51x_upload_quan_tables(struct sd *sd)
4, 4, 4, 4, 5, 5, 5, 5
};
- const unsigned char uvQuanTable511[] = {
+ static const unsigned char uvQuanTable511[] = {
0, 2, 2, 3, 4, 4, 4, 4,
2, 2, 2, 4, 4, 4, 4, 4,
2, 2, 3, 4, 4, 4, 4, 4,
@@ -2888,13 +2888,13 @@ static void ov51x_upload_quan_tables(struct sd *sd)
};
/* OV518 quantization tables are 8x4 (instead of 8x8) */
- const unsigned char yQuanTable518[] = {
+ static const unsigned char yQuanTable518[] = {
5, 4, 5, 6, 6, 7, 7, 7,
5, 5, 5, 5, 6, 7, 7, 7,
6, 6, 6, 6, 7, 7, 7, 8,
7, 7, 6, 7, 7, 7, 8, 8
};
- const unsigned char uvQuanTable518[] = {
+ static const unsigned char uvQuanTable518[] = {
6, 6, 6, 7, 7, 7, 7, 7,
6, 6, 6, 7, 7, 7, 7, 7,
6, 6, 6, 7, 7, 7, 7, 8,
@@ -2943,7 +2943,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* For 511 and 511+ */
- const struct ov_regvals init_511[] = {
+ static const struct ov_regvals init_511[] = {
{ R51x_SYS_RESET, 0x7f },
{ R51x_SYS_INIT, 0x01 },
{ R51x_SYS_RESET, 0x7f },
@@ -2953,7 +2953,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R51x_SYS_RESET, 0x3d },
};
- const struct ov_regvals norm_511[] = {
+ static const struct ov_regvals norm_511[] = {
{ R511_DRAM_FLOW_CTL, 0x01 },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
@@ -2963,7 +2963,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R511_COMP_LUT_EN, 0x03 },
};
- const struct ov_regvals norm_511_p[] = {
+ static const struct ov_regvals norm_511_p[] = {
{ R511_DRAM_FLOW_CTL, 0xff },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
@@ -2973,7 +2973,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R511_COMP_LUT_EN, 0x03 },
};
- const struct ov_regvals compress_511[] = {
+ static const struct ov_regvals compress_511[] = {
{ 0x70, 0x1f },
{ 0x71, 0x05 },
{ 0x72, 0x06 },
@@ -3009,7 +3009,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* For 518 and 518+ */
- const struct ov_regvals init_518[] = {
+ static const struct ov_regvals init_518[] = {
{ R51x_SYS_RESET, 0x40 },
{ R51x_SYS_INIT, 0xe1 },
{ R51x_SYS_RESET, 0x3e },
@@ -3020,7 +3020,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
{ 0x5d, 0x03 },
};
- const struct ov_regvals norm_518[] = {
+ static const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -3033,7 +3033,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
{ 0x2f, 0x80 },
};
- const struct ov_regvals norm_518_p[] = {
+ static const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index a097d3dbc141..65ef755adfdc 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -386,7 +386,7 @@ static void msi2500_isoc_handler(struct urb *urb)
if (unlikely(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN)) {
- dev_dbg(dev->dev, "URB (%p) unlinked %ssynchronuously\n",
+ dev_dbg(dev->dev, "URB (%p) unlinked %ssynchronously\n",
urb, urb->status == -ENOENT ? "" : "a");
return;
}
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 60a2604e4cb3..1ad913fc30bf 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -44,7 +44,6 @@ config VIDEO_PVRUSB2_DVB
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
---help---
-
This option enables a DVB interface for the pvrusb2 driver.
If your device does not support digital television, this
feature will have no affect on the driver's operation.
diff --git a/drivers/media/usb/pwc/pwc-dec23.c b/drivers/media/usb/pwc/pwc-dec23.c
index 3792fedff951..1283b3bd9800 100644
--- a/drivers/media/usb/pwc/pwc-dec23.c
+++ b/drivers/media/usb/pwc/pwc-dec23.c
@@ -649,11 +649,10 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
}
/**
- *
* Uncompress a pwc23 buffer.
- *
- * src: raw data
- * dst: image output
+ * @pdev: pointer to pwc device's internal struct
+ * @src: raw data
+ * @dst: image output
*/
void pwc_dec23_decompress(struct pwc_device *pdev,
const void *src,
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index eb6921d2743e..54b036d39c5b 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -262,7 +262,8 @@ static void pwc_isoc_handler(struct urb *urb)
if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN) {
- PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
+ PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronously.\n",
+ urb, urb->status == -ENOENT ? "" : "a");
return;
}
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index b2f239c4ba42..7fee5766587a 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -485,9 +485,10 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
/* kickstarts the firmware loading. from probe
*/
-static void s2255_timer(unsigned long user_data)
+static void s2255_timer(struct timer_list *t)
{
- struct s2255_fw *data = (struct s2255_fw *)user_data;
+ struct s2255_dev *dev = from_timer(dev, t, timer);
+ struct s2255_fw *data = dev->fw_data;
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
pr_err("s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -2283,7 +2284,7 @@ static int s2255_probe(struct usb_interface *interface,
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
goto errorEP;
}
- setup_timer(&dev->timer, s2255_timer, (unsigned long)dev->fw_data);
+ timer_setup(&dev->timer, s2255_timer, 0);
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
struct s2255_vc *vc = &dev->vc[i];
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 8c1f926567ec..d07349cf9489 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -74,7 +74,7 @@ struct smsusb_device_t {
static int smsusb_submit_urb(struct smsusb_device_t *dev,
struct smsusb_urb_t *surb);
-/**
+/*
* Completing URB's callback handler - bottom half (proccess context)
* submits the URB prepared on smsusb_onresponse()
*/
@@ -86,7 +86,7 @@ static void do_submit_urb(struct work_struct *work)
smsusb_submit_urb(dev, surb);
}
-/**
+/*
* Completing URB's callback handler - top half (interrupt context)
* adds completing sms urb to the global surbs list and activtes the worker
* thread the surb
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 2c70173e3c82..62a12d5356ad 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -246,7 +246,7 @@ static const struct i2c_adapter adap_template = {
.algo = &algo,
};
-static struct i2c_client client_template = {
+static const struct i2c_client client_template = {
.name = "stk1160 internal",
};
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index ce8ebbe395a6..423c03a0638d 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -38,10 +38,10 @@ static inline void print_err_status(struct stk1160 *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c
index 2537643a1808..77347541904d 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/media/usb/tm6000/tm6000-cards.c
@@ -1184,7 +1184,7 @@ static int tm6000_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *usbdev;
- struct tm6000_core *dev = NULL;
+ struct tm6000_core *dev;
int i, rc = 0;
int nr = 0;
char *speed;
@@ -1194,22 +1194,21 @@ static int tm6000_usb_probe(struct usb_interface *interface,
/* Selects the proper interface */
rc = usb_set_interface(usbdev, 0, 1);
if (rc < 0)
- goto err;
+ goto report_failure;
/* Check to see next free device and mark as used */
nr = find_first_zero_bit(&tm6000_devused, TM6000_MAXBOARDS);
if (nr >= TM6000_MAXBOARDS) {
printk(KERN_ERR "tm6000: Supports only %i tm60xx boards.\n", TM6000_MAXBOARDS);
- usb_put_dev(usbdev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto put_device;
}
/* Create and initialize dev struct */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- printk(KERN_ERR "tm6000" ": out of memory!\n");
- usb_put_dev(usbdev);
- return -ENOMEM;
+ if (!dev) {
+ rc = -ENOMEM;
+ goto put_device;
}
spin_lock_init(&dev->slock);
mutex_init(&dev->usb_lock);
@@ -1313,8 +1312,7 @@ static int tm6000_usb_probe(struct usb_interface *interface,
if (!dev->isoc_in.endp) {
printk(KERN_ERR "tm6000: probing error: no IN ISOC endpoint!\n");
rc = -ENODEV;
-
- goto err;
+ goto free_device;
}
/* save our data pointer in this interface device */
@@ -1324,17 +1322,18 @@ static int tm6000_usb_probe(struct usb_interface *interface,
rc = tm6000_init_dev(dev);
if (rc < 0)
- goto err;
+ goto free_device;
return 0;
-err:
+free_device:
+ kfree(dev);
+report_failure:
printk(KERN_ERR "tm6000: Error %d while registering\n", rc);
clear_bit(nr, &tm6000_devused);
+put_device:
usb_put_dev(usbdev);
-
- kfree(dev);
return rc;
}
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 097ac321b7e1..c811fc6cf48a 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -45,10 +45,10 @@ static inline void print_err_status(struct tm6000_core *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -123,7 +123,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
}
dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (dvb->bulk_urb == NULL)
+ if (!dvb->bulk_urb)
return -ENOMEM;
pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
@@ -133,9 +133,8 @@ static int tm6000_start_stream(struct tm6000_core *dev)
size = size * 15; /* 512 x 8 or 12 or 15 */
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
- if (dvb->bulk_urb->transfer_buffer == NULL) {
+ if (!dvb->bulk_urb->transfer_buffer) {
usb_free_urb(dvb->bulk_urb);
- printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
return -ENOMEM;
}
@@ -361,7 +360,7 @@ static void unregister_dvb(struct tm6000_core *dev)
{
struct tm6000_dvb *dvb = dev->dvb;
- if (dvb->bulk_urb != NULL) {
+ if (dvb->bulk_urb) {
struct urb *bulk_urb = dvb->bulk_urb;
kfree(bulk_urb->transfer_buffer);
@@ -400,10 +399,8 @@ static int dvb_init(struct tm6000_core *dev)
}
dvb = kzalloc(sizeof(struct tm6000_dvb), GFP_KERNEL);
- if (!dvb) {
- printk(KERN_INFO "Cannot allocate memory\n");
+ if (!dvb)
return -ENOMEM;
- }
dev->dvb = dvb;
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 91889ad9cdd7..397990afe00b 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -352,7 +352,7 @@ static int __tm6000_ir_int_start(struct rc_dev *rc)
dprintk(1, "IR max size: %d\n", size);
ir->int_urb->transfer_buffer = kzalloc(size, GFP_ATOMIC);
- if (ir->int_urb->transfer_buffer == NULL) {
+ if (!ir->int_urb->transfer_buffer) {
usb_free_urb(ir->int_urb);
return err;
}
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index ec8c4d2534dc..9fa25de6b5a9 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -342,10 +342,10 @@ static inline void print_err_status(struct tm6000_core *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -470,20 +470,16 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
int num_bufs = TM6000_NUM_URB_BUF;
int i;
- if (dev->urb_buffer != NULL)
+ if (dev->urb_buffer)
return 0;
dev->urb_buffer = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!dev->urb_buffer) {
- tm6000_err("cannot allocate memory for urb buffers\n");
+ if (!dev->urb_buffer)
return -ENOMEM;
- }
dev->urb_dma = kmalloc(sizeof(dma_addr_t *)*num_bufs, GFP_KERNEL);
- if (!dev->urb_dma) {
- tm6000_err("cannot allocate memory for urb dma pointers\n");
+ if (!dev->urb_dma)
return -ENOMEM;
- }
for (i = 0; i < num_bufs; i++) {
dev->urb_buffer[i] = usb_alloc_coherent(
@@ -507,7 +503,7 @@ static int tm6000_free_urb_buffers(struct tm6000_core *dev)
{
int i;
- if (dev->urb_buffer == NULL)
+ if (!dev->urb_buffer)
return 0;
for (i = 0; i < TM6000_NUM_URB_BUF; i++) {
@@ -598,15 +594,12 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
dev->isoc_ctl.num_bufs = num_bufs;
dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!dev->isoc_ctl.urb) {
- tm6000_err("cannot alloc memory for usb buffers\n");
+ if (!dev->isoc_ctl.urb)
return -ENOMEM;
- }
dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
if (!dev->isoc_ctl.transfer_buffer) {
- tm6000_err("cannot allocate memory for usbtransfer\n");
kfree(dev->isoc_ctl.urb);
return -ENOMEM;
}
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index b842f367249f..a142b9dc0feb 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -76,7 +76,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define TTUSB_REV_2_2 0x22
#define TTUSB_BUDGET_NAME "ttusb_stc_fw"
-/**
+/*
* since we're casting (struct ttusb*) <-> (struct dvb_demux*) around
* the dvb_demux field must be the first in struct!!
*/
@@ -713,7 +713,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
}
}
- /**
+ /*
* if length is valid and we reached the end:
* goto next muxpack
*/
@@ -729,7 +729,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
/* maximum bytes, until we know the length */
ttusb->muxpack_len = 2;
- /**
+ /*
* no muxpacks left?
* return to search-sync state
*/
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index f06f09a0876e..127f8a0c098b 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -84,7 +84,7 @@ static int usbtv_probe(struct usb_interface *intf,
/* Packet size is split into 11 bits of base size and count of
* extra multiplies of it.*/
size = usb_endpoint_maxp(&ep->desc);
- size = (size & 0x07ff) * usb_endpoint_maxp_mult(&ep->desc);
+ size = size * usb_endpoint_maxp_mult(&ep->desc);
/* Device structure */
usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
@@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
static const struct usb_device_id usbtv_id_table[] = {
{ USB_DEVICE(0x1b71, 0x3002) },
+ { USB_DEVICE(0x1f71, 0x3301) },
{}
};
MODULE_DEVICE_TABLE(usb, usbtv_id_table);
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 95b5f4319ec2..3668a04359e8 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -718,8 +718,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
*/
if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) {
ret = usb_control_msg(usbtv->udev,
- usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
if (ret < 0)
goto error;
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 4ff8d0aed015..1d888661fd03 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -209,10 +209,8 @@ static int send_control_msg(struct usb_device *udev, u8 request, u16 value,
int status;
unsigned char *transfer_buffer = kmalloc(size, GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(&udev->dev, "kmalloc(%d) failed\n", size);
+ if (!transfer_buffer)
return -ENOMEM;
- }
memcpy(transfer_buffer, cp, size);
@@ -387,9 +385,9 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
vb);
int rc;
- DBG("%s, field=%d, fmt name = %s\n", __func__, field, cam->fmt != NULL ?
- cam->fmt->name : "");
- if (cam->fmt == NULL)
+ DBG("%s, field=%d, fmt name = %s\n", __func__, field,
+ cam->fmt ? cam->fmt->name : "");
+ if (!cam->fmt)
return -EINVAL;
buf->vb.size = cam->width * cam->height * (cam->fmt->depth >> 3);
@@ -789,7 +787,7 @@ static int zr364xx_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct zr364xx_camera *cam = video_drvdata(file);
char pixelformat_name[5];
- if (cam == NULL)
+ if (!cam)
return -ENODEV;
if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG) {
@@ -819,7 +817,7 @@ static int zr364xx_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct zr364xx_camera *cam;
- if (file == NULL)
+ if (!file)
return -ENODEV;
cam = video_drvdata(file);
@@ -981,13 +979,13 @@ static void read_pipe_completion(struct urb *purb)
pipe_info = purb->context;
_DBG("%s %p, status %d\n", __func__, purb, purb->status);
- if (pipe_info == NULL) {
+ if (!pipe_info) {
printk(KERN_ERR KBUILD_MODNAME ": no context!\n");
return;
}
cam = pipe_info->cam;
- if (cam == NULL) {
+ if (!cam) {
printk(KERN_ERR KBUILD_MODNAME ": no context!\n");
return;
}
@@ -1071,7 +1069,7 @@ static void zr364xx_stop_readpipe(struct zr364xx_camera *cam)
{
struct zr364xx_pipeinfo *pipe_info;
- if (cam == NULL) {
+ if (!cam) {
printk(KERN_ERR KBUILD_MODNAME ": invalid device\n");
return;
}
@@ -1275,7 +1273,7 @@ static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
struct zr364xx_camera *cam = video_drvdata(file);
int ret;
- if (cam == NULL) {
+ if (!cam) {
DBG("%s: cam == NULL\n", __func__);
return -ENODEV;
}
@@ -1359,7 +1357,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
pipe->transfer_buffer = kzalloc(pipe->transfer_size,
GFP_KERNEL);
- if (pipe->transfer_buffer == NULL) {
+ if (!pipe->transfer_buffer) {
DBG("out of memory!\n");
return -ENOMEM;
}
@@ -1375,7 +1373,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
DBG("valloc %p, idx %lu, pdata %p\n",
&cam->buffer.frame[i], i,
cam->buffer.frame[i].lpvbits);
- if (cam->buffer.frame[i].lpvbits == NULL) {
+ if (!cam->buffer.frame[i].lpvbits) {
printk(KERN_INFO KBUILD_MODNAME ": out of memory. Using less frames\n");
break;
}
@@ -1423,11 +1421,9 @@ static int zr364xx_probe(struct usb_interface *intf,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- cam = kzalloc(sizeof(struct zr364xx_camera), GFP_KERNEL);
- if (cam == NULL) {
- dev_err(&udev->dev, "cam: out of memory !\n");
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam)
return -ENOMEM;
- }
cam->v4l2_dev.release = zr364xx_release;
err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 8db45dfc271b..82852f23a3b6 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -239,7 +239,7 @@ static const struct analog_demod_ops tuner_analog_ops = {
* @type: type of the tuner (e. g. tuner number)
* @new_mode_mask: Indicates if tuner supports TV and/or Radio
* @new_config: an optional parameter used by a few tuners to adjust
- internal parameters, like LNA mode
+ * internal parameters, like LNA mode
* @tuner_callback: an optional function to be called when switching
* to analog mode
*
@@ -750,6 +750,7 @@ static int tuner_remove(struct i2c_client *client)
/**
* check_mode - Verify if tuner supports the requested mode
* @t: a pointer to the module's internal struct_tuner
+ * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type.
*
* This function checks if the tuner is capable of tuning analog TV,
* digital TV or radio, depending on what the caller wants. If the
@@ -757,6 +758,7 @@ static int tuner_remove(struct i2c_client *client)
* returns 0.
* This function is needed for boards that have a separate tuner for
* radio (like devices with tea5767).
+ *
* NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
* select a TV frequency. So, t_mode = T_ANALOG_TV could actually
* be used to represent a Digital TV too.
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index d741a8e0fdac..e5acfab470a5 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -22,8 +22,37 @@
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ if (!n->ops || !n->ops->bound)
+ return 0;
+
+ return n->ops->bound(n, subdev, asd);
+}
+
+static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ if (!n->ops || !n->ops->unbind)
+ return;
+
+ n->ops->unbind(n, subdev, asd);
+}
+
+static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
+{
+ if (!n->ops || !n->ops->complete)
+ return 0;
+
+ return n->ops->complete(n);
+}
+
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
@@ -60,8 +89,8 @@ static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
-static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd)
+static struct v4l2_async_subdev *v4l2_async_find_match(
+ struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd)
{
bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
struct v4l2_async_subdev *asd;
@@ -95,22 +124,96 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
return NULL;
}
-static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+/* Find the sub-device notifier registered by a sub-device driver. */
+static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(
+ struct v4l2_subdev *sd)
{
- int ret;
+ struct v4l2_async_notifier *n;
- if (notifier->bound) {
- ret = notifier->bound(notifier, sd, asd);
- if (ret < 0)
- return ret;
+ list_for_each_entry(n, &notifier_list, list)
+ if (n->sd == sd)
+ return n;
+
+ return NULL;
+}
+
+/* Get v4l2_device related to the notifier if one can be found. */
+static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(
+ struct v4l2_async_notifier *notifier)
+{
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ return notifier->v4l2_dev;
+}
+
+/*
+ * Return true if all child sub-device notifiers are complete, false otherwise.
+ */
+static bool v4l2_async_notifier_can_complete(
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_subdev *sd;
+
+ if (!list_empty(&notifier->waiting))
+ return false;
+
+ list_for_each_entry(sd, &notifier->done, async_list) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(sd);
+
+ if (subdev_notifier &&
+ !v4l2_async_notifier_can_complete(subdev_notifier))
+ return false;
}
- ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
+ return true;
+}
+
+/*
+ * Complete the master notifier if possible. This is done when all async
+ * sub-devices have been bound; v4l2_device is also available then.
+ */
+static int v4l2_async_notifier_try_complete(
+ struct v4l2_async_notifier *notifier)
+{
+ /* Quick check whether there are still more sub-devices here. */
+ if (!list_empty(&notifier->waiting))
+ return 0;
+
+ /* Check the entire notifier tree; find the root notifier first. */
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ /* This is root if it has v4l2_dev. */
+ if (!notifier->v4l2_dev)
+ return 0;
+
+ /* Is everything ready? */
+ if (!v4l2_async_notifier_can_complete(notifier))
+ return 0;
+
+ return v4l2_async_notifier_call_complete(notifier);
+}
+
+static int v4l2_async_notifier_try_all_subdevs(
+ struct v4l2_async_notifier *notifier);
+
+static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct v4l2_async_notifier *subdev_notifier;
+ int ret;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
if (ret < 0) {
- if (notifier->unbind)
- notifier->unbind(notifier, sd, asd);
+ v4l2_device_unregister_subdev(sd);
return ret;
}
@@ -122,8 +225,55 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
/* Move from the global subdevice list to notifier's done */
list_move(&sd->async_list, &notifier->done);
- if (list_empty(&notifier->waiting) && notifier->complete)
- return notifier->complete(notifier);
+ /*
+ * See if the sub-device has a notifier. If not, return here.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (!subdev_notifier || subdev_notifier->parent)
+ return 0;
+
+ /*
+ * Proceed with checking for the sub-device notifier's async
+ * sub-devices, and return the result. The error will be handled by the
+ * caller.
+ */
+ subdev_notifier->parent = notifier;
+
+ return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
+}
+
+/* Test all async sub-devices in a notifier for a match. */
+static int v4l2_async_notifier_try_all_subdevs(
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_notifier_find_v4l2_dev(notifier);
+ struct v4l2_subdev *sd;
+
+ if (!v4l2_dev)
+ return 0;
+
+again:
+ list_for_each_entry(sd, &subdev_list, async_list) {
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ asd = v4l2_async_find_match(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * v4l2_async_match_notify() may lead to registering a
+ * new notifier and thus changing the async subdevs
+ * list. In order to proceed safely from here, restart
+ * parsing the list from the beginning.
+ */
+ goto again;
+ }
return 0;
}
@@ -134,24 +284,107 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
/* Subdevice driver will reprobe and put the subdev back onto the list */
list_del_init(&sd->async_list);
sd->asd = NULL;
- sd->dev = NULL;
}
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+/* Unbind all sub-devices in the notifier tree. */
+static void v4l2_async_notifier_unbind_all_subdevs(
+ struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd, *tmp;
+
+ list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(sd);
+
+ if (subdev_notifier)
+ v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
+ list_move(&sd->async_list, &subdev_list);
+ }
+
+ notifier->parent = NULL;
+}
+
+/* See if an fwnode can be found in a notifier's lists. */
+static bool __v4l2_async_notifier_fwnode_has_async_subdev(
+ struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode)
+{
struct v4l2_async_subdev *asd;
+ struct v4l2_subdev *sd;
+
+ list_for_each_entry(asd, &notifier->waiting, list) {
+ if (asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
+ continue;
+
+ if (asd->match.fwnode.fwnode == fwnode)
+ return true;
+ }
+
+ list_for_each_entry(sd, &notifier->done, async_list) {
+ if (WARN_ON(!sd->asd))
+ continue;
+
+ if (sd->asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
+ continue;
+
+ if (sd->asd->match.fwnode.fwnode == fwnode)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Find out whether an async sub-device was set up for an fwnode already or
+ * whether it exists in a given notifier before @this_index.
+ */
+static bool v4l2_async_notifier_fwnode_has_async_subdev(
+ struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode,
+ unsigned int this_index)
+{
+ unsigned int j;
+
+ lockdep_assert_held(&list_lock);
+
+ /* Check that an fwnode is not being added more than once. */
+ for (j = 0; j < this_index; j++) {
+ struct v4l2_async_subdev *asd = notifier->subdevs[this_index];
+ struct v4l2_async_subdev *other_asd = notifier->subdevs[j];
+
+ if (other_asd->match_type == V4L2_ASYNC_MATCH_FWNODE &&
+ asd->match.fwnode.fwnode ==
+ other_asd->match.fwnode.fwnode)
+ return true;
+ }
+
+ /* Check than an fwnode did not exist in other notifiers. */
+ list_for_each_entry(notifier, &notifier_list, list)
+ if (__v4l2_async_notifier_fwnode_has_async_subdev(
+ notifier, fwnode))
+ return true;
+
+ return false;
+}
+
+static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+{
+ struct device *dev =
+ notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
+ struct v4l2_async_subdev *asd;
+ int ret;
int i;
- if (!v4l2_dev || !notifier->num_subdevs ||
- notifier->num_subdevs > V4L2_MAX_SUBDEVS)
+ if (notifier->num_subdevs > V4L2_MAX_SUBDEVS)
return -EINVAL;
- notifier->v4l2_dev = v4l2_dev;
INIT_LIST_HEAD(&notifier->waiting);
INIT_LIST_HEAD(&notifier->done);
+ mutex_lock(&list_lock);
+
for (i = 0; i < notifier->num_subdevs; i++) {
asd = notifier->subdevs[i];
@@ -159,32 +392,32 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
case V4L2_ASYNC_MATCH_CUSTOM:
case V4L2_ASYNC_MATCH_DEVNAME:
case V4L2_ASYNC_MATCH_I2C:
+ break;
case V4L2_ASYNC_MATCH_FWNODE:
+ if (v4l2_async_notifier_fwnode_has_async_subdev(
+ notifier, asd->match.fwnode.fwnode, i)) {
+ dev_err(dev,
+ "fwnode has already been registered or in notifier's subdev list\n");
+ ret = -EEXIST;
+ goto err_unlock;
+ }
break;
default:
- dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
- "Invalid match type %u on %p\n",
+ dev_err(dev, "Invalid match type %u on %p\n",
asd->match_type, asd);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
list_add_tail(&asd->list, &notifier->waiting);
}
- mutex_lock(&list_lock);
+ ret = v4l2_async_notifier_try_all_subdevs(notifier);
+ if (ret < 0)
+ goto err_unbind;
- list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
- int ret;
-
- asd = v4l2_async_belongs(notifier, sd);
- if (!asd)
- continue;
-
- ret = v4l2_async_test_notify(notifier, sd, asd);
- if (ret < 0) {
- mutex_unlock(&list_lock);
- return ret;
- }
- }
+ ret = v4l2_async_notifier_try_complete(notifier);
+ if (ret < 0)
+ goto err_unbind;
/* Keep also completed notifiers on the list */
list_add(&notifier->list, &notifier_list);
@@ -192,90 +425,114 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * On failure, unbind all sub-devices registered through this notifier.
+ */
+ v4l2_async_notifier_unbind_all_subdevs(notifier);
+
+err_unlock:
+ mutex_unlock(&list_lock);
+
+ return ret;
+}
+
+int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
+{
+ int ret;
+
+ if (WARN_ON(!v4l2_dev || notifier->sd))
+ return -EINVAL;
+
+ notifier->v4l2_dev = v4l2_dev;
+
+ ret = __v4l2_async_notifier_register(notifier);
+ if (ret)
+ notifier->v4l2_dev = NULL;
+
+ return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier)
{
- struct v4l2_subdev *sd, *tmp;
- unsigned int notif_n_subdev = notifier->num_subdevs;
- unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
- struct device **dev;
- int i = 0;
+ int ret;
- if (!notifier->v4l2_dev)
- return;
+ if (WARN_ON(!sd || notifier->v4l2_dev))
+ return -EINVAL;
- dev = kvmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(notifier->v4l2_dev->dev,
- "Failed to allocate device cache!\n");
- }
+ notifier->sd = sd;
- mutex_lock(&list_lock);
+ ret = __v4l2_async_notifier_register(notifier);
+ if (ret)
+ notifier->sd = NULL;
- list_del(&notifier->list);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
- list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
- struct device *d;
+static void __v4l2_async_notifier_unregister(
+ struct v4l2_async_notifier *notifier)
+{
+ if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
+ return;
- d = get_device(sd->dev);
+ v4l2_async_notifier_unbind_all_subdevs(notifier);
- v4l2_async_cleanup(sd);
+ notifier->sd = NULL;
+ notifier->v4l2_dev = NULL;
- /* If we handled USB devices, we'd have to lock the parent too */
- device_release_driver(d);
+ list_del(&notifier->list);
+}
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
+void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
- /*
- * Store device at the device cache, in order to call
- * put_device() on the final step
- */
- if (dev)
- dev[i++] = d;
- else
- put_device(d);
- }
+ __v4l2_async_notifier_unregister(notifier);
mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_notifier_unregister);
- /*
- * Call device_attach() to reprobe devices
- *
- * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
- * executed.
- */
- while (i--) {
- struct device *d = dev[i];
-
- if (d && device_attach(d) < 0) {
- const char *name = "(none)";
- int lock = device_trylock(d);
-
- if (lock && d->driver)
- name = d->driver->name;
- dev_err(d, "Failed to re-probe to %s\n", name);
- if (lock)
- device_unlock(d);
+void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+{
+ unsigned int i;
+
+ if (!notifier || !notifier->max_subdevs)
+ return;
+
+ for (i = 0; i < notifier->num_subdevs; i++) {
+ struct v4l2_async_subdev *asd = notifier->subdevs[i];
+
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_FWNODE:
+ fwnode_handle_put(asd->match.fwnode.fwnode);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ break;
}
- put_device(d);
+
+ kfree(asd);
}
- kvfree(dev);
- notifier->v4l2_dev = NULL;
+ notifier->max_subdevs = 0;
+ notifier->num_subdevs = 0;
- /*
- * Don't care about the waiting list, it is initialised and populated
- * upon notifier registration.
- */
+ kvfree(notifier->subdevs);
+ notifier->subdevs = NULL;
}
-EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
+ struct v4l2_async_notifier *subdev_notifier;
struct v4l2_async_notifier *notifier;
+ int ret;
/*
* No reference taken. The reference is held by the device
@@ -290,41 +547,73 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
INIT_LIST_HEAD(&sd->async_list);
list_for_each_entry(notifier, &notifier_list, list) {
- struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
- if (asd) {
- int ret = v4l2_async_test_notify(notifier, sd, asd);
- mutex_unlock(&list_lock);
- return ret;
- }
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_notifier_find_v4l2_dev(notifier);
+ struct v4l2_async_subdev *asd;
+
+ if (!v4l2_dev)
+ continue;
+
+ asd = v4l2_async_find_match(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
+ if (ret)
+ goto err_unbind;
+
+ ret = v4l2_async_notifier_try_complete(notifier);
+ if (ret)
+ goto err_unbind;
+
+ goto out_unlock;
}
/* None matched, wait for hot-plugging */
list_add(&sd->async_list, &subdev_list);
+out_unlock:
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * Complete failed. Unbind the sub-devices bound through registering
+ * this async sub-device.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (subdev_notifier)
+ v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+
+ if (sd->asd)
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
+ mutex_unlock(&list_lock);
+
+ return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
- struct v4l2_async_notifier *notifier = sd->notifier;
+ mutex_lock(&list_lock);
- if (!sd->asd) {
- if (!list_empty(&sd->async_list))
- v4l2_async_cleanup(sd);
- return;
- }
+ __v4l2_async_notifier_unregister(sd->subdev_notifier);
+ v4l2_async_notifier_cleanup(sd->subdev_notifier);
+ kfree(sd->subdev_notifier);
+ sd->subdev_notifier = NULL;
- mutex_lock(&list_lock);
+ if (sd->asd) {
+ struct v4l2_async_notifier *notifier = sd->notifier;
- list_add(&sd->asd->list, &notifier->waiting);
+ list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_cleanup(sd);
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ }
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
mutex_unlock(&list_lock);
}
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index dd1db678718c..cbb2ef43945f 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
+static u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+ u32 flags = ctrl->flags;
+
+ if (ctrl->is_ptr)
+ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+ return flags;
+}
+
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
{
memset(ev->reserved, 0, sizeof(ev->reserved));
@@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
ev->id = ctrl->id;
ev->u.ctrl.changes = changes;
ev->u.ctrl.type = ctrl->type;
- ev->u.ctrl.flags = ctrl->flags;
+ ev->u.ctrl.flags = user_flags(ctrl);
if (ctrl->is_ptr)
ev->u.ctrl.value64 = 0;
else
@@ -2003,10 +2013,6 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, err);
return NULL;
}
- if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
if (is_array &&
(type == V4L2_CTRL_TYPE_BUTTON ||
type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
@@ -2577,10 +2583,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
else
qc->id = ctrl->id;
strlcpy(qc->name, ctrl->name, sizeof(qc->name));
- qc->flags = ctrl->flags;
+ qc->flags = user_flags(ctrl);
qc->type = ctrl->type;
- if (ctrl->is_ptr)
- qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
qc->elem_size = ctrl->elem_size;
qc->elems = ctrl->elems;
qc->nr_of_dims = ctrl->nr_of_dims;
@@ -2818,7 +2822,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL)
- return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0;
+ return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 5c8c49d240d1..930f9c53a64e 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -245,11 +245,11 @@ EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
/**
* v4l2_match_dv_timings - check if two timings match
- * @t1 - compare this v4l2_dv_timings struct...
- * @t2 - with this struct.
- * @pclock_delta - the allowed pixelclock deviation.
- * @match_reduced_fps - if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
- * match.
+ * @t1: compare this v4l2_dv_timings struct...
+ * @t2: with this struct.
+ * @pclock_delta: the allowed pixelclock deviation.
+ * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
+ * match.
*
* Compare t1 with t2 with a given margin of error for the pixelclock.
*/
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 40b2fbfe8865..fb72c7ac04d4 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -19,6 +19,7 @@
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
@@ -26,7 +27,9 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
enum v4l2_fwnode_bus_type {
V4L2_FWNODE_BUS_TYPE_GUESS = 0,
@@ -181,25 +184,6 @@ v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
vep->bus_type = V4L2_MBUS_CSI1;
}
-/**
- * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
- * @vep: pointer to the V4L2 fwnode data structure
- *
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
- *
- * NOTE: This function does not parse properties the size of which is variable
- * without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in
- * new drivers instead.
- *
- * Return: 0 on success or a negative error code on failure.
- */
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep)
{
@@ -239,14 +223,6 @@ int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
-/*
- * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by
- * v4l2_fwnode_endpoint_alloc_parse()
- * @vep - the V4L2 fwnode the resources of which are to be released
- *
- * It is safe to call this function with NULL argument or on a V4L2 fwnode the
- * parsing of which failed.
- */
void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
{
if (IS_ERR_OR_NULL(vep))
@@ -257,29 +233,6 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
-/**
- * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
- *
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
- *
- * v4l2_fwnode_endpoint_alloc_parse() has two important differences to
- * v4l2_fwnode_endpoint_parse():
- *
- * 1. It also parses variable size data.
- *
- * 2. The memory it has allocated to store the variable size data must be freed
- * using v4l2_fwnode_endpoint_free() when no longer needed.
- *
- * Return: Pointer to v4l2_fwnode_endpoint if successful, on an error pointer
- * on error.
- */
struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
struct fwnode_handle *fwnode)
{
@@ -322,24 +275,6 @@ out_err:
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
-/**
- * v4l2_fwnode_endpoint_parse_link() - parse a link between two endpoints
- * @__fwnode: pointer to the endpoint's fwnode at the local end of the link
- * @link: pointer to the V4L2 fwnode link data structure
- *
- * Fill the link structure with the local and remote nodes and port numbers.
- * The local_node and remote_node fields are set to point to the local and
- * remote port's parent nodes respectively (the port parent node being the
- * parent node of the port node if that node isn't a 'ports' node, or the
- * grand-parent node of the port node otherwise).
- *
- * A reference is taken to both the local and remote nodes, the caller must use
- * v4l2_fwnode_endpoint_put_link() to drop the references when done with the
- * link.
- *
- * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be
- * found.
- */
int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
struct v4l2_fwnode_link *link)
{
@@ -374,13 +309,6 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
-/**
- * v4l2_fwnode_put_link() - drop references to nodes in a link
- * @link: pointer to the V4L2 fwnode link data structure
- *
- * Drop references to the local and remote nodes in the link. This function
- * must be called on every link parsed with v4l2_fwnode_parse_link().
- */
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
{
fwnode_handle_put(link->local_node);
@@ -388,6 +316,630 @@ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
+static int v4l2_async_notifier_realloc(struct v4l2_async_notifier *notifier,
+ unsigned int max_subdevs)
+{
+ struct v4l2_async_subdev **subdevs;
+
+ if (max_subdevs <= notifier->max_subdevs)
+ return 0;
+
+ subdevs = kvmalloc_array(
+ max_subdevs, sizeof(*notifier->subdevs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!subdevs)
+ return -ENOMEM;
+
+ if (notifier->subdevs) {
+ memcpy(subdevs, notifier->subdevs,
+ sizeof(*subdevs) * notifier->num_subdevs);
+
+ kvfree(notifier->subdevs);
+ }
+
+ notifier->subdevs = subdevs;
+ notifier->max_subdevs = max_subdevs;
+
+ return 0;
+}
+
+static int v4l2_async_notifier_fwnode_parse_endpoint(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *endpoint, unsigned int asd_struct_size,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ struct v4l2_async_subdev *asd;
+ struct v4l2_fwnode_endpoint *vep;
+ int ret = 0;
+
+ asd = kzalloc(asd_struct_size, GFP_KERNEL);
+ if (!asd)
+ return -ENOMEM;
+
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode.fwnode =
+ fwnode_graph_get_remote_port_parent(endpoint);
+ if (!asd->match.fwnode.fwnode) {
+ dev_warn(dev, "bad remote port parent\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ vep = v4l2_fwnode_endpoint_alloc_parse(endpoint);
+ if (IS_ERR(vep)) {
+ ret = PTR_ERR(vep);
+ dev_warn(dev, "unable to parse V4L2 fwnode endpoint (%d)\n",
+ ret);
+ goto out_err;
+ }
+
+ ret = parse_endpoint ? parse_endpoint(dev, vep, asd) : 0;
+ if (ret == -ENOTCONN)
+ dev_dbg(dev, "ignoring port@%u/endpoint@%u\n", vep->base.port,
+ vep->base.id);
+ else if (ret < 0)
+ dev_warn(dev,
+ "driver could not parse port@%u/endpoint@%u (%d)\n",
+ vep->base.port, vep->base.id, ret);
+ v4l2_fwnode_endpoint_free(vep);
+ if (ret < 0)
+ goto out_err;
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ notifier->num_subdevs++;
+
+ return 0;
+
+out_err:
+ fwnode_handle_put(asd->match.fwnode.fwnode);
+ kfree(asd);
+
+ return ret == -ENOTCONN ? 0 : ret;
+}
+
+static int __v4l2_async_notifier_parse_fwnode_endpoints(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size, unsigned int port, bool has_port,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ struct fwnode_handle *fwnode;
+ unsigned int max_subdevs = notifier->max_subdevs;
+ int ret;
+
+ if (WARN_ON(asd_struct_size < sizeof(struct v4l2_async_subdev)))
+ return -EINVAL;
+
+ for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
+ dev_fwnode(dev), fwnode)); ) {
+ struct fwnode_handle *dev_fwnode;
+ bool is_available;
+
+ dev_fwnode = fwnode_graph_get_port_parent(fwnode);
+ is_available = fwnode_device_is_available(dev_fwnode);
+ fwnode_handle_put(dev_fwnode);
+ if (!is_available)
+ continue;
+
+ if (has_port) {
+ struct fwnode_endpoint ep;
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &ep);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ if (ep.port != port)
+ continue;
+ }
+ max_subdevs++;
+ }
+
+ /* No subdevs to add? Return here. */
+ if (max_subdevs == notifier->max_subdevs)
+ return 0;
+
+ ret = v4l2_async_notifier_realloc(notifier, max_subdevs);
+ if (ret)
+ return ret;
+
+ for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
+ dev_fwnode(dev), fwnode)); ) {
+ struct fwnode_handle *dev_fwnode;
+ bool is_available;
+
+ dev_fwnode = fwnode_graph_get_port_parent(fwnode);
+ is_available = fwnode_device_is_available(dev_fwnode);
+ fwnode_handle_put(dev_fwnode);
+ if (!is_available)
+ continue;
+
+ if (has_port) {
+ struct fwnode_endpoint ep;
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &ep);
+ if (ret)
+ break;
+
+ if (ep.port != port)
+ continue;
+ }
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = v4l2_async_notifier_fwnode_parse_endpoint(
+ dev, notifier, fwnode, asd_struct_size, parse_endpoint);
+ if (ret < 0)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+int v4l2_async_notifier_parse_fwnode_endpoints(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ return __v4l2_async_notifier_parse_fwnode_endpoints(
+ dev, notifier, asd_struct_size, 0, false, parse_endpoint);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints);
+
+int v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size, unsigned int port,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ return __v4l2_async_notifier_parse_fwnode_endpoints(
+ dev, notifier, asd_struct_size, port, true, parse_endpoint);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints_by_port);
+
+/*
+ * v4l2_fwnode_reference_parse - parse references for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ * @prop: the name of the property
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries were found
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+static int v4l2_fwnode_reference_parse(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ const char *prop)
+{
+ struct fwnode_reference_args args;
+ unsigned int index;
+ int ret;
+
+ for (index = 0;
+ !(ret = fwnode_property_get_reference_args(
+ dev_fwnode(dev), prop, NULL, 0, index, &args));
+ index++)
+ fwnode_handle_put(args.fwnode);
+
+ if (!index)
+ return -ENOENT;
+
+ /*
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return the error in cases other than that.
+ */
+ if (ret != -ENOENT && ret != -ENODATA)
+ return ret;
+
+ ret = v4l2_async_notifier_realloc(notifier,
+ notifier->num_subdevs + index);
+ if (ret)
+ return ret;
+
+ for (index = 0; !fwnode_property_get_reference_args(
+ dev_fwnode(dev), prop, NULL, 0, index, &args);
+ index++) {
+ struct v4l2_async_subdev *asd;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ asd->match.fwnode.fwnode = args.fwnode;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+ }
+
+ return 0;
+
+error:
+ fwnode_handle_put(args.fwnode);
+ return ret;
+}
+
+/*
+ * v4l2_fwnode_reference_get_int_prop - parse a reference with integer
+ * arguments
+ * @fwnode: fwnode to read @prop from
+ * @notifier: notifier for @dev
+ * @prop: the name of the property
+ * @index: the index of the reference to get
+ * @props: the array of integer property names
+ * @nprops: the number of integer property names in @nprops
+ *
+ * First find an fwnode referred to by the reference at @index in @prop.
+ *
+ * Then under that fwnode, @nprops times, for each property in @props,
+ * iteratively follow child nodes starting from fwnode such that they have the
+ * property in @props array at the index of the child node distance from the
+ * root node and the value of that property matching with the integer argument
+ * of the reference, at the same index.
+ *
+ * The child fwnode reched at the end of the iteration is then returned to the
+ * caller.
+ *
+ * The core reason for this is that you cannot refer to just any node in ACPI.
+ * So to refer to an endpoint (easy in DT) you need to refer to a device, then
+ * provide a list of (property name, property value) tuples where each tuple
+ * uniquely identifies a child node. The first tuple identifies a child directly
+ * underneath the device fwnode, the next tuple identifies a child node
+ * underneath the fwnode identified by the previous tuple, etc. until you
+ * reached the fwnode you need.
+ *
+ * An example with a graph, as defined in Documentation/acpi/dsd/graph.txt:
+ *
+ * Scope (\_SB.PCI0.I2C2)
+ * {
+ * Device (CAM0)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () {
+ * "compatible",
+ * Package () { "nokia,smia" }
+ * },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port0", "PRT0" },
+ * }
+ * })
+ * Name (PRT0, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 0 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP00" },
+ * }
+ * })
+ * Name (EP00, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package() {
+ * \_SB.PCI0.ISP, 4, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * Scope (\_SB.PCI0)
+ * {
+ * Device (ISP)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port4", "PRT4" },
+ * }
+ * })
+ *
+ * Name (PRT4, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 4 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP40" },
+ * }
+ * })
+ *
+ * Name (EP40, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package () {
+ * \_SB.PCI0.I2C2.CAM0,
+ * 0, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * From the EP40 node under ISP device, you could parse the graph remote
+ * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments:
+ *
+ * @fwnode: fwnode referring to EP40 under ISP.
+ * @prop: "remote-endpoint"
+ * @index: 0
+ * @props: "port", "endpoint"
+ * @nprops: 2
+ *
+ * And you'd get back fwnode referring to EP00 under CAM0.
+ *
+ * The same works the other way around: if you use EP00 under CAM0 as the
+ * fwnode, you'll get fwnode referring to EP40 under ISP.
+ *
+ * The same example in DT syntax would look like this:
+ *
+ * cam: cam0 {
+ * compatible = "nokia,smia";
+ *
+ * port {
+ * port = <0>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&isp 4 0>;
+ * };
+ * };
+ * };
+ *
+ * isp: isp {
+ * ports {
+ * port@4 {
+ * port = <4>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&cam 0 0>;
+ * };
+ * };
+ * };
+ * };
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
+ struct fwnode_handle *fwnode, const char *prop, unsigned int index,
+ const char * const *props, unsigned int nprops)
+{
+ struct fwnode_reference_args fwnode_args;
+ unsigned int *args = fwnode_args.args;
+ struct fwnode_handle *child;
+ int ret;
+
+ /*
+ * Obtain remote fwnode as well as the integer arguments.
+ *
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return -ENOENT in that case.
+ */
+ ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops,
+ index, &fwnode_args);
+ if (ret)
+ return ERR_PTR(ret == -ENODATA ? -ENOENT : ret);
+
+ /*
+ * Find a node in the tree under the referred fwnode corresponding to
+ * the integer arguments.
+ */
+ fwnode = fwnode_args.fwnode;
+ while (nprops--) {
+ u32 val;
+
+ /* Loop over all child nodes under fwnode. */
+ fwnode_for_each_child_node(fwnode, child) {
+ if (fwnode_property_read_u32(child, *props, &val))
+ continue;
+
+ /* Found property, see if its value matches. */
+ if (val == *args)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+ /* No property found; return an error here. */
+ if (!child) {
+ fwnode = ERR_PTR(-ENOENT);
+ break;
+ }
+
+ props++;
+ args++;
+ fwnode = child;
+ }
+
+ return fwnode;
+}
+
+/*
+ * v4l2_fwnode_reference_parse_int_props - parse references for async
+ * sub-devices
+ * @dev: struct device pointer
+ * @notifier: notifier for @dev
+ * @prop: the name of the property
+ * @props: the array of integer property names
+ * @nprops: the number of integer properties
+ *
+ * Use v4l2_fwnode_reference_get_int_prop to find fwnodes through reference in
+ * property @prop with integer arguments with child nodes matching in properties
+ * @props. Then, set up V4L2 async sub-devices for those fwnodes in the notifier
+ * accordingly.
+ *
+ * While it is technically possible to use this function on DT, it is only
+ * meaningful on ACPI. On Device tree you can refer to any node in the tree but
+ * on ACPI the references are limited to devices.
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwisefailed
+ * -ENOMEM if memory allocation failed
+ */
+static int v4l2_fwnode_reference_parse_int_props(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ const char *prop, const char * const *props, unsigned int nprops)
+{
+ struct fwnode_handle *fwnode;
+ unsigned int index;
+ int ret;
+
+ for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(
+ dev_fwnode(dev), prop, index, props,
+ nprops))); index++)
+ fwnode_handle_put(fwnode);
+
+ /*
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return the error in cases other than that.
+ */
+ if (PTR_ERR(fwnode) != -ENOENT && PTR_ERR(fwnode) != -ENODATA)
+ return PTR_ERR(fwnode);
+
+ ret = v4l2_async_notifier_realloc(notifier,
+ notifier->num_subdevs + index);
+ if (ret)
+ return -ENOMEM;
+
+ for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(
+ dev_fwnode(dev), prop, index, props,
+ nprops))); index++) {
+ struct v4l2_async_subdev *asd;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ asd = kzalloc(sizeof(struct v4l2_async_subdev), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ asd->match.fwnode.fwnode = fwnode;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+ }
+
+ return PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
+
+error:
+ fwnode_handle_put(fwnode);
+ return ret;
+}
+
+int v4l2_async_notifier_parse_fwnode_sensor_common(
+ struct device *dev, struct v4l2_async_notifier *notifier)
+{
+ static const char * const led_props[] = { "led" };
+ static const struct {
+ const char *name;
+ const char * const *props;
+ unsigned int nprops;
+ } props[] = {
+ { "flash-leds", led_props, ARRAY_SIZE(led_props) },
+ { "lens-focus", NULL, 0 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ if (props[i].props && is_acpi_node(dev_fwnode(dev)))
+ ret = v4l2_fwnode_reference_parse_int_props(
+ dev, notifier, props[i].name,
+ props[i].props, props[i].nprops);
+ else
+ ret = v4l2_fwnode_reference_parse(
+ dev, notifier, props[i].name);
+ if (ret && ret != -ENOENT) {
+ dev_warn(dev, "parsing property \"%s\" failed (%d)\n",
+ props[i].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_sensor_common);
+
+int v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier;
+ int ret;
+
+ if (WARN_ON(!sd->dev))
+ return -ENODEV;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ ret = v4l2_async_notifier_parse_fwnode_sensor_common(sd->dev,
+ notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_subdev_notifier_register(sd, notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto out_unregister;
+
+ sd->subdev_notifier = notifier;
+
+ return 0;
+
+out_unregister:
+ v4l2_async_notifier_unregister(notifier);
+
+out_cleanup:
+ v4l2_async_notifier_cleanup(notifier);
+ kfree(notifier);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor_common);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b60a6b0841d1..79614992ee21 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -730,9 +730,12 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
- p->stepwise.min_width, p->stepwise.min_height,
- p->stepwise.step_width, p->stepwise.step_height,
- p->stepwise.max_width, p->stepwise.max_height);
+ p->stepwise.min_width,
+ p->stepwise.min_height,
+ p->stepwise.max_width,
+ p->stepwise.max_height,
+ p->stepwise.step_width,
+ p->stepwise.step_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
/* fall through */
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index f62e68aa04c4..bc580fbe18fa 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -183,6 +183,7 @@ EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
* v4l2_m2m_try_run() - select next job to perform and run it if possible
+ * @m2m_dev: per-device context
*
* Get next transaction (if present) from the waiting jobs list and run it.
*/
@@ -281,6 +282,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/**
* v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ * @m2m_ctx: m2m context with jobs to be canceled
*
* In case of streamoff or release called on any context,
* 1] If the context is currently running, then abort job will be called
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 1dbf6f7785bb..e87fb13b22dc 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -222,7 +222,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
-/**
+/*
* __videobuf_free() - free all the buffers and their control structures
*
* This function can only be called if streaming/reading is off, i.e. no buffers
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 0b5c43f7e020..f412429cf5ba 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+ dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+ dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index cb115ba6a1d2..a8589d96ef72 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -188,7 +188,7 @@ module_param(debug, int, 0644);
static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);
-/**
+/*
* __vb2_buf_mem_alloc() - allocate video memory for the given buffer
*/
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
@@ -229,7 +229,7 @@ free:
return ret;
}
-/**
+/*
* __vb2_buf_mem_free() - free memory of the given buffer
*/
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
@@ -243,7 +243,7 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
}
}
-/**
+/*
* __vb2_buf_userptr_put() - release userspace memory associated with
* a USERPTR buffer
*/
@@ -258,7 +258,7 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
}
}
-/**
+/*
* __vb2_plane_dmabuf_put() - release memory associated with
* a DMABUF shared plane
*/
@@ -277,7 +277,7 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
p->dbuf_mapped = 0;
}
-/**
+/*
* __vb2_buf_dmabuf_put() - release memory associated with
* a DMABUF shared buffer
*/
@@ -289,7 +289,7 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}
-/**
+/*
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* the buffer.
*/
@@ -317,7 +317,7 @@ static void __setup_offsets(struct vb2_buffer *vb)
}
}
-/**
+/*
* __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
* video buffer memory for all buffers/planes on the queue and initializes the
* queue
@@ -386,7 +386,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
return buffer;
}
-/**
+/*
* __vb2_free_mem() - release all video buffer memory for a given queue
*/
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
@@ -410,7 +410,7 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
}
}
-/**
+/*
* __vb2_queue_free() - free buffers at the end of the queue - video memory and
* related information, if no buffers are left return the queue to an
* uninitialized state. Might be called even if the queue has already been freed.
@@ -544,7 +544,7 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
}
EXPORT_SYMBOL(vb2_buffer_in_use);
-/**
+/*
* __buffers_in_use() - return true if any buffers on the queue are in use and
* the queue cannot be freed (by the means of REQBUFS(0)) call
*/
@@ -564,7 +564,7 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);
-/**
+/*
* __verify_userptr_ops() - verify that all memory operations required for
* USERPTR queue type have been provided
*/
@@ -577,7 +577,7 @@ static int __verify_userptr_ops(struct vb2_queue *q)
return 0;
}
-/**
+/*
* __verify_mmap_ops() - verify that all memory operations required for
* MMAP queue type have been provided
*/
@@ -590,7 +590,7 @@ static int __verify_mmap_ops(struct vb2_queue *q)
return 0;
}
-/**
+/*
* __verify_dmabuf_ops() - verify that all memory operations required for
* DMABUF queue type have been provided
*/
@@ -953,7 +953,7 @@ void vb2_discard_done(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_discard_done);
-/**
+/*
* __prepare_mmap() - prepare an MMAP buffer
*/
static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
@@ -966,7 +966,7 @@ static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}
-/**
+/*
* __prepare_userptr() - prepare a USERPTR buffer
*/
static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
@@ -1082,7 +1082,7 @@ err:
return ret;
}
-/**
+/*
* __prepare_dmabuf() - prepare a DMABUF buffer
*/
static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
@@ -1215,7 +1215,7 @@ err:
return ret;
}
-/**
+/*
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
*/
static void __enqueue_in_driver(struct vb2_buffer *vb)
@@ -1298,7 +1298,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
-/**
+/*
* vb2_start_streaming() - Attempt to start streaming.
* @q: videobuf2 queue
*
@@ -1427,7 +1427,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
-/**
+/*
* __vb2_wait_for_done_vb() - wait for a buffer to become available
* for dequeuing
*
@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
return 0;
}
-/**
+/*
* __vb2_get_done_vb() - get a buffer ready for dequeuing
*
* Will sleep if required for nonblocking == false.
@@ -1553,7 +1553,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
-/**
+/*
* __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
*/
static void __vb2_dqbuf(struct vb2_buffer *vb)
@@ -1625,7 +1625,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
-/**
+/*
* __vb2_queue_cancel() - cancel and stop (pause) streaming
*
* Removes all queued buffers from driver's queue and all buffers queued by
@@ -1773,7 +1773,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
-/**
+/*
* __find_plane_by_offset() - find plane associated with the given offset off
*/
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
@@ -2104,7 +2104,7 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
}
EXPORT_SYMBOL_GPL(vb2_core_poll);
-/**
+/*
* struct vb2_fileio_buf - buffer context used by file io emulator
*
* vb2 provides a compatibility layer and emulator of file io (read and
@@ -2118,7 +2118,7 @@ struct vb2_fileio_buf {
unsigned int queued:1;
};
-/**
+/*
* struct vb2_fileio_data - queue context used by file io emulator
*
* @cur_index: the index of the buffer currently being read from or
@@ -2155,7 +2155,7 @@ struct vb2_fileio_data {
unsigned write_immediately:1;
};
-/**
+/*
* __vb2_init_fileio() - initialize file io emulator
* @q: videobuf2 queue
* @read: mode selector (1 means read, 0 means write)
@@ -2274,7 +2274,7 @@ err_kfree:
return ret;
}
-/**
+/*
* __vb2_cleanup_fileio() - free resourced used by file io emulator
* @q: videobuf2 queue
*/
@@ -2293,7 +2293,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
return 0;
}
-/**
+/*
* __vb2_perform_fileio() - perform a single file io (read or write) operation
* @q: videobuf2 queue
* @data: pointed to target userspace buffer
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 9f389f36566d..a9806ba6116d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -479,7 +479,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
{
struct vb2_dc_buf *buf;
struct frame_vector *vec;
- unsigned long offset;
+ unsigned int offset;
int n_pages, i;
int ret = 0;
struct sg_table *sgt;
@@ -507,7 +507,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dev = dev;
buf->dma_dir = dma_dir;
- offset = vaddr & ~PAGE_MASK;
+ offset = lower_32_bits(offset_in_page(vaddr));
vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
dma_dir == DMA_BIDIRECTIONAL);
if (IS_ERR(vec)) {
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 4bb8424114ce..89e51989332b 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -120,7 +120,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
h->put(h->arg);
}
-/**
+/*
* vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
* video buffers
*/
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 0c0669976bdc..4075314a6989 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -49,7 +49,7 @@ module_param(debug, int, 0644);
#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
-/**
+/*
* __verify_planes_array() - verify that the planes array passed in struct
* v4l2_buffer from userspace can be safely used
*/
@@ -78,7 +78,7 @@ static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
return __verify_planes_array(vb, pb);
}
-/**
+/*
* __verify_length() - Verify that the bytesused value for each plane fits in
* the plane length and that the data offset doesn't exceed the bytesused value.
*/
@@ -181,7 +181,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
return __verify_planes_array(q->bufs[b->index], b);
}
-/**
+/*
* __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
* returned to userspace
*/
@@ -286,7 +286,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
q->last_buffer_dequeued = true;
}
-/**
+/*
* __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
* v4l2_buffer by the userspace. It also verifies that struct
* v4l2_buffer has a valid number of planes.
@@ -446,7 +446,7 @@ static const struct vb2_buf_ops v4l2_buf_ops = {
.copy_timestamp = __copy_timestamp,
};
-/**
+/*
* vb2_querybuf() - query video buffer information
* @q: videobuf queue
* @b: buffer struct passed from userspace to vidioc_querybuf handler
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 470b93e3858d..929a601d4cd1 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -9,6 +9,7 @@ endif
obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
+obj-$(CONFIG_ARCH_BRCMSTB) += brcmstb_dpfe.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
new file mode 100644
index 000000000000..0a7bdbed3a6f
--- /dev/null
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -0,0 +1,722 @@
+/*
+ * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs
+ *
+ * Copyright (c) 2017 Broadcom
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+/*
+ * This driver provides access to the DPFE interface of Broadcom STB SoCs.
+ * The firmware running on the DCPU inside the DDR PHY can provide current
+ * information about the system's RAM, for instance the DRAM refresh rate.
+ * This can be used as an indirect indicator for the DRAM's temperature.
+ * Slower refresh rate means cooler RAM, higher refresh rate means hotter
+ * RAM.
+ *
+ * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which
+ * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls.
+ *
+ * Note regarding the loading of the firmware image: we use be32_to_cpu()
+ * and le_32_to_cpu(), so we can support the following four cases:
+ * - LE kernel + LE firmware image (the most common case)
+ * - LE kernel + BE firmware image
+ * - BE kernel + LE firmware image
+ * - BE kernel + BE firmware image
+ *
+ * The DPCU always runs in big endian mode. The firwmare image, however, can
+ * be in either format. Also, communication between host CPU and DCPU is
+ * always in little endian.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#define DRVNAME "brcmstb-dpfe"
+#define FIRMWARE_NAME "dpfe.bin"
+
+/* DCPU register offsets */
+#define REG_DCPU_RESET 0x0
+#define REG_TO_DCPU_MBOX 0x10
+#define REG_TO_HOST_MBOX 0x14
+
+/* Message RAM */
+#define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32))
+
+/* DRAM Info Offsets & Masks */
+#define DRAM_INFO_INTERVAL 0x0
+#define DRAM_INFO_MR4 0x4
+#define DRAM_INFO_ERROR 0x8
+#define DRAM_INFO_MR4_MASK 0xff
+
+/* DRAM MR4 Offsets & Masks */
+#define DRAM_MR4_REFRESH 0x0 /* Refresh rate */
+#define DRAM_MR4_SR_ABORT 0x3 /* Self Refresh Abort */
+#define DRAM_MR4_PPRE 0x4 /* Post-package repair entry/exit */
+#define DRAM_MR4_TH_OFFS 0x5 /* Thermal Offset; vendor specific */
+#define DRAM_MR4_TUF 0x7 /* Temperature Update Flag */
+
+#define DRAM_MR4_REFRESH_MASK 0x7
+#define DRAM_MR4_SR_ABORT_MASK 0x1
+#define DRAM_MR4_PPRE_MASK 0x1
+#define DRAM_MR4_TH_OFFS_MASK 0x3
+#define DRAM_MR4_TUF_MASK 0x1
+
+/* DRAM Vendor Offsets & Masks */
+#define DRAM_VENDOR_MR5 0x0
+#define DRAM_VENDOR_MR6 0x4
+#define DRAM_VENDOR_MR7 0x8
+#define DRAM_VENDOR_MR8 0xc
+#define DRAM_VENDOR_ERROR 0x10
+#define DRAM_VENDOR_MASK 0xff
+
+/* Reset register bits & masks */
+#define DCPU_RESET_SHIFT 0x0
+#define DCPU_RESET_MASK 0x1
+#define DCPU_CLK_DISABLE_SHIFT 0x2
+
+/* DCPU return codes */
+#define DCPU_RET_ERROR_BIT BIT(31)
+#define DCPU_RET_SUCCESS 0x1
+#define DCPU_RET_ERR_HEADER (DCPU_RET_ERROR_BIT | BIT(0))
+#define DCPU_RET_ERR_INVAL (DCPU_RET_ERROR_BIT | BIT(1))
+#define DCPU_RET_ERR_CHKSUM (DCPU_RET_ERROR_BIT | BIT(2))
+#define DCPU_RET_ERR_COMMAND (DCPU_RET_ERROR_BIT | BIT(3))
+/* This error code is not firmware defined and only used in the driver. */
+#define DCPU_RET_ERR_TIMEDOUT (DCPU_RET_ERROR_BIT | BIT(4))
+
+/* Firmware magic */
+#define DPFE_BE_MAGIC 0xfe1010fe
+#define DPFE_LE_MAGIC 0xfe0101fe
+
+/* Error codes */
+#define ERR_INVALID_MAGIC -1
+#define ERR_INVALID_SIZE -2
+#define ERR_INVALID_CHKSUM -3
+
+/* Message types */
+#define DPFE_MSG_TYPE_COMMAND 1
+#define DPFE_MSG_TYPE_RESPONSE 2
+
+#define DELAY_LOOP_MAX 200000
+
+enum dpfe_msg_fields {
+ MSG_HEADER,
+ MSG_COMMAND,
+ MSG_ARG_COUNT,
+ MSG_ARG0,
+ MSG_CHKSUM,
+ MSG_FIELD_MAX /* Last entry */
+};
+
+enum dpfe_commands {
+ DPFE_CMD_GET_INFO,
+ DPFE_CMD_GET_REFRESH,
+ DPFE_CMD_GET_VENDOR,
+ DPFE_CMD_MAX /* Last entry */
+};
+
+struct dpfe_msg {
+ u32 header;
+ u32 command;
+ u32 arg_count;
+ u32 arg0;
+ u32 chksum; /* This is the sum of all other entries. */
+};
+
+/*
+ * Format of the binary firmware file:
+ *
+ * entry
+ * 0 header
+ * value: 0xfe0101fe <== little endian
+ * 0xfe1010fe <== big endian
+ * 1 sequence:
+ * [31:16] total segments on this build
+ * [15:0] this segment sequence.
+ * 2 FW version
+ * 3 IMEM byte size
+ * 4 DMEM byte size
+ * IMEM
+ * DMEM
+ * last checksum ==> sum of everything
+ */
+struct dpfe_firmware_header {
+ u32 magic;
+ u32 sequence;
+ u32 version;
+ u32 imem_size;
+ u32 dmem_size;
+};
+
+/* Things we only need during initialization. */
+struct init_data {
+ unsigned int dmem_len;
+ unsigned int imem_len;
+ unsigned int chksum;
+ bool is_big_endian;
+};
+
+/* Things we need for as long as we are active. */
+struct private_data {
+ void __iomem *regs;
+ void __iomem *dmem;
+ void __iomem *imem;
+ struct device *dev;
+ unsigned int index;
+ struct mutex lock;
+};
+
+static const char *error_text[] = {
+ "Success", "Header code incorrect", "Unknown command or argument",
+ "Incorrect checksum", "Malformed command", "Timed out",
+};
+
+/* List of supported firmware commands */
+static const u32 dpfe_commands[DPFE_CMD_MAX][MSG_FIELD_MAX] = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 1,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ [MSG_CHKSUM] = 4,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ [MSG_CHKSUM] = 5,
+ },
+ [DPFE_CMD_GET_VENDOR] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 2,
+ [MSG_CHKSUM] = 6,
+ },
+};
+
+static bool is_dcpu_enabled(void __iomem *regs)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+
+ return !(val & DCPU_RESET_MASK);
+}
+
+static void __disable_dcpu(void __iomem *regs)
+{
+ u32 val;
+
+ if (!is_dcpu_enabled(regs))
+ return;
+
+ /* Put DCPU in reset if it's running. */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val |= (1 << DCPU_RESET_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+}
+
+static void __enable_dcpu(void __iomem *regs)
+{
+ u32 val;
+
+ /* Clear mailbox registers. */
+ writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
+ writel_relaxed(0, regs + REG_TO_HOST_MBOX);
+
+ /* Disable DCPU clock gating */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+
+ /* Take DCPU out of reset */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val &= ~(1 << DCPU_RESET_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+}
+
+static unsigned int get_msg_chksum(const u32 msg[])
+{
+ unsigned int sum = 0;
+ unsigned int i;
+
+ /* Don't include the last field in the checksum. */
+ for (i = 0; i < MSG_FIELD_MAX - 1; i++)
+ sum += msg[i];
+
+ return sum;
+}
+
+static int __send_command(struct private_data *priv, unsigned int cmd,
+ u32 result[])
+{
+ const u32 *msg = dpfe_commands[cmd];
+ void __iomem *regs = priv->regs;
+ unsigned int i, chksum;
+ int ret = 0;
+ u32 resp;
+
+ if (cmd >= DPFE_CMD_MAX)
+ return -1;
+
+ mutex_lock(&priv->lock);
+
+ /* Write command and arguments to message area */
+ for (i = 0; i < MSG_FIELD_MAX; i++)
+ writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
+
+ /* Tell DCPU there is a command waiting */
+ writel_relaxed(1, regs + REG_TO_DCPU_MBOX);
+
+ /* Wait for DCPU to process the command */
+ for (i = 0; i < DELAY_LOOP_MAX; i++) {
+ /* Read response code */
+ resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
+ if (resp > 0)
+ break;
+ udelay(5);
+ }
+
+ if (i == DELAY_LOOP_MAX) {
+ resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
+ ret = -ffs(resp);
+ } else {
+ /* Read response data */
+ for (i = 0; i < MSG_FIELD_MAX; i++)
+ result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
+ }
+
+ /* Tell DCPU we are done */
+ writel_relaxed(0, regs + REG_TO_HOST_MBOX);
+
+ mutex_unlock(&priv->lock);
+
+ if (ret)
+ return ret;
+
+ /* Verify response */
+ chksum = get_msg_chksum(result);
+ if (chksum != result[MSG_CHKSUM])
+ resp = DCPU_RET_ERR_CHKSUM;
+
+ if (resp != DCPU_RET_SUCCESS) {
+ resp &= ~DCPU_RET_ERROR_BIT;
+ ret = -ffs(resp);
+ }
+
+ return ret;
+}
+
+/* Ensure that the firmware file loaded meets all the requirements. */
+static int __verify_firmware(struct init_data *init,
+ const struct firmware *fw)
+{
+ const struct dpfe_firmware_header *header = (void *)fw->data;
+ unsigned int dmem_size, imem_size, total_size;
+ bool is_big_endian = false;
+ const u32 *chksum_ptr;
+
+ if (header->magic == DPFE_BE_MAGIC)
+ is_big_endian = true;
+ else if (header->magic != DPFE_LE_MAGIC)
+ return ERR_INVALID_MAGIC;
+
+ if (is_big_endian) {
+ dmem_size = be32_to_cpu(header->dmem_size);
+ imem_size = be32_to_cpu(header->imem_size);
+ } else {
+ dmem_size = le32_to_cpu(header->dmem_size);
+ imem_size = le32_to_cpu(header->imem_size);
+ }
+
+ /* Data and instruction sections are 32 bit words. */
+ if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
+ return ERR_INVALID_SIZE;
+
+ /*
+ * The header + the data section + the instruction section + the
+ * checksum must be equal to the total firmware size.
+ */
+ total_size = dmem_size + imem_size + sizeof(*header) +
+ sizeof(*chksum_ptr);
+ if (total_size != fw->size)
+ return ERR_INVALID_SIZE;
+
+ /* The checksum comes at the very end. */
+ chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;
+
+ init->is_big_endian = is_big_endian;
+ init->dmem_len = dmem_size;
+ init->imem_len = imem_size;
+ init->chksum = (is_big_endian)
+ ? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);
+
+ return 0;
+}
+
+/* Verify checksum by reading back the firmware from co-processor RAM. */
+static int __verify_fw_checksum(struct init_data *init,
+ struct private_data *priv,
+ const struct dpfe_firmware_header *header,
+ u32 checksum)
+{
+ u32 magic, sequence, version, sum;
+ u32 __iomem *dmem = priv->dmem;
+ u32 __iomem *imem = priv->imem;
+ unsigned int i;
+
+ if (init->is_big_endian) {
+ magic = be32_to_cpu(header->magic);
+ sequence = be32_to_cpu(header->sequence);
+ version = be32_to_cpu(header->version);
+ } else {
+ magic = le32_to_cpu(header->magic);
+ sequence = le32_to_cpu(header->sequence);
+ version = le32_to_cpu(header->version);
+ }
+
+ sum = magic + sequence + version + init->dmem_len + init->imem_len;
+
+ for (i = 0; i < init->dmem_len / sizeof(u32); i++)
+ sum += readl_relaxed(dmem + i);
+
+ for (i = 0; i < init->imem_len / sizeof(u32); i++)
+ sum += readl_relaxed(imem + i);
+
+ return (sum == checksum) ? 0 : -1;
+}
+
+static int __write_firmware(u32 __iomem *mem, const u32 *fw,
+ unsigned int size, bool is_big_endian)
+{
+ unsigned int i;
+
+ /* Convert size to 32-bit words. */
+ size /= sizeof(u32);
+
+ /* It is recommended to clear the firmware area first. */
+ for (i = 0; i < size; i++)
+ writel_relaxed(0, mem + i);
+
+ /* Now copy it. */
+ if (is_big_endian) {
+ for (i = 0; i < size; i++)
+ writel_relaxed(be32_to_cpu(fw[i]), mem + i);
+ } else {
+ for (i = 0; i < size; i++)
+ writel_relaxed(le32_to_cpu(fw[i]), mem + i);
+ }
+
+ return 0;
+}
+
+static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
+ struct init_data *init)
+{
+ const struct dpfe_firmware_header *header;
+ unsigned int dmem_size, imem_size;
+ struct device *dev = &pdev->dev;
+ bool is_big_endian = false;
+ struct private_data *priv;
+ const struct firmware *fw;
+ const u32 *dmem, *imem;
+ const void *fw_blob;
+ int ret;
+
+ priv = platform_get_drvdata(pdev);
+
+ /*
+ * Skip downloading the firmware if the DCPU is already running and
+ * responding to commands.
+ */
+ if (is_dcpu_enabled(priv->regs)) {
+ u32 response[MSG_FIELD_MAX];
+
+ ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
+ if (!ret)
+ return 0;
+ }
+
+ ret = request_firmware(&fw, FIRMWARE_NAME, dev);
+ /* request_firmware() prints its own error messages. */
+ if (ret)
+ return ret;
+
+ ret = __verify_firmware(init, fw);
+ if (ret)
+ return -EFAULT;
+
+ __disable_dcpu(priv->regs);
+
+ is_big_endian = init->is_big_endian;
+ dmem_size = init->dmem_len;
+ imem_size = init->imem_len;
+
+ /* At the beginning of the firmware blob is a header. */
+ header = (struct dpfe_firmware_header *)fw->data;
+ /* Void pointer to the beginning of the actual firmware. */
+ fw_blob = fw->data + sizeof(*header);
+ /* IMEM comes right after the header. */
+ imem = fw_blob;
+ /* DMEM follows after IMEM. */
+ dmem = fw_blob + imem_size;
+
+ ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
+ if (ret)
+ return ret;
+ ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
+ if (ret)
+ return ret;
+
+ ret = __verify_fw_checksum(init, priv, header, init->chksum);
+ if (ret)
+ return ret;
+
+ __enable_dcpu(priv->regs);
+
+ return 0;
+}
+
+static ssize_t generic_show(unsigned int command, u32 response[],
+ struct device *dev, char *buf)
+{
+ struct private_data *priv;
+ int ret;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return sprintf(buf, "ERROR: driver private data not set\n");
+
+ ret = __send_command(priv, command, response);
+ if (ret < 0)
+ return sprintf(buf, "ERROR: %s\n", error_text[-ret]);
+
+ return 0;
+}
+
+static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ unsigned int info;
+ int ret;
+
+ ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
+ if (ret)
+ return ret;
+
+ info = response[MSG_ARG0];
+
+ return sprintf(buf, "%u.%u.%u.%u\n",
+ (info >> 24) & 0xff,
+ (info >> 16) & 0xff,
+ (info >> 8) & 0xff,
+ info & 0xff);
+}
+
+static ssize_t show_refresh(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ void __iomem *info;
+ struct private_data *priv;
+ unsigned int offset;
+ u8 refresh, sr_abort, ppre, thermal_offs, tuf;
+ u32 mr4;
+ int ret;
+
+ ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
+ if (ret)
+ return ret;
+
+ priv = dev_get_drvdata(dev);
+ offset = response[MSG_ARG0];
+ info = priv->dmem + offset;
+
+ mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;
+
+ refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
+ sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
+ ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
+ thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
+ tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
+ readl_relaxed(info + DRAM_INFO_INTERVAL),
+ refresh, sr_abort, ppre, thermal_offs, tuf,
+ readl_relaxed(info + DRAM_INFO_ERROR));
+}
+
+static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct private_data *priv;
+ void __iomem *info;
+ unsigned int offset;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ priv = dev_get_drvdata(dev);
+
+ ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
+ if (ret)
+ return ret;
+
+ offset = response[MSG_ARG0];
+ info = priv->dmem + offset;
+ writel_relaxed(val, info + DRAM_INFO_INTERVAL);
+
+ return count;
+}
+
+static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct private_data *priv;
+ void __iomem *info;
+ unsigned int offset;
+ int ret;
+
+ ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
+ if (ret)
+ return ret;
+
+ offset = response[MSG_ARG0];
+ priv = dev_get_drvdata(dev);
+ info = priv->dmem + offset;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x\n",
+ readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_ERROR));
+}
+
+static int brcmstb_dpfe_resume(struct platform_device *pdev)
+{
+ struct init_data init;
+
+ return brcmstb_dpfe_download_firmware(pdev, &init);
+}
+
+static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
+static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
+static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
+static struct attribute *dpfe_attrs[] = {
+ &dev_attr_dpfe_info.attr,
+ &dev_attr_dpfe_refresh.attr,
+ &dev_attr_dpfe_vendor.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(dpfe);
+
+static int brcmstb_dpfe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct private_data *priv;
+ struct device *dpfe_dev;
+ struct init_data init;
+ struct resource *res;
+ u32 index;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ /* Cell index is optional; default to 0 if not present. */
+ ret = of_property_read_u32(dev->of_node, "cell-index", &index);
+ if (ret)
+ index = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu");
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "couldn't map DCPU registers\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem");
+ priv->dmem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->dmem)) {
+ dev_err(dev, "Couldn't map DCPU data memory\n");
+ return -ENOENT;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem");
+ priv->imem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->imem)) {
+ dev_err(dev, "Couldn't map DCPU instruction memory\n");
+ return -ENOENT;
+ }
+
+ ret = brcmstb_dpfe_download_firmware(pdev, &init);
+ if (ret)
+ goto err;
+
+ dpfe_dev = devm_kzalloc(dev, sizeof(*dpfe_dev), GFP_KERNEL);
+ if (!dpfe_dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ priv->dev = dpfe_dev;
+ priv->index = index;
+
+ dpfe_dev->parent = dev;
+ dpfe_dev->groups = dpfe_groups;
+ dpfe_dev->of_node = dev->of_node;
+ dev_set_drvdata(dpfe_dev, priv);
+ dev_set_name(dpfe_dev, "dpfe%u", index);
+
+ ret = device_register(dpfe_dev);
+ if (ret)
+ goto err;
+
+ dev_info(dev, "registered.\n");
+
+ return 0;
+
+err:
+ dev_err(dev, "failed to initialize -- error %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id brcmstb_dpfe_of_match[] = {
+ { .compatible = "brcm,dpfe-cpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);
+
+static struct platform_driver brcmstb_dpfe_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = brcmstb_dpfe_of_match,
+ },
+ .probe = brcmstb_dpfe_probe,
+ .resume = brcmstb_dpfe_resume,
+};
+
+module_platform_driver(brcmstb_dpfe_driver);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 7059bbda2fac..a385a35c7de9 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1075,11 +1075,33 @@ int gpmc_configure(int cmd, int wval)
}
EXPORT_SYMBOL(gpmc_configure);
-void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
+static bool gpmc_nand_writebuffer_empty(void)
+{
+ if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
+ return true;
+
+ return false;
+}
+
+static struct gpmc_nand_ops nand_ops = {
+ .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
+};
+
+/**
+ * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
+ * @regs: the GPMC NAND register map exclusive for NAND use.
+ * @cs: GPMC chip select number on which the NAND sits. The
+ * register map returned will be specific to this chip select.
+ *
+ * Returns NULL on error e.g. invalid cs.
+ */
+struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
{
int i;
- reg->gpmc_status = NULL; /* deprecated */
+ if (cs >= gpmc_cs_num)
+ return NULL;
+
reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
@@ -1111,34 +1133,6 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
i * GPMC_BCH_SIZE;
}
-}
-
-static bool gpmc_nand_writebuffer_empty(void)
-{
- if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
- return true;
-
- return false;
-}
-
-static struct gpmc_nand_ops nand_ops = {
- .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
-};
-
-/**
- * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
- * @regs: the GPMC NAND register map exclusive for NAND use.
- * @cs: GPMC chip select number on which the NAND sits. The
- * register map returned will be specific to this chip select.
- *
- * Returns NULL on error e.g. invalid cs.
- */
-struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
-{
- if (cs >= gpmc_cs_num)
- return NULL;
-
- gpmc_update_nand_reg(reg, cs);
return &nand_ops;
}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 22de7f5ed032..57b13dfbd21e 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1492,9 +1492,9 @@ static int msb_ftl_scan(struct msb_data *msb)
return 0;
}
-static void msb_cache_flush_timer(unsigned long data)
+static void msb_cache_flush_timer(struct timer_list *t)
{
- struct msb_data *msb = (struct msb_data *)data;
+ struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
msb->need_flush_cache = true;
queue_work(msb->io_queue, &msb->io_work);
}
@@ -1514,8 +1514,7 @@ static void msb_cache_discard(struct msb_data *msb)
static int msb_cache_init(struct msb_data *msb)
{
- setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
- (unsigned long)msb);
+ timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
if (!msb->cache)
msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ac5ad6d0837c..1d20a800e967 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -510,6 +510,19 @@ config INTEL_SOC_PMIC_CHTWC
available before any devices using it are probed. This option also
causes the designware-i2c driver to be builtin for the same reason.
+config INTEL_SOC_PMIC_CHTDC_TI
+ tristate "Support for Intel Cherry Trail Dollar Cove TI PMIC"
+ depends on GPIOLIB
+ depends on I2C
+ depends on ACPI
+ depends on X86
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Select this option for supporting Dollar Cove (TI version) PMIC
+ device that is found on some Intel Cherry Trail systems.
+
config MFD_INTEL_LPSS
tristate
select COMMON_CLK
@@ -1057,6 +1070,22 @@ config MFD_SMSC
To compile this driver as a module, choose M here: the
module will be called smsc.
+config MFD_SC27XX_PMIC
+ tristate "Spreadtrum SC27xx PMICs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on SPI_MASTER
+ select MFD_CORE
+ select REGMAP_SPI
+ select REGMAP_IRQ
+ help
+ This enables support for the Spreadtrum SC27xx PMICs with SPI
+ interface. The SC27xx series PMICs integrate power management,
+ audio codec, battery management and user interface support
+ function (such as RTC, Typec, indicator and so on) in a single chip.
+
+ This driver provides common support for accessing the SC27xx PMICs,
+ and it also adds the irq_chip parts for handling the PMIC chip events.
+
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
default y if ARCH_U300 || ARCH_U8500 || COMPILE_TEST
@@ -1338,7 +1367,7 @@ config MFD_TPS65090
config MFD_TPS65217
tristate "TI TPS65217 Power Management / White LED chips"
- depends on I2C
+ depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select IRQ_DOMAIN
@@ -1400,7 +1429,7 @@ config MFD_TI_LP87565
config MFD_TPS65218
tristate "TI TPS65218 Power Management chips"
- depends on I2C
+ depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1408,8 +1437,7 @@ config MFD_TPS65218
If you say yes here you get support for the TPS65218 series of
Power Management chips.
These include voltage regulators, gpio and other features
- that are often used in portable devices. Only regulator
- component is currently supported.
+ that are often used in portable devices.
This driver can also be built as a module. If so, the module
will be called tps65218.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0235e6766bc1..d9474ade32e6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -220,6 +220,7 @@ intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
+obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
@@ -228,3 +229,4 @@ obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o
obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
+obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 336de66ca408..2468b431bb22 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -876,6 +876,8 @@ static struct mfd_cell axp813_cells[] = {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
+ }, {
+ .name = "axp20x-regulator",
}
};
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index c9714072e224..59c82cdcf48d 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -377,6 +377,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
u8 *ptr;
u8 *rx_buf;
u8 sum;
+ u8 rx_byte;
int ret = 0, final_ret;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
@@ -421,25 +422,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret) {
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
- switch (rx_buf[i]) {
- case EC_SPI_PAST_END:
- case EC_SPI_RX_BAD_DATA:
- case EC_SPI_NOT_READY:
- ret = -EAGAIN;
- ec_msg->result = EC_RES_IN_PROGRESS;
- default:
+ rx_byte = rx_buf[i];
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EREMOTEIO;
break;
}
- if (ret)
- break;
}
- if (!ret)
- ret = cros_ec_spi_receive_packet(ec_dev,
- ec_msg->insize + sizeof(*response));
- } else {
- dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
}
+ if (!ret)
+ ret = cros_ec_spi_receive_packet(ec_dev,
+ ec_msg->insize + sizeof(*response));
+ else
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
final_ret = terminate_request(ec_dev);
spi_bus_unlock(ec_spi->spi->master);
@@ -508,6 +506,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
int i, len;
u8 *ptr;
u8 *rx_buf;
+ u8 rx_byte;
int sum;
int ret = 0, final_ret;
@@ -544,25 +543,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret) {
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
- switch (rx_buf[i]) {
- case EC_SPI_PAST_END:
- case EC_SPI_RX_BAD_DATA:
- case EC_SPI_NOT_READY:
- ret = -EAGAIN;
- ec_msg->result = EC_RES_IN_PROGRESS;
- default:
+ rx_byte = rx_buf[i];
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EREMOTEIO;
break;
}
- if (ret)
- break;
}
- if (!ret)
- ret = cros_ec_spi_receive_response(ec_dev,
- ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
- } else {
- dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
}
+ if (!ret)
+ ret = cros_ec_spi_receive_response(ec_dev,
+ ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+ else
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
final_ret = terminate_request(ec_dev);
spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +663,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
sizeof(struct ec_response_get_protocol_info);
ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_spi->last_transfer_ns = ktime_get_ns();
err = cros_ec_register(ec_dev);
if (err) {
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index b3767c3141e5..dbb85caaafed 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -84,8 +84,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
return -ENOMEM;
}
- irq_set_chained_handler(irq, mx25_tsadc_irq_handler);
- irq_set_handler_data(irq, tsadc);
+ irq_set_chained_handler_and_data(irq, mx25_tsadc_irq_handler, tsadc);
return 0;
}
@@ -180,6 +179,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
return devm_of_platform_populate(dev);
}
+static int mx25_tsadc_remove(struct platform_device *pdev)
+{
+ struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq) {
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(tsadc->domain);
+ }
+
+ return 0;
+}
+
static const struct of_device_id mx25_tsadc_ids[] = {
{ .compatible = "fsl,imx25-tsadc" },
{ /* Sentinel */ }
@@ -192,6 +204,7 @@ static struct platform_driver mx25_tsadc_driver = {
.of_match_table = of_match_ptr(mx25_tsadc_ids),
},
.probe = mx25_tsadc_probe,
+ .remove = mx25_tsadc_remove,
};
module_platform_driver(mx25_tsadc_driver);
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
new file mode 100644
index 000000000000..861277c6580a
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -0,0 +1,184 @@
+/*
+ * Device access for Dollar Cove TI PMIC
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
+ *
+ * Cleanup and forward-ported
+ * Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define CHTDC_TI_IRQLVL1 0x01
+#define CHTDC_TI_MASK_IRQLVL1 0x02
+
+/* Level 1 IRQs */
+enum {
+ CHTDC_TI_PWRBTN = 0, /* power button */
+ CHTDC_TI_DIETMPWARN, /* thermal */
+ CHTDC_TI_ADCCMPL, /* ADC */
+ /* No IRQ 3 */
+ CHTDC_TI_VBATLOW = 4, /* battery */
+ CHTDC_TI_VBUSDET, /* power source */
+ /* No IRQ 6 */
+ CHTDC_TI_CCEOCAL = 7, /* battery */
+};
+
+static struct resource power_button_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_PWRBTN),
+};
+
+static struct resource thermal_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_DIETMPWARN),
+};
+
+static struct resource adc_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_ADCCMPL),
+};
+
+static struct resource pwrsrc_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_VBUSDET),
+};
+
+static struct resource battery_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_VBATLOW),
+ DEFINE_RES_IRQ(CHTDC_TI_CCEOCAL),
+};
+
+static struct mfd_cell chtdc_ti_dev[] = {
+ {
+ .name = "chtdc_ti_pwrbtn",
+ .num_resources = ARRAY_SIZE(power_button_resources),
+ .resources = power_button_resources,
+ }, {
+ .name = "chtdc_ti_adc",
+ .num_resources = ARRAY_SIZE(adc_resources),
+ .resources = adc_resources,
+ }, {
+ .name = "chtdc_ti_thermal",
+ .num_resources = ARRAY_SIZE(thermal_resources),
+ .resources = thermal_resources,
+ }, {
+ .name = "chtdc_ti_pwrsrc",
+ .num_resources = ARRAY_SIZE(pwrsrc_resources),
+ .resources = pwrsrc_resources,
+ }, {
+ .name = "chtdc_ti_battery",
+ .num_resources = ARRAY_SIZE(battery_resources),
+ .resources = battery_resources,
+ },
+ { .name = "chtdc_ti_region", },
+};
+
+static const struct regmap_config chtdc_ti_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 128,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_irq chtdc_ti_irqs[] = {
+ REGMAP_IRQ_REG(CHTDC_TI_PWRBTN, 0, BIT(CHTDC_TI_PWRBTN)),
+ REGMAP_IRQ_REG(CHTDC_TI_DIETMPWARN, 0, BIT(CHTDC_TI_DIETMPWARN)),
+ REGMAP_IRQ_REG(CHTDC_TI_ADCCMPL, 0, BIT(CHTDC_TI_ADCCMPL)),
+ REGMAP_IRQ_REG(CHTDC_TI_VBATLOW, 0, BIT(CHTDC_TI_VBATLOW)),
+ REGMAP_IRQ_REG(CHTDC_TI_VBUSDET, 0, BIT(CHTDC_TI_VBUSDET)),
+ REGMAP_IRQ_REG(CHTDC_TI_CCEOCAL, 0, BIT(CHTDC_TI_CCEOCAL)),
+};
+
+static const struct regmap_irq_chip chtdc_ti_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .irqs = chtdc_ti_irqs,
+ .num_irqs = ARRAY_SIZE(chtdc_ti_irqs),
+ .num_regs = 1,
+ .status_base = CHTDC_TI_IRQLVL1,
+ .mask_base = CHTDC_TI_MASK_IRQLVL1,
+ .ack_base = CHTDC_TI_IRQLVL1,
+};
+
+static int chtdc_ti_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct intel_soc_pmic *pmic;
+ int ret;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, pmic);
+
+ pmic->regmap = devm_regmap_init_i2c(i2c, &chtdc_ti_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+ pmic->irq = i2c->irq;
+
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
+ IRQF_ONESHOT, 0,
+ &chtdc_ti_irq_chip,
+ &pmic->irq_chip_data);
+ if (ret)
+ return ret;
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, chtdc_ti_dev,
+ ARRAY_SIZE(chtdc_ti_dev), NULL, 0,
+ regmap_irq_get_domain(pmic->irq_chip_data));
+}
+
+static void chtdc_ti_shutdown(struct i2c_client *i2c)
+{
+ struct intel_soc_pmic *pmic = i2c_get_clientdata(i2c);
+
+ disable_irq(pmic->irq);
+}
+
+static int __maybe_unused chtdc_ti_suspend(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ disable_irq(pmic->irq);
+
+ return 0;
+}
+
+static int __maybe_unused chtdc_ti_resume(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ enable_irq(pmic->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(chtdc_ti_pm_ops, chtdc_ti_suspend, chtdc_ti_resume);
+
+static const struct acpi_device_id chtdc_ti_acpi_ids[] = {
+ { "INT33F5" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, chtdc_ti_acpi_ids);
+
+static struct i2c_driver chtdc_ti_i2c_driver = {
+ .driver = {
+ .name = "intel_soc_pmic_chtdc_ti",
+ .pm = &chtdc_ti_pm_ops,
+ .acpi_match_table = chtdc_ti_acpi_ids,
+ },
+ .probe_new = chtdc_ti_probe,
+ .shutdown = chtdc_ti_shutdown,
+};
+module_i2c_driver(chtdc_ti_i2c_driver);
+
+MODULE_DESCRIPTION("I2C driver for Intel SoC Dollar Cove TI PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 450ae36645aa..cf1120abbf52 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.name = "Avoton SoC",
.iTCO_version = 3,
.gpio_version = AVOTON_GPIO,
+ .spi_type = INTEL_SPI_BYT,
},
[LPC_BAYTRAIL] = {
.name = "Bay Trail SoC",
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 662ae0d9e334..1c05ea0cba61 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -48,7 +48,10 @@ static const struct mfd_cell max77693_devs[] = {
.name = "max77693-charger",
.of_compatible = "maxim,max77693-charger",
},
- { .name = "max77693-muic", },
+ {
+ .name = "max77693-muic",
+ .of_compatible = "maxim,max77693-muic",
+ },
{
.name = "max77693-haptic",
.of_compatible = "maxim,max77693-haptic",
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index 630bd19b2c0a..98e732a7ae96 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -196,8 +196,10 @@ static int mxs_lradc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lradc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
+ if (!res) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
switch (lradc->soc) {
case IMX23_LRADC:
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 40f8bb14fc59..7fcf37ba922c 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -103,8 +103,64 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
}
+static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+ u32 lval;
+
+ if (CHK_PCI_PID(pcr, PID_524A))
+ rtsx_pci_read_config_dword(pcr,
+ PCR_ASPM_SETTING_REG1, &lval);
+ else
+ rtsx_pci_read_config_dword(pcr,
+ PCR_ASPM_SETTING_REG2, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+}
+
+static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+
+ return 0;
+}
+
static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ rts5249_init_from_cfg(pcr);
+ rts5249_init_from_hw(pcr);
+
rtsx_pci_init_cmd(pcr);
/* Rest L1SUB Config */
@@ -125,7 +181,18 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
- return rtsx_pci_send_cmd(pcr, 100);
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
}
static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
@@ -285,6 +352,31 @@ static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_send_cmd(pcr, 100);
}
+static void rts5249_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr,
+ pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
static const struct pcr_ops rts5249_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
@@ -297,6 +389,7 @@ static const struct pcr_ops rts5249_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_aspm = rts5249_set_aspm,
};
/* SD Pull Control Enable:
@@ -353,6 +446,8 @@ static const u32 rts5249_ms_pull_ctl_disable_tbl[] = {
void rts5249_init_params(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &(pcr->option);
+
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->ops = &rts5249_pcr_ops;
@@ -372,6 +467,20 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = PM_CTRL3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* Init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
}
static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
@@ -459,6 +568,40 @@ static int rts524a_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
+static void rts5250_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* Run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* L1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
static const struct pcr_ops rts524a_pcr_ops = {
.write_phy = rts524a_write_phy,
.read_phy = rts524a_read_phy,
@@ -473,11 +616,16 @@ static const struct pcr_ops rts524a_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+ .set_aspm = rts5249_set_aspm,
};
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ pcr->option.ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts524a_pcr_ops;
@@ -576,11 +724,16 @@ static const struct pcr_ops rts525a_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rts525a_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+ .set_aspm = rts5249_set_aspm,
};
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ pcr->option.ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 3cf69e5c5703..590fb9aad77d 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -79,6 +79,96 @@ static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
0xFC, 0);
}
+int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+ rtsx_pci_write_register(pcr, MSGTXDATA0,
+ MASK_8_BIT_DEF, (u8) (latency & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA1,
+ MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA2,
+ MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA3,
+ MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
+ rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
+ LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
+
+ return 0;
+}
+
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+ if (pcr->ops->set_ltr_latency)
+ return pcr->ops->set_ltr_latency(pcr, latency);
+ else
+ return rtsx_comm_set_ltr_latency(pcr, latency);
+}
+
+static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ rtsx_pci_enable_aspm(pcr);
+ else
+ rtsx_pci_disable_aspm(pcr);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK;
+ u8 val = 0;
+
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
+static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->set_aspm)
+ pcr->ops->set_aspm(pcr, false);
+ else
+ rtsx_comm_set_aspm(pcr, false);
+}
+
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
+{
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
+
+ return 0;
+}
+
+void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
+{
+ if (pcr->ops->set_l1off_cfg_sub_d0)
+ pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
+}
+
+static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_disable_aspm(pcr);
+
+ if (option->ltr_enabled)
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+
+ if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+ rtsx_set_l1off_sub_cfg_d0(pcr, 1);
+}
+
+void rtsx_pm_full_on(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->full_on)
+ pcr->ops->full_on(pcr);
+ else
+ rtsx_comm_pm_full_on(pcr);
+}
+
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
/* If pci device removed, don't queue idle work any more */
@@ -89,9 +179,7 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
pcr->state = PDEV_STAT_RUN;
if (pcr->ops->enable_auto_blink)
pcr->ops->enable_auto_blink(pcr);
-
- if (pcr->aspm_en)
- rtsx_pci_disable_aspm(pcr);
+ rtsx_pm_full_on(pcr);
}
mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
@@ -958,6 +1046,41 @@ static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
return 0;
}
+static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->set_aspm)
+ pcr->ops->set_aspm(pcr, true);
+ else
+ rtsx_comm_set_aspm(pcr, true);
+}
+
+static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ltr_enabled) {
+ u32 latency = option->ltr_l1off_latency;
+
+ if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
+ mdelay(option->l1_snooze_delay);
+
+ rtsx_set_ltr_latency(pcr, latency);
+ }
+
+ if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+ rtsx_set_l1off_sub_cfg_d0(pcr, 0);
+
+ rtsx_enable_aspm(pcr);
+}
+
+void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->power_saving)
+ pcr->ops->power_saving(pcr);
+ else
+ rtsx_comm_pm_power_saving(pcr);
+}
+
static void rtsx_pci_idle_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -974,8 +1097,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
- if (pcr->aspm_en)
- rtsx_pci_enable_aspm(pcr);
+ rtsx_pm_power_saving(pcr);
mutex_unlock(&pcr->pcr_mutex);
}
@@ -1063,6 +1185,16 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
if (err < 0)
return err;
+ switch (PCI_PID(pcr)) {
+ case PID_5250:
+ case PID_524A:
+ case PID_525A:
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
+ break;
+ default:
+ break;
+ }
+
/* Enable clk_request_n to enable clock power management */
rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
/* Enter L1 when host tx idle */
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 931d1ae3ce32..ec784e04fe20 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -32,6 +32,18 @@
#define RTS524A_PME_FORCE_CTL 0xFF78
#define RTS524A_PM_CTRL3 0xFF7E
+#define LTR_ACTIVE_LATENCY_DEF 0x883C
+#define LTR_IDLE_LATENCY_DEF 0x892C
+#define LTR_L1OFF_LATENCY_DEF 0x9003
+#define L1_SNOOZE_DELAY_DEF 1
+#define LTR_L1OFF_SSPWRGATE_5249_DEF 0xAF
+#define LTR_L1OFF_SSPWRGATE_5250_DEF 0xFF
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF 0xAC
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF 0xF8
+#define CMD_TIMEOUT_DEF 100
+#define ASPM_MASK_NEG 0xFC
+#define MASK_8_BIT_DEF 0xFF
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -85,5 +97,7 @@ do { \
/* generic operations */
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index 691dab791f7a..59d61b04c197 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -40,9 +40,9 @@ static const struct mfd_cell rtsx_usb_cells[] = {
},
};
-static void rtsx_usb_sg_timed_out(unsigned long data)
+static void rtsx_usb_sg_timed_out(struct timer_list *t)
{
- struct rtsx_ucr *ucr = (struct rtsx_ucr *)data;
+ struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
@@ -663,7 +663,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
goto out_init_fail;
/* initialize USB SG transfer timer */
- setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
+ timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
ARRAY_SIZE(rtsx_usb_cells));
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
new file mode 100644
index 000000000000..56a4782f0569
--- /dev/null
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#define SPRD_PMIC_INT_MASK_STATUS 0x0
+#define SPRD_PMIC_INT_RAW_STATUS 0x4
+#define SPRD_PMIC_INT_EN 0x8
+
+#define SPRD_SC2731_IRQ_BASE 0x140
+#define SPRD_SC2731_IRQ_NUMS 16
+
+struct sprd_pmic {
+ struct regmap *regmap;
+ struct device *dev;
+ struct regmap_irq *irqs;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+ int irq;
+};
+
+struct sprd_pmic_data {
+ u32 irq_base;
+ u32 num_irqs;
+};
+
+/*
+ * Since different PMICs of SC27xx series can have different interrupt
+ * base address and irq number, we should save irq number and irq base
+ * in the device data structure.
+ */
+static const struct sprd_pmic_data sc2731_data = {
+ .irq_base = SPRD_SC2731_IRQ_BASE,
+ .num_irqs = SPRD_SC2731_IRQ_NUMS,
+};
+
+static const struct mfd_cell sprd_pmic_devs[] = {
+ {
+ .name = "sc27xx-wdt",
+ .of_compatible = "sprd,sc27xx-wdt",
+ }, {
+ .name = "sc27xx-rtc",
+ .of_compatible = "sprd,sc27xx-rtc",
+ }, {
+ .name = "sc27xx-charger",
+ .of_compatible = "sprd,sc27xx-charger",
+ }, {
+ .name = "sc27xx-chg-timer",
+ .of_compatible = "sprd,sc27xx-chg-timer",
+ }, {
+ .name = "sc27xx-fast-chg",
+ .of_compatible = "sprd,sc27xx-fast-chg",
+ }, {
+ .name = "sc27xx-chg-wdt",
+ .of_compatible = "sprd,sc27xx-chg-wdt",
+ }, {
+ .name = "sc27xx-typec",
+ .of_compatible = "sprd,sc27xx-typec",
+ }, {
+ .name = "sc27xx-flash",
+ .of_compatible = "sprd,sc27xx-flash",
+ }, {
+ .name = "sc27xx-eic",
+ .of_compatible = "sprd,sc27xx-eic",
+ }, {
+ .name = "sc27xx-efuse",
+ .of_compatible = "sprd,sc27xx-efuse",
+ }, {
+ .name = "sc27xx-thermal",
+ .of_compatible = "sprd,sc27xx-thermal",
+ }, {
+ .name = "sc27xx-adc",
+ .of_compatible = "sprd,sc27xx-adc",
+ }, {
+ .name = "sc27xx-audio-codec",
+ .of_compatible = "sprd,sc27xx-audio-codec",
+ }, {
+ .name = "sc27xx-regulator",
+ .of_compatible = "sprd,sc27xx-regulator",
+ }, {
+ .name = "sc27xx-vibrator",
+ .of_compatible = "sprd,sc27xx-vibrator",
+ }, {
+ .name = "sc27xx-keypad-led",
+ .of_compatible = "sprd,sc27xx-keypad-led",
+ }, {
+ .name = "sc27xx-bltc",
+ .of_compatible = "sprd,sc27xx-bltc",
+ }, {
+ .name = "sc27xx-fgu",
+ .of_compatible = "sprd,sc27xx-fgu",
+ }, {
+ .name = "sc27xx-7sreset",
+ .of_compatible = "sprd,sc27xx-7sreset",
+ }, {
+ .name = "sc27xx-poweroff",
+ .of_compatible = "sprd,sc27xx-poweroff",
+ },
+};
+
+static int sprd_pmic_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static int sprd_pmic_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u32 rx_buf[2] = { 0 };
+ int ret;
+
+ /* Now we only support one PMIC register to read every time. */
+ if (reg_size != sizeof(u32) || val_size != sizeof(u32))
+ return -EINVAL;
+
+ /* Copy address to read from into first element of SPI buffer. */
+ memcpy(rx_buf, reg, sizeof(u32));
+ ret = spi_read(spi, rx_buf, 1);
+ if (ret < 0)
+ return ret;
+
+ memcpy(val, rx_buf, val_size);
+ return 0;
+}
+
+static struct regmap_bus sprd_pmic_regmap = {
+ .write = sprd_pmic_spi_write,
+ .read = sprd_pmic_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static const struct regmap_config sprd_pmic_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xffff,
+};
+
+static int sprd_pmic_probe(struct spi_device *spi)
+{
+ struct sprd_pmic *ddata;
+ const struct sprd_pmic_data *pdata;
+ int ret, i;
+
+ pdata = of_device_get_match_data(&spi->dev);
+ if (!pdata) {
+ dev_err(&spi->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->regmap = devm_regmap_init(&spi->dev, &sprd_pmic_regmap,
+ &spi->dev, &sprd_pmic_config);
+ if (IS_ERR(ddata->regmap)) {
+ ret = PTR_ERR(ddata->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map %d\n", ret);
+ return ret;
+ }
+
+ spi_set_drvdata(spi, ddata);
+ ddata->dev = &spi->dev;
+ ddata->irq = spi->irq;
+
+ ddata->irq_chip.name = dev_name(&spi->dev);
+ ddata->irq_chip.status_base =
+ pdata->irq_base + SPRD_PMIC_INT_MASK_STATUS;
+ ddata->irq_chip.mask_base = pdata->irq_base + SPRD_PMIC_INT_EN;
+ ddata->irq_chip.ack_base = 0;
+ ddata->irq_chip.num_regs = 1;
+ ddata->irq_chip.num_irqs = pdata->num_irqs;
+ ddata->irq_chip.mask_invert = true;
+
+ ddata->irqs = devm_kzalloc(&spi->dev, sizeof(struct regmap_irq) *
+ pdata->num_irqs, GFP_KERNEL);
+ if (!ddata->irqs)
+ return -ENOMEM;
+
+ ddata->irq_chip.irqs = ddata->irqs;
+ for (i = 0; i < pdata->num_irqs; i++) {
+ ddata->irqs[i].reg_offset = i / pdata->num_irqs;
+ ddata->irqs[i].mask = BIT(i % pdata->num_irqs);
+ }
+
+ ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
+ &ddata->irq_chip, &ddata->irq_data);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(&spi->dev, PLATFORM_DEVID_AUTO,
+ sprd_pmic_devs, ARRAY_SIZE(sprd_pmic_devs),
+ NULL, 0,
+ regmap_irq_get_domain(ddata->irq_data));
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register device %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_pmic_match[] = {
+ { .compatible = "sprd,sc2731", .data = &sc2731_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sprd_pmic_match);
+
+static struct spi_driver sprd_pmic_driver = {
+ .driver = {
+ .name = "sc27xx-pmic",
+ .bus = &spi_bus_type,
+ .of_match_table = sprd_pmic_match,
+ },
+ .probe = sprd_pmic_probe,
+};
+
+static int __init sprd_pmic_init(void)
+{
+ return spi_register_driver(&sprd_pmic_driver);
+}
+subsys_initcall(sprd_pmic_init);
+
+static void __exit sprd_pmic_exit(void)
+{
+ spi_unregister_driver(&sprd_pmic_driver);
+}
+module_exit(sprd_pmic_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum SC27xx PMICs driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 27986f641f7d..36b96fee4ce6 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -314,7 +314,7 @@ static int ssbi_probe(struct platform_device *pdev)
spin_lock_init(&ssbi->lock);
- return of_platform_populate(np, NULL, NULL, &pdev->dev);
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id ssbi_match_table[] = {
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index ab949eaca6ad..3cc80956260e 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -72,10 +72,12 @@ static int stw481x_get_pctl_reg(struct stw481x *stw481x, u8 reg)
static int stw481x_startup(struct stw481x *stw481x)
{
/* Voltages multiplied by 100 */
- u8 vcore_val[] = { 100, 105, 110, 115, 120, 122, 124, 126, 128,
- 130, 132, 134, 136, 138, 140, 145 };
- u8 vpll_val[] = { 105, 120, 130, 180 };
- u8 vaux_val[] = { 15, 18, 25, 28 };
+ static const u8 vcore_val[] = {
+ 100, 105, 110, 115, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 138, 140, 145
+ };
+ static const u8 vpll_val[] = { 105, 120, 130, 180 };
+ static const u8 vaux_val[] = { 15, 18, 25, 28 };
u8 vcore;
u8 vcore_slp;
u8 vpll;
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f769c7d4e335..7566ce4457a0 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -311,37 +311,20 @@ static const struct regmap_config tps65217_regmap_config = {
};
static const struct of_device_id tps65217_of_match[] = {
- { .compatible = "ti,tps65217", .data = (void *)TPS65217 },
+ { .compatible = "ti,tps65217"},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, tps65217_of_match);
-static int tps65217_probe(struct i2c_client *client,
- const struct i2c_device_id *ids)
+static int tps65217_probe(struct i2c_client *client)
{
struct tps65217 *tps;
unsigned int version;
- unsigned long chip_id = ids->driver_data;
- const struct of_device_id *match;
bool status_off = false;
int ret;
- if (client->dev.of_node) {
- match = of_match_device(tps65217_of_match, &client->dev);
- if (!match) {
- dev_err(&client->dev,
- "Failed to find matching dt id\n");
- return -EINVAL;
- }
- chip_id = (unsigned long)match->data;
- status_off = of_property_read_bool(client->dev.of_node,
- "ti,pmic-shutdown-controller");
- }
-
- if (!chip_id) {
- dev_err(&client->dev, "id is null.\n");
- return -ENODEV;
- }
+ status_off = of_property_read_bool(client->dev.of_node,
+ "ti,pmic-shutdown-controller");
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -349,7 +332,6 @@ static int tps65217_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps);
tps->dev = &client->dev;
- tps->id = chip_id;
tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
if (IS_ERR(tps->regmap)) {
@@ -430,7 +412,7 @@ static struct i2c_driver tps65217_driver = {
.of_match_table = tps65217_of_match,
},
.id_table = tps65217_id_table,
- .probe = tps65217_probe,
+ .probe_new = tps65217_probe,
.remove = tps65217_remove,
};
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 13834a0d2817..910f569ff77c 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -215,17 +215,9 @@ static int tps65218_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct tps65218 *tps;
- const struct of_device_id *match;
int ret;
unsigned int chipid;
- match = of_match_device(of_tps65218_match_table, &client->dev);
- if (!match) {
- dev_err(&client->dev,
- "Failed to find matching dt id\n");
- return -EINVAL;
- }
-
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index da16bf45fab4..dc94ffc6321a 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
- struct device_node *node)
+ struct device_node *parent)
{
+ struct device_node *node;
+
if (pdata && pdata->codec)
return true;
- if (of_find_node_by_name(node, "codec"))
+ node = of_get_child_by_name(parent, "codec");
+ if (node) {
+ of_node_put(node);
return true;
+ }
return false;
}
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d66502d36ba0..dd19f17a1b63 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
};
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
{
-#ifdef CONFIG_OF
- if (of_find_node_by_name(node, "vibra"))
+ struct device_node *node;
+
+ node = of_get_child_by_name(parent, "vibra");
+ if (node) {
+ of_node_put(node);
return true;
-#endif
+ }
+
return false;
}
diff --git a/drivers/misc/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig
index 7f01d8e93992..8a828fe41fad 100644
--- a/drivers/misc/altera-stapl/Kconfig
+++ b/drivers/misc/altera-stapl/Kconfig
@@ -1,4 +1,5 @@
-comment "Altera FPGA firmware download module"
+comment "Altera FPGA firmware download module (requires I2C)"
+ depends on !I2C
config ALTERA_STAPL
tristate "Altera FPGA firmware download module"
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 1922cb8f6b88..1c5b7aec13d4 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,7 +15,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
@@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name,
return ERR_PTR(-EINVAL);
c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
- kmemcheck_annotate_bitfield(c2dev, flags);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index a0c44d16bf30..7c11bad5cded 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include "cxl.h"
@@ -331,9 +332,12 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
/* ensure this mm_struct can't be freed */
cxl_context_mm_count_get(ctx);
- /* decrement the use count */
- if (ctx->mm)
+ if (ctx->mm) {
+ /* decrement the use count from above */
mmput(ctx->mm);
+ /* make TLBIs for this context global */
+ mm_context_add_copro(ctx->mm);
+ }
}
/*
@@ -342,13 +346,19 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
*/
cxl_ctx_get();
+ /* See the comment in afu_ioctl_start_work() */
+ smp_mb();
+
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
put_pid(ctx->pid);
ctx->pid = NULL;
cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
- if (task)
+ if (task) {
cxl_context_mm_count_put(ctx);
+ if (ctx->mm)
+ mm_context_remove_copro(ctx->mm);
+ }
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 8c32040b9c09..12a41b2753f0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
@@ -267,6 +268,8 @@ int __detach_context(struct cxl_context *ctx)
/* Decrease the mm count on the context */
cxl_context_mm_count_put(ctx);
+ if (ctx->mm)
+ mm_context_remove_copro(ctx->mm);
ctx->mm = NULL;
return 0;
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index b1afeccbb97f..e46a4062904a 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -100,9 +100,12 @@ static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158};
static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168};
/* PSL registers - CAIA 2 */
static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020};
+static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110};
+static const cxl_p1_reg_t CXL_XSL9_DBG = {0x0130};
+static const cxl_p1_reg_t CXL_XSL9_DEF = {0x0140};
static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168};
static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300};
-static const cxl_p1_reg_t CXL_PSL9_FIR2 = {0x0308};
+static const cxl_p1_reg_t CXL_PSL9_FIR_MASK = {0x0308};
static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310};
static const cxl_p1_reg_t CXL_PSL9_DEBUG = {0x0320};
static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348};
@@ -112,6 +115,7 @@ static const cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368};
static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378};
static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380};
static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388};
+static const cxl_p1_reg_t CXL_PSL9_CTCCFG = {0x0390};
static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398};
static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588};
static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590};
@@ -414,6 +418,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
+#define CXL_PSL9_TRACEID_MAX 0xAU
+#define CXL_PSL9_TRACESTATE_FIN 0x3U
+
enum cxl_context_status {
CLOSED,
OPENED,
@@ -938,8 +945,6 @@ int cxl_debugfs_adapter_add(struct cxl *adapter);
void cxl_debugfs_adapter_remove(struct cxl *adapter);
int cxl_debugfs_afu_add(struct cxl_afu *afu);
void cxl_debugfs_afu_remove(struct cxl_afu *afu);
-void cxl_stop_trace_psl9(struct cxl *cxl);
-void cxl_stop_trace_psl8(struct cxl *cxl);
void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir);
void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir);
void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir);
@@ -975,14 +980,6 @@ static inline void cxl_debugfs_afu_remove(struct cxl_afu *afu)
{
}
-static inline void cxl_stop_trace_psl9(struct cxl *cxl)
-{
-}
-
-static inline void cxl_stop_trace_psl8(struct cxl *cxl)
-{
-}
-
static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter,
struct dentry *dir)
{
@@ -1070,7 +1067,8 @@ u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9);
void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx);
void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
-void cxl_native_err_irq_dump_regs(struct cxl *adapter);
+void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter);
+void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
void cxl_release_mapping(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index eae9d749f967..1643850d2302 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -15,28 +15,6 @@
static struct dentry *cxl_debugfs;
-void cxl_stop_trace_psl9(struct cxl *adapter)
-{
- /* Stop the trace */
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x4480000000000000ULL);
-}
-
-void cxl_stop_trace_psl8(struct cxl *adapter)
-{
- int slice;
-
- /* Stop the trace */
- cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
-
- /* Stop the slice traces */
- spin_lock(&adapter->afu_list_lock);
- for (slice = 0; slice < adapter->slices; slice++) {
- if (adapter->afu[slice])
- cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, 0x8000000000000000LL);
- }
- spin_unlock(&adapter->afu_list_lock);
-}
-
/* Helpers to export CXL mmaped IO registers via debugfs */
static int debugfs_io_u64_get(void *data, u64 *val)
{
@@ -62,9 +40,14 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir)
{
debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1));
- debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR2));
+ debugfs_create_io_x64("fir_mask", 0400, dir,
+ _cxl_p1_addr(adapter, CXL_PSL9_FIR_MASK));
debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL));
debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG));
+ debugfs_create_io_x64("debug", 0600, dir,
+ _cxl_p1_addr(adapter, CXL_PSL9_DEBUG));
+ debugfs_create_io_x64("xsl-debug", 0600, dir,
+ _cxl_p1_addr(adapter, CXL_XSL9_DBG));
}
void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index f17f72ea0545..70dbb6de102c 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -220,22 +220,11 @@ static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
- u64 crs; /* Translation Checkout Response Status */
-
if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
return true;
- if (cxl_is_power9()) {
- crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
- if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
- (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
- (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
- (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
- (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
- (crs == CXL_PSL9_DSISR_An_URTCH)) {
- return true;
- }
- }
+ if (cxl_is_power9())
+ return true;
return false;
}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 4bfad9f6dc9f..76c0b0ca9388 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
@@ -220,9 +221,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
/* ensure this mm_struct can't be freed */
cxl_context_mm_count_get(ctx);
- /* decrement the use count */
- if (ctx->mm)
+ if (ctx->mm) {
+ /* decrement the use count from above */
mmput(ctx->mm);
+ /* make TLBIs for this context global */
+ mm_context_add_copro(ctx->mm);
+ }
/*
* Increment driver use count. Enables global TLBIs for hash
@@ -230,6 +234,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
*/
cxl_ctx_get();
+ /*
+ * A barrier is needed to make sure all TLBIs are global
+ * before we attach and the context starts being used by the
+ * adapter.
+ *
+ * Needed after mm_context_add_copro() for radix and
+ * cxl_ctx_get() for hash/p8.
+ *
+ * The barrier should really be mb(), since it involves a
+ * device. However, it's only useful when we have local
+ * vs. global TLBIs, i.e SMP=y. So keep smp_mb().
+ */
+ smp_mb();
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
@@ -240,6 +258,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = NULL;
cxl_ctx_put();
cxl_context_mm_count_put(ctx);
+ if (ctx->mm)
+ mm_context_remove_copro(ctx->mm);
goto out;
}
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 4a82c313cf71..02b6b45b4c20 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -897,6 +897,14 @@ int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
+ ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
+ /*
+ * Ideally we should do a wmb() here to make sure the changes to the
+ * PE are visible to the card before we call afu_enable.
+ * On ppc64 though all mmios are preceded by a 'sync' instruction hence
+ * we dont dont need one here.
+ */
+
result = cxl_ops->afu_reset(afu);
if (result)
return result;
@@ -1077,13 +1085,11 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
- u64 fir1, fir2, serr;
+ u64 fir1, serr;
fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
- fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR2);
dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
cxl_afu_decode_psl_serr(ctx->afu, serr);
@@ -1257,14 +1263,23 @@ static irqreturn_t native_slice_irq_err(int irq, void *data)
return IRQ_HANDLED;
}
-void cxl_native_err_irq_dump_regs(struct cxl *adapter)
+void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
+{
+ u64 fir1;
+
+ fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
+ dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
+}
+
+void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
u64 fir1, fir2;
fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
-
- dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+ dev_crit(&adapter->dev,
+ "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
+ fir1, fir2);
}
static irqreturn_t native_irq_err(int irq, void *data)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3ba04f371380..19969ee86d6f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -401,7 +401,8 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
*capp_unit_id = get_capp_unit_id(np, *phb_index);
of_node_put(np);
if (!*capp_unit_id) {
- pr_err("cxl: invalid capp unit id\n");
+ pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
+ *phb_index);
return -ENODEV;
}
@@ -475,37 +476,37 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
psl_fircntl |= 0x1ULL; /* ce_thresh */
cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
- /* vccredits=0x1 pcklat=0x4 */
- cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0000000000001810ULL);
-
- /*
- * For debugging with trace arrays.
- * Configure RX trace 0 segmented mode.
- * Configure CT trace 0 segmented mode.
- * Configure LA0 trace 0 segmented mode.
- * Configure LA1 trace 0 segmented mode.
+ /* Setup the PSL to transmit packets on the PCIe before the
+ * CAPP is enabled
*/
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000000ULL);
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000003ULL);
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000005ULL);
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000006ULL);
+ cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000002A10ULL);
/*
* A response to an ASB_Notify request is returned by the
* system as an MMIO write to the address defined in
- * the PSL_TNR_ADDR register
+ * the PSL_TNR_ADDR register.
+ * keep the Reset Value: 0x00020000E0000000
*/
- /* PSL_TNR_ADDR */
- /* NORST */
- cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL);
+ /* Enable XSL rty limit */
+ cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);
- /* allocate the apc machines */
- cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL);
+ /* Change XSL_INV dummy read threshold */
+ cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);
+
+ if (phb_index == 3) {
+ /* disable machines 31-47 and 20-27 for DMA */
+ cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
+ }
- /* Disable vc dd1 fix */
- if (cxl_is_power9_dd1())
- cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
+ /* Snoop machines */
+ cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
+
+ if (cxl_is_power9_dd1()) {
+ /* Disabling deadlock counter CAR */
+ cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL);
+ } else
+ cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x4000000000000000ULL);
return 0;
}
@@ -1746,6 +1747,44 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
pci_disable_device(pdev);
}
+static void cxl_stop_trace_psl9(struct cxl *adapter)
+{
+ int traceid;
+ u64 trace_state, trace_mask;
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+
+ /* read each tracearray state and issue mmio to stop them is needed */
+ for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
+ trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
+ trace_mask = (0x3ULL << (62 - traceid * 2));
+ trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
+ dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
+ traceid, trace_state);
+
+ /* issue mmio if the trace array isn't in FIN state */
+ if (trace_state != CXL_PSL9_TRACESTATE_FIN)
+ cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
+ 0x8400000000000000ULL | traceid);
+ }
+}
+
+static void cxl_stop_trace_psl8(struct cxl *adapter)
+{
+ int slice;
+
+ /* Stop the trace */
+ cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
+
+ /* Stop the slice traces */
+ spin_lock(&adapter->afu_list_lock);
+ for (slice = 0; slice < adapter->slices; slice++) {
+ if (adapter->afu[slice])
+ cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
+ 0x8000000000000000LL);
+ }
+ spin_unlock(&adapter->afu_list_lock);
+}
+
static const struct cxl_service_layer_ops psl9_ops = {
.adapter_regs_init = init_implementation_adapter_regs_psl9,
.invalidate_all = cxl_invalidate_all_psl9,
@@ -1762,6 +1801,7 @@ static const struct cxl_service_layer_ops psl9_ops = {
.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
+ .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
.debugfs_stop_trace = cxl_stop_trace_psl9,
.write_timebase_ctrl = write_timebase_ctrl_psl9,
.timebase_read = timebase_read_psl9,
@@ -1785,7 +1825,7 @@ static const struct cxl_service_layer_ops psl8_ops = {
.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
- .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
+ .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
.debugfs_stop_trace = cxl_stop_trace_psl8,
.write_timebase_ctrl = write_timebase_ctrl_psl8,
.timebase_read = timebase_read_psl8,
@@ -2043,6 +2083,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
/* There should only be one entry, but go through the list
* anyway
*/
+ if (afu->phb == NULL)
+ return result;
+
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
if (!afu_dev->driver)
continue;
@@ -2084,8 +2127,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
* Tell the AFU drivers; but we don't care what they
* say, we're going away.
*/
- if (afu->phb != NULL)
- cxl_vphb_error_detected(afu, state);
+ cxl_vphb_error_detected(afu, state);
}
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2225,6 +2267,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
if (cxl_afu_select_best_mode(afu))
goto err;
+ if (afu->phb == NULL)
+ continue;
+
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
/* Reset the device context.
* TODO: make this less disruptive
@@ -2287,6 +2332,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
+ if (afu->phb == NULL)
+ continue;
+
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
if (afu_dev->driver && afu_dev->driver->err_handler &&
afu_dev->driver->err_handler->resume)
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index e0b4b36ef010..4d63ac8a82e0 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -425,7 +425,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
memset(msg, 0, sizeof(msg));
msg[0].addr = client->addr;
msg[0].buf = addrbuf;
- addrbuf[0] = 0x90 + offset;
+ /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+ addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
msg[0].len = 1;
msg[1].addr = client->addr;
msg[1].flags = I2C_M_RD;
@@ -561,18 +562,19 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
- struct i2c_client *client;
+ struct device *dev = &at24->client[0]->dev;
char *buf = val;
int ret;
if (unlikely(!count))
return count;
- client = at24_translate_offset(at24, &off);
+ if (off + count > at24->chip.byte_len)
+ return -EINVAL;
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -588,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
status = at24->read_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return status;
}
buf += status;
@@ -598,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return 0;
}
@@ -606,18 +608,19 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
- struct i2c_client *client;
+ struct device *dev = &at24->client[0]->dev;
char *buf = val;
int ret;
if (unlikely(!count))
return -EINVAL;
- client = at24_translate_offset(at24, &off);
+ if (off + count > at24->chip.byte_len)
+ return -EINVAL;
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -633,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
status = at24->write_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return status;
}
buf += status;
@@ -643,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return 0;
}
@@ -730,6 +733,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_warn(&client->dev,
"page_size looks suspicious (no power of 2)!\n");
+ /*
+ * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+ * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+ *
+ * Eventually we'll get rid of the magic values altoghether in favor of
+ * real structs, but for now just manually set the right size.
+ */
+ if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+ chip.byte_len = 6;
+
/* Use I2C operations unless we're stuck with SMBus extensions. */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
if (chip.flags & AT24_FLAG_ADDR16)
@@ -863,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->nvmem_config.reg_read = at24_read;
at24->nvmem_config.reg_write = at24_write;
at24->nvmem_config.priv = at24;
- at24->nvmem_config.stride = 4;
+ at24->nvmem_config.stride = 1;
at24->nvmem_config.word_size = 1;
at24->nvmem_config.size = chip.byte_len;
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 5813b5f25006..3743c87f8ab9 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -182,6 +182,7 @@ struct dma_mapping {
struct list_head card_list; /* list of usr_maps for card */
struct list_head pin_list; /* list of pinned memory for dev */
+ int write; /* writable map? useful in unmapping */
};
static inline void genwqe_mapping_init(struct dma_mapping *m,
@@ -189,6 +190,7 @@ static inline void genwqe_mapping_init(struct dma_mapping *m,
{
memset(m, 0, sizeof(*m));
m->type = type;
+ m->write = 1; /* Assume the maps we create are R/W */
}
/**
@@ -347,6 +349,7 @@ enum genwqe_requ_state {
* @user_size: size of user-space memory area
* @page: buffer for partial pages if needed
* @page_dma_addr: dma address partial pages
+ * @write: should we write it back to userspace?
*/
struct genwqe_sgl {
dma_addr_t sgl_dma_addr;
@@ -356,6 +359,8 @@ struct genwqe_sgl {
void __user *user_addr; /* user-space base-address */
size_t user_size; /* size of memory area */
+ int write;
+
unsigned long nr_pages;
unsigned long fpage_offs;
size_t fpage_size;
@@ -369,7 +374,7 @@ struct genwqe_sgl {
};
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size);
+ void __user *user_addr, size_t user_size, int write);
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
dma_addr_t *dma_list);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index dd4617764f14..3ecfa35457e0 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -942,6 +942,10 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
+
+ if (ats_flags == ATS_TYPE_SGL_RD)
+ m->write = 0;
+
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size, req);
if (rc != 0)
@@ -954,7 +958,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/* create genwqe style scatter gather list */
rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
(void __user *)u_addr,
- u_size);
+ u_size, m->write);
if (rc != 0)
goto err_out;
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 147b83011b58..5c0d917636f7 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -296,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
* from user-space into the cached pages.
*/
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size)
+ void __user *user_addr, size_t user_size, int write)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
@@ -312,6 +312,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->user_addr = user_addr;
sgl->user_size = user_size;
+ sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
if (get_order(sgl->sgl_size) > MAX_ORDER) {
@@ -476,14 +477,20 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
int rc = 0;
+ size_t offset;
+ unsigned long res;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
- if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
- sgl->fpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ res = copy_to_user(sgl->user_addr,
+ sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying fpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
@@ -491,12 +498,16 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
sgl->fpage_dma_addr = 0;
}
if (sgl->lpage) {
- if (copy_to_user(sgl->user_addr + sgl->user_size -
- sgl->lpage_size, sgl->lpage,
- sgl->lpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ offset = sgl->user_size - sgl->lpage_size;
+ res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
+ sgl->lpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying lpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
@@ -599,14 +610,14 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
- 1, /* write by caller */
+ m->write, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- free_user_pages(m->page_list, rc, 0);
+ free_user_pages(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_get_user_pages;
}
@@ -618,7 +629,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
- free_user_pages(m->page_list, m->nr_pages, 0);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
fail_get_user_pages:
kfree(m->page_list);
@@ -651,7 +662,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- free_user_pages(m->page_list, m->nr_pages, 1);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
kfree(m->page_list);
m->page_list = NULL;
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index b0f7af872bb5..7eebbdfbcacd 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -63,9 +63,11 @@ void lkdtm_BUG(void)
BUG();
}
+static int warn_counter;
+
void lkdtm_WARNING(void)
{
- WARN_ON(1);
+ WARN(1, "Warning message trigger count: %d\n", warn_counter++);
}
void lkdtm_EXCEPTION(void)
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index ed7f0c61c59a..ba92291508dc 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -122,7 +122,7 @@ struct crashtype {
}
/* Define the possible types of crashes that can be triggered. */
-struct crashtype crashtypes[] = {
+static const struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
@@ -188,8 +188,8 @@ struct crashtype crashtypes[] = {
/* Global kprobe entry and crashtype. */
static struct kprobe *lkdtm_kprobe;
-struct crashpoint *lkdtm_crashpoint;
-struct crashtype *lkdtm_crashtype;
+static struct crashpoint *lkdtm_crashpoint;
+static const struct crashtype *lkdtm_crashtype;
/* Module parameters */
static int recur_count = -1;
@@ -212,7 +212,7 @@ MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
/* Return the crashtype number or NULL if the name is invalid */
-static struct crashtype *find_crashtype(const char *name)
+static const struct crashtype *find_crashtype(const char *name)
{
int i;
@@ -228,7 +228,7 @@ static struct crashtype *find_crashtype(const char *name)
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
*/
-static noinline void lkdtm_do_action(struct crashtype *crashtype)
+static noinline void lkdtm_do_action(const struct crashtype *crashtype)
{
if (WARN_ON(!crashtype || !crashtype->func))
return;
@@ -236,7 +236,7 @@ static noinline void lkdtm_do_action(struct crashtype *crashtype)
}
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
- struct crashtype *crashtype)
+ const struct crashtype *crashtype)
{
int ret;
@@ -300,7 +300,7 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
size_t count, loff_t *off)
{
struct crashpoint *crashpoint = file_inode(f)->i_private;
- struct crashtype *crashtype = NULL;
+ const struct crashtype *crashtype = NULL;
char *buf;
int err;
@@ -368,7 +368,7 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off)
{
- struct crashtype *crashtype;
+ const struct crashtype *crashtype;
char *buf;
if (count >= PAGE_SIZE)
@@ -404,7 +404,7 @@ static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
{
struct crashpoint *crashpoint = NULL;
- struct crashtype *crashtype = NULL;
+ const struct crashtype *crashtype = NULL;
int ret = -EINVAL;
int i;
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index e19e6acb191b..374edde72a14 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -23,5 +23,4 @@
EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
-EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index 7d2d5d4a1624..b52e9b97a7c0 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -83,25 +83,6 @@ TRACE_EVENT(mei_pci_cfg_read,
__get_str(dev), __entry->reg, __entry->offs, __entry->val)
);
-TRACE_EVENT(mei_pci_cfg_write,
- TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
- TP_ARGS(dev, reg, offs, val),
- TP_STRUCT__entry(
- __string(dev, dev_name(dev))
- __field(const char *, reg)
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- __assign_str(dev, dev_name(dev))
- __entry->reg = reg;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] pci cfg write %s[%#x] = %#x",
- __get_str(dev), __entry->reg, __entry->offs, __entry->val)
-);
-
#endif /* _MEI_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 6fd9d367dea7..227cc7443671 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,3 +1,5 @@
+menu "Intel MIC & related support"
+
comment "Intel MIC Bus Driver"
config INTEL_MIC_BUS
@@ -150,3 +152,5 @@ config VOP
if VOP
source "drivers/vhost/Kconfig.vringh"
endif
+
+endmenu
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index eda38cbe8530..41f2a9f6851d 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -32,7 +32,7 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
-#include <linux/pti.h>
+#include <linux/intel-pti.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ea80ff4cd7f9..ccfa98af1dd3 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -122,6 +122,10 @@ struct mmc_blk_data {
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
+
+ /* debugfs files (only in main mmc_blk_data) */
+ struct dentry *status_dentry;
+ struct dentry *ext_csd_dentry;
};
/* Device type for RPMB character devices */
@@ -233,9 +237,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
/* Dispatch locking to the block layer */
req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+ if (IS_ERR(req)) {
+ count = PTR_ERR(req);
+ goto out_put;
+ }
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
blk_execute_rq(mq->queue, NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
+ blk_put_request(req);
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
@@ -248,7 +257,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
set_disk_ro(part_md->disk, 1);
}
}
-
+out_put:
mmc_blk_put(md);
return count;
}
@@ -624,6 +633,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
req = blk_get_request(mq->queue,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto cmd_done;
+ }
idatas[0] = idata;
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
@@ -691,6 +704,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
req = blk_get_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto cmd_err;
+ }
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
@@ -2550,6 +2567,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
/* Ask the block layer about the card status */
req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
blk_execute_rq(mq->queue, NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2557,6 +2576,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
*val = ret;
ret = 0;
}
+ blk_put_request(req);
return ret;
}
@@ -2583,10 +2603,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
/* Ask the block layer for the EXT CSD */
req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_free;
+ }
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(mq->queue, NULL, req, 0);
err = req_to_mmc_queue_req(req)->drv_op_result;
+ blk_put_request(req);
if (err) {
pr_err("FAILED %d\n", err);
goto out_free;
@@ -2632,7 +2657,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
.llseek = default_llseek,
};
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
struct dentry *root;
@@ -2642,28 +2667,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card)
root = card->debugfs_root;
if (mmc_card_mmc(card) || mmc_card_sd(card)) {
- if (!debugfs_create_file("status", S_IRUSR, root, card,
- &mmc_dbg_card_status_fops))
+ md->status_dentry =
+ debugfs_create_file("status", S_IRUSR, root, card,
+ &mmc_dbg_card_status_fops);
+ if (!md->status_dentry)
return -EIO;
}
if (mmc_card_mmc(card)) {
- if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
- &mmc_dbg_ext_csd_fops))
+ md->ext_csd_dentry =
+ debugfs_create_file("ext_csd", S_IRUSR, root, card,
+ &mmc_dbg_ext_csd_fops);
+ if (!md->ext_csd_dentry)
return -EIO;
}
return 0;
}
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ if (!card->debugfs_root)
+ return;
+
+ if (!IS_ERR_OR_NULL(md->status_dentry)) {
+ debugfs_remove(md->status_dentry);
+ md->status_dentry = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
+ debugfs_remove(md->ext_csd_dentry);
+ md->ext_csd_dentry = NULL;
+ }
+}
#else
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
return 0;
}
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+}
+
#endif /* CONFIG_DEBUG_FS */
static int mmc_blk_probe(struct mmc_card *card)
@@ -2703,7 +2753,7 @@ static int mmc_blk_probe(struct mmc_card *card)
}
/* Add two debugfs entries */
- mmc_blk_add_debugfs(card);
+ mmc_blk_add_debugfs(card, md);
pm_runtime_set_autosuspend_delay(&card->dev, 3000);
pm_runtime_use_autosuspend(&card->dev);
@@ -2729,6 +2779,7 @@ static void mmc_blk_remove(struct mmc_card *card)
{
struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ mmc_blk_remove_debugfs(card, md);
mmc_blk_remove_parts(card, md);
pm_runtime_get_sync(&card->dev);
mmc_claim_host(card->host);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index a4b49e25fe96..7586ff2ad1f1 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev)
return ret;
ret = host->bus_ops->suspend(host);
+ if (ret)
+ pm_generic_resume(dev);
+
return ret;
}
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index f06cd91964ce..79a5b985ccf5 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -75,9 +75,11 @@ struct mmc_fixup {
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_APACER 0x27
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 01e459a34f33..0f4a7d7b2626 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -314,4 +314,5 @@ err:
void mmc_remove_card_debugfs(struct mmc_card *card)
{
debugfs_remove_recursive(card->debugfs_root);
+ card->debugfs_root = NULL;
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 35a9e4fd1a9f..64b03d6eaf18 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -160,9 +160,9 @@ out:
return err;
}
-static void mmc_retune_timer(unsigned long data)
+static void mmc_retune_timer(struct timer_list *t)
{
- struct mmc_host *host = (struct mmc_host *)data;
+ struct mmc_host *host = from_timer(host, t, retune_timer);
mmc_retune_needed(host);
}
@@ -389,7 +389,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
- setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
+ timer_setup(&host->retune_timer, mmc_retune_timer, 0);
/*
* By default, hosts do not support SGIO or large requests.
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a552f61060d2..208a762b87ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -781,7 +781,7 @@ MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
card->ext_csd.device_life_time_est_typ_a,
card->ext_csd.device_life_time_est_typ_b);
@@ -791,7 +791,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
static ssize_t mmc_fwrev_show(struct device *dev,
@@ -1290,7 +1290,7 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
- int card_drv_type, drive_strength, drv_type;
+ int card_drv_type, drive_strength, drv_type = 0;
int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f664e9cbc9f8..75d317623852 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -53,6 +53,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
MMC_QUIRK_BLK_NO_CMD23),
/*
+ * Some SD cards lockup while using CMD23 multiblock transfers.
+ */
+ MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
* Some MMC cards need longer data read timeout than indicated in CSD.
*/
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 45bf78f32716..62b84dd8f9fe 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
static ssize_t mmc_dsr_show(struct device *dev,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3fb7d2eec93f..c283291db705 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -29,6 +29,9 @@
#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK 0xff
+#define CORE_MCI_GENERICS 0x70
+#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)
+
#define CORE_HC_MODE 0x78
#define HC_MODE_EN 0x1
#define CORE_POWER 0x0
@@ -1028,12 +1031,23 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
bool done = false;
+ u32 val;
pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
mmc_hostname(host->mmc), __func__, req_type,
msm_host->curr_pwr_state, msm_host->curr_io_level);
/*
+ * The power interrupt will not be generated for signal voltage
+ * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
+ */
+ val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
+ if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
+ !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
+ return;
+ }
+
+ /*
* The IRQ for request type IO High/LOW will be generated when -
* there is a state change in 1.8V enable bit (bit 3) of
* SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 2f14334e42df..e9290a3439d5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
@@ -3651,22 +3652,29 @@ int sdhci_setup_host(struct sdhci_host *host)
spin_lock_init(&host->lock);
/*
+ * Maximum number of sectors in one transfer. Limited by SDMA boundary
+ * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+ * is less anyway.
+ */
+ mmc->max_req_size = 524288;
+
+ /*
* Maximum number of segments. Depends on if the hardware
* can do scatter/gather or not.
*/
- if (host->flags & SDHCI_USE_ADMA)
+ if (host->flags & SDHCI_USE_ADMA) {
mmc->max_segs = SDHCI_MAX_SEGS;
- else if (host->flags & SDHCI_USE_SDMA)
+ } else if (host->flags & SDHCI_USE_SDMA) {
mmc->max_segs = 1;
- else /* PIO */
+ if (swiotlb_max_segment()) {
+ unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
+ IO_TLB_SEGSIZE;
+ mmc->max_req_size = min(mmc->max_req_size,
+ max_req_size);
+ }
+ } else { /* PIO */
mmc->max_segs = SDHCI_MAX_SEGS;
-
- /*
- * Maximum number of sectors in one transfer. Limited by SDMA boundary
- * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
- * is less anyway.
- */
- mmc->max_req_size = 524288;
+ }
/*
* Maximum segment size. Could be one segment with the maximum number
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5a2d71729b9a..2a8ac6829d42 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,6 +1,5 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
- depends on GENERIC_IO
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index afb43d5e1782..1cd0fff0e940 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
-static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver mapram_chipdrv = {
@@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->type = MTD_RAM;
mtd->size = map->size;
mtd->_erase = mapram_erase;
- mtd->_get_unmapped_area = mapram_unmapped_area;
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
+ mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
+ mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
@@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
return mtd;
}
-
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index e67f73ab44c9..20e3604b4d71 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static void maprom_nop (struct mtd_info *);
static struct mtd_info *map_rom_probe(struct map_info *map);
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
-static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
static struct mtd_chip_driver maprom_chipdrv = {
.probe = map_rom_probe,
@@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
- mtd->_get_unmapped_area = maprom_unmapped_area;
+ mtd->_point = maprom_point;
+ mtd->_unpoint = maprom_unpoint;
mtd->_read = maprom_read;
mtd->_write = maprom_write;
mtd->_sync = maprom_nop;
@@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 84b16133554b..0806f72102c0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
struct dentry *root = floor->dbg.dfs_dir;
struct docg3 *docg3 = floor->priv;
- if (IS_ERR_OR_NULL(root))
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ dev_warn(floor->dev.parent,
+ "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return;
+ }
debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
&flashcontrol_fops);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 268aae45b514..555b94406e0b 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = {
}
};
-static struct mtd_partition lart_partitions[] = {
+static const struct mtd_partition lart_partitions[] = {
/* blob */
{
.name = "blob",
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 00eea6fd379c..dbe6a1de2bb8 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = {
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
/* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
{ "mr25h256" }, /* 256 Kib, 40 MHz */
{ "mr25h10" }, /* 1 Mib, 40 MHz */
{ "mr25h40" }, /* 4 Mib, 40 MHz */
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index cbd8547d7aad..0bf4aeaf0cb8 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtdram.h>
@@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
{
*virt = mtd->priv + from;
*retlen = len;
+
+ if (phys) {
+ /* limit retlen to the number of contiguous physical pages */
+ unsigned long page_ofs = offset_in_page(*virt);
+ void *addr = *virt - page_ofs;
+ unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
+
+ *phys = __pfn_to_phys(pfn0) + page_ofs;
+ len += page_ofs;
+ while (len > PAGE_SIZE) {
+ len -= PAGE_SIZE;
+ addr += PAGE_SIZE;
+ pfn0++;
+ pfn1 = vmalloc_to_pfn(addr);
+ if (pfn1 != pfn0) {
+ *retlen = addr - *virt;
+ break;
+ }
+ }
+ }
+
return 0;
}
@@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return 0;
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- return (unsigned long) mtd->priv + offset;
-}
-
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->_erase = ram_erase;
mtd->_point = ram_point;
mtd->_unpoint = ram_unpoint;
- mtd->_get_unmapped_area = ram_get_unmapped_area;
mtd->_read = ram_read;
mtd->_write = ram_write;
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index f5396f26ddb4..26f9feaa5d17 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -47,6 +47,11 @@ enum flash_op {
FLASH_OP_ERASE,
};
+/*
+ * Don't return -ERESTARTSYS if we can't get a token, the MTD core
+ * might have split up the call from userspace and called into the
+ * driver more than once, we'll already have done some amount of work.
+ */
static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
loff_t offset, size_t len, size_t *retlen, u_char *buf)
{
@@ -63,7 +68,8 @@ static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
if (token < 0) {
if (token != -ERESTARTSYS)
dev_err(dev, "Failed to get an async token\n");
-
+ else
+ token = -EINTR;
return token;
}
@@ -78,32 +84,53 @@ static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
rc = opal_flash_erase(info->id, offset, len, token);
break;
default:
- BUG_ON(1);
- }
-
- if (rc != OPAL_ASYNC_COMPLETION) {
- dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
- op, rc);
+ WARN_ON_ONCE(1);
opal_async_release_token(token);
return -EIO;
}
- rc = opal_async_wait_response(token, &msg);
- opal_async_release_token(token);
- if (rc) {
- dev_err(dev, "opal async wait failed (rc %d)\n", rc);
- return -EIO;
+ if (rc == OPAL_ASYNC_COMPLETION) {
+ rc = opal_async_wait_response_interruptible(token, &msg);
+ if (rc) {
+ /*
+ * If we return the mtd core will free the
+ * buffer we've just passed to OPAL but OPAL
+ * will continue to read or write from that
+ * memory.
+ * It may be tempting to ultimately return 0
+ * if we're doing a read or a write since we
+ * are going to end up waiting until OPAL is
+ * done. However, because the MTD core sends
+ * us the userspace request in chunks, we need
+ * it to know we've been interrupted.
+ */
+ rc = -EINTR;
+ if (opal_async_wait_response(token, &msg))
+ dev_err(dev, "opal_async_wait_response() failed\n");
+ goto out;
+ }
+ rc = opal_get_async_rc(msg);
}
- rc = opal_get_async_rc(msg);
- if (rc == OPAL_SUCCESS) {
- rc = 0;
- if (retlen)
- *retlen = len;
- } else {
- rc = -EIO;
- }
+ /*
+ * OPAL does mutual exclusion on the flash, it will return
+ * OPAL_BUSY.
+ * During firmware updates by the service processor OPAL may
+ * be (temporarily) prevented from accessing the flash, in
+ * this case OPAL will also return OPAL_BUSY.
+ * Both cases aren't errors exactly but the flash could have
+ * changed, userspace should be informed.
+ */
+ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY)
+ dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
+ op, rc);
+
+ if (rc == OPAL_SUCCESS && retlen)
+ *retlen = len;
+ rc = opal_error_code(rc);
+out:
+ opal_async_release_token(token);
return rc;
}
@@ -220,21 +247,20 @@ static int powernv_flash_probe(struct platform_device *pdev)
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!data)
+ return -ENOMEM;
+
data->mtd.priv = data;
ret = of_property_read_u32(dev->of_node, "ibm,opal-id", &(data->id));
if (ret) {
dev_err(dev, "no device property 'ibm,opal-id'\n");
- goto out;
+ return ret;
}
ret = powernv_flash_set_driver_info(dev, &data->mtd);
if (ret)
- goto out;
+ return ret;
dev_set_drvdata(dev, data);
@@ -243,10 +269,7 @@ static int powernv_flash_probe(struct platform_device *pdev)
* with an ffs partition at the start, it should prove easier for users
* to deal with partitions or not as they see fit
*/
- ret = mtd_device_register(&data->mtd, NULL, 0);
-
-out:
- return ret;
+ return mtd_device_register(&data->mtd, NULL, 0);
}
/**
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 8087c36dc693..0ec85f316d24 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length
}
if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
- ioremap(start, length))) {
- E("slram: ioremap failed\n");
+ memremap(start, length,
+ MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
+ E("slram: memremap failed\n");
return -EIO;
}
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
@@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
- iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
kfree((*curmtd)->mtdinfo);
return(-EAGAIN);
@@ -206,7 +207,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
mtd_device_unregister(slram_mtdlist->mtdinfo);
- iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
kfree(slram_mtdlist);
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index d504b3d1791d..70f488628464 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -61,7 +61,7 @@ static struct map_info flagadm_map = {
.bankwidth = 2,
};
-static struct mtd_partition flagadm_parts[] = {
+static const struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 15bbda03be65..a0b8fa7849a9 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
/*
* MTD partitioning stuff
*/
-static struct mtd_partition partitions[] =
+static const struct mtd_partition partitions[] =
{
{
.name = "FileSystem",
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 81dc2598bc0a..3528497f96c7 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -52,7 +52,7 @@
/* partition_info gives details on the logical partitions that the split the
* single flash device into. If the size if zero we use up to the end of the
* device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{
.name = "NetSc520 boot kernel",
.offset = 0,
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a577ef8553d0..729579fb654f 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = {
.bankwidth = AMD_BUSWIDTH,
};
-static struct mtd_partition nettel_amd_partitions[] = {
+static const struct mtd_partition nettel_amd_partitions[] = {
{
.name = "SnapGear BIOS config",
.offset = 0x000e0000,
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 51572895c02c..6d9a4d6f9839 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -43,7 +43,6 @@ struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
- struct resource *area;
struct platdata_mtd_ram *pdata;
};
@@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RO);
- /* release resources */
-
- if (info->area) {
- release_resource(info->area);
- kfree(info->area);
- }
-
- if (info->map.virt != NULL)
- iounmap(info->map.virt);
-
kfree(info);
return 0;
@@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource specified\n");
- err = -ENOENT;
+ info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
+ dev_err(&pdev->dev, "failed to ioremap() region\n");
goto exit_free;
}
@@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev)
(char *)pdata->mapname : (char *)pdev->name;
info->map.bankwidth = pdata->bankwidth;
- /* register our usage of the memory area */
-
- info->area = request_mem_region(res->start, info->map.size, pdev->name);
- if (info->area == NULL) {
- dev_err(&pdev->dev, "failed to request memory region\n");
- err = -EIO;
- goto exit_free;
- }
-
- /* remap the memory area */
-
- info->map.virt = ioremap(res->start, info->map.size);
dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
- if (info->map.virt == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() region\n");
- err = -EIO;
- goto exit_free;
- }
-
simple_map_init(&info->map);
dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 556a2dfe94c5..4337d279ad83 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin);
/* partition_info gives details on the logical partitions that the split the
* single flash device into. If the size if zero we use up to the end of the
* device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{ .name = "SBC-GXx flash boot partition",
.offset = 0,
.size = BOOT_PARTITION_SIZE_KiB*1024 },
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 9969fedb1f13..8f177e0acb8c 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -43,7 +43,7 @@ static struct map_info ts5500_map = {
.phys = WINDOW_ADDR
};
-static struct mtd_partition ts5500_partitions[] = {
+static const struct mtd_partition ts5500_partitions[] = {
{
.name = "Drive A",
.offset = 0,
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 00a8190797ec..aef030ca8601 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-static struct mtd_partition uclinux_romfs[] = {
+static const struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 3568294d4854..de8c902059b8 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -375,12 +375,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
return -EINVAL;
if (!mtd->_write_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
-
- if (ret)
- return ret;
+ return -EOPNOTSUPP;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
@@ -419,9 +414,6 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, ptr, length))
- return -EFAULT;
-
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
@@ -618,9 +610,6 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_data = (const void __user *)(uintptr_t)req.usr_data;
usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
- if (!access_ok(VERIFY_READ, usr_data, req.len) ||
- !access_ok(VERIFY_READ, usr_oob, req.ooblen))
- return -EFAULT;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
@@ -662,21 +651,10 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
struct mtd_info *mtd = mfi->mtd;
void __user *argp = (void __user *)arg;
int ret = 0;
- u_long size;
struct mtd_info_user info;
pr_debug("MTD_ioctl\n");
- size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
- if (cmd & IOC_IN) {
- if (!access_ok(VERIFY_READ, argp, size))
- return -EFAULT;
- }
- if (cmd & IOC_OUT) {
- if (!access_ok(VERIFY_WRITE, argp, size))
- return -EFAULT;
- }
-
switch (cmd) {
case MEMGETREGIONCOUNT:
if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index d573606b91c2..60bf53df5454 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -644,32 +644,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/*
- * try to support NOMMU mmaps on concatenated devices
- * - we don't support subdev spanning as we can't guarantee it'll work
- */
-static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_concat *concat = CONCAT(mtd);
- int i;
-
- for (i = 0; i < concat->num_subdev; i++) {
- struct mtd_info *subdev = concat->subdev[i];
-
- if (offset >= subdev->size) {
- offset -= subdev->size;
- continue;
- }
-
- return mtd_get_unmapped_area(subdev, len, offset, flags);
- }
-
- return (unsigned long) -ENOSYS;
-}
-
-/*
* This function constructs a virtual MTD device by concatenating
* num_devs MTD devices. A pointer to the new device object is
* stored to *new_dev upon success. This function does _not_
@@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd._unlock = concat_unlock;
concat->mtd._suspend = concat_suspend;
concat->mtd._resume = concat_resume;
- concat->mtd._get_unmapped_area = concat_get_unmapped_area;
/*
* Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e7ea842ba3db..73b605577447 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
unsigned long offset, unsigned long flags)
{
- if (!mtd->_get_unmapped_area)
- return -EOPNOTSUPP;
- if (offset >= mtd->size || len > mtd->size - offset)
- return -EINVAL;
- return mtd->_get_unmapped_area(mtd, len, offset, flags);
+ size_t retlen;
+ void *virt;
+ int ret;
+
+ ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
+ if (ret)
+ return ret;
+ if (retlen != len) {
+ mtd_unpoint(mtd, offset, retlen);
+ return -ENOSYS;
+ }
+ return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
@@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
+static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
+ struct mtd_oob_ops *ops)
+{
+ /*
+ * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
+ * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
+ * this case.
+ */
+ if (!ops->datbuf)
+ ops->len = 0;
+
+ if (!ops->oobbuf)
+ ops->ooblen = 0;
+
+ if (offs < 0 || offs + ops->len > mtd->size)
+ return -EINVAL;
+
+ if (ops->ooblen) {
+ u64 maxooblen;
+
+ if (ops->ooboffs >= mtd_oobavail(mtd, ops))
+ return -EINVAL;
+
+ maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
+ mtd_div_by_ws(offs, mtd)) *
+ mtd_oobavail(mtd, ops)) - ops->ooboffs;
+ if (ops->ooblen > maxooblen)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
@@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!mtd->_read_oob)
return -EOPNOTSUPP;
+ ret_code = mtd_check_oob_ops(mtd, from, ops);
+ if (ret_code)
+ return ret_code;
+
ledtrig_mtd_activity();
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ int ret;
+
ops->retlen = ops->oobretlen = 0;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
+
+ ret = mtd_check_oob_ops(mtd, to, ops);
+ if (ret)
+ return ret;
+
ledtrig_mtd_activity();
return mtd->_write_oob(mtd, to, ops);
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a308e707392d..be088bccd593 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return part->parent->_unpoint(part->parent, from + part->offset, len);
}
-static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- offset += part->offset;
- return part->parent->_get_unmapped_area(part->parent, len, offset,
- flags);
-}
-
static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@@ -458,8 +446,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
slave->mtd._unpoint = part_unpoint;
}
- if (parent->_get_unmapped_area)
- slave->mtd._get_unmapped_area = part_get_unmapped_area;
if (parent->_read_oob)
slave->mtd._read_oob = part_read_oob;
if (parent->_write_oob)
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index e43fea896d1e..d58a61c09304 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -79,14 +79,14 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
- ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ ret = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
return ERR_PTR(ret);
}
/* go */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
return dget(sb->s_root);
/* new mountpoint for an already mounted superblock */
@@ -202,7 +202,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
not_an_MTD_device:
#endif /* CONFIG_BLOCK */
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
printk(KERN_NOTICE
"MTD: Attempt to mount non-MTD device \"%s\"\n",
dev_name);
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 7d9080e33865..f07492c6f4b2 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -50,7 +50,7 @@
* Number of free eraseblocks below which GC can also collect low frag
* blocks.
*/
-#define LOW_FRAG_GC_TRESHOLD 5
+#define LOW_FRAG_GC_THRESHOLD 5
/*
* Wear level cost amortization. We want to do wear leveling on the background
@@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
int idx, stopat;
- if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
+ if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
stopat = MTDSWAP_LOWFRAG;
else
stopat = MTDSWAP_HIFRAG;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3f2036f31da4..bb48aafed9a2 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
+
This enables the driver for the NAND flash device found on
- PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+ PXA3xx processors (NFCv1) and also on 32-bit Armada
+ platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
+ platforms (7K, 8K) (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6e2db700d923..118a1349aad3 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -59,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
-obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index dcec9cf4983f..d60ada45c549 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL;
* Define partitions for flash devices
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
{ .name = "Kernel",
.offset = 0,
.size = 3 * SZ_1M + SZ_512K },
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index f25eca79f4e5..90a71a56bc23 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8;
- if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
- (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
+ if (chip->options & NAND_ROW_ADDR_3)
nc->op.addrs[nc->op.naddrs++] = page >> 16;
}
}
@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand;
+ if (nc->pmecc)
+ atmel_pmecc_reset(nc->pmecc);
+
list_for_each_entry(nand, &nc->chips, node) {
int i;
@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+ .pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove,
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 8268636675ef..fcbe4fd6e684 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+ writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
struct atmel_pmecc *pmecc = user->pmecc;
@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
- struct atmel_pmecc *pmecc = user->pmecc;
-
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(user->pmecc);
mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
-
- /* Reset the ECC engine */
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(pmecc);
return pmecc;
}
diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h
index a8ddbfca2ea5..817e0dd9fd15 100644
--- a/drivers/mtd/nand/atmel/pmecc.h
+++ b/drivers/mtd/nand/atmel/pmecc.h
@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9d4a28fa6b73..8ab827edf94e 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
ctx->write_byte(mtd, (u8)(page_addr >> 8));
- /* One more address cycle for devices > 32MiB */
- if (this->chipsize > (32 << 20))
+ if (this->options & NAND_ROW_ADDR_3)
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e0eb51d8c012..dd56a671ea42 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1763,7 +1763,7 @@ try_dmaread:
err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
addr);
/* erased page bitflips corrected */
- if (err > 0)
+ if (err >= 0)
return err;
}
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 1fc435f994e1..b01c9804590e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io;
/*
* Define static partitions for flash device
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
[0] = {
.name = "cmx270-0",
.offset = 0,
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3087b0ba7b7f..5124f8ae8c04 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -10,20 +10,18 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
#include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "denali.h"
@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME "denali-nand"
-/* Host Data/Command Interface */
-#define DENALI_HOST_ADDR 0x00
-#define DENALI_HOST_DATA 0x10
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
*/
#define DENALI_CLK_X_MULT 6
-/*
- * this macro allows us to convert from an MTD structure to our own
- * device context (denali) structure.
- */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
-static void denali_host_write(struct denali_nand_info *denali,
- uint32_t addr, uint32_t data)
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires 28 bits of address region allocated.
+ */
+static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
{
- iowrite32(addr, denali->host + DENALI_HOST_ADDR);
- iowrite32(data, denali->host + DENALI_HOST_DATA);
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - address translation module intervenes in passing the
+ * control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
/*
* Use the configuration feature register to determine the maximum number of
* banks that the hardware supports.
*/
-static void detect_max_banks(struct denali_nand_info *denali)
+static void denali_detect_max_banks(struct denali_nand_info *denali)
{
uint32_t features = ioread32(denali->reg + FEATURES);
- denali->max_banks = 1 << (features & FEATURES__N_BANKS);
+ denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501)
@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
msecs_to_jiffies(1000));
if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
- denali->irq_mask);
+ irq_mask);
return 0;
}
@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status;
}
-/*
- * This helper function setups the registers for ECC and whether or not
- * the spare area will be transferred.
- */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
- bool transfer_spare)
-{
- int ecc_en_flag, transfer_spare_flag;
-
- /* set ECC, transfer spare bits if needed */
- ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
- transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
-
- /* Enable spare area/ECC per user's request. */
- iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
- iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
-}
-
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf[i] = denali->host_read(denali, addr);
}
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf[i]);
}
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf16[i] = denali->host_read(denali, addr);
}
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf16[i]);
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
if (ctrl & NAND_CTRL_CHANGE)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_BANK(denali) | type, dat);
+ denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
static int denali_dev_ready(struct mtd_info *mtd)
@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return 0;
}
- max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/*
* The register holds the maximum of per-sector corrected bitflips.
@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
-#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
-#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
-#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
-#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf)
@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
- err_sector = ECC_SECTOR(err_addr);
- err_byte = ECC_BYTE(err_addr);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
- err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
- err_device = ECC_ERR_DEVICE(err_cor_info);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
/* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector)
bitflips = 0;
- if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/*
* Check later if this is a real ECC error, or
* an erased sector.
@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
}
prev_sector = err_sector;
- } while (!ECC_LAST_ERR(err_cor_info));
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/*
- * Once handle all ecc errors, controller will trigger a
- * ECC_TRANSACTION_DONE interrupt, so here just wait for
- * a while for this interrupt
+ * Once handle all ECC errors, controller will trigger an
+ * ECC_TRANSACTION_DONE interrupt.
*/
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-/* programs the controller to either enable/disable DMA transfers */
-static void denali_enable_dma(struct denali_nand_info *denali, bool en)
-{
- iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
- ioread32(denali->reg + DMA_ENABLE);
-}
-
static void denali_setup_dma64(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
- denali_host_write(denali, mode,
- 0x01002000 | (64 << 16) | (write << 8) | page_count);
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */
- denali_host_write(denali, mode, dma_addr);
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */
- denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_nand_info *denali,
@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
- denali_host_write(denali, mode | page,
- 0x2000 | (write << 8) | page_count);
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
- denali_host_write(denali, mode | 0x14000, 0x2400);
-}
-
-static void denali_setup_dma(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
-{
- if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali_setup_dma64(denali, dma_addr, page, write);
- else
- denali_setup_dma32(denali, dma_addr, page, write);
+ denali->host_write(denali, mode | 0x14000, 0x2400);
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status, ecc_err_mask;
int i;
@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- *buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
+ *buf32++ = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
static int denali_pio_write(struct denali_nand_info *denali,
const void *buf, size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status;
int i;
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, *buf32++);
irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
ecc_err_mask = INTR__ECC_ERR;
}
- denali_enable_dma(denali, true);
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
- denali_setup_dma(denali, dma_addr, page, write);
+ denali->setup_dma(denali, dma_addr, page, write);
- /* wait for operation to complete */
irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO;
else if (irq_status & ecc_err_mask)
ret = -EBADMSG;
- denali_enable_dma(denali, false);
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw, int write)
{
- setup_ecc_for_xfer(denali, !raw, raw);
+ iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
+ denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, raw, write);
@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
- DENALI_ERASE);
+ denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+ DENALI_ERASE);
/* wait for erase to complete or failure to occur */
irq_status = denali_wait_for_irq(denali,
@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
- tmp |= acc_clks;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
- tmp |= re_2_we;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
- tmp |= re_2_re;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
iowrite32(tmp, denali->reg + RE_2_RE);
- /* tWHR -> WE_2_RE */
- we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
+ t_clk);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
- tmp |= we_2_re;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
/* tADL -> ADDR_2_DATA */
@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
- tmp &= ~addr_2_data_mask;
- tmp |= addr_2_data;
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
/* tREH, tWH -> RDWR_EN_HI_CNT */
@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
- tmp |= rdwr_en_hi;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
- tmp |= rdwr_en_lo;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
- tmp |= cs_setup;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
iowrite32(tmp, denali->reg + CS_SETUP_CNT);
return 0;
@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
* if this value is 0, just let it be.
*/
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- detect_max_banks(denali);
+ denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-
- /* Should set value for these registers when init */
- iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, denali->reg + ECC_ENABLE);
}
int denali_calc_ecc_bytes(int step_size, int strength)
@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-/* initialize driver data structures */
-static void denali_drv_init(struct denali_nand_info *denali)
-{
- /*
- * the completion object will be used to notify
- * the callee that the interrupt is done
- */
- init_completion(&denali->complete);
-
- /*
- * the spinlock will be used to synchronize the ISR with any
- * element that might be access shared data (interrupt status)
- */
- spin_lock_init(&denali->irq_lock);
-}
-
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
int ret;
mtd->dev.parent = denali->dev;
denali_hw_init(denali);
- denali_drv_init(denali);
+
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
denali_clear_irq_all(denali);
- /* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) {
@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
- /* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip;
chip->read_byte = denali_read_byte;
chip->write_byte = denali_write_byte;
@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc;
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
/* clk rate info is needed for setup_data_interface */
if (denali->clk_x_rate)
chip->setup_data_interface = denali_setup_data_interface;
- /*
- * scan for NAND devices attached to the controller
- * this is the first stage in a two step process to register
- * with the nand subsystem
- */
ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret)
goto disable_irq;
@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
if (denali->dma_avail) {
chip->options |= NAND_USE_BOUNCE_BUFFER;
chip->buf_align = 16;
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
}
- /*
- * second stage of the NAND scan
- * this stage requires information regarding ECC and
- * bad block management.
- */
-
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
-
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-
- /* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE;
ret = denali_ecc_setup(mtd, chip, denali);
@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
- iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION);
iowrite32(mtd->erasesize / mtd->writesize,
denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
@@ -1441,7 +1403,6 @@ disable_irq:
}
EXPORT_SYMBOL(denali_init);
-/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 9239e6793e6e..2911066dacac 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -10,18 +10,16 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef __DENALI_H__
#define __DENALI_H__
#include <linux/bitops.h>
+#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK(bank) BIT(bank)
@@ -111,9 +109,6 @@
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE GENMASK(4, 0)
#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
-#define MAKE_ECC_CORRECTION(val, thresh) \
- (((val) & (ECC_CORRECTION__VALUE)) | \
- (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
#define READ_MODE 0x1c0
#define READ_MODE__VALUE GENMASK(3, 0)
@@ -255,13 +250,13 @@
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
-#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0)
-#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8)
-#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14)
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15)
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
@@ -310,23 +305,24 @@ struct denali_nand_info {
struct device *dev;
void __iomem *reg; /* Register Interface */
void __iomem *host; /* Host Data/Command Interface */
-
- /* elements used by ISR */
struct completion complete;
- spinlock_t irq_lock;
- uint32_t irq_mask;
- uint32_t irq_status;
+ spinlock_t irq_lock; /* protect irq_mask and irq_status */
+ u32 irq_mask; /* interrupts we are waiting for */
+ u32 irq_status; /* interrupts that have happened */
int irq;
-
- void *buf;
+ void *buf; /* for syndrome layout conversion */
dma_addr_t dma_addr;
- int dma_avail;
+ int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
- int oob_skip_bytes;
+ int oob_skip_bytes; /* number of bytes reserved for BBM */
int max_banks;
- unsigned int revision;
- unsigned int caps;
+ unsigned int revision; /* IP revision */
+ unsigned int caps; /* IP capability (or quirk) */
const struct nand_ecc_caps *ecc_caps;
+ u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
+ void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
+ void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
+ int page, int write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 56e2e177644d..cfd33e6ca77f 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -12,15 +12,16 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "denali.h"
@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
.of_match_table = denali_nand_dt_ids,
},
};
-
module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 81370c79aa48..57fb7ae31412 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -11,6 +11,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
+#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -106,7 +109,6 @@ failed_remap_reg:
return ret;
}
-/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_nand_info *denali = pci_get_drvdata(dev);
@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
.probe = denali_pci_probe,
.remove = denali_pci_remove,
};
-
module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index c3aa53caab5c..72671dc52e2e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
if (page_addr != -1) {
WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000) {
+ if (this->options & NAND_ROW_ADDR_3) {
WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
printk("high density\n");
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index fd3648952b5a..a8bde6665c24 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
@@ -31,12 +31,16 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
+ struct gpio_desc *nce; /* Optional chip enable */
+ struct gpio_desc *cle;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct gpio_desc *nwp; /* Optional write protection */
};
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) {
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce,
- !(ctrl & NAND_NCE));
- gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
- gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+ if (gpiomtd->nce)
+ gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
+ gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
+ gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd);
}
if (cmd == NAND_CMD_NONE)
@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+ return gpiod_get_value(gpiomtd->rdy);
}
#ifdef CONFIG_OF
@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
}
}
- plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
- plat->gpio_nce = of_get_gpio(dev->of_node, 1);
- plat->gpio_ale = of_get_gpio(dev->of_node, 2);
- plat->gpio_cle = of_get_gpio(dev->of_node, 3);
- plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
-
if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val;
@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&gpiomtd->nand_chip));
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+ /* Enable write protection and disable the chip */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return 0;
}
@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
+ struct device *dev = &pdev->dev;
int ret = 0;
- if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+ if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL;
- gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd)
return -ENOMEM;
chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
if (IS_ERR(chip->IO_ADDR_R))
return PTR_ERR(chip->IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev);
if (res) {
- gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync);
}
- ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+ ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret)
return ret;
- if (gpio_is_valid(gpiomtd->plat.gpio_nce)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
- "NAND NCE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+ /* Just enable the chip */
+ gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiomtd->nce))
+ return PTR_ERR(gpiomtd->nce);
+
+ /* We disable write protection once we know probe() will succeed */
+ gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
}
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
- "NAND NWP");
- if (ret)
- return ret;
+ gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->ale)) {
+ ret = PTR_ERR(gpiomtd->ale);
+ goto out_ce;
}
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+ gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->cle)) {
+ ret = PTR_ERR(gpiomtd->cle);
+ goto out_ce;
+ }
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
-
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
- "NAND RDY");
- if (ret)
- return ret;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
- chip->dev_ready = gpio_nand_devready;
+ gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+ if (IS_ERR(gpiomtd->rdy)) {
+ ret = PTR_ERR(gpiomtd->rdy);
+ goto out_ce;
}
+ /* Using RDY pin */
+ if (gpiomtd->rdy)
+ chip->dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R;
@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
+ mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd);
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ /* Disable write protection, if wired up */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_direction_output(gpiomtd->nwp, 1);
ret = nand_scan(mtd, 1);
if (ret)
@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
return 0;
err_wp:
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return ret;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 50f8d4a1b983..d4d824ef64e9 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
- /* handle the block mark swapping */
- block_mark_swapping(this, payload_virt, auxiliary_virt);
-
/* Loop over status bytes, accumulating ECC status. */
status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
@@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
+ /* handle the block mark swapping */
+ block_mark_swapping(this, buf, auxiliary_virt);
+
if (oob_required) {
/*
* It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index d9ee1a7e6956..0897261c3e17 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8);
host->addr_cycle += 2;
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 7f3b065b6b8f..c51d214d169e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) {
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0;
complete(&ecc->done);
} else {
@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
}
}
- writel(0, ecc->regs + ECC_IRQ_REG(op));
-
return IRQ_HANDLED;
}
@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
+ if (op == ECC_DECODE)
+ /*
+ * Clear decode IRQ status in case there is a timeout to wait
+ * decode IRQ.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 53e5e0337c3e..f3be0b2a8869 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
- pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+ dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
- pr_debug("%s: RESET failed\n", __func__);
+ dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
- pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+ dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
- pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
return -EBADMSG;
}
@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
do {
err = ecc_stat & ecc_bit_mask;
if (err > err_limit) {
- printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
return -EBADMSG;
} else {
ret += err;
@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+ dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
}
@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
host->buf_start++;
}
- pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
} else {
- /* One more address cycle for higher density devices */
- if (mtd->size >= 0x4000000) {
+ if (nand_chip->options & NAND_ROW_ADDR_3) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 12edaae17d81..6135d007a068 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- if (section)
+ if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
- /* One more address cycle for devices > 32MiB */
- if (chip->chipsize > (32 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
}
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
chip->cmd_ctrl(mtd, page_addr >> 8,
NAND_NCE | NAND_ALE);
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16,
NAND_NCE | NAND_ALE);
}
@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_reset);
/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
- /* Wait for the device to get ready */
- panic_nand_wait(mtd, chip, 400);
-
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
+ chip->select_chip(mtd, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
@@ -3999,6 +4001,9 @@ ident_done:
chip->chip_shift += 32 - 1;
}
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
chip->badblockbits = 8;
chip->erase = single_erase;
@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break;
default:
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+ * is passed. We could do that for all kind of
+ * ->oobsize, but we must keep the old large/small
+ * page with ECC layout when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->mode == NAND_ECC_NONE) {
+ mtd_set_ooblayout(mtd,
+ &nand_ooblayout_lp_ops);
+ break;
+ }
+
WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 246b4393118e..44322a363ba5 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
struct dentry *root = nsmtd->dbg.dfs_dir;
struct dentry *dent;
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ /*
+ * Just skip debugfs initialization when the debugfs directory is
+ * missing.
+ */
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return 0;
-
- if (IS_ERR_OR_NULL(root))
- return -1;
+ }
dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
root, dev, &dfs_fops);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7bb4d2ea9342..af5b32c9a791 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (page_addr != -1) {
write_addr_reg(nand, page_addr);
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
write_addr_reg(nand, page_addr >> 8);
write_addr_reg(nand, page_addr >> 16 | ENDADDR);
} else {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 54540c8fa1a2..dad438c4906a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
+ * @i: The sector number (for a multi sector page)
*
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Support calculating of BCH4/8/16 ECC vectors for one sector
+ * within a page. Sector number is in @i.
*/
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
- const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc, int i)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
- unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val;
- int i, j;
+ int j;
+
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
+ * when SW based correction is required as ECC is required for one sector
+ * at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ unsigned long nsectors;
+ int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) {
- ecc_code = ecc_calc;
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH8_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
- bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
- *ecc_code++ = (bch_val4 & 0xFF);
- *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
- *ecc_code++ = (bch_val3 & 0xFF);
- *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
- *ecc_code++ = (bch_val2 & 0xFF);
- *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
- *ecc_code++ = (bch_val1 & 0xFF);
- break;
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH4_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val2 & 0xF) << 4) |
- ((bch_val1 >> 28) & 0xF);
- *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val1 & 0xF) << 4);
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- val = readl(gpmc_regs->gpmc_bch_result6[i]);
- ecc_code[0] = ((val >> 8) & 0xFF);
- ecc_code[1] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result5[i]);
- ecc_code[2] = ((val >> 24) & 0xFF);
- ecc_code[3] = ((val >> 16) & 0xFF);
- ecc_code[4] = ((val >> 8) & 0xFF);
- ecc_code[5] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result4[i]);
- ecc_code[6] = ((val >> 24) & 0xFF);
- ecc_code[7] = ((val >> 16) & 0xFF);
- ecc_code[8] = ((val >> 8) & 0xFF);
- ecc_code[9] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result3[i]);
- ecc_code[10] = ((val >> 24) & 0xFF);
- ecc_code[11] = ((val >> 16) & 0xFF);
- ecc_code[12] = ((val >> 8) & 0xFF);
- ecc_code[13] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result2[i]);
- ecc_code[14] = ((val >> 24) & 0xFF);
- ecc_code[15] = ((val >> 16) & 0xFF);
- ecc_code[16] = ((val >> 8) & 0xFF);
- ecc_code[17] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result1[i]);
- ecc_code[18] = ((val >> 24) & 0xFF);
- ecc_code[19] = ((val >> 16) & 0xFF);
- ecc_code[20] = ((val >> 8) & 0xFF);
- ecc_code[21] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result0[i]);
- ecc_code[22] = ((val >> 24) & 0xFF);
- ecc_code[23] = ((val >> 16) & 0xFF);
- ecc_code[24] = ((val >> 8) & 0xFF);
- ecc_code[25] = ((val >> 0) & 0xFF);
- break;
- default:
- return -EINVAL;
- }
-
- /* ECC scheme specific syndrome customizations */
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch4_polynomial[j];
- break;
- case OMAP_ECC_BCH4_CODE_HW:
- /* Set 8th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch8_polynomial[j];
- break;
- case OMAP_ECC_BCH8_CODE_HW:
- /* Set 14th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- break;
- default:
- return -EINVAL;
- }
+ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+ if (ret)
+ return ret;
- ecc_calc += eccbytes;
+ ecc_calc += eccbytes;
}
return 0;
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */
- chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
@@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ u32 start_step = offset / ecc_size;
+ u32 end_step = (offset + data_len - 1) / ecc_size;
+ int step, ret = 0;
+
+ /*
+ * Write entire page at one go as it would be optimal
+ * as ECC is calculated by hardware.
+ * ECC is calculated for all subpages but we choose
+ * only what we want.
+ */
+
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if (step < start_step || step > end_step)
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+ if (ret)
+ return ret;
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ }
+
+ /* copy calculated ECC for whole page to chip->buffer->oob */
+ /* this include masked-value(0xFF) for unwritten subpages */
+ ecc_calc = chip->buffers->ecccalc;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
* omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total);
/* Calculate ecc bytes */
- chip->ecc.calculate(mtd, buf, ecc_calc);
+ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
return true;
}
-static bool omap2_nand_ecc_check(struct omap_nand_info *info,
- struct omap_nand_platform_data *pdata)
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
- struct omap_nand_platform_data *pdata = NULL;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
- if (dev->of_node) {
- if (omap_get_dt_info(dev, info))
- return -EINVAL;
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- info->gpmc_cs = pdata->cs;
- info->reg = pdata->reg;
- info->ecc_opt = pdata->ecc_opt;
- if (pdata->dev_ready)
- dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
-
- info->xfer_type = pdata->xfer_type;
- info->devsize = pdata->devsize;
- info->elm_of_node = pdata->elm_of_node;
- info->flash_bbt = pdata->flash_bbt;
- }
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
- platform_set_drvdata(pdev, info);
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- if (!omap2_nand_ecc_check(info, pdata)) {
+ if (!omap2_nand_ecc_check(info)) {
err = -EINVAL;
goto return_error;
}
@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2167,10 +2255,9 @@ scan_tail:
if (err)
goto return_error;
- if (dev->of_node)
- mtd_device_register(mtd, NULL, 0);
- else
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto return_error;
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 85cff68643e0..90b9a9ccbe60 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
#define NAND_STOP_DELAY msecs_to_jiffies(40)
@@ -45,6 +47,10 @@
*/
#define INIT_BUFFER_SIZE 2048
+/* System control register and bit to enable NAND on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -174,6 +180,7 @@ enum {
enum pxa3xx_nand_variant {
PXA3XX_NAND_VARIANT_PXA,
PXA3XX_NAND_VARIANT_ARMADA370,
+ PXA3XX_NAND_VARIANT_ARMADA_8K,
};
struct pxa3xx_nand_host {
@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,armada370-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
},
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
+ },
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
info->retcode = ERR_UNCORERR;
if (status & NDSR_CORERR) {
info->retcode = ERR_CORERR;
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
info->ecc_bch)
info->ecc_err_cnt = NDSR_ERR_CNT(status);
else
@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb2);
/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDCB0, info->ndcb3);
}
@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->options |= NAND_BUSWIDTH_16;
/* Device detection must be done with ECC disabled */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDECCCTRL, 0x0);
if (pdata->flash_bbt)
@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
* (aka splitted) command handling,
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (!of_id)
return 0;
+ /*
+ * Some SoCs like A7k/A8k need to enable manually the NAND
+ * controller to avoid being bootloader dependent. This is done
+ * through the use of a single bit in the System Functions registers.
+ */
+ if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+ struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
+ reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+ }
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 3baddfc997d1..2656c1ac5646 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
+ * @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the start position ce
+ * for current sgl. It will be used for size calculation
+ * for current sgl
* @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx.
@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @rx_sgl_start - start index in data sgl for rx.
*/
struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
u32 cmd_sgl_pos;
u32 cmd_sgl_start;
u32 tx_sgl_pos;
@@ -307,7 +325,8 @@ struct nandc_regs {
* controller
* @dev: parent device
* @base: MMIO base
- * @base_dma: physical base address of controller registers
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @core_clk: controller clock
* @aon_clk: another controller clock
*
@@ -340,6 +359,7 @@ struct qcom_nand_controller {
struct device *dev;
void __iomem *base;
+ phys_addr_t base_phys;
dma_addr_t base_dma;
struct clk *core_clk;
@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn_size =
sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn);
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
if (!nandc->props->is_bam)
return;
+ bam_txn->bam_ce_pos = 0;
+ bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0;
@@ -734,6 +761,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
}
/*
+ * Prepares the command descriptor for BAM DMA which will be used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be formed in command element type so this function uses the command
+ * element from bam transaction ce array and fills the same with required
+ * data. A single SGL can contain multiple command elements so
+ * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+ * after the current command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
*/
@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
{
bool flow_control = false;
void *vaddr;
- int size;
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first);
- size = num_regs * sizeof(u32);
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
+ return prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
bool flow_control = false;
struct nandc_regs *regs = nandc->regs;
void *vaddr;
- int size;
vaddr = offset_to_nandc_reg(regs, first);
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set;
@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
- size = num_regs * sizeof(u32);
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, false, first, vaddr, size,
- flow_control);
+ return prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
+ r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
if (r)
return r;
}
@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base);
+ nandc->base_phys = res->start;
nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
nandc->core_clk = devm_clk_get(dev, "core");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e7f3c98487e6..3c5008a4f5f3 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
- const struct of_device_id *match;
- struct flctl_soc_config *config;
+ const struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
- match = of_match_device(of_flctl_match, dev);
- if (match)
- config = (struct flctl_soc_config *)match->data;
- else {
+ config = of_device_get_match_data(dev);
+ if (!config) {
dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL;
}
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index d206b3c533bc..ee5ab994132f 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -6,3 +6,11 @@ config MTD_PARSER_TRX
may contain up to 3/4 partitions (depending on the version).
This driver will parse TRX header and report at least two partitions:
kernel and rootfs.
+
+config MTD_SHARPSL_PARTS
+ tristate "Sharp SL Series NAND flash partition parser"
+ depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST
+ help
+ This provides the read-only FTL logic necessary to read the partition
+ table from the NAND flash of Sharp SL Series (Zaurus) and the MTD
+ partition parser using this code.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 4d9024e0be3b..5b1bcc3d90d9 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
new file mode 100644
index 000000000000..5fe0079ea5ed
--- /dev/null
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -0,0 +1,398 @@
+/*
+ * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
+ * for logical addressing, as used on the PXA models of the SHARP SL Series.
+ *
+ * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com>
+ *
+ * Based on SHARP GPL 2.4 sources:
+ * http://support.ezaurus.com/developer/source/source_dl.asp
+ * drivers/mtd/nand/sharp_sl_logical.c
+ * linux/include/asm-arm/sharp_nand_logical.h
+ *
+ * Copyright (C) 2002 SHARP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/* oob structure */
+#define NAND_NOOB_LOGADDR_00 8
+#define NAND_NOOB_LOGADDR_01 9
+#define NAND_NOOB_LOGADDR_10 10
+#define NAND_NOOB_LOGADDR_11 11
+#define NAND_NOOB_LOGADDR_20 12
+#define NAND_NOOB_LOGADDR_21 13
+
+#define BLOCK_IS_RESERVED 0xffff
+#define BLOCK_UNMASK_COMPLEMENT 1
+
+/* factory defaults */
+#define SHARPSL_NAND_PARTS 3
+#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M)
+#define SHARPSL_PARTINFO1_LADDR 0x00060000
+#define SHARPSL_PARTINFO2_LADDR 0x00064000
+
+#define BOOT_MAGIC 0x424f4f54
+#define FSRO_MAGIC 0x4653524f
+#define FSRW_MAGIC 0x46535257
+
+/**
+ * struct sharpsl_ftl - Sharp FTL Logical Table
+ * @logmax: number of logical blocks
+ * @log2phy: the logical-to-physical table
+ *
+ * Structure containing the logical-to-physical translation table
+ * used by the SHARP SL FTL.
+ */
+struct sharpsl_ftl {
+ unsigned int logmax;
+ unsigned int *log2phy;
+};
+
+/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
+static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
+{
+ u8 freebytes = 0;
+ int section = 0;
+
+ while (true) {
+ struct mtd_oob_region oobfree = { };
+ int ret, i;
+
+ ret = mtd_ooblayout_free(mtd, section++, &oobfree);
+ if (ret)
+ break;
+
+ if (!oobfree.length || oobfree.offset > 15 ||
+ (oobfree.offset + oobfree.length) < 8)
+ continue;
+
+ i = oobfree.offset >= 8 ? oobfree.offset : 8;
+ for (; i < oobfree.offset + oobfree.length && i < 16; i++)
+ freebytes |= BIT(i - 8);
+
+ if (freebytes == 0xff)
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+
+ ret = mtd_read_oob(mtd, offs, &ops);
+ if (ret != 0 || mtd->oobsize != ops.oobretlen)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * The logical block number assigned to a physical block is stored in the OOB
+ * of the first page, in 3 16-bit copies with the following layout:
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxyxy
+ *
+ * When reading we check that the first two copies agree.
+ * In case of error, matching is tried using the following pairs.
+ * Reserved values 0xffff mean the block is kept for wear leveling.
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9
+ * ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11
+ * ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13
+ */
+static int sharpsl_nand_get_logical_num(u8 *oob)
+{
+ u16 us;
+ int good0, good1;
+
+ if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
+ oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
+ good0 = NAND_NOOB_LOGADDR_00;
+ good1 = NAND_NOOB_LOGADDR_01;
+ } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
+ oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
+ good0 = NAND_NOOB_LOGADDR_10;
+ good1 = NAND_NOOB_LOGADDR_11;
+ } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
+ oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
+ good0 = NAND_NOOB_LOGADDR_20;
+ good1 = NAND_NOOB_LOGADDR_21;
+ } else {
+ return -EINVAL;
+ }
+
+ us = oob[good0] | oob[good1] << 8;
+
+ /* parity check */
+ if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
+ return -EINVAL;
+
+ /* reserved */
+ if (us == BLOCK_IS_RESERVED)
+ return BLOCK_IS_RESERVED;
+
+ return (us >> 1) & GENMASK(9, 0);
+}
+
+static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
+{
+ unsigned int block_num, log_num, phymax;
+ loff_t block_adr;
+ u8 *oob;
+ int i, ret;
+
+ oob = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!oob)
+ return -ENOMEM;
+
+ phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
+
+ /* FTL reserves 5% of the blocks + 1 spare */
+ ftl->logmax = ((phymax * 95) / 100) - 1;
+
+ ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
+ GFP_KERNEL);
+ if (!ftl->log2phy) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize ftl->log2phy */
+ for (i = 0; i < ftl->logmax; i++)
+ ftl->log2phy[i] = UINT_MAX;
+
+ /* create physical-logical table */
+ for (block_num = 0; block_num < phymax; block_num++) {
+ block_adr = block_num * mtd->erasesize;
+
+ if (mtd_block_isbad(mtd, block_adr))
+ continue;
+
+ if (sharpsl_nand_read_oob(mtd, block_adr, oob))
+ continue;
+
+ /* get logical block */
+ log_num = sharpsl_nand_get_logical_num(oob);
+
+ /* cut-off errors and skip the out-of-range values */
+ if (log_num > 0 && log_num < ftl->logmax) {
+ if (ftl->log2phy[log_num] == UINT_MAX)
+ ftl->log2phy[log_num] = block_num;
+ }
+ }
+
+ pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
+ phymax, ftl->logmax, phymax - ftl->logmax);
+
+ ret = 0;
+exit:
+ kfree(oob);
+ return ret;
+}
+
+void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+{
+ kfree(ftl->log2phy);
+}
+
+static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
+ loff_t from,
+ size_t len,
+ void *buf,
+ struct sharpsl_ftl *ftl)
+{
+ unsigned int log_num, final_log_num;
+ unsigned int block_num;
+ loff_t block_adr;
+ loff_t block_ofs;
+ size_t retlen;
+ int err;
+
+ log_num = mtd_div_by_eb((u32)from, mtd);
+ final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
+
+ if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
+ return -EINVAL;
+
+ block_num = ftl->log2phy[log_num];
+ block_adr = block_num * mtd->erasesize;
+ block_ofs = mtd_mod_by_eb((u32)from, mtd);
+
+ err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (!err && retlen != len)
+ err = -EIO;
+
+ if (err)
+ pr_err("sharpslpart: error, read failed at %#llx\n",
+ block_adr + block_ofs);
+
+ return err;
+}
+
+/*
+ * MTD Partition Parser
+ *
+ * Sample values read from SL-C860
+ *
+ * # cat /proc/mtd
+ * dev: size erasesize name
+ * mtd0: 006d0000 00020000 "Filesystem"
+ * mtd1: 00700000 00004000 "smf"
+ * mtd2: 03500000 00004000 "root"
+ * mtd3: 04400000 00004000 "home"
+ *
+ * PARTITIONINFO1
+ * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT....
+ * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO....
+ * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW....
+ */
+struct sharpsl_nand_partinfo {
+ __le32 start;
+ __le32 end;
+ __be32 magic;
+ u32 reserved;
+};
+
+static int sharpsl_nand_read_partinfo(struct mtd_info *master,
+ loff_t from,
+ size_t len,
+ struct sharpsl_nand_partinfo *buf,
+ struct sharpsl_ftl *ftl)
+{
+ int ret;
+
+ ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
+ if (ret)
+ return ret;
+
+ /* check for magics */
+ if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
+ be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
+ be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
+ pr_err("sharpslpart: magic values mismatch\n");
+ return -EINVAL;
+ }
+
+ /* fixup for hardcoded value 64 MiB (for older models) */
+ buf[2].end = cpu_to_le32(master->size);
+
+ /* extra sanity check */
+ if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
+ le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
+ le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
+ le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
+ le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
+ pr_err("sharpslpart: partition sizes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct sharpsl_ftl ftl;
+ struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
+ struct mtd_partition *sharpsl_nand_parts;
+ int err;
+
+ /* check that OOB bytes 8 to 15 used by the FTL are actually free */
+ err = sharpsl_nand_check_ooblayout(master);
+ if (err)
+ return err;
+
+ /* init logical mgmt (FTL) */
+ err = sharpsl_nand_init_ftl(master, &ftl);
+ if (err)
+ return err;
+
+ /* read and validate first partition table */
+ pr_info("sharpslpart: try reading first partition table\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO1_LADDR,
+ sizeof(buf), buf, &ftl);
+ if (err) {
+ /* fallback: read second partition table */
+ pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO2_LADDR,
+ sizeof(buf), buf, &ftl);
+ }
+
+ /* cleanup logical mgmt (FTL) */
+ sharpsl_nand_cleanup_ftl(&ftl);
+
+ if (err) {
+ pr_err("sharpslpart: both partition tables are invalid\n");
+ return err;
+ }
+
+ sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) *
+ SHARPSL_NAND_PARTS, GFP_KERNEL);
+ if (!sharpsl_nand_parts)
+ return -ENOMEM;
+
+ /* original names */
+ sharpsl_nand_parts[0].name = "smf";
+ sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
+ sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
+ le32_to_cpu(buf[0].start);
+
+ sharpsl_nand_parts[1].name = "root";
+ sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
+ sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
+ le32_to_cpu(buf[1].start);
+
+ sharpsl_nand_parts[2].name = "home";
+ sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
+ sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
+ le32_to_cpu(buf[2].start);
+
+ *pparts = sharpsl_nand_parts;
+ return SHARPSL_NAND_PARTS;
+}
+
+static struct mtd_part_parser sharpsl_mtd_parser = {
+ .parse_fn = sharpsl_parse_mtd_partitions,
+ .name = "sharpslpart",
+};
+module_mtd_part_parser(sharpsl_mtd_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
+MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 3692dd547879..4237c7cebf02 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -989,9 +989,9 @@ restart:
/* flush timer, runs a second after last write */
-static void sm_cache_flush_timer(unsigned long data)
+static void sm_cache_flush_timer(struct timer_list *t)
{
- struct sm_ftl *ftl = (struct sm_ftl *)data;
+ struct sm_ftl *ftl = from_timer(ftl, t, timer);
queue_work(cache_flush_workqueue, &ftl->flush_work);
}
@@ -1139,7 +1139,7 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
mutex_init(&ftl->mutex);
- setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
init_completion(&ftl->erase_completion);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 69c638dd0484..89da88e59121 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || COMPILE_TEST)
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
help
Enable support for the Cadence Quad SPI Flash controller.
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI
tristate
config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash PCI driver"
depends on X86 && PCI
select SPI_INTEL_SPI
help
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
will be called intel-spi-pci.
config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash platform driver"
depends on X86
select SPI_INTEL_SPI
help
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 53c7d8e0327a..75a2bc447a99 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -31,6 +31,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>
@@ -38,6 +39,9 @@
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16
+/* Quirks */
+#define CQSPI_NEEDS_WR_DELAY BIT(0)
+
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -75,7 +79,9 @@ struct cqspi_st {
bool is_decoded_cs;
u32 fifo_depth;
u32 fifo_width;
+ bool rclk_en;
u32 trigger_address;
+ u32 wr_delay;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
@@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
+ /*
+ * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ * Controller programming sequence, couple of cycles of
+ * QSPI_REF_CLK delay is required for the above bit to
+ * be internally synchronized by the QSPI module. Provide 5
+ * cycles of delay.
+ */
+ if (cqspi->wr_delay)
+ ndelay(cqspi->wr_delay);
while (remaining > 0) {
write_bytes = remaining > page_size ? page_size : remaining;
@@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
}
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
- const unsigned int bypass,
+ const bool bypass,
const unsigned int delay)
{
void __iomem *reg_base = cqspi->iobase;
@@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor)
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
cqspi_delay(nor);
- cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+ f_pdata->read_delay);
}
if (switch_cs || switch_ck)
@@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
return -ENXIO;
}
+ cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+
return 0;
}
@@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct cqspi_st *cqspi;
struct resource *res;
struct resource *res_ahb;
+ unsigned long data;
int ret;
int irq;
@@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev)
return -ENXIO;
}
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
dev_err(dev, "Cannot enable QSPI clock.\n");
- return ret;
+ goto probe_clk_failed;
}
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ data = (unsigned long)of_device_get_match_data(dev);
+ if (data & CQSPI_NEEDS_WR_DELAY)
+ cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->master_ref_clk_hz);
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
@@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev)
}
return ret;
-probe_irq_failed:
- cqspi_controller_enable(cqspi, 0);
probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
clk_disable_unprepare(cqspi->clk);
+probe_clk_failed:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev)
clk_disable_unprepare(cqspi->clk);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
@@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#endif
static const struct of_device_id cqspi_dt_ids[] = {
- {.compatible = "cdns,qspi-nor",},
+ {
+ .compatible = "cdns,qspi-nor",
+ .data = (void *)0,
+ },
+ {
+ .compatible = "ti,k2g-qspi",
+ .data = (void *)CQSPI_NEEDS_WR_DELAY,
+ },
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index e82652335ede..c0976f2e3dd1 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 8a596bfeddff..ef034d898a23 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -67,8 +67,6 @@
#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE BIT(15)
#define PR_BASE_MASK 0x3fff
-/* Last PR is GPR0 */
-#define PR_NUM (5 + 1)
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
@@ -90,20 +88,35 @@
#define OPMENU0 0x08
#define OPMENU1 0x0c
+#define OPTYPE_READ_NO_ADDR 0
+#define OPTYPE_WRITE_NO_ADDR 1
+#define OPTYPE_READ_WITH_ADDR 2
+#define OPTYPE_WRITE_WITH_ADDR 3
+
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
#define BYT_BCR 0xfc
#define BYT_BCR_WPD BIT(0)
#define BYT_FREG_NUM 5
+#define BYT_PR_NUM 5
#define LPT_PR 0x74
#define LPT_SSFSTS_CTL 0x90
#define LPT_FREG_NUM 5
+#define LPT_PR_NUM 5
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
+#define BXT_PR_NUM 6
+
+#define LVSCC 0xc4
+#define UVSCC 0xc8
+#define ERASE_OPCODE_SHIFT 8
+#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT 16
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -117,8 +130,11 @@
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
* @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
* @writeable: Is the chip writeable
- * @swseq: Use SW sequencer in register reads/writes
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. This are programmed by BIOS
* before it locks down the controller.
@@ -132,8 +148,11 @@ struct intel_spi {
void __iomem *pregs;
void __iomem *sregs;
size_t nregions;
+ size_t pr_num;
bool writeable;
- bool swseq;
+ bool locked;
+ bool swseq_reg;
+ bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
u8 preopcodes[2];
@@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
for (i = 0; i < ispi->nregions; i++)
dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
readl(ispi->base + FREG(i)));
- for (i = 0; i < PR_NUM; i++)
+ for (i = 0; i < ispi->pr_num; i++)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
@@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+ dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+ dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
dev_dbg(ispi->dev, "Protected regions:\n");
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 base, limit;
value = readl(ispi->pregs + PR(i));
@@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
- ispi->swseq ? 'S' : 'H');
+ ispi->swseq_reg ? 'S' : 'H');
+ dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
+ ispi->swseq_erase ? 'S' : 'H');
}
/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
@@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
static int intel_spi_init(struct intel_spi *ispi)
{
- u32 opmenu0, opmenu1, val;
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
int i;
switch (ispi->info->type) {
@@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
ispi->pregs = ispi->base + BYT_PR;
ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
if (writeable) {
/* Disable write protection */
@@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
ispi->pregs = ispi->base + LPT_PR;
ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
break;
case INTEL_SPI_BXT:
ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
ispi->pregs = ispi->base + BXT_PR;
ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
ispi->erase_64k = true;
break;
@@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi)
return -EINVAL;
}
- /* Disable #SMI generation */
+ /* Disable #SMI generation from HW sequencer */
val = readl(ispi->base + HSFSTS_CTL);
val &= ~HSFSTS_CTL_FSMIE;
writel(val, ispi->base + HSFSTS_CTL);
/*
- * BIOS programs allowed opcodes and then locks down the register.
- * So read back what opcodes it decided to support. That's the set
- * we are going to support as well.
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
*/
- opmenu0 = readl(ispi->sregs + OPMENU0);
- opmenu1 = readl(ispi->sregs + OPMENU1);
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ ispi->erase_64k = false;
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
- * using software sequencer. If we find that BIOS has programmed
- * opcodes for the software sequencer we use that over the hardware
- * sequencer.
+ * using software sequencer.
*/
- if (opmenu0 && opmenu1) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
- ispi->opcodes[i] = opmenu0 >> i * 8;
- ispi->opcodes[i + 4] = opmenu1 >> i * 8;
- }
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
-
+ if (ispi->swseq_reg) {
/* Disable #SMI generation from SW sequencer */
val = readl(ispi->sregs + SSFSTS_CTL);
val &= ~SSFSTS_CTL_FSMIE;
writel(val, ispi->sregs + SSFSTS_CTL);
+ }
+
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
- ispi->swseq = true;
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+
+ val = readl(ispi->sregs + PREOP_OPTYPE);
+ ispi->preopcodes[0] = val;
+ ispi->preopcodes[1] = val >> 8;
+ }
}
intel_spi_dump_regs(ispi);
@@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi)
return 0;
}
-static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
int i;
+ int preop;
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
- if (ispi->opcodes[i] == opcode)
- return i;
- return -EINVAL;
+ if (ispi->locked) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+ if (ispi->opcodes[i] == opcode)
+ return i;
+
+ return -EINVAL;
+ }
+
+ /* The lock is off, so just use index 0 */
+ writel(opcode, ispi->sregs + OPMENU0);
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
+
+ return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
{
u32 val, status;
int ret;
@@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return -EINVAL;
}
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
@@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return 0;
}
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+ int optype)
{
- u32 val, status;
+ u32 val = 0, status;
+ u16 preop;
int ret;
- ret = intel_spi_opcode_index(ispi, opcode);
+ ret = intel_spi_opcode_index(ispi, opcode, optype);
if (ret < 0)
return ret;
- val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ /* Only mark 'Data Cycle' bit when there is data to be transferred */
+ if (len > 0)
+ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO;
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if (preop) {
+ val |= SSFSTS_CTL_ACS;
+ if (preop >> 8)
+ val |= SSFSTS_CTL_SPOP;
+ }
writel(val, ispi->sregs + SSFSTS_CTL);
ret = intel_spi_wait_sw_busy(ispi);
if (ret)
return ret;
- status = readl(ispi->base + SSFSTS_CTL);
+ status = readl(ispi->sregs + SSFSTS_CTL);
if (status & SSFSTS_CTL_FCERR)
return -EIO;
else if (status & SSFSTS_CTL_AEL)
@@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/* Address of the first chip */
writel(0, ispi->base + FADDR);
- if (ispi->swseq)
- ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ ret = intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_READ_NO_ADDR);
else
- ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
+ ret = intel_spi_hw_cycle(ispi, opcode, len);
if (ret)
return ret;
@@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/*
* This is handled with atomic operation and preop code in Intel
- * controller so skip it here now.
+ * controller so skip it here now. If the controller is not locked,
+ * program the opcode to the PREOP register for later use.
*/
- if (opcode == SPINOR_OP_WREN)
+ if (opcode == SPINOR_OP_WREN) {
+ if (!ispi->locked)
+ writel(opcode, ispi->sregs + PREOP_OPTYPE);
+
return 0;
+ }
writel(0, ispi->base + FADDR);
@@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
if (ret)
return ret;
- if (ispi->swseq)
- return intel_spi_sw_cycle(ispi, opcode, buf, len);
- return intel_spi_hw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ return intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_WRITE_NO_ADDR);
+ return intel_spi_hw_cycle(ispi, opcode, len);
}
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
@@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCYCLE_WRITE;
- /* Write enable */
- if (ispi->preopcodes[1] == SPINOR_OP_WREN)
- val |= SSFSTS_CTL_SPOP;
- val |= SSFSTS_CTL_ACS;
- writel(val, ispi->base + HSFSTS_CTL);
-
ret = intel_spi_write_block(ispi, write_buf, block_size);
if (ret) {
dev_err(ispi->dev, "failed to write block\n");
@@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
}
/* Start the write now */
- val = readl(ispi->base + HSFSTS_CTL);
- writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret) {
@@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
erase_size = SZ_4K;
}
+ if (ispi->swseq_erase) {
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+ ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
+ 0, OPTYPE_WRITE_WITH_ADDR);
+ if (ret)
+ return ret;
+
+ offs += erase_size;
+ len -= erase_size;
+ }
+
+ return 0;
+ }
+
while (len > 0) {
writel(offs, ispi->base + FADDR);
@@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi,
{
int i;
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 pr_base, pr_limit, pr_value;
pr_value = readl(ispi->pregs + PR(i));
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index c258c7adf1c5..abe455ccd68b 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
return ret;
}
+static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+{
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mt8173_nor->nor_clk);
+}
+
+static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ if (ret) {
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct device_node *flash_node)
{
@@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
return PTR_ERR(mt8173_nor->nor_clk);
mt8173_nor->dev = &pdev->dev;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+
+ ret = mt8173_nor_enable_clk(mt8173_nor);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- return ret;
- }
/* only support one attached flash */
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
@@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = mtk_nor_init(mt8173_nor, flash_np);
nor_free:
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
- }
+ if (ret)
+ mt8173_nor_disable_clk(mt8173_nor);
+
return ret;
}
@@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
{
struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ mt8173_nor_disable_clk(mt8173_nor);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nor_suspend(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ mt8173_nor_disable_clk(mt8173_nor);
+
return 0;
}
+static int mtk_nor_resume(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ return mt8173_nor_enable_clk(mt8173_nor);
+}
+
+static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
+ .suspend = mtk_nor_suspend,
+ .resume = mtk_nor_resume,
+};
+
+#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops)
+#else
+#define MTK_NOR_DEV_PM_OPS NULL
+#endif
+
static const struct of_device_id mtk_nor_of_ids[] = {
{ .compatible = "mediatek,mt8173-nor"},
{ /* sentinel */ }
@@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = {
.remove = mtk_nor_drv_remove,
.driver = {
.name = "mtk-nor",
+ .pm = MTK_NOR_DEV_PM_OPS,
.of_match_table = mtk_nor_of_ids,
},
};
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 19c000722cbc..bc266f70a15b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -89,6 +89,8 @@ struct flash_info {
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
+
+ int (*quad_enable)(struct spi_nor *nor);
};
#define JEDEC_MFR(info) ((info)->id[0])
@@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
+static int macronix_quad_enable(struct spi_nor *nor);
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
@@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
/* Everspin */
+ { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -983,6 +988,11 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
+ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ .quad_enable = macronix_quad_enable,
+ },
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
@@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ {
+ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
@@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
/* Check the SFDP header version. */
if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
- header.major != SFDP_JESD216_MAJOR ||
- header.minor < SFDP_JESD216_MINOR)
+ header.major != SFDP_JESD216_MAJOR)
return -EINVAL;
/*
@@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
params->quad_enable = spansion_quad_enable;
break;
}
+
+ /*
+ * Some manufacturer like GigaDevice may use different
+ * bit to set QE on different memories, so the MFR can't
+ * indicate the quad_enable method for this case, we need
+ * set it in flash info list.
+ */
+ if (info->quad_enable)
+ params->quad_enable = info->quad_enable;
}
/* Override the parameters with data read from SFDP tables. */
@@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
/* Enable Quad I/O if needed. */
enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
spi_nor_get_protocol_width(nor->write_proto) == 4);
- if (enable_quad_io && params->quad_enable) {
- err = params->quad_enable(nor);
+ if (enable_quad_io && params->quad_enable)
+ nor->quad_enable = params->quad_enable;
+ else
+ nor->quad_enable = NULL;
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ /*
+ * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
+ * with the software protection bits set
+ */
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
+ }
+
+ if (nor->quad_enable) {
+ err = nor->quad_enable(nor);
if (err) {
dev_err(nor->dev, "quad mode not supported\n");
return err;
}
}
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 1);
+
return 0;
}
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
@@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set
- */
-
- if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(info) == SNOR_MFR_SST ||
- info->flags & SPI_NOR_HAS_LOCK) {
- write_enable(nor);
- write_sr(nor, 0);
- spi_nor_wait_till_ready(nor);
- }
-
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
@@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
info->flags & SPI_NOR_4B_OPCODES)
spi_nor_set_4byte_opcodes(nor, info);
- else
- set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
@@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
}
+ /* Send all the required SPI flash commands to initialize device */
+ nor->info = info;
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index 86c0931543c5..b3c7f6addba7 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -1,9 +1,22 @@
/*
- * stm32_quadspi.c
+ * Driver for stm32 quadspi controller
*
- * Copyright (C) 2017, Ludovic Barre
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Ludovic Barre author <ludovic.barre@st.com>.
*
- * License terms: GNU General Public License (GPL), version 2
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * This program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/errno.h>
@@ -113,6 +126,7 @@
#define STM32_MAX_MMAP_SZ SZ_256M
#define STM32_MAX_NORCHIP 2
+#define STM32_QSPI_FIFO_SZ 32
#define STM32_QSPI_FIFO_TIMEOUT_US 30000
#define STM32_QSPI_BUSY_TIMEOUT_US 100000
@@ -124,6 +138,7 @@ struct stm32_qspi_flash {
u32 presc;
u32 read_mode;
bool registered;
+ u32 prefetch_limit;
};
struct stm32_qspi {
@@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
STM32_QSPI_FIFO_TIMEOUT_US);
if (ret) {
dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
- break;
+ return ret;
}
tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
}
- return ret;
+ return 0;
}
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
@@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
{
struct stm32_qspi *qspi = flash->qspi;
u32 ccr, dcr, cr;
+ u32 last_byte;
int err;
err = stm32_qspi_wait_nobusy(qspi);
@@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
if (err)
goto abort;
writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
+ } else {
+ last_byte = cmd->addr + cmd->len;
+ if (last_byte > flash->prefetch_limit)
+ goto abort;
}
return err;
@@ -322,7 +342,9 @@ abort:
cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
- dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+ if (err)
+ dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+
return err;
}
@@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
}
flash->fsize = FSIZE_VAL(mtd->size);
+ flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
flash->read_mode = CCR_FMODE_MM;
if (mtd->size > qspi->mm_size)
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index a1b33aa6054a..9697977b80f0 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -423,7 +423,7 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return -EINVAL;
bond_opt_initval(&newval,
- nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+ nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
if (err)
return err;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index fed75e75207a..b8029ea03307 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -66,9 +66,9 @@ static const struct cfhsi_config hsi_default_config = {
static LIST_HEAD(cfhsi_list);
-static void cfhsi_inactivity_tout(unsigned long arg)
+static void cfhsi_inactivity_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -737,9 +737,9 @@ out_of_sync:
schedule_work(&cfhsi->out_of_sync_work);
}
-static void cfhsi_rx_slowpath(unsigned long arg)
+static void cfhsi_rx_slowpath(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -997,9 +997,9 @@ static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
wake_up_interruptible(&cfhsi->wake_down_wait);
}
-static void cfhsi_aggregation_tout(unsigned long arg)
+static void cfhsi_aggregation_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -1211,14 +1211,11 @@ static int cfhsi_open(struct net_device *ndev)
init_waitqueue_head(&cfhsi->flush_fifo_wait);
/* Setup the inactivity timer. */
- setup_timer(&cfhsi->inactivity_timer, cfhsi_inactivity_tout,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
/* Setup the slowpath RX timer. */
- setup_timer(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
/* Setup the aggregation timer. */
- setup_timer(&cfhsi->aggregation_timer, cfhsi_aggregation_tout,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
/* Activate HSI interface. */
res = cfhsi->ops->cfhsi_up(cfhsi->ops);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index a13a4896a8bd..0626dcfd1f3d 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -184,12 +184,12 @@
* Below is some version info we got:
* SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
* Filter? connected? Passive detection ception in MB
- * MX25 FlexCAN2 03.00.00.00 no no ? no no
+ * MX25 FlexCAN2 03.00.00.00 no no no no no
* MX28 FlexCAN2 03.00.04.00 yes yes no no no
- * MX35 FlexCAN2 03.00.00.00 no no ? no no
+ * MX35 FlexCAN2 03.00.00.00 no no no no no
* MX53 FlexCAN2 03.00.00.00 yes no no no no
* MX6s FlexCAN3 10.00.12.00 yes yes no no yes
- * VF610 FlexCAN3 ? no yes ? yes yes?
+ * VF610 FlexCAN3 ? no yes no yes yes?
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 85268be0c913..55513411a82e 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
/* if this frame is an echo, */
if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
- int n;
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- n = can_get_echo_skb(priv->ndev, msg->client);
+ can_get_echo_skb(priv->ndev, msg->client);
spin_unlock_irqrestore(&priv->echo_lock, flags);
/* count bytes of the echo instead of skb */
stats->tx_bytes += cf_len;
stats->tx_packets++;
- if (n) {
- /* restart tx queue only if a slot is free */
- netif_wake_queue(priv->ndev);
- }
+ /* restart tx queue (a slot is free) */
+ netif_wake_queue(priv->ndev);
return 0;
}
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b4efd711f824..788c3464a3b0 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -825,7 +825,10 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
- return err;
+ /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
+ * the probe() function must return a negative errno in case of failure
+ * (err is unchanged if negative) */
+ return pcibios_err_to_errno(err);
}
/* free the board structure object, as well as its resources: */
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 131026fbc2d7..5adc95c922ee 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -717,7 +717,10 @@ failure_release_regions:
failure_disable_pci:
pci_disable_device(pdev);
- return err;
+ /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
+ * the probe() function must return a negative errno in case of failure
+ * (err is unchanged if negative) */
+ return pcibios_err_to_errno(err);
}
static void peak_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 4d4941469cfc..db6ea936dc3f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
mbx_mask = hecc_read(priv, HECC_CANMIM);
mbx_mask |= HECC_TX_MBOX_MASK;
hecc_write(priv, HECC_CANMIM, mbx_mask);
+ } else {
+ /* repoll is done only if whole budget is used */
+ num_pkts = quota;
}
return num_pkts;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b3d02759c226..b00358297424 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
case -ECONNRESET: /* unlink */
case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9fdb0f0bfa06..c6dcf93675c0 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;
case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 9b18d96ef526..63587b8e6825 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
}
if (pos + tmp->len > actual_len) {
- dev_err(dev->udev->dev.parent,
- "Format error\n");
+ dev_err_ratelimited(dev->udev->dev.parent,
+ "Format error\n");
break;
}
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
if (err) {
netdev_err(netdev, "Error transmitting URB\n");
usb_unanchor_urb(urb);
+ kfree(buf);
usb_free_urb(urb);
return err;
}
@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
case 0:
break;
case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;
default:
@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+ while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
msg = urb->transfer_buffer + pos;
/* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
}
if (pos + msg->len > urb->actual_length) {
- dev_err(dev->udev->dev.parent, "Format error\n");
+ dev_err_ratelimited(dev->udev->dev.parent,
+ "Format error\n");
break;
}
@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
usb_unanchor_urb(urb);
+ kfree(buf);
stats->tx_dropped++;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 7f0272558bef..8d8c2086424d 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
break;
case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;
@@ -862,7 +864,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
goto cleanup_unregister_candev;
}
- dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n");
+ dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
return 0;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d000cb62d6ae..27861c417c94 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
break;
case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index b6e2bfd7d2d6..8b1a859f5140 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -165,9 +165,16 @@ static unsigned int network_rec_config_shadow = 0;
static unsigned int network_tr_ctrl_shadow = 0;
+/* Timers */
+static void e100_check_speed(struct timer_list *unused);
+static void e100_clear_network_leds(struct timer_list *unused);
+static void e100_check_duplex(struct timer_list *unused);
+static DEFINE_TIMER(speed_timer, e100_check_speed);
+static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds);
+static DEFINE_TIMER(duplex_timer, e100_check_duplex);
+static struct net_device *timer_dev;
+
/* Network speed indication. */
-static DEFINE_TIMER(speed_timer, NULL);
-static DEFINE_TIMER(clear_led_timer, NULL);
static int current_speed; /* Speed read from transceiver */
static int current_speed_selection; /* Speed selected by user */
static unsigned long led_next_time;
@@ -175,7 +182,6 @@ static int led_active;
static int rx_queue_len;
/* Duplex */
-static DEFINE_TIMER(duplex_timer, NULL);
static int full_duplex;
static enum duplex current_duplex;
@@ -200,9 +206,7 @@ static void update_rx_stats(struct net_device_stats *);
static void update_tx_stats(struct net_device_stats *);
static int e100_probe_transceiver(struct net_device* dev);
-static void e100_check_speed(unsigned long priv);
static void e100_set_speed(struct net_device* dev, unsigned long speed);
-static void e100_check_duplex(unsigned long priv);
static void e100_set_duplex(struct net_device* dev, enum duplex);
static void e100_negotiate(struct net_device* dev);
@@ -214,7 +218,6 @@ static void e100_send_mdio_bit(unsigned char bit);
static unsigned char e100_receive_mdio_bit(void);
static void e100_reset_transceiver(struct net_device* net);
-static void e100_clear_network_leds(unsigned long dummy);
static void e100_set_network_leds(int active);
static const struct ethtool_ops e100_ethtool_ops;
@@ -381,17 +384,12 @@ etrax_ethernet_init(void)
current_speed = 10;
current_speed_selection = 0; /* Auto */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
- speed_timer.data = (unsigned long)dev;
- speed_timer.function = e100_check_speed;
-
- clear_led_timer.function = e100_clear_network_leds;
- clear_led_timer.data = (unsigned long)dev;
full_duplex = 0;
current_duplex = autoneg;
duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
- duplex_timer.data = (unsigned long)dev;
- duplex_timer.function = e100_check_duplex;
+
+ timer_dev = dev;
/* Initialize mii interface */
np->mii_if.phy_id_mask = 0x1f;
@@ -680,9 +678,9 @@ intel_check_speed(struct net_device* dev)
}
#endif
static void
-e100_check_speed(unsigned long priv)
+e100_check_speed(struct timer_list *unused)
{
- struct net_device* dev = (struct net_device*)priv;
+ struct net_device* dev = timer_dev;
struct net_local *np = netdev_priv(dev);
static int led_initiated = 0;
unsigned long data;
@@ -799,9 +797,9 @@ e100_set_speed(struct net_device* dev, unsigned long speed)
}
static void
-e100_check_duplex(unsigned long priv)
+e100_check_duplex(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)priv;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
int old_duplex;
@@ -1669,9 +1667,9 @@ e100_hardware_send_packet(struct net_local *np, char *buf, int length)
}
static void
-e100_clear_network_leds(unsigned long dummy)
+e100_clear_network_leds(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)dummy;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
spin_lock(&np->led_lock);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 93faa1fed6f2..b62d47210db8 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -14,7 +14,6 @@
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
@@ -95,7 +94,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
reg = reg_readl(priv, REG_SPHY_CNTRL);
if (enable) {
reg |= PHY_RESET;
- reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+ reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
reg_writel(priv, reg, REG_SPHY_CNTRL);
udelay(21);
reg = reg_readl(priv, REG_SPHY_CNTRL);
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index b721a2009b50..23b45da784cb 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
slice_num, false);
bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
- slice_num, true);
+ SLICE_NUM_MASK, true);
/* Insert into TCAM now because we need to insert a second rule */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
/* Insert into Action and policer RAMs now, set chain ID to
* the one we are chained to
*/
- ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
queue_num, true);
if (ret)
goto out_err;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8171055fde7a..66d33e97cbc5 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
u16 mask;
mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
- mask |= GENMASK(chip->g1_irq.nirqs, 0);
+ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
free_irq(chip->irq, chip);
@@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
return 0;
out_disable:
- mask |= GENMASK(chip->g1_irq.nirqs, 0);
+ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
out_mapping:
@@ -2177,6 +2177,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
{ },
};
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
+
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mii_bus *bus;
+
+ list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ bus = mdio_bus->bus;
+
+ mdiobus_unregister(bus);
+ }
+}
+
static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
struct device_node *np)
{
@@ -2201,27 +2214,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
match = of_match_node(mv88e6xxx_mdio_external_match, child);
if (match) {
err = mv88e6xxx_mdio_register(chip, child, true);
- if (err)
+ if (err) {
+ mv88e6xxx_mdios_unregister(chip);
return err;
+ }
}
}
return 0;
}
-static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
-
-{
- struct mv88e6xxx_mdio_bus *mdio_bus;
- struct mii_bus *bus;
-
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
- bus = mdio_bus->bus;
-
- mdiobus_unregister(bus);
- }
-}
-
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 436668bd50dc..46af8052e535 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -149,9 +149,9 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
mutex_unlock(&chip->reg_lock);
}
-static void mv88e6xxx_phy_ppu_reenable_timer(unsigned long _ps)
+static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = (void *)_ps;
+ struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
@@ -193,8 +193,7 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
{
mutex_init(&chip->ppu_mutex);
INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
- setup_timer(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer,
- (unsigned long)chip);
+ timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
}
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index a7801f6668a5..6315774d72b3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
break;
case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
break;
case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index fccce4b47778..74263f8efe1a 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -139,9 +139,9 @@ static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
-static void eql_timer(unsigned long param)
+static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = (equalizer_t *) param;
+ equalizer_t *eql = from_timer(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
@@ -178,7 +178,7 @@ static void __init eql_setup(struct net_device *dev)
{
equalizer_t *eql = netdev_priv(dev);
- setup_timer(&eql->timer, eql_timer, (unsigned long)eql);
+ timer_setup(&eql->timer, eql_timer, 0);
eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
spin_lock_init(&eql->queue.lock);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 0658cde1586a..7120f2b9c6ef 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1092,9 +1092,11 @@ static void tx_reclaim_skb(struct bfin_mac_local *lp)
return;
}
-static void tx_reclaim_skb_timeout(unsigned long lp)
+static void tx_reclaim_skb_timeout(struct timer_list *t)
{
- tx_reclaim_skb((struct bfin_mac_local *)lp);
+ struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer);
+
+ tx_reclaim_skb(lp);
}
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
@@ -1650,8 +1652,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
ndev->netdev_ops = &bfin_mac_netdev_ops;
ndev->ethtool_ops = &bfin_mac_ethtool_ops;
- setup_timer(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout,
- (unsigned long)lp);
+ timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0);
lp->flags = 0;
netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 658e92f79d36..48220b6c600d 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3080,9 +3080,9 @@ err_out:
* The routine called when the error timer expires, to track the number of
* recurring errors.
*/
-static void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = (struct et131x_adapter *)data;
+ struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
@@ -3624,8 +3624,7 @@ static int et131x_open(struct net_device *netdev)
int result;
/* Start the timer to track NIC errors */
- setup_timer(&adapter->error_timer, et131x_error_timer_handler,
- (unsigned long)adapter);
+ timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
adapter->error_timer.expires = jiffies +
msecs_to_jiffies(TX_ERROR_PERIOD);
add_timer(&adapter->error_timer);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 5417e4da64ca..97c5a89a9cf7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -517,7 +517,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rc = ena_alloc_rx_page(rx_ring, rx_info,
- __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
+ GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"failed to alloc buffer for rx queue %d\n",
@@ -2579,6 +2579,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
bool wd_state;
int rc;
+ set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "Can not initialize device\n");
@@ -2592,6 +2593,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+ /* Make sure we don't have a race with AENQ Links state handler */
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc) {
@@ -2618,7 +2624,7 @@ err_device_destroy:
ena_com_admin_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
-
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
dev_err(&pdev->dev,
"Reset attempt failed. Can not reset the device\n");
@@ -2853,9 +2859,9 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
-static void ena_timer_service(unsigned long data)
+static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
@@ -3272,8 +3278,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_update_hints(adapter, &get_feat_ctx.hw_hints);
- setup_timer(&adapter->timer_service, ena_timer_service,
- (unsigned long)adapter);
+ timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
@@ -3495,7 +3500,8 @@ static void ena_update_on_link_change(void *adapter_data,
if (status) {
netdev_dbg(adapter->netdev, "%s\n", __func__);
set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
- netif_carrier_on(adapter->netdev);
+ if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
} else {
clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
netif_carrier_off(adapter->netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index ed8bd0a579c4..3bbc003871de 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -272,7 +272,8 @@ enum ena_flags_t {
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
ENA_FLAG_MSIX_ENABLED,
- ENA_FLAG_TRIGGER_RESET
+ ENA_FLAG_TRIGGER_RESET,
+ ENA_FLAG_ONGOING_RESET
};
/* adapter specific private data structure */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 45d92304068e..cc1e4f820e64 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -295,7 +295,7 @@ again:
order = alloc_order;
/* Try to obtain pages, decreasing order if necessary */
- gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
pages = alloc_pages_node(node, gfp, order);
if (pages)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 57e796870595..105fdb958cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -50,7 +50,7 @@
#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
#define AQ_CFG_PCI_FUNC_PORTS 2U
-#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ)
+#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
#define AQ_CFG_SKB_FRAGS_MAX 32U
@@ -80,6 +80,7 @@
#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
__stringify(NIC_MINOR_DRIVER_VERSION)"."\
__stringify(NIC_BUILD_DRIVER_VERSION)"."\
- __stringify(NIC_REVISION_DRIVER_VERSION)
+ __stringify(NIC_REVISION_DRIVER_VERSION) \
+ AQ_CFG_DRV_VERSION_SUFFIX
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 70efb7467bf3..f2d8063a2cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"OutUCast",
"OutMCast",
"OutBCast",
- "InUCastOctects",
- "OutUCastOctects",
- "InMCastOctects",
- "OutMCastOctects",
- "InBCastOctects",
- "OutBCastOctects",
- "InOctects",
- "OutOctects",
+ "InUCastOctets",
+ "OutUCastOctets",
+ "InMCastOctets",
+ "OutMCastOctets",
+ "InBCastOctets",
+ "OutBCastOctets",
+ "InOctets",
+ "OutOctets",
"InPacketsDma",
"OutPacketsDma",
"InOctetsDma",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 0207927dc8a6..b3825de6cdfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
unsigned int mbps;
};
+struct aq_stats_s {
+ u64 uprc;
+ u64 mprc;
+ u64 bprc;
+ u64 erpt;
+ u64 uptc;
+ u64 mptc;
+ u64 bptc;
+ u64 erpr;
+ u64 mbtc;
+ u64 bbtc;
+ u64 mbrc;
+ u64 bbrc;
+ u64 ubrc;
+ u64 ubtc;
+ u64 dpc;
+ u64 dma_pkt_rc;
+ u64 dma_pkt_tc;
+ u64 dma_oct_rc;
+ u64 dma_oct_tc;
+};
+
#define AQ_HW_IRQ_INVALID 0U
#define AQ_HW_IRQ_LEGACY 1U
#define AQ_HW_IRQ_MSI 2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
void (*destroy)(struct aq_hw_s *self);
int (*get_hw_caps)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps);
+ struct aq_hw_caps_s *aq_hw_caps,
+ unsigned short device,
+ unsigned short subsystem_device);
int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
int (*hw_update_stats)(struct aq_hw_s *self);
- int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
- unsigned int *p_count);
+ struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 483e97691eea..75a894a9251c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
+
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -163,14 +165,11 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
return 0;
}
-static void aq_nic_service_timer_cb(unsigned long param)
+static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
- struct net_device *ndev = aq_nic_get_ndev(self);
+ struct aq_nic_s *self = from_timer(self, t, service_timer);
+ int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
int err = 0;
- unsigned int i = 0U;
- struct aq_ring_stats_rx_s stats_rx;
- struct aq_ring_stats_tx_s stats_tx;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
@@ -182,28 +181,19 @@ static void aq_nic_service_timer_cb(unsigned long param)
if (self->aq_hw_ops.hw_update_stats)
self->aq_hw_ops.hw_update_stats(self->aq_hw);
- memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
- memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
- for (i = AQ_DIMOF(self->aq_vec); i--;) {
- if (self->aq_vec[i])
- aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
- }
+ aq_nic_update_ndev_stats(self);
- ndev->stats.rx_packets = stats_rx.packets;
- ndev->stats.rx_bytes = stats_rx.bytes;
- ndev->stats.rx_errors = stats_rx.errors;
- ndev->stats.tx_packets = stats_tx.packets;
- ndev->stats.tx_bytes = stats_tx.bytes;
- ndev->stats.tx_errors = stats_tx.errors;
+ /* If no link - use faster timer rate to detect link up asap */
+ if (!netif_carrier_ok(self->ndev))
+ ctimer = max(ctimer / 2, 1);
err_exit:
- mod_timer(&self->service_timer,
- jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+ mod_timer(&self->service_timer, jiffies + ctimer);
}
-static void aq_nic_polling_timer_cb(unsigned long param)
+static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_nic_s *self = from_timer(self, t, polling_timer);
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
- struct device *dev,
+ struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
ndev->netdev_ops = ndev_ops;
ndev->ethtool_ops = et_ops;
- SET_NETDEV_DEV(ndev, dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->if_port = port;
self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
&self->aq_hw_ops);
- err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
+ err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
+ pdev->device, pdev->subsystem_device);
if (err < 0)
goto err_exit;
@@ -440,14 +431,12 @@ int aq_nic_start(struct aq_nic_s *self)
err = aq_nic_update_interrupt_moderation_settings(self);
if (err)
goto err_exit;
- setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
mod_timer(&self->service_timer, jiffies +
AQ_CFG_SERVICE_TIMER_INTERVAL);
if (self->aq_nic_cfg.is_polling) {
- setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
@@ -751,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
unsigned int count = 0U;
- int err = 0;
+ struct aq_vec_s *aq_vec = NULL;
+ struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
- err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
- if (err < 0)
+ if (!stats)
goto err_exit;
- data += count;
+ data[i] = stats->uprc + stats->mprc + stats->bprc;
+ data[++i] = stats->uprc;
+ data[++i] = stats->mprc;
+ data[++i] = stats->bprc;
+ data[++i] = stats->erpt;
+ data[++i] = stats->uptc + stats->mptc + stats->bptc;
+ data[++i] = stats->uptc;
+ data[++i] = stats->mptc;
+ data[++i] = stats->bptc;
+ data[++i] = stats->ubrc;
+ data[++i] = stats->ubtc;
+ data[++i] = stats->mbrc;
+ data[++i] = stats->mbtc;
+ data[++i] = stats->bbrc;
+ data[++i] = stats->bbtc;
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ data[++i] = stats->dma_pkt_rc;
+ data[++i] = stats->dma_pkt_tc;
+ data[++i] = stats->dma_oct_rc;
+ data[++i] = stats->dma_oct_tc;
+ data[++i] = stats->dpc;
+
+ i++;
+
+ data += i;
count = 0U;
for (i = 0U, aq_vec = self->aq_vec[0];
@@ -770,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
}
err_exit:;
- (void)err;
+}
+
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
+{
+ struct net_device *ndev = self->ndev;
+ struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+
+ ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
+ ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+ ndev->stats.rx_errors = stats->erpr;
+ ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
+ ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+ ndev->stats.tx_errors = stats->erpt;
+ ndev->stats.multicast = stats->mprc;
}
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 4309983acdd6..3c9f8db03d5f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
- struct device *dev,
+ struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index cadaa646c89f..58c29d04b186 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
pci_set_drvdata(pdev, self);
self->pdev = pdev;
- err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+ err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
+ pdev->subsystem_device);
if (err < 0)
goto err_exit;
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
for (port = 0; port < self->ports; ++port) {
struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
- &pdev->dev, self,
+ pdev, self,
port, aq_hw_ops);
if (!aq_nic) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0654e0c76bc2..519ca6534b85 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -304,8 +304,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff->flags = 0U;
buff->len = AQ_CFG_RX_FRAME_MAX;
- buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
- __GFP_COMP, pages_order);
+ buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
if (!buff->page) {
err = -ENOMEM;
goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 07b3c49a16a4..f18dce14c93c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -18,9 +18,20 @@
#include "hw_atl_a0_internal.h"
static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ struct aq_hw_caps_s *aq_hw_caps,
+ unsigned short device,
+ unsigned short subsystem_device)
{
memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
+
+ if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+
+ if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
+ }
+
return 0;
}
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ hw_atl_utils_update_stats(self);
+
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec68c20efcbd..e4a22ce7bf09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -16,11 +16,23 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
+#include "hw_atl_llh_internal.h"
static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ struct aq_hw_caps_s *aq_hw_caps,
+ unsigned short device,
+ unsigned short subsystem_device)
{
memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
+
+ if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+
+ if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
+ }
+
return 0;
}
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
};
int err = 0;
+ u32 val;
self->aq_nic_cfg = aq_nic_cfg;
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+ /* Force limit MRRS on RDM/TDM to 2K */
+ val = aq_hw_read_reg(self, pci_reg_control6_adr);
+ aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+
+ /* TX DMA total request limit. B0 hardware is not capable to
+ * handle more than (8K-MRRS) incoming DMA data.
+ * Value 24 in 256byte units
+ */
+ aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ hw_atl_utils_update_stats(self);
+
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 5527fc0e5942..93450ec930e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2343,6 +2343,9 @@
#define tx_dma_desc_base_addrmsw_adr(descriptor) \
(0x00007c04u + (descriptor) * 0x40)
+/* tx dma total request limit */
+#define tx_dma_total_req_limit_adr 0x00007b20u
+
/* tx interrupt moderation control register definitions
* Preprocessor definitions for TX Interrupt Moderation Control Register
* Base Address: 0x00008980
@@ -2369,6 +2372,9 @@
/* default value of bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_default 0x1
+/* PCI core control register */
+#define pci_reg_control6_adr 0x1014u
+
/* global microprocessor scratch pad definitions */
#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1fe016fc4bc7..f2ce12ed4218 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
struct hw_atl_s *hw_self = PHAL_ATLANTIC;
struct hw_aq_atl_utils_mbox mbox;
- if (!self->aq_link_status.mbps)
- return 0;
-
hw_atl_utils_mpi_read_stats(self, &mbox);
#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
mbox.stats._N_ - hw_self->last_stats._N_)
-
- AQ_SDELTA(uprc);
- AQ_SDELTA(mprc);
- AQ_SDELTA(bprc);
- AQ_SDELTA(erpt);
-
- AQ_SDELTA(uptc);
- AQ_SDELTA(mptc);
- AQ_SDELTA(bptc);
- AQ_SDELTA(erpr);
-
- AQ_SDELTA(ubrc);
- AQ_SDELTA(ubtc);
- AQ_SDELTA(mbrc);
- AQ_SDELTA(mbtc);
- AQ_SDELTA(bbrc);
- AQ_SDELTA(bbtc);
- AQ_SDELTA(dpc);
-
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+ AQ_SDELTA(dpc);
+ }
#undef AQ_SDELTA
+ hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
+ hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
+ hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
+ hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
return 0;
}
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
- u64 *data, unsigned int *p_count)
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
- struct hw_atl_s *hw_self = PHAL_ATLANTIC;
- struct hw_atl_stats_s *stats = &hw_self->curr_stats;
- int i = 0;
-
- data[i] = stats->uprc + stats->mprc + stats->bprc;
- data[++i] = stats->uprc;
- data[++i] = stats->mprc;
- data[++i] = stats->bprc;
- data[++i] = stats->erpt;
- data[++i] = stats->uptc + stats->mptc + stats->bptc;
- data[++i] = stats->uptc;
- data[++i] = stats->mptc;
- data[++i] = stats->bptc;
- data[++i] = stats->ubrc;
- data[++i] = stats->ubtc;
- data[++i] = stats->mbrc;
- data[++i] = stats->mbtc;
- data[++i] = stats->bbrc;
- data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
- data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
- data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
- data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
- data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
- data[++i] = stats->dpc;
-
- if (p_count)
- *p_count = ++i;
-
- return 0;
+ return &PHAL_ATLANTIC->curr_stats;
}
static const u32 hw_atl_utils_hw_mac_regs[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index c99cc690e425..21aeca6908d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
struct __packed hw_atl_s {
struct aq_hw_s base;
struct hw_atl_stats_s last_stats;
- struct hw_atl_stats_s curr_stats;
+ struct aq_stats_s curr_stats;
u64 speed;
unsigned int chip_features;
u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
- u64 *data,
- unsigned int *p_count);
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 0de858d215c2..9009f2651e70 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -11,8 +11,10 @@
#define VER_H
#define NIC_MAJOR_DRIVER_VERSION 1
-#define NIC_MINOR_DRIVER_VERSION 5
-#define NIC_BUILD_DRIVER_VERSION 345
+#define NIC_MINOR_DRIVER_VERSION 6
+#define NIC_BUILD_DRIVER_VERSION 13
#define NIC_REVISION_DRIVER_VERSION 0
+#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
+
#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index e278e3d96ee0..16f9bee992fe 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* RMII interface needs always a rate of 50MHz */
err = clk_set_rate(priv->refclk, 50000000);
- if (err)
+ if (err) {
dev_err(dev,
"failed to change reference clock rate (%d)\n", err);
+ goto out_regulator_disable;
+ }
if (priv->soc_data->need_div_macclk) {
priv->macclk = devm_clk_get(dev, "macclk");
@@ -220,19 +222,24 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* RMII TX/RX needs always a rate of 25MHz */
err = clk_set_rate(priv->macclk, 25000000);
- if (err)
+ if (err) {
dev_err(dev,
"failed to change mac clock rate (%d)\n", err);
+ goto out_clk_disable_macclk;
+ }
}
err = arc_emac_probe(ndev, interface);
if (err) {
dev_err(dev, "failed to probe arc emac (%d)\n", err);
- goto out_regulator_disable;
+ goto out_clk_disable_macclk;
}
return 0;
+out_clk_disable_macclk:
+ if (priv->soc_data->need_div_macclk)
+ clk_disable_unprepare(priv->macclk);
out_regulator_disable:
if (priv->regulator)
regulator_disable(priv->regulator);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 8c9986f3fc01..94270f654b3b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -222,9 +222,10 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1c_phy_config(unsigned long data)
+static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+ struct atl1c_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2613,8 +2614,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
if (err) {
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4f7e195af0bc..9dc6da039a6d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -130,9 +130,10 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1e_phy_config(unsigned long data)
+static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+ struct atl1e_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2361,8 +2362,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
/* get user settings */
atl1e_check_options(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 83d2db2abb45..b81fbf119bce 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2575,9 +2575,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1_phy_config(unsigned long data)
+static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+ struct atl1_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
@@ -3071,8 +3072,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* assume we have no link for now */
netif_carrier_off(netdev);
- setup_timer(&adapter->phy_config_timer, atl1_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
adapter->phy_timer_pending = false;
INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 77a1c03255de..db4bcc51023a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1028,9 +1028,9 @@ static void atl2_tx_timeout(struct net_device *netdev)
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_watchdog(unsigned long data)
+static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1053,9 +1053,10 @@ static void atl2_watchdog(unsigned long data)
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_phy_config(unsigned long data)
+static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
@@ -1434,11 +1435,9 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_check_options(adapter);
- setup_timer(&adapter->watchdog_timer, atl2_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0);
- setup_timer(&adapter->phy_config_timer, atl2_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 42e44fc03a18..e445ab724827 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -599,9 +599,9 @@ static void b44_check_phy(struct b44 *bp)
}
}
-static void b44_timer(unsigned long __opaque)
+static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = (struct b44 *) __opaque;
+ struct b44 *bp = from_timer(bp, t, timer);
spin_lock_irq(&bp->lock);
@@ -1474,7 +1474,7 @@ static int b44_open(struct net_device *dev)
goto out;
}
- setup_timer(&bp->timer, b44_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, b44_timer, 0);
bp->timer.expires = jiffies + HZ;
add_timer(&bp->timer);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b3055a76dfbf..7919f6112ecf 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6183,9 +6183,9 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
}
static void
-bnx2_timer(unsigned long data)
+bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = (struct bnx2 *) data;
+ struct bnx2 *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -8462,7 +8462,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bnx2_set_default_link(bp);
bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
- setup_timer(&bp->timer, bnx2_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnx2_timer, 0);
bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
#ifdef BCM_CNIC
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index be9fd7d184d0..91e2a7560b48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5761,9 +5761,9 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = (struct bnx2x *) data;
+ struct bnx2x *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -12421,7 +12421,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
- setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnx2x_timer, 0);
bp->timer.expires = jiffies + bp->current_interval;
if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 33c49ad697e4..61ca4eb7c6fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1883,7 +1883,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
* here forever if we consistently cannot allocate
* buffers.
*/
- else if (rc == -ENOMEM)
+ else if (rc == -ENOMEM && budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
@@ -1969,7 +1969,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
- if (likely(rc == -EIO))
+ if (likely(rc == -EIO) && budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
@@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+ struct hwrm_short_input short_input = {0};
req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
@@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (bp->flags & BNXT_FLAG_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
- struct hwrm_short_input short_input = {0};
memcpy(short_cmd_req, req, msg_len);
memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@@ -6962,9 +6962,9 @@ static void bnxt_poll_controller(struct net_device *dev)
}
#endif
-static void bnxt_timer(unsigned long data)
+static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = (struct bnxt *)data;
+ struct bnxt *bp = from_timer(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev))
@@ -7236,7 +7236,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bnxt_init_dflt_coal(bp);
- setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
clear_bit(BNXT_STATE_OPEN, &bp->state);
@@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
+ bnxt_ulp_shutdown(bp);
+
if (system_state == SYSTEM_POWER_OFF) {
- bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7ce1d4b7e67d..b13ce5ebde8d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2136,8 +2136,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
- length, data);
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ start, length, data);
}
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d5031f436f83..3d201d7324bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
{
int ifindex = tcf_mirred_ifindex(tc_act);
struct net_device *dev;
- u16 dst_fid;
dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
if (!dev) {
@@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
return -EINVAL;
}
- /* find the FID from dev */
- dst_fid = bnxt_flow_get_dst_fid(bp, dev);
- if (dst_fid == BNXT_FID_INVALID) {
- netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
- return -EINVAL;
- }
-
actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
- actions->dst_fid = dst_fid;
actions->dst_dev = dev;
return 0;
}
@@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
if (rc)
return rc;
- /* Tunnel encap/decap action must be accompanied by a redirect action */
- if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
- actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
- !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
- netdev_info(bp->dev,
- "error: no redir action along with encap/decap");
- return -EINVAL;
+ if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
+ if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
+ /* dst_fid is PF's fid */
+ actions->dst_fid = bp->pf.fw_fid;
+ } else {
+ /* find the FID from dst_dev */
+ actions->dst_fid =
+ bnxt_flow_get_dst_fid(bp, actions->dst_dev);
+ if (actions->dst_fid == BNXT_FID_INVALID)
+ return -EINVAL;
+ }
}
return rc;
@@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
- enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
- CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
ether_addr_copy(req.dst_macaddr, l2_info->dmac);
- ether_addr_copy(req.src_macaddr, l2_info->smac);
}
if (l2_info->num_vlans) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
struct ip_tunnel_key *tun_key,
- struct bnxt_tc_l2_key *l2_info,
- struct net_device *real_dst_dev)
+ struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
+ struct net_device *real_dst_dev = bp->dev;
struct flowi4 flow = { {0} };
struct net_device *dst_dev;
struct neighbour *nbr;
@@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
*/
tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
tun_key.tp_dst = flow->tun_key.tp_dst;
- rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
+ rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
if (rc)
goto put_decap;
- decap_key->ttl = tun_key.ttl;
decap_l2_info = &decap_node->l2_info;
+ /* decap smac is wildcarded */
ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
- ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
if (l2_info.num_vlans) {
decap_l2_info->num_vlans = l2_info.num_vlans;
decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
goto done;
- rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
- flow->actions.dst_dev);
+ rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
if (rc)
goto put_encap;
@@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
return 0;
}
+static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ u16 src_fid)
+{
+ if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
+ flow->src_fid = bp->pf.fw_fid;
+ else
+ flow->src_fid = src_fid;
+}
+
/* Add a new flow or replace an existing flow.
* Notes on locking:
* There are essentially two critical sections here.
@@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
if (rc)
goto free_node;
- flow->src_fid = src_fid;
+
+ bnxt_tc_set_src_fid(bp, flow, src_fid);
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -ENOSPC;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d8d5f207c759..de51c2177d03 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10931,9 +10931,9 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
}
}
-static void tg3_timer(unsigned long __opaque)
+static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = (struct tg3 *) __opaque;
+ struct tg3 *tp = from_timer(tp, t, timer);
spin_lock(&tp->lock);
@@ -11087,7 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp)
tp->asf_multiplier = (HZ / tp->timer_offset) *
TG3_FW_UPDATE_FREQ_SEC;
- setup_timer(&tp->timer, tg3_timer, (unsigned long)tp);
+ timer_setup(&tp->timer, tg3_timer, 0);
}
static void tg3_timer_start(struct tg3 *tp)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 6aa0eee88ea5..a5eecd895a82 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
dev_err(&oct->pci_dev->dev,
"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
core);
- err_msg_was_printed[core] = true;
+ err_msg_was_printed[core] = true;
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 433f3619de8f..f2d1a076a038 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -198,7 +198,7 @@ static inline void
struct sk_buff *skb;
struct octeon_skb_page_info *skb_pg_info;
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC);
if (unlikely(!page))
return NULL;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d4496e9afcdf..a3d12dbde95b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
/* Offload checksum calculation to HW */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- hdr->csum_l3 = 1; /* Enable IP csum calculation */
+ if (ip.v4->version == 4)
+ hdr->csum_l3 = 1; /* Enable IP csum calculation */
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 8dc21c9f9716..973c1fb70d09 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -123,9 +123,9 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
}
#ifdef CONFIG_RFS_ACCEL
-void enic_flow_may_expire(unsigned long data)
+void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index 0ae83e091a62..8c4ce50da6e1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -16,12 +16,11 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id);
#ifdef CONFIG_RFS_ACCEL
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-void enic_flow_may_expire(unsigned long data);
+void enic_flow_may_expire(struct timer_list *t);
static inline void enic_rfs_timer_start(struct enic *enic)
{
- setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire,
- (unsigned long)enic);
+ timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0);
mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 4a11baffe02d..e130fb757e7b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1676,9 +1676,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
return work_done;
}
-static void enic_notify_timer(unsigned long data)
+static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, notify_timer);
enic_notify_check(enic);
@@ -2846,8 +2846,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notification timer, HW reset task, and wq locks
*/
- setup_timer(&enic->notify_timer, enic_notify_timer,
- (unsigned long)enic);
+ timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 23053919ebf5..ae55da60ed0e 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
RXFSD = 0x00000800, /* first descriptor */
RXLSD = 0x00000400, /* last descriptor */
ErrorSummary = 0x80, /* error summary */
- RUNT = 0x40, /* runt packet received */
- LONG = 0x20, /* long packet received */
+ RUNTPKT = 0x40, /* runt packet received */
+ LONGPKT = 0x20, /* long packet received */
FAE = 0x10, /* frame align error */
CRC = 0x08, /* crc error */
RXER = 0x04, /* receive error */
@@ -1628,7 +1628,7 @@ static int netdev_rx(struct net_device *dev)
dev->name, rx_status);
dev->stats.rx_errors++; /* end of a packet. */
- if (rx_status & (LONG | RUNT))
+ if (rx_status & (LONGPKT | RUNTPKT))
dev->stats.rx_length_errors++;
if (rx_status & RXER)
dev->stats.rx_frame_errors++;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5be52d89b182..7f837006bb6a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_init_addr_hash_table(priv);
- /* Insert receive time stamps into padding alignment bytes */
+ /* Insert receive time stamps into padding alignment bytes, and
+ * plus 2 bytes padding to ensure the cpu alignment.
+ */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- priv->padding = 8;
+ priv->padding = 8 + DEFAULT_PADDING;
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
@@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)
GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
struct phy_device *phydev;
+ struct ethtool_eee edata;
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)
/* Add support for flow control, but don't advertise it by default */
phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ /* disable EEE autoneg, EEE not supported by eTSEC */
+ memset(&edata, 0, sizeof(struct ethtool_eee));
+ phy_ethtool_set_eee(phydev, &edata);
+
return 0;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 04aaacbc3d45..1dc4aef37d3a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -849,7 +849,6 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
- dma_addr_t dma_addr;
int len = 0;
if (adapter->vpd->buff)
@@ -879,7 +878,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
adapter->vpd->dma_addr =
dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
dev_err(dev, "Could not map VPD buffer\n");
kfree(adapter->vpd->buff);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8172cf08cc33..3bac9df1c099 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4307,8 +4307,10 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
rar_num = E1000_RAR_ENTRIES;
- /* Zero out the other 15 receive addresses. */
- e_dbg("Clearing RAR[1-15]\n");
+ /* Zero out the following 14 receive addresses. RAR[15] is for
+ * manageability
+ */
+ e_dbg("Clearing RAR[1-14]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH();
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 67163ca898ba..00a36df02a3f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -113,7 +113,8 @@
#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f2f49239b015..9f18d39bdc8f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3034,9 +3034,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(IOSFPC, reg_val);
reg_val = er32(TARC(0));
- /* SPT and KBL Si errata workaround to avoid Tx hang */
- reg_val &= ~BIT(28);
- reg_val |= BIT(29);
+ /* SPT and KBL Si errata workaround to avoid Tx hang.
+ * Dropping the number of outstanding requests from
+ * 3 to 2 in order to avoid a buffer overrun.
+ */
+ reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+ reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
ew32(TARC(0), reg_val);
}
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index dbd69310f263..538b42d5c187 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1231,7 +1231,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5829715fa342..e019baa905c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -90,7 +90,6 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
-#define I40E_MAX_QUEUES_PER_CH 64
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9dcb2a961197..9af74253c3f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -613,6 +613,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0203665cb53c..095965f268bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -948,7 +948,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@@ -1268,6 +1269,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
if (hw->revision_id == 0)
cnt = I40E_PF_RESET_WAIT_COUNT_A0;
else
@@ -1279,6 +1281,12 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
+ hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
usleep_range(1000, 2000);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a964d6e4a9e..321d8be80871 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2167,6 +2167,73 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
}
/**
+ * i40e_set_promiscuous - set promiscuous mode
+ * @pf: board private structure
+ * @promisc: promisc on or off
+ *
+ * There are different ways of setting promiscuous mode on a PF depending on
+ * what state/environment we're in. This identifies and sets it appropriately.
+ * Returns 0 on success.
+ **/
+static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ pf->lan_veb != I40E_NO_VEB &&
+ !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ /* set defport ON for Main VSI instead of true promisc
+ * this way we will get all unicast/multicast and VLAN
+ * promisc behavior but will not get VF or VMDq traffic
+ * replicated on the Main VSI.
+ */
+ if (promisc)
+ aq_ret = i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret = i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL,
+ true);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+
+ if (!aq_ret)
+ pf->cur_promisc = promisc;
+
+ return aq_ret;
+}
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -2467,81 +2534,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state));
- if ((vsi->type == I40E_VSI_MAIN) &&
- (pf->lan_veb != I40E_NO_VEB) &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- /* set defport ON for Main VSI instead of true promisc
- * this way we will get all unicast/multicast and VLAN
- * promisc behavior but will not get VF or VMDq traffic
- * replicated on the Main VSI.
- */
- if (pf->cur_promisc != cur_promisc) {
- pf->cur_promisc = cur_promisc;
- if (cur_promisc)
- aq_ret =
- i40e_aq_set_default_vsi(hw,
- vsi->seid,
- NULL);
- else
- aq_ret =
- i40e_aq_clear_default_vsi(hw,
- vsi->seid,
- NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "Set default VSI failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- } else {
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL,
- true);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set unicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set multicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
+ aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ cur_promisc ? "on" : "off",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
out:
@@ -3964,7 +3966,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
@@ -5629,14 +5631,6 @@ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
return -EINVAL;
*reconfig_rss = false;
-
- if (num_queues > I40E_MAX_QUEUES_PER_CH) {
- dev_err(&pf->pdev->dev,
- "Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n",
- num_queues, I40E_MAX_QUEUES_PER_CH);
- return -EINVAL;
- }
-
if (vsi->current_rss_size) {
if (num_queues > vsi->current_rss_size) {
dev_dbg(&pf->pdev->dev,
@@ -7407,7 +7401,6 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
dev_err(&pf->pdev->dev,
"Failed to add cloud filter, err %s\n",
i40e_stat_str(&pf->hw, err));
- err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
goto err;
}
@@ -9429,6 +9422,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (!lock_acquired)
rtnl_unlock();
+ /* Restore promiscuous settings */
+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+ "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ pf->cur_promisc ? "on" : "off",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 0ccab0a5d717..7689c2ee0d46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -328,15 +328,17 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- i40e_status ret_code;
+ i40e_status ret_code = 0;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- i40e_release_nvm(hw);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d6d352a6e6ea..4566d66ffc7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 00d4833e9925..0e8568719b4e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -629,6 +629,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index f8a794b72462..36cb8e068e85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2086,7 +2086,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
}
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
- (u8 *)vfres, sizeof(vfres));
+ (u8 *)vfres, sizeof(*vfres));
}
/**
@@ -2218,18 +2218,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
- if (!f)
+ if (!f) {
f = i40e_add_mac_filter(vsi, al->list[i].addr);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add MAC filter %pM for VF %d\n",
- al->list[i].addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- goto error_param;
- } else {
- vf->num_mac++;
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add MAC filter %pM for VF %d\n",
+ al->list[i].addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ } else {
+ vf->num_mac++;
+ }
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index fe817e2b6fef..50864f99446d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index d8131139565e..da60ce12b33d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -26,6 +26,26 @@ static struct i40e_ops i40evf_lan_ops = {
};
/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ int i;
+
+ memset(params, 0, sizeof(struct i40e_params));
+ params->mtu = vsi->netdev->mtu;
+ params->link_up = vsi->back->link_up;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ params->qos.prio_qos[i].tc = 0;
+ params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+ }
+}
+
+/**
* i40evf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
* @msg: message buffer
@@ -66,10 +86,6 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
return;
cinst = vsi->back->cinst;
- memset(&params, 0, sizeof(params));
- params.mtu = vsi->netdev->mtu;
- params.link_up = vsi->back->link_up;
- params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->l2_param_change) {
@@ -77,6 +93,8 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change function\n");
return;
}
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
&params);
}
@@ -166,9 +184,9 @@ static struct i40e_client_instance *
i40evf_client_add_instance(struct i40evf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
- struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = &adapter->vsi;
- int i;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_params params;
if (!vf_registered_client)
goto out;
@@ -192,18 +210,14 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
&adapter->msix_entries[adapter->iwarp_base_vector];
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- cinst->lan_info.params.qos.prio_qos[i].tc = 0;
- cinst->lan_info.params.qos.prio_qos[i].qs_handle =
- vsi->qs_handle;
- }
-
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index ca2ebdbd24d7..7b2a4eba92e2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2110,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work)
adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ goto out;
+ }
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
i40evf_notify_client_close(&adapter->vsi, false);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
@@ -2118,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
i40evf_notify_client_open(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
- goto out;
- }
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
}
out:
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e94d3c256667..c208753ff5b7 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7317,7 +7317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 713e8df23744..4214c1519a87 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ca06c3cc2ca8..62a18914f00f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index feed11bc9ddf..1f4a69134ade 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 81c1fac00d33..62f204f32316 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1346,9 +1346,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
spin_unlock_bh(&mp->mib_counters_lock);
}
-static void mib_counters_timer_wrapper(unsigned long _mp)
+static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)_mp;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2321,9 +2321,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static inline void oom_timer_wrapper(unsigned long data)
+static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)data;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
@@ -3178,8 +3178,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mib_counters_clear(mp);
- setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
- (unsigned long)mp);
+ timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
@@ -3188,7 +3187,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
+ timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c9798210fa0f..0495487f7b42 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev->regs + MVMDIO_ERR_INT_MASK);
} else if (dev->err_interrupt == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out_mdio;
}
if (pdev->dev.of_node)
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 6c20e811f973..634b2f41cc9e 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -85,7 +85,7 @@
/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
-#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) ((idx) << 8)
+#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
#define MVPP22_RSS_TABLE_ENTRY 0x1508
@@ -4629,11 +4629,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val |= MVPP2_GMAC_DISABLE_PADDING;
- val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4641,10 +4636,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
val &= ~MVPP22_CTRL4_DP_CLK_SEL;
writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val &= ~MVPP2_GMAC_DISABLE_PADDING;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
/* The port is connected to a copper PHY */
@@ -5607,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
u32 txq_dma;
/* Allocate memory for TX descriptors */
- aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+ aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
&aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
@@ -5805,7 +5796,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
sizeof(*txq_pcpu->buffs),
GFP_KERNEL);
if (!txq_pcpu->buffs)
- goto cleanup;
+ return -ENOMEM;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -5821,26 +5812,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
&txq_pcpu->tso_headers_dma,
GFP_KERNEL);
if (!txq_pcpu->tso_headers)
- goto cleanup;
+ return -ENOMEM;
}
return 0;
-cleanup:
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->buffs);
-
- dma_free_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- txq_pcpu->tso_headers,
- txq_pcpu->tso_headers_dma);
- }
-
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_dma);
-
- return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -6867,6 +6842,12 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
+ /* The Tx ring size cannot be smaller than the minimum number of
+ * descriptors needed for TSO.
+ */
+ if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+ new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
if (ring->rx_pending != new_rx_pending) {
netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
ring->rx_pending, new_rx_pending);
@@ -8345,7 +8326,7 @@ static int mvpp2_probe(struct platform_device *pdev)
for_each_available_child_of_node(dn, port_node) {
err = mvpp2_port_probe(pdev, port_node, priv, i);
if (err < 0)
- goto err_mg_clk;
+ goto err_port_probe;
i++;
}
@@ -8361,12 +8342,19 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
if (!priv->stats_queue) {
err = -ENOMEM;
- goto err_mg_clk;
+ goto err_port_probe;
}
platform_set_drvdata(pdev, priv);
return 0;
+err_port_probe:
+ i = 0;
+ for_each_available_child_of_node(dn, port_node) {
+ if (priv->port_list[i])
+ mvpp2_port_remove(priv->port_list[i]);
+ i++;
+ }
err_mg_clk:
clk_disable_unprepare(priv->axi_clk);
if (priv->hw_version == MVPP22)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 91b1c154fd29..7bbd86f08e5f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -362,9 +362,9 @@ static void rxq_refill(struct net_device *dev)
}
}
-static inline void rxq_refill_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = (void *)data;
+ struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
napi_schedule(&pep->napi);
}
@@ -1496,8 +1496,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
memset(&pep->timeout, 0, sizeof(struct timer_list));
- setup_timer(&pep->timeout, rxq_refill_timer_wrapper,
- (unsigned long)pep);
+ timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
pep->smi_bus = mdiobus_alloc();
if (!pep->smi_bus) {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index eef35bf3e849..31efc47c847e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1495,9 +1495,9 @@ static int xm_check_link(struct net_device *dev)
* get an interrupt when carrier is detected, need to poll for
* link coming up.
*/
-static void xm_link_timer(unsigned long arg)
+static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = (struct skge_port *) arg;
+ struct skge_port *skge = from_timer(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
@@ -3897,7 +3897,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* Only used for Genesis XMAC */
if (is_genesis(hw))
- setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+ timer_setup(&skge->link_timer, xm_link_timer, 0);
else {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
if (hw->ports > 1) {
skge_write32(hw, B0_IMSK, 0);
skge_read32(hw, B0_IMSK);
- free_irq(pdev->irq, hw);
}
spin_unlock_irq(&hw->hw_lock);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 1145cde2274a..9efe1771423c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2974,9 +2974,9 @@ static int sky2_rx_hung(struct net_device *dev)
}
}
-static void sky2_watchdog(unsigned long arg)
+static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = (struct sky2_hw *) arg;
+ struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
@@ -5083,7 +5083,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sky2_show_addr(dev1);
}
- setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+ timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index e0eb695318e6..1fa4849a6f56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct net_device *dev = mdev->pndev[port];
struct mlx4_en_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+
+ mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox_priority)) {
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return PTR_ERR(mailbox_priority);
+ }
+
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mlx4_en_stats = mailbox->buf;
+ memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+ counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+ err = mlx4_get_counter_stats(mdev->dev, counter_index,
+ &tmp_counter_stats, reset);
+
+ /* 0xffs indicates invalid value */
+ memset(mailbox_priority->buf, 0xff,
+ sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
+ memset(mailbox_priority->buf, 0,
+ sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
+ in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
+ 0, MLX4_CMD_DUMP_ETH_STATS,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+ }
+
+ flowstats = mailbox_priority->buf;
+
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
- spin_unlock_bh(&priv->stats_lock);
-
- memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
- counter_index = mlx4_get_default_counter_index(mdev->dev, port);
- err = mlx4_get_counter_stats(mdev->dev, counter_index,
- &tmp_counter_stats, reset);
-
- /* 0xffs indicates invalid value */
- memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
-
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
- memset(mailbox->buf, 0,
- sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
- err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
- in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
- 0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
- if (err)
- goto out;
- }
-
- flowstats = mailbox->buf;
-
- spin_lock_bh(&priv->stats_lock);
-
if (tmp_counter_stats.counter_mode == 0) {
priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 92aec17f4b4d..85e28efcda33 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -193,7 +193,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
- GFP_KERNEL | __GFP_COLD)) {
+ GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
@@ -551,8 +551,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
- GFP_ATOMIC | __GFP_COLD |
- __GFP_MEMALLOC))
+ GFP_ATOMIC | __GFP_MEMALLOC))
break;
ring->prod++;
} while (likely(--missing));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 88699b181946..946d9db7c8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
if (priv->mdev->dev->caps.flags &
MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
- if (priv->port_up)
+ if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
buf[4] = mlx4_en_test_loopback(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1856e279a7e0..2b72677eccd4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -153,6 +153,9 @@
#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+#define PREAMBLE_LEN 8
+#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
+ ETH_HLEN + PREAMBLE_LEN)
#define MLX4_EN_MIN_MTU 46
/* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 04304dd894c6..606a0e0beeae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
MLX4_MAX_PORTS;
else
res_alloc->guaranteed[t] = 0;
- res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2d46ec84ebdf..9bd8d28de152 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3142,13 +3142,17 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
- mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+ mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
+ GFP_KERNEL);
if (!mlxsw_sp->port_to_module) {
err = -ENOMEM;
goto err_port_to_module_alloc;
}
for (i = 1; i < max_ports; i++) {
+ /* Mark as invalid */
+ mlxsw_sp->port_to_module[i] = -1;
+
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
&width, &lane);
if (err)
@@ -3216,6 +3220,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < count; i++) {
local_port = base_port + i * 2;
+ if (mlxsw_sp->port_to_module[local_port] < 0)
+ continue;
module = mlxsw_sp->port_to_module[local_port];
mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
@@ -4294,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ u16 vid = 1;
int err;
err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4306,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
true, false);
if (err)
goto err_port_vlan_set;
+
+ for (; vid <= VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, false);
+ if (err)
+ goto err_vid_learning_set;
+ }
+
return 0;
+err_vid_learning_set:
+ for (vid--; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
@@ -4317,6 +4335,12 @@ err_port_stp_set:
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ u16 vid;
+
+ for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, true);
+
mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 58cf222fb985..432ab9b12b7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -152,7 +152,7 @@ struct mlxsw_sp {
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
struct mlxsw_sp_upper *lags;
- u8 *port_to_module;
+ int *port_to_module;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 632c7b229054..72ef4f8025f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1370,8 +1370,9 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1389,17 +1390,18 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(new_lb_rif);
ipip_entry->ol_lb = new_lb_rif;
- if (keep_encap) {
- list_splice_init(&old_lb_rif->common.nexthop_list,
- &new_lb_rif->common.nexthop_list);
- mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
- }
+ if (keep_encap)
+ mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+ &new_lb_rif->common);
mlxsw_sp_rif_destroy(&old_lb_rif->common);
return 0;
}
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+
/**
* Update the offload related to an IPIP entry. This always updates decap, and
* in addition to that it also:
@@ -1449,9 +1451,27 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_ipip_entry *ipip_entry =
mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr saddr;
+ u32 ul_tb_id;
if (!ipip_entry)
return 0;
+
+ /* For flat configuration cases, moving overlay to a different VRF might
+ * cause local address conflict, and the conflicting tunnels need to be
+ * demoted.
+ */
+ ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, false, false, extack);
}
@@ -3343,22 +3363,19 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
return ul_dev ? (ul_dev->flags & IFF_UP) : true;
}
-static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh,
- struct net_device *ol_dev)
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
bool removing;
if (!nh->nh_grp->gateway || nh->ipip_entry)
- return 0;
-
- nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
- if (!nh->ipip_entry)
- return -ENOENT;
+ return;
- removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+ nh->ipip_entry = ipip_entry;
+ removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- return 0;
+ mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
}
static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
@@ -3403,21 +3420,21 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
struct fib_nh *fib_nh)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
struct net_device *dev = fib_nh->nh_dev;
- enum mlxsw_sp_ipip_type ipipt;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
- if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
- router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
- if (err)
- return err;
- mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
- return 0;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+ if (ipip_entry) {
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ if (ipip_ops->can_offload(mlxsw_sp, dev,
+ MLXSW_SP_L3_PROTO_IPV4)) {
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+ mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+ return 0;
+ }
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3545,6 +3562,18 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
}
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+ list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+ nh->rif = new_rif;
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -3996,7 +4025,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
case RTN_LOCAL:
ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV4, dip);
- if (ipip_entry) {
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
fib_entry,
@@ -4694,21 +4723,21 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
const struct rt6_info *rt)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
struct net_device *dev = rt->dst.dev;
- enum mlxsw_sp_ipip_type ipipt;
struct mlxsw_sp_rif *rif;
int err;
- if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
- router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV6)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
- if (err)
- return err;
- mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
- return 0;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+ if (ipip_entry) {
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ if (ipip_ops->can_offload(mlxsw_sp, dev,
+ MLXSW_SP_L3_PROTO_IPV6)) {
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+ mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+ return 0;
+ }
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b171ed2015fe..2521c8c40015 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3501,7 +3501,7 @@ static void myri10ge_watchdog(struct work_struct *work)
* cannot detect a NIC with a parity error in a timely fashion if the
* NIC is lightly loaded.
*/
-static void myri10ge_watchdog_timer(unsigned long arg)
+static void myri10ge_watchdog_timer(struct timer_list *t)
{
struct myri10ge_priv *mgp;
struct myri10ge_slice_state *ss;
@@ -3509,7 +3509,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
u32 rx_pause_cnt;
u16 cmd;
- mgp = (struct myri10ge_priv *)arg;
+ mgp = from_timer(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
@@ -3930,8 +3930,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_save_state(pdev);
/* Setup the watchdog timer */
- setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
- (unsigned long)mgp);
+ timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index fe7e0e1dd01d..b2299f2b2155 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1530,7 +1530,7 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for"
"vpath:%d", vp_id);
- return status;
+ return status;
}
} else
return VXGE_HW_FAIL;
@@ -1950,19 +1950,19 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
* for all VPATHs. The h/w only uses the lowest numbered VPATH
* when steering frames.
*/
- for (index = 0; index < vdev->no_of_vpath; index++) {
+ for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
vdev->config.rth_algorithm,
&hash_types,
vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
+ if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"RTH configuration failed for vpath:%d",
vdev->vpaths[index].device_id);
return status;
- }
- }
+ }
+ }
return status;
}
@@ -1991,7 +1991,7 @@ static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for "
"vpath:%d", i);
- return status;
+ return status;
}
}
}
@@ -2474,32 +2474,31 @@ static int vxge_add_isr(struct vxgedev *vdev)
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
+ vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].fifo;
irq_req = 1;
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle,
- 0,
+ vdev->entries[intr_cnt].vector,
+ vxge_rx_msix_napi_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].ring;
irq_req = 1;
break;
@@ -2512,9 +2511,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
vxge_rem_msix_isr(vdev);
vdev->config.intr_type = INTA;
vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA"
- , vdev->ndev->name);
- goto INTA_MODE;
+ "%s: Defaulting to INTA",
+ vdev->ndev->name);
+ goto INTA_MODE;
}
if (irq_req) {
@@ -4505,8 +4504,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
+ ret = -EINVAL;
+ goto _exit3;
}
if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index b6cee71f49d3..bc879aeb62d4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -214,8 +214,14 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
{
int err;
- if (prog && !prog->aux->offload)
- return -EINVAL;
+ if (prog) {
+ struct bpf_dev_offload *offload = prog->aux->offload;
+
+ if (!offload)
+ return -EINVAL;
+ if (offload->netdev != nn->dp.netdev)
+ return -EINVAL;
+ }
if (prog && old_prog) {
u8 cap;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index e0283bb24f06..8fcc90c0d2d3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -125,6 +125,21 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false);
}
+static int
+nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
+{
+ return tc_setup_cb_egdev_register(netdev,
+ nfp_flower_setup_tc_egress_cb,
+ netdev_priv(netdev));
+}
+
+static void
+nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
+{
+ tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
+ netdev_priv(netdev));
+}
+
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -452,6 +467,9 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
+ .repr_init = nfp_flower_repr_netdev_init,
+ .repr_clean = nfp_flower_repr_netdev_clean,
+
.repr_open = nfp_flower_repr_netdev_open,
.repr_stop = nfp_flower_repr_netdev_stop,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c90e72b7ff5a..e6b26c5ae6e0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -52,8 +52,7 @@ struct nfp_app;
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
-#define NFP_FL_META_FLAG_NEW_MASK 128
-#define NFP_FL_META_FLAG_LAST_MASK 1
+#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
@@ -197,5 +196,7 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 193520ef23f0..db977cf8e933 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
id = nfp_add_mask_table(app, mask_data, mask_len);
if (id < 0)
return false;
- *meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
+ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
*mask_id = id;
@@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;
+ if (meta_flags)
+ *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
nfp_release_mask_id(app, *mask_id);
kfree(mask_entry);
if (meta_flags)
- *meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
+ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
return true;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index cdbb5464b790..553f94f55dce 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -131,7 +131,8 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow,
+ bool egress)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
@@ -167,6 +168,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->key);
+ if (!egress)
+ return -EOPNOTSUPP;
+
if (mask_enc_ctl->addr_type != 0xffff ||
enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
return -EOPNOTSUPP;
@@ -194,6 +198,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_vxlan);
+ } else if (egress) {
+ /* Reject non tunnel matches offloaded to egress repr. */
+ return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -308,6 +315,7 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
+ * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
@@ -315,7 +323,7 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
@@ -326,7 +334,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(key_layer, flow);
+ err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
if (err)
goto err_free_key_ls;
@@ -447,7 +455,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower)
+ struct tc_cls_flower_offload *flower, bool egress)
{
if (!eth_proto_is_802_3(flower->common.protocol) ||
flower->common.chain_index)
@@ -455,7 +463,7 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower);
+ return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
@@ -465,6 +473,23 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct nfp_repr *repr = cb_priv;
+
+ if (!tc_can_offload(repr->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(repr->app, repr->netdev,
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
@@ -476,7 +501,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 54b67c9b8d5b..0e5e0305ad1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -76,6 +76,8 @@ extern const struct nfp_app_type app_flower;
* @vnic_free: free up app's vNIC state
* @vnic_init: vNIC netdev was registered
* @vnic_clean: vNIC netdev about to be unregistered
+ * @repr_init: representor about to be registered
+ * @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
* @start: start application logic
@@ -109,6 +111,9 @@ struct nfp_app_type {
int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn);
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
+ int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
+
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
@@ -212,6 +217,21 @@ static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr)
return app->type->repr_stop(app, repr);
}
+static inline int
+nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
+{
+ if (!app->type->repr_init)
+ return 0;
+ return app->type->repr_init(app, netdev);
+}
+
+static inline void
+nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
+{
+ if (app->type->repr_clean)
+ app->type->repr_clean(app, netdev);
+}
+
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 232044b1b7aa..1a603fdd9e80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1185,7 +1185,7 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
} else {
struct page *page;
- page = alloc_page(GFP_KERNEL | __GFP_COLD);
+ page = alloc_page(GFP_KERNEL);
frag = page ? page_address(page) : NULL;
}
if (!frag) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 60c8d733a37d..2801ecd09eab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -104,7 +104,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "rx_frame_too_long_errors",
NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
{ "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
- { "rx_vlan_reveive_ok", NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, },
+ { "rx_vlan_received_ok", NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
{ "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, },
{ "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
{ "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, },
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 1bce8c131bb9..78b36c67c232 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,
{
u8 __iomem *mem = port->eth_stats;
- /* TX and RX stats are flipped as we are returning the stats as seen
- * at the switch port corresponding to the phys port.
- */
- stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
- stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
- stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
+ stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
+ stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
+ stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
- stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
- stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
- stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
+ stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
+ stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
+ stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
}
static void
@@ -258,6 +255,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
static void nfp_repr_clean(struct nfp_repr *repr)
{
unregister_netdev(repr->netdev);
+ nfp_app_repr_clean(repr->app, repr->netdev);
dst_release((struct dst_entry *)repr->dst);
nfp_port_free(repr->port);
}
@@ -297,6 +295,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->netdev_ops = &nfp_repr_netdev_ops;
netdev->ethtool_ops = &nfp_port_ethtool_ops;
+ netdev->max_mtu = pf_netdev->max_mtu;
+
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
if (nfp_app_has_tc(app)) {
@@ -304,12 +304,18 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->hw_features |= NETIF_F_HW_TC;
}
- err = register_netdev(netdev);
+ err = nfp_app_repr_init(app, netdev);
if (err)
goto err_clean;
+ err = register_netdev(netdev);
+ if (err)
+ goto err_repr_clean;
+
return 0;
+err_repr_clean:
+ nfp_app_repr_clean(app, netdev);
err_clean:
dst_release((struct dst_entry *)repr->dst);
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 51dcb9c603ee..21bd4aa32646 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -157,7 +157,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
/* unused 0x008 */
#define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010)
#define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018)
-#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020)
+#define NFP_MAC_STATS_RX_VLAN_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x020)
#define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028)
#define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030)
#define NFP_MAC_STATS_RX_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ac8439ceea10..481876b5424c 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
tx_skb->dma_len,
DMA_TO_DEVICE);
else
- pci_unmap_page(np->pci_dev, tx_skb->dma,
+ dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
tx_skb->dma_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_skb->dma = 0;
}
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 457ee80307ea..40e52ffb732f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1089,9 +1089,10 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
* pch_gbe_watchdog - Watchdog process
* @data: Board private structure
*/
-static void pch_gbe_watchdog(unsigned long data)
+static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct pch_gbe_adapter *adapter = from_timer(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
@@ -2644,8 +2645,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "Invalid MAC address, "
"interface disabled.\n");
}
- setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 49591d9c2e1b..c9a55b774935 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -943,9 +943,9 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
#define TX_CLEAN_INTERVAL HZ
-static void pasemi_mac_tx_timer(unsigned long data)
+static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
+ struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
@@ -1199,8 +1199,7 @@ static int pasemi_mac_open(struct net_device *dev)
if (dev->phydev)
phy_start(dev->phydev);
- setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
- (unsigned long)mac->tx);
+ timer_setup(&mac->tx->clean_timer, pasemi_mac_tx_timer, 0);
mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 6e15d3c10ebf..fe7c1f230028 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1277,11 +1277,10 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
- dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC);
if (!dcbx_info)
return NULL;
- memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 05479d435469..9e5264d8773b 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3749,9 +3749,9 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
-static void ql3xxx_timer(unsigned long ptr)
+static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+ struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
@@ -3891,7 +3891,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
- setup_timer(&qdev->adapter_timer, ql3xxx_timer, (unsigned long)qdev);
+ timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
if (!cards_found) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 29fea74bff2e..7b97a9969046 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1092,8 +1092,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
{
if (!rx_ring->pg_chunk.page) {
u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
- GFP_ATOMIC,
+ rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
netif_err(qdev, drv, qdev->ndev,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 18461fcb9815..53dbf1e163a8 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -47,6 +47,7 @@
#define MDIO_CLK_25_28 7
#define MDIO_WAIT_TIMES 1000
+#define MDIO_STATUS_DELAY_TIME 1
static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
!(reg & (MDIO_START | MDIO_BUSY)),
- 100, MDIO_WAIT_TIMES * 100))
+ MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
return -EIO;
return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
writel(reg, adpt->base + EMAC_MDIO_CTRL);
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
- !(reg & (MDIO_START | MDIO_BUSY)), 100,
- MDIO_WAIT_TIMES * 100))
+ !(reg & (MDIO_START | MDIO_BUSY)),
+ MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
return -EIO;
return 0;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 71bee1af71ef..df21e900f874 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -195,6 +195,7 @@ err2:
err1:
rmnet_unregister_real_device(real_dev, port);
err0:
+ kfree(ep);
return err;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 29842ccc91a9..08e4afc0ab39 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -126,12 +126,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
- return RMNET_MAP_CONSUMED;
+ goto fail;
}
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
- return RMNET_MAP_CONSUMED;
+ goto fail;
if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
if (mux_id == 0xff)
@@ -143,6 +143,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
skb->protocol = htons(ETH_P_MAP);
return RMNET_MAP_SUCCESS;
+
+fail:
+ kfree_skb(skb);
+ return RMNET_MAP_CONSUMED;
}
static void
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index dcb8c39382e7..fc0d5fa65ad4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2030,21 +2030,6 @@ out:
return ret;
}
-static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->advertising);
- rtl_unlock_work(tp);
-
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -2171,6 +2156,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
return rc;
}
+static int rtl8169_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int rc;
+ u32 advertising;
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising))
+ return -EINVAL;
+
+ del_timer_sync(&tp->timer);
+
+ rtl_lock_work(tp);
+ rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
+ cmd->base.duplex, advertising);
+ rtl_unlock_work(tp);
+
+ return rc;
+}
+
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2591,7 +2597,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
- .set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
@@ -2603,6 +2608,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
.get_link_ksettings = rtl8169_get_link_ksettings,
+ .set_link_ksettings = rtl8169_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -3789,27 +3795,32 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x1f, 0x0007);
rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
+ rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0002);
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x0d, 0x0007);
rtl_writephy(tp, 0x0e, 0x003c);
rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0e, 0x0006);
rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
+ rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
+ rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+ /* soft-reset phy */
+ rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 2b962d349f5f..009780df664b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
struct ravb_private *priv = netdev_priv(ndev);
int ret = 0;
- if (priv->wol_enabled) {
- /* Reduce the usecount of the clock to zero and then
- * restore it to its original value. This is done to force
- * the clock to be re-enabled which is a workaround
- * for renesas-cpg-mssr driver which do not enable clocks
- * when resuming from PSCI suspend/resume.
- *
- * Without this workaround the driver fails to communicate
- * with the hardware if WoL was enabled when the system
- * entered PSCI suspend. This is due to that if WoL is enabled
- * we explicitly keep the clock from being turned off when
- * suspending, but in PSCI sleep power is cut so the clock
- * is disabled anyhow, the clock driver is not aware of this
- * so the clock is not turned back on when resuming.
- *
- * TODO: once the renesas-cpg-mssr suspend/resume is working
- * this clock dance should be removed.
- */
- clk_disable(priv->clk);
- clk_disable(priv->clk);
- clk_enable(priv->clk);
- clk_enable(priv->clk);
-
- /* Set reset mode to rearm the WoL logic */
+ /* If WoL is enabled set reset mode to rearm the WoL logic */
+ if (priv->wol_enabled)
ravb_write(ndev, CCC_OPC_RESET, CCC);
- }
/* All register have been reset to default values.
* Restore all registers which where setup at probe time and
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7e060aa9fbed..75323000c364 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
entry, le32_to_cpu(txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+ dma_unmap_single(&mdp->pdev->dev,
+ le32_to_cpu(txdesc->addr),
le32_to_cpu(txdesc->len) >> 16,
DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
@@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)
if (mdp->rx_skbuff[i]) {
struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(&mdp->pdev->dev,
le32_to_cpu(rxdesc->addr),
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
}
}
ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
mdp->rx_desc_dma);
mdp->rx_ring = NULL;
}
@@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
sh_eth_tx_free(ndev, false);
ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
- dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
mdp->tx_desc_dma);
mdp->tx_ring = NULL;
}
@@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
- dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
@@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
- mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
- GFP_KERNEL);
+ mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
+ &mdp->rx_desc_dma, GFP_KERNEL);
if (!mdp->rx_ring)
goto ring_free;
@@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Tx descriptors. */
tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
- mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
- GFP_KERNEL);
+ mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
+ &mdp->tx_desc_dma, GFP_KERNEL);
if (!mdp->tx_ring)
goto ring_free;
return 0;
@@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_unmap_single(&ndev->dev, dma_addr,
+ dma_unmap_single(&mdp->pdev->dev, dma_addr,
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
@@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (skb == NULL)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
buf_len, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
@@ -1891,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
return PTR_ERR(phydev);
}
+ /* mask with MAC supported features */
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
+ int err = phy_set_max_speed(phydev, SPEED_100);
+ if (err) {
+ netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
+ phy_disconnect(phydev);
+ return err;
+ }
+ }
+
phy_attached_info(phydev);
return 0;
@@ -2441,9 +2452,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* soft swap. */
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
- dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 0653b70723a3..6d6fb8cf3e7c 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1983,9 +1983,9 @@ err_out:
return err;
}
-static void ofdpa_fdb_cleanup(unsigned long data)
+static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = (struct ofdpa *)data;
+ struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
@@ -2368,8 +2368,7 @@ static int ofdpa_init(struct rocker *rocker)
hash_init(ofdpa->neigh_tbl);
spin_lock_init(&ofdpa->neigh_tbl_lock);
- setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
- (unsigned long) ofdpa);
+ timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 382019b302db..02456ed13a7d 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -163,7 +163,7 @@ static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
do {
page = ef4_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ page = alloc_pages(__GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8cb60513dca2..cfe76aad79ee 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -163,7 +163,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ page = alloc_pages(__GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 0ea7e16f2e6e..9937a2450e57 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->flags & EFX_TX_BUF_SKB) {
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
(*pkts_compl)++;
(*bytes_compl) += buffer->skb->len;
dev_consume_skb_any((struct sk_buff *)buffer->skb);
@@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts_compl = 0;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 61cb24810d10..9e6db16af663 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -1,8 +1,8 @@
/*
* dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
*
- * Copyright (C) Alexandre Torgue 2015
- * Author: Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e5ff734d4f9b..9eb7f65d8000 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
val, reg);
if (gmac->variant->soc_has_internal_phy) {
- if (of_property_read_bool(priv->plat->phy_node,
- "allwinner,leds-active-low"))
+ if (of_property_read_bool(node, "allwinner,leds-active-low"))
reg |= H3_EPHY_LED_POL;
else
reg &= ~H3_EPHY_LED_POL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff4fb5eae1af..d7250539d0bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -345,9 +345,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
* if there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
*/
-static void stmmac_eee_ctrl_timer(unsigned long arg)
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+ struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
@@ -401,9 +401,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- setup_timer(&priv->eee_ctrl_timer,
- stmmac_eee_ctrl_timer,
- (unsigned long)priv);
+ timer_setup(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer, 0);
mod_timer(&priv->eee_ctrl_timer,
STMMAC_LPI_T(eee_timer));
@@ -2221,9 +2220,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
*/
-static void stmmac_tx_timer(unsigned long data)
+static void stmmac_tx_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ struct stmmac_priv *priv = from_timer(priv, t, txtimer);
u32 tx_queues_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2244,7 +2243,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
+ timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
add_timer(&priv->txtimer);
}
@@ -2589,6 +2588,7 @@ static int stmmac_open(struct net_device *dev)
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ priv->mss = 0;
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index e9672b1f9968..031cf9c3435a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -335,7 +335,7 @@ static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
dma_addr_t pages_dma;
/* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
pages = alloc_pages(gfp, order);
if (pages)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index e1b55b8fb8e0..1f8e9601592a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -358,9 +358,9 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void xlgmac_tx_timer(unsigned long data)
+static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+ struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
@@ -391,8 +391,7 @@ static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
if (!channel->tx_ring)
break;
- setup_timer(&channel->tx_timer, xlgmac_tx_timer,
- (unsigned long)channel);
+ timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
}
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index db8a4bcfc6c7..a73600dceb8b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -119,8 +119,8 @@ do { \
#define CPDMA_RXCP 0x60
#define CPSW_POLL_WEIGHT 64
-#define CPSW_MIN_PACKET_SIZE 60
-#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
+#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
+#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index cd1185e66133..b432a75fb874 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -765,9 +765,9 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
}
EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
-static void cpsw_ale_timer(unsigned long arg)
+static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = (struct cpsw_ale *)arg;
+ struct cpsw_ale *ale = from_timer(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
@@ -859,7 +859,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
- setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale);
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
if (ale->ageout) {
ale->timer.expires = jiffies + ale->ageout;
add_timer(&ale->timer);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 15e2e3031d36..ed58c746e4af 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -906,7 +906,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
sw_data[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 4ad821655e51..e831c49713ee 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2745,9 +2745,9 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
-static void netcp_ethss_timer(unsigned long arg)
+static void netcp_ethss_timer(struct timer_list *t)
{
- struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+ struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
struct gbe_intf *gbe_intf;
struct gbe_slave *slave;
@@ -3616,8 +3616,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
}
spin_unlock_bh(&gbe_dev->hw_stats_lock);
- setup_timer(&gbe_dev->timer, netcp_ethss_timer,
- (unsigned long)gbe_dev);
+ timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
add_timer(&gbe_dev->timer);
*inst_priv = gbe_dev;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 8f53d762fbc4..5a4e78fde530 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -254,7 +254,7 @@ tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1425,7 +1425,7 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
@@ -1576,7 +1576,7 @@ drop_and_reuse:
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index a913538d3213..d925b8203996 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -912,8 +912,9 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
* packets, including updating the queue tail pointer.
*/
static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
+spider_net_cleanup_tx_ring(struct timer_list *t)
{
+ struct spider_net_card *card = from_timer(card, t, tx_timer);
if ((spider_net_release_tx_chain(card, 0) != 0) &&
(card->netdev->flags & IFF_UP)) {
spider_net_kick_tx_dma(card);
@@ -1265,7 +1266,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
- spider_net_cleanup_tx_ring(card);
+ spider_net_cleanup_tx_ring(&card->tx_timer);
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
@@ -1977,9 +1978,9 @@ init_firmware_failed:
* @data: used for pointer to card structure
*
*/
-static void spider_net_link_phy(unsigned long data)
+static void spider_net_link_phy(struct timer_list *t)
{
- struct spider_net_card *card = (struct spider_net_card *)data;
+ struct spider_net_card *card = from_timer(card, t, aneg_timer);
struct mii_phy *phy = &card->phy;
/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
@@ -2256,14 +2257,11 @@ spider_net_setup_netdev(struct spider_net_card *card)
pci_set_drvdata(card->pdev, netdev);
- setup_timer(&card->tx_timer,
- (void(*)(unsigned long))spider_net_cleanup_tx_ring,
- (unsigned long)card);
+ timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
netdev->irq = card->pdev->irq;
card->aneg_count = 0;
- setup_timer(&card->aneg_timer, spider_net_link_phy,
- (unsigned long)card);
+ timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
netif_napi_add(netdev, &card->napi,
spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 83e6f76eb965..33949248c829 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
else
name = "Rhine III";
- netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
- name, (long)ioaddr, dev->dev_addr, rp->irq);
+ netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+ name, ioaddr, dev->dev_addr, rp->irq);
dev_set_drvdata(hwdev, dev);
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 6d68c8a8f4f2..da4ec575ccf9 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
depends on (PPC || MICROBLAZE)
+ depends on !64BIT || BROKEN
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 4e16d839c311..b718a02a6bb6 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1337,21 +1337,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
*use_udp6_rx_checksums = false;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
return 0;
@@ -1527,11 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
- goto nla_put_failure;
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
!geneve->use_udp6_rx_checksums))
goto nla_put_failure;
+#endif
return 0;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index c9f7215c5dc2..3de272959090 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1005,7 +1005,7 @@ static void __scc_start_tx_timer(struct scc_channel *scc,
} else
if (when != TIMER_OFF)
{
- scc->tx_t.function = (TIMER_FUNC_TYPE)handler;
+ scc->tx_t.function = handler;
scc->tx_t.expires = jiffies + (when*HZ)/100;
add_timer(&scc->tx_t);
}
@@ -1031,7 +1031,7 @@ static void scc_start_defer(struct scc_channel *scc)
if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
{
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_busy;
+ scc->tx_wdog.function = t_busy;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
add_timer(&scc->tx_wdog);
}
@@ -1047,7 +1047,7 @@ static void scc_start_maxkeyup(struct scc_channel *scc)
if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
{
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_maxkeyup;
+ scc->tx_wdog.function = t_maxkeyup;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
add_timer(&scc->tx_wdog);
}
@@ -1428,7 +1428,7 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
del_timer(&scc->tx_wdog);
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)scc_stop_calibrate;
+ scc->tx_wdog.function = scc_stop_calibrate;
scc->tx_wdog.expires = jiffies + HZ*duration;
add_timer(&scc->tx_wdog);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 8483f03d5a41..1ab97d99b9ba 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
rrpriv->info_dma);
rrpriv->info = NULL;
- free_irq(pdev->irq, dev);
spin_unlock_irqrestore(&rrpriv->lock, flags);
+ free_irq(pdev->irq, dev);
return 0;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4958bb6b7376..88ddfb92122b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -646,6 +646,10 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_SEND_BUFFER_ID 0
+#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
+ NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
+ NETIF_F_TSO6)
+
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
#define VRSS_CHANNEL_DEFAULT 8
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index da216ca4f2b2..5129647d420c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2011,7 +2011,7 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- /* hw_features computed in rndis_filter_device_add */
+ /* hw_features computed in rndis_netdev_set_hwcaps() */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8b1242b8d8ef..7b637c7dd1e5 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1131,69 +1131,20 @@ unlock:
rtnl_unlock();
}
-struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
- struct netvsc_device_info *device_info)
+static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
+ struct netvsc_device *nvdev)
{
- struct net_device *net = hv_get_drvdata(dev);
+ struct net_device *net = rndis_device->ndev;
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct netvsc_device *net_device;
- struct rndis_device *rndis_device;
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- struct ndis_recv_scale_cap rsscap;
- u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
unsigned int gso_max_size = GSO_MAX_SIZE;
- u32 mtu, size;
- const struct cpumask *node_cpu_mask;
- u32 num_possible_rss_qs;
- int i, ret;
-
- rndis_device = get_rndis_device();
- if (!rndis_device)
- return ERR_PTR(-ENODEV);
-
- /*
- * Let the inner driver handle this first to create the netvsc channel
- * NOTE! Once the channel is created, we may get a receive callback
- * (RndisFilterOnReceive()) before this call is completed
- */
- net_device = netvsc_device_add(dev, device_info);
- if (IS_ERR(net_device)) {
- kfree(rndis_device);
- return net_device;
- }
-
- /* Initialize the rndis device */
- net_device->max_chn = 1;
- net_device->num_chn = 1;
-
- net_device->extension = rndis_device;
- rndis_device->ndev = net;
-
- /* Send the rndis initialization message */
- ret = rndis_filter_init_device(rndis_device, net_device);
- if (ret != 0)
- goto err_dev_remv;
-
- /* Get the MTU from the host */
- size = sizeof(u32);
- ret = rndis_filter_query_device(rndis_device, net_device,
- RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
- &mtu, &size);
- if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
- net->mtu = mtu;
-
- /* Get the mac address */
- ret = rndis_filter_query_device_mac(rndis_device, net_device);
- if (ret != 0)
- goto err_dev_remv;
-
- memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+ int ret;
/* Find HW offload capabilities */
- ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps);
+ ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
- goto err_dev_remv;
+ return ret;
/* A value of zero means "no change"; now turn on what we want. */
memset(&offloads, 0, sizeof(struct ndis_offload_params));
@@ -1201,8 +1152,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
/* Linux does not care about IP checksum, always does in kernel */
offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
+ /* Reset previously set hw_features flags */
+ net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
+ net_device_ctx->tx_checksum_mask = 0;
+
/* Compute tx offload settings based on hw capabilities */
- net->hw_features = NETIF_F_RXCSUM;
+ net->hw_features |= NETIF_F_RXCSUM;
if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
/* Can checksum TCP */
@@ -1246,10 +1201,75 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
}
}
+ /* In case some hw_features disappeared we need to remove them from
+ * net->features list as they're no longer supported.
+ */
+ net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
+
netif_set_gso_max_size(net, gso_max_size);
- ret = rndis_filter_set_offload_params(net, net_device, &offloads);
- if (ret)
+ ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
+
+ return ret;
+}
+
+struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+ struct netvsc_device_info *device_info)
+{
+ struct net_device *net = hv_get_drvdata(dev);
+ struct netvsc_device *net_device;
+ struct rndis_device *rndis_device;
+ struct ndis_recv_scale_cap rsscap;
+ u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+ u32 mtu, size;
+ const struct cpumask *node_cpu_mask;
+ u32 num_possible_rss_qs;
+ int i, ret;
+
+ rndis_device = get_rndis_device();
+ if (!rndis_device)
+ return ERR_PTR(-ENODEV);
+
+ /* Let the inner driver handle this first to create the netvsc channel
+ * NOTE! Once the channel is created, we may get a receive callback
+ * (RndisFilterOnReceive()) before this call is completed
+ */
+ net_device = netvsc_device_add(dev, device_info);
+ if (IS_ERR(net_device)) {
+ kfree(rndis_device);
+ return net_device;
+ }
+
+ /* Initialize the rndis device */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
+
+ net_device->extension = rndis_device;
+ rndis_device->ndev = net;
+
+ /* Send the rndis initialization message */
+ ret = rndis_filter_init_device(rndis_device, net_device);
+ if (ret != 0)
+ goto err_dev_remv;
+
+ /* Get the MTU from the host */
+ size = sizeof(u32);
+ ret = rndis_filter_query_device(rndis_device, net_device,
+ RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+ &mtu, &size);
+ if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+ net->mtu = mtu;
+
+ /* Get the mac address */
+ ret = rndis_filter_query_device_mac(rndis_device, net_device);
+ if (ret != 0)
+ goto err_dev_remv;
+
+ memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+
+ /* Query and set hardware capabilities */
+ ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
+ if (ret != 0)
goto err_dev_remv;
rndis_filter_query_device_link_status(rndis_device, net_device);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index f2a7e929316e..77cc4fbaeace 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -116,7 +116,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
return false;
}
-static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
@@ -124,7 +124,7 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
case htons(ETH_P_ARP): {
struct arphdr *arph;
- if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+ if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
return NULL;
arph = arp_hdr(skb);
@@ -165,8 +165,26 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
/* Only Neighbour Solicitation pkts need different treatment */
if (ipv6_addr_any(&ip6h->saddr) &&
ip6h->nexthdr == NEXTHDR_ICMP) {
+ struct icmp6hdr *icmph;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+
+ if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ /* Need to access the ipv6 address in body */
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+ + sizeof(struct in6_addr))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+ }
+
*type = IPVL_ICMPV6;
- lyr3h = ip6h + 1;
+ lyr3h = icmph;
}
break;
}
@@ -375,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
.flowi4_oif = dev->ifindex,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .flowi4_mark = skb->mark,
.daddr = ip4h->daddr,
.saddr = ip4h->saddr,
};
@@ -510,7 +529,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
struct ipvl_addr *addr;
int addr_type;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (!lyr3h)
goto out;
@@ -539,7 +558,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
if (!ipvlan_is_vepa(ipvlan->port) &&
ether_addr_equal(eth->h_dest, eth->h_source)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr) {
@@ -606,7 +625,7 @@ static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
int addr_type;
if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return true;
@@ -627,7 +646,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
rx_handler_result_t ret = RX_HANDLER_PASS;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
@@ -666,7 +685,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
} else {
struct ipvl_addr *addr;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return ret;
@@ -717,7 +736,7 @@ static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
if (!port || port->mode != IPVLAN_MODE_L3S)
goto out;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index a266aa435d4d..30cb803e2fe5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -107,16 +107,6 @@ static int ipvlan_port_create(struct net_device *dev)
struct ipvl_port *port;
int err, idx;
- if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
- netdev_err(dev, "Master is either lo or non-ether device\n");
- return -EINVAL;
- }
-
- if (netdev_is_rx_handler_busy(dev)) {
- netdev_err(dev, "Device is already in use.\n");
- return -EBUSY;
- }
-
port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -179,8 +169,9 @@ static void ipvlan_port_destroy(struct net_device *dev)
static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- const struct net_device *phy_dev = ipvlan->phy_dev;
- struct ipvl_port *port = ipvlan->port;
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_port *port;
+ int err;
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
@@ -196,18 +187,27 @@ static int ipvlan_init(struct net_device *dev)
if (!ipvlan->pcpu_stats)
return -ENOMEM;
+ if (!netif_is_ipvlan_port(phy_dev)) {
+ err = ipvlan_port_create(phy_dev);
+ if (err < 0) {
+ free_percpu(ipvlan->pcpu_stats);
+ return err;
+ }
+ }
+ port = ipvlan_port_get_rtnl(phy_dev);
port->count += 1;
-
return 0;
}
static void ipvlan_uninit(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ipvl_port *port = ipvlan->port;
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_port *port;
free_percpu(ipvlan->pcpu_stats);
+ port = ipvlan_port_get_rtnl(phy_dev);
port->count -= 1;
if (!port->count)
ipvlan_port_destroy(port->dev);
@@ -554,7 +554,6 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct net_device *phy_dev;
int err;
u16 mode = IPVLAN_MODE_L3;
- bool create = false;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -568,28 +567,41 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
phy_dev = tmp->phy_dev;
} else if (!netif_is_ipvlan_port(phy_dev)) {
- err = ipvlan_port_create(phy_dev);
- if (err < 0)
- return err;
- create = true;
- }
+ /* Exit early if the underlying link is invalid or busy */
+ if (phy_dev->type != ARPHRD_ETHER ||
+ phy_dev->flags & IFF_LOOPBACK) {
+ netdev_err(phy_dev,
+ "Master is either lo or non-ether device\n");
+ return -EINVAL;
+ }
- if (data && data[IFLA_IPVLAN_MODE])
- mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+ if (netdev_is_rx_handler_busy(phy_dev)) {
+ netdev_err(phy_dev, "Device is already in use.\n");
+ return -EBUSY;
+ }
+ }
- port = ipvlan_port_get_rtnl(phy_dev);
ipvlan->phy_dev = phy_dev;
ipvlan->dev = dev;
- ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
- /* Flags are per port and latest update overrides. User has
- * to be consistent in setting it just like the mode attribute.
+ /* TODO Probably put random address here to be presented to the
+ * world but keep using the physical-dev address for the outgoing
+ * packets.
*/
- if (data && data[IFLA_IPVLAN_FLAGS])
- ipvlan->port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
+ memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+
+ dev->priv_flags |= IFF_IPVLAN_SLAVE;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ return err;
+
+ /* ipvlan_init() would have created the port, if required */
+ port = ipvlan_port_get_rtnl(phy_dev);
+ ipvlan->port = port;
/* If the port-id base is at the MAX value, then wrap it around and
* begin from 0x1 again. This may be due to a busy system where lots
@@ -609,31 +621,28 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
GFP_KERNEL);
if (err < 0)
- goto destroy_ipvlan_port;
+ goto unregister_netdev;
dev->dev_id = err;
+
/* Increment id-base to the next slot for the future assignment */
port->dev_id_start = err + 1;
- /* TODO Probably put random address here to be presented to the
- * world but keep using the physical-dev address for the outgoing
- * packets.
- */
- memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+ err = netdev_upper_dev_link(phy_dev, dev, extack);
+ if (err)
+ goto remove_ida;
- dev->priv_flags |= IFF_IPVLAN_SLAVE;
+ /* Flags are per port and latest update overrides. User has
+ * to be consistent in setting it just like the mode attribute.
+ */
+ if (data && data[IFLA_IPVLAN_FLAGS])
+ port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
- err = register_netdevice(dev);
- if (err < 0)
- goto remove_ida;
+ if (data && data[IFLA_IPVLAN_MODE])
+ mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- err = netdev_upper_dev_link(phy_dev, dev, extack);
- if (err) {
- goto unregister_netdev;
- }
err = ipvlan_set_port_mode(port, mode);
- if (err) {
+ if (err)
goto unlink_netdev;
- }
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
netif_stacked_transfer_operstate(phy_dev, dev);
@@ -641,13 +650,10 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
unlink_netdev:
netdev_upper_dev_unlink(phy_dev, dev);
-unregister_netdev:
- unregister_netdevice(dev);
remove_ida:
ida_simple_remove(&port->ida, dev->dev_id);
-destroy_ipvlan_port:
- if (create)
- ipvlan_port_destroy(phy_dev);
+unregister_netdev:
+ unregister_netdevice(dev);
return err;
}
EXPORT_SYMBOL_GPL(ipvlan_link_new);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2c98152d1e1b..1d025ab9568f 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2411,7 +2411,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!hdr)
return -EMSGSIZE;
- genl_dump_check_consistent(cb, hdr, &macsec_fam);
+ genl_dump_check_consistent(cb, hdr);
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5f93e6add563..e911e4990b20 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)
{
int value;
- mutex_lock(&phydev->lock);
-
value = phy_read(phydev, MII_BMCR);
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- mutex_unlock(&phydev->lock);
-
return 0;
}
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 72f4228a63bb..9442db221834 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -116,3 +116,7 @@ static struct mdio_device_id __maybe_unused cortina_tbl[] = {
};
MODULE_DEVICE_TABLE(mdio, cortina_tbl);
+
+MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver");
+MODULE_AUTHOR("NXP");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4d02b27df044..b5a8f750e433 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
if (err < 0)
goto error;
+ /* Do not touch the fiber page if we're in copper->sgmii mode */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+ return 0;
+
/* Then the fiber link */
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index aebc08beceba..21b3f36e023a 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -16,6 +16,7 @@
* link takes priority and the other port is completely locked out.
*/
#include <linux/phy.h>
+#include <linux/marvell_phy.h>
enum {
MV_PCS_BASE_T = 0x0000,
@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = 0x002b09aa,
- .phy_id_mask = 0xffffffff,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
.features = SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Full |
@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { 0x002b09aa, 0xffffffff },
+ { 0x002b09aa, MARVELL_PHY_ID_MASK },
{ },
};
MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2df7b62c1a36..54d00a1d2bef 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
if (addr == mdiodev->addr) {
dev->of_node = child;
+ dev->fwnode = of_fwnode_handle(child);
return;
}
}
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 1ea69b7585d9..842eb871a6e3 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -22,6 +22,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
static int meson_gxl_config_init(struct phy_device *phydev)
{
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
return 0;
}
+/* This function is provided to cope with the possible failures of this phy
+ * during aneg process. When aneg fails, the PHY reports that aneg is done
+ * but the value found in MII_LPA is wrong:
+ * - Early failures: MII_LPA is just 0x0001. if MII_EXPANSION reports that
+ * the link partner (LP) supports aneg but the LP never acked our base
+ * code word, it is likely that we never sent it to begin with.
+ * - Late failures: MII_LPA is filled with a value which seems to make sense
+ * but it actually is not what the LP is advertising. It seems that we
+ * can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
+ * If this particular bit is not set when aneg is reported being done,
+ * it means MII_LPA is likely to be wrong.
+ *
+ * In both case, forcing a restart of the aneg process solve the problem.
+ * When this failure happens, the first retry is usually successful but,
+ * in some cases, it may take up to 6 retries to get a decent result
+ */
+static int meson_gxl_read_status(struct phy_device *phydev)
+{
+ int ret, wol, lpa, exp;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_aneg_done(phydev);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ goto read_status_continue;
+
+ /* Need to access WOL bank, make sure the access is open */
+ ret = phy_write(phydev, 0x14, 0x0000);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, 0x14, 0x0400);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, 0x14, 0x0000);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, 0x14, 0x0400);
+ if (ret)
+ return ret;
+
+ /* Request LPI_STATUS WOL register */
+ ret = phy_write(phydev, 0x14, 0x8D80);
+ if (ret)
+ return ret;
+
+ /* Read LPI_STATUS value */
+ wol = phy_read(phydev, 0x15);
+ if (wol < 0)
+ return wol;
+
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ exp = phy_read(phydev, MII_EXPANSION);
+ if (exp < 0)
+ return exp;
+
+ if (!(wol & BIT(12)) ||
+ ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
+ /* Looks like aneg failed after all */
+ phydev_dbg(phydev, "LPA corruption - aneg restart\n");
+ return genphy_restart_aneg(phydev);
+ }
+ }
+
+read_status_continue:
+ return genphy_read_status(phydev);
+}
+
static struct phy_driver meson_gxl_phy[] = {
{
.phy_id = 0x01814400,
@@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {
.config_init = meson_gxl_config_init,
.config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
- .read_status = genphy_read_status,
+ .read_status = meson_gxl_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fdb43dd9b5cd..ab4614113403 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
}
+/* Center KSZ9031RNX FLP timing at 16ms. */
static int ksz9031_center_flp_timing(struct phy_device *phydev)
{
int result;
- /* Center KSZ9031RNX FLP timing at 16ms. */
result = ksz9031_extended_write(phydev, OP_DATA, 0,
MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+ if (result)
+ return result;
+
result = ksz9031_extended_write(phydev, OP_DATA, 0,
MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
-
if (result)
return result;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2b1e67bc1e73..ed10d1fc8f59 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
- bool do_resume = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)
phydev->state = PHY_UP;
break;
case PHY_HALTED:
+ /* if phy was suspended, bring the physical link up again */
+ phy_resume(phydev);
+
/* make sure interrupts are re-enabled for the PHY */
if (phydev->irq != PHY_POLL) {
err = phy_enable_interrupts(phydev);
@@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)
}
phydev->state = PHY_RESUMING;
- do_resume = true;
break;
default:
break;
}
mutex_unlock(&phydev->lock);
- /* if phy was suspended, bring the physical link up again */
- if (do_resume)
- phy_resume(phydev);
-
phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 67f25ac29025..b15b31ca2618 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
if (!mdio_bus_phy_may_suspend(phydev))
goto no_resume;
+ mutex_lock(&phydev->lock);
ret = phy_resume(phydev);
+ mutex_unlock(&phydev->lock);
if (ret < 0)
return ret;
@@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (err)
goto error;
+ mutex_lock(&phydev->lock);
phy_resume(phydev);
+ mutex_unlock(&phydev->lock);
phy_led_triggers_register(phydev);
return err;
@@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
int ret = 0;
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
if (phydev->drv && phydrv->resume)
ret = phydrv->resume(phydev);
@@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)
{
int value;
- mutex_lock(&phydev->lock);
-
value = phy_read(phydev, MII_BMCR);
phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
- mutex_unlock(&phydev->lock);
-
return 0;
}
EXPORT_SYMBOL(genphy_resume);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e3bbc70372d3..5dc9668dde34 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -773,6 +773,7 @@ void phylink_stop(struct phylink *pl)
sfp_upstream_stop(pl->sfp_bus);
set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
+ queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
}
EXPORT_SYMBOL_GPL(phylink_stop);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index e381811e5f11..9dfc1c4c954f 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
{
unsigned int los = sfp->state & SFP_F_LOS;
- /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor
- * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume
- * the same as SFP_OPTIONS_LOS_NORMAL set.
+ /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
+ * are set, we assume that no LOS signal is available.
*/
- if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED)
+ if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
los ^= SFP_F_LOS;
+ else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
+ los = 0;
if (los)
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
sfp_sm_link_up(sfp);
}
+static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
+{
+ return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+ event == SFP_E_LOS_LOW) ||
+ (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+ event == SFP_E_LOS_HIGH);
+}
+
+static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
+{
+ return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+ event == SFP_E_LOS_HIGH) ||
+ (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+ event == SFP_E_LOS_LOW);
+}
+
static void sfp_sm_fault(struct sfp *sfp, bool warn)
{
if (sfp->sm_retries && !--sfp->sm_retries) {
@@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
return -EINVAL;
}
+ /* If the module requires address swap mode, warn about it */
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+ dev_warn(sfp->dev,
+ "module address swap to access page 0xA2 is not supported.\n");
+
return sfp_module_insert(sfp->sfp_bus, &sfp->id);
}
@@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_WAIT_LOS:
if (event == SFP_E_TX_FAULT)
sfp_sm_fault(sfp, true);
- else if (event ==
- (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
- SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
+ else if (sfp_los_event_inactive(sfp, event))
sfp_sm_link_up(sfp);
break;
@@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
if (event == SFP_E_TX_FAULT) {
sfp_sm_link_down(sfp);
sfp_sm_fault(sfp, true);
- } else if (event ==
- (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
- SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
+ } else if (sfp_los_event_active(sfp, event)) {
sfp_sm_link_down(sfp);
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
}
@@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
{
/* locking... and check module is present */
- if (sfp->id.ext.sff8472_compliance) {
+ if (sfp->id.ext.sff8472_compliance &&
+ !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
} else {
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index eb8a18991d8c..cc63102ca96e 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -106,8 +106,8 @@ static int slip_esc6(unsigned char *p, unsigned char *d, int len);
static void slip_unesc6(struct slip *sl, unsigned char c);
#endif
#ifdef CONFIG_SLIP_SMART
-static void sl_keepalive(unsigned long sls);
-static void sl_outfill(unsigned long sls);
+static void sl_keepalive(struct timer_list *t);
+static void sl_outfill(struct timer_list *t);
static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
@@ -763,8 +763,8 @@ static struct slip *sl_alloc(dev_t line)
sl->mode = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
/* initialize timer_list struct */
- setup_timer(&sl->keepalive_timer, sl_keepalive, (unsigned long)sl);
- setup_timer(&sl->outfill_timer, sl_outfill, (unsigned long)sl);
+ timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
+ timer_setup(&sl->outfill_timer, sl_outfill, 0);
#endif
slip_devs[i] = dev;
return sl;
@@ -1388,9 +1388,9 @@ module_exit(slip_exit);
* added by Stanislav Voronyi. All changes before marked VSV
*/
-static void sl_outfill(unsigned long sls)
+static void sl_outfill(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, outfill_timer);
spin_lock(&sl->lock);
@@ -1419,9 +1419,9 @@ out:
spin_unlock(&sl->lock);
}
-static void sl_keepalive(unsigned long sls)
+static void sl_keepalive(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, keepalive_timer);
spin_lock(&sl->lock);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index b13890953ebb..0a886fda0129 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
DEFINE_WAIT(wait);
ssize_t ret = 0;
- if (!iov_iter_count(to))
+ if (!iov_iter_count(to)) {
+ if (skb)
+ kfree_skb(skb);
return 0;
+ }
if (skb)
goto put;
@@ -1077,7 +1080,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
size_t total_len, int flags)
{
struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+ struct sk_buff *skb = m->msg_control;
int ret;
- if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+ if (skb)
+ kfree_skb(skb);
return -EINVAL;
- ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
- m->msg_control);
+ }
+ ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 228d4aa6d9ae..ca5e375de27c 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
if (ring->ring->is_tx) {
dir = DMA_TO_DEVICE;
order = 0;
- size = tbnet_frame_size(tf);
+ size = TBNET_FRAME_SIZE;
} else {
dir = DMA_FROM_DEVICE;
order = TBNET_RX_PAGE_ORDER;
@@ -512,6 +512,7 @@ err_free:
static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
struct tbnet_frame *tf;
unsigned int index;
@@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
tf = &ring->frames[index];
tf->frame.size = 0;
- tf->frame.buffer_phy = 0;
+
+ dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+ tbnet_frame_size(tf), DMA_TO_DEVICE);
return tf;
}
@@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
- struct device *dma_dev = tb_ring_dma_device(ring);
struct tbnet *net = netdev_priv(tf->dev);
- dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
- DMA_TO_DEVICE);
- tf->frame.buffer_phy = 0;
-
/* Return buffer to the ring */
net->tx_ring.prod++;
@@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
unsigned int i;
for (i = 0; i < TBNET_RING_SIZE; i++) {
struct tbnet_frame *tf = &ring->frames[i];
+ dma_addr_t dma_addr;
tf->page = alloc_page(GFP_KERNEL);
if (!tf->page) {
@@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
return -ENOMEM;
}
+ dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
+ __free_page(tf->page);
+ tf->page = NULL;
+ tbnet_free_buffers(ring);
+ return -ENOMEM;
+ }
+
tf->dev = net->dev;
+ tf->frame.buffer_phy = dma_addr;
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
@@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)
return 0;
}
-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
- dma_addr_t dma_addr;
-
- dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, dma_addr))
- return false;
-
- tf->frame.buffer_phy = dma_addr;
- return true;
-}
-
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
struct tbnet_frame **frames, u32 frame_count)
{
@@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_PARTIAL) {
/* No need to calculate checksum so we just update the
- * total frame count and map the frames for DMA.
+ * total frame count and sync the frames for DMA.
*/
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
- if (!tbnet_xmit_map(dma_dev, frames[i]))
- goto err_unmap;
+ dma_sync_single_for_device(dma_dev,
+ frames[i]->frame.buffer_phy,
+ tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
@@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = csum_fold(wsum);
/* Checksum is finally calculated and we don't touch the memory
- * anymore, so DMA map the frames now.
+ * anymore, so DMA sync the frames now.
*/
for (i = 0; i < frame_count; i++) {
- if (!tbnet_xmit_map(dma_dev, frames[i]))
- goto err_unmap;
+ dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+ tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
-
-err_unmap:
- while (i--)
- dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
- tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
- return false;
}
static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6bb1e604aadd..4f4a842a1c9c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -444,9 +444,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
spin_unlock_bh(&tun->lock);
}
-static void tun_flow_cleanup(unsigned long data)
+static void tun_flow_cleanup(struct timer_list *t)
{
- struct tun_struct *tun = (struct tun_struct *)data;
+ struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
unsigned long delay = tun->ageing_time;
unsigned long next_timer = jiffies + delay;
unsigned long count = 0;
@@ -1196,7 +1196,9 @@ static void tun_flow_init(struct tun_struct *tun)
INIT_HLIST_HEAD(&tun->flows[i]);
tun->ageing_time = TUN_FLOW_EXPIRE;
- setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+ timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
+ mod_timer(&tun->flow_gc_timer,
+ round_jiffies_up(jiffies + tun->ageing_time));
}
static void tun_flow_uninit(struct tun_struct *tun)
@@ -1485,6 +1487,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
if (err)
goto err_redirect;
+ rcu_read_unlock();
return NULL;
case XDP_TX:
xdp_xmit = true;
@@ -1517,7 +1520,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (xdp_xmit) {
skb->dev = tun->dev;
generic_xdp_tx(skb, xdp_prog);
- rcu_read_lock();
+ rcu_read_unlock();
return NULL;
}
@@ -1949,8 +1952,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
tun_debug(KERN_INFO, tun, "tun_do_read\n");
- if (!iov_iter_count(to))
+ if (!iov_iter_count(to)) {
+ if (skb)
+ kfree_skb(skb);
return 0;
+ }
if (!skb) {
/* Read frames from ring */
@@ -2066,22 +2072,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
struct tun_struct *tun = tun_get(tfile);
+ struct sk_buff *skb = m->msg_control;
int ret;
- if (!tun)
- return -EBADFD;
+ if (!tun) {
+ ret = -EBADFD;
+ goto out_free_skb;
+ }
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
ret = -EINVAL;
- goto out;
+ goto out_put_tun;
}
if (flags & MSG_ERRQUEUE) {
ret = sock_recv_errqueue(sock->sk, m, total_len,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
- m->msg_control);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2089,6 +2097,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
out:
tun_put(tun);
return ret;
+
+out_put_tun:
+ tun_put(tun);
+out_free_skb:
+ if (skb)
+ kfree_skb(skb);
+ return ret;
}
static int tun_peek_len(struct socket *sock)
@@ -2369,6 +2384,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
+
+ arg &= ~TUN_F_UFO;
}
/* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 42d7edcc3106..981c931a7a1f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -76,7 +76,6 @@
#define MOD_AUTHOR "Option Wireless"
#define MOD_DESCRIPTION "USB High Speed Option driver"
-#define MOD_LICENSE "GPL"
#define HSO_MAX_NET_DEVICES 10
#define HSO__MAX_MTU 2048
@@ -3286,7 +3285,7 @@ module_exit(hso_exit);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESCRIPTION);
-MODULE_LICENSE(MOD_LICENSE);
+MODULE_LICENSE("GPL");
/* change the debug level (eg: insmod hso.ko debug=0x04) */
MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index ca71f6c03859..7275761a1177 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -291,12 +291,15 @@ static void ipheth_sndbulk_callback(struct urb *urb)
static int ipheth_carrier_set(struct ipheth_device *dev)
{
- struct usb_device *udev = dev->udev;
+ struct usb_device *udev;
int retval;
+
if (!dev)
return 0;
if (!dev->confirmed_pairing)
return 0;
+
+ udev = dev->udev;
retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
IPHETH_CMD_CARRIER_CHECK, /* request */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 720a3a248070..3000ddd1c7e2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
net->hard_header_len = 0;
net->addr_len = 0;
net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
netdev_dbg(net, "mode: raw IP\n");
} else if (!net->header_ops) { /* don't bother if already set */
ether_setup(net);
+ clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
netdev_dbg(net, "mode: Ethernet\n");
}
@@ -1202,12 +1204,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
@@ -1239,6 +1243,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 80348b6a8646..d56fe32bf48d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
return -ENOLINK;
}
- skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+ if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
+ skb = __netdev_alloc_skb(dev->net, size, flags);
+ else
+ skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index edf984406ba0..559b215c0169 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -756,7 +756,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
int num_skb_frags;
buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
- if (unlikely(!ctx)) {
+ if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf,
virtio16_to_cpu(vi->vdev,
@@ -1030,7 +1030,6 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
int err;
bool oom;
- gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(vi, rq, gfp);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7ac487031b4b..19b9cc51079e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -874,8 +874,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
- __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
- u16 vid)
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, u16 vid)
{
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index c7721c729541..afeca6bcdade 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -558,9 +558,9 @@ out:
return NET_RX_DROP;
}
-static void ppp_timer(unsigned long arg)
+static void ppp_timer(struct timer_list *t)
{
- struct proto *proto = (struct proto *)arg;
+ struct proto *proto = from_timer(proto, t, timer);
struct ppp *ppp = get_ppp(proto->dev);
unsigned long flags;
@@ -610,7 +610,7 @@ static void ppp_start(struct net_device *dev)
for (i = 0; i < IDX_COUNT; i++) {
struct proto *proto = &ppp->protos[i];
proto->dev = dev;
- setup_timer(&proto->timer, ppp_timer, (unsigned long)proto);
+ timer_setup(&proto->timer, ppp_timer, 0);
proto->state = CLOSED;
}
ppp->protos[IDX_LCP].pid = PID_LCP;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 37b1e0d03e31..90a4ad9a2d08 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -494,18 +494,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- data = kmalloc(xc.len, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
+ data = memdup_user(xc.data, xc.len);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
break;
}
-
- if(copy_from_user(data, xc.data, xc.len))
- {
- kfree(data);
- ret = -ENOMEM;
- break;
- }
printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e31438541ee1..7d295ee71534 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -566,18 +566,16 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
#define MICHAEL_MIC_LEN 8
-static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
- enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
- return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- return IEEE80211_TKIP_ICV_LEN;
+ return 0;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
@@ -594,6 +592,31 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0;
}
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
@@ -1063,25 +1086,27 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
- if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
- enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
- skb_trim(msdu, msdu->len - 8);
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
/* ICV */
- if (status->flag & RX_FLAG_ICV_STRIPPED &&
- enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
- skb_trim(msdu, msdu->len - 8);
+ skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index dfb26f03c1a2..1b05b5d7a038 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
if (!avp->assoc)
return false;
- skb = ieee80211_nullfunc_get(sc->hw, vif);
+ skb = ieee80211_nullfunc_get(sc->hw, vif, false);
if (!skb)
return false;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 71812a2dd513..f7d228b5ba93 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1233,7 +1233,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
}
/* External RF module */
- iris_node = of_find_node_by_name(mmio_node, "iris");
+ iris_node = of_get_child_by_name(mmio_node, "iris");
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index ede89d4ffc88..e99e766a3028 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -518,11 +518,11 @@ exit:
/* LED trigger */
static int tx_activity;
-static void at76_ledtrig_tx_timerfunc(unsigned long data);
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused);
static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc);
DEFINE_LED_TRIGGER(ledtrig_tx);
-static void at76_ledtrig_tx_timerfunc(unsigned long data)
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused)
{
static int tx_lastactivity;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 3559fb5b8fb0..03aae6bc1838 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -280,9 +280,9 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
/**
* brcmf_btcoex_timerfunc() - BT coex timer callback
*/
-static void brcmf_btcoex_timerfunc(ulong data)
+static void brcmf_btcoex_timerfunc(struct timer_list *t)
{
- struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
+ struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
brcmf_dbg(TRACE, "enter\n");
bt_local->timer_on = false;
@@ -380,7 +380,7 @@ int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
/* Set up timer for BT */
btci->timer_on = false;
btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
- setup_timer(&btci->timer, brcmf_btcoex_timerfunc, (ulong)btci);
+ timer_setup(&btci->timer, brcmf_btcoex_timerfunc, 0);
btci->cfg = cfg;
btci->saved_regs_part1 = false;
btci->saved_regs_part2 = false;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 6e70df978159..15fa00d79fc6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2983,10 +2983,10 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
}
-static void brcmf_escan_timeout(unsigned long data)
+static void brcmf_escan_timeout(struct timer_list *t)
{
struct brcmf_cfg80211_info *cfg =
- (struct brcmf_cfg80211_info *)data;
+ from_timer(cfg, t, escan_timeout);
if (cfg->int_escan_map || cfg->scan_request) {
brcmf_err("timer expired\n");
@@ -3150,8 +3150,7 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
brcmf_cfg80211_escan_handler);
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
/* Init scan_timeout timer */
- setup_timer(&cfg->escan_timeout, brcmf_escan_timeout,
- (unsigned long)cfg);
+ timer_setup(&cfg->escan_timeout, brcmf_escan_timeout, 0);
INIT_WORK(&cfg->escan_timeout_work,
brcmf_cfg80211_escan_timeout_worker);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index e3495ea95553..cdf9e4161592 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2070,7 +2070,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
return head_pad;
}
-/**
+/*
* struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
* bus layer usage.
*/
@@ -3972,9 +3972,9 @@ brcmf_sdio_watchdog_thread(void *data)
}
static void
-brcmf_sdio_watchdog(unsigned long data)
+brcmf_sdio_watchdog(struct timer_list *t)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+ struct brcmf_sdio *bus = from_timer(bus, t, timer);
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@@ -4121,8 +4121,8 @@ release:
sdio_release_host(sdiodev->func[1]);
fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
- device_release_driver(dev);
device_release_driver(&sdiodev->func[2]->dev);
+ device_release_driver(dev);
}
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
@@ -4169,8 +4169,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
init_waitqueue_head(&bus->dcmd_resp_wait);
/* Set up the watchdog timer */
- setup_timer(&bus->timer, brcmf_sdio_watchdog,
- (unsigned long)bus);
+ timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index af7c4f36b66f..e7e75b458005 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -72,18 +72,21 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000_MODULE_FIRMWARE(api) \
- IWL9000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000A_MODULE_FIRMWARE(api) \
+ IWL9000A_FW_PRE __stringify(api) ".ucode"
+#define IWL9000B_MODULE_FIRMWARE(api) \
+ IWL9000B_FW_PRE __stringify(api) ".ucode"
#define IWL9000RFB_MODULE_FIRMWARE(api) \
- IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9000RFB_FW_PRE __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
- IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260A_FW_PRE __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
- IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260B_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -194,7 +197,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
@@ -206,10 +250,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
};
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+const struct iwl_cfg iwl9560_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index ea8206515171..705f83b02e13 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -80,15 +80,15 @@
#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
- IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
- IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 2acd94da9efe..d11d72615de2 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -399,9 +399,9 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
* was received. We need to ensure we receive the statistics in order
* to update the temperature used for calibrating the TXPOWER.
*/
-static void iwl_bg_statistics_periodic(unsigned long data)
+static void iwl_bg_statistics_periodic(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -556,9 +556,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
* this function is to perform continuous uCode event logging operation
* if enabled
*/
-static void iwl_bg_ucode_trace(unsigned long data)
+static void iwl_bg_ucode_trace(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -1085,11 +1085,9 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
if (priv->lib->bt_params)
iwlagn_bt_setup_deferred_work(priv);
- setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
- (unsigned long)priv);
+ timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
- setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
- (unsigned long)priv);
+ timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
}
void iwl_cancel_deferred_work(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 5b73492e7ff7..6524533d723c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -164,9 +164,10 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
* without doing anything, driver should continue the 5 seconds timer
* to wake up uCode for temperature check until temperature drop below CT
*/
-static void iwl_tt_check_exit_ct_kill(unsigned long data)
+static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_exit_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
@@ -214,9 +215,10 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
}
}
-static void iwl_tt_ready_for_ct_kill(unsigned long data)
+static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_waiting_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -612,10 +614,10 @@ void iwl_tt_initialize(struct iwl_priv *priv)
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
tt->state = IWL_TI_0;
- setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
- iwl_tt_check_exit_ct_kill, (unsigned long)priv);
- setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
- iwl_tt_ready_for_ct_kill, (unsigned long)priv);
+ timer_setup(&priv->thermal_throttle.ct_kill_exit_tm,
+ iwl_tt_check_exit_ct_kill, 0);
+ timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm,
+ iwl_tt_ready_for_ct_kill, 0);
/* setup deferred ct kill work */
INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5a40092febfb..3bfc657f6b42 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
#define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
struct iwl_scan_config {
__le32 flags;
@@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
};
/**
@@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
- * @reserved2: for future use and alignment
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
* @max_out_time: max out of serving channel time, per LMAC - for CDB there
* are 2 LMACs
* @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
@@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
* @reserved: for future use and alignment
+ * @reserved2: for future use and alignment
+ * @reserved3: for future use and alignment
* @data: &struct iwl_scan_channel_cfg_umac and
* &struct iwl_scan_req_umac_tail
*/
@@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
__le16 general_flags;
- u8 reserved2;
+ u8 reserved;
u8 scan_start_mac_id;
- u8 extended_dwell;
- u8 active_dwell;
- u8 passive_dwell;
- u8 fragmented_dwell;
union {
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ u8 adwell_default_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 reserved3;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved2;
+ u8 data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
};
} __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(u8) - sizeof(__le16))
#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32))
+ 2 * sizeof(__le32) - 2 * sizeof(u8) - \
+ sizeof(__le16))
/**
* struct iwl_umac_scan_abort
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 87b4434224a1..dfa111bb411e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -68,6 +68,9 @@
* @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
* @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
* @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
+ * monitor mode. Note this queue is the same as the queue for P2P device
+ * but we can't have active monitor mode along with P2P device anyway.
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
* that we are never left without the possibility to connect to an AP.
@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_CMD_QUEUE = 0,
IWL_MVM_DQA_AUX_QUEUE = 1,
IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+ IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
IWL_MVM_DQA_GCAST_QUEUE = 3,
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 9c889a32fe24..223fb77a3aa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
{
- iwl_fw_dbg_stop_recording(fwrt);
-
fwrt->dump.conf = FW_DBG_INVALID;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 740d97093d1c..37a5c5b4eda6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -264,6 +264,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
/* API Set 1 */
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index d1263a554420..e21e46cf6f9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -366,6 +366,7 @@ struct iwl_cfg {
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
+ u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u16 rx_with_siso_diversity:1,
@@ -472,6 +473,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index ca0b5536a8a6..921cab9e2d73 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -117,6 +117,7 @@
#define FH_RSCSR_FRAME_INVALID 0x55550000
#define FH_RSCSR_FRAME_ALIGN 0x40
#define FH_RSCSR_RPA_EN BIT(25)
+#define FH_RSCSR_RADA_EN BIT(26)
#define FH_RSCSR_RXQ_POS 16
#define FH_RSCSR_RXQ_MASK 0x3F0000
@@ -128,7 +129,8 @@ struct iwl_rx_packet {
* 31: flag flush RB request
* 30: flag ignore TC (terminal counter) request
* 29: flag fast IRQ request
- * 28-26: Reserved
+ * 28-27: Reserved
+ * 26: RADA enabled
* 25: Offload enabled
* 24: RPF enabled
* 23: RSS enabled
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index a2bf530eeae4..2f22e14e00fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
u32 action)
{
struct iwl_mac_ctx_cmd cmd = {};
- u32 tfd_queue_msk = 0;
+ u32 tfd_queue_msk = BIT(mvm->snif_queue);
int ret;
WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 0e18c5066f04..55ab5349dd40 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -972,6 +972,7 @@ struct iwl_mvm {
/* Tx queues */
u16 aux_queue;
+ u16 snif_queue;
u16 probe_queue;
u16 p2p_dev_queue;
@@ -1060,6 +1061,7 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1071,6 +1073,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_D3_RECONFIG,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
+ IWL_MVM_STATUS_NEED_FLUSH_P2P,
};
/* Keep track of completed init configuration */
@@ -1142,6 +1145,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 7078b7e458be..45470b6b351a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -624,6 +624,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 76dc58381e1c..3b8d44361380 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -213,6 +213,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
struct ieee80211_rx_status *rx_status)
{
int energy_a, energy_b, max_energy;
+ u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
energy_a = desc->energy_a;
energy_a = energy_a ? -energy_a : S8_MIN;
@@ -224,7 +225,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
energy_a, energy_b, max_energy);
rx_status->signal = max_energy;
- rx_status->chains = 0; /* TODO: phy info */
+ rx_status->chains =
+ (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
rx_status->chain_signal[0] = energy_a;
rx_status->chain_signal[1] = energy_b;
rx_status->chain_signal[2] = S8_MIN;
@@ -232,8 +234,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *stats,
- struct iwl_rx_mpdu_desc *desc, int queue,
- u8 *crypt_len)
+ struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
+ int queue, u8 *crypt_len)
{
u16 status = le16_to_cpu(desc->status);
@@ -253,6 +255,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return -1;
stats->flag |= RX_FLAG_DECRYPTED;
+ if (pkt_flags & FH_RSCSR_RADA_EN)
+ stats->flag |= RX_FLAG_MIC_STRIPPED;
*crypt_len = IEEE80211_CCMP_HDR_LEN;
return 0;
case IWL_RX_MPDU_STATUS_SEC_TKIP:
@@ -270,6 +274,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
IWL_RX_MPDU_STATUS_SEC_WEP)
*crypt_len = IEEE80211_WEP_IV_LEN;
+
+ if (pkt_flags & FH_RSCSR_RADA_EN)
+ stats->flag |= RX_FLAG_ICV_STRIPPED;
+
return 0;
case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
@@ -848,7 +856,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
- if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
+ if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
+ le32_to_cpu(pkt->len_n_flags), queue,
+ &crypt_len)) {
kfree_skb(skb);
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 774122fed454..e4fd476e9ccb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
u32 measurement_dwell;
};
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return (void *)&cmd->v7.data;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return (void *)&cmd->v6.data;
+
+ return (void *)&cmd->v1.data;
+}
+
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
{
struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ if (params->measurement_dwell) {
+ cmd->v7.active_dwell = params->measurement_dwell;
+ cmd->v7.passive_dwell = params->measurement_dwell;
+ } else {
+ cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ }
+ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ }
+
+ return;
+ }
+
if (params->measurement_dwell) {
- cmd->active_dwell = params->measurement_dwell;
- cmd->passive_dwell = params->measurement_dwell;
- cmd->extended_dwell = params->measurement_dwell;
+ cmd->v1.active_dwell = params->measurement_dwell;
+ cmd->v1.passive_dwell = params->measurement_dwell;
+ cmd->v1.extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
- cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
- cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
- cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->v6.max_out_time[1] =
+ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[1] =
+ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
}
} else {
@@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
-
- if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- else
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
- (void *)&cmd->v6.data : (void *)&cmd->v1.data;
+ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ cmd->v7.channel_flags = channel_flags;
+ cmd->v7.n_channels = params->n_channels;
+ } else if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
@@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- if (iwl_mvm_has_new_tx_api(mvm))
- base_size = IWL_SCAN_REQ_UMAC_SIZE;
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+ else if (iwl_mvm_has_new_tx_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index c19f98489d4e..1add5615fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1709,29 +1709,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
sta->sta_id = IWL_MVM_INVALID_STA;
}
-static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+ u8 sta_id, u8 fifo)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
if (iwl_mvm_has_new_tx_api(mvm)) {
- int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
- mvm->aux_sta.sta_id,
- IWL_MAX_TID_COUNT,
- wdg_timeout);
- mvm->aux_queue = queue;
+ int tvqm_queue =
+ iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+ *queue = tvqm_queue;
} else {
struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_MCAST,
- .sta_id = mvm->aux_sta.sta_id,
+ .fifo = fifo,
+ .sta_id = sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
- iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
- wdg_timeout);
+ iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
}
}
@@ -1750,7 +1750,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
/* Map Aux queue to fifo - needs to happen before adding Aux station */
if (!iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_queue(mvm);
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+ mvm->aux_sta.sta_id,
+ IWL_MVM_TX_FIFO_MCAST);
ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
MAC_INDEX_AUX, 0);
@@ -1764,7 +1766,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_queue(mvm);
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+ mvm->aux_sta.sta_id,
+ IWL_MVM_TX_FIFO_MCAST);
return 0;
}
@@ -1772,10 +1776,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
lockdep_assert_held(&mvm->mutex);
- return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
+
+ /* Map snif queue to fifo - must happen before adding snif station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+ mvm->snif_sta.sta_id,
+ IWL_MVM_TX_FIFO_BE);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
mvmvif->id, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * For 22000 firmware and on we cannot add queue to a station unknown
+ * to firmware so enable queue here - after the station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+ mvm->snif_sta.sta_id,
+ IWL_MVM_TX_FIFO_BE);
+
+ return 0;
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1784,6 +1809,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
+ IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 4d0314912e94..e25cda9fbf6c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* executed, and a new time event means a new command.
*/
iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
+
+ /* Do the same for the P2P device queue (STA) */
+ if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+ struct iwl_mvm_vif *mvmvif;
+
+ /*
+ * NB: access to this pointer would be racy, but the flush bit
+ * can only be set when we had a P2P-Device VIF, and we have a
+ * flush of this work in iwl_mvm_prepare_mac_removal() so it's
+ * not really racy.
+ */
+
+ if (!WARN_ON(!mvm->p2p_device_vif)) {
+ mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
+ CMD_ASYNC);
+ }
+ }
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
- else
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ } else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
+ }
iwl_mvm_roc_finished(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 593b7f97b29c..333bcb75b8af 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
- queue = mvm->aux_queue;
+ queue = mvm->snif_queue;
+ sta_id = mvm->snif_sta.sta_id;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index d46115e2d69e..03ffd84786ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1134,9 +1134,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
unsigned int default_timeout =
cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
+ /*
+ * We can't know when the station is asleep or awake, so we
+ * must disable the queue hang detection.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+ vif && vif->type == NL80211_IFTYPE_AP)
+ return IWL_WATCHDOG_DISABLED;
return iwlmvm_mod_params.tfd_q_hang_detect ?
default_timeout : IWL_WATCHDOG_DISABLED;
+ }
trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
txq_timer = (void *)trigger->data;
@@ -1163,6 +1172,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
return le32_to_cpu(txq_timer->p2p_go);
case NL80211_IFTYPE_P2P_DEVICE:
return le32_to_cpu(txq_timer->p2p_device);
+ case NL80211_IFTYPE_MONITOR:
+ return default_timeout;
default:
WARN_ON(1);
return mvm->cfg->base_params->wd_timeout;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4a21c12276d7..ccd7c33c4c28 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -535,47 +535,122 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
@@ -590,6 +665,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index c59f4581e972..ac05fd1e74c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -49,6 +49,7 @@
*
*****************************************************************************/
#include "iwl-trans.h"
+#include "iwl-prph.h"
#include "iwl-context-info.h"
#include "internal.h"
@@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
+ /* Stop dbgc before stopping device */
+ iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+ udelay(100);
+ iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b7a51603465b..4541c86881d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -166,6 +166,7 @@ static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
4, buf, i, 0);
}
+ goto out;
err_read:
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@ -1226,6 +1227,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
+ /* Stop dbgc before stopping device */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ } else {
+ iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+ udelay(100);
+ iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+ }
+
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index b5c459cd70ce..fed6d842a5e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -147,9 +147,9 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
memset(ptr, 0, sizeof(*ptr));
}
-static void iwl_pcie_txq_stuck_timer(unsigned long data)
+static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
- struct iwl_txq *txq = (void *)data;
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -495,8 +495,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
- (unsigned long)txq);
+ timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
txq->trans_pcie = trans_pcie;
txq->n_window = slots_num;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 1a8d8db80b05..b4dfe1893d18 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -185,9 +185,9 @@ static void hostap_event_expired_sta(struct net_device *dev,
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void ap_handle_timer(unsigned long data)
+static void ap_handle_timer(struct timer_list *t)
{
- struct sta_info *sta = (struct sta_info *) data;
+ struct sta_info *sta = from_timer(sta, t, timer);
local_info_t *local;
struct ap_data *ap;
unsigned long next_time = 0;
@@ -1189,10 +1189,8 @@ static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- init_timer(&sta->timer);
+ timer_setup(&sta->timer, ap_handle_timer, 0);
sta->timer.expires = jiffies + ap->max_inactivity;
- sta->timer.data = (unsigned long) sta;
- sta->timer.function = ap_handle_timer;
if (!ap->local->hostapd)
add_timer(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 72b46eaf3de2..5c4a17a18968 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -2794,9 +2794,9 @@ static void prism2_check_sta_fw_version(local_info_t *local)
}
-static void hostap_passive_scan(unsigned long data)
+static void hostap_passive_scan(struct timer_list *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, passive_scan_timer);
struct net_device *dev = local->dev;
u16 chan;
@@ -2869,10 +2869,10 @@ static void handle_comms_qual_update(struct work_struct *work)
* used to monitor that local->last_tick_timer is being updated. If not,
* interrupt busy-loop is assumed and driver tries to recover by masking out
* some events. */
-static void hostap_tick_timer(unsigned long data)
+static void hostap_tick_timer(struct timer_list *t)
{
static unsigned long last_inquire = 0;
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, tick_timer);
local->last_tick_timer = jiffies;
/* Inquire CommTallies every 10 seconds to keep the statistics updated
@@ -3225,13 +3225,8 @@ while (0)
lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
- init_timer(&local->passive_scan_timer);
- local->passive_scan_timer.data = (unsigned long) local;
- local->passive_scan_timer.function = hostap_passive_scan;
-
- init_timer(&local->tick_timer);
- local->tick_timer.data = (unsigned long) local;
- local->tick_timer.function = hostap_tick_timer;
+ timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0);
+ timer_setup(&local->tick_timer, hostap_tick_timer, 0);
local->tick_timer.expires = jiffies + 2 * HZ;
add_timer(&local->tick_timer);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 501180584b4b..94ad6fe29e69 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -319,9 +319,9 @@ static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
mod_timer(timer, expire);
}
-static void ezusb_request_timerfn(u_long _ctx)
+static void ezusb_request_timerfn(struct timer_list *t)
{
- struct request_context *ctx = (void *) _ctx;
+ struct request_context *ctx = from_timer(ctx, t, timer);
ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
@@ -365,7 +365,7 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
refcount_set(&ctx->refcount, 1);
init_completion(&ctx->done);
- setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
+ timer_setup(&ctx->timer, ezusb_request_timerfn, 0);
return ctx;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 07a49f58070a..10b075a46b26 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2805,7 +2805,7 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
return -EMSGSIZE;
if (cb)
- genl_dump_check_consistent(cb, hdr, &hwsim_genl_family);
+ genl_dump_check_consistent(cb, hdr);
if (data->alpha2[0] && data->alpha2[1])
param.reg_alpha2 = data->alpha2;
@@ -3108,6 +3108,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
+ int ret;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3147,7 +3148,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_new_radio(info, &param);
+ ret = mac80211_hwsim_new_radio(info, &param);
+ kfree(hwname);
+ return ret;
}
static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 7d6dc76c930a..6711e7fb6926 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -554,7 +554,7 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EFAULT;
}
- mac->scan_timeout.function = (TIMER_FUNC_TYPE)qtnf_scan_timeout;
+ mac->scan_timeout.function = qtnf_scan_timeout;
mod_timer(&mac->scan_timeout,
jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 2d2c1ea65cb2..3423dc51198b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -288,7 +288,7 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
mac->iflist[i].vifid = i;
qtnf_sta_list_init(&mac->iflist[i].sta_list);
mutex_init(&mac->mac_lock);
- setup_timer(&mac->scan_timeout, NULL, 0);
+ timer_setup(&mac->scan_timeout, NULL, 0);
}
qtnf_mac_init_primary_intf(mac);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index d8afcdfca1ed..0133fcd4601b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -569,7 +569,7 @@ static int dl_startup_params(struct net_device *dev)
local->card_status = CARD_DL_PARAM;
/* Start kernel timer to wait for dl startup to complete. */
local->timer.expires = jiffies + HZ / 2;
- local->timer.function = (TIMER_FUNC_TYPE)verify_dl_startup;
+ local->timer.function = verify_dl_startup;
add_timer(&local->timer);
dev_dbg(&link->dev,
"ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -1947,12 +1947,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" start failed\n",
memtmp);
- local->timer.function = (TIMER_FUNC_TYPE)start_net;
+ local->timer.function = start_net;
} else {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" join failed\n",
memtmp);
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
}
add_timer(&local->timer);
}
@@ -2417,9 +2417,9 @@ static void authenticate(ray_dev_t *local)
del_timer(&local->timer);
if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
} else {
- local->timer.function = (TIMER_FUNC_TYPE)authenticate_timeout;
+ local->timer.function = authenticate_timeout;
}
local->timer.expires = jiffies + HZ * 2;
add_timer(&local->timer);
@@ -2502,7 +2502,7 @@ static void associate(ray_dev_t *local)
del_timer(&local->timer);
local->timer.expires = jiffies + HZ * 2;
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
add_timer(&local->timer);
local->card_status = CARD_ASSOC_FAILED;
return;
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 08730227cd18..8f8443833348 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
u8 *buf;
int status = -ENOMEM;
+ if (len > RSI_USB_CTRL_BUF_SIZE)
+ return -EINVAL;
+
buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
if (!buf)
return status;
- if (len > RSI_USB_CTRL_BUF_SIZE)
- return -EINVAL;
-
status = usb_control_msg(usbdev,
usb_rcvctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_READ,
@@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
u8 *usb_reg_buf;
int status = -ENOMEM;
+ if (len > RSI_USB_CTRL_BUF_SIZE)
+ return -EINVAL;
+
usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
if (!usb_reg_buf)
return status;
- if (len > RSI_USB_CTRL_BUF_SIZE)
- return -EINVAL;
-
usb_reg_buf[0] = (value & 0x00ff);
usb_reg_buf[1] = (value & 0xff00) >> 8;
usb_reg_buf[2] = 0x0;
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 03687a80d6e9..38678e9a0562 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
priv->bss_loss_state++;
- skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
WARN_ON(!skb);
if (skb)
cw1200_tx(priv->hw, NULL, skb);
@@ -2265,7 +2265,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
.rate = 0xFF,
};
- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
if (!frame.skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9915d83a4a30..6d02c660b4ab 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
if (!skb)
goto out;
size = skb->len;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 2bfc12fdc929..761cf8573a80 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ptr = NULL;
} else {
skb = ieee80211_nullfunc_get(wl->hw,
- wl12xx_wlvif_to_vif(wlvif));
+ wl12xx_wlvif_to_vif(wlvif),
+ false);
if (!skb)
goto out;
size = skb->len;
@@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, vif);
+ skb = ieee80211_nullfunc_get(wl->hw, vif, false);
if (!skb)
goto out;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index c346c021b999..d47921a84509 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -196,9 +196,9 @@ out:
mutex_unlock(&wl->mutex);
}
-static void wl1271_rx_streaming_timer(unsigned long data)
+static void wl1271_rx_streaming_timer(struct timer_list *t)
{
- struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
struct wl1271 *wl = wlvif->wl;
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
@@ -2279,8 +2279,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
- setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wlvif);
+ timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
return 0;
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index d6dff347f896..78ebe494fef0 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -186,7 +186,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Obtain the queue to be used to transmit this packet */
index = skb_get_queue_mapping(skb);
if (index >= num_queues) {
- pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+ pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
index, vif->dev->name);
index %= num_queues;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b8689c6d887..c5a34671abda 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,6 +87,8 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
struct netfront_stats {
u64 packets;
u64 bytes;
@@ -228,9 +230,9 @@ static bool xennet_can_sg(struct net_device *dev)
}
-static void rx_refill_timeout(unsigned long data)
+static void rx_refill_timeout(struct timer_list *t)
{
- struct netfront_queue *queue = (struct netfront_queue *)data;
+ struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
napi_schedule(&queue->napi);
}
@@ -1605,8 +1607,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
- (unsigned long)queue);
+ timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue->info->netdev->name, queue->id);
@@ -2021,10 +2022,12 @@ static void netback_changed(struct xenbus_device *dev,
break;
case XenbusStateClosed:
+ wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
+ wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
@@ -2130,6 +2133,20 @@ static int xennet_remove(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+ xenbus_switch_state(dev, XenbusStateClosing);
+ wait_event(module_unload_q,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosing);
+
+ xenbus_switch_state(dev, XenbusStateClosed);
+ wait_event(module_unload_q,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosed ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown);
+ }
+
xennet_disconnect_backend(info);
unregister_netdev(info->netdev);
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index 7f8960a46aab..52c8ae504e32 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -130,9 +130,9 @@ static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error)
nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error);
}
-static void fw_dnld_timeout(unsigned long arg)
+static void fw_dnld_timeout(struct timer_list *t)
{
- struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg;
+ struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
nfc_err(priv->dev, "FW loading timeout");
priv->fw_dnld.state = STATE_RESET;
@@ -538,8 +538,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
}
/* Configure a timer for timeout */
- setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout,
- (unsigned long) priv);
+ timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0);
mod_timer(&priv->fw_dnld.timer,
jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT));
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index c05cb637ba92..a0cc1cc45292 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1232,9 +1232,9 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
return 0;
}
-static void pn533_listen_mode_timer(unsigned long data)
+static void pn533_listen_mode_timer(struct timer_list *t)
{
- struct pn533 *dev = (struct pn533 *)data;
+ struct pn533 *dev = from_timer(dev, t, listen_timer);
dev_dbg(dev->dev, "Listen mode timeout\n");
@@ -2632,9 +2632,7 @@ struct pn533 *pn533_register_device(u32 device_type,
if (priv->wq == NULL)
goto error;
- init_timer(&priv->listen_timer);
- priv->listen_timer.data = (unsigned long) priv;
- priv->listen_timer.function = pn533_listen_mode_timer;
+ timer_setup(&priv->listen_timer, pn533_listen_mode_timer, 0);
skb_queue_head_init(&priv->resp_q);
skb_queue_head_init(&priv->fragment_skb);
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 9477994cf975..f26d938d240f 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -246,18 +246,18 @@ void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
}
EXPORT_SYMBOL(ndlc_recv);
-static void ndlc_t1_timeout(unsigned long data)
+static void ndlc_t1_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
pr_debug("\n");
schedule_work(&ndlc->sm_work);
}
-static void ndlc_t2_timeout(unsigned long data)
+static void ndlc_t2_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
pr_debug("\n");
@@ -282,13 +282,8 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
*ndlc_id = ndlc;
/* initialize timers */
- init_timer(&ndlc->t1_timer);
- ndlc->t1_timer.data = (unsigned long)ndlc;
- ndlc->t1_timer.function = ndlc_t1_timeout;
-
- init_timer(&ndlc->t2_timer);
- ndlc->t2_timer.data = (unsigned long)ndlc;
- ndlc->t2_timer.function = ndlc_t2_timeout;
+ timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0);
+ timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0);
skb_queue_head_init(&ndlc->rcv_q);
skb_queue_head_init(&ndlc->send_q);
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 56f2112e0cd8..f55d082ace71 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -677,7 +677,7 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
}
EXPORT_SYMBOL(st_nci_se_io);
-static void st_nci_se_wt_timeout(unsigned long data)
+static void st_nci_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
@@ -690,7 +690,7 @@ static void st_nci_se_wt_timeout(unsigned long data)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
pr_debug("\n");
@@ -708,9 +708,10 @@ static void st_nci_se_wt_timeout(unsigned long data)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st_nci_se_activation_timeout(unsigned long data)
+static void st_nci_se_activation_timeout(struct timer_list *t)
{
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
@@ -725,15 +726,11 @@ int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status)
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function =
- st_nci_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st_nci_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.xch_error = false;
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 3a98563d4a12..4bed9e842db3 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -252,7 +252,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
}
EXPORT_SYMBOL(st21nfca_hci_se_io);
-static void st21nfca_se_wt_timeout(unsigned long data)
+static void st21nfca_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
@@ -265,7 +265,8 @@ static void st21nfca_se_wt_timeout(unsigned long data)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.bwi_timer);
pr_debug("\n");
@@ -283,9 +284,10 @@ static void st21nfca_se_wt_timeout(unsigned long data)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st21nfca_se_activation_timeout(unsigned long data)
+static void st21nfca_se_activation_timeout(struct timer_list *t)
{
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
@@ -392,14 +394,11 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st21nfca_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.count_pipes = 0;
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
index a89243c9fdd3..e51b581fd102 100644
--- a/drivers/ntb/hw/Kconfig
+++ b/drivers/ntb/hw/Kconfig
@@ -1,3 +1,4 @@
source "drivers/ntb/hw/amd/Kconfig"
source "drivers/ntb/hw/idt/Kconfig"
source "drivers/ntb/hw/intel/Kconfig"
+source "drivers/ntb/hw/mscc/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
index 87332c3905f0..923c442db750 100644
--- a/drivers/ntb/hw/Makefile
+++ b/drivers/ntb/hw/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_NTB_AMD) += amd/
obj-$(CONFIG_NTB_IDT) += idt/
obj-$(CONFIG_NTB_INTEL) += intel/
+obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index d44d7ef38fe8..0cd79f367f7c 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev)
/*
* IDT PCIe-switch models ports configuration structures
*/
-static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
.name = "89HPES24NT6AG2",
.port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
};
-static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
.name = "89HPES32NT8AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
.name = "89HPES32NT8BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
.name = "89HPES12NT12G2",
.port_cnt = 3, .ports = {0, 8, 16}
};
-static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
.name = "89HPES16NT16G2",
.port_cnt = 4, .ports = {0, 8, 12, 16}
};
-static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
.name = "89HPES24NT24G2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
.name = "89HPES32NT24AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
.name = "89HPES32NT24BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 2557e2c05b90..4de074a86073 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
{
struct pci_dev *pdev;
void __iomem *mmio;
- resource_size_t bar_size;
phys_addr_t bar_addr;
- int b2b_bar;
- u8 bar_sz;
pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
- if (ndev->b2b_idx == UINT_MAX) {
- dev_dbg(&pdev->dev, "not using b2b mw\n");
- b2b_bar = 0;
- ndev->b2b_off = 0;
- } else {
- b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
- if (b2b_bar < 0)
- return -EIO;
-
- dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
-
- bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
-
- dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
-
- if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
- dev_dbg(&pdev->dev, "b2b using first half of bar\n");
- ndev->b2b_off = bar_size >> 1;
- } else if (bar_size >= XEON_B2B_MIN_SIZE) {
- dev_dbg(&pdev->dev, "b2b using whole bar\n");
- ndev->b2b_off = 0;
- --ndev->mw_count;
- } else {
- dev_dbg(&pdev->dev, "b2b bar size is too small\n");
- return -EIO;
- }
- }
-
- /*
- * Reset the secondary bar sizes to match the primary bar sizes,
- * except disable or halve the size of the b2b secondary bar.
- */
- pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
- if (b2b_bar == 1) {
- if (ndev->b2b_off)
- bar_sz -= 1;
- else
- bar_sz = 0;
- }
-
- pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
- pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
-
- pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
- if (b2b_bar == 2) {
- if (ndev->b2b_off)
- bar_sz -= 1;
- else
- bar_sz = 0;
- }
-
- pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
- pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
-
- /* SBAR01 hit by first part of the b2b bar */
- if (b2b_bar == 0)
- bar_addr = addr->bar0_addr;
- else if (b2b_bar == 1)
- bar_addr = addr->bar2_addr64;
- else if (b2b_bar == 2)
- bar_addr = addr->bar4_addr64;
- else
- return -EIO;
-
/* setup incoming bar limits == base addrs (zero length windows) */
- bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
+ bar_addr = addr->bar2_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
- bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+ bar_addr = addr->bar4_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
diff --git a/drivers/ntb/hw/mscc/Kconfig b/drivers/ntb/hw/mscc/Kconfig
new file mode 100644
index 000000000000..013ed6716438
--- /dev/null
+++ b/drivers/ntb/hw/mscc/Kconfig
@@ -0,0 +1,9 @@
+config NTB_SWITCHTEC
+ tristate "MicroSemi Switchtec Non-Transparent Bridge Support"
+ select PCI_SW_SWITCHTEC
+ help
+ Enables NTB support for Switchtec PCI switches. This also
+ selects the Switchtec management driver as they share the same
+ hardware interface.
+
+ If unsure, say N.
diff --git a/drivers/ntb/hw/mscc/Makefile b/drivers/ntb/hw/mscc/Makefile
new file mode 100644
index 000000000000..064686ead1ba
--- /dev/null
+++ b/drivers/ntb/hw/mscc/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
new file mode 100644
index 000000000000..afe8ed6f3b23
--- /dev/null
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -0,0 +1,1216 @@
+/*
+ * Microsemi Switchtec(tm) PCIe Management Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/switchtec.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ntb.h>
+
+MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Microsemi Corporation");
+
+static ulong max_mw_size = SZ_2M;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size,
+ "Max memory window size reported to the upper layer");
+
+static bool use_lut_mws;
+module_param(use_lut_mws, bool, 0644);
+MODULE_PARM_DESC(use_lut_mws,
+ "Enable the use of the LUT based memory windows");
+
+#ifndef ioread64
+#ifdef readq
+#define ioread64 readq
+#else
+#define ioread64 _ioread64
+static inline u64 _ioread64(void __iomem *mmio)
+{
+ u64 low, high;
+
+ low = ioread32(mmio);
+ high = ioread32(mmio + sizeof(u32));
+ return low | (high << 32);
+}
+#endif
+#endif
+
+#ifndef iowrite64
+#ifdef writeq
+#define iowrite64 writeq
+#else
+#define iowrite64 _iowrite64
+static inline void _iowrite64(u64 val, void __iomem *mmio)
+{
+ iowrite32(val, mmio);
+ iowrite32(val >> 32, mmio + sizeof(u32));
+}
+#endif
+#endif
+
+#define SWITCHTEC_NTB_MAGIC 0x45CC0001
+#define MAX_MWS 128
+
+struct shared_mw {
+ u32 magic;
+ u32 link_sta;
+ u32 partition_id;
+ u64 mw_sizes[MAX_MWS];
+ u32 spad[128];
+};
+
+#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
+#define LUT_SIZE SZ_64K
+
+struct switchtec_ntb {
+ struct ntb_dev ntb;
+ struct switchtec_dev *stdev;
+
+ int self_partition;
+ int peer_partition;
+
+ int doorbell_irq;
+ int message_irq;
+
+ struct ntb_info_regs __iomem *mmio_ntb;
+ struct ntb_ctrl_regs __iomem *mmio_ctrl;
+ struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
+ struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
+ struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
+ struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
+
+ struct shared_mw *self_shared;
+ struct shared_mw __iomem *peer_shared;
+ dma_addr_t self_shared_dma;
+
+ u64 db_mask;
+ u64 db_valid_mask;
+ int db_shift;
+ int db_peer_shift;
+
+ /* synchronize rmw access of db_mask and hw reg */
+ spinlock_t db_mask_lock;
+
+ int nr_direct_mw;
+ int nr_lut_mw;
+ int direct_mw_to_bar[MAX_DIRECT_MW];
+
+ int peer_nr_direct_mw;
+ int peer_nr_lut_mw;
+ int peer_direct_mw_to_bar[MAX_DIRECT_MW];
+
+ bool link_is_up;
+ enum ntb_speed link_speed;
+ enum ntb_width link_width;
+};
+
+static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
+{
+ return container_of(ntb, struct switchtec_ntb, ntb);
+}
+
+static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
+ struct ntb_ctrl_regs __iomem *ctl,
+ u32 op, int wait_status)
+{
+ static const char * const op_text[] = {
+ [NTB_CTRL_PART_OP_LOCK] = "lock",
+ [NTB_CTRL_PART_OP_CFG] = "configure",
+ [NTB_CTRL_PART_OP_RESET] = "reset",
+ };
+
+ int i;
+ u32 ps;
+ int status;
+
+ switch (op) {
+ case NTB_CTRL_PART_OP_LOCK:
+ status = NTB_CTRL_PART_STATUS_LOCKING;
+ break;
+ case NTB_CTRL_PART_OP_CFG:
+ status = NTB_CTRL_PART_STATUS_CONFIGURING;
+ break;
+ case NTB_CTRL_PART_OP_RESET:
+ status = NTB_CTRL_PART_STATUS_RESETTING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iowrite32(op, &ctl->partition_op);
+
+ for (i = 0; i < 1000; i++) {
+ if (msleep_interruptible(50) != 0) {
+ iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
+ return -EINTR;
+ }
+
+ ps = ioread32(&ctl->partition_status) & 0xFFFF;
+
+ if (ps != status)
+ break;
+ }
+
+ if (ps == wait_status)
+ return 0;
+
+ if (ps == status) {
+ dev_err(&sndev->stdev->dev,
+ "Timed out while peforming %s (%d). (%08x)",
+ op_text[op], op,
+ ioread32(&ctl->partition_status));
+
+ return -ETIMEDOUT;
+ }
+
+ return -EIO;
+}
+
+static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
+ u32 val)
+{
+ if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
+ return -EINVAL;
+
+ iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
+
+ return 0;
+}
+
+static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ int nr_direct_mw = sndev->peer_nr_direct_mw;
+ int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (!use_lut_mws)
+ nr_lut_mw = 0;
+
+ return nr_direct_mw + nr_lut_mw;
+}
+
+static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
+{
+ return mw_idx - sndev->nr_direct_mw + 1;
+}
+
+static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
+{
+ return mw_idx - sndev->peer_nr_direct_mw + 1;
+}
+
+static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
+ int widx, resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ int lut;
+ resource_size_t size;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ lut = widx >= sndev->peer_nr_direct_mw;
+ size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (addr_align)
+ *addr_align = lut ? size : SZ_4K;
+
+ if (size_align)
+ *size_align = lut ? size : SZ_4K;
+
+ if (size_max)
+ *size_max = size;
+
+ return 0;
+}
+
+static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ int bar = sndev->peer_direct_mw_to_bar[idx];
+ u32 ctl_val;
+
+ ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+ ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
+ iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+ iowrite32(0, &ctl->bar_entry[bar].win_size);
+ iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
+}
+
+static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+
+ iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
+}
+
+static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ int xlate_pos = ilog2(size);
+ int bar = sndev->peer_direct_mw_to_bar[idx];
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ u32 ctl_val;
+
+ ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+ ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
+
+ iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+ iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
+ iowrite64(sndev->self_partition | addr,
+ &ctl->bar_entry[bar].xlate_addr);
+}
+
+static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+
+ iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
+ &ctl->lut_entry[peer_lut_index(sndev, idx)]);
+}
+
+static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ int xlate_pos = ilog2(size);
+ int nr_direct_mw = sndev->peer_nr_direct_mw;
+ int rc;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap",
+ widx, pidx, &addr, &size);
+
+ if (widx >= switchtec_ntb_mw_count(ntb, pidx))
+ return -EINVAL;
+
+ if (xlate_pos < 12)
+ return -EINVAL;
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
+ NTB_CTRL_PART_STATUS_LOCKED);
+ if (rc)
+ return rc;
+
+ if (addr == 0 || size == 0) {
+ if (widx < nr_direct_mw)
+ switchtec_ntb_mw_clr_direct(sndev, widx);
+ else
+ switchtec_ntb_mw_clr_lut(sndev, widx);
+ } else {
+ if (widx < nr_direct_mw)
+ switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
+ else
+ switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
+ }
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+
+ if (rc == -EIO) {
+ dev_err(&sndev->stdev->dev,
+ "Hardware reported an error configuring mw %d: %08x",
+ widx, ioread32(&ctl->bar_error));
+
+ if (widx < nr_direct_mw)
+ switchtec_ntb_mw_clr_direct(sndev, widx);
+ else
+ switchtec_ntb_mw_clr_lut(sndev, widx);
+
+ switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+ }
+
+ return rc;
+}
+
+static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0);
+}
+
+static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
+ int idx, phys_addr_t *base,
+ resource_size_t *size)
+{
+ int bar = sndev->direct_mw_to_bar[idx];
+ size_t offset = 0;
+
+ if (bar < 0)
+ return -EINVAL;
+
+ if (idx == 0) {
+ /*
+ * This is the direct BAR shared with the LUTs
+ * which means the actual window will be offset
+ * by the size of all the LUT entries.
+ */
+
+ offset = LUT_SIZE * sndev->nr_lut_mw;
+ }
+
+ if (base)
+ *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
+
+ if (size) {
+ *size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
+ if (offset && *size > offset)
+ *size = offset;
+
+ if (*size > max_mw_size)
+ *size = max_mw_size;
+ }
+
+ return 0;
+}
+
+static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
+ int idx, phys_addr_t *base,
+ resource_size_t *size)
+{
+ int bar = sndev->direct_mw_to_bar[0];
+ int offset;
+
+ offset = LUT_SIZE * lut_index(sndev, idx);
+
+ if (base)
+ *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
+
+ if (size)
+ *size = LUT_SIZE;
+
+ return 0;
+}
+
+static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base,
+ resource_size_t *size)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (idx < sndev->nr_direct_mw)
+ return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
+ else if (idx < switchtec_ntb_peer_mw_count(ntb))
+ return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
+ else
+ return -EINVAL;
+}
+
+static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
+ int partition,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct switchtec_dev *stdev = sndev->stdev;
+
+ u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
+ u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
+
+ if (speed)
+ *speed = (linksta >> 16) & 0xF;
+
+ if (width)
+ *width = (linksta >> 20) & 0x3F;
+}
+
+static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
+{
+ enum ntb_speed self_speed, peer_speed;
+ enum ntb_width self_width, peer_width;
+
+ if (!sndev->link_is_up) {
+ sndev->link_speed = NTB_SPEED_NONE;
+ sndev->link_width = NTB_WIDTH_NONE;
+ return;
+ }
+
+ switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
+ &self_speed, &self_width);
+ switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
+ &peer_speed, &peer_width);
+
+ sndev->link_speed = min(self_speed, peer_speed);
+ sndev->link_width = min(self_width, peer_width);
+}
+
+enum {
+ LINK_MESSAGE = 0,
+ MSG_LINK_UP = 1,
+ MSG_LINK_DOWN = 2,
+ MSG_CHECK_LINK = 3,
+};
+
+static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
+{
+ int link_sta;
+ int old = sndev->link_is_up;
+
+ link_sta = sndev->self_shared->link_sta;
+ if (link_sta) {
+ u64 peer = ioread64(&sndev->peer_shared->magic);
+
+ if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
+ link_sta = peer >> 32;
+ else
+ link_sta = 0;
+ }
+
+ sndev->link_is_up = link_sta;
+ switchtec_ntb_set_link_speed(sndev);
+
+ if (link_sta != old) {
+ switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
+ ntb_link_event(&sndev->ntb);
+ dev_info(&sndev->stdev->dev, "ntb link %s",
+ link_sta ? "up" : "down");
+ }
+}
+
+static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
+{
+ struct switchtec_ntb *sndev = stdev->sndev;
+
+ switchtec_ntb_check_link(sndev);
+}
+
+static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (speed)
+ *speed = sndev->link_speed;
+ if (width)
+ *width = sndev->link_width;
+
+ return sndev->link_is_up;
+}
+
+static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ dev_dbg(&sndev->stdev->dev, "enabling link");
+
+ sndev->self_shared->link_sta = 1;
+ switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
+
+ switchtec_ntb_check_link(sndev);
+
+ return 0;
+}
+
+static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ dev_dbg(&sndev->stdev->dev, "disabling link");
+
+ sndev->self_shared->link_sta = 0;
+ switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
+
+ switchtec_ntb_check_link(sndev);
+
+ return 0;
+}
+
+static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+ return 1;
+}
+
+static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (db_vector < 0 || db_vector > 1)
+ return 0;
+
+ return sndev->db_valid_mask;
+}
+
+static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
+{
+ u64 ret;
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
+
+ return ret & sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
+
+ return 0;
+}
+
+static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ unsigned long irqflags;
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (db_bits & ~sndev->db_valid_mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
+
+ sndev->db_mask |= db_bits << sndev->db_shift;
+ iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+
+ spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
+
+ return 0;
+}
+
+static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ unsigned long irqflags;
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (db_bits & ~sndev->db_valid_mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
+
+ sndev->db_mask &= ~(db_bits << sndev->db_shift);
+ iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+
+ spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
+
+ return 0;
+}
+
+static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
+ phys_addr_t *db_addr,
+ resource_size_t *db_size)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ unsigned long offset;
+
+ offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
+ (unsigned long)sndev->stdev->mmio;
+
+ offset += sndev->db_shift / 8;
+
+ if (db_addr)
+ *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
+ if (db_size)
+ *db_size = sizeof(u32);
+
+ return 0;
+}
+
+static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ iowrite64(db_bits << sndev->db_peer_shift,
+ &sndev->mmio_self_dbmsg->odb);
+
+ return 0;
+}
+
+static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return ARRAY_SIZE(sndev->self_shared->spad);
+}
+
+static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
+ return 0;
+
+ if (!sndev->self_shared)
+ return 0;
+
+ return sndev->self_shared->spad[idx];
+}
+
+static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
+ return -EINVAL;
+
+ if (!sndev->self_shared)
+ return -EIO;
+
+ sndev->self_shared->spad[idx] = val;
+
+ return 0;
+}
+
+static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
+ int sidx)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
+ return 0;
+
+ if (!sndev->peer_shared)
+ return 0;
+
+ return ioread32(&sndev->peer_shared->spad[sidx]);
+}
+
+static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
+ int sidx, u32 val)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
+ return -EINVAL;
+
+ if (!sndev->peer_shared)
+ return -EIO;
+
+ iowrite32(val, &sndev->peer_shared->spad[sidx]);
+
+ return 0;
+}
+
+static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
+ int sidx, phys_addr_t *spad_addr)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ unsigned long offset;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
+ (unsigned long)sndev->stdev->mmio;
+
+ if (spad_addr)
+ *spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
+
+ return 0;
+}
+
+static const struct ntb_dev_ops switchtec_ntb_ops = {
+ .mw_count = switchtec_ntb_mw_count,
+ .mw_get_align = switchtec_ntb_mw_get_align,
+ .mw_set_trans = switchtec_ntb_mw_set_trans,
+ .peer_mw_count = switchtec_ntb_peer_mw_count,
+ .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr,
+ .link_is_up = switchtec_ntb_link_is_up,
+ .link_enable = switchtec_ntb_link_enable,
+ .link_disable = switchtec_ntb_link_disable,
+ .db_valid_mask = switchtec_ntb_db_valid_mask,
+ .db_vector_count = switchtec_ntb_db_vector_count,
+ .db_vector_mask = switchtec_ntb_db_vector_mask,
+ .db_read = switchtec_ntb_db_read,
+ .db_clear = switchtec_ntb_db_clear,
+ .db_set_mask = switchtec_ntb_db_set_mask,
+ .db_clear_mask = switchtec_ntb_db_clear_mask,
+ .db_read_mask = switchtec_ntb_db_read_mask,
+ .peer_db_addr = switchtec_ntb_peer_db_addr,
+ .peer_db_set = switchtec_ntb_peer_db_set,
+ .spad_count = switchtec_ntb_spad_count,
+ .spad_read = switchtec_ntb_spad_read,
+ .spad_write = switchtec_ntb_spad_write,
+ .peer_spad_read = switchtec_ntb_peer_spad_read,
+ .peer_spad_write = switchtec_ntb_peer_spad_write,
+ .peer_spad_addr = switchtec_ntb_peer_spad_addr,
+};
+
+static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
+{
+ u64 part_map;
+
+ sndev->ntb.pdev = sndev->stdev->pdev;
+ sndev->ntb.topo = NTB_TOPO_SWITCH;
+ sndev->ntb.ops = &switchtec_ntb_ops;
+
+ sndev->self_partition = sndev->stdev->partition;
+
+ sndev->mmio_ntb = sndev->stdev->mmio_ntb;
+ part_map = ioread64(&sndev->mmio_ntb->ep_map);
+ part_map &= ~(1 << sndev->self_partition);
+ sndev->peer_partition = ffs(part_map) - 1;
+
+ dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
+ sndev->self_partition, sndev->stdev->partition_count,
+ part_map);
+
+ sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
+ SWITCHTEC_NTB_REG_CTRL_OFFSET;
+ sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
+ SWITCHTEC_NTB_REG_DBMSG_OFFSET;
+
+ sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
+ sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
+ sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
+}
+
+static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
+{
+ int i;
+ int cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
+ u32 r = ioread32(&ctrl->bar_entry[i].ctl);
+
+ if (r & NTB_CTRL_BAR_VALID)
+ map[cnt++] = i;
+ }
+
+ return cnt;
+}
+
+static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
+{
+ sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
+ sndev->mmio_self_ctrl);
+
+ sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
+ sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
+
+ dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
+ sndev->nr_direct_mw, sndev->nr_lut_mw);
+
+ sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
+ sndev->mmio_peer_ctrl);
+
+ sndev->peer_nr_lut_mw =
+ ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
+ sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
+
+ dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
+ sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
+
+}
+
+/*
+ * There are 64 doorbells in the switch hardware but this is
+ * shared among all partitions. So we must split them in half
+ * (32 for each partition). However, the message interrupts are
+ * also shared with the top 4 doorbells so we just limit this to
+ * 28 doorbells per partition
+ */
+static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
+{
+ sndev->db_valid_mask = 0x0FFFFFFF;
+
+ if (sndev->self_partition < sndev->peer_partition) {
+ sndev->db_shift = 0;
+ sndev->db_peer_shift = 32;
+ } else {
+ sndev->db_shift = 32;
+ sndev->db_peer_shift = 0;
+ }
+
+ sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
+ iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+ iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
+ &sndev->mmio_self_dbmsg->odb_mask);
+}
+
+static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
+{
+ int i;
+ u32 msg_map = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
+ int m = i | sndev->peer_partition << 2;
+
+ msg_map |= m << i * 8;
+ }
+
+ iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
+
+ for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
+ iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
+ &sndev->mmio_self_dbmsg->imsg[i]);
+}
+
+static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
+{
+ int rc = 0;
+ u16 req_id;
+ u32 error;
+
+ req_id = ioread16(&sndev->mmio_ntb->requester_id);
+
+ if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
+ dev_err(&sndev->stdev->dev,
+ "Not enough requester IDs available.");
+ return -EFAULT;
+ }
+
+ rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
+ NTB_CTRL_PART_OP_LOCK,
+ NTB_CTRL_PART_STATUS_LOCKED);
+ if (rc)
+ return rc;
+
+ iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
+ &sndev->mmio_self_ctrl->partition_ctrl);
+
+ /*
+ * Root Complex Requester ID (which is 0:00.0)
+ */
+ iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
+ &sndev->mmio_self_ctrl->req_id_table[0]);
+
+ /*
+ * Host Bridge Requester ID (as read from the mmap address)
+ */
+ iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
+ &sndev->mmio_self_ctrl->req_id_table[1]);
+
+ rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
+ NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+ if (rc == -EIO) {
+ error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
+ dev_err(&sndev->stdev->dev,
+ "Error setting up the requester ID table: %08x",
+ error);
+ }
+
+ return rc;
+}
+
+static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
+{
+ int i;
+
+ memset(sndev->self_shared, 0, LUT_SIZE);
+ sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
+ sndev->self_shared->partition_id = sndev->stdev->partition;
+
+ for (i = 0; i < sndev->nr_direct_mw; i++) {
+ int bar = sndev->direct_mw_to_bar[i];
+ resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
+
+ if (i == 0)
+ sz = min_t(resource_size_t, sz,
+ LUT_SIZE * sndev->nr_lut_mw);
+
+ sndev->self_shared->mw_sizes[i] = sz;
+ }
+
+ for (i = 0; i < sndev->nr_lut_mw; i++) {
+ int idx = sndev->nr_direct_mw + i;
+
+ sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
+ }
+}
+
+static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ int bar = sndev->direct_mw_to_bar[0];
+ u32 ctl_val;
+ int rc;
+
+ sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
+ LUT_SIZE,
+ &sndev->self_shared_dma,
+ GFP_KERNEL);
+ if (!sndev->self_shared) {
+ dev_err(&sndev->stdev->dev,
+ "unable to allocate memory for shared mw");
+ return -ENOMEM;
+ }
+
+ switchtec_ntb_init_shared(sndev);
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
+ NTB_CTRL_PART_STATUS_LOCKED);
+ if (rc)
+ goto unalloc_and_exit;
+
+ ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+ ctl_val &= 0xFF;
+ ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
+ ctl_val |= ilog2(LUT_SIZE) << 8;
+ ctl_val |= (sndev->nr_lut_mw - 1) << 14;
+ iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+
+ iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
+ sndev->self_shared_dma),
+ &ctl->lut_entry[0]);
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+ if (rc) {
+ u32 bar_error, lut_error;
+
+ bar_error = ioread32(&ctl->bar_error);
+ lut_error = ioread32(&ctl->lut_error);
+ dev_err(&sndev->stdev->dev,
+ "Error setting up shared MW: %08x / %08x",
+ bar_error, lut_error);
+ goto unalloc_and_exit;
+ }
+
+ sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
+ if (!sndev->peer_shared) {
+ rc = -ENOMEM;
+ goto unalloc_and_exit;
+ }
+
+ dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
+ return 0;
+
+unalloc_and_exit:
+ dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
+ sndev->self_shared, sndev->self_shared_dma);
+
+ return rc;
+}
+
+static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
+{
+ if (sndev->peer_shared)
+ pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
+
+ if (sndev->self_shared)
+ dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
+ sndev->self_shared,
+ sndev->self_shared_dma);
+}
+
+static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
+{
+ struct switchtec_ntb *sndev = dev;
+
+ dev_dbg(&sndev->stdev->dev, "doorbell\n");
+
+ ntb_db_event(&sndev->ntb, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
+{
+ int i;
+ struct switchtec_ntb *sndev = dev;
+
+ for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
+ u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
+
+ if (msg & NTB_DBMSG_IMSG_STATUS) {
+ dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
+ (u32)msg);
+ iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
+
+ if (i == LINK_MESSAGE)
+ switchtec_ntb_check_link(sndev);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
+{
+ int i;
+ int rc;
+ int doorbell_irq = 0;
+ int message_irq = 0;
+ int event_irq;
+ int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
+
+ event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
+
+ while (doorbell_irq == event_irq)
+ doorbell_irq++;
+ while (message_irq == doorbell_irq ||
+ message_irq == event_irq)
+ message_irq++;
+
+ dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
+ event_irq, doorbell_irq, message_irq);
+
+ for (i = 0; i < idb_vecs - 4; i++)
+ iowrite8(doorbell_irq,
+ &sndev->mmio_self_dbmsg->idb_vec_map[i]);
+
+ for (; i < idb_vecs; i++)
+ iowrite8(message_irq,
+ &sndev->mmio_self_dbmsg->idb_vec_map[i]);
+
+ sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
+ sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
+
+ rc = request_irq(sndev->doorbell_irq,
+ switchtec_ntb_doorbell_isr, 0,
+ "switchtec_ntb_doorbell", sndev);
+ if (rc)
+ return rc;
+
+ rc = request_irq(sndev->message_irq,
+ switchtec_ntb_message_isr, 0,
+ "switchtec_ntb_message", sndev);
+ if (rc) {
+ free_irq(sndev->doorbell_irq, sndev);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
+{
+ free_irq(sndev->doorbell_irq, sndev);
+ free_irq(sndev->message_irq, sndev);
+}
+
+static int switchtec_ntb_add(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ struct switchtec_ntb *sndev;
+ int rc;
+
+ stdev->sndev = NULL;
+
+ if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
+ return -ENODEV;
+
+ if (stdev->partition_count != 2)
+ dev_warn(dev, "ntb driver only supports 2 partitions");
+
+ sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
+ if (!sndev)
+ return -ENOMEM;
+
+ sndev->stdev = stdev;
+ switchtec_ntb_init_sndev(sndev);
+ switchtec_ntb_init_mw(sndev);
+ switchtec_ntb_init_db(sndev);
+ switchtec_ntb_init_msgs(sndev);
+
+ rc = switchtec_ntb_init_req_id_table(sndev);
+ if (rc)
+ goto free_and_exit;
+
+ rc = switchtec_ntb_init_shared_mw(sndev);
+ if (rc)
+ goto free_and_exit;
+
+ rc = switchtec_ntb_init_db_msg_irq(sndev);
+ if (rc)
+ goto deinit_shared_and_exit;
+
+ rc = ntb_register_device(&sndev->ntb);
+ if (rc)
+ goto deinit_and_exit;
+
+ stdev->sndev = sndev;
+ stdev->link_notifier = switchtec_ntb_link_notification;
+ dev_info(dev, "NTB device registered");
+
+ return 0;
+
+deinit_and_exit:
+ switchtec_ntb_deinit_db_msg_irq(sndev);
+deinit_shared_and_exit:
+ switchtec_ntb_deinit_shared_mw(sndev);
+free_and_exit:
+ kfree(sndev);
+ dev_err(dev, "failed to register ntb device: %d", rc);
+ return rc;
+}
+
+void switchtec_ntb_remove(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ struct switchtec_ntb *sndev = stdev->sndev;
+
+ if (!sndev)
+ return;
+
+ stdev->link_notifier = NULL;
+ stdev->sndev = NULL;
+ ntb_unregister_device(&sndev->ntb);
+ switchtec_ntb_deinit_db_msg_irq(sndev);
+ switchtec_ntb_deinit_shared_mw(sndev);
+ kfree(sndev);
+ dev_info(dev, "ntb device unregistered");
+}
+
+static struct class_interface switchtec_interface = {
+ .add_dev = switchtec_ntb_add,
+ .remove_dev = switchtec_ntb_remove,
+};
+
+static int __init switchtec_ntb_init(void)
+{
+ switchtec_interface.class = switchtec_class;
+ return class_interface_register(&switchtec_interface);
+}
+module_init(switchtec_ntb_init);
+
+static void __exit switchtec_ntb_exit(void)
+{
+ class_interface_unregister(&switchtec_interface);
+}
+module_exit(switchtec_ntb_exit);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f58d8e305323..045e3dd4750e 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -191,8 +191,6 @@ struct ntb_transport_qp {
struct ntb_transport_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
- resource_size_t xlat_align;
- resource_size_t xlat_align_size;
void __iomem *vbase;
size_t xlat_size;
size_t buff_size;
@@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
size_t xlat_size, buff_size;
+ resource_size_t xlat_align;
+ resource_size_t xlat_align_size;
int rc;
if (!size)
return -EINVAL;
- xlat_size = round_up(size, mw->xlat_align_size);
- buff_size = round_up(size, mw->xlat_align);
+ rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
+ &xlat_align_size, NULL);
+ if (rc)
+ return rc;
+
+ xlat_size = round_up(size, xlat_align_size);
+ buff_size = round_up(size, xlat_align);
/* No need to re-setup */
if (mw->xlat_size == xlat_size)
@@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
* is a requirement of the hardware. It is recommended to setup CMA
* for BAR sizes equal or greater than 4MB.
*/
- if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
+ if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
&mw->dma_addr);
ntb_free_mw(nt, num_mw);
@@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
for (i = 0; i < mw_count; i++) {
mw = &nt->mw_vec[i];
- rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
- &mw->xlat_align_size, NULL);
- if (rc)
- goto err1;
-
rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
&mw->phys_size);
if (rc)
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 759f772fa00c..427112cf101a 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)")
struct perf_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
- resource_size_t xlat_align;
- resource_size_t xlat_align_size;
void __iomem *vbase;
size_t xlat_size;
size_t buf_size;
@@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
struct perf_mw *mw = &perf->mw;
size_t xlat_size, buf_size;
+ resource_size_t xlat_align;
+ resource_size_t xlat_align_size;
int rc;
if (!size)
return -EINVAL;
- xlat_size = round_up(size, mw->xlat_align_size);
- buf_size = round_up(size, mw->xlat_align);
+ rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align,
+ &xlat_align_size, NULL);
+ if (rc)
+ return rc;
+
+ xlat_size = round_up(size, xlat_align_size);
+ buf_size = round_up(size, xlat_align);
if (mw->xlat_size == xlat_size)
return 0;
@@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
mw = &perf->mw;
- rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
- &mw->xlat_align_size, NULL);
- if (rc)
- return rc;
-
rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
if (rc)
return rc;
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 938a18bcfc3f..3f5a92bae6f8 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -107,9 +107,9 @@ struct pp_ctx {
static struct dentry *pp_debugfs_dir;
-static void pp_ping(unsigned long ctx)
+static void pp_ping(struct timer_list *t)
{
- struct pp_ctx *pp = (void *)ctx;
+ struct pp_ctx *pp = from_timer(pp, t, db_timer);
unsigned long irqflags;
u64 db_bits, db_mask;
u32 spad_rd, spad_wr;
@@ -153,7 +153,7 @@ static void pp_link_event(void *ctx)
if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
dev_dbg(&pp->ntb->dev, "link is up\n");
- pp_ping((unsigned long)pp);
+ pp_ping(&pp->db_timer);
} else {
dev_dbg(&pp->ntb->dev, "link is down\n");
del_timer(&pp->db_timer);
@@ -252,7 +252,7 @@ static int pp_probe(struct ntb_client *client,
pp->db_bits = 0;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->db_lock);
- setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+ timer_setup(&pp->db_timer, pp_ping, 0);
pp->db_delay = msecs_to_jiffies(delay_ms);
rc = ntb_set_ctx(ntb, pp, &pp_ops);
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index a69815c45ce6..91526a986caa 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
phys_addr_t base;
resource_size_t mw_size;
- resource_size_t align_addr;
- resource_size_t align_size;
- resource_size_t max_size;
+ resource_size_t align_addr = 0;
+ resource_size_t align_size = 0;
+ resource_size_t max_size = 0;
buf_size = min_t(size_t, size, 512);
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 447e0e14f3b6..70d5f3ad9909 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -21,6 +21,7 @@ libnvdimm-y += region_devs.o
libnvdimm-y += region.o
libnvdimm-y += namespace_devs.o
libnvdimm-y += label.o
+libnvdimm-y += badrange.o
libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
libnvdimm-$(CONFIG_BTT) += btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
new file mode 100644
index 000000000000..e068d72b4357
--- /dev/null
+++ b/drivers/nvdimm/badrange.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/ndctl.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "nd-core.h"
+#include "nd.h"
+
+void badrange_init(struct badrange *badrange)
+{
+ INIT_LIST_HEAD(&badrange->list);
+ spin_lock_init(&badrange->lock);
+}
+EXPORT_SYMBOL_GPL(badrange_init);
+
+static void append_badrange_entry(struct badrange *badrange,
+ struct badrange_entry *bre, u64 addr, u64 length)
+{
+ lockdep_assert_held(&badrange->lock);
+ bre->start = addr;
+ bre->length = length;
+ list_add_tail(&bre->list, &badrange->list);
+}
+
+static int alloc_and_append_badrange_entry(struct badrange *badrange,
+ u64 addr, u64 length, gfp_t flags)
+{
+ struct badrange_entry *bre;
+
+ bre = kzalloc(sizeof(*bre), flags);
+ if (!bre)
+ return -ENOMEM;
+
+ append_badrange_entry(badrange, bre, addr, length);
+ return 0;
+}
+
+static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
+{
+ struct badrange_entry *bre, *bre_new;
+
+ spin_unlock(&badrange->lock);
+ bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
+ spin_lock(&badrange->lock);
+
+ if (list_empty(&badrange->list)) {
+ if (!bre_new)
+ return -ENOMEM;
+ append_badrange_entry(badrange, bre_new, addr, length);
+ return 0;
+ }
+
+ /*
+ * There is a chance this is a duplicate, check for those first.
+ * This will be the common case as ARS_STATUS returns all known
+ * errors in the SPA space, and we can't query it per region
+ */
+ list_for_each_entry(bre, &badrange->list, list)
+ if (bre->start == addr) {
+ /* If length has changed, update this list entry */
+ if (bre->length != length)
+ bre->length = length;
+ kfree(bre_new);
+ return 0;
+ }
+
+ /*
+ * If not a duplicate or a simple length update, add the entry as is,
+ * as any overlapping ranges will get resolved when the list is consumed
+ * and converted to badblocks
+ */
+ if (!bre_new)
+ return -ENOMEM;
+ append_badrange_entry(badrange, bre_new, addr, length);
+
+ return 0;
+}
+
+int badrange_add(struct badrange *badrange, u64 addr, u64 length)
+{
+ int rc;
+
+ spin_lock(&badrange->lock);
+ rc = add_badrange(badrange, addr, length);
+ spin_unlock(&badrange->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(badrange_add);
+
+void badrange_forget(struct badrange *badrange, phys_addr_t start,
+ unsigned int len)
+{
+ struct list_head *badrange_list = &badrange->list;
+ u64 clr_end = start + len - 1;
+ struct badrange_entry *bre, *next;
+
+ spin_lock(&badrange->lock);
+
+ /*
+ * [start, clr_end] is the badrange interval being cleared.
+ * [bre->start, bre_end] is the badrange_list entry we're comparing
+ * the above interval against. The badrange list entry may need
+ * to be modified (update either start or length), deleted, or
+ * split into two based on the overlap characteristics
+ */
+
+ list_for_each_entry_safe(bre, next, badrange_list, list) {
+ u64 bre_end = bre->start + bre->length - 1;
+
+ /* Skip intervals with no intersection */
+ if (bre_end < start)
+ continue;
+ if (bre->start > clr_end)
+ continue;
+ /* Delete completely overlapped badrange entries */
+ if ((bre->start >= start) && (bre_end <= clr_end)) {
+ list_del(&bre->list);
+ kfree(bre);
+ continue;
+ }
+ /* Adjust start point of partially cleared entries */
+ if ((start <= bre->start) && (clr_end > bre->start)) {
+ bre->length -= clr_end - bre->start + 1;
+ bre->start = clr_end + 1;
+ continue;
+ }
+ /* Adjust bre->length for partial clearing at the tail end */
+ if ((bre->start < start) && (bre_end <= clr_end)) {
+ /* bre->start remains the same */
+ bre->length = start - bre->start;
+ continue;
+ }
+ /*
+ * If clearing in the middle of an entry, we split it into
+ * two by modifying the current entry to represent one half of
+ * the split, and adding a new entry for the second half.
+ */
+ if ((bre->start < start) && (bre_end > clr_end)) {
+ u64 new_start = clr_end + 1;
+ u64 new_len = bre_end - new_start + 1;
+
+ /* Add new entry covering the right half */
+ alloc_and_append_badrange_entry(badrange, new_start,
+ new_len, GFP_NOWAIT);
+ /* Adjust this entry to cover the left half */
+ bre->length = start - bre->start;
+ continue;
+ }
+ }
+ spin_unlock(&badrange->lock);
+}
+EXPORT_SYMBOL_GPL(badrange_forget);
+
+static void set_badblock(struct badblocks *bb, sector_t s, int num)
+{
+ dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
+ (u64) s * 512, (u64) num * 512);
+ /* this isn't an error as the hardware will still throw an exception */
+ if (badblocks_set(bb, s, num, 1))
+ dev_info_once(bb->dev, "%s: failed for sector %llx\n",
+ __func__, (u64) s);
+}
+
+/**
+ * __add_badblock_range() - Convert a physical address range to bad sectors
+ * @bb: badblocks instance to populate
+ * @ns_offset: namespace offset where the error range begins (in bytes)
+ * @len: number of bytes of badrange to be added
+ *
+ * This assumes that the range provided with (ns_offset, len) is within
+ * the bounds of physical addresses for this namespace, i.e. lies in the
+ * interval [ns_start, ns_start + ns_size)
+ */
+static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
+{
+ const unsigned int sector_size = 512;
+ sector_t start_sector, end_sector;
+ u64 num_sectors;
+ u32 rem;
+
+ start_sector = div_u64(ns_offset, sector_size);
+ end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
+ if (rem)
+ end_sector++;
+ num_sectors = end_sector - start_sector;
+
+ if (unlikely(num_sectors > (u64)INT_MAX)) {
+ u64 remaining = num_sectors;
+ sector_t s = start_sector;
+
+ while (remaining) {
+ int done = min_t(u64, remaining, INT_MAX);
+
+ set_badblock(bb, s, done);
+ remaining -= done;
+ s += done;
+ }
+ } else
+ set_badblock(bb, start_sector, num_sectors);
+}
+
+static void badblocks_populate(struct badrange *badrange,
+ struct badblocks *bb, const struct resource *res)
+{
+ struct badrange_entry *bre;
+
+ if (list_empty(&badrange->list))
+ return;
+
+ list_for_each_entry(bre, &badrange->list, list) {
+ u64 bre_end = bre->start + bre->length - 1;
+
+ /* Discard intervals with no intersection */
+ if (bre_end < res->start)
+ continue;
+ if (bre->start > res->end)
+ continue;
+ /* Deal with any overlap after start of the namespace */
+ if (bre->start >= res->start) {
+ u64 start = bre->start;
+ u64 len;
+
+ if (bre_end <= res->end)
+ len = bre->length;
+ else
+ len = res->start + resource_size(res)
+ - bre->start;
+ __add_badblock_range(bb, start - res->start, len);
+ continue;
+ }
+ /*
+ * Deal with overlap for badrange starting before
+ * the namespace.
+ */
+ if (bre->start < res->start) {
+ u64 len;
+
+ if (bre_end < res->end)
+ len = bre->start + bre->length - res->start;
+ else
+ len = resource_size(res);
+ __add_badblock_range(bb, 0, len);
+ }
+ }
+}
+
+/**
+ * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
+ * @region: parent region of the range to interrogate
+ * @bb: badblocks instance to populate
+ * @res: resource range to consider
+ *
+ * The badrange list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges. Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
+ */
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+ struct badblocks *bb, const struct resource *res)
+{
+ struct nvdimm_bus *nvdimm_bus;
+
+ if (!is_memory(&nd_region->dev)) {
+ dev_WARN_ONCE(&nd_region->dev, 1,
+ "%s only valid for pmem regions\n", __func__);
+ return;
+ }
+ nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ badblocks_populate(&nvdimm_bus->badrange, bb, res);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index d5612bd1cc81..e949e3302af4 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -23,6 +23,7 @@
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
+#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"
@@ -1402,6 +1403,8 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->private_data = btt;
btt->btt_disk->queue = btt->btt_queue;
btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
+ btt->btt_disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_SYNCHRONOUS_IO;
blk_queue_make_request(btt->btt_queue, btt_make_request);
blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index baf283986a7e..0a5e6cd758fe 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -11,6 +11,7 @@
* General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -221,7 +222,7 @@ static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
phys_addr_t phys, u64 cleared)
{
if (cleared > 0)
- nvdimm_forget_poison(nvdimm_bus, phys, cleared);
+ badrange_forget(&nvdimm_bus->badrange, phys, cleared);
if (cleared > 0 && cleared / 512)
nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
@@ -344,11 +345,10 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
return NULL;
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
- INIT_LIST_HEAD(&nvdimm_bus->poison_list);
init_waitqueue_head(&nvdimm_bus->probe_wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
mutex_init(&nvdimm_bus->reconfig_mutex);
- spin_lock_init(&nvdimm_bus->poison_lock);
+ badrange_init(&nvdimm_bus->badrange);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
return NULL;
@@ -395,15 +395,15 @@ static int child_unregister(struct device *dev, void *data)
return 0;
}
-static void free_poison_list(struct list_head *poison_list)
+static void free_badrange_list(struct list_head *badrange_list)
{
- struct nd_poison *pl, *next;
+ struct badrange_entry *bre, *next;
- list_for_each_entry_safe(pl, next, poison_list, list) {
- list_del(&pl->list);
- kfree(pl);
+ list_for_each_entry_safe(bre, next, badrange_list, list) {
+ list_del(&bre->list);
+ kfree(bre);
}
- list_del_init(poison_list);
+ list_del_init(badrange_list);
}
static int nd_bus_remove(struct device *dev)
@@ -417,9 +417,9 @@ static int nd_bus_remove(struct device *dev)
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
- spin_lock(&nvdimm_bus->poison_lock);
- free_poison_list(&nvdimm_bus->poison_list);
- spin_unlock(&nvdimm_bus->poison_lock);
+ spin_lock(&nvdimm_bus->badrange.lock);
+ free_badrange_list(&nvdimm_bus->badrange.list);
+ spin_unlock(&nvdimm_bus->badrange.lock);
nvdimm_bus_destroy_ndctl(nvdimm_bus);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index bb71f0cf8f5d..1dc527660637 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -398,265 +398,11 @@ struct attribute_group nvdimm_bus_attribute_group = {
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);
-static void set_badblock(struct badblocks *bb, sector_t s, int num)
+int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
- dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
- (u64) s * 512, (u64) num * 512);
- /* this isn't an error as the hardware will still throw an exception */
- if (badblocks_set(bb, s, num, 1))
- dev_info_once(bb->dev, "%s: failed for sector %llx\n",
- __func__, (u64) s);
+ return badrange_add(&nvdimm_bus->badrange, addr, length);
}
-
-/**
- * __add_badblock_range() - Convert a physical address range to bad sectors
- * @bb: badblocks instance to populate
- * @ns_offset: namespace offset where the error range begins (in bytes)
- * @len: number of bytes of poison to be added
- *
- * This assumes that the range provided with (ns_offset, len) is within
- * the bounds of physical addresses for this namespace, i.e. lies in the
- * interval [ns_start, ns_start + ns_size)
- */
-static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
-{
- const unsigned int sector_size = 512;
- sector_t start_sector, end_sector;
- u64 num_sectors;
- u32 rem;
-
- start_sector = div_u64(ns_offset, sector_size);
- end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
- if (rem)
- end_sector++;
- num_sectors = end_sector - start_sector;
-
- if (unlikely(num_sectors > (u64)INT_MAX)) {
- u64 remaining = num_sectors;
- sector_t s = start_sector;
-
- while (remaining) {
- int done = min_t(u64, remaining, INT_MAX);
-
- set_badblock(bb, s, done);
- remaining -= done;
- s += done;
- }
- } else
- set_badblock(bb, start_sector, num_sectors);
-}
-
-static void badblocks_populate(struct list_head *poison_list,
- struct badblocks *bb, const struct resource *res)
-{
- struct nd_poison *pl;
-
- if (list_empty(poison_list))
- return;
-
- list_for_each_entry(pl, poison_list, list) {
- u64 pl_end = pl->start + pl->length - 1;
-
- /* Discard intervals with no intersection */
- if (pl_end < res->start)
- continue;
- if (pl->start > res->end)
- continue;
- /* Deal with any overlap after start of the namespace */
- if (pl->start >= res->start) {
- u64 start = pl->start;
- u64 len;
-
- if (pl_end <= res->end)
- len = pl->length;
- else
- len = res->start + resource_size(res)
- - pl->start;
- __add_badblock_range(bb, start - res->start, len);
- continue;
- }
- /* Deal with overlap for poison starting before the namespace */
- if (pl->start < res->start) {
- u64 len;
-
- if (pl_end < res->end)
- len = pl->start + pl->length - res->start;
- else
- len = resource_size(res);
- __add_badblock_range(bb, 0, len);
- }
- }
-}
-
-/**
- * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
- * @region: parent region of the range to interrogate
- * @bb: badblocks instance to populate
- * @res: resource range to consider
- *
- * The poison list generated during bus initialization may contain
- * multiple, possibly overlapping physical address ranges. Compare each
- * of these ranges to the resource range currently being initialized,
- * and add badblocks entries for all matching sub-ranges
- */
-void nvdimm_badblocks_populate(struct nd_region *nd_region,
- struct badblocks *bb, const struct resource *res)
-{
- struct nvdimm_bus *nvdimm_bus;
- struct list_head *poison_list;
-
- if (!is_memory(&nd_region->dev)) {
- dev_WARN_ONCE(&nd_region->dev, 1,
- "%s only valid for pmem regions\n", __func__);
- return;
- }
- nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
- poison_list = &nvdimm_bus->poison_list;
-
- nvdimm_bus_lock(&nvdimm_bus->dev);
- badblocks_populate(poison_list, bb, res);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
-}
-EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
-
-static void append_poison_entry(struct nvdimm_bus *nvdimm_bus,
- struct nd_poison *pl, u64 addr, u64 length)
-{
- lockdep_assert_held(&nvdimm_bus->poison_lock);
- pl->start = addr;
- pl->length = length;
- list_add_tail(&pl->list, &nvdimm_bus->poison_list);
-}
-
-static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
- gfp_t flags)
-{
- struct nd_poison *pl;
-
- pl = kzalloc(sizeof(*pl), flags);
- if (!pl)
- return -ENOMEM;
-
- append_poison_entry(nvdimm_bus, pl, addr, length);
- return 0;
-}
-
-static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
-{
- struct nd_poison *pl, *pl_new;
-
- spin_unlock(&nvdimm_bus->poison_lock);
- pl_new = kzalloc(sizeof(*pl_new), GFP_KERNEL);
- spin_lock(&nvdimm_bus->poison_lock);
-
- if (list_empty(&nvdimm_bus->poison_list)) {
- if (!pl_new)
- return -ENOMEM;
- append_poison_entry(nvdimm_bus, pl_new, addr, length);
- return 0;
- }
-
- /*
- * There is a chance this is a duplicate, check for those first.
- * This will be the common case as ARS_STATUS returns all known
- * errors in the SPA space, and we can't query it per region
- */
- list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
- if (pl->start == addr) {
- /* If length has changed, update this list entry */
- if (pl->length != length)
- pl->length = length;
- kfree(pl_new);
- return 0;
- }
-
- /*
- * If not a duplicate or a simple length update, add the entry as is,
- * as any overlapping ranges will get resolved when the list is consumed
- * and converted to badblocks
- */
- if (!pl_new)
- return -ENOMEM;
- append_poison_entry(nvdimm_bus, pl_new, addr, length);
-
- return 0;
-}
-
-int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
-{
- int rc;
-
- spin_lock(&nvdimm_bus->poison_lock);
- rc = bus_add_poison(nvdimm_bus, addr, length);
- spin_unlock(&nvdimm_bus->poison_lock);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
-
-void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
- unsigned int len)
-{
- struct list_head *poison_list = &nvdimm_bus->poison_list;
- u64 clr_end = start + len - 1;
- struct nd_poison *pl, *next;
-
- spin_lock(&nvdimm_bus->poison_lock);
- WARN_ON_ONCE(list_empty(poison_list));
-
- /*
- * [start, clr_end] is the poison interval being cleared.
- * [pl->start, pl_end] is the poison_list entry we're comparing
- * the above interval against. The poison list entry may need
- * to be modified (update either start or length), deleted, or
- * split into two based on the overlap characteristics
- */
-
- list_for_each_entry_safe(pl, next, poison_list, list) {
- u64 pl_end = pl->start + pl->length - 1;
-
- /* Skip intervals with no intersection */
- if (pl_end < start)
- continue;
- if (pl->start > clr_end)
- continue;
- /* Delete completely overlapped poison entries */
- if ((pl->start >= start) && (pl_end <= clr_end)) {
- list_del(&pl->list);
- kfree(pl);
- continue;
- }
- /* Adjust start point of partially cleared entries */
- if ((start <= pl->start) && (clr_end > pl->start)) {
- pl->length -= clr_end - pl->start + 1;
- pl->start = clr_end + 1;
- continue;
- }
- /* Adjust pl->length for partial clearing at the tail end */
- if ((pl->start < start) && (pl_end <= clr_end)) {
- /* pl->start remains the same */
- pl->length = start - pl->start;
- continue;
- }
- /*
- * If clearing in the middle of an entry, we split it into
- * two by modifying the current entry to represent one half of
- * the split, and adding a new entry for the second half.
- */
- if ((pl->start < start) && (pl_end > clr_end)) {
- u64 new_start = clr_end + 1;
- u64 new_len = pl_end - new_start + 1;
-
- /* Add new entry covering the right half */
- add_poison(nvdimm_bus, new_start, new_len, GFP_NOWAIT);
- /* Adjust this entry to cover the left half */
- pl->length = start - pl->start;
- continue;
- }
- }
- spin_unlock(&nvdimm_bus->poison_lock);
-}
-EXPORT_SYMBOL_GPL(nvdimm_forget_poison);
+EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index e0f0e3ce1a32..f8913b8124b6 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -55,6 +55,8 @@ static int nvdimm_probe(struct device *dev)
goto err;
rc = nvdimm_init_config_data(ndd);
+ if (rc == -EACCES)
+ nvdimm_set_locked(dev);
if (rc)
goto err;
@@ -68,6 +70,7 @@ static int nvdimm_probe(struct device *dev)
rc = nd_label_reserve_dpa(ndd);
if (ndd->ns_current >= 0)
nvdimm_set_aliasing(dev);
+ nvdimm_clear_locked(dev);
nvdimm_bus_unlock(dev);
if (rc)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index f0d1b7e5de01..097794d9f786 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -200,6 +200,13 @@ void nvdimm_set_locked(struct device *dev)
set_bit(NDD_LOCKED, &nvdimm->flags);
}
+void nvdimm_clear_locked(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ clear_bit(NDD_LOCKED, &nvdimm->flags);
+}
+
static void nvdimm_release(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
@@ -324,6 +331,17 @@ static ssize_t commands_show(struct device *dev,
}
static DEVICE_ATTR_RO(commands);
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ return sprintf(buf, "%s%s\n",
+ test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
+ test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
+}
+static DEVICE_ATTR_RO(flags);
+
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -365,6 +383,7 @@ static DEVICE_ATTR_RO(available_slots);
static struct attribute *nvdimm_attributes[] = {
&dev_attr_state.attr,
+ &dev_attr_flags.attr,
&dev_attr_commands.attr,
&dev_attr_available_slots.attr,
NULL,
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 9c5f108910e3..de66c02f6140 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -1050,7 +1050,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
nsindex = to_namespace_index(ndd, 0);
memset(nsindex, 0, ndd->nsarea.config_size);
for (i = 0; i < 2; i++) {
- int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
+ int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
if (rc)
return rc;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 3e4d1e7998da..bb3ba8cf24d4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1620,7 +1620,7 @@ static umode_t namespace_visible(struct kobject *kobj,
if (a == &dev_attr_resource.attr) {
if (is_namespace_blk(dev))
return 0;
- return a->mode;
+ return 0400;
}
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
@@ -1875,7 +1875,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
* @nspm: target namespace to create
* @nd_label: target pmem namespace label to evaluate
*/
-struct device *create_namespace_pmem(struct nd_region *nd_region,
+static struct device *create_namespace_pmem(struct nd_region *nd_region,
struct nd_namespace_index *nsindex,
struct nd_namespace_label *nd_label)
{
@@ -2186,7 +2186,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
return i;
}
-struct device *create_namespace_blk(struct nd_region *nd_region,
+static struct device *create_namespace_blk(struct nd_region *nd_region,
struct nd_namespace_label *nd_label, int count)
{
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 86bc19ae30da..79274ead54fb 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -29,10 +29,9 @@ struct nvdimm_bus {
struct list_head list;
struct device dev;
int id, probe_active;
- struct list_head poison_list;
struct list_head mapping_list;
struct mutex reconfig_mutex;
- spinlock_t poison_lock;
+ struct badrange badrange;
};
struct nvdimm {
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 9c758a91372b..e958f3724c41 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -34,12 +34,6 @@ enum {
NVDIMM_IO_ATOMIC = 1,
};
-struct nd_poison {
- u64 start;
- u64 length;
- struct list_head list;
-};
-
struct nvdimm_drvdata {
struct device *dev;
int nslabel_size;
@@ -254,6 +248,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
+void nvdimm_clear_locked(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 9576c444f0ab..65cc171c721d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -282,8 +282,16 @@ static struct attribute *nd_pfn_attributes[] = {
NULL,
};
+static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ if (a == &dev_attr_resource.attr)
+ return 0400;
+ return a->mode;
+}
+
struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
+ .is_visible = pfn_visible,
};
static const struct attribute_group *nd_pfn_attribute_groups[] = {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 39dfd7affa31..7fbc5c5dc8e1 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -31,6 +31,7 @@
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
+#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
@@ -394,6 +395,7 @@ static int pmem_attach_disk(struct device *dev,
disk->fops = &pmem_fops;
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 829d760f651c..abaf38c61220 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -562,8 +562,12 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
return 0;
- if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
- return 0;
+ if (a == &dev_attr_resource.attr) {
+ if (is_nd_pmem(dev))
+ return 0400;
+ else
+ return 0;
+ }
if (a == &dev_attr_deep_flush.attr) {
int has_flush = nvdimm_has_flush(nd_region);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 25da74d310d1..f837d666cbd4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1449,19 +1449,19 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
int srcu_idx, ret;
u8 data[16] = { 0, };
+ ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ return -EWOULDBLOCK;
+
put_unaligned_le64(key, &data[0]);
put_unaligned_le64(sa_key, &data[8]);
memset(&c, 0, sizeof(c));
c.common.opcode = op;
- c.common.nsid = cpu_to_le32(head->ns_id);
+ c.common.nsid = cpu_to_le32(ns->head->ns_id);
c.common.cdw10[0] = cpu_to_le32(cdw10);
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- ret = -EWOULDBLOCK;
- else
- ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+ ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
nvme_put_ns_from_disk(head, srcu_idx);
return ret;
}
@@ -2961,8 +2961,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
static void nvme_ns_remove(struct nvme_ns *ns)
{
- struct nvme_ns_head *head = ns->head;
-
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
@@ -2980,15 +2978,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->subsys->lock);
nvme_mpath_clear_current_path(ns);
- if (head)
- list_del_rcu(&ns->siblings);
+ list_del_rcu(&ns->siblings);
mutex_unlock(&ns->ctrl->subsys->lock);
mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
mutex_unlock(&ns->ctrl->namespaces_mutex);
- synchronize_srcu(&head->srcu);
+ synchronize_srcu(&ns->head->srcu);
nvme_put_ns(ns);
}
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 42232e731f19..9ba614953607 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+ struct request *rq)
+{
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+ /*
+ * We cannot accept any other command until the connect command has
+ * completed, so only allow connect to pass.
+ */
+ if (!blk_rq_is_passthrough(rq) ||
+ cmd->common.opcode != nvme_fabrics_command ||
+ cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+ /*
+ * Reconnecting state means transport disruption, which can take
+ * a long time and even might fail permanently, fail fast to
+ * give upper layers a chance to failover.
+ * Deleting state means that the ctrl will never accept commands
+ * again, fail it permanently.
+ */
+ if (ctrl->state == NVME_CTRL_RECONNECTING ||
+ ctrl->state == NVME_CTRL_DELETING) {
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+ return BLK_STS_IOERR;
+ }
+ return BLK_STS_RESOURCE; /* try again later */
+ }
+
+ return BLK_STS_OK;
+}
+
#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7ab0be55c7d0..0a8af4daef89 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -31,7 +31,8 @@
enum nvme_fc_queue_flags {
- NVME_FC_Q_CONNECTED = (1 << 0),
+ NVME_FC_Q_CONNECTED = 0,
+ NVME_FC_Q_LIVE,
};
#define NVMEFC_QUEUE_DELAY 3 /* ms units */
@@ -1927,6 +1928,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
return;
+ clear_bit(NVME_FC_Q_LIVE, &queue->flags);
/*
* Current implementation never disconnects a single queue.
* It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
*/
queue->connection_id = 0;
- clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}
static void
@@ -2013,6 +2014,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
break;
+
+ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
}
return ret;
@@ -2320,6 +2323,14 @@ busy:
return BLK_STS_RESOURCE;
}
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+ struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+ return BLK_STS_OK;
+}
+
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
u32 data_len;
blk_status_t ret;
+ ret = nvme_fc_is_ready(queue, rq);
+ if (unlikely(ret))
+ return ret;
+
ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)
return ret;
@@ -2727,6 +2742,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_disconnect_admin_queue;
+ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
/*
* Check controller capabilities
*
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 78d92151a904..1218a9fca846 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -131,7 +131,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
bio->bi_opf |= REQ_NVME_MPATH;
ret = direct_make_request(bio);
} else if (!list_empty_careful(&head->list)) {
- dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+ dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
spin_lock_irq(&head->requeue_lock);
bio_list_add(&head->requeue_list, bio);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c0873a68872f..ea1aa5283e8e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -114,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
* found empirically.
*/
-#define NVME_QUIRK_DELAY_AMOUNT 2000
+#define NVME_QUIRK_DELAY_AMOUNT 2300
enum nvme_ctrl_state {
NVME_CTRL_NEW,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a11cfd470089..f5800c3c9082 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1759,6 +1759,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
dev->host_mem_descs, dev->host_mem_descs_dma);
dev->host_mem_descs = NULL;
+ dev->nr_host_mem_descs = 0;
}
static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
@@ -1787,7 +1788,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
if (!bufs)
goto out_free_descs;
- for (size = 0; size < preferred; size += len) {
+ for (size = 0; size < preferred && i < max_entries; size += len) {
dma_addr_t dma_addr;
len = min_t(u64, chunk_size, preferred - size);
@@ -2428,7 +2429,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
return -ENODEV;
}
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
/*
@@ -2443,6 +2444,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
(dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
return NVME_QUIRK_NO_DEEPEST_PS;
+ } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+ /*
+ * Samsung SSD 960 EVO drops off the PCIe bus after system
+ * suspend on a Ryzen board, ASUS PRIME B350M-A.
+ */
+ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+ dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+ return NVME_QUIRK_NO_APST;
}
return 0;
@@ -2482,7 +2491,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto unmap;
- quirks |= check_dell_samsung_bug(pdev);
+ quirks |= check_vendor_combination_bug(pdev);
result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
quirks);
@@ -2665,6 +2674,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4f9bf2f815c3..37af56596be6 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <rdma/mr_pool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
@@ -59,6 +60,9 @@ struct nvme_rdma_request {
struct nvme_request req;
struct ib_mr *mr;
struct nvme_rdma_qe sqe;
+ union nvme_result result;
+ __le16 status;
+ refcount_t ref;
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
int nents;
@@ -73,11 +77,11 @@ struct nvme_rdma_request {
enum nvme_rdma_queue_flags {
NVME_RDMA_Q_ALLOCATED = 0,
NVME_RDMA_Q_LIVE = 1,
+ NVME_RDMA_Q_TR_READY = 2,
};
struct nvme_rdma_queue {
struct nvme_rdma_qe *rsp_ring;
- atomic_t sig_count;
int queue_size;
size_t cmnd_capsule_len;
struct nvme_rdma_ctrl *ctrl;
@@ -258,32 +262,6 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
return ret;
}
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
- struct nvme_rdma_ctrl *ctrl = data;
- struct nvme_rdma_device *dev = ctrl->device;
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- int ret = 0;
-
- if (WARN_ON_ONCE(!req->mr))
- return 0;
-
- ib_dereg_mr(req->mr);
-
- req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
- ctrl->max_fr_pages);
- if (IS_ERR(req->mr)) {
- ret = PTR_ERR(req->mr);
- req->mr = NULL;
- goto out;
- }
-
- req->mr->need_inval = false;
-
-out:
- return ret;
-}
-
static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{
@@ -293,9 +271,6 @@ static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
struct nvme_rdma_device *dev = queue->device;
- if (req->mr)
- ib_dereg_mr(req->mr);
-
nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
DMA_TO_DEVICE);
}
@@ -317,21 +292,9 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
if (ret)
return ret;
- req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
- ctrl->max_fr_pages);
- if (IS_ERR(req->mr)) {
- ret = PTR_ERR(req->mr);
- goto out_free_qe;
- }
-
req->queue = queue;
return 0;
-
-out_free_qe:
- nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
- return -ENOMEM;
}
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -428,10 +391,23 @@ out_err:
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
+ struct nvme_rdma_device *dev;
+ struct ib_device *ibdev;
- rdma_destroy_qp(queue->cm_id);
+ if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+ return;
+
+ dev = queue->device;
+ ibdev = dev->dev;
+
+ ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+ /*
+ * The cm_id object might have been destroyed during RDMA connection
+ * establishment error flow to avoid getting other cma events, thus
+ * the destruction of the QP shouldn't use rdma_cm API.
+ */
+ ib_destroy_qp(queue->qp);
ib_free_cq(queue->ib_cq);
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
@@ -440,6 +416,12 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
nvme_rdma_dev_put(dev);
}
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+ return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+ ibdev->attrs.max_fast_reg_page_list_len);
+}
+
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
struct ib_device *ibdev;
@@ -482,8 +464,24 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
goto out_destroy_qp;
}
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+ queue->queue_size,
+ IB_MR_TYPE_MEM_REG,
+ nvme_rdma_get_max_fr_pages(ibdev));
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize MR pool sized %d for QID %d\n",
+ queue->queue_size, idx);
+ goto out_destroy_ring;
+ }
+
+ set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
return 0;
+out_destroy_ring:
+ nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
out_destroy_qp:
rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
@@ -510,7 +508,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue->cmnd_capsule_len = sizeof(struct nvme_command);
queue->queue_size = queue_size;
- atomic_set(&queue->sig_count, 0);
queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
RDMA_PS_TCP, IB_QPT_RC);
@@ -546,6 +543,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
return ret;
}
@@ -756,8 +754,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->device = ctrl->queues[0].device;
- ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+ ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
@@ -771,10 +768,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
error = PTR_ERR(ctrl->ctrl.admin_q);
goto out_free_tagset;
}
- } else {
- error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
- if (error)
- goto out_free_queue;
}
error = nvme_rdma_start_queue(ctrl, 0);
@@ -854,10 +847,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_tag_set;
}
} else {
- ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
- if (ret)
- goto out_free_io_queues;
-
blk_mq_update_nr_hw_queues(&ctrl->tag_set,
ctrl->ctrl.queue_count - 1);
}
@@ -1018,8 +1007,18 @@ static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
- if (unlikely(wc->status != IB_WC_SUCCESS))
+ struct nvme_rdma_request *req =
+ container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+ return;
+ }
+
+ if (refcount_dec_and_test(&req->ref))
+ nvme_end_request(rq, req->status, req->result);
+
}
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1030,7 +1029,7 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
.opcode = IB_WR_LOCAL_INV,
.next = NULL,
.num_sge = 0,
- .send_flags = 0,
+ .send_flags = IB_SEND_SIGNALED,
.ex.invalidate_rkey = req->mr->rkey,
};
@@ -1044,22 +1043,15 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_rdma_ctrl *ctrl = queue->ctrl;
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
- int res;
if (!blk_rq_bytes(rq))
return;
- if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
- res = nvme_rdma_inv_rkey(queue, req);
- if (unlikely(res < 0)) {
- dev_err(ctrl->ctrl.device,
- "Queueing INV WR for rkey %#x failed (%d)\n",
- req->mr->rkey, res);
- nvme_rdma_error_recovery(queue->ctrl);
- }
+ if (req->mr) {
+ ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ req->mr = NULL;
}
ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
@@ -1118,12 +1110,18 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
int nr;
+ req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+ if (WARN_ON_ONCE(!req->mr))
+ return -EAGAIN;
+
/*
* Align the MR to a 4K page size to match the ctrl page size and
* the block virtual boundary.
*/
nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
if (unlikely(nr < count)) {
+ ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ req->mr = NULL;
if (nr < 0)
return nr;
return -EINVAL;
@@ -1142,8 +1140,6 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
- req->mr->need_inval = true;
-
sg->addr = cpu_to_le64(req->mr->iova);
put_unaligned_le24(req->mr->length, sg->length);
put_unaligned_le32(req->mr->rkey, sg->key);
@@ -1163,7 +1159,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->num_sge = 1;
req->inline_data = false;
- req->mr->need_inval = false;
+ refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -1200,25 +1196,24 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- if (unlikely(wc->status != IB_WC_SUCCESS))
- nvme_rdma_wr_error(cq, wc, "SEND");
-}
+ struct nvme_rdma_qe *qe =
+ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+ struct nvme_rdma_request *req =
+ container_of(qe, struct nvme_rdma_request, sqe);
+ struct request *rq = blk_mq_rq_from_pdu(req);
-/*
- * We want to signal completion at least every queue depth/2. This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
- int limit = 1 << ilog2((queue->queue_size + 1) / 2);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvme_rdma_wr_error(cq, wc, "SEND");
+ return;
+ }
- return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
+ if (refcount_dec_and_test(&req->ref))
+ nvme_end_request(rq, req->status, req->result);
}
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
- struct ib_send_wr *first, bool flush)
+ struct ib_send_wr *first)
{
struct ib_send_wr wr, *bad_wr;
int ret;
@@ -1227,31 +1222,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
sge->length = sizeof(struct nvme_command),
sge->lkey = queue->device->pd->local_dma_lkey;
- qe->cqe.done = nvme_rdma_send_done;
-
wr.next = NULL;
wr.wr_cqe = &qe->cqe;
wr.sg_list = sge;
wr.num_sge = num_sge;
wr.opcode = IB_WR_SEND;
- wr.send_flags = 0;
-
- /*
- * Unsignalled send completions are another giant desaster in the
- * IB Verbs spec: If we don't regularly post signalled sends
- * the send queue will fill up and only a QP reset will rescue us.
- * Would have been way to obvious to handle this in hardware or
- * at least the RDMA stack..
- *
- * Always signal the flushes. The magic request used for the flush
- * sequencer is not allocated in our driver's tagset and it's
- * triggered to be freed by blk_cleanup_queue(). So we need to
- * always mark it as signaled to ensure that the "wr_cqe", which is
- * embedded in request's payload, is not freed when __ib_process_cq()
- * calls wr_cqe->done().
- */
- if (nvme_rdma_queue_sig_limit(queue) || flush)
- wr.send_flags |= IB_SEND_SIGNALED;
+ wr.send_flags = IB_SEND_SIGNALED;
if (first)
first->next = &wr;
@@ -1301,6 +1277,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
return queue->ctrl->tag_set.tags[queue_idx - 1];
}
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
@@ -1319,10 +1301,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
cmd->common.flags |= NVME_CMD_SGL_METABUF;
nvme_rdma_set_sg_null(cmd);
+ sqe->cqe.done = nvme_rdma_async_done;
+
ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
DMA_TO_DEVICE);
- ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+ ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
WARN_ON_ONCE(ret);
}
@@ -1343,14 +1327,34 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
}
req = blk_mq_rq_to_pdu(rq);
- if (rq->tag == tag)
- ret = 1;
+ req->status = cqe->status;
+ req->result = cqe->result;
- if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
- wc->ex.invalidate_rkey == req->mr->rkey)
- req->mr->need_inval = false;
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+ if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Bogus remote invalidation for rkey %#x\n",
+ req->mr->rkey);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ } else if (req->mr) {
+ ret = nvme_rdma_inv_rkey(queue, req);
+ if (unlikely(ret < 0)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ req->mr->rkey, ret);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ /* the local invalidation completion will end the request */
+ return 0;
+ }
+
+ if (refcount_dec_and_test(&req->ref)) {
+ if (rq->tag == tag)
+ ret = 1;
+ nvme_end_request(rq, req->status, req->result);
+ }
- nvme_end_request(rq, cqe->status, cqe->result);
return ret;
}
@@ -1591,31 +1595,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
* We cannot accept any other command until the Connect command has completed.
*/
static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
- struct nvme_command *cmd = nvme_req(rq)->cmd;
-
- if (!blk_rq_is_passthrough(rq) ||
- cmd->common.opcode != nvme_fabrics_command ||
- cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- /*
- * reconnecting state means transport disruption, which
- * can take a long time and even might fail permanently,
- * fail fast to give upper layers a chance to failover.
- * deleting state means that the ctrl will never accept
- * commands again, fail it permanently.
- */
- if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
- queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
- return BLK_STS_IOERR;
- }
- return BLK_STS_RESOURCE; /* try again later */
- }
- }
-
- return 0;
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+ return BLK_STS_OK;
}
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1627,14 +1611,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_qe *sqe = &req->sqe;
struct nvme_command *c = sqe->data;
- bool flush = false;
struct ib_device *dev;
blk_status_t ret;
int err;
WARN_ON_ONCE(rq->tag < 0);
- ret = nvme_rdma_queue_is_ready(queue, rq);
+ ret = nvme_rdma_is_ready(queue, rq);
if (unlikely(ret))
return ret;
@@ -1656,13 +1639,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
goto err;
}
+ sqe->cqe.done = nvme_rdma_send_done;
+
ib_dma_sync_single_for_device(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
- if (req_op(rq) == REQ_OP_FLUSH)
- flush = true;
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
- req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+ req->mr ? &req->reg_wr.wr : NULL);
if (unlikely(err)) {
nvme_rdma_unmap_data(queue, rq);
goto err;
@@ -1810,7 +1793,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
- .reinit_request = nvme_rdma_reinit_request,
};
static inline bool
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 739b8feadc7d..5fd86039e353 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -533,15 +533,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+ /* release the queue lookup reference on the completed IO */
+ nvmet_fc_tgt_q_put(queue);
+
spin_lock_irqsave(&queue->qlock, flags);
deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
struct nvmet_fc_defer_fcp_req, req_list);
if (!deferfcp) {
list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
spin_unlock_irqrestore(&queue->qlock, flags);
-
- /* Release reference taken at queue lookup and fod allocation */
- nvmet_fc_tgt_q_put(queue);
return;
}
@@ -760,6 +760,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
deferfcp->fcp_req);
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+
kfree(deferfcp);
spin_lock_irqsave(&queue->qlock, flags);
@@ -2144,6 +2147,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod)
{
struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
+ u32 xfrlen = be32_to_cpu(cmdiu->data_len);
int ret;
/*
@@ -2157,7 +2161,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
- fod->req.transfer_len = be32_to_cpu(cmdiu->data_len);
if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
fod->io_dir = NVMET_FCP_WRITE;
if (!nvme_is_write(&cmdiu->sqe))
@@ -2168,7 +2171,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
goto transport_error;
} else {
fod->io_dir = NVMET_FCP_NODATA;
- if (fod->req.transfer_len)
+ if (xfrlen)
goto transport_error;
}
@@ -2192,6 +2195,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
return;
}
+ fod->req.transfer_len = xfrlen;
+
/* keep a running counter of tail position */
atomic_inc(&fod->queue->sqtail);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 96d390416789..1e21b286f299 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -52,10 +52,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}
+enum nvme_loop_queue_flags {
+ NVME_LOOP_Q_LIVE = 0,
+};
+
struct nvme_loop_queue {
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvme_loop_ctrl *ctrl;
+ unsigned long flags;
};
static struct nvmet_port *nvmet_loop_port;
@@ -144,6 +149,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+ struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+ return BLK_STS_OK;
+}
+
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -153,6 +166,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
+ ret = nvme_loop_is_ready(queue, req);
+ if (unlikely(ret))
+ return ret;
+
ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret)
return ret;
@@ -267,6 +284,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -297,8 +315,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
int i;
- for (i = 1; i < ctrl->ctrl.queue_count; i++)
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ }
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -338,6 +358,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
return ret;
+ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
}
return 0;
@@ -380,6 +401,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
if (error)
goto out_cleanup_queue;
+ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
if (error) {
dev_err(ctrl->ctrl.device,
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 101ced4c84be..ff505af064ba 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -123,6 +123,17 @@ config NVMEM_SUNXI_SID
This driver can also be built as a module. If so, the module
will be called nvmem_sunxi_sid.
+config UNIPHIER_EFUSE
+ tristate "UniPhier SoCs eFuse support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of UniPhier SoC
+ from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-uniphier-efuse.
+
config NVMEM_VF610_OCOTP
tristate "VF610 SoC OCOTP support"
depends on SOC_VF610 || COMPILE_TEST
@@ -135,13 +146,33 @@ config NVMEM_VF610_OCOTP
be called nvmem-vf610-ocotp.
config MESON_EFUSE
- tristate "Amlogic eFuse Support"
+ tristate "Amlogic Meson GX eFuse Support"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
help
This is a driver to retrieve specific values from the eFuse found on
- the Amlogic Meson SoCs.
+ the Amlogic Meson GX SoCs.
This driver can also be built as a module. If so, the module
will be called nvmem_meson_efuse.
+config MESON_MX_EFUSE
+ tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_mx_efuse.
+
+config NVMEM_SNVS_LPGPR
+ tristate "Support for Low Power General Purpose Register"
+ depends on SOC_IMX6 || COMPILE_TEST
+ help
+ This is a driver for Low Power General Purpose Register (LPGPR) available on
+ i.MX6 SoCs in Secure Non-Volatile Storage (SNVS) of this chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-snvs-lpgpr.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 6f7a77fb3ee7..e54dcfa6565a 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -27,7 +27,13 @@ obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
+nvmem-uniphier-efuse-y := uniphier-efuse.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
nvmem_meson_efuse-y := meson-efuse.o
+obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
+nvmem_meson_mx_efuse-y := meson-mx-efuse.o
+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
+nvmem_snvs_lpgpr-y := snvs_lpgpr.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 3c56e3b2bd65..5e9e324427f9 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -232,7 +232,6 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
- .owner = THIS_MODULE,
.reg_read = bcm_otpc_read,
.reg_write = bcm_otpc_write,
};
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d12e5de78e70..5a5cefd12153 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -462,6 +462,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->id = rval;
nvmem->owner = config->owner;
+ if (!nvmem->owner && config->dev->driver)
+ nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride;
nvmem->word_size = config->word_size;
nvmem->size = config->size;
@@ -615,7 +617,7 @@ static struct nvmem_device *nvmem_find(const char *name)
return to_nvmem_device(d);
}
-#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_device_get() - Get nvmem device from a given id
*
@@ -753,7 +755,7 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
return cell;
}
-#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
@@ -946,8 +948,7 @@ void nvmem_cell_put(struct nvmem_cell *cell)
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
-static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
- void *buf)
+static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
u8 *p, *b;
int i, bit_offset = cell->bit_offset;
@@ -1028,8 +1029,8 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
-static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
- u8 *_buf, int len)
+static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+ u8 *_buf, int len)
{
struct nvmem_device *nvmem = cell->nvmem;
int i, rc, nbits, bit_offset = cell->bit_offset;
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
index 52ff65e0673f..52cfe91d9762 100644
--- a/drivers/nvmem/imx-iim.c
+++ b/drivers/nvmem/imx-iim.c
@@ -34,7 +34,6 @@ struct imx_iim_drvdata {
struct iim_priv {
void __iomem *base;
struct clk *clk;
- struct nvmem_config nvmem;
};
static int imx_iim_read(void *context, unsigned int offset,
@@ -108,7 +107,7 @@ static int imx_iim_probe(struct platform_device *pdev)
struct resource *res;
struct iim_priv *iim;
struct nvmem_device *nvmem;
- struct nvmem_config *cfg;
+ struct nvmem_config cfg = {};
const struct imx_iim_drvdata *drvdata = NULL;
iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
@@ -130,19 +129,16 @@ static int imx_iim_probe(struct platform_device *pdev)
if (IS_ERR(iim->clk))
return PTR_ERR(iim->clk);
- cfg = &iim->nvmem;
+ cfg.name = "imx-iim",
+ cfg.read_only = true,
+ cfg.word_size = 1,
+ cfg.stride = 1,
+ cfg.reg_read = imx_iim_read,
+ cfg.dev = dev;
+ cfg.size = drvdata->nregs;
+ cfg.priv = iim;
- cfg->name = "imx-iim",
- cfg->read_only = true,
- cfg->word_size = 1,
- cfg->stride = 1,
- cfg->owner = THIS_MODULE,
- cfg->reg_read = imx_iim_read,
- cfg->dev = dev;
- cfg->size = drvdata->nregs;
- cfg->priv = iim;
-
- nvmem = nvmem_register(cfg);
+ nvmem = nvmem_register(&cfg);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 193ca8fd350a..d7ba351a70c9 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -40,14 +40,19 @@
#define IMX_OCOTP_ADDR_CTRL_SET 0x0004
#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008
#define IMX_OCOTP_ADDR_TIMING 0x0010
-#define IMX_OCOTP_ADDR_DATA 0x0020
+#define IMX_OCOTP_ADDR_DATA0 0x0020
+#define IMX_OCOTP_ADDR_DATA1 0x0030
+#define IMX_OCOTP_ADDR_DATA2 0x0040
+#define IMX_OCOTP_ADDR_DATA3 0x0050
#define IMX_OCOTP_BM_CTRL_ADDR 0x0000007F
#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
-#define DEF_RELAX 20 /* > 16.5ns */
+#define DEF_RELAX 20 /* > 16.5ns */
+#define DEF_FSOURCE 1001 /* > 1000 ns */
+#define DEF_STROBE_PROG 10000 /* IPG clocks */
#define IMX_OCOTP_WR_UNLOCK 0x3E770000
#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA
@@ -57,10 +62,16 @@ struct ocotp_priv {
struct device *dev;
struct clk *clk;
void __iomem *base;
- unsigned int nregs;
+ const struct ocotp_params *params;
struct nvmem_config *config;
};
+struct ocotp_params {
+ unsigned int nregs;
+ unsigned int bank_address_words;
+ void (*set_timing)(struct ocotp_priv *priv);
+};
+
static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
{
int count;
@@ -121,8 +132,8 @@ static int imx_ocotp_read(void *context, unsigned int offset,
index = offset >> 2;
count = bytes >> 2;
- if (count > (priv->nregs - index))
- count = priv->nregs - index;
+ if (count > (priv->params->nregs - index))
+ count = priv->params->nregs - index;
mutex_lock(&ocotp_mutex);
@@ -160,6 +171,52 @@ read_end:
return ret;
}
+static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate = 0;
+ unsigned long strobe_read, relax, strobe_prog;
+ u32 timing = 0;
+
+ /* 47.3.1.3.1
+ * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
+ * fields with timing values to match the current frequency of the
+ * ipg_clk. OTP writes will work at maximum bus frequencies as long
+ * as the HW_OCOTP_TIMING parameters are set correctly.
+ */
+ clk_rate = clk_get_rate(priv->clk);
+
+ relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (relax << 12) & 0x0000F000;
+ timing |= (strobe_read << 16) & 0x003F0000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
+static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate = 0;
+ u64 fsource, strobe_prog;
+ u32 timing = 0;
+
+ /* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
+ * 6.4.3.3
+ */
+ clk_rate = clk_get_rate(priv->clk);
+ fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE,
+ NSEC_PER_SEC) + 1;
+ strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG,
+ NSEC_PER_SEC) + 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (fsource << 12) & 0x000FF000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
static int imx_ocotp_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
@@ -167,11 +224,9 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
u32 *buf = val;
int ret;
- unsigned long clk_rate = 0;
- unsigned long strobe_read, relax, strobe_prog;
- u32 timing = 0;
u32 ctrl;
u8 waddr;
+ u8 word = 0;
/* allow only writing one complete OTP word at a time */
if ((bytes != priv->config->word_size) ||
@@ -187,23 +242,8 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
return ret;
}
- /* 47.3.1.3.1
- * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
- * fields with timing values to match the current frequency of the
- * ipg_clk. OTP writes will work at maximum bus frequencies as long
- * as the HW_OCOTP_TIMING parameters are set correctly.
- */
- clk_rate = clk_get_rate(priv->clk);
-
- relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
- strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
- strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
-
- timing = strobe_prog & 0x00000FFF;
- timing |= (relax << 12) & 0x0000F000;
- timing |= (strobe_read << 16) & 0x003F0000;
-
- writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+ /* Setup the write timing values */
+ priv->params->set_timing(priv);
/* 47.3.1.3.2
* Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear.
@@ -224,8 +264,23 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* description. Both the unlock code and address can be written in the
* same operation.
*/
- /* OTP write/read address specifies one of 128 word address locations */
- waddr = offset / 4;
+ if (priv->params->bank_address_words != 0) {
+ /*
+ * In banked/i.MX7 mode the OTP register bank goes into waddr
+ * see i.MX 7Solo Applications Processor Reference Manual, Rev.
+ * 0.1 section 6.4.3.1
+ */
+ offset = offset / priv->config->word_size;
+ waddr = offset / priv->params->bank_address_words;
+ word = offset & (priv->params->bank_address_words - 1);
+ } else {
+ /*
+ * Non-banked i.MX6 mode.
+ * OTP write/read address specifies one of 128 word address
+ * locations
+ */
+ waddr = offset / 4;
+ }
ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
@@ -251,8 +306,43 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* shift right (with zero fill). This shifting is required to program
* the OTP serially. During the write operation, HW_OCOTP_DATA cannot be
* modified.
+ * Note: on i.MX7 there are four data fields to write for banked write
+ * with the fuse blowing operation only taking place after data0
+ * has been written. This is why data0 must always be the last
+ * register written.
*/
- writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA);
+ if (priv->params->bank_address_words != 0) {
+ /* Banked/i.MX7 mode */
+ switch (word) {
+ case 0:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 1:
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 2:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 3:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ }
+ } else {
+ /* Non-banked i.MX6 mode */
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ }
/* 47.4.1.4.5
* Once complete, the controller will clear BUSY. A write request to a
@@ -303,17 +393,46 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
- .owner = THIS_MODULE,
.reg_read = imx_ocotp_read,
.reg_write = imx_ocotp_write,
};
+static const struct ocotp_params imx6q_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6sl_params = {
+ .nregs = 64,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6sx_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6ul_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx7d_params = {
+ .nregs = 64,
+ .bank_address_words = 4,
+ .set_timing = imx_ocotp_set_imx7_timing,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
- { .compatible = "fsl,imx6q-ocotp", (void *)128 },
- { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
- { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
- { .compatible = "fsl,imx6ul-ocotp", (void *)128 },
- { .compatible = "fsl,imx7d-ocotp", (void *)64 },
+ { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
+ { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
+ { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
+ { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
+ { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
@@ -342,8 +461,8 @@ static int imx_ocotp_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
of_id = of_match_device(imx_ocotp_dt_ids, dev);
- priv->nregs = (unsigned long)of_id->data;
- imx_ocotp_nvmem_config.size = 4 * priv->nregs;
+ priv->params = of_device_get_match_data(&pdev->dev);
+ imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
priv->config = &imx_ocotp_nvmem_config;
@@ -375,5 +494,5 @@ static struct platform_driver imx_ocotp_driver = {
module_platform_driver(imx_ocotp_driver);
MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
-MODULE_DESCRIPTION("i.MX6 OCOTP fuse box driver");
+MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index 6c7e2c424a4e..b1af966206a6 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -159,7 +159,6 @@ static struct nvmem_config lpc18xx_nvmem_config = {
.word_size = 4,
.reg_read = lpc18xx_eeprom_read,
.reg_write = lpc18xx_eeprom_gather_write,
- .owner = THIS_MODULE,
};
static int lpc18xx_eeprom_probe(struct platform_device *pdev)
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
index be8d07403ffc..95268db155e9 100644
--- a/drivers/nvmem/lpc18xx_otp.c
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -64,7 +64,6 @@ static struct nvmem_config lpc18xx_otp_nvmem_config = {
.read_only = true,
.word_size = LPC18XX_OTP_WORD_SIZE,
.stride = LPC18XX_OTP_WORD_SIZE,
- .owner = THIS_MODULE,
.reg_read = lpc18xx_otp_read,
};
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index 70bfc9839bb2..a43c68f90937 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -1,5 +1,5 @@
/*
- * Amlogic eFuse Driver
+ * Amlogic Meson GX eFuse Driver
*
* Copyright (c) 2016 Endless Computers, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
@@ -37,7 +37,6 @@ static int meson_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "meson-efuse",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
@@ -89,5 +88,5 @@ static struct platform_driver meson_efuse_driver = {
module_platform_driver(meson_efuse_driver);
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
-MODULE_DESCRIPTION("Amlogic Meson NVMEM driver");
+MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
new file mode 100644
index 000000000000..a346b4923550
--- /dev/null
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -0,0 +1,265 @@
+/*
+ * Amlogic Meson6, Meson8 and Meson8b eFuse Driver
+ *
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#define MESON_MX_EFUSE_CNTL1 0x04
+#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24)
+#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0)
+
+#define MESON_MX_EFUSE_CNTL2 0x08
+
+#define MESON_MX_EFUSE_CNTL4 0x10
+#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10)
+
+struct meson_mx_efuse_platform_data {
+ const char *name;
+ unsigned int word_size;
+};
+
+struct meson_mx_efuse {
+ void __iomem *base;
+ struct clk *core_clk;
+ struct nvmem_device *nvmem;
+ struct nvmem_config config;
+};
+
+static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg,
+ u32 mask, u32 set)
+{
+ u32 data;
+
+ data = readl(efuse->base + reg);
+ data &= ~mask;
+ data |= (set & mask);
+
+ writel(data, efuse->base + reg);
+}
+
+static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse)
+{
+ int err;
+
+ err = clk_prepare_enable(efuse->core_clk);
+ if (err)
+ return err;
+
+ /* power up the efuse */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0);
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4,
+ MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0);
+
+ return 0;
+}
+
+static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse)
+{
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE);
+
+ clk_disable_unprepare(efuse->core_clk);
+}
+
+static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse,
+ unsigned int addr, u32 *value)
+{
+ int err;
+ u32 regval;
+
+ /* write the address to read */
+ regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval);
+
+ /* inform the hardware that we changed the address */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0);
+
+ /* start the read process */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0);
+
+ /*
+ * perform a dummy read to ensure that the HW has the RD_BUSY bit set
+ * when polling for the status below.
+ */
+ readl(efuse->base + MESON_MX_EFUSE_CNTL1);
+
+ err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1,
+ regval,
+ (!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)),
+ 1, 1000);
+ if (err) {
+ dev_err(efuse->config.dev,
+ "Timeout while reading efuse address %u\n", addr);
+ return err;
+ }
+
+ *value = readl(efuse->base + MESON_MX_EFUSE_CNTL2);
+
+ return 0;
+}
+
+static int meson_mx_efuse_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct meson_mx_efuse *efuse = context;
+ u32 tmp;
+ int err, i, addr;
+
+ err = meson_mx_efuse_hw_enable(efuse);
+ if (err)
+ return err;
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
+
+ for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
+ addr = i / efuse->config.word_size;
+
+ err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
+ if (err)
+ break;
+
+ memcpy(buf + i, &tmp, efuse->config.word_size);
+ }
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0);
+
+ meson_mx_efuse_hw_disable(efuse);
+
+ return err;
+}
+
+static const struct meson_mx_efuse_platform_data meson6_efuse_data = {
+ .name = "meson6-efuse",
+ .word_size = 1,
+};
+
+static const struct meson_mx_efuse_platform_data meson8_efuse_data = {
+ .name = "meson8-efuse",
+ .word_size = 4,
+};
+
+static const struct meson_mx_efuse_platform_data meson8b_efuse_data = {
+ .name = "meson8b-efuse",
+ .word_size = 4,
+};
+
+static const struct of_device_id meson_mx_efuse_match[] = {
+ { .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data },
+ { .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data },
+ { .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_mx_efuse_match);
+
+static int meson_mx_efuse_probe(struct platform_device *pdev)
+{
+ const struct meson_mx_efuse_platform_data *drvdata;
+ struct meson_mx_efuse *efuse;
+ struct resource *res;
+
+ drvdata = of_device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ efuse->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name,
+ GFP_KERNEL);
+ efuse->config.owner = THIS_MODULE;
+ efuse->config.dev = &pdev->dev;
+ efuse->config.priv = efuse;
+ efuse->config.stride = drvdata->word_size;
+ efuse->config.word_size = drvdata->word_size;
+ efuse->config.size = SZ_512;
+ efuse->config.read_only = true;
+ efuse->config.reg_read = meson_mx_efuse_read;
+
+ efuse->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(efuse->core_clk)) {
+ dev_err(&pdev->dev, "Failed to get core clock\n");
+ return PTR_ERR(efuse->core_clk);
+ }
+
+ efuse->nvmem = nvmem_register(&efuse->config);
+ if (IS_ERR(efuse->nvmem))
+ return PTR_ERR(efuse->nvmem);
+
+ platform_set_drvdata(pdev, efuse);
+
+ return 0;
+}
+
+static int meson_mx_efuse_remove(struct platform_device *pdev)
+{
+ struct meson_mx_efuse *efuse = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(efuse->nvmem);
+}
+
+static struct platform_driver meson_mx_efuse_driver = {
+ .probe = meson_mx_efuse_probe,
+ .remove = meson_mx_efuse_remove,
+ .driver = {
+ .name = "meson-mx-efuse",
+ .of_match_table = meson_mx_efuse_match,
+ },
+};
+
+module_platform_driver(meson_mx_efuse_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 32fd572e18c5..9ee3479cfc7b 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -18,15 +18,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+struct mtk_efuse_priv {
+ void __iomem *base;
+};
+
static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
- *val++ = readl(base + reg + (i++ * 4));
+ *val++ = readl(priv->base + reg + (i++ * 4));
return 0;
}
@@ -34,12 +38,12 @@ static int mtk_reg_read(void *context,
static int mtk_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
- writel(*val++, base + reg + (i++ * 4));
+ writel(*val++, priv->base + reg + (i++ * 4));
return 0;
}
@@ -49,27 +53,26 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- struct nvmem_config *econfig;
- void __iomem *base;
+ struct nvmem_config econfig = {};
+ struct mtk_efuse_priv *priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
- if (!econfig)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- econfig->stride = 4;
- econfig->word_size = 4;
- econfig->reg_read = mtk_reg_read;
- econfig->reg_write = mtk_reg_write;
- econfig->size = resource_size(res);
- econfig->priv = base;
- econfig->dev = dev;
- econfig->owner = THIS_MODULE;
- nvmem = nvmem_register(econfig);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ econfig.stride = 4;
+ econfig.word_size = 4;
+ econfig.reg_read = mtk_reg_read;
+ econfig.reg_write = mtk_reg_write;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index d26dd03cec80..7018e2ef5714 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -118,7 +118,6 @@ static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
.stride = 16,
.word_size = 4,
- .owner = THIS_MODULE,
.reg_read = mxs_ocotp_read,
};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 2bdb6c389328..cb3b48b47d64 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -17,15 +17,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+struct qfprom_priv {
+ void __iomem *base;
+};
+
static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
- *val++ = readb(base + reg + i++);
+ *val++ = readb(priv->base + reg + i++);
return 0;
}
@@ -33,12 +37,12 @@ static int qfprom_reg_read(void *context,
static int qfprom_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
- writeb(*val++, base + reg + i++);
+ writeb(*val++, priv->base + reg + i++);
return 0;
}
@@ -52,7 +56,6 @@ static int qfprom_remove(struct platform_device *pdev)
static struct nvmem_config econfig = {
.name = "qfprom",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
@@ -64,16 +67,20 @@ static int qfprom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- void __iomem *base;
+ struct qfprom_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
econfig.size = resource_size(res);
econfig.dev = dev;
- econfig.priv = base;
+ econfig.priv = priv;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 63e3eb55f3ac..123de77ca5d6 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -149,7 +149,6 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
@@ -178,6 +177,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
+ .compatible = "rockchip,rk3368-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
.compatible = "rockchip,rk3399-efuse",
.data = (void *)&rockchip_rk3399_efuse_read,
},
diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c
new file mode 100644
index 000000000000..e5c2a4a17f03
--- /dev/null
+++ b/drivers/nvmem/snvs_lpgpr.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define IMX6Q_SNVS_HPLR 0x00
+#define IMX6Q_GPR_SL BIT(5)
+#define IMX6Q_SNVS_LPLR 0x34
+#define IMX6Q_GPR_HL BIT(5)
+#define IMX6Q_SNVS_LPGPR 0x68
+
+struct snvs_lpgpr_cfg {
+ int offset;
+ int offset_hplr;
+ int offset_lplr;
+};
+
+struct snvs_lpgpr_priv {
+ struct device_d *dev;
+ struct regmap *regmap;
+ struct nvmem_config cfg;
+ const struct snvs_lpgpr_cfg *dcfg;
+};
+
+static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = {
+ .offset = IMX6Q_SNVS_LPGPR,
+ .offset_hplr = IMX6Q_SNVS_HPLR,
+ .offset_lplr = IMX6Q_SNVS_LPLR,
+};
+
+static int snvs_lpgpr_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+ unsigned int lock_reg;
+ int ret;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX6Q_GPR_SL)
+ return -EPERM;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX6Q_GPR_HL)
+ return -EPERM;
+
+ return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val,
+ bytes / 4);
+}
+
+static int snvs_lpgpr_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+
+ return regmap_bulk_read(priv->regmap, dcfg->offset + offset,
+ val, bytes / 4);
+}
+
+static int snvs_lpgpr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *syscon_node;
+ struct snvs_lpgpr_priv *priv;
+ struct nvmem_config *cfg;
+ struct nvmem_device *nvmem;
+ const struct snvs_lpgpr_cfg *dcfg;
+
+ if (!node)
+ return -ENOENT;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ syscon_node = of_get_parent(node);
+ if (!syscon_node)
+ return -ENODEV;
+
+ priv->regmap = syscon_node_to_regmap(syscon_node);
+ of_node_put(syscon_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->dcfg = dcfg;
+
+ cfg = &priv->cfg;
+ cfg->priv = priv;
+ cfg->name = dev_name(dev);
+ cfg->dev = dev;
+ cfg->stride = 4,
+ cfg->word_size = 4,
+ cfg->size = 4,
+ cfg->owner = THIS_MODULE,
+ cfg->reg_read = snvs_lpgpr_read,
+ cfg->reg_write = snvs_lpgpr_write,
+
+ nvmem = nvmem_register(cfg);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int snvs_lpgpr_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id snvs_lpgpr_dt_ids[] = {
+ { .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q },
+ { .compatible = "fsl,imx6ul-snvs-lpgpr",
+ .data = &snvs_lpgpr_cfg_imx6q },
+ { },
+};
+MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids);
+
+static struct platform_driver snvs_lpgpr_driver = {
+ .probe = snvs_lpgpr_probe,
+ .remove = snvs_lpgpr_remove,
+ .driver = {
+ .name = "snvs_lpgpr",
+ .of_match_table = snvs_lpgpr_dt_ids,
+ },
+};
+module_platform_driver(snvs_lpgpr_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 Secure Non-Volatile Storage");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 0d6648be93b8..99bd54d85fcb 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -40,7 +40,6 @@ static struct nvmem_config econfig = {
.read_only = true,
.stride = 4,
.word_size = 1,
- .owner = THIS_MODULE,
};
struct sunxi_sid_cfg {
@@ -199,10 +198,16 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
.need_register_readout = true,
};
+static const struct sunxi_sid_cfg sun50i_a64_cfg = {
+ .value_offset = 0x200,
+ .size = 0x100,
+};
+
static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
+ { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
new file mode 100644
index 000000000000..9d278b4e1dc7
--- /dev/null
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -0,0 +1,97 @@
+/*
+ * UniPhier eFuse driver
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct uniphier_efuse_priv {
+ void __iomem *base;
+};
+
+static int uniphier_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct uniphier_efuse_priv *priv = context;
+ u32 *val = _val;
+ int offs;
+
+ for (offs = 0; offs < bytes; offs += sizeof(u32))
+ *val++ = readl(priv->base + reg + offs);
+
+ return 0;
+}
+
+static int uniphier_efuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = {};
+ struct uniphier_efuse_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ econfig.stride = 4;
+ econfig.word_size = 4;
+ econfig.read_only = true;
+ econfig.reg_read = uniphier_reg_read;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int uniphier_efuse_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id uniphier_efuse_of_match[] = {
+ { .compatible = "socionext,uniphier-efuse",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match);
+
+static struct platform_driver uniphier_efuse_driver = {
+ .probe = uniphier_efuse_probe,
+ .remove = uniphier_efuse_remove,
+ .driver = {
+ .name = "uniphier-efuse",
+ .of_match_table = uniphier_efuse_of_match,
+ },
+};
+module_platform_driver(uniphier_efuse_driver);
+
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("UniPhier eFuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
index 72e4faabce29..5ae9e002f195 100644
--- a/drivers/nvmem/vf610-ocotp.c
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -206,7 +206,6 @@ static int vf610_ocotp_read(void *context, unsigned int offset,
static struct nvmem_config ocotp_config = {
.name = "ocotp",
- .owner = THIS_MODULE,
.stride = 4,
.word_size = 4,
.reg_read = vf610_ocotp_read,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f2e649ff746f..26618ba8f92a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -761,10 +761,10 @@ EXPORT_SYMBOL(of_find_node_opts_by_path);
/**
* of_find_node_by_name - Find a node by its "name" property
- * @from: The node to start searching from or NULL, the node
+ * @from: The node to start searching from or NULL; the node
* you pass will not be searched, only the next one
- * will; typically, you pass what the previous call
- * returned. of_node_put() will be called on it
+ * will. Typically, you pass what the previous call
+ * returned. of_node_put() will be called on @from.
* @name: The name string to match against
*
* Returns a node pointer with refcount incremented, use
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index c454941b34ec..ab988d88704d 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -695,7 +695,7 @@ int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert)
/*
* Returns 0 on success, a negative error value in case of an error.
*
- * If multiple changset entry notification errors occur then only the
+ * If multiple changeset entry notification errors occur then only the
* final notification error is reported.
*/
int __of_changeset_apply_notify(struct of_changeset *ocs)
@@ -795,7 +795,7 @@ int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply)
}
/*
- * If multiple changset entry notification errors occur then only the
+ * If multiple changeset entry notification errors occur then only the
* final notification error is reported.
*/
int __of_changeset_revert_notify(struct of_changeset *ocs)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 98258583abb0..3481e69738b5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -81,6 +81,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* can be looked up later */
of_node_get(child);
phy->mdio.dev.of_node = child;
+ phy->mdio.dev.fwnode = of_fwnode_handle(child);
/* All data is now stored in the phy struct;
* register it */
@@ -111,6 +112,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
*/
of_node_get(child);
mdiodev->dev.of_node = child;
+ mdiodev->dev.fwnode = of_fwnode_handle(child);
/* All data is now stored in the mdiodev struct; register it. */
rc = mdio_device_register(mdiodev);
@@ -206,6 +208,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
mdio->phy_mask = ~0;
mdio->dev.of_node = np;
+ mdio->dev.fwnode = of_fwnode_handle(np);
/* Get bus level PHY reset GPIO details */
mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index e9ec931f5b9a..a7b1cb6c2f65 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -374,7 +374,7 @@ int of_pci_map_rid(struct device_node *np, u32 rid,
pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, *id_out);
+ rid_len, rid, masked_rid - rid_base + out_base);
return 0;
}
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 32771c2ced7b..22b75c82e377 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -397,3 +397,29 @@ void of_reserved_mem_device_release(struct device *dev)
rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
+
+/**
+ * of_reserved_mem_lookup() - acquire reserved_mem from a device node
+ * @np: node pointer of the desired reserved-memory region
+ *
+ * This function allows drivers to acquire a reference to the reserved_mem
+ * struct based on a device node handle.
+ *
+ * Returns a reserved_mem reference, or NULL on error.
+ */
+struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
+{
+ const char *name;
+ int i;
+
+ if (!np->full_name)
+ return NULL;
+
+ name = kbasename(np->full_name);
+ for (i = 0; i < reserved_mem_count; i++)
+ if (!strcmp(reserved_mem[i].name, name))
+ return &reserved_mem[i];
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index c150abb9049d..3981b7da4fa9 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -522,7 +522,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
struct device_node *node, *overlay_node;
struct fragment *fragment;
struct fragment *fragments;
- int cnt, ret;
+ int cnt, id, ret;
/*
* Warn for some issues. Can not return -EINVAL for these until
@@ -543,9 +543,9 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
of_changeset_init(&ovcs->cset);
- ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
- if (ovcs->id <= 0)
- return ovcs->id;
+ id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
+ if (id <= 0)
+ return id;
cnt = 0;
@@ -572,18 +572,20 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
cnt = 0;
for_each_child_of_node(tree, node) {
+ overlay_node = of_get_child_by_name(node, "__overlay__");
+ if (!overlay_node)
+ continue;
+
fragment = &fragments[cnt];
- fragment->overlay = of_get_child_by_name(node, "__overlay__");
- if (fragment->overlay) {
- fragment->target = find_target_node(node);
- if (!fragment->target) {
- of_node_put(fragment->overlay);
- ret = -EINVAL;
- goto err_free_fragments;
- } else {
- cnt++;
- }
+ fragment->overlay = overlay_node;
+ fragment->target = find_target_node(node);
+ if (!fragment->target) {
+ of_node_put(fragment->overlay);
+ ret = -EINVAL;
+ goto err_free_fragments;
}
+
+ cnt++;
}
/*
@@ -611,6 +613,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
goto err_free_fragments;
}
+ ovcs->id = id;
ovcs->count = cnt;
ovcs->fragments = fragments;
@@ -619,7 +622,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
err_free_fragments:
kfree(fragments);
err_free_idr:
- idr_remove(&ovcs_idr, ovcs->id);
+ idr_remove(&ovcs_idr, id);
pr_err("%s() failed, ret = %d\n", __func__, ret);
@@ -630,9 +633,8 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
{
int i;
- if (!ovcs->cset.entries.next)
- return;
- of_changeset_destroy(&ovcs->cset);
+ if (ovcs->cset.entries.next)
+ of_changeset_destroy(&ovcs->cset);
if (ovcs->id)
idr_remove(&ovcs_idr, ovcs->id);
@@ -660,14 +662,14 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
* A non-zero return value will not have created the changeset if error is from:
* - parameter checks
* - building the changeset
- * - overlay changset pre-apply notifier
+ * - overlay changeset pre-apply notifier
*
* If an error is returned by an overlay changeset pre-apply notifier
* then no further overlay changeset pre-apply notifier will be called.
*
* A non-zero return value will have created the changeset if error is from:
* - overlay changeset entry notifier
- * - overlay changset post-apply notifier
+ * - overlay changeset post-apply notifier
*
* If an error is returned by an overlay changeset post-apply notifier
* then no further overlay changeset post-apply notifier will be called.
@@ -706,12 +708,11 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
}
of_overlay_mutex_lock();
+ mutex_lock(&of_mutex);
ret = of_resolve_phandles(tree);
if (ret)
- goto err_overlay_unlock;
-
- mutex_lock(&of_mutex);
+ goto err_free_overlay_changeset;
ret = init_overlay_changeset(ovcs, tree);
if (ret)
@@ -736,14 +737,13 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
devicetree_state_flags |= DTSF_APPLY_FAIL;
}
goto err_free_overlay_changeset;
- } else {
- ret = __of_changeset_apply_notify(&ovcs->cset);
- if (ret)
- pr_err("overlay changeset entry notify error %d\n",
- ret);
- /* fall through */
}
+ ret = __of_changeset_apply_notify(&ovcs->cset);
+ if (ret)
+ pr_err("overlay changeset entry notify error %d\n", ret);
+ /* notify failure is not fatal, continue */
+
list_add_tail(&ovcs->ovcs_list, &ovcs_list);
*ovcs_id = ovcs->id;
@@ -755,18 +755,14 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
ret = ret_tmp;
}
- mutex_unlock(&of_mutex);
- of_overlay_mutex_unlock();
-
- goto out;
-
-err_overlay_unlock:
- of_overlay_mutex_unlock();
+ goto out_unlock;
err_free_overlay_changeset:
free_overlay_changeset(ovcs);
+out_unlock:
mutex_unlock(&of_mutex);
+ of_overlay_mutex_unlock();
out:
pr_debug("%s() err=%d\n", __func__, ret);
@@ -871,7 +867,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
*
* A non-zero return value will not revert the changeset if error is from:
* - parameter checks
- * - overlay changset pre-remove notifier
+ * - overlay changeset pre-remove notifier
* - overlay changeset entry revert
*
* If an error is returned by an overlay changeset pre-remove notifier
@@ -882,7 +878,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
*
* A non-zero return value will revert the changeset if error is from:
* - overlay changeset entry notifier
- * - overlay changset post-remove notifier
+ * - overlay changeset post-remove notifier
*
* If an error is returned by an overlay changeset post-remove notifier
* then no further overlay changeset post-remove notifier will be called.
@@ -931,15 +927,13 @@ int of_overlay_remove(int *ovcs_id)
if (ret_apply)
devicetree_state_flags |= DTSF_REVERT_FAIL;
goto out_unlock;
- } else {
- ret = __of_changeset_revert_notify(&ovcs->cset);
- if (ret) {
- pr_err("overlay changeset entry notify error %d\n",
- ret);
- /* fall through - changeset was reverted */
- }
}
+ ret = __of_changeset_revert_notify(&ovcs->cset);
+ if (ret)
+ pr_err("overlay changeset entry notify error %d\n", ret);
+ /* notify failure is not fatal, continue */
+
*ovcs_id = 0;
ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index ac15d0e3d27d..b7cf84b29737 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -497,6 +497,12 @@ int of_platform_default_populate(struct device_node *root,
EXPORT_SYMBOL_GPL(of_platform_default_populate);
#ifndef CONFIG_PPC
+static const struct of_device_id reserved_mem_matches[] = {
+ { .compatible = "qcom,rmtfs-mem" },
+ { .compatible = "ramoops" },
+ {}
+};
+
static int __init of_platform_default_populate_init(void)
{
struct device_node *node;
@@ -505,15 +511,12 @@ static int __init of_platform_default_populate_init(void)
return -ENODEV;
/*
- * Handle ramoops explicitly, since it is inside /reserved-memory,
- * which lacks a "compatible" property.
+ * Handle certain compatibles explicitly, since we don't want to create
+ * platform_devices for every node in /reserved-memory with a
+ * "compatible",
*/
- node = of_find_node_by_path("/reserved-memory");
- if (node) {
- node = of_find_compatible_node(node, NULL, "ramoops");
- if (node)
- of_platform_device_create(node, NULL, NULL);
- }
+ for_each_matching_node(node, reserved_mem_matches)
+ of_platform_device_create(node, NULL, NULL);
/* Populate everything else. */
of_platform_default_populate(NULL, NULL, NULL);
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 3031fc2f18f6..32389acfa616 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+DTC_FLAGS_testcases := -Wno-interrupts_property
obj-y += testcases.dtb.o
targets += testcases.dtb testcases.dtb.S
diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts
index ce49463d9d32..55fe0ee20109 100644
--- a/drivers/of/unittest-data/testcases.dts
+++ b/drivers/of/unittest-data/testcases.dts
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
+/plugin/;
+
/ {
testcase-data {
changeset {
@@ -15,66 +17,3 @@
#include "tests-match.dtsi"
#include "tests-platform.dtsi"
#include "tests-overlay.dtsi"
-
-/*
- * phandle fixup data - generated by dtc patches that aren't upstream.
- * This data must be regenerated whenever phandle references are modified in
- * the testdata tree.
- *
- * The format of this data may be subject to change. For the time being consider
- * this a kernel-internal data format.
- */
-/ { __local_fixups__ {
- testcase-data {
- phandle-tests {
- consumer-a {
- phandle-list = <0x00000000 0x00000008
- 0x00000018 0x00000028
- 0x00000034 0x00000038>;
- phandle-list-bad-args = <0x00000000 0x0000000c>;
- };
- };
- interrupts {
- intmap0 {
- interrupt-map = <0x00000004 0x00000010
- 0x00000024 0x00000034>;
- };
- intmap1 {
- interrupt-map = <0x0000000c>;
- };
- interrupts0 {
- interrupt-parent = <0x00000000>;
- };
- interrupts1 {
- interrupt-parent = <0x00000000>;
- };
- interrupts-extended0 {
- interrupts-extended = <0x00000000 0x00000008
- 0x00000018 0x00000024
- 0x0000002c 0x00000034
- 0x0000003c>;
- };
- };
- testcase-device1 {
- interrupt-parent = <0x00000000>;
- };
- testcase-device2 {
- interrupt-parent = <0x00000000>;
- };
- overlay2 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- overlay3 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- overlay4 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- };
-}; };
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e568b1e82501..0f8052f1355c 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2165,7 +2165,6 @@ static int __init overlay_data_add(int onum)
ret = of_overlay_apply(info->np_overlay, &info->overlay_id);
if (ret < 0) {
pr_err("of_overlay_apply() (ret=%d), %d\n", ret, onum);
- of_overlay_mutex_unlock();
goto out_free_np_overlay;
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a25fed52f7e9..41b740aed3a3 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
iounmap(base_addr);
}
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card which both don't have any external connectors and
+ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
+ * such makes those machines the only PARISC machines on which we can't use
+ * ttyS0 as boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+ dev->subsystem_device != 0x1292)
+ return;
+
+ dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+ dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+ quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+ dev->subsystem_device != 0x1291)
+ return;
+
+ dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+ dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+ quirk_diva_aux_disable);
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index 0186db7680d4..62873070f988 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -1769,7 +1769,7 @@ stop:
/*--- Default parport operations ---------------------------------------*/
-static __initdata struct parport_operations parport_ip32_ops = {
+static const struct parport_operations parport_ip32_ops __initconst = {
.write_data = parport_ip32_write_data,
.read_data = parport_ip32_read_data,
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 90944667ccea..bda151788f3f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -80,15 +80,6 @@ config XEN_PCIDEV_FRONTEND
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
-config HT_IRQ
- bool "Interrupts on hypertransport devices"
- default y
- depends on PCI && X86_LOCAL_APIC
- help
- This allows native hypertransport devices to use interrupts.
-
- If unsure say Y.
-
config PCI_ATS
bool
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d5e047f0a32..c7819b973df7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -21,9 +21,6 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
-# Build the Hypertransport interrupt support
-obj-$(CONFIG_HT_IRQ) += htirq.o
-
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 12796eccb2be..52ab3cb0a0bf 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(dev, "failed to request resources: %d\n", err);
- goto err_free_bridge;
+ goto err_free_resource_list;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
- goto err_free_bridge;
+ goto err_free_resource_list;
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
err_pm_disable:
pm_runtime_disable(dev);
-err_free_bridge:
- pci_free_host_bridge(bridge);
+err_free_resource_list:
pci_free_resource_list(&pcie->resources);
+ pci_free_host_bridge(bridge);
return err;
}
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
deleted file mode 100644
index bb88c26f5144..000000000000
--- a/drivers/pci/htirq.c
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * File: htirq.c
- * Purpose: Hypertransport Interrupt Capability
- *
- * Copyright (C) 2006 Linux Networx
- * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/htirq.h>
-
-/* Global ht irq lock.
- *
- * This is needed to serialize access to the data port in hypertransport
- * irq capability.
- *
- * With multiple simultaneous hypertransport irq devices it might pay
- * to make this more fine grained. But start with simple, stupid, and correct.
- */
-static DEFINE_SPINLOCK(ht_irq_lock);
-
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
- unsigned long flags;
-
- spin_lock_irqsave(&ht_irq_lock, flags);
- if (cfg->msg.address_lo != msg->address_lo) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
- }
- if (cfg->msg.address_hi != msg->address_hi) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
- }
- if (cfg->update)
- cfg->update(cfg->dev, irq, msg);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
- cfg->msg = *msg;
-}
-
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-
- *msg = cfg->msg;
-}
-
-void mask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo |= 1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-void unmask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo &= ~1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-/**
- * __ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- * @update: Function to be called when changing the htirq message
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
-{
- int max_irq, pos, irq;
- unsigned long flags;
- u32 data;
-
- pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
- if (!pos)
- return -EINVAL;
-
- /* Verify the idx I want to use is in range */
- spin_lock_irqsave(&ht_irq_lock, flags);
- pci_write_config_byte(dev, pos + 2, 1);
- pci_read_config_dword(dev, pos + 4, &data);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
-
- max_irq = (data >> 16) & 0xff;
- if (idx > max_irq)
- return -EINVAL;
-
- irq = arch_setup_ht_irq(idx, pos, dev, update);
- if (irq > 0)
- dev_dbg(&dev->dev, "irq %d for HT\n", irq);
-
- return irq;
-}
-EXPORT_SYMBOL(__ht_create_irq);
-
-/**
- * ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- *
- * ht_create_irq needs to be called for all hypertransport devices
- * that generate irqs.
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int ht_create_irq(struct pci_dev *dev, int idx)
-{
- return __ht_create_irq(dev, idx, NULL);
-}
-EXPORT_SYMBOL(ht_create_irq);
-
-/**
- * ht_destroy_irq - destroy an irq created with ht_create_irq
- * @irq: irq to be destroyed
- *
- * This reverses ht_create_irq removing the specified irq from
- * existence. The irq should be free before this happens.
- */
-void ht_destroy_irq(unsigned int irq)
-{
- arch_teardown_ht_irq(irq);
-}
-EXPORT_SYMBOL(ht_destroy_irq);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 7f47bb72bf30..14fd865a5120 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -999,7 +999,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
* the subsequent "thaw" callbacks for the device.
*/
if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.direct_complete = true;
+ dev_pm_skip_next_resume_phases(dev);
return 0;
}
@@ -1012,7 +1012,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
- pci_update_current_state(pci_dev, PCI_D0);
+ /*
+ * pci_restore_state() requires the device to be in D0 (because of MSI
+ * restoration among other things), so force it into D0 in case the
+ * driver's "freeze" callbacks put it into a low-power state directly.
+ */
+ pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
if (drv && drv->pm && drv->pm->thaw_noirq)
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index da45dbea20ce..730cc897b94d 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>
#include <linux/interrupt.h>
@@ -20,8 +21,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
-#include <linux/pci.h>
-#include <linux/cdev.h>
#include <linux/wait.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -34,265 +33,10 @@ module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
static dev_t switchtec_devt;
-static struct class *switchtec_class;
static DEFINE_IDA(switchtec_minor_ida);
-#define MICROSEMI_VENDOR_ID 0x11f8
-#define MICROSEMI_NTB_CLASSCODE 0x068000
-#define MICROSEMI_MGMT_CLASSCODE 0x058000
-
-#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
-#define SWITCHTEC_MAX_PFF_CSR 48
-
-#define SWITCHTEC_EVENT_OCCURRED BIT(0)
-#define SWITCHTEC_EVENT_CLEAR BIT(0)
-#define SWITCHTEC_EVENT_EN_LOG BIT(1)
-#define SWITCHTEC_EVENT_EN_CLI BIT(2)
-#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
-#define SWITCHTEC_EVENT_FATAL BIT(4)
-
-enum {
- SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
- SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
- SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
- SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
- SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
- SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
- SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
- SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
-};
-
-struct mrpc_regs {
- u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
- u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
- u32 cmd;
- u32 status;
- u32 ret_value;
-} __packed;
-
-enum mrpc_status {
- SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
- SWITCHTEC_MRPC_STATUS_DONE = 2,
- SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
- SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
-};
-
-struct sw_event_regs {
- u64 event_report_ctrl;
- u64 reserved1;
- u64 part_event_bitmap;
- u64 reserved2;
- u32 global_summary;
- u32 reserved3[3];
- u32 stack_error_event_hdr;
- u32 stack_error_event_data;
- u32 reserved4[4];
- u32 ppu_error_event_hdr;
- u32 ppu_error_event_data;
- u32 reserved5[4];
- u32 isp_error_event_hdr;
- u32 isp_error_event_data;
- u32 reserved6[4];
- u32 sys_reset_event_hdr;
- u32 reserved7[5];
- u32 fw_exception_hdr;
- u32 reserved8[5];
- u32 fw_nmi_hdr;
- u32 reserved9[5];
- u32 fw_non_fatal_hdr;
- u32 reserved10[5];
- u32 fw_fatal_hdr;
- u32 reserved11[5];
- u32 twi_mrpc_comp_hdr;
- u32 twi_mrpc_comp_data;
- u32 reserved12[4];
- u32 twi_mrpc_comp_async_hdr;
- u32 twi_mrpc_comp_async_data;
- u32 reserved13[4];
- u32 cli_mrpc_comp_hdr;
- u32 cli_mrpc_comp_data;
- u32 reserved14[4];
- u32 cli_mrpc_comp_async_hdr;
- u32 cli_mrpc_comp_async_data;
- u32 reserved15[4];
- u32 gpio_interrupt_hdr;
- u32 gpio_interrupt_data;
- u32 reserved16[4];
-} __packed;
-
-enum {
- SWITCHTEC_CFG0_RUNNING = 0x04,
- SWITCHTEC_CFG1_RUNNING = 0x05,
- SWITCHTEC_IMG0_RUNNING = 0x03,
- SWITCHTEC_IMG1_RUNNING = 0x07,
-};
-
-struct sys_info_regs {
- u32 device_id;
- u32 device_version;
- u32 firmware_version;
- u32 reserved1;
- u32 vendor_table_revision;
- u32 table_format_version;
- u32 partition_id;
- u32 cfg_file_fmt_version;
- u16 cfg_running;
- u16 img_running;
- u32 reserved2[57];
- char vendor_id[8];
- char product_id[16];
- char product_revision[4];
- char component_vendor[8];
- u16 component_id;
- u8 component_revision;
-} __packed;
-
-struct flash_info_regs {
- u32 flash_part_map_upd_idx;
-
- struct active_partition_info {
- u32 address;
- u32 build_version;
- u32 build_string;
- } active_img;
-
- struct active_partition_info active_cfg;
- struct active_partition_info inactive_img;
- struct active_partition_info inactive_cfg;
-
- u32 flash_length;
-
- struct partition_info {
- u32 address;
- u32 length;
- } cfg0;
-
- struct partition_info cfg1;
- struct partition_info img0;
- struct partition_info img1;
- struct partition_info nvlog;
- struct partition_info vendor[8];
-};
-
-struct ntb_info_regs {
- u8 partition_count;
- u8 partition_id;
- u16 reserved1;
- u64 ep_map;
- u16 requester_id;
-} __packed;
-
-struct part_cfg_regs {
- u32 status;
- u32 state;
- u32 port_cnt;
- u32 usp_port_mode;
- u32 usp_pff_inst_id;
- u32 vep_pff_inst_id;
- u32 dsp_pff_inst_id[47];
- u32 reserved1[11];
- u16 vep_vector_number;
- u16 usp_vector_number;
- u32 port_event_bitmap;
- u32 reserved2[3];
- u32 part_event_summary;
- u32 reserved3[3];
- u32 part_reset_hdr;
- u32 part_reset_data[5];
- u32 mrpc_comp_hdr;
- u32 mrpc_comp_data[5];
- u32 mrpc_comp_async_hdr;
- u32 mrpc_comp_async_data[5];
- u32 dyn_binding_hdr;
- u32 dyn_binding_data[5];
- u32 reserved4[159];
-} __packed;
-
-enum {
- SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
- SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
- SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
- SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
-};
-
-struct pff_csr_regs {
- u16 vendor_id;
- u16 device_id;
- u32 pci_cfg_header[15];
- u32 pci_cap_region[48];
- u32 pcie_cap_region[448];
- u32 indirect_gas_window[128];
- u32 indirect_gas_window_off;
- u32 reserved[127];
- u32 pff_event_summary;
- u32 reserved2[3];
- u32 aer_in_p2p_hdr;
- u32 aer_in_p2p_data[5];
- u32 aer_in_vep_hdr;
- u32 aer_in_vep_data[5];
- u32 dpc_hdr;
- u32 dpc_data[5];
- u32 cts_hdr;
- u32 cts_data[5];
- u32 reserved3[6];
- u32 hotplug_hdr;
- u32 hotplug_data[5];
- u32 ier_hdr;
- u32 ier_data[5];
- u32 threshold_hdr;
- u32 threshold_data[5];
- u32 power_mgmt_hdr;
- u32 power_mgmt_data[5];
- u32 tlp_throttling_hdr;
- u32 tlp_throttling_data[5];
- u32 force_speed_hdr;
- u32 force_speed_data[5];
- u32 credit_timeout_hdr;
- u32 credit_timeout_data[5];
- u32 link_state_hdr;
- u32 link_state_data[5];
- u32 reserved4[174];
-} __packed;
-
-struct switchtec_dev {
- struct pci_dev *pdev;
- struct device dev;
- struct cdev cdev;
-
- int partition;
- int partition_count;
- int pff_csr_count;
- char pff_local[SWITCHTEC_MAX_PFF_CSR];
-
- void __iomem *mmio;
- struct mrpc_regs __iomem *mmio_mrpc;
- struct sw_event_regs __iomem *mmio_sw_event;
- struct sys_info_regs __iomem *mmio_sys_info;
- struct flash_info_regs __iomem *mmio_flash_info;
- struct ntb_info_regs __iomem *mmio_ntb;
- struct part_cfg_regs __iomem *mmio_part_cfg;
- struct part_cfg_regs __iomem *mmio_part_cfg_all;
- struct pff_csr_regs __iomem *mmio_pff_csr;
-
- /*
- * The mrpc mutex must be held when accessing the other
- * mrpc_ fields, alive flag and stuser->state field
- */
- struct mutex mrpc_mutex;
- struct list_head mrpc_queue;
- int mrpc_busy;
- struct work_struct mrpc_work;
- struct delayed_work mrpc_timeout;
- bool alive;
-
- wait_queue_head_t event_wq;
- atomic_t event_cnt;
-};
-
-static struct switchtec_dev *to_stdev(struct device *dev)
-{
- return container_of(dev, struct switchtec_dev, dev);
-}
+struct class *switchtec_class;
+EXPORT_SYMBOL_GPL(switchtec_class);
enum mrpc_state {
MRPC_IDLE = 0,
@@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = {
.compat_ioctl = switchtec_dev_ioctl,
};
+static void link_event_work(struct work_struct *work)
+{
+ struct switchtec_dev *stdev;
+
+ stdev = container_of(work, struct switchtec_dev, link_event_work);
+
+ if (stdev->link_notifier)
+ stdev->link_notifier(stdev);
+}
+
+static void check_link_state_events(struct switchtec_dev *stdev)
+{
+ int idx;
+ u32 reg;
+ int count;
+ int occurred = 0;
+
+ for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+ reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
+ dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
+ count = (reg >> 5) & 0xFF;
+
+ if (count != stdev->link_event_count[idx]) {
+ occurred = 1;
+ stdev->link_event_count[idx] = count;
+ }
+ }
+
+ if (occurred)
+ schedule_work(&stdev->link_event_work);
+}
+
+static void enable_link_state_events(struct switchtec_dev *stdev)
+{
+ int idx;
+
+ for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+ iowrite32(SWITCHTEC_EVENT_CLEAR |
+ SWITCHTEC_EVENT_EN_IRQ,
+ &stdev->mmio_pff_csr[idx].link_state_hdr);
+ }
+}
+
static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
@@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
stdev->mrpc_busy = 0;
INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
+ INIT_WORK(&stdev->link_event_work, link_event_work);
init_waitqueue_head(&stdev->event_wq);
atomic_set(&stdev->event_cnt, 0);
@@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
+ if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+ return 0;
+
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
iowrite32(hdr, hdr_reg);
@@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid)
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
if (!stdev->pff_local[idx])
continue;
+
count += mask_event(stdev, eid, idx);
}
} else {
@@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
}
+ check_link_state_events(stdev);
+
for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
event_count += mask_all_events(stdev, eid);
@@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
struct switchtec_dev *stdev;
int rc;
+ if (pdev->class == MICROSEMI_NTB_CLASSCODE)
+ request_module_nowait("ntb_hw_switchtec");
+
stdev = stdev_create(pdev);
if (IS_ERR(stdev))
return PTR_ERR(stdev);
@@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
+ enable_link_state_events(stdev);
rc = cdev_device_add(&stdev->cdev, &stdev->dev);
if (rc)
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 55ef7d1fd8da..102646fedb56 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1599,7 +1599,7 @@ static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
}
-struct bin_attribute pccard_cis_attr = {
+const struct bin_attribute pccard_cis_attr = {
.attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
.size = 0x200,
.read = pccard_show_cis,
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index e86cd6b31773..6765beadea95 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -152,7 +152,7 @@ void pcmcia_cleanup_irq(struct pcmcia_socket *s);
int pcmcia_setup_irq(struct pcmcia_device *p_dev);
/* cistpl.c */
-extern struct bin_attribute pccard_cis_attr;
+extern const struct bin_attribute pccard_cis_attr;
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr,
u_int addr, u_int len, void *ptr);
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 70b089430fcc..9a4940e56e2f 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -380,11 +380,10 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
-static void pcc_interrupt_wrapper(u_long data)
+static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n");
pcc_interrupt(0, NULL);
- init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
@@ -758,9 +757,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
- poll_timer.function = pcc_interrupt_wrapper;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index e50bbf826188..c2239a7e383a 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -386,10 +386,9 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
-static void pcc_interrupt_wrapper(u_long data)
+static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pcc_interrupt(0, NULL);
- init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
@@ -729,9 +728,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
- poll_timer.function = pcc_interrupt_wrapper;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 2f490930430d..93a5c7423d80 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -144,6 +144,7 @@ int pcmcia_badge4_init(struct sa1111_dev *dev)
sa11xx_drv_pcmcia_add_one);
}
+#ifndef MODULE
static int __init pcmv_setup(char *s)
{
int v[4];
@@ -158,3 +159,4 @@ static int __init pcmv_setup(char *s)
}
__setup("pcmv=", pcmv_setup);
+#endif
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 3d95dffcff7a..5ef351f87bfe 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -63,11 +63,12 @@
#define IDX_IRQ_S1_READY_NINT (3)
#define IDX_IRQ_S1_CD_VALID (4)
#define IDX_IRQ_S1_BVD1_STSCHG (5)
+#define NUM_IRQS (6)
void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
{
struct sa1111_pcmcia_socket *s = to_skt(skt);
- unsigned long status = sa1111_readl(s->dev->mapbase + PCSR);
+ u32 status = readl_relaxed(s->dev->mapbase + PCSR);
switch (skt->nr) {
case 0:
@@ -95,7 +96,7 @@ void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_sta
int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
struct sa1111_pcmcia_socket *s = to_skt(skt);
- unsigned int pccr_skt_mask, pccr_set_mask, val;
+ u32 pccr_skt_mask, pccr_set_mask, val;
unsigned long flags;
switch (skt->nr) {
@@ -123,10 +124,10 @@ int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT;
local_irq_save(flags);
- val = sa1111_readl(s->dev->mapbase + PCCR);
+ val = readl_relaxed(s->dev->mapbase + PCCR);
val &= ~pccr_skt_mask;
val |= pccr_set_mask & pccr_skt_mask;
- sa1111_writel(val, s->dev->mapbase + PCCR);
+ writel_relaxed(val, s->dev->mapbase + PCCR);
local_irq_restore(flags);
return 0;
@@ -137,12 +138,18 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
{
struct sa1111_pcmcia_socket *s;
struct clk *clk;
- int i, ret = 0;
+ int i, ret = 0, irqs[NUM_IRQS];
clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
+ for (i = 0; i < NUM_IRQS; i++) {
+ irqs[i] = sa1111_get_irq(dev, i);
+ if (irqs[i] <= 0)
+ return irqs[i] ? : -ENXIO;
+ }
+
ops->socket_state = sa1111_pcmcia_socket_state;
for (i = 0; i < ops->nr; i++) {
@@ -156,16 +163,16 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
s->dev = dev;
if (s->soc.nr) {
- s->soc.socket.pci_irq = dev->irq[IDX_IRQ_S1_READY_NINT];
- s->soc.stat[SOC_STAT_CD].irq = dev->irq[IDX_IRQ_S1_CD_VALID];
+ s->soc.socket.pci_irq = irqs[IDX_IRQ_S1_READY_NINT];
+ s->soc.stat[SOC_STAT_CD].irq = irqs[IDX_IRQ_S1_CD_VALID];
s->soc.stat[SOC_STAT_CD].name = "SA1111 CF card detect";
- s->soc.stat[SOC_STAT_BVD1].irq = dev->irq[IDX_IRQ_S1_BVD1_STSCHG];
+ s->soc.stat[SOC_STAT_BVD1].irq = irqs[IDX_IRQ_S1_BVD1_STSCHG];
s->soc.stat[SOC_STAT_BVD1].name = "SA1111 CF BVD1";
} else {
- s->soc.socket.pci_irq = dev->irq[IDX_IRQ_S0_READY_NINT];
- s->soc.stat[SOC_STAT_CD].irq = dev->irq[IDX_IRQ_S0_CD_VALID];
+ s->soc.socket.pci_irq = irqs[IDX_IRQ_S0_READY_NINT];
+ s->soc.stat[SOC_STAT_CD].irq = irqs[IDX_IRQ_S0_CD_VALID];
s->soc.stat[SOC_STAT_CD].name = "SA1111 PCMCIA card detect";
- s->soc.stat[SOC_STAT_BVD1].irq = dev->irq[IDX_IRQ_S0_BVD1_STSCHG];
+ s->soc.stat[SOC_STAT_BVD1].irq = irqs[IDX_IRQ_S0_BVD1_STSCHG];
s->soc.stat[SOC_STAT_BVD1].name = "SA1111 PCMCIA BVD1";
}
@@ -201,8 +208,8 @@ static int pcmcia_probe(struct sa1111_dev *dev)
/*
* Initialise the suspend state.
*/
- sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
- sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
+ writel_relaxed(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
+ writel_relaxed(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
ret = -ENODEV;
#ifdef CONFIG_SA1100_BADGE4
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 82cd8b08d71f..4571cc098b76 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -2,11 +2,10 @@
# PINCTRL infrastructure and drivers
#
-config PINCTRL
- bool
+menuconfig PINCTRL
+ bool "Pin controllers"
-menu "Pin controllers"
- depends on PINCTRL
+if PINCTRL
config GENERIC_PINCTRL_GROUPS
bool
@@ -33,7 +32,8 @@ config DEBUG_PINCTRL
config PINCTRL_ADI2
bool "ADI pin controller driver"
- depends on BLACKFIN
+ depends on (BF54x || BF60x)
+ depends on !GPIO_ADI
select PINMUX
select IRQ_DOMAIN
help
@@ -98,7 +98,8 @@ config PINCTRL_AT91PIO4
config PINCTRL_AMD
tristate "AMD GPIO pin control"
- depends on GPIOLIB
+ depends on HAS_IOMEM
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
select PINCONF
@@ -152,12 +153,14 @@ config PINCTRL_GEMINI
depends on ARCH_GEMINI
default ARCH_GEMINI
select PINMUX
+ select GENERIC_PINCONF
select MFD_SYSCON
config PINCTRL_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
depends on SPI_MASTER || I2C
depends on I2C || I2C=n
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select REGMAP_I2C if I2C
select REGMAP_SPI if SPI_MASTER
@@ -168,16 +171,6 @@ config PINCTRL_MCP23S08
This provides a GPIO interface supporting inputs and outputs.
The I2C versions of the chips can be used as interrupt-controller.
-config PINCTRL_MESON
- bool
- depends on OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB
- select OF_GPIO
- select REGMAP_MMIO
-
config PINCTRL_OXNAS
bool
depends on OF
@@ -210,6 +203,7 @@ config PINCTRL_RZA1
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
+ depends on HAS_IOMEM
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
@@ -226,10 +220,11 @@ config PINCTRL_SIRF
config PINCTRL_SX150X
bool "Semtech SX150x I2C GPIO expander pinctrl driver"
- depends on GPIOLIB && I2C=y
+ depends on I2C=y
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select REGMAP
help
@@ -369,6 +364,7 @@ source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
source "drivers/pinctrl/zte/Kconfig"
+source "drivers/pinctrl/meson/Kconfig"
config PINCTRL_XWAY
bool
@@ -380,4 +376,4 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
-endmenu
+endif
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index b93f62dc8733..b70058caee50 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -311,7 +311,7 @@ static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset)
if (!chip->pinmux_is_supported)
return 0;
- return pinctrl_request_gpio(gpio);
+ return pinctrl_gpio_request(gpio);
}
static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
@@ -322,7 +322,7 @@ static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
if (!chip->pinmux_is_supported)
return;
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
}
static int iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 1cfe45fd391f..e67ae52023ad 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -275,23 +275,6 @@ static struct irq_chip nsp_gpio_irq_chip = {
.irq_set_type = nsp_gpio_irq_set_type,
};
-/*
- * Request the nsp IOMUX pinmux controller to mux individual pins to GPIO
- */
-static int nsp_gpio_request(struct gpio_chip *gc, unsigned offset)
-{
- unsigned gpio = gc->base + offset;
-
- return pinctrl_request_gpio(gpio);
-}
-
-static void nsp_gpio_free(struct gpio_chip *gc, unsigned offset)
-{
- unsigned gpio = gc->base + offset;
-
- pinctrl_free_gpio(gpio);
-}
-
static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
@@ -670,8 +653,8 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->label = dev_name(dev);
gc->parent = dev;
gc->of_node = dev->of_node;
- gc->request = nsp_gpio_request;
- gc->free = nsp_gpio_free;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
gc->set = nsp_gpio_set;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 56fbe4c3e800..4c8d5b23e4d0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -733,14 +733,14 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
}
/**
- * pinctrl_request_gpio() - request a single pin to be used as GPIO
+ * pinctrl_gpio_request() - request a single pin to be used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_request() semantics, platforms and individual drivers
* shall *NOT* request GPIO pins to be muxed in.
*/
-int pinctrl_request_gpio(unsigned gpio)
+int pinctrl_gpio_request(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
@@ -765,17 +765,17 @@ int pinctrl_request_gpio(unsigned gpio)
return ret;
}
-EXPORT_SYMBOL_GPL(pinctrl_request_gpio);
+EXPORT_SYMBOL_GPL(pinctrl_gpio_request);
/**
- * pinctrl_free_gpio() - free control on a single pin, currently used as GPIO
+ * pinctrl_gpio_free() - free control on a single pin, currently used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_free() semantics, platforms and individual drivers
* shall *NOT* request GPIO pins to be muxed out.
*/
-void pinctrl_free_gpio(unsigned gpio)
+void pinctrl_gpio_free(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
@@ -795,7 +795,7 @@ void pinctrl_free_gpio(unsigned gpio)
mutex_unlock(&pctldev->mutex);
}
-EXPORT_SYMBOL_GPL(pinctrl_free_gpio);
+EXPORT_SYMBOL_GPL(pinctrl_gpio_free);
static int pinctrl_gpio_direction(unsigned gpio, bool input)
{
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 7880c3adc450..8cf2eba17c8c 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -154,7 +154,7 @@ struct pinctrl_setting {
* or pin, and each of these will increment the @usecount.
* @mux_owner: The name of device that called pinctrl_get().
* @mux_setting: The most recent selected mux setting for this pin, if any.
- * @gpio_owner: If pinctrl_request_gpio() was called for this pin, this is
+ * @gpio_owner: If pinctrl_gpio_request() was called for this pin, this is
* the name of the GPIO that "owns" this pin.
*/
struct pin_desc {
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index f30720a752f3..4aea1b8504f7 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -5,7 +5,8 @@ if (X86 || COMPILE_TEST)
config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
- depends on GPIOLIB && ACPI
+ depends on ACPI
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
select PINCONF
@@ -65,6 +66,14 @@ config PINCTRL_CANNONLAKE
This pinctrl driver provides an interface that allows configuring
of Intel Cannon Lake PCH pins and using them as GPIOs.
+config PINCTRL_CEDARFORK
+ tristate "Intel Cedar Fork pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Cedar Fork PCH pins and using them as GPIOs.
+
config PINCTRL_DENVERTON
tristate "Intel Denverton pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 624d367caa09..fadfe3ea2b04 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
+obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
new file mode 100644
index 000000000000..59216b0533d9
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -0,0 +1,375 @@
+/*
+ * Intel Cedar Fork PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define CDF_PAD_OWN 0x020
+#define CDF_PADCFGLOCK 0x0c0
+#define CDF_HOSTSW_OWN 0x120
+#define CDF_GPI_IS 0x200
+#define CDF_GPI_IE 0x230
+
+#define CDF_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define CDF_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = CDF_PAD_OWN, \
+ .padcfglock_offset = CDF_PADCFGLOCK, \
+ .hostown_offset = CDF_HOSTSW_OWN, \
+ .is_offset = CDF_GPI_IS, \
+ .ie_offset = CDF_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Cedar Fork PCH */
+static const struct pinctrl_pin_desc cdf_pins[] = {
+ /* WEST2 */
+ PINCTRL_PIN(0, "GBE_SDP_TIMESYNC0_S2N"),
+ PINCTRL_PIN(1, "GBE_SDP_TIMESYNC1_S2N"),
+ PINCTRL_PIN(2, "GBE_SDP_TIMESYNC2_S2N"),
+ PINCTRL_PIN(3, "GBE_SDP_TIMESYNC3_S2N"),
+ PINCTRL_PIN(4, "GBE0_I2C_CLK"),
+ PINCTRL_PIN(5, "GBE0_I2C_DATA"),
+ PINCTRL_PIN(6, "GBE1_I2C_CLK"),
+ PINCTRL_PIN(7, "GBE1_I2C_DATA"),
+ PINCTRL_PIN(8, "GBE2_I2C_CLK"),
+ PINCTRL_PIN(9, "GBE2_I2C_DATA"),
+ PINCTRL_PIN(10, "GBE3_I2C_CLK"),
+ PINCTRL_PIN(11, "GBE3_I2C_DATA"),
+ PINCTRL_PIN(12, "GBE0_LED0"),
+ PINCTRL_PIN(13, "GBE0_LED1"),
+ PINCTRL_PIN(14, "GBE0_LED2"),
+ PINCTRL_PIN(15, "GBE1_LED0"),
+ PINCTRL_PIN(16, "GBE1_LED1"),
+ PINCTRL_PIN(17, "GBE1_LED2"),
+ PINCTRL_PIN(18, "GBE2_LED0"),
+ PINCTRL_PIN(19, "GBE2_LED1"),
+ PINCTRL_PIN(20, "GBE2_LED2"),
+ PINCTRL_PIN(21, "GBE3_LED0"),
+ PINCTRL_PIN(22, "GBE3_LED1"),
+ PINCTRL_PIN(23, "GBE3_LED2"),
+ /* WEST3 */
+ PINCTRL_PIN(24, "NCSI_RXD0"),
+ PINCTRL_PIN(25, "NCSI_CLK_IN"),
+ PINCTRL_PIN(26, "NCSI_RXD1"),
+ PINCTRL_PIN(27, "NCSI_CRS_DV"),
+ PINCTRL_PIN(28, "NCSI_ARB_IN"),
+ PINCTRL_PIN(29, "NCSI_TX_EN"),
+ PINCTRL_PIN(30, "NCSI_TXD0"),
+ PINCTRL_PIN(31, "NCSI_TXD1"),
+ PINCTRL_PIN(32, "NCSI_ARB_OUT"),
+ PINCTRL_PIN(33, "GBE_SMB_CLK"),
+ PINCTRL_PIN(34, "GBE_SMB_DATA"),
+ PINCTRL_PIN(35, "GBE_SMB_ALRT_N"),
+ PINCTRL_PIN(36, "THERMTRIP_N"),
+ PINCTRL_PIN(37, "PCHHOT_N"),
+ PINCTRL_PIN(38, "ERROR0_N"),
+ PINCTRL_PIN(39, "ERROR1_N"),
+ PINCTRL_PIN(40, "ERROR2_N"),
+ PINCTRL_PIN(41, "MSMI_N"),
+ PINCTRL_PIN(42, "CATERR_N"),
+ PINCTRL_PIN(43, "MEMTRIP_N"),
+ PINCTRL_PIN(44, "UART0_RXD"),
+ PINCTRL_PIN(45, "UART0_TXD"),
+ PINCTRL_PIN(46, "UART1_RXD"),
+ PINCTRL_PIN(47, "UART1_TXD"),
+ /* WEST01 */
+ PINCTRL_PIN(48, "GBE_GPIO13"),
+ PINCTRL_PIN(49, "AUX_PWR"),
+ PINCTRL_PIN(50, "CPU_GP_2"),
+ PINCTRL_PIN(51, "CPU_GP_3"),
+ PINCTRL_PIN(52, "FAN_PWM_0"),
+ PINCTRL_PIN(53, "FAN_PWM_1"),
+ PINCTRL_PIN(54, "FAN_PWM_2"),
+ PINCTRL_PIN(55, "FAN_PWM_3"),
+ PINCTRL_PIN(56, "FAN_TACH_0"),
+ PINCTRL_PIN(57, "FAN_TACH_1"),
+ PINCTRL_PIN(58, "FAN_TACH_2"),
+ PINCTRL_PIN(59, "FAN_TACH_3"),
+ PINCTRL_PIN(60, "ME_SMB0_CLK"),
+ PINCTRL_PIN(61, "ME_SMB0_DATA"),
+ PINCTRL_PIN(62, "ME_SMB0_ALRT_N"),
+ PINCTRL_PIN(63, "ME_SMB1_CLK"),
+ PINCTRL_PIN(64, "ME_SMB1_DATA"),
+ PINCTRL_PIN(65, "ME_SMB1_ALRT_N"),
+ PINCTRL_PIN(66, "ME_SMB2_CLK"),
+ PINCTRL_PIN(67, "ME_SMB2_DATA"),
+ PINCTRL_PIN(68, "ME_SMB2_ALRT_N"),
+ PINCTRL_PIN(69, "GBE_MNG_I2C_CLK"),
+ PINCTRL_PIN(70, "GBE_MNG_I2C_DATA"),
+ /* WEST5 */
+ PINCTRL_PIN(71, "IE_UART_RXD"),
+ PINCTRL_PIN(72, "IE_UART_TXD"),
+ PINCTRL_PIN(73, "VPP_SMB_CLK"),
+ PINCTRL_PIN(74, "VPP_SMB_DATA"),
+ PINCTRL_PIN(75, "VPP_SMB_ALRT_N"),
+ PINCTRL_PIN(76, "PCIE_CLKREQ0_N"),
+ PINCTRL_PIN(77, "PCIE_CLKREQ1_N"),
+ PINCTRL_PIN(78, "PCIE_CLKREQ2_N"),
+ PINCTRL_PIN(79, "PCIE_CLKREQ3_N"),
+ PINCTRL_PIN(80, "PCIE_CLKREQ4_N"),
+ PINCTRL_PIN(81, "PCIE_CLKREQ5_N"),
+ PINCTRL_PIN(82, "PCIE_CLKREQ6_N"),
+ PINCTRL_PIN(83, "PCIE_CLKREQ7_N"),
+ PINCTRL_PIN(84, "PCIE_CLKREQ8_N"),
+ PINCTRL_PIN(85, "PCIE_CLKREQ9_N"),
+ PINCTRL_PIN(86, "FLEX_CLK_SE0"),
+ PINCTRL_PIN(87, "FLEX_CLK_SE1"),
+ PINCTRL_PIN(88, "FLEX_CLK1_50"),
+ PINCTRL_PIN(89, "FLEX_CLK2_50"),
+ PINCTRL_PIN(90, "FLEX_CLK_125"),
+ /* WESTC */
+ PINCTRL_PIN(91, "TCK_PCH"),
+ PINCTRL_PIN(92, "JTAGX_PCH"),
+ PINCTRL_PIN(93, "TRST_N_PCH"),
+ PINCTRL_PIN(94, "TMS_PCH"),
+ PINCTRL_PIN(95, "TDI_PCH"),
+ PINCTRL_PIN(96, "TDO_PCH"),
+ /* WESTC_DFX */
+ PINCTRL_PIN(97, "CX_PRDY_N"),
+ PINCTRL_PIN(98, "CX_PREQ_N"),
+ PINCTRL_PIN(99, "CPU_FBREAK_OUT_N"),
+ PINCTRL_PIN(100, "TRIGGER0_N"),
+ PINCTRL_PIN(101, "TRIGGER1_N"),
+ /* WESTA */
+ PINCTRL_PIN(102, "DBG_PTI_CLK0"),
+ PINCTRL_PIN(103, "DBG_PTI_CLK3"),
+ PINCTRL_PIN(104, "DBG_PTI_DATA0"),
+ PINCTRL_PIN(105, "DBG_PTI_DATA1"),
+ PINCTRL_PIN(106, "DBG_PTI_DATA2"),
+ PINCTRL_PIN(107, "DBG_PTI_DATA3"),
+ PINCTRL_PIN(108, "DBG_PTI_DATA4"),
+ PINCTRL_PIN(109, "DBG_PTI_DATA5"),
+ PINCTRL_PIN(110, "DBG_PTI_DATA6"),
+ PINCTRL_PIN(111, "DBG_PTI_DATA7"),
+ /* WESTB */
+ PINCTRL_PIN(112, "DBG_PTI_DATA8"),
+ PINCTRL_PIN(113, "DBG_PTI_DATA9"),
+ PINCTRL_PIN(114, "DBG_PTI_DATA10"),
+ PINCTRL_PIN(115, "DBG_PTI_DATA11"),
+ PINCTRL_PIN(116, "DBG_PTI_DATA12"),
+ PINCTRL_PIN(117, "DBG_PTI_DATA13"),
+ PINCTRL_PIN(118, "DBG_PTI_DATA14"),
+ PINCTRL_PIN(119, "DBG_PTI_DATA15"),
+ PINCTRL_PIN(120, "DBG_SPARE0"),
+ PINCTRL_PIN(121, "DBG_SPARE1"),
+ PINCTRL_PIN(122, "DBG_SPARE2"),
+ PINCTRL_PIN(123, "DBG_SPARE3"),
+ /* WESTD */
+ PINCTRL_PIN(124, "CPU_PWR_GOOD"),
+ PINCTRL_PIN(125, "PLTRST_CPU_N"),
+ PINCTRL_PIN(126, "NAC_RESET_NAC_N"),
+ PINCTRL_PIN(127, "PCH_SBLINK_RX"),
+ PINCTRL_PIN(128, "PCH_SBLINK_TX"),
+ PINCTRL_PIN(129, "PMSYNC_CLK"),
+ PINCTRL_PIN(130, "CPU_ERR0_N"),
+ PINCTRL_PIN(131, "CPU_ERR1_N"),
+ PINCTRL_PIN(132, "CPU_ERR2_N"),
+ PINCTRL_PIN(133, "CPU_THERMTRIP_N"),
+ PINCTRL_PIN(134, "CPU_MSMI_N"),
+ PINCTRL_PIN(135, "CPU_CATERR_N"),
+ PINCTRL_PIN(136, "CPU_MEMTRIP_N"),
+ PINCTRL_PIN(137, "NAC_GR_N"),
+ PINCTRL_PIN(138, "NAC_XTAL_VALID"),
+ PINCTRL_PIN(139, "NAC_WAKE_N"),
+ PINCTRL_PIN(140, "NAC_SBLINK_CLK_S2N"),
+ PINCTRL_PIN(141, "NAC_SBLINK_N2S"),
+ PINCTRL_PIN(142, "NAC_SBLINK_S2N"),
+ PINCTRL_PIN(143, "NAC_SBLINK_CLK_N2S"),
+ /* WESTD_PECI */
+ PINCTRL_PIN(144, "ME_PECI"),
+ /* WESTF */
+ PINCTRL_PIN(145, "NAC_RMII_CLK"),
+ PINCTRL_PIN(146, "NAC_RGMII_CLK"),
+ PINCTRL_PIN(147, "NAC_SPARE0"),
+ PINCTRL_PIN(148, "NAC_SPARE1"),
+ PINCTRL_PIN(149, "NAC_SPARE2"),
+ PINCTRL_PIN(150, "NAC_INIT_SX_WAKE_N"),
+ PINCTRL_PIN(151, "NAC_GBE_GPIO0_S2N"),
+ PINCTRL_PIN(152, "NAC_GBE_GPIO1_S2N"),
+ PINCTRL_PIN(153, "NAC_GBE_GPIO2_S2N"),
+ PINCTRL_PIN(154, "NAC_GBE_GPIO3_S2N"),
+ PINCTRL_PIN(155, "NAC_NCSI_RXD0"),
+ PINCTRL_PIN(156, "NAC_NCSI_CLK_IN"),
+ PINCTRL_PIN(157, "NAC_NCSI_RXD1"),
+ PINCTRL_PIN(158, "NAC_NCSI_CRS_DV"),
+ PINCTRL_PIN(159, "NAC_NCSI_ARB_IN"),
+ PINCTRL_PIN(160, "NAC_NCSI_TX_EN"),
+ PINCTRL_PIN(161, "NAC_NCSI_TXD0"),
+ PINCTRL_PIN(162, "NAC_NCSI_TXD1"),
+ PINCTRL_PIN(163, "NAC_NCSI_ARB_OUT"),
+ PINCTRL_PIN(164, "NAC_NCSI_OE_N"),
+ PINCTRL_PIN(165, "NAC_GBE_SMB_CLK"),
+ PINCTRL_PIN(166, "NAC_GBE_SMB_DATA"),
+ PINCTRL_PIN(167, "NAC_GBE_SMB_ALRT_N"),
+ /* EAST2 */
+ PINCTRL_PIN(168, "USB_OC0_N"),
+ PINCTRL_PIN(169, "GBE_GPIO0"),
+ PINCTRL_PIN(170, "GBE_GPIO1"),
+ PINCTRL_PIN(171, "GBE_GPIO2"),
+ PINCTRL_PIN(172, "GBE_GPIO3"),
+ PINCTRL_PIN(173, "GBE_GPIO4"),
+ PINCTRL_PIN(174, "GBE_GPIO5"),
+ PINCTRL_PIN(175, "GBE_GPIO6"),
+ PINCTRL_PIN(176, "GBE_GPIO7"),
+ PINCTRL_PIN(177, "GBE_GPIO8"),
+ PINCTRL_PIN(178, "GBE_GPIO9"),
+ PINCTRL_PIN(179, "GBE_GPIO10"),
+ PINCTRL_PIN(180, "GBE_GPIO11"),
+ PINCTRL_PIN(181, "GBE_GPIO12"),
+ PINCTRL_PIN(182, "SATA0_LED_N"),
+ PINCTRL_PIN(183, "SATA1_LED_N"),
+ PINCTRL_PIN(184, "SATA_PDETECT0"),
+ PINCTRL_PIN(185, "SATA_PDETECT1"),
+ PINCTRL_PIN(186, "SATA0_SDOUT"),
+ PINCTRL_PIN(187, "SATA1_SDOUT"),
+ PINCTRL_PIN(188, "SATA2_LED_N"),
+ PINCTRL_PIN(189, "SATA_PDETECT2"),
+ PINCTRL_PIN(190, "SATA2_SDOUT"),
+ /* EAST3 */
+ PINCTRL_PIN(191, "ESPI_IO0"),
+ PINCTRL_PIN(192, "ESPI_IO1"),
+ PINCTRL_PIN(193, "ESPI_IO2"),
+ PINCTRL_PIN(194, "ESPI_IO3"),
+ PINCTRL_PIN(195, "ESPI_CLK"),
+ PINCTRL_PIN(196, "ESPI_RST_N"),
+ PINCTRL_PIN(197, "ESPI_CS0_N"),
+ PINCTRL_PIN(198, "ESPI_ALRT0_N"),
+ PINCTRL_PIN(199, "ESPI_CS1_N"),
+ PINCTRL_PIN(200, "ESPI_ALRT1_N"),
+ PINCTRL_PIN(201, "ESPI_CLK_LOOPBK"),
+ /* EAST0 */
+ PINCTRL_PIN(202, "SPI_CS0_N"),
+ PINCTRL_PIN(203, "SPI_CS1_N"),
+ PINCTRL_PIN(204, "SPI_MOSI_IO0"),
+ PINCTRL_PIN(205, "SPI_MISO_IO1"),
+ PINCTRL_PIN(206, "SPI_IO2"),
+ PINCTRL_PIN(207, "SPI_IO3"),
+ PINCTRL_PIN(208, "SPI_CLK"),
+ PINCTRL_PIN(209, "SPI_CLK_LOOPBK"),
+ PINCTRL_PIN(210, "SUSPWRDNACK"),
+ PINCTRL_PIN(211, "PMU_SUSCLK"),
+ PINCTRL_PIN(212, "ADR_COMPLETE"),
+ PINCTRL_PIN(213, "ADR_TRIGGER_N"),
+ PINCTRL_PIN(214, "PMU_SLP_S45_N"),
+ PINCTRL_PIN(215, "PMU_SLP_S3_N"),
+ PINCTRL_PIN(216, "PMU_WAKE_N"),
+ PINCTRL_PIN(217, "PMU_PWRBTN_N"),
+ PINCTRL_PIN(218, "PMU_RESETBUTTON_N"),
+ PINCTRL_PIN(219, "PMU_PLTRST_N"),
+ PINCTRL_PIN(220, "SUS_STAT_N"),
+ PINCTRL_PIN(221, "PMU_I2C_CLK"),
+ PINCTRL_PIN(222, "PMU_I2C_DATA"),
+ PINCTRL_PIN(223, "PECI_SMB_CLK"),
+ PINCTRL_PIN(224, "PECI_SMB_DATA"),
+ PINCTRL_PIN(225, "PECI_SMB_ALRT_N"),
+ /* EMMC */
+ PINCTRL_PIN(226, "EMMC_CMD"),
+ PINCTRL_PIN(227, "EMMC_STROBE"),
+ PINCTRL_PIN(228, "EMMC_CLK"),
+ PINCTRL_PIN(229, "EMMC_D0"),
+ PINCTRL_PIN(230, "EMMC_D1"),
+ PINCTRL_PIN(231, "EMMC_D2"),
+ PINCTRL_PIN(232, "EMMC_D3"),
+ PINCTRL_PIN(233, "EMMC_D4"),
+ PINCTRL_PIN(234, "EMMC_D5"),
+ PINCTRL_PIN(235, "EMMC_D6"),
+ PINCTRL_PIN(236, "EMMC_D7"),
+};
+
+static const struct intel_padgroup cdf_community0_gpps[] = {
+ CDF_GPP(0, 0, 23), /* WEST2 */
+ CDF_GPP(1, 24, 47), /* WEST3 */
+ CDF_GPP(2, 48, 70), /* WEST01 */
+ CDF_GPP(3, 71, 90), /* WEST5 */
+ CDF_GPP(4, 91, 96), /* WESTC */
+ CDF_GPP(5, 97, 101), /* WESTC_DFX */
+ CDF_GPP(6, 102, 111), /* WESTA */
+ CDF_GPP(7, 112, 123), /* WESTB */
+ CDF_GPP(8, 124, 143), /* WESTD */
+ CDF_GPP(9, 144, 144), /* WESTD_PECI */
+ CDF_GPP(10, 145, 167), /* WESTF */
+};
+
+static const struct intel_padgroup cdf_community1_gpps[] = {
+ CDF_GPP(0, 168, 190), /* EAST2 */
+ CDF_GPP(1, 191, 201), /* EAST3 */
+ CDF_GPP(2, 202, 225), /* EAST0 */
+ CDF_GPP(3, 226, 236), /* EMMC */
+};
+
+static const struct intel_community cdf_communities[] = {
+ CDF_COMMUNITY(0, 0, 167, cdf_community0_gpps), /* West */
+ CDF_COMMUNITY(1, 168, 236, cdf_community1_gpps), /* East */
+};
+
+static const struct intel_pinctrl_soc_data cdf_soc_data = {
+ .pins = cdf_pins,
+ .npins = ARRAY_SIZE(cdf_pins),
+ .communities = cdf_communities,
+ .ncommunities = ARRAY_SIZE(cdf_communities),
+};
+
+static int cdf_pinctrl_probe(struct platform_device *pdev)
+{
+ return intel_pinctrl_probe(pdev, &cdf_soc_data);
+}
+
+static const struct dev_pm_ops cdf_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static const struct acpi_device_id cdf_pinctrl_acpi_match[] = {
+ { "INTC3001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cdf_pinctrl_acpi_match);
+
+static struct platform_driver cdf_pinctrl_driver = {
+ .probe = cdf_pinctrl_probe,
+ .driver = {
+ .name = "cedarfork-pinctrl",
+ .acpi_match_table = cdf_pinctrl_acpi_match,
+ .pm = &cdf_pinctrl_pm_ops,
+ },
+};
+
+static int __init cdf_pinctrl_init(void)
+{
+ return platform_driver_register(&cdf_pinctrl_driver);
+}
+subsys_initcall(cdf_pinctrl_init);
+
+static void __exit cdf_pinctrl_exit(void)
+{
+ platform_driver_unregister(&cdf_pinctrl_driver);
+}
+module_exit(cdf_pinctrl_exit);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Cedar Fork PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index bc2bb6b328ab..bdedb6325c72 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -491,7 +491,7 @@ static const struct chv_community north_community = {
.ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
.ngpios = ARRAY_SIZE(north_pins),
/*
- * North community can benerate GPIO interrupts only for the first
+ * North community can generate GPIO interrupts only for the first
* 8 interrupts. The upper half (8-15) can only be used to trigger
* GPEs.
*/
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index 4500880240f2..6572550cfe78 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 };
static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 };
static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 };
static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 };
-static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 };
+static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 };
static const unsigned int dnv_emmc_pins[] = {
142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
};
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index ffda27bfd133..12a1af45acb9 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -30,8 +30,6 @@
#define PADBAR 0x00c
#define GPI_IS 0x100
-#define GPI_GPE_STS 0x140
-#define GPI_GPE_EN 0x160
#define PADOWN_BITS 4
#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
@@ -818,7 +816,7 @@ static void intel_gpio_irq_ack(struct irq_data *d)
community = intel_get_community(pctrl, pin);
if (community) {
const struct intel_padgroup *padgrp;
- unsigned gpp, gpp_offset;
+ unsigned gpp, gpp_offset, is_offset;
padgrp = intel_community_get_padgroup(community, pin);
if (!padgrp)
@@ -826,9 +824,10 @@ static void intel_gpio_irq_ack(struct irq_data *d)
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
+ is_offset = community->is_offset + gpp * 4;
raw_spin_lock(&pctrl->lock);
- writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ writel(BIT(gpp_offset), community->regs + is_offset);
raw_spin_unlock(&pctrl->lock);
}
}
@@ -843,7 +842,7 @@ static void intel_gpio_irq_enable(struct irq_data *d)
community = intel_get_community(pctrl, pin);
if (community) {
const struct intel_padgroup *padgrp;
- unsigned gpp, gpp_offset;
+ unsigned gpp, gpp_offset, is_offset;
unsigned long flags;
u32 value;
@@ -853,10 +852,11 @@ static void intel_gpio_irq_enable(struct irq_data *d)
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
+ is_offset = community->is_offset + gpp * 4;
raw_spin_lock_irqsave(&pctrl->lock, flags);
/* Clear interrupt status first to avoid unexpected interrupt */
- writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ writel(BIT(gpp_offset), community->regs + is_offset);
value = readl(community->regs + community->ie_offset + gpp * 4);
value |= BIT(gpp_offset);
@@ -991,7 +991,8 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
const struct intel_padgroup *padgrp = &community->gpps[gpp];
unsigned long pending, enabled, gpp_offset;
- pending = readl(community->regs + GPI_IS + padgrp->reg_num * 4);
+ pending = readl(community->regs + community->is_offset +
+ padgrp->reg_num * 4);
enabled = readl(community->regs + community->ie_offset +
padgrp->reg_num * 4);
@@ -1241,6 +1242,9 @@ int intel_pinctrl_probe(struct platform_device *pdev,
community->regs = regs;
community->pad_regs = regs + padbar;
+ if (!community->is_offset)
+ community->is_offset = GPI_IS;
+
ret = intel_pinctrl_add_padgroups(pctrl, community);
if (ret)
return ret;
@@ -1356,7 +1360,7 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
for (gpp = 0; gpp < community->ngpps; gpp++) {
/* Mask and clear all interrupts */
writel(0, base + community->ie_offset + gpp * 4);
- writel(0xffff, base + GPI_IS + gpp * 4);
+ writel(0xffff, base + community->is_offset + gpp * 4);
}
}
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 7fdb07753c2d..13b0bd6eb2a2 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -73,6 +73,8 @@ struct intel_padgroup {
* @hostown_offset: Register offset of HOSTSW_OWN from @regs. If %0 then it
* is assumed that the host owns the pin (rather than
* ACPI).
+ * @is_offset: Register offset of GPI_IS from @regs. If %0 then uses the
+ * default (%0x100).
* @ie_offset: Register offset of GPI_IE from @regs.
* @pin_base: Starting pin of pins in this community
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
@@ -98,6 +100,7 @@ struct intel_community {
unsigned padown_offset;
unsigned padcfglock_offset;
unsigned hostown_offset;
+ unsigned is_offset;
unsigned ie_offset;
unsigned pin_base;
unsigned gpp_size;
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
new file mode 100644
index 000000000000..1a51778759ea
--- /dev/null
+++ b/drivers/pinctrl/meson/Kconfig
@@ -0,0 +1,41 @@
+menuconfig PINCTRL_MESON
+ bool "Amlogic SoC pinctrl drivers"
+ depends on ARCH_MESON
+ depends on OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select REGMAP_MMIO
+
+if PINCTRL_MESON
+
+config PINCTRL_MESON8
+ bool "Meson 8 SoC pinctrl driver"
+ depends on ARM
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON8B
+ bool "Meson 8b SoC pinctrl driver"
+ depends on ARM
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON_GXBB
+ bool "Meson gxbb SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON_GXL
+ bool "Meson gxl SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON8_PMX
+ bool
+
+endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index 27c5b5126008..cbd47bb74549 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -1,3 +1,6 @@
-obj-y += pinctrl-meson8.o pinctrl-meson8b.o
-obj-y += pinctrl-meson-gxbb.o pinctrl-meson-gxl.o
-obj-y += pinctrl-meson.o
+obj-$(CONFIG_PINCTRL_MESON) += pinctrl-meson.o
+obj-$(CONFIG_PINCTRL_MESON8_PMX) += pinctrl-meson8-pmx.o
+obj-$(CONFIG_PINCTRL_MESON8) += pinctrl-meson8.o
+obj-$(CONFIG_PINCTRL_MESON8B) += pinctrl-meson8b.o
+obj-$(CONFIG_PINCTRL_MESON_GXBB) += pinctrl-meson-gxbb.o
+obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7bbc0d3cddcf..9079020259c5 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -14,418 +14,417 @@
#include <dt-bindings/gpio/meson-gxbb-gpio.h>
#include "pinctrl-meson.h"
-
-#define EE_OFF 14
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
- MESON_PIN(GPIOZ_0, EE_OFF),
- MESON_PIN(GPIOZ_1, EE_OFF),
- MESON_PIN(GPIOZ_2, EE_OFF),
- MESON_PIN(GPIOZ_3, EE_OFF),
- MESON_PIN(GPIOZ_4, EE_OFF),
- MESON_PIN(GPIOZ_5, EE_OFF),
- MESON_PIN(GPIOZ_6, EE_OFF),
- MESON_PIN(GPIOZ_7, EE_OFF),
- MESON_PIN(GPIOZ_8, EE_OFF),
- MESON_PIN(GPIOZ_9, EE_OFF),
- MESON_PIN(GPIOZ_10, EE_OFF),
- MESON_PIN(GPIOZ_11, EE_OFF),
- MESON_PIN(GPIOZ_12, EE_OFF),
- MESON_PIN(GPIOZ_13, EE_OFF),
- MESON_PIN(GPIOZ_14, EE_OFF),
- MESON_PIN(GPIOZ_15, EE_OFF),
-
- MESON_PIN(GPIOH_0, EE_OFF),
- MESON_PIN(GPIOH_1, EE_OFF),
- MESON_PIN(GPIOH_2, EE_OFF),
- MESON_PIN(GPIOH_3, EE_OFF),
-
- MESON_PIN(BOOT_0, EE_OFF),
- MESON_PIN(BOOT_1, EE_OFF),
- MESON_PIN(BOOT_2, EE_OFF),
- MESON_PIN(BOOT_3, EE_OFF),
- MESON_PIN(BOOT_4, EE_OFF),
- MESON_PIN(BOOT_5, EE_OFF),
- MESON_PIN(BOOT_6, EE_OFF),
- MESON_PIN(BOOT_7, EE_OFF),
- MESON_PIN(BOOT_8, EE_OFF),
- MESON_PIN(BOOT_9, EE_OFF),
- MESON_PIN(BOOT_10, EE_OFF),
- MESON_PIN(BOOT_11, EE_OFF),
- MESON_PIN(BOOT_12, EE_OFF),
- MESON_PIN(BOOT_13, EE_OFF),
- MESON_PIN(BOOT_14, EE_OFF),
- MESON_PIN(BOOT_15, EE_OFF),
- MESON_PIN(BOOT_16, EE_OFF),
- MESON_PIN(BOOT_17, EE_OFF),
-
- MESON_PIN(CARD_0, EE_OFF),
- MESON_PIN(CARD_1, EE_OFF),
- MESON_PIN(CARD_2, EE_OFF),
- MESON_PIN(CARD_3, EE_OFF),
- MESON_PIN(CARD_4, EE_OFF),
- MESON_PIN(CARD_5, EE_OFF),
- MESON_PIN(CARD_6, EE_OFF),
-
- MESON_PIN(GPIODV_0, EE_OFF),
- MESON_PIN(GPIODV_1, EE_OFF),
- MESON_PIN(GPIODV_2, EE_OFF),
- MESON_PIN(GPIODV_3, EE_OFF),
- MESON_PIN(GPIODV_4, EE_OFF),
- MESON_PIN(GPIODV_5, EE_OFF),
- MESON_PIN(GPIODV_6, EE_OFF),
- MESON_PIN(GPIODV_7, EE_OFF),
- MESON_PIN(GPIODV_8, EE_OFF),
- MESON_PIN(GPIODV_9, EE_OFF),
- MESON_PIN(GPIODV_10, EE_OFF),
- MESON_PIN(GPIODV_11, EE_OFF),
- MESON_PIN(GPIODV_12, EE_OFF),
- MESON_PIN(GPIODV_13, EE_OFF),
- MESON_PIN(GPIODV_14, EE_OFF),
- MESON_PIN(GPIODV_15, EE_OFF),
- MESON_PIN(GPIODV_16, EE_OFF),
- MESON_PIN(GPIODV_17, EE_OFF),
- MESON_PIN(GPIODV_18, EE_OFF),
- MESON_PIN(GPIODV_19, EE_OFF),
- MESON_PIN(GPIODV_20, EE_OFF),
- MESON_PIN(GPIODV_21, EE_OFF),
- MESON_PIN(GPIODV_22, EE_OFF),
- MESON_PIN(GPIODV_23, EE_OFF),
- MESON_PIN(GPIODV_24, EE_OFF),
- MESON_PIN(GPIODV_25, EE_OFF),
- MESON_PIN(GPIODV_26, EE_OFF),
- MESON_PIN(GPIODV_27, EE_OFF),
- MESON_PIN(GPIODV_28, EE_OFF),
- MESON_PIN(GPIODV_29, EE_OFF),
-
- MESON_PIN(GPIOY_0, EE_OFF),
- MESON_PIN(GPIOY_1, EE_OFF),
- MESON_PIN(GPIOY_2, EE_OFF),
- MESON_PIN(GPIOY_3, EE_OFF),
- MESON_PIN(GPIOY_4, EE_OFF),
- MESON_PIN(GPIOY_5, EE_OFF),
- MESON_PIN(GPIOY_6, EE_OFF),
- MESON_PIN(GPIOY_7, EE_OFF),
- MESON_PIN(GPIOY_8, EE_OFF),
- MESON_PIN(GPIOY_9, EE_OFF),
- MESON_PIN(GPIOY_10, EE_OFF),
- MESON_PIN(GPIOY_11, EE_OFF),
- MESON_PIN(GPIOY_12, EE_OFF),
- MESON_PIN(GPIOY_13, EE_OFF),
- MESON_PIN(GPIOY_14, EE_OFF),
- MESON_PIN(GPIOY_15, EE_OFF),
- MESON_PIN(GPIOY_16, EE_OFF),
-
- MESON_PIN(GPIOX_0, EE_OFF),
- MESON_PIN(GPIOX_1, EE_OFF),
- MESON_PIN(GPIOX_2, EE_OFF),
- MESON_PIN(GPIOX_3, EE_OFF),
- MESON_PIN(GPIOX_4, EE_OFF),
- MESON_PIN(GPIOX_5, EE_OFF),
- MESON_PIN(GPIOX_6, EE_OFF),
- MESON_PIN(GPIOX_7, EE_OFF),
- MESON_PIN(GPIOX_8, EE_OFF),
- MESON_PIN(GPIOX_9, EE_OFF),
- MESON_PIN(GPIOX_10, EE_OFF),
- MESON_PIN(GPIOX_11, EE_OFF),
- MESON_PIN(GPIOX_12, EE_OFF),
- MESON_PIN(GPIOX_13, EE_OFF),
- MESON_PIN(GPIOX_14, EE_OFF),
- MESON_PIN(GPIOX_15, EE_OFF),
- MESON_PIN(GPIOX_16, EE_OFF),
- MESON_PIN(GPIOX_17, EE_OFF),
- MESON_PIN(GPIOX_18, EE_OFF),
- MESON_PIN(GPIOX_19, EE_OFF),
- MESON_PIN(GPIOX_20, EE_OFF),
- MESON_PIN(GPIOX_21, EE_OFF),
-
- MESON_PIN(GPIOCLK_0, EE_OFF),
- MESON_PIN(GPIOCLK_1, EE_OFF),
- MESON_PIN(GPIOCLK_2, EE_OFF),
- MESON_PIN(GPIOCLK_3, EE_OFF),
-
- MESON_PIN(GPIO_TEST_N, EE_OFF),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(GPIOZ_15),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+ MESON_PIN(GPIOX_22),
+
+ MESON_PIN(GPIOCLK_0),
+ MESON_PIN(GPIOCLK_1),
+ MESON_PIN(GPIOCLK_2),
+ MESON_PIN(GPIOCLK_3),
};
static const unsigned int emmc_nand_d07_pins[] = {
- PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
- PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
- PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
-};
-static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int spi_sclk_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int spi_ss0_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int spi_miso_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int spi_mosi_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
-static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
-static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
-static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
-static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
-static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
-
-static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
-static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
-static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
-static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
-static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
-static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
-static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
-static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
-
-static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
-static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_13, EE_OFF) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_14, EE_OFF) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_12, EE_OFF) };
-
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
-
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-
-static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
-static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int pwm_a_x_pins[] = { PIN(GPIOX_6, EE_OFF) };
-static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, EE_OFF) };
-static const unsigned int pwm_b_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, EE_OFF) };
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_19, EE_OFF) };
-static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
-static const unsigned int pwm_f_y_pins[] = { PIN(GPIOY_15, EE_OFF) };
-
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
-
-static const unsigned int i2s_out_ch23_y_pins[] = { PIN(GPIOY_8, EE_OFF) };
-static const unsigned int i2s_out_ch45_y_pins[] = { PIN(GPIOY_9, EE_OFF) };
-static const unsigned int i2s_out_ch67_y_pins[] = { PIN(GPIOY_10, EE_OFF) };
-
-static const unsigned int spdif_out_y_pins[] = { PIN(GPIOY_12, EE_OFF) };
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7,
+};
+static const unsigned int emmc_clk_pins[] = { BOOT_8 };
+static const unsigned int emmc_cmd_pins[] = { BOOT_10 };
+static const unsigned int emmc_ds_pins[] = { BOOT_15 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_15 };
+
+static const unsigned int spi_sclk_pins[] = { GPIOZ_6 };
+static const unsigned int spi_ss0_pins[] = { GPIOZ_7 };
+static const unsigned int spi_miso_pins[] = { GPIOZ_12 };
+static const unsigned int spi_mosi_pins[] = { GPIOZ_13 };
+
+static const unsigned int sdcard_d0_pins[] = { CARD_1 };
+static const unsigned int sdcard_d1_pins[] = { CARD_0 };
+static const unsigned int sdcard_d2_pins[] = { CARD_5 };
+static const unsigned int sdcard_d3_pins[] = { CARD_4 };
+static const unsigned int sdcard_cmd_pins[] = { CARD_3 };
+static const unsigned int sdcard_clk_pins[] = { CARD_2 };
+
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
+static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
+
+static const unsigned int nand_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_wr_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
+static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
+static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
+static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
+
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+
+static const unsigned int i2c_sck_b_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_b_pins[] = { GPIODV_26 };
+
+static const unsigned int i2c_sck_c_pins[] = { GPIODV_29 };
+static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
+
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_13 };
+
+static const unsigned int pwm_a_x_pins[] = { GPIOX_6 };
+static const unsigned int pwm_a_y_pins[] = { GPIOY_16 };
+static const unsigned int pwm_b_pins[] = { GPIODV_29 };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+static const unsigned int pwm_e_pins[] = { GPIOX_19 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_7 };
+static const unsigned int pwm_f_y_pins[] = { GPIOY_15 };
+
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+
+static const unsigned int i2s_out_ch23_y_pins[] = { GPIOY_8 };
+static const unsigned int i2s_out_ch45_y_pins[] = { GPIOY_9 };
+static const unsigned int i2s_out_ch67_y_pins[] = { GPIOY_10 };
+
+static const unsigned int spdif_out_y_pins[] = { GPIOY_12 };
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, 0),
- MESON_PIN(GPIOAO_1, 0),
- MESON_PIN(GPIOAO_2, 0),
- MESON_PIN(GPIOAO_3, 0),
- MESON_PIN(GPIOAO_4, 0),
- MESON_PIN(GPIOAO_5, 0),
- MESON_PIN(GPIOAO_6, 0),
- MESON_PIN(GPIOAO_7, 0),
- MESON_PIN(GPIOAO_8, 0),
- MESON_PIN(GPIOAO_9, 0),
- MESON_PIN(GPIOAO_10, 0),
- MESON_PIN(GPIOAO_11, 0),
- MESON_PIN(GPIOAO_12, 0),
- MESON_PIN(GPIOAO_13, 0),
-};
-
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
-
-static const unsigned int i2c_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-
-static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
-
-static const unsigned int pwm_ao_a_3_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int pwm_ao_a_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int pwm_ao_a_12_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int i2s_out_ao_clk_pins[] = { PIN(GPIOAO_9, 0) };
-static const unsigned int i2s_out_lr_clk_pins[] = { PIN(GPIOAO_10, 0) };
-static const unsigned int i2s_out_ch01_ao_pins[] = { PIN(GPIOAO_11, 0) };
-static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int spdif_out_ao_13_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_12, 0) };
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
+
+ MESON_PIN(GPIO_TEST_N),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b_pins[] = { GPIOAO_5 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+
+static const unsigned int i2c_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int i2c_slave_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = {GPIOAO_5 };
+
+static const unsigned int remote_input_ao_pins[] = { GPIOAO_7 };
+
+static const unsigned int pwm_ao_a_3_pins[] = { GPIOAO_3 };
+static const unsigned int pwm_ao_a_6_pins[] = { GPIOAO_6 };
+static const unsigned int pwm_ao_a_12_pins[] = { GPIOAO_12 };
+static const unsigned int pwm_ao_b_pins[] = { GPIOAO_13 };
+
+static const unsigned int i2s_am_clk_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_out_ao_clk_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_out_lr_clk_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
+static const unsigned int i2s_out_ch23_ao_pins[] = { GPIOAO_12 };
+static const unsigned int i2s_out_ch45_ao_pins[] = { GPIOAO_13 };
+static const unsigned int i2s_out_ch67_ao_pins[] = { GPIO_TEST_N };
+
+static const unsigned int spdif_out_ao_6_pins[] = { GPIOAO_6 };
+static const unsigned int spdif_out_ao_13_pins[] = { GPIOAO_13 };
+
+static const unsigned int ao_cec_pins[] = { GPIOAO_12 };
+static const unsigned int ee_cec_pins[] = { GPIOAO_12 };
static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
- GPIO_GROUP(GPIOZ_0, EE_OFF),
- GPIO_GROUP(GPIOZ_1, EE_OFF),
- GPIO_GROUP(GPIOZ_2, EE_OFF),
- GPIO_GROUP(GPIOZ_3, EE_OFF),
- GPIO_GROUP(GPIOZ_4, EE_OFF),
- GPIO_GROUP(GPIOZ_5, EE_OFF),
- GPIO_GROUP(GPIOZ_6, EE_OFF),
- GPIO_GROUP(GPIOZ_7, EE_OFF),
- GPIO_GROUP(GPIOZ_8, EE_OFF),
- GPIO_GROUP(GPIOZ_9, EE_OFF),
- GPIO_GROUP(GPIOZ_10, EE_OFF),
- GPIO_GROUP(GPIOZ_11, EE_OFF),
- GPIO_GROUP(GPIOZ_12, EE_OFF),
- GPIO_GROUP(GPIOZ_13, EE_OFF),
- GPIO_GROUP(GPIOZ_14, EE_OFF),
- GPIO_GROUP(GPIOZ_15, EE_OFF),
-
- GPIO_GROUP(GPIOH_0, EE_OFF),
- GPIO_GROUP(GPIOH_1, EE_OFF),
- GPIO_GROUP(GPIOH_2, EE_OFF),
- GPIO_GROUP(GPIOH_3, EE_OFF),
-
- GPIO_GROUP(BOOT_0, EE_OFF),
- GPIO_GROUP(BOOT_1, EE_OFF),
- GPIO_GROUP(BOOT_2, EE_OFF),
- GPIO_GROUP(BOOT_3, EE_OFF),
- GPIO_GROUP(BOOT_4, EE_OFF),
- GPIO_GROUP(BOOT_5, EE_OFF),
- GPIO_GROUP(BOOT_6, EE_OFF),
- GPIO_GROUP(BOOT_7, EE_OFF),
- GPIO_GROUP(BOOT_8, EE_OFF),
- GPIO_GROUP(BOOT_9, EE_OFF),
- GPIO_GROUP(BOOT_10, EE_OFF),
- GPIO_GROUP(BOOT_11, EE_OFF),
- GPIO_GROUP(BOOT_12, EE_OFF),
- GPIO_GROUP(BOOT_13, EE_OFF),
- GPIO_GROUP(BOOT_14, EE_OFF),
- GPIO_GROUP(BOOT_15, EE_OFF),
- GPIO_GROUP(BOOT_16, EE_OFF),
- GPIO_GROUP(BOOT_17, EE_OFF),
-
- GPIO_GROUP(CARD_0, EE_OFF),
- GPIO_GROUP(CARD_1, EE_OFF),
- GPIO_GROUP(CARD_2, EE_OFF),
- GPIO_GROUP(CARD_3, EE_OFF),
- GPIO_GROUP(CARD_4, EE_OFF),
- GPIO_GROUP(CARD_5, EE_OFF),
- GPIO_GROUP(CARD_6, EE_OFF),
-
- GPIO_GROUP(GPIODV_0, EE_OFF),
- GPIO_GROUP(GPIODV_1, EE_OFF),
- GPIO_GROUP(GPIODV_2, EE_OFF),
- GPIO_GROUP(GPIODV_3, EE_OFF),
- GPIO_GROUP(GPIODV_4, EE_OFF),
- GPIO_GROUP(GPIODV_5, EE_OFF),
- GPIO_GROUP(GPIODV_6, EE_OFF),
- GPIO_GROUP(GPIODV_7, EE_OFF),
- GPIO_GROUP(GPIODV_8, EE_OFF),
- GPIO_GROUP(GPIODV_9, EE_OFF),
- GPIO_GROUP(GPIODV_10, EE_OFF),
- GPIO_GROUP(GPIODV_11, EE_OFF),
- GPIO_GROUP(GPIODV_12, EE_OFF),
- GPIO_GROUP(GPIODV_13, EE_OFF),
- GPIO_GROUP(GPIODV_14, EE_OFF),
- GPIO_GROUP(GPIODV_15, EE_OFF),
- GPIO_GROUP(GPIODV_16, EE_OFF),
- GPIO_GROUP(GPIODV_17, EE_OFF),
- GPIO_GROUP(GPIODV_19, EE_OFF),
- GPIO_GROUP(GPIODV_20, EE_OFF),
- GPIO_GROUP(GPIODV_21, EE_OFF),
- GPIO_GROUP(GPIODV_22, EE_OFF),
- GPIO_GROUP(GPIODV_23, EE_OFF),
- GPIO_GROUP(GPIODV_24, EE_OFF),
- GPIO_GROUP(GPIODV_25, EE_OFF),
- GPIO_GROUP(GPIODV_26, EE_OFF),
- GPIO_GROUP(GPIODV_27, EE_OFF),
- GPIO_GROUP(GPIODV_28, EE_OFF),
- GPIO_GROUP(GPIODV_29, EE_OFF),
-
- GPIO_GROUP(GPIOY_0, EE_OFF),
- GPIO_GROUP(GPIOY_1, EE_OFF),
- GPIO_GROUP(GPIOY_2, EE_OFF),
- GPIO_GROUP(GPIOY_3, EE_OFF),
- GPIO_GROUP(GPIOY_4, EE_OFF),
- GPIO_GROUP(GPIOY_5, EE_OFF),
- GPIO_GROUP(GPIOY_6, EE_OFF),
- GPIO_GROUP(GPIOY_7, EE_OFF),
- GPIO_GROUP(GPIOY_8, EE_OFF),
- GPIO_GROUP(GPIOY_9, EE_OFF),
- GPIO_GROUP(GPIOY_10, EE_OFF),
- GPIO_GROUP(GPIOY_11, EE_OFF),
- GPIO_GROUP(GPIOY_12, EE_OFF),
- GPIO_GROUP(GPIOY_13, EE_OFF),
- GPIO_GROUP(GPIOY_14, EE_OFF),
- GPIO_GROUP(GPIOY_15, EE_OFF),
- GPIO_GROUP(GPIOY_16, EE_OFF),
-
- GPIO_GROUP(GPIOX_0, EE_OFF),
- GPIO_GROUP(GPIOX_1, EE_OFF),
- GPIO_GROUP(GPIOX_2, EE_OFF),
- GPIO_GROUP(GPIOX_3, EE_OFF),
- GPIO_GROUP(GPIOX_4, EE_OFF),
- GPIO_GROUP(GPIOX_5, EE_OFF),
- GPIO_GROUP(GPIOX_6, EE_OFF),
- GPIO_GROUP(GPIOX_7, EE_OFF),
- GPIO_GROUP(GPIOX_8, EE_OFF),
- GPIO_GROUP(GPIOX_9, EE_OFF),
- GPIO_GROUP(GPIOX_10, EE_OFF),
- GPIO_GROUP(GPIOX_11, EE_OFF),
- GPIO_GROUP(GPIOX_12, EE_OFF),
- GPIO_GROUP(GPIOX_13, EE_OFF),
- GPIO_GROUP(GPIOX_14, EE_OFF),
- GPIO_GROUP(GPIOX_15, EE_OFF),
- GPIO_GROUP(GPIOX_16, EE_OFF),
- GPIO_GROUP(GPIOX_17, EE_OFF),
- GPIO_GROUP(GPIOX_18, EE_OFF),
- GPIO_GROUP(GPIOX_19, EE_OFF),
- GPIO_GROUP(GPIOX_20, EE_OFF),
- GPIO_GROUP(GPIOX_21, EE_OFF),
- GPIO_GROUP(GPIOX_22, EE_OFF),
-
- GPIO_GROUP(GPIOCLK_0, EE_OFF),
- GPIO_GROUP(GPIOCLK_1, EE_OFF),
- GPIO_GROUP(GPIOCLK_2, EE_OFF),
- GPIO_GROUP(GPIOCLK_3, EE_OFF),
-
- GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOZ_15),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+
+ GPIO_GROUP(BOOT_0),
+ GPIO_GROUP(BOOT_1),
+ GPIO_GROUP(BOOT_2),
+ GPIO_GROUP(BOOT_3),
+ GPIO_GROUP(BOOT_4),
+ GPIO_GROUP(BOOT_5),
+ GPIO_GROUP(BOOT_6),
+ GPIO_GROUP(BOOT_7),
+ GPIO_GROUP(BOOT_8),
+ GPIO_GROUP(BOOT_9),
+ GPIO_GROUP(BOOT_10),
+ GPIO_GROUP(BOOT_11),
+ GPIO_GROUP(BOOT_12),
+ GPIO_GROUP(BOOT_13),
+ GPIO_GROUP(BOOT_14),
+ GPIO_GROUP(BOOT_15),
+ GPIO_GROUP(BOOT_16),
+ GPIO_GROUP(BOOT_17),
+
+ GPIO_GROUP(CARD_0),
+ GPIO_GROUP(CARD_1),
+ GPIO_GROUP(CARD_2),
+ GPIO_GROUP(CARD_3),
+ GPIO_GROUP(CARD_4),
+ GPIO_GROUP(CARD_5),
+ GPIO_GROUP(CARD_6),
+
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+ GPIO_GROUP(GPIOX_22),
+
+ GPIO_GROUP(GPIOCLK_0),
+ GPIO_GROUP(GPIOCLK_1),
+ GPIO_GROUP(GPIOCLK_2),
+ GPIO_GROUP(GPIOCLK_3),
+
+ GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
GROUP(sdio_d0, 8, 5),
@@ -522,20 +521,20 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
};
static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, 0),
- GPIO_GROUP(GPIOAO_1, 0),
- GPIO_GROUP(GPIOAO_2, 0),
- GPIO_GROUP(GPIOAO_3, 0),
- GPIO_GROUP(GPIOAO_4, 0),
- GPIO_GROUP(GPIOAO_5, 0),
- GPIO_GROUP(GPIOAO_6, 0),
- GPIO_GROUP(GPIOAO_7, 0),
- GPIO_GROUP(GPIOAO_8, 0),
- GPIO_GROUP(GPIOAO_9, 0),
- GPIO_GROUP(GPIOAO_10, 0),
- GPIO_GROUP(GPIOAO_11, 0),
- GPIO_GROUP(GPIOAO_12, 0),
- GPIO_GROUP(GPIOAO_13, 0),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
/* bank AO */
GROUP(uart_tx_ao_b, 0, 24),
@@ -565,6 +564,9 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GROUP(spdif_out_ao_13, 0, 4),
GROUP(ao_cec, 0, 15),
GROUP(ee_cec, 0, 14),
+
+ /* test n pin */
+ GROUP(i2s_out_ch67_ao, 1, 2),
};
static const char * const gpio_periphs_groups[] = {
@@ -600,8 +602,6 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
"GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", "GPIOX_19",
"GPIOX_20", "GPIOX_21", "GPIOX_22",
-
- "GPIO_TEST_N",
};
static const char * const emmc_groups[] = {
@@ -710,6 +710,8 @@ static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
"GPIOAO_10", "GPIOAO_11", "GPIOAO_12", "GPIOAO_13",
+
+ "GPIO_TEST_N",
};
static const char * const uart_ao_groups[] = {
@@ -751,6 +753,7 @@ static const char * const pwm_ao_b_groups[] = {
static const char * const i2s_out_ao_groups[] = {
"i2s_am_clk", "i2s_out_ao_clk", "i2s_out_lr_clk",
"i2s_out_ch01_ao", "i2s_out_ch23_ao", "i2s_out_ch45_ao",
+ "i2s_out_ch67_ao",
};
static const char * const spdif_out_ao_groups[] = {
@@ -806,25 +809,24 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
};
static struct meson_bank meson_gxbb_periphs_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_22, EE_OFF), 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("Y", PIN(GPIOY_0, EE_OFF), PIN(GPIOY_16, EE_OFF), 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 59, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_3, EE_OFF), 30, 33, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 14, 29, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_17, EE_OFF), 34, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_3, EE_OFF), 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_22, 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("Y", GPIOY_0, GPIOY_16, 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 59, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", GPIOH_0, GPIOH_3, 30, 33, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", GPIOZ_0, GPIOZ_15, 14, 29, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", CARD_0, CARD_6, 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", BOOT_0, BOOT_17, 34, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", GPIOCLK_0, GPIOCLK_3, 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxbb_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.name = "periphs-banks",
- .pin_base = 14,
.pins = meson_gxbb_periphs_pins,
.groups = meson_gxbb_periphs_groups,
.funcs = meson_gxbb_periphs_functions,
@@ -833,11 +835,11 @@ struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxbb_periphs_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_periphs_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_periphs_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 0,
.pins = meson_gxbb_aobus_pins,
.groups = meson_gxbb_aobus_groups,
.funcs = meson_gxbb_aobus_functions,
@@ -846,4 +848,26 @@ struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxbb_aobus_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-gxbb-periphs-pinctrl",
+ .data = &meson_gxbb_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-aobus-pinctrl",
+ .data = &meson_gxbb_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_gxbb_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-gxbb-pinctrl",
+ .of_match_table = meson_gxbb_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson_gxbb_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 36c14b85fc7c..b3786cde963d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -14,408 +14,400 @@
#include <dt-bindings/gpio/meson-gxl-gpio.h>
#include "pinctrl-meson.h"
-
-#define EE_OFF 10
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = {
- MESON_PIN(GPIOZ_0, EE_OFF),
- MESON_PIN(GPIOZ_1, EE_OFF),
- MESON_PIN(GPIOZ_2, EE_OFF),
- MESON_PIN(GPIOZ_3, EE_OFF),
- MESON_PIN(GPIOZ_4, EE_OFF),
- MESON_PIN(GPIOZ_5, EE_OFF),
- MESON_PIN(GPIOZ_6, EE_OFF),
- MESON_PIN(GPIOZ_7, EE_OFF),
- MESON_PIN(GPIOZ_8, EE_OFF),
- MESON_PIN(GPIOZ_9, EE_OFF),
- MESON_PIN(GPIOZ_10, EE_OFF),
- MESON_PIN(GPIOZ_11, EE_OFF),
- MESON_PIN(GPIOZ_12, EE_OFF),
- MESON_PIN(GPIOZ_13, EE_OFF),
- MESON_PIN(GPIOZ_14, EE_OFF),
- MESON_PIN(GPIOZ_15, EE_OFF),
-
- MESON_PIN(GPIOH_0, EE_OFF),
- MESON_PIN(GPIOH_1, EE_OFF),
- MESON_PIN(GPIOH_2, EE_OFF),
- MESON_PIN(GPIOH_3, EE_OFF),
- MESON_PIN(GPIOH_4, EE_OFF),
- MESON_PIN(GPIOH_5, EE_OFF),
- MESON_PIN(GPIOH_6, EE_OFF),
- MESON_PIN(GPIOH_7, EE_OFF),
- MESON_PIN(GPIOH_8, EE_OFF),
- MESON_PIN(GPIOH_9, EE_OFF),
-
- MESON_PIN(BOOT_0, EE_OFF),
- MESON_PIN(BOOT_1, EE_OFF),
- MESON_PIN(BOOT_2, EE_OFF),
- MESON_PIN(BOOT_3, EE_OFF),
- MESON_PIN(BOOT_4, EE_OFF),
- MESON_PIN(BOOT_5, EE_OFF),
- MESON_PIN(BOOT_6, EE_OFF),
- MESON_PIN(BOOT_7, EE_OFF),
- MESON_PIN(BOOT_8, EE_OFF),
- MESON_PIN(BOOT_9, EE_OFF),
- MESON_PIN(BOOT_10, EE_OFF),
- MESON_PIN(BOOT_11, EE_OFF),
- MESON_PIN(BOOT_12, EE_OFF),
- MESON_PIN(BOOT_13, EE_OFF),
- MESON_PIN(BOOT_14, EE_OFF),
- MESON_PIN(BOOT_15, EE_OFF),
-
- MESON_PIN(CARD_0, EE_OFF),
- MESON_PIN(CARD_1, EE_OFF),
- MESON_PIN(CARD_2, EE_OFF),
- MESON_PIN(CARD_3, EE_OFF),
- MESON_PIN(CARD_4, EE_OFF),
- MESON_PIN(CARD_5, EE_OFF),
- MESON_PIN(CARD_6, EE_OFF),
-
- MESON_PIN(GPIODV_0, EE_OFF),
- MESON_PIN(GPIODV_1, EE_OFF),
- MESON_PIN(GPIODV_2, EE_OFF),
- MESON_PIN(GPIODV_3, EE_OFF),
- MESON_PIN(GPIODV_4, EE_OFF),
- MESON_PIN(GPIODV_5, EE_OFF),
- MESON_PIN(GPIODV_6, EE_OFF),
- MESON_PIN(GPIODV_7, EE_OFF),
- MESON_PIN(GPIODV_8, EE_OFF),
- MESON_PIN(GPIODV_9, EE_OFF),
- MESON_PIN(GPIODV_10, EE_OFF),
- MESON_PIN(GPIODV_11, EE_OFF),
- MESON_PIN(GPIODV_12, EE_OFF),
- MESON_PIN(GPIODV_13, EE_OFF),
- MESON_PIN(GPIODV_14, EE_OFF),
- MESON_PIN(GPIODV_15, EE_OFF),
- MESON_PIN(GPIODV_16, EE_OFF),
- MESON_PIN(GPIODV_17, EE_OFF),
- MESON_PIN(GPIODV_18, EE_OFF),
- MESON_PIN(GPIODV_19, EE_OFF),
- MESON_PIN(GPIODV_20, EE_OFF),
- MESON_PIN(GPIODV_21, EE_OFF),
- MESON_PIN(GPIODV_22, EE_OFF),
- MESON_PIN(GPIODV_23, EE_OFF),
- MESON_PIN(GPIODV_24, EE_OFF),
- MESON_PIN(GPIODV_25, EE_OFF),
- MESON_PIN(GPIODV_26, EE_OFF),
- MESON_PIN(GPIODV_27, EE_OFF),
- MESON_PIN(GPIODV_28, EE_OFF),
- MESON_PIN(GPIODV_29, EE_OFF),
-
- MESON_PIN(GPIOX_0, EE_OFF),
- MESON_PIN(GPIOX_1, EE_OFF),
- MESON_PIN(GPIOX_2, EE_OFF),
- MESON_PIN(GPIOX_3, EE_OFF),
- MESON_PIN(GPIOX_4, EE_OFF),
- MESON_PIN(GPIOX_5, EE_OFF),
- MESON_PIN(GPIOX_6, EE_OFF),
- MESON_PIN(GPIOX_7, EE_OFF),
- MESON_PIN(GPIOX_8, EE_OFF),
- MESON_PIN(GPIOX_9, EE_OFF),
- MESON_PIN(GPIOX_10, EE_OFF),
- MESON_PIN(GPIOX_11, EE_OFF),
- MESON_PIN(GPIOX_12, EE_OFF),
- MESON_PIN(GPIOX_13, EE_OFF),
- MESON_PIN(GPIOX_14, EE_OFF),
- MESON_PIN(GPIOX_15, EE_OFF),
- MESON_PIN(GPIOX_16, EE_OFF),
- MESON_PIN(GPIOX_17, EE_OFF),
- MESON_PIN(GPIOX_18, EE_OFF),
-
- MESON_PIN(GPIOCLK_0, EE_OFF),
- MESON_PIN(GPIOCLK_1, EE_OFF),
-
- MESON_PIN(GPIO_TEST_N, EE_OFF),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(GPIOZ_15),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+
+ MESON_PIN(GPIOCLK_0),
+ MESON_PIN(GPIOCLK_1),
};
static const unsigned int emmc_nand_d07_pins[] = {
- PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
- PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
- PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
-};
-static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int spi_mosi_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int spi_miso_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int spi_ss0_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int spi_sclk_pins[] = { PIN(GPIOX_11, EE_OFF) };
-
-static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
-static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
-static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
-static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
-static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
-static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
-
-static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
-static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
-static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
-static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
-static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
-static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
-static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
-static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
-
-static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
-static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
-
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
-
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-
-static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int i2c_sck_c_dv19_pins[] = { PIN(GPIODV_19, EE_OFF) };
-static const unsigned int i2c_sda_c_dv18_pins[] = { PIN(GPIODV_18, EE_OFF) };
-
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
-static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int pwm_a_pins[] = { PIN(GPIOX_6, EE_OFF) };
-
-static const unsigned int pwm_b_pins[] = { PIN(GPIODV_29, EE_OFF) };
-
-static const unsigned int pwm_c_pins[] = { PIN(GPIOZ_15, EE_OFF) };
-
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_16, EE_OFF) };
-
-static const unsigned int pwm_f_clk_pins[] = { PIN(GPIOCLK_1, EE_OFF) };
-static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
-
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOH_6, EE_OFF) };
-static const unsigned int i2s_out_ao_clk_pins[] = { PIN(GPIOH_7, EE_OFF) };
-static const unsigned int i2s_out_lr_clk_pins[] = { PIN(GPIOH_8, EE_OFF) };
-static const unsigned int i2s_out_ch01_pins[] = { PIN(GPIOH_9, EE_OFF) };
-static const unsigned int i2s_out_ch23_z_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int i2s_out_ch45_z_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int i2s_out_ch67_z_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-
-static const unsigned int spdif_out_h_pins[] = { PIN(GPIOH_4, EE_OFF) };
-
-static const unsigned int eth_link_led_pins[] = { PIN(GPIOZ_14, EE_OFF) };
-static const unsigned int eth_act_led_pins[] = { PIN(GPIOZ_15, EE_OFF) };
-
-static const unsigned int tsin_a_d0_pins[] = { PIN(GPIODV_0, EE_OFF) };
-static const unsigned int tsin_a_d0_x_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int tsin_a_clk_pins[] = { PIN(GPIODV_8, EE_OFF) };
-static const unsigned int tsin_a_clk_x_pins[] = { PIN(GPIOX_11, EE_OFF) };
-static const unsigned int tsin_a_sop_pins[] = { PIN(GPIODV_9, EE_OFF) };
-static const unsigned int tsin_a_sop_x_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int tsin_a_d_valid_pins[] = { PIN(GPIODV_10, EE_OFF) };
-static const unsigned int tsin_a_d_valid_x_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int tsin_a_fail_pins[] = { PIN(GPIODV_11, EE_OFF) };
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7,
+};
+static const unsigned int emmc_clk_pins[] = { BOOT_8 };
+static const unsigned int emmc_cmd_pins[] = { BOOT_10 };
+static const unsigned int emmc_ds_pins[] = { BOOT_15 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_15 };
+
+static const unsigned int spi_mosi_pins[] = { GPIOX_8 };
+static const unsigned int spi_miso_pins[] = { GPIOX_9 };
+static const unsigned int spi_ss0_pins[] = { GPIOX_10 };
+static const unsigned int spi_sclk_pins[] = { GPIOX_11 };
+
+static const unsigned int sdcard_d0_pins[] = { CARD_1 };
+static const unsigned int sdcard_d1_pins[] = { CARD_0 };
+static const unsigned int sdcard_d2_pins[] = { CARD_5 };
+static const unsigned int sdcard_d3_pins[] = { CARD_4 };
+static const unsigned int sdcard_cmd_pins[] = { CARD_3 };
+static const unsigned int sdcard_clk_pins[] = { CARD_2 };
+
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
+static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
+
+static const unsigned int nand_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_wr_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+static const unsigned int uart_tx_c_pins[] = { GPIOX_8 };
+static const unsigned int uart_rx_c_pins[] = { GPIOX_9 };
+static const unsigned int uart_cts_c_pins[] = { GPIOX_10 };
+static const unsigned int uart_rts_c_pins[] = { GPIOX_11 };
+
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+
+static const unsigned int i2c_sck_b_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_b_pins[] = { GPIODV_26 };
+
+static const unsigned int i2c_sck_c_pins[] = { GPIODV_29 };
+static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
+
+static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 };
+static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 };
+
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_13 };
+
+static const unsigned int pwm_a_pins[] = { GPIOX_6 };
+
+static const unsigned int pwm_b_pins[] = { GPIODV_29 };
+
+static const unsigned int pwm_c_pins[] = { GPIOZ_15 };
+
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+
+static const unsigned int pwm_e_pins[] = { GPIOX_16 };
+
+static const unsigned int pwm_f_clk_pins[] = { GPIOCLK_1 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_7 };
+
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+
+static const unsigned int i2s_am_clk_pins[] = { GPIOH_6 };
+static const unsigned int i2s_out_ao_clk_pins[] = { GPIOH_7 };
+static const unsigned int i2s_out_lr_clk_pins[] = { GPIOH_8 };
+static const unsigned int i2s_out_ch01_pins[] = { GPIOH_9 };
+static const unsigned int i2s_out_ch23_z_pins[] = { GPIOZ_5 };
+static const unsigned int i2s_out_ch45_z_pins[] = { GPIOZ_6 };
+static const unsigned int i2s_out_ch67_z_pins[] = { GPIOZ_7 };
+
+static const unsigned int spdif_out_h_pins[] = { GPIOH_4 };
+
+static const unsigned int eth_link_led_pins[] = { GPIOZ_14 };
+static const unsigned int eth_act_led_pins[] = { GPIOZ_15 };
+
+static const unsigned int tsin_a_d0_pins[] = { GPIODV_0 };
+static const unsigned int tsin_a_d0_x_pins[] = { GPIOX_10 };
+static const unsigned int tsin_a_clk_pins[] = { GPIODV_8 };
+static const unsigned int tsin_a_clk_x_pins[] = { GPIOX_11 };
+static const unsigned int tsin_a_sop_pins[] = { GPIODV_9 };
+static const unsigned int tsin_a_sop_x_pins[] = { GPIOX_8 };
+static const unsigned int tsin_a_d_valid_pins[] = { GPIODV_10 };
+static const unsigned int tsin_a_d_valid_x_pins[] = { GPIOX_9 };
+static const unsigned int tsin_a_fail_pins[] = { GPIODV_11 };
static const unsigned int tsin_a_dp_pins[] = {
- PIN(GPIODV_1, EE_OFF),
- PIN(GPIODV_2, EE_OFF),
- PIN(GPIODV_3, EE_OFF),
- PIN(GPIODV_4, EE_OFF),
- PIN(GPIODV_5, EE_OFF),
- PIN(GPIODV_6, EE_OFF),
- PIN(GPIODV_7, EE_OFF),
+ GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5, GPIODV_6, GPIODV_7,
};
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, 0),
- MESON_PIN(GPIOAO_1, 0),
- MESON_PIN(GPIOAO_2, 0),
- MESON_PIN(GPIOAO_3, 0),
- MESON_PIN(GPIOAO_4, 0),
- MESON_PIN(GPIOAO_5, 0),
- MESON_PIN(GPIOAO_6, 0),
- MESON_PIN(GPIOAO_7, 0),
- MESON_PIN(GPIOAO_8, 0),
- MESON_PIN(GPIOAO_9, 0),
-};
-
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_tx_ao_b_0_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_1_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
-
-static const unsigned int i2c_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-
-static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
-
-static const unsigned int pwm_ao_a_3_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int pwm_ao_a_8_pins[] = { PIN(GPIOAO_8, 0) };
-
-static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_9, 0) };
-static const unsigned int pwm_ao_b_6_pins[] = { PIN(GPIOAO_6, 0) };
-
-static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_9, 0) };
-
-static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int spdif_out_ao_9_pins[] = { PIN(GPIOAO_9, 0) };
-
-static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_8, 0) };
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+
+ MESON_PIN(GPIO_TEST_N),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_tx_ao_b_0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b_1_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b_pins[] = { GPIOAO_5 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+
+static const unsigned int i2c_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = {GPIOAO_5 };
+static const unsigned int i2c_slave_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = {GPIOAO_5 };
+
+static const unsigned int remote_input_ao_pins[] = {GPIOAO_7 };
+
+static const unsigned int pwm_ao_a_3_pins[] = { GPIOAO_3 };
+static const unsigned int pwm_ao_a_8_pins[] = { GPIOAO_8 };
+
+static const unsigned int pwm_ao_b_pins[] = { GPIOAO_9 };
+static const unsigned int pwm_ao_b_6_pins[] = { GPIOAO_6 };
+
+static const unsigned int i2s_out_ch23_ao_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_out_ch45_ao_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_out_ch67_ao_pins[] = { GPIO_TEST_N };
+
+static const unsigned int spdif_out_ao_6_pins[] = { GPIOAO_6 };
+static const unsigned int spdif_out_ao_9_pins[] = { GPIOAO_9 };
+
+static const unsigned int ao_cec_pins[] = { GPIOAO_8 };
+static const unsigned int ee_cec_pins[] = { GPIOAO_8 };
static struct meson_pmx_group meson_gxl_periphs_groups[] = {
- GPIO_GROUP(GPIOZ_0, EE_OFF),
- GPIO_GROUP(GPIOZ_1, EE_OFF),
- GPIO_GROUP(GPIOZ_2, EE_OFF),
- GPIO_GROUP(GPIOZ_3, EE_OFF),
- GPIO_GROUP(GPIOZ_4, EE_OFF),
- GPIO_GROUP(GPIOZ_5, EE_OFF),
- GPIO_GROUP(GPIOZ_6, EE_OFF),
- GPIO_GROUP(GPIOZ_7, EE_OFF),
- GPIO_GROUP(GPIOZ_8, EE_OFF),
- GPIO_GROUP(GPIOZ_9, EE_OFF),
- GPIO_GROUP(GPIOZ_10, EE_OFF),
- GPIO_GROUP(GPIOZ_11, EE_OFF),
- GPIO_GROUP(GPIOZ_12, EE_OFF),
- GPIO_GROUP(GPIOZ_13, EE_OFF),
- GPIO_GROUP(GPIOZ_14, EE_OFF),
- GPIO_GROUP(GPIOZ_15, EE_OFF),
-
- GPIO_GROUP(GPIOH_0, EE_OFF),
- GPIO_GROUP(GPIOH_1, EE_OFF),
- GPIO_GROUP(GPIOH_2, EE_OFF),
- GPIO_GROUP(GPIOH_3, EE_OFF),
- GPIO_GROUP(GPIOH_4, EE_OFF),
- GPIO_GROUP(GPIOH_5, EE_OFF),
- GPIO_GROUP(GPIOH_6, EE_OFF),
- GPIO_GROUP(GPIOH_7, EE_OFF),
- GPIO_GROUP(GPIOH_8, EE_OFF),
- GPIO_GROUP(GPIOH_9, EE_OFF),
-
- GPIO_GROUP(BOOT_0, EE_OFF),
- GPIO_GROUP(BOOT_1, EE_OFF),
- GPIO_GROUP(BOOT_2, EE_OFF),
- GPIO_GROUP(BOOT_3, EE_OFF),
- GPIO_GROUP(BOOT_4, EE_OFF),
- GPIO_GROUP(BOOT_5, EE_OFF),
- GPIO_GROUP(BOOT_6, EE_OFF),
- GPIO_GROUP(BOOT_7, EE_OFF),
- GPIO_GROUP(BOOT_8, EE_OFF),
- GPIO_GROUP(BOOT_9, EE_OFF),
- GPIO_GROUP(BOOT_10, EE_OFF),
- GPIO_GROUP(BOOT_11, EE_OFF),
- GPIO_GROUP(BOOT_12, EE_OFF),
- GPIO_GROUP(BOOT_13, EE_OFF),
- GPIO_GROUP(BOOT_14, EE_OFF),
- GPIO_GROUP(BOOT_15, EE_OFF),
-
- GPIO_GROUP(CARD_0, EE_OFF),
- GPIO_GROUP(CARD_1, EE_OFF),
- GPIO_GROUP(CARD_2, EE_OFF),
- GPIO_GROUP(CARD_3, EE_OFF),
- GPIO_GROUP(CARD_4, EE_OFF),
- GPIO_GROUP(CARD_5, EE_OFF),
- GPIO_GROUP(CARD_6, EE_OFF),
-
- GPIO_GROUP(GPIODV_0, EE_OFF),
- GPIO_GROUP(GPIODV_1, EE_OFF),
- GPIO_GROUP(GPIODV_2, EE_OFF),
- GPIO_GROUP(GPIODV_3, EE_OFF),
- GPIO_GROUP(GPIODV_4, EE_OFF),
- GPIO_GROUP(GPIODV_5, EE_OFF),
- GPIO_GROUP(GPIODV_6, EE_OFF),
- GPIO_GROUP(GPIODV_7, EE_OFF),
- GPIO_GROUP(GPIODV_8, EE_OFF),
- GPIO_GROUP(GPIODV_9, EE_OFF),
- GPIO_GROUP(GPIODV_10, EE_OFF),
- GPIO_GROUP(GPIODV_11, EE_OFF),
- GPIO_GROUP(GPIODV_12, EE_OFF),
- GPIO_GROUP(GPIODV_13, EE_OFF),
- GPIO_GROUP(GPIODV_14, EE_OFF),
- GPIO_GROUP(GPIODV_15, EE_OFF),
- GPIO_GROUP(GPIODV_16, EE_OFF),
- GPIO_GROUP(GPIODV_17, EE_OFF),
- GPIO_GROUP(GPIODV_19, EE_OFF),
- GPIO_GROUP(GPIODV_20, EE_OFF),
- GPIO_GROUP(GPIODV_21, EE_OFF),
- GPIO_GROUP(GPIODV_22, EE_OFF),
- GPIO_GROUP(GPIODV_23, EE_OFF),
- GPIO_GROUP(GPIODV_24, EE_OFF),
- GPIO_GROUP(GPIODV_25, EE_OFF),
- GPIO_GROUP(GPIODV_26, EE_OFF),
- GPIO_GROUP(GPIODV_27, EE_OFF),
- GPIO_GROUP(GPIODV_28, EE_OFF),
- GPIO_GROUP(GPIODV_29, EE_OFF),
-
- GPIO_GROUP(GPIOX_0, EE_OFF),
- GPIO_GROUP(GPIOX_1, EE_OFF),
- GPIO_GROUP(GPIOX_2, EE_OFF),
- GPIO_GROUP(GPIOX_3, EE_OFF),
- GPIO_GROUP(GPIOX_4, EE_OFF),
- GPIO_GROUP(GPIOX_5, EE_OFF),
- GPIO_GROUP(GPIOX_6, EE_OFF),
- GPIO_GROUP(GPIOX_7, EE_OFF),
- GPIO_GROUP(GPIOX_8, EE_OFF),
- GPIO_GROUP(GPIOX_9, EE_OFF),
- GPIO_GROUP(GPIOX_10, EE_OFF),
- GPIO_GROUP(GPIOX_11, EE_OFF),
- GPIO_GROUP(GPIOX_12, EE_OFF),
- GPIO_GROUP(GPIOX_13, EE_OFF),
- GPIO_GROUP(GPIOX_14, EE_OFF),
- GPIO_GROUP(GPIOX_15, EE_OFF),
- GPIO_GROUP(GPIOX_16, EE_OFF),
- GPIO_GROUP(GPIOX_17, EE_OFF),
- GPIO_GROUP(GPIOX_18, EE_OFF),
-
- GPIO_GROUP(GPIOCLK_0, EE_OFF),
- GPIO_GROUP(GPIOCLK_1, EE_OFF),
-
- GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOZ_15),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+
+ GPIO_GROUP(BOOT_0),
+ GPIO_GROUP(BOOT_1),
+ GPIO_GROUP(BOOT_2),
+ GPIO_GROUP(BOOT_3),
+ GPIO_GROUP(BOOT_4),
+ GPIO_GROUP(BOOT_5),
+ GPIO_GROUP(BOOT_6),
+ GPIO_GROUP(BOOT_7),
+ GPIO_GROUP(BOOT_8),
+ GPIO_GROUP(BOOT_9),
+ GPIO_GROUP(BOOT_10),
+ GPIO_GROUP(BOOT_11),
+ GPIO_GROUP(BOOT_12),
+ GPIO_GROUP(BOOT_13),
+ GPIO_GROUP(BOOT_14),
+ GPIO_GROUP(BOOT_15),
+
+ GPIO_GROUP(CARD_0),
+ GPIO_GROUP(CARD_1),
+ GPIO_GROUP(CARD_2),
+ GPIO_GROUP(CARD_3),
+ GPIO_GROUP(CARD_4),
+ GPIO_GROUP(CARD_5),
+ GPIO_GROUP(CARD_6),
+
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+
+ GPIO_GROUP(GPIOCLK_0),
+ GPIO_GROUP(GPIOCLK_1),
+
+ GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
GROUP(sdio_d0, 5, 31),
@@ -530,16 +522,16 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
};
static struct meson_pmx_group meson_gxl_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, 0),
- GPIO_GROUP(GPIOAO_1, 0),
- GPIO_GROUP(GPIOAO_2, 0),
- GPIO_GROUP(GPIOAO_3, 0),
- GPIO_GROUP(GPIOAO_4, 0),
- GPIO_GROUP(GPIOAO_5, 0),
- GPIO_GROUP(GPIOAO_6, 0),
- GPIO_GROUP(GPIOAO_7, 0),
- GPIO_GROUP(GPIOAO_8, 0),
- GPIO_GROUP(GPIOAO_9, 0),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
/* bank AO */
GROUP(uart_tx_ao_b_0, 0, 26),
@@ -567,6 +559,9 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GROUP(spdif_out_ao_9, 0, 4),
GROUP(ao_cec, 0, 15),
GROUP(ee_cec, 0, 14),
+
+ /* test n pin */
+ GROUP(i2s_out_ch67_ao, 1, 2),
};
static const char * const gpio_periphs_groups[] = {
@@ -597,8 +592,6 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
"GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
"GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18",
-
- "GPIO_TEST_N",
};
static const char * const emmc_groups[] = {
@@ -713,6 +706,8 @@ static const char * const tsin_a_groups[] = {
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
+
+ "GPIO_TEST_N",
};
static const char * const uart_ao_groups[] = {
@@ -745,7 +740,7 @@ static const char * const pwm_ao_b_groups[] = {
};
static const char * const i2s_out_ao_groups[] = {
- "i2s_out_ch23_ao", "i2s_out_ch45_ao",
+ "i2s_out_ch23_ao", "i2s_out_ch45_ao", "i2s_out_ch67_ao",
};
static const char * const spdif_out_ao_groups[] = {
@@ -800,24 +795,23 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
};
static struct meson_bank meson_gxl_periphs_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_18, EE_OFF), 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_9, EE_OFF), 26, 35, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 10, 25, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_15, EE_OFF), 36, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_1, EE_OFF), 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_18, 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 26, 35, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", GPIOZ_0, GPIOZ_15, 10, 25, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", CARD_0, CARD_6, 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", BOOT_0, BOOT_15, 36, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", GPIOCLK_0, GPIOCLK_1, 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxl_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_9, 0), 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.name = "periphs-banks",
- .pin_base = 10,
.pins = meson_gxl_periphs_pins,
.groups = meson_gxl_periphs_groups,
.funcs = meson_gxl_periphs_functions,
@@ -826,11 +820,11 @@ struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxl_periphs_groups),
.num_funcs = ARRAY_SIZE(meson_gxl_periphs_functions),
.num_banks = ARRAY_SIZE(meson_gxl_periphs_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 0,
.pins = meson_gxl_aobus_pins,
.groups = meson_gxl_aobus_groups,
.funcs = meson_gxl_aobus_functions,
@@ -839,4 +833,26 @@ struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxl_aobus_groups),
.num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-gxl-periphs-pinctrl",
+ .data = &meson_gxl_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxl-aobus-pinctrl",
+ .data = &meson_gxl_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_gxl_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-gxl-pinctrl",
+ .of_match_table = meson_gxl_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson_gxl_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 66ed70c12733..29a458da78db 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -31,10 +31,6 @@
* In some cases the register ranges for pull enable and pull
* direction are the same and thus there are only 3 register ranges.
*
- * Every pinmux group can be enabled by a specific bit in the first
- * register range; when all groups for a given pin are disabled the
- * pin acts as a GPIO.
- *
* For the pull and GPIO configuration every bank uses a contiguous
* set of bits in the register sets described above; the same register
* can be shared by more banks with different offsets.
@@ -50,6 +46,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -147,94 +144,24 @@ static const struct pinctrl_ops meson_pctrl_ops = {
.pin_dbg_show = meson_pin_dbg_show,
};
-/**
- * meson_pmx_disable_other_groups() - disable other groups using a given pin
- *
- * @pc: meson pin controller device
- * @pin: number of the pin
- * @sel_group: index of the selected group, or -1 if none
- *
- * The function disables all pinmux groups using a pin except the
- * selected one. If @sel_group is -1 all groups are disabled, leaving
- * the pin in GPIO mode.
- */
-static void meson_pmx_disable_other_groups(struct meson_pinctrl *pc,
- unsigned int pin, int sel_group)
-{
- struct meson_pmx_group *group;
- int i, j;
-
- for (i = 0; i < pc->data->num_groups; i++) {
- group = &pc->data->groups[i];
- if (group->is_gpio || i == sel_group)
- continue;
-
- for (j = 0; j < group->num_pins; j++) {
- if (group->pins[j] == pin) {
- /* We have found a group using the pin */
- regmap_update_bits(pc->reg_mux,
- group->reg * 4,
- BIT(group->bit), 0);
- }
- }
- }
-}
-
-static int meson_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
- unsigned group_num)
-{
- struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_func *func = &pc->data->funcs[func_num];
- struct meson_pmx_group *group = &pc->data->groups[group_num];
- int i, ret = 0;
-
- dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
- group->name);
-
- /*
- * Disable groups using the same pin.
- * The selected group is not disabled to avoid glitches.
- */
- for (i = 0; i < group->num_pins; i++)
- meson_pmx_disable_other_groups(pc, group->pins[i], group_num);
-
- /* Function 0 (GPIO) doesn't need any additional setting */
- if (func_num)
- ret = regmap_update_bits(pc->reg_mux, group->reg * 4,
- BIT(group->bit), BIT(group->bit));
-
- return ret;
-}
-
-static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
- struct pinctrl_gpio_range *range,
- unsigned offset)
-{
- struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
-
- meson_pmx_disable_other_groups(pc, offset, -1);
-
- return 0;
-}
-
-static int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
+int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
return pc->data->num_funcs;
}
-static const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
- unsigned selector)
+const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
+ unsigned selector)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
return pc->data->funcs[selector].name;
}
-static int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
+int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
@@ -244,14 +171,6 @@ static int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
return 0;
}
-static const struct pinmux_ops meson_pmx_ops = {
- .set_mux = meson_pmx_set_mux,
- .get_functions_count = meson_pmx_get_funcs_count,
- .get_function_name = meson_pmx_get_func_name,
- .get_function_groups = meson_pmx_get_groups,
- .gpio_request_enable = meson_pmx_request_gpio,
-};
-
static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
unsigned long *configs, unsigned num_configs)
{
@@ -399,7 +318,7 @@ static int meson_pinconf_group_set(struct pinctrl_dev *pcdev,
static int meson_pinconf_group_get(struct pinctrl_dev *pcdev,
unsigned int group, unsigned long *config)
{
- return -ENOSYS;
+ return -ENOTSUPP;
}
static const struct pinconf_ops meson_pinconf_ops = {
@@ -410,31 +329,18 @@ static const struct pinconf_ops meson_pinconf_ops = {
.is_generic = true,
};
-static int meson_gpio_request(struct gpio_chip *chip, unsigned gpio)
-{
- return pinctrl_request_gpio(chip->base + gpio);
-}
-
-static void meson_gpio_free(struct gpio_chip *chip, unsigned gpio)
-{
- struct meson_pinctrl *pc = gpiochip_get_data(chip);
-
- pinctrl_free_gpio(pc->data->pin_base + gpio);
-}
-
static int meson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_DIR, &reg, &bit);
return regmap_update_bits(pc->reg_gpio, reg, BIT(bit), BIT(bit));
}
@@ -443,21 +349,20 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_DIR, &reg, &bit);
ret = regmap_update_bits(pc->reg_gpio, reg, BIT(bit), 0);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_OUT, &reg, &bit);
return regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
@@ -465,16 +370,15 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return;
- meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_OUT, &reg, &bit);
regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
@@ -482,70 +386,33 @@ static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, val, pin;
+ unsigned int reg, bit, val;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_IN, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_IN, &reg, &bit);
regmap_read(pc->reg_gpio, reg, &val);
return !!(val & BIT(bit));
}
-static const struct of_device_id meson_pinctrl_dt_match[] = {
- {
- .compatible = "amlogic,meson8-cbus-pinctrl",
- .data = &meson8_cbus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8b-cbus-pinctrl",
- .data = &meson8b_cbus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8-aobus-pinctrl",
- .data = &meson8_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8b-aobus-pinctrl",
- .data = &meson8b_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxbb-periphs-pinctrl",
- .data = &meson_gxbb_periphs_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxbb-aobus-pinctrl",
- .data = &meson_gxbb_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxl-periphs-pinctrl",
- .data = &meson_gxl_periphs_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxl-aobus-pinctrl",
- .data = &meson_gxl_aobus_pinctrl_data,
- },
- { },
-};
-
static int meson_gpiolib_register(struct meson_pinctrl *pc)
{
int ret;
pc->chip.label = pc->data->name;
pc->chip.parent = pc->dev;
- pc->chip.request = meson_gpio_request;
- pc->chip.free = meson_gpio_free;
+ pc->chip.request = gpiochip_generic_request;
+ pc->chip.free = gpiochip_generic_free;
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
pc->chip.set = meson_gpio_set;
- pc->chip.base = pc->data->pin_base;
+ pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
pc->chip.of_node = pc->of_node;
@@ -640,9 +507,8 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
return 0;
}
-static int meson_pinctrl_probe(struct platform_device *pdev)
+int meson_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct meson_pinctrl *pc;
int ret;
@@ -652,17 +518,16 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
pc->dev = dev;
- match = of_match_node(meson_pinctrl_dt_match, pdev->dev.of_node);
- pc->data = (struct meson_pinctrl_data *) match->data;
+ pc->data = (struct meson_pinctrl_data *) of_device_get_match_data(dev);
- ret = meson_pinctrl_parse_dt(pc, pdev->dev.of_node);
+ ret = meson_pinctrl_parse_dt(pc, dev->of_node);
if (ret)
return ret;
pc->desc.name = "pinctrl-meson";
pc->desc.owner = THIS_MODULE;
pc->desc.pctlops = &meson_pctrl_ops;
- pc->desc.pmxops = &meson_pmx_ops;
+ pc->desc.pmxops = pc->data->pmx_ops;
pc->desc.confops = &meson_pinconf_ops;
pc->desc.pins = pc->data->pins;
pc->desc.npins = pc->data->num_pins;
@@ -675,12 +540,3 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
return meson_gpiolib_register(pc);
}
-
-static struct platform_driver meson_pinctrl_driver = {
- .probe = meson_pinctrl_probe,
- .driver = {
- .name = "meson-pinctrl",
- .of_match_table = meson_pinctrl_dt_match,
- },
-};
-builtin_platform_driver(meson_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 890f296f5840..183b6e471635 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -13,6 +13,7 @@
#include <linux/gpio.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
@@ -31,9 +32,7 @@ struct meson_pmx_group {
const char *name;
const unsigned int *pins;
unsigned int num_pins;
- bool is_gpio;
- unsigned int reg;
- unsigned int bit;
+ const void *data;
};
/**
@@ -103,12 +102,12 @@ struct meson_pinctrl_data {
const struct pinctrl_pin_desc *pins;
struct meson_pmx_group *groups;
struct meson_pmx_func *funcs;
- unsigned int pin_base;
unsigned int num_pins;
unsigned int num_groups;
unsigned int num_funcs;
struct meson_bank *banks;
unsigned int num_banks;
+ const struct pinmux_ops *pmx_ops;
};
struct meson_pinctrl {
@@ -124,25 +123,6 @@ struct meson_pinctrl {
struct device_node *of_node;
};
-#define PIN(x, b) (b + x)
-
-#define GROUP(grp, r, b) \
- { \
- .name = #grp, \
- .pins = grp ## _pins, \
- .num_pins = ARRAY_SIZE(grp ## _pins), \
- .reg = r, \
- .bit = b, \
- }
-
-#define GPIO_GROUP(gpio, b) \
- { \
- .name = #gpio, \
- .pins = (const unsigned int[]){ PIN(gpio, b) }, \
- .num_pins = 1, \
- .is_gpio = true, \
- }
-
#define FUNCTION(fn) \
{ \
.name = #fn, \
@@ -166,13 +146,16 @@ struct meson_pinctrl {
}, \
}
-#define MESON_PIN(x, b) PINCTRL_PIN(PIN(x, b), #x)
+#define MESON_PIN(x) PINCTRL_PIN(x, #x)
+
+/* Common pmx functions */
+int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev);
+const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
+ unsigned selector);
+int meson_pmx_get_groups(struct pinctrl_dev *pcdev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups);
-extern struct meson_pinctrl_data meson8_cbus_pinctrl_data;
-extern struct meson_pinctrl_data meson8_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data;
+/* Common probe function */
+int meson_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
new file mode 100644
index 000000000000..b93b058c8a07
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
@@ -0,0 +1,108 @@
+/*
+ * First generation of pinmux driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2017 Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For this first generation of pinctrl driver every pinmux group can be
+ * enabled by a specific bit in the first register range. When all groups for
+ * a given pin are disabled the pin acts as a GPIO.
+ */
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-meson.h"
+#include "pinctrl-meson8-pmx.h"
+
+/**
+ * meson8_pmx_disable_other_groups() - disable other groups using a given pin
+ *
+ * @pc: meson pin controller device
+ * @pin: number of the pin
+ * @sel_group: index of the selected group, or -1 if none
+ *
+ * The function disables all pinmux groups using a pin except the
+ * selected one. If @sel_group is -1 all groups are disabled, leaving
+ * the pin in GPIO mode.
+ */
+static void meson8_pmx_disable_other_groups(struct meson_pinctrl *pc,
+ unsigned int pin, int sel_group)
+{
+ struct meson_pmx_group *group;
+ struct meson8_pmx_data *pmx_data;
+ int i, j;
+
+ for (i = 0; i < pc->data->num_groups; i++) {
+ group = &pc->data->groups[i];
+ pmx_data = (struct meson8_pmx_data *)group->data;
+ if (pmx_data->is_gpio || i == sel_group)
+ continue;
+
+ for (j = 0; j < group->num_pins; j++) {
+ if (group->pins[j] == pin) {
+ /* We have found a group using the pin */
+ regmap_update_bits(pc->reg_mux,
+ pmx_data->reg * 4,
+ BIT(pmx_data->bit), 0);
+ }
+ }
+ }
+}
+
+static int meson8_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
+ unsigned group_num)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+ struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ struct meson_pmx_group *group = &pc->data->groups[group_num];
+ struct meson8_pmx_data *pmx_data =
+ (struct meson8_pmx_data *)group->data;
+ int i, ret = 0;
+
+ dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
+ group->name);
+
+ /*
+ * Disable groups using the same pin.
+ * The selected group is not disabled to avoid glitches.
+ */
+ for (i = 0; i < group->num_pins; i++)
+ meson8_pmx_disable_other_groups(pc, group->pins[i], group_num);
+
+ /* Function 0 (GPIO) doesn't need any additional setting */
+ if (func_num)
+ ret = regmap_update_bits(pc->reg_mux, pmx_data->reg * 4,
+ BIT(pmx_data->bit),
+ BIT(pmx_data->bit));
+
+ return ret;
+}
+
+static int meson8_pmx_request_gpio(struct pinctrl_dev *pcdev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ meson8_pmx_disable_other_groups(pc, offset, -1);
+
+ return 0;
+}
+
+const struct pinmux_ops meson8_pmx_ops = {
+ .set_mux = meson8_pmx_set_mux,
+ .get_functions_count = meson_pmx_get_funcs_count,
+ .get_function_name = meson_pmx_get_func_name,
+ .get_function_groups = meson_pmx_get_groups,
+ .gpio_request_enable = meson8_pmx_request_gpio,
+};
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.h b/drivers/pinctrl/meson/pinctrl-meson8-pmx.h
new file mode 100644
index 000000000000..47293c28f913
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.h
@@ -0,0 +1,48 @@
+/*
+ * First generation of pinmux driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2017 Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+struct meson8_pmx_data {
+ bool is_gpio;
+ unsigned int reg;
+ unsigned int bit;
+};
+
+#define PMX_DATA(r, b, g) \
+ { \
+ .reg = r, \
+ .bit = b, \
+ .is_gpio = g, \
+ }
+
+#define GROUP(grp, r, b) \
+ { \
+ .name = #grp, \
+ .pins = grp ## _pins, \
+ .num_pins = ARRAY_SIZE(grp ## _pins), \
+ .data = (const struct meson8_pmx_data[]){ \
+ PMX_DATA(r, b, false), \
+ }, \
+ }
+
+#define GPIO_GROUP(gpio) \
+ { \
+ .name = #gpio, \
+ .pins = (const unsigned int[]){ gpio }, \
+ .num_pins = 1, \
+ .data = (const struct meson8_pmx_data[]){ \
+ PMX_DATA(0, 0, true), \
+ }, \
+ }
+
+extern const struct pinmux_ops meson8_pmx_ops;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 970f6f14502c..49c7ce03547b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -13,506 +13,495 @@
#include <dt-bindings/gpio/meson8-gpio.h>
#include "pinctrl-meson.h"
-
-#define AO_OFF 120
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson8_cbus_pins[] = {
- MESON_PIN(GPIOX_0, 0),
- MESON_PIN(GPIOX_1, 0),
- MESON_PIN(GPIOX_2, 0),
- MESON_PIN(GPIOX_3, 0),
- MESON_PIN(GPIOX_4, 0),
- MESON_PIN(GPIOX_5, 0),
- MESON_PIN(GPIOX_6, 0),
- MESON_PIN(GPIOX_7, 0),
- MESON_PIN(GPIOX_8, 0),
- MESON_PIN(GPIOX_9, 0),
- MESON_PIN(GPIOX_10, 0),
- MESON_PIN(GPIOX_11, 0),
- MESON_PIN(GPIOX_12, 0),
- MESON_PIN(GPIOX_13, 0),
- MESON_PIN(GPIOX_14, 0),
- MESON_PIN(GPIOX_15, 0),
- MESON_PIN(GPIOX_16, 0),
- MESON_PIN(GPIOX_17, 0),
- MESON_PIN(GPIOX_18, 0),
- MESON_PIN(GPIOX_19, 0),
- MESON_PIN(GPIOX_20, 0),
- MESON_PIN(GPIOX_21, 0),
- MESON_PIN(GPIOY_0, 0),
- MESON_PIN(GPIOY_1, 0),
- MESON_PIN(GPIOY_2, 0),
- MESON_PIN(GPIOY_3, 0),
- MESON_PIN(GPIOY_4, 0),
- MESON_PIN(GPIOY_5, 0),
- MESON_PIN(GPIOY_6, 0),
- MESON_PIN(GPIOY_7, 0),
- MESON_PIN(GPIOY_8, 0),
- MESON_PIN(GPIOY_9, 0),
- MESON_PIN(GPIOY_10, 0),
- MESON_PIN(GPIOY_11, 0),
- MESON_PIN(GPIOY_12, 0),
- MESON_PIN(GPIOY_13, 0),
- MESON_PIN(GPIOY_14, 0),
- MESON_PIN(GPIOY_15, 0),
- MESON_PIN(GPIOY_16, 0),
- MESON_PIN(GPIODV_0, 0),
- MESON_PIN(GPIODV_1, 0),
- MESON_PIN(GPIODV_2, 0),
- MESON_PIN(GPIODV_3, 0),
- MESON_PIN(GPIODV_4, 0),
- MESON_PIN(GPIODV_5, 0),
- MESON_PIN(GPIODV_6, 0),
- MESON_PIN(GPIODV_7, 0),
- MESON_PIN(GPIODV_8, 0),
- MESON_PIN(GPIODV_9, 0),
- MESON_PIN(GPIODV_10, 0),
- MESON_PIN(GPIODV_11, 0),
- MESON_PIN(GPIODV_12, 0),
- MESON_PIN(GPIODV_13, 0),
- MESON_PIN(GPIODV_14, 0),
- MESON_PIN(GPIODV_15, 0),
- MESON_PIN(GPIODV_16, 0),
- MESON_PIN(GPIODV_17, 0),
- MESON_PIN(GPIODV_18, 0),
- MESON_PIN(GPIODV_19, 0),
- MESON_PIN(GPIODV_20, 0),
- MESON_PIN(GPIODV_21, 0),
- MESON_PIN(GPIODV_22, 0),
- MESON_PIN(GPIODV_23, 0),
- MESON_PIN(GPIODV_24, 0),
- MESON_PIN(GPIODV_25, 0),
- MESON_PIN(GPIODV_26, 0),
- MESON_PIN(GPIODV_27, 0),
- MESON_PIN(GPIODV_28, 0),
- MESON_PIN(GPIODV_29, 0),
- MESON_PIN(GPIOH_0, 0),
- MESON_PIN(GPIOH_1, 0),
- MESON_PIN(GPIOH_2, 0),
- MESON_PIN(GPIOH_3, 0),
- MESON_PIN(GPIOH_4, 0),
- MESON_PIN(GPIOH_5, 0),
- MESON_PIN(GPIOH_6, 0),
- MESON_PIN(GPIOH_7, 0),
- MESON_PIN(GPIOH_8, 0),
- MESON_PIN(GPIOH_9, 0),
- MESON_PIN(GPIOZ_0, 0),
- MESON_PIN(GPIOZ_1, 0),
- MESON_PIN(GPIOZ_2, 0),
- MESON_PIN(GPIOZ_3, 0),
- MESON_PIN(GPIOZ_4, 0),
- MESON_PIN(GPIOZ_5, 0),
- MESON_PIN(GPIOZ_6, 0),
- MESON_PIN(GPIOZ_7, 0),
- MESON_PIN(GPIOZ_8, 0),
- MESON_PIN(GPIOZ_9, 0),
- MESON_PIN(GPIOZ_10, 0),
- MESON_PIN(GPIOZ_11, 0),
- MESON_PIN(GPIOZ_12, 0),
- MESON_PIN(GPIOZ_13, 0),
- MESON_PIN(GPIOZ_14, 0),
- MESON_PIN(CARD_0, 0),
- MESON_PIN(CARD_1, 0),
- MESON_PIN(CARD_2, 0),
- MESON_PIN(CARD_3, 0),
- MESON_PIN(CARD_4, 0),
- MESON_PIN(CARD_5, 0),
- MESON_PIN(CARD_6, 0),
- MESON_PIN(BOOT_0, 0),
- MESON_PIN(BOOT_1, 0),
- MESON_PIN(BOOT_2, 0),
- MESON_PIN(BOOT_3, 0),
- MESON_PIN(BOOT_4, 0),
- MESON_PIN(BOOT_5, 0),
- MESON_PIN(BOOT_6, 0),
- MESON_PIN(BOOT_7, 0),
- MESON_PIN(BOOT_8, 0),
- MESON_PIN(BOOT_9, 0),
- MESON_PIN(BOOT_10, 0),
- MESON_PIN(BOOT_11, 0),
- MESON_PIN(BOOT_12, 0),
- MESON_PIN(BOOT_13, 0),
- MESON_PIN(BOOT_14, 0),
- MESON_PIN(BOOT_15, 0),
- MESON_PIN(BOOT_16, 0),
- MESON_PIN(BOOT_17, 0),
- MESON_PIN(BOOT_18, 0),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+ MESON_PIN(BOOT_18),
};
static const struct pinctrl_pin_desc meson8_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, AO_OFF),
- MESON_PIN(GPIOAO_1, AO_OFF),
- MESON_PIN(GPIOAO_2, AO_OFF),
- MESON_PIN(GPIOAO_3, AO_OFF),
- MESON_PIN(GPIOAO_4, AO_OFF),
- MESON_PIN(GPIOAO_5, AO_OFF),
- MESON_PIN(GPIOAO_6, AO_OFF),
- MESON_PIN(GPIOAO_7, AO_OFF),
- MESON_PIN(GPIOAO_8, AO_OFF),
- MESON_PIN(GPIOAO_9, AO_OFF),
- MESON_PIN(GPIOAO_10, AO_OFF),
- MESON_PIN(GPIOAO_11, AO_OFF),
- MESON_PIN(GPIOAO_12, AO_OFF),
- MESON_PIN(GPIOAO_13, AO_OFF),
- MESON_PIN(GPIO_BSD_EN, AO_OFF),
- MESON_PIN(GPIO_TEST_N, AO_OFF),
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
+ MESON_PIN(GPIO_BSD_EN),
+ MESON_PIN(GPIO_TEST_N),
};
/* bank X */
-static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
-static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
-static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
-static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-
-static const unsigned int sdxc_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sdxc_d13_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
- PIN(GPIOX_3, 0) };
-static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
- PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-
-static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
-
-static const unsigned int uart_tx_a0_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int uart_rx_a0_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int uart_cts_a0_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int uart_rts_a0_pins[] = { PIN(GPIOX_7, 0) };
-
-static const unsigned int uart_tx_a1_pins[] = { PIN(GPIOX_12, 0) };
-static const unsigned int uart_rx_a1_pins[] = { PIN(GPIOX_13, 0) };
-static const unsigned int uart_cts_a1_pins[] = { PIN(GPIOX_14, 0) };
-static const unsigned int uart_rts_a1_pins[] = { PIN(GPIOX_15, 0) };
-
-static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int iso7816_clk_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int iso7816_data_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
-
-static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
-
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_b_x_pins[] = { PIN(GPIOX_11, 0) };
+static const unsigned int sd_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sd_d1_a_pins[] = { GPIOX_1 };
+static const unsigned int sd_d2_a_pins[] = { GPIOX_2 };
+static const unsigned int sd_d3_a_pins[] = { GPIOX_3 };
+static const unsigned int sd_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sd_cmd_a_pins[] = { GPIOX_9 };
+
+static const unsigned int sdxc_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sdxc_d13_a_pins[] = { GPIOX_1, GPIOX_2, GPIOX_3 };
+static const unsigned int sdxc_d47_a_pins[] = { GPIOX_4, GPIOX_5, GPIOX_6,
+ GPIOX_7 };
+static const unsigned int sdxc_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sdxc_cmd_a_pins[] = { GPIOX_9 };
+
+static const unsigned int pcm_out_a_pins[] = { GPIOX_4 };
+static const unsigned int pcm_in_a_pins[] = { GPIOX_5 };
+static const unsigned int pcm_fs_a_pins[] = { GPIOX_6 };
+static const unsigned int pcm_clk_a_pins[] = { GPIOX_7 };
+
+static const unsigned int uart_tx_a0_pins[] = { GPIOX_4 };
+static const unsigned int uart_rx_a0_pins[] = { GPIOX_5 };
+static const unsigned int uart_cts_a0_pins[] = { GPIOX_6 };
+static const unsigned int uart_rts_a0_pins[] = { GPIOX_7 };
+
+static const unsigned int uart_tx_a1_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a1_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a1_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a1_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b0_pins[] = { GPIOX_16 };
+static const unsigned int uart_rx_b0_pins[] = { GPIOX_17 };
+static const unsigned int uart_cts_b0_pins[] = { GPIOX_18 };
+static const unsigned int uart_rts_b0_pins[] = { GPIOX_19 };
+
+static const unsigned int iso7816_det_pins[] = { GPIOX_16 };
+static const unsigned int iso7816_reset_pins[] = { GPIOX_17 };
+static const unsigned int iso7816_clk_pins[] = { GPIOX_18 };
+static const unsigned int iso7816_data_pins[] = { GPIOX_19 };
+
+static const unsigned int i2c_sda_d0_pins[] = { GPIOX_16 };
+static const unsigned int i2c_sck_d0_pins[] = { GPIOX_17 };
+
+static const unsigned int xtal_32k_out_pins[] = { GPIOX_10 };
+static const unsigned int xtal_24m_out_pins[] = { GPIOX_11 };
+
+static const unsigned int pwm_e_pins[] = { GPIOX_10 };
+static const unsigned int pwm_b_x_pins[] = { GPIOX_11 };
/* bank Y */
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOY_2, 0) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int uart_tx_c_pins[] = { GPIOY_0 };
+static const unsigned int uart_rx_c_pins[] = { GPIOY_1 };
+static const unsigned int uart_cts_c_pins[] = { GPIOY_2 };
+static const unsigned int uart_rts_c_pins[] = { GPIOY_3 };
-static const unsigned int pcm_out_b_pins[] = { PIN(GPIOY_4, 0) };
-static const unsigned int pcm_in_b_pins[] = { PIN(GPIOY_5, 0) };
-static const unsigned int pcm_fs_b_pins[] = { PIN(GPIOY_6, 0) };
-static const unsigned int pcm_clk_b_pins[] = { PIN(GPIOY_7, 0) };
+static const unsigned int pcm_out_b_pins[] = { GPIOY_4 };
+static const unsigned int pcm_in_b_pins[] = { GPIOY_5 };
+static const unsigned int pcm_fs_b_pins[] = { GPIOY_6 };
+static const unsigned int pcm_clk_b_pins[] = { GPIOY_7 };
-static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIOY_1, 0) };
+static const unsigned int i2c_sda_c0_pins[] = { GPIOY_0 };
+static const unsigned int i2c_sck_c0_pins[] = { GPIOY_1 };
-static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, 0) };
+static const unsigned int pwm_a_y_pins[] = { GPIOY_16 };
-static const unsigned int i2s_out_ch45_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int i2s_out_ch23_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int i2s_out_ch01_pins[] = { PIN(GPIOY_4, 0) };
-static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOY_5, 0) };
-static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOY_6, 0) };
-static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOY_7, 0) };
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOY_8, 0) };
-static const unsigned int i2s_out_ch78_pins[] = { PIN(GPIOY_9, 0) };
+static const unsigned int i2s_out_ch45_pins[] = { GPIOY_0 };
+static const unsigned int i2s_out_ch23_pins[] = { GPIOY_1 };
+static const unsigned int i2s_out_ch01_pins[] = { GPIOY_4 };
+static const unsigned int i2s_in_ch01_pins[] = { GPIOY_5 };
+static const unsigned int i2s_lr_clk_in_pins[] = { GPIOY_6 };
+static const unsigned int i2s_ao_clk_in_pins[] = { GPIOY_7 };
+static const unsigned int i2s_am_clk_pins[] = { GPIOY_8 };
+static const unsigned int i2s_out_ch78_pins[] = { GPIOY_9 };
-static const unsigned int spdif_in_pins[] = { PIN(GPIOY_2, 0) };
-static const unsigned int spdif_out_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int spdif_in_pins[] = { GPIOY_2 };
+static const unsigned int spdif_out_pins[] = { GPIOY_3 };
/* bank DV */
-static const unsigned int dvin_rgb_pins[] = { PIN(GPIODV_0, 0), PIN(GPIODV_1, 0),
- PIN(GPIODV_2, 0), PIN(GPIODV_3, 0),
- PIN(GPIODV_4, 0), PIN(GPIODV_5, 0),
- PIN(GPIODV_6, 0), PIN(GPIODV_7, 0),
- PIN(GPIODV_8, 0), PIN(GPIODV_9, 0),
- PIN(GPIODV_10, 0), PIN(GPIODV_11, 0),
- PIN(GPIODV_12, 0), PIN(GPIODV_13, 0),
- PIN(GPIODV_14, 0), PIN(GPIODV_15, 0),
- PIN(GPIODV_16, 0), PIN(GPIODV_17, 0),
- PIN(GPIODV_18, 0), PIN(GPIODV_19, 0),
- PIN(GPIODV_20, 0), PIN(GPIODV_21, 0),
- PIN(GPIODV_22, 0), PIN(GPIODV_23, 0) };
-static const unsigned int dvin_vs_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int dvin_hs_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int dvin_clk_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int dvin_de_pins[] = { PIN(GPIODV_27, 0) };
-
-static const unsigned int enc_0_pins[] = { PIN(GPIODV_0, 0) };
-static const unsigned int enc_1_pins[] = { PIN(GPIODV_1, 0) };
-static const unsigned int enc_2_pins[] = { PIN(GPIODV_2, 0) };
-static const unsigned int enc_3_pins[] = { PIN(GPIODV_3, 0) };
-static const unsigned int enc_4_pins[] = { PIN(GPIODV_4, 0) };
-static const unsigned int enc_5_pins[] = { PIN(GPIODV_5, 0) };
-static const unsigned int enc_6_pins[] = { PIN(GPIODV_6, 0) };
-static const unsigned int enc_7_pins[] = { PIN(GPIODV_7, 0) };
-static const unsigned int enc_8_pins[] = { PIN(GPIODV_8, 0) };
-static const unsigned int enc_9_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int enc_10_pins[] = { PIN(GPIODV_10, 0) };
-static const unsigned int enc_11_pins[] = { PIN(GPIODV_11, 0) };
-static const unsigned int enc_12_pins[] = { PIN(GPIODV_12, 0) };
-static const unsigned int enc_13_pins[] = { PIN(GPIODV_13, 0) };
-static const unsigned int enc_14_pins[] = { PIN(GPIODV_14, 0) };
-static const unsigned int enc_15_pins[] = { PIN(GPIODV_15, 0) };
-static const unsigned int enc_16_pins[] = { PIN(GPIODV_16, 0) };
-static const unsigned int enc_17_pins[] = { PIN(GPIODV_17, 0) };
-
-static const unsigned int uart_tx_b1_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int uart_rx_b1_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int uart_cts_b1_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int uart_rts_b1_pins[] = { PIN(GPIODV_27, 0) };
-
-static const unsigned int vga_vs_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int vga_hs_pins[] = { PIN(GPIODV_25, 0) };
-
-static const unsigned int pwm_c_dv9_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int pwm_c_dv29_pins[] = { PIN(GPIODV_29, 0) };
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
+static const unsigned int dvin_rgb_pins[] = {
+ GPIODV_0, GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5,
+ GPIODV_6, GPIODV_7, GPIODV_8, GPIODV_9, GPIODV_10, GPIODV_11,
+ GPIODV_12, GPIODV_13, GPIODV_14, GPIODV_15, GPIODV_16, GPIODV_17,
+ GPIODV_18, GPIODV_19, GPIODV_20, GPIODV_21, GPIODV_22, GPIODV_23
+};
+static const unsigned int dvin_vs_pins[] = { GPIODV_24 };
+static const unsigned int dvin_hs_pins[] = { GPIODV_25 };
+static const unsigned int dvin_clk_pins[] = { GPIODV_26 };
+static const unsigned int dvin_de_pins[] = { GPIODV_27 };
+
+static const unsigned int enc_0_pins[] = { GPIODV_0 };
+static const unsigned int enc_1_pins[] = { GPIODV_1 };
+static const unsigned int enc_2_pins[] = { GPIODV_2 };
+static const unsigned int enc_3_pins[] = { GPIODV_3 };
+static const unsigned int enc_4_pins[] = { GPIODV_4 };
+static const unsigned int enc_5_pins[] = { GPIODV_5 };
+static const unsigned int enc_6_pins[] = { GPIODV_6 };
+static const unsigned int enc_7_pins[] = { GPIODV_7 };
+static const unsigned int enc_8_pins[] = { GPIODV_8 };
+static const unsigned int enc_9_pins[] = { GPIODV_9 };
+static const unsigned int enc_10_pins[] = { GPIODV_10 };
+static const unsigned int enc_11_pins[] = { GPIODV_11 };
+static const unsigned int enc_12_pins[] = { GPIODV_12 };
+static const unsigned int enc_13_pins[] = { GPIODV_13 };
+static const unsigned int enc_14_pins[] = { GPIODV_14 };
+static const unsigned int enc_15_pins[] = { GPIODV_15 };
+static const unsigned int enc_16_pins[] = { GPIODV_16 };
+static const unsigned int enc_17_pins[] = { GPIODV_17 };
+
+static const unsigned int uart_tx_b1_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b1_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b1_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b1_pins[] = { GPIODV_27 };
+
+static const unsigned int vga_vs_pins[] = { GPIODV_24 };
+static const unsigned int vga_hs_pins[] = { GPIODV_25 };
+
+static const unsigned int pwm_c_dv9_pins[] = { GPIODV_9 };
+static const unsigned int pwm_c_dv29_pins[] = { GPIODV_29 };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
/* bank H */
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
-static const unsigned int hdmi_cec_pins[] = { PIN(GPIOH_3, 0) };
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int hdmi_cec_pins[] = { GPIOH_3 };
-static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int spi_miso_0_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOH_6, 0) };
+static const unsigned int spi_ss0_0_pins[] = { GPIOH_3 };
+static const unsigned int spi_miso_0_pins[] = { GPIOH_4 };
+static const unsigned int spi_mosi_0_pins[] = { GPIOH_5 };
+static const unsigned int spi_sclk_0_pins[] = { GPIOH_6 };
-static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
+static const unsigned int i2c_sda_d1_pins[] = { GPIOH_7 };
+static const unsigned int i2c_sck_d1_pins[] = { GPIOH_8 };
/* bank Z */
-static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOZ_9, 0) };
-static const unsigned int spi_ss1_1_pins[] = { PIN(GPIOZ_10, 0) };
-static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOZ_11, 0) };
-static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOZ_12, 0) };
-static const unsigned int spi_miso_1_pins[] = { PIN(GPIOZ_13, 0) };
-static const unsigned int spi_ss2_1_pins[] = { PIN(GPIOZ_14, 0) };
-
-static const unsigned int eth_tx_clk_50m_pins[] = { PIN(GPIOZ_4, 0) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_5, 0) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_6, 0) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_7, 0) };
-static const unsigned int eth_rx_clk_in_pins[] = { PIN(GPIOZ_8, 0) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_9, 0) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_10, 0) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_11, 0) };
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_12, 0) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_13, 0) };
-
-static const unsigned int i2c_sda_a0_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a0_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIOZ_2, 0) };
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIOZ_3, 0) };
-
-static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOZ_4, 0) };
-static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOZ_5, 0) };
-
-static const unsigned int i2c_sda_a1_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a1_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int i2c_sda_a2_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a2_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int pwm_a_z0_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int pwm_a_z7_pins[] = { PIN(GPIOZ_7, 0) };
-static const unsigned int pwm_b_z_pins[] = { PIN(GPIOZ_1, 0) };
-static const unsigned int pwm_c_z_pins[] = { PIN(GPIOZ_8, 0) };
+static const unsigned int spi_ss0_1_pins[] = { GPIOZ_9 };
+static const unsigned int spi_ss1_1_pins[] = { GPIOZ_10 };
+static const unsigned int spi_sclk_1_pins[] = { GPIOZ_11 };
+static const unsigned int spi_mosi_1_pins[] = { GPIOZ_12 };
+static const unsigned int spi_miso_1_pins[] = { GPIOZ_13 };
+static const unsigned int spi_ss2_1_pins[] = { GPIOZ_14 };
+
+static const unsigned int eth_tx_clk_50m_pins[] = { GPIOZ_4 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_5 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_6 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rx_clk_in_pins[] = { GPIOZ_8 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_9 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_10 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_11 };
+static const unsigned int eth_mdio_pins[] = { GPIOZ_12 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_13 };
+
+static const unsigned int i2c_sda_a0_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a0_pins[] = { GPIOZ_1 };
+
+static const unsigned int i2c_sda_b_pins[] = { GPIOZ_2 };
+static const unsigned int i2c_sck_b_pins[] = { GPIOZ_3 };
+
+static const unsigned int i2c_sda_c1_pins[] = { GPIOZ_4 };
+static const unsigned int i2c_sck_c1_pins[] = { GPIOZ_5 };
+
+static const unsigned int i2c_sda_a1_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a1_pins[] = { GPIOZ_1 };
+
+static const unsigned int i2c_sda_a2_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a2_pins[] = { GPIOZ_1 };
+
+static const unsigned int pwm_a_z0_pins[] = { GPIOZ_0 };
+static const unsigned int pwm_a_z7_pins[] = { GPIOZ_7 };
+static const unsigned int pwm_b_z_pins[] = { GPIOZ_1 };
+static const unsigned int pwm_c_z_pins[] = { GPIOZ_8 };
/* bank BOOT */
-static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
-static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
-static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
-static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
-static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
-static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
- PIN(BOOT_3, 0) };
-static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
- PIN(BOOT_2, 0), PIN(BOOT_3, 0),
- PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
-static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, 0) };
-static const unsigned int nand_ce2_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int nand_ce3_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
+static const unsigned int sd_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sd_d1_c_pins[] = { BOOT_1 };
+static const unsigned int sd_d2_c_pins[] = { BOOT_2 };
+static const unsigned int sd_d3_c_pins[] = { BOOT_3 };
+static const unsigned int sd_cmd_c_pins[] = { BOOT_16 };
+static const unsigned int sd_clk_c_pins[] = { BOOT_17 };
+
+static const unsigned int sdxc_d0_c_pins[] = { BOOT_0};
+static const unsigned int sdxc_d13_c_pins[] = { BOOT_1, BOOT_2, BOOT_3 };
+static const unsigned int sdxc_d47_c_pins[] = { BOOT_4, BOOT_5, BOOT_6,
+ BOOT_7 };
+static const unsigned int sdxc_cmd_c_pins[] = { BOOT_16 };
+static const unsigned int sdxc_clk_c_pins[] = { BOOT_17 };
+
+static const unsigned int nand_io_pins[] = {
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7
+};
+static const unsigned int nand_io_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_io_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_io_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_clk_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+static const unsigned int nand_ce2_pins[] = { BOOT_16 };
+static const unsigned int nand_ce3_pins[] = { BOOT_17 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_18 };
/* bank CARD */
-static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
-static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
-static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
-static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
-
-static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
- PIN(CARD_5, 0) };
-static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d1_b_pins[] = { CARD_0 };
+static const unsigned int sd_d0_b_pins[] = { CARD_1 };
+static const unsigned int sd_clk_b_pins[] = { CARD_2 };
+static const unsigned int sd_cmd_b_pins[] = { CARD_3 };
+static const unsigned int sd_d3_b_pins[] = { CARD_4 };
+static const unsigned int sd_d2_b_pins[] = { CARD_5 };
+
+static const unsigned int sdxc_d13_b_pins[] = { CARD_0, CARD_4, CARD_5 };
+static const unsigned int sdxc_d0_b_pins[] = { CARD_1 };
+static const unsigned int sdxc_clk_b_pins[] = { CARD_2 };
+static const unsigned int sdxc_cmd_b_pins[] = { CARD_3 };
/* bank AO */
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
-static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int remote_output_ao_pins[] = { PIN(GPIOAO_13, AO_OFF) };
+static const unsigned int remote_input_pins[] = { GPIOAO_7 };
+static const unsigned int remote_output_ao_pins[] = { GPIOAO_13 };
-static const unsigned int i2c_slave_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_slave_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int i2c_slave_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = { GPIOAO_5 };
-static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
+static const unsigned int uart_tx_ao_b0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b0_pins[] = { GPIOAO_1 };
-static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int uart_tx_ao_b1_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b1_pins[] = { GPIOAO_5 };
-static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int i2c_mst_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_mst_sda_ao_pins[] = { GPIOAO_5 };
-static const unsigned int pwm_f_ao_pins[] = { PIN(GPIO_TEST_N, AO_OFF) };
+static const unsigned int pwm_f_ao_pins[] = { GPIO_TEST_N };
-static const unsigned int i2s_am_clk_out_ao_pins[] = { PIN(GPIOAO_8, AO_OFF) };
-static const unsigned int i2s_ao_clk_out_ao_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_out_ao_pins[] = { PIN(GPIOAO_10, AO_OFF) };
-static const unsigned int i2s_out_ch01_ao_pins[] = { PIN(GPIOAO_11, AO_OFF) };
+static const unsigned int i2s_am_clk_out_ao_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_ao_clk_out_ao_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_out_ao_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
-static const unsigned int hdmi_cec_ao_pins[] = { PIN(GPIOAO_12, AO_OFF) };
+static const unsigned int hdmi_cec_ao_pins[] = { GPIOAO_12 };
static struct meson_pmx_group meson8_cbus_groups[] = {
- GPIO_GROUP(GPIOX_0, 0),
- GPIO_GROUP(GPIOX_1, 0),
- GPIO_GROUP(GPIOX_2, 0),
- GPIO_GROUP(GPIOX_3, 0),
- GPIO_GROUP(GPIOX_4, 0),
- GPIO_GROUP(GPIOX_5, 0),
- GPIO_GROUP(GPIOX_6, 0),
- GPIO_GROUP(GPIOX_7, 0),
- GPIO_GROUP(GPIOX_8, 0),
- GPIO_GROUP(GPIOX_9, 0),
- GPIO_GROUP(GPIOX_10, 0),
- GPIO_GROUP(GPIOX_11, 0),
- GPIO_GROUP(GPIOX_12, 0),
- GPIO_GROUP(GPIOX_13, 0),
- GPIO_GROUP(GPIOX_14, 0),
- GPIO_GROUP(GPIOX_15, 0),
- GPIO_GROUP(GPIOX_16, 0),
- GPIO_GROUP(GPIOX_17, 0),
- GPIO_GROUP(GPIOX_18, 0),
- GPIO_GROUP(GPIOX_19, 0),
- GPIO_GROUP(GPIOX_20, 0),
- GPIO_GROUP(GPIOX_21, 0),
- GPIO_GROUP(GPIOY_0, 0),
- GPIO_GROUP(GPIOY_1, 0),
- GPIO_GROUP(GPIOY_2, 0),
- GPIO_GROUP(GPIOY_3, 0),
- GPIO_GROUP(GPIOY_4, 0),
- GPIO_GROUP(GPIOY_5, 0),
- GPIO_GROUP(GPIOY_6, 0),
- GPIO_GROUP(GPIOY_7, 0),
- GPIO_GROUP(GPIOY_8, 0),
- GPIO_GROUP(GPIOY_9, 0),
- GPIO_GROUP(GPIOY_10, 0),
- GPIO_GROUP(GPIOY_11, 0),
- GPIO_GROUP(GPIOY_12, 0),
- GPIO_GROUP(GPIOY_13, 0),
- GPIO_GROUP(GPIOY_14, 0),
- GPIO_GROUP(GPIOY_15, 0),
- GPIO_GROUP(GPIOY_16, 0),
- GPIO_GROUP(GPIODV_0, 0),
- GPIO_GROUP(GPIODV_1, 0),
- GPIO_GROUP(GPIODV_2, 0),
- GPIO_GROUP(GPIODV_3, 0),
- GPIO_GROUP(GPIODV_4, 0),
- GPIO_GROUP(GPIODV_5, 0),
- GPIO_GROUP(GPIODV_6, 0),
- GPIO_GROUP(GPIODV_7, 0),
- GPIO_GROUP(GPIODV_8, 0),
- GPIO_GROUP(GPIODV_9, 0),
- GPIO_GROUP(GPIODV_10, 0),
- GPIO_GROUP(GPIODV_11, 0),
- GPIO_GROUP(GPIODV_12, 0),
- GPIO_GROUP(GPIODV_13, 0),
- GPIO_GROUP(GPIODV_14, 0),
- GPIO_GROUP(GPIODV_15, 0),
- GPIO_GROUP(GPIODV_16, 0),
- GPIO_GROUP(GPIODV_17, 0),
- GPIO_GROUP(GPIODV_18, 0),
- GPIO_GROUP(GPIODV_19, 0),
- GPIO_GROUP(GPIODV_20, 0),
- GPIO_GROUP(GPIODV_21, 0),
- GPIO_GROUP(GPIODV_22, 0),
- GPIO_GROUP(GPIODV_23, 0),
- GPIO_GROUP(GPIODV_24, 0),
- GPIO_GROUP(GPIODV_25, 0),
- GPIO_GROUP(GPIODV_26, 0),
- GPIO_GROUP(GPIODV_27, 0),
- GPIO_GROUP(GPIODV_28, 0),
- GPIO_GROUP(GPIODV_29, 0),
- GPIO_GROUP(GPIOH_0, 0),
- GPIO_GROUP(GPIOH_1, 0),
- GPIO_GROUP(GPIOH_2, 0),
- GPIO_GROUP(GPIOH_3, 0),
- GPIO_GROUP(GPIOH_4, 0),
- GPIO_GROUP(GPIOH_5, 0),
- GPIO_GROUP(GPIOH_6, 0),
- GPIO_GROUP(GPIOH_7, 0),
- GPIO_GROUP(GPIOH_8, 0),
- GPIO_GROUP(GPIOH_9, 0),
- GPIO_GROUP(GPIOZ_0, 0),
- GPIO_GROUP(GPIOZ_1, 0),
- GPIO_GROUP(GPIOZ_2, 0),
- GPIO_GROUP(GPIOZ_3, 0),
- GPIO_GROUP(GPIOZ_4, 0),
- GPIO_GROUP(GPIOZ_5, 0),
- GPIO_GROUP(GPIOZ_6, 0),
- GPIO_GROUP(GPIOZ_7, 0),
- GPIO_GROUP(GPIOZ_8, 0),
- GPIO_GROUP(GPIOZ_9, 0),
- GPIO_GROUP(GPIOZ_10, 0),
- GPIO_GROUP(GPIOZ_11, 0),
- GPIO_GROUP(GPIOZ_12, 0),
- GPIO_GROUP(GPIOZ_13, 0),
- GPIO_GROUP(GPIOZ_14, 0),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_18),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -727,22 +716,22 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
};
static struct meson_pmx_group meson8_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
+ GPIO_GROUP(GPIO_BSD_EN),
+ GPIO_GROUP(GPIO_TEST_N),
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
@@ -1041,24 +1030,23 @@ static struct meson_pmx_func meson8_aobus_functions[] = {
};
static struct meson_bank meson8_cbus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_16, 0), 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, 0), PIN(GPIODV_29, 0), 65, 94, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 29, 38, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("Z", PIN(GPIOZ_0, 0), PIN(GPIOZ_14, 0), 14, 28, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 58, 64, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_21, 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", GPIOY_0, GPIOY_16, 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 65, 94, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 29, 38, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("Z", GPIOZ_0, GPIOZ_14, 14, 28, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
+ BANK("CARD", CARD_0, CARD_6, 58, 64, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", BOOT_0, BOOT_18, 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
static struct meson_bank meson8_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.name = "cbus-banks",
- .pin_base = 0,
.pins = meson8_cbus_pins,
.groups = meson8_cbus_groups,
.funcs = meson8_cbus_functions,
@@ -1067,11 +1055,11 @@ struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8_cbus_functions),
.num_banks = ARRAY_SIZE(meson8_cbus_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.name = "ao-bank",
- .pin_base = 120,
.pins = meson8_aobus_pins,
.groups = meson8_aobus_groups,
.funcs = meson8_aobus_functions,
@@ -1080,4 +1068,26 @@ struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
.num_banks = ARRAY_SIZE(meson8_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson8_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson8-cbus-pinctrl",
+ .data = &meson8_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8-aobus-pinctrl",
+ .data = &meson8_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson8_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson8-pinctrl",
+ .of_match_table = meson8_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson8_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 71f216b5b0b9..5bd808dc81e1 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -14,408 +14,405 @@
#include <dt-bindings/gpio/meson8b-gpio.h>
#include "pinctrl-meson.h"
-
-#define AO_OFF 130
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson8b_cbus_pins[] = {
- MESON_PIN(GPIOX_0, 0),
- MESON_PIN(GPIOX_1, 0),
- MESON_PIN(GPIOX_2, 0),
- MESON_PIN(GPIOX_3, 0),
- MESON_PIN(GPIOX_4, 0),
- MESON_PIN(GPIOX_5, 0),
- MESON_PIN(GPIOX_6, 0),
- MESON_PIN(GPIOX_7, 0),
- MESON_PIN(GPIOX_8, 0),
- MESON_PIN(GPIOX_9, 0),
- MESON_PIN(GPIOX_10, 0),
- MESON_PIN(GPIOX_11, 0),
- MESON_PIN(GPIOX_16, 0),
- MESON_PIN(GPIOX_17, 0),
- MESON_PIN(GPIOX_18, 0),
- MESON_PIN(GPIOX_19, 0),
- MESON_PIN(GPIOX_20, 0),
- MESON_PIN(GPIOX_21, 0),
-
- MESON_PIN(GPIOY_0, 0),
- MESON_PIN(GPIOY_1, 0),
- MESON_PIN(GPIOY_3, 0),
- MESON_PIN(GPIOY_6, 0),
- MESON_PIN(GPIOY_7, 0),
- MESON_PIN(GPIOY_8, 0),
- MESON_PIN(GPIOY_9, 0),
- MESON_PIN(GPIOY_10, 0),
- MESON_PIN(GPIOY_11, 0),
- MESON_PIN(GPIOY_12, 0),
- MESON_PIN(GPIOY_13, 0),
- MESON_PIN(GPIOY_14, 0),
-
- MESON_PIN(GPIODV_9, 0),
- MESON_PIN(GPIODV_24, 0),
- MESON_PIN(GPIODV_25, 0),
- MESON_PIN(GPIODV_26, 0),
- MESON_PIN(GPIODV_27, 0),
- MESON_PIN(GPIODV_28, 0),
- MESON_PIN(GPIODV_29, 0),
-
- MESON_PIN(GPIOH_0, 0),
- MESON_PIN(GPIOH_1, 0),
- MESON_PIN(GPIOH_2, 0),
- MESON_PIN(GPIOH_3, 0),
- MESON_PIN(GPIOH_4, 0),
- MESON_PIN(GPIOH_5, 0),
- MESON_PIN(GPIOH_6, 0),
- MESON_PIN(GPIOH_7, 0),
- MESON_PIN(GPIOH_8, 0),
- MESON_PIN(GPIOH_9, 0),
-
- MESON_PIN(CARD_0, 0),
- MESON_PIN(CARD_1, 0),
- MESON_PIN(CARD_2, 0),
- MESON_PIN(CARD_3, 0),
- MESON_PIN(CARD_4, 0),
- MESON_PIN(CARD_5, 0),
- MESON_PIN(CARD_6, 0),
-
- MESON_PIN(BOOT_0, 0),
- MESON_PIN(BOOT_1, 0),
- MESON_PIN(BOOT_2, 0),
- MESON_PIN(BOOT_3, 0),
- MESON_PIN(BOOT_4, 0),
- MESON_PIN(BOOT_5, 0),
- MESON_PIN(BOOT_6, 0),
- MESON_PIN(BOOT_7, 0),
- MESON_PIN(BOOT_8, 0),
- MESON_PIN(BOOT_9, 0),
- MESON_PIN(BOOT_10, 0),
- MESON_PIN(BOOT_11, 0),
- MESON_PIN(BOOT_12, 0),
- MESON_PIN(BOOT_13, 0),
- MESON_PIN(BOOT_14, 0),
- MESON_PIN(BOOT_15, 0),
- MESON_PIN(BOOT_16, 0),
- MESON_PIN(BOOT_17, 0),
- MESON_PIN(BOOT_18, 0),
-
- MESON_PIN(DIF_0_P, 0),
- MESON_PIN(DIF_0_N, 0),
- MESON_PIN(DIF_1_P, 0),
- MESON_PIN(DIF_1_N, 0),
- MESON_PIN(DIF_2_P, 0),
- MESON_PIN(DIF_2_N, 0),
- MESON_PIN(DIF_3_P, 0),
- MESON_PIN(DIF_3_N, 0),
- MESON_PIN(DIF_4_P, 0),
- MESON_PIN(DIF_4_N, 0),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+ MESON_PIN(BOOT_18),
+
+ MESON_PIN(DIF_0_P),
+ MESON_PIN(DIF_0_N),
+ MESON_PIN(DIF_1_P),
+ MESON_PIN(DIF_1_N),
+ MESON_PIN(DIF_2_P),
+ MESON_PIN(DIF_2_N),
+ MESON_PIN(DIF_3_P),
+ MESON_PIN(DIF_3_N),
+ MESON_PIN(DIF_4_P),
+ MESON_PIN(DIF_4_N),
};
static const struct pinctrl_pin_desc meson8b_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, AO_OFF),
- MESON_PIN(GPIOAO_1, AO_OFF),
- MESON_PIN(GPIOAO_2, AO_OFF),
- MESON_PIN(GPIOAO_3, AO_OFF),
- MESON_PIN(GPIOAO_4, AO_OFF),
- MESON_PIN(GPIOAO_5, AO_OFF),
- MESON_PIN(GPIOAO_6, AO_OFF),
- MESON_PIN(GPIOAO_7, AO_OFF),
- MESON_PIN(GPIOAO_8, AO_OFF),
- MESON_PIN(GPIOAO_9, AO_OFF),
- MESON_PIN(GPIOAO_10, AO_OFF),
- MESON_PIN(GPIOAO_11, AO_OFF),
- MESON_PIN(GPIOAO_12, AO_OFF),
- MESON_PIN(GPIOAO_13, AO_OFF),
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
/*
* The following 2 pins are not mentionned in the public datasheet
* According to this datasheet, they can't be used with the gpio
* interrupt controller
*/
- MESON_PIN(GPIO_BSD_EN, AO_OFF),
- MESON_PIN(GPIO_TEST_N, AO_OFF),
+ MESON_PIN(GPIO_BSD_EN),
+ MESON_PIN(GPIO_TEST_N),
};
/* bank X */
-static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
-static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
-static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
-static const unsigned int sdxc_d0_0_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
- PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_d13_0_a_pins[] = { PIN(GPIOX_5, 0), PIN(GPIOX_6, 0),
- PIN(GPIOX_7, 0) };
-static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
-static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int sdxc_d0_1_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sdxc_d13_1_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
- PIN(GPIOX_3, 0) };
-static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int pwm_vs_0_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_vs_1_pins[] = { PIN(GPIOX_11, 0) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int uart_tx_b1_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int uart_rx_b1_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int uart_cts_b1_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int uart_rts_b1_pins[] = { PIN(GPIOX_20, 0) };
-
-static const unsigned int iso7816_0_clk_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int iso7816_0_data_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int spi_miso_0_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int iso7816_1_clk_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int iso7816_1_data_pins[] = { PIN(GPIOX_19, 0) };
-static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOX_20, 0) };
-
-static const unsigned int tsin_clk_b_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int tsin_sop_b_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int tsin_d0_b_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_b_pins[] = { PIN(GPIOX_11, 0) };
-static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int tsin_d_valid_b_pins[] = { PIN(GPIOX_20, 0) };
+static const unsigned int sd_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sd_d1_a_pins[] = { GPIOX_1 };
+static const unsigned int sd_d2_a_pins[] = { GPIOX_2 };
+static const unsigned int sd_d3_a_pins[] = { GPIOX_3 };
+static const unsigned int sdxc_d0_0_a_pins[] = { GPIOX_4 };
+static const unsigned int sdxc_d47_a_pins[] = { GPIOX_4, GPIOX_5,
+ GPIOX_6, GPIOX_7 };
+static const unsigned int sdxc_d13_0_a_pins[] = { GPIOX_5, GPIOX_6,
+ GPIOX_7 };
+static const unsigned int sd_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sd_cmd_a_pins[] = { GPIOX_9 };
+static const unsigned int xtal_32k_out_pins[] = { GPIOX_10 };
+static const unsigned int xtal_24m_out_pins[] = { GPIOX_11 };
+static const unsigned int uart_tx_b0_pins[] = { GPIOX_16 };
+static const unsigned int uart_rx_b0_pins[] = { GPIOX_17 };
+static const unsigned int uart_cts_b0_pins[] = { GPIOX_18 };
+static const unsigned int uart_rts_b0_pins[] = { GPIOX_19 };
+
+static const unsigned int sdxc_d0_1_a_pins[] = { GPIOX_0 };
+static const unsigned int sdxc_d13_1_a_pins[] = { GPIOX_1, GPIOX_2,
+ GPIOX_3 };
+static const unsigned int pcm_out_a_pins[] = { GPIOX_4 };
+static const unsigned int pcm_in_a_pins[] = { GPIOX_5 };
+static const unsigned int pcm_fs_a_pins[] = { GPIOX_6 };
+static const unsigned int pcm_clk_a_pins[] = { GPIOX_7 };
+static const unsigned int sdxc_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sdxc_cmd_a_pins[] = { GPIOX_9 };
+static const unsigned int pwm_vs_0_pins[] = { GPIOX_10 };
+static const unsigned int pwm_e_pins[] = { GPIOX_10 };
+static const unsigned int pwm_vs_1_pins[] = { GPIOX_11 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_4 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_5 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_6 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_7 };
+static const unsigned int uart_tx_b1_pins[] = { GPIOX_8 };
+static const unsigned int uart_rx_b1_pins[] = { GPIOX_9 };
+static const unsigned int uart_cts_b1_pins[] = { GPIOX_10 };
+static const unsigned int uart_rts_b1_pins[] = { GPIOX_20 };
+
+static const unsigned int iso7816_0_clk_pins[] = { GPIOX_6 };
+static const unsigned int iso7816_0_data_pins[] = { GPIOX_7 };
+static const unsigned int spi_sclk_0_pins[] = { GPIOX_8 };
+static const unsigned int spi_miso_0_pins[] = { GPIOX_9 };
+static const unsigned int spi_mosi_0_pins[] = { GPIOX_10 };
+static const unsigned int iso7816_det_pins[] = { GPIOX_16 };
+static const unsigned int iso7816_reset_pins[] = { GPIOX_17 };
+static const unsigned int iso7816_1_clk_pins[] = { GPIOX_18 };
+static const unsigned int iso7816_1_data_pins[] = { GPIOX_19 };
+static const unsigned int spi_ss0_0_pins[] = { GPIOX_20 };
+
+static const unsigned int tsin_clk_b_pins[] = { GPIOX_8 };
+static const unsigned int tsin_sop_b_pins[] = { GPIOX_9 };
+static const unsigned int tsin_d0_b_pins[] = { GPIOX_10 };
+static const unsigned int pwm_b_pins[] = { GPIOX_11 };
+static const unsigned int i2c_sda_d0_pins[] = { GPIOX_16 };
+static const unsigned int i2c_sck_d0_pins[] = { GPIOX_17 };
+static const unsigned int tsin_d_valid_b_pins[] = { GPIOX_20 };
/* bank Y */
-static const unsigned int tsin_d_valid_a_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int tsin_sop_a_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int tsin_d17_a_pins[] = { PIN(GPIOY_6, 0), PIN(GPIOY_7, 0),
- PIN(GPIOY_10, 0), PIN(GPIOY_11, 0),
- PIN(GPIOY_12, 0), PIN(GPIOY_13, 0),
- PIN(GPIOY_14, 0) };
-static const unsigned int tsin_clk_a_pins[] = { PIN(GPIOY_8, 0) };
-static const unsigned int tsin_d0_a_pins[] = { PIN(GPIOY_9, 0) };
+static const unsigned int tsin_d_valid_a_pins[] = { GPIOY_0 };
+static const unsigned int tsin_sop_a_pins[] = { GPIOY_1 };
+static const unsigned int tsin_d17_a_pins[] = {
+ GPIOY_6, GPIOY_7, GPIOY_10, GPIOY_11, GPIOY_12, GPIOY_13, GPIOY_14,
+};
+static const unsigned int tsin_clk_a_pins[] = { GPIOY_8 };
+static const unsigned int tsin_d0_a_pins[] = { GPIOY_9 };
-static const unsigned int spdif_out_0_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int spdif_out_0_pins[] = { GPIOY_3 };
-static const unsigned int xtal_24m_pins[] = { PIN(GPIOY_3, 0) };
-static const unsigned int iso7816_2_clk_pins[] = { PIN(GPIOY_13, 0) };
-static const unsigned int iso7816_2_data_pins[] = { PIN(GPIOY_14, 0) };
+static const unsigned int xtal_24m_pins[] = { GPIOY_3 };
+static const unsigned int iso7816_2_clk_pins[] = { GPIOY_13 };
+static const unsigned int iso7816_2_data_pins[] = { GPIOY_14 };
/* bank DV */
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int pwm_c0_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+static const unsigned int pwm_c0_pins[] = { GPIODV_29 };
-static const unsigned int pwm_vs_2_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int pwm_vs_3_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int pwm_vs_4_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int pwm_vs_2_pins[] = { GPIODV_9 };
+static const unsigned int pwm_vs_3_pins[] = { GPIODV_28 };
+static const unsigned int pwm_vs_4_pins[] = { GPIODV_29 };
-static const unsigned int xtal24_out_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int xtal24_out_pins[] = { GPIODV_29 };
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIODV_27, 0) };
+static const unsigned int uart_tx_c_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_c_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_c_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_c_pins[] = { GPIODV_27 };
-static const unsigned int pwm_c1_pins[] = { PIN(GPIODV_9, 0) };
+static const unsigned int pwm_c1_pins[] = { GPIODV_9 };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int i2c_sda_b0_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int i2c_sck_b0_pins[] = { PIN(GPIODV_27, 0) };
-static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_b0_pins[] = { GPIODV_26 };
+static const unsigned int i2c_sck_b0_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_c0_pins[] = { GPIODV_28 };
+static const unsigned int i2c_sck_c0_pins[] = { GPIODV_29 };
/* bank H */
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
-static const unsigned int hdmi_cec_0_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int eth_txd1_0_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int eth_txd0_0_pins[] = { PIN(GPIOH_6, 0) };
-static const unsigned int clk_24m_out_pins[] = { PIN(GPIOH_9, 0) };
-
-static const unsigned int spi_ss1_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int spi_ss2_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int spi_miso_1_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOH_6, 0) };
-
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOH_8, 0) };
-static const unsigned int eth_tx_clk_pins[] = { PIN(GPIOH_9, 0) };
-
-static const unsigned int i2c_sda_b1_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int i2c_sck_b1_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOH_6, 0) };
-static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int hdmi_cec_0_pins[] = { GPIOH_3 };
+static const unsigned int eth_txd1_0_pins[] = { GPIOH_5 };
+static const unsigned int eth_txd0_0_pins[] = { GPIOH_6 };
+static const unsigned int clk_24m_out_pins[] = { GPIOH_9 };
+
+static const unsigned int spi_ss1_pins[] = { GPIOH_0 };
+static const unsigned int spi_ss2_pins[] = { GPIOH_1 };
+static const unsigned int spi_ss0_1_pins[] = { GPIOH_3 };
+static const unsigned int spi_miso_1_pins[] = { GPIOH_4 };
+static const unsigned int spi_mosi_1_pins[] = { GPIOH_5 };
+static const unsigned int spi_sclk_1_pins[] = { GPIOH_6 };
+
+static const unsigned int eth_txd3_pins[] = { GPIOH_7 };
+static const unsigned int eth_txd2_pins[] = { GPIOH_8 };
+static const unsigned int eth_tx_clk_pins[] = { GPIOH_9 };
+
+static const unsigned int i2c_sda_b1_pins[] = { GPIOH_3 };
+static const unsigned int i2c_sck_b1_pins[] = { GPIOH_4 };
+static const unsigned int i2c_sda_c1_pins[] = { GPIOH_5 };
+static const unsigned int i2c_sck_c1_pins[] = { GPIOH_6 };
+static const unsigned int i2c_sda_d1_pins[] = { GPIOH_7 };
+static const unsigned int i2c_sck_d1_pins[] = { GPIOH_8 };
/* bank BOOT */
-static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
- PIN(BOOT_2, 0), PIN(BOOT_3, 0),
- PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
-static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
-static const unsigned int nand_dqs_15_pins[] = { PIN(BOOT_15, 0) };
-static const unsigned int nand_dqs_18_pins[] = { PIN(BOOT_18, 0) };
-
-static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
-static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
- PIN(BOOT_3, 0) };
-static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
-
-static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
-static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
-static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
-static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
-static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_10, 0) };
+static const unsigned int nand_io_pins[] = {
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7
+};
+static const unsigned int nand_io_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_io_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_io_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_clk_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_15_pins[] = { BOOT_15 };
+static const unsigned int nand_dqs_18_pins[] = { BOOT_18 };
+
+static const unsigned int sdxc_d0_c_pins[] = { BOOT_0};
+static const unsigned int sdxc_d13_c_pins[] = { BOOT_1, BOOT_2,
+ BOOT_3 };
+static const unsigned int sdxc_d47_c_pins[] = { BOOT_4, BOOT_5,
+ BOOT_6, BOOT_7 };
+static const unsigned int sdxc_clk_c_pins[] = { BOOT_8 };
+static const unsigned int sdxc_cmd_c_pins[] = { BOOT_10 };
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_18 };
+
+static const unsigned int sd_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sd_d1_c_pins[] = { BOOT_1 };
+static const unsigned int sd_d2_c_pins[] = { BOOT_2 };
+static const unsigned int sd_d3_c_pins[] = { BOOT_3 };
+static const unsigned int sd_cmd_c_pins[] = { BOOT_8 };
+static const unsigned int sd_clk_c_pins[] = { BOOT_10 };
/* bank CARD */
-static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
-static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
-static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
-static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
-
-static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
- PIN(CARD_5, 0) };
-static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d1_b_pins[] = { CARD_0 };
+static const unsigned int sd_d0_b_pins[] = { CARD_1 };
+static const unsigned int sd_clk_b_pins[] = { CARD_2 };
+static const unsigned int sd_cmd_b_pins[] = { CARD_3 };
+static const unsigned int sd_d3_b_pins[] = { CARD_4 };
+static const unsigned int sd_d2_b_pins[] = { CARD_5 };
+
+static const unsigned int sdxc_d13_b_pins[] = { CARD_0, CARD_4,
+ CARD_5 };
+static const unsigned int sdxc_d0_b_pins[] = { CARD_1 };
+static const unsigned int sdxc_clk_b_pins[] = { CARD_2 };
+static const unsigned int sdxc_cmd_b_pins[] = { CARD_3 };
/* bank AO */
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int clk_32k_in_out_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int hdmi_cec_1_pins[] = { PIN(GPIOAO_12, AO_OFF) };
-static const unsigned int ir_blaster_pins[] = { PIN(GPIOAO_13, AO_OFF) };
-
-static const unsigned int pwm_c2_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int i2c_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int ir_remote_out_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int i2s_am_clk_out_pins[] = { PIN(GPIOAO_8, AO_OFF) };
-static const unsigned int i2s_ao_clk_out_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_out_pins[] = { PIN(GPIOAO_10, AO_OFF) };
-static const unsigned int i2s_out_01_pins[] = { PIN(GPIOAO_11, AO_OFF) };
-
-static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int spdif_out_1_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-
-static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOAO_10, AO_OFF) };
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int i2c_mst_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_mst_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int clk_32k_in_out_pins[] = { GPIOAO_6 };
+static const unsigned int remote_input_pins[] = { GPIOAO_7 };
+static const unsigned int hdmi_cec_1_pins[] = { GPIOAO_12 };
+static const unsigned int ir_blaster_pins[] = { GPIOAO_13 };
+
+static const unsigned int pwm_c2_pins[] = { GPIOAO_3 };
+static const unsigned int i2c_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int ir_remote_out_pins[] = { GPIOAO_7 };
+static const unsigned int i2s_am_clk_out_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_ao_clk_out_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_out_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_01_pins[] = { GPIOAO_11 };
+
+static const unsigned int uart_tx_ao_b0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b0_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b1_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b1_pins[] = { GPIOAO_5 };
+static const unsigned int spdif_out_1_pins[] = { GPIOAO_6 };
+
+static const unsigned int i2s_in_ch01_pins[] = { GPIOAO_6 };
+static const unsigned int i2s_ao_clk_in_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_in_pins[] = { GPIOAO_10 };
/* bank DIF */
-static const unsigned int eth_rxd1_pins[] = { PIN(DIF_0_P, 0) };
-static const unsigned int eth_rxd0_pins[] = { PIN(DIF_0_N, 0) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(DIF_1_P, 0) };
-static const unsigned int eth_rx_clk_pins[] = { PIN(DIF_1_N, 0) };
-static const unsigned int eth_txd0_1_pins[] = { PIN(DIF_2_P, 0) };
-static const unsigned int eth_txd1_1_pins[] = { PIN(DIF_2_N, 0) };
-static const unsigned int eth_tx_en_pins[] = { PIN(DIF_3_P, 0) };
-static const unsigned int eth_ref_clk_pins[] = { PIN(DIF_3_N, 0) };
-static const unsigned int eth_mdc_pins[] = { PIN(DIF_4_P, 0) };
-static const unsigned int eth_mdio_en_pins[] = { PIN(DIF_4_N, 0) };
+static const unsigned int eth_rxd1_pins[] = { DIF_0_P };
+static const unsigned int eth_rxd0_pins[] = { DIF_0_N };
+static const unsigned int eth_rx_dv_pins[] = { DIF_1_P };
+static const unsigned int eth_rx_clk_pins[] = { DIF_1_N };
+static const unsigned int eth_txd0_1_pins[] = { DIF_2_P };
+static const unsigned int eth_txd1_1_pins[] = { DIF_2_N };
+static const unsigned int eth_tx_en_pins[] = { DIF_3_P };
+static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
+static const unsigned int eth_mdc_pins[] = { DIF_4_P };
+static const unsigned int eth_mdio_en_pins[] = { DIF_4_N };
static struct meson_pmx_group meson8b_cbus_groups[] = {
- GPIO_GROUP(GPIOX_0, 0),
- GPIO_GROUP(GPIOX_1, 0),
- GPIO_GROUP(GPIOX_2, 0),
- GPIO_GROUP(GPIOX_3, 0),
- GPIO_GROUP(GPIOX_4, 0),
- GPIO_GROUP(GPIOX_5, 0),
- GPIO_GROUP(GPIOX_6, 0),
- GPIO_GROUP(GPIOX_7, 0),
- GPIO_GROUP(GPIOX_8, 0),
- GPIO_GROUP(GPIOX_9, 0),
- GPIO_GROUP(GPIOX_10, 0),
- GPIO_GROUP(GPIOX_11, 0),
- GPIO_GROUP(GPIOX_16, 0),
- GPIO_GROUP(GPIOX_17, 0),
- GPIO_GROUP(GPIOX_18, 0),
- GPIO_GROUP(GPIOX_19, 0),
- GPIO_GROUP(GPIOX_20, 0),
- GPIO_GROUP(GPIOX_21, 0),
-
- GPIO_GROUP(GPIOY_0, 0),
- GPIO_GROUP(GPIOY_1, 0),
- GPIO_GROUP(GPIOY_3, 0),
- GPIO_GROUP(GPIOY_6, 0),
- GPIO_GROUP(GPIOY_7, 0),
- GPIO_GROUP(GPIOY_8, 0),
- GPIO_GROUP(GPIOY_9, 0),
- GPIO_GROUP(GPIOY_10, 0),
- GPIO_GROUP(GPIOY_11, 0),
- GPIO_GROUP(GPIOY_12, 0),
- GPIO_GROUP(GPIOY_13, 0),
- GPIO_GROUP(GPIOY_14, 0),
-
- GPIO_GROUP(GPIODV_9, 0),
- GPIO_GROUP(GPIODV_24, 0),
- GPIO_GROUP(GPIODV_25, 0),
- GPIO_GROUP(GPIODV_26, 0),
- GPIO_GROUP(GPIODV_27, 0),
- GPIO_GROUP(GPIODV_28, 0),
- GPIO_GROUP(GPIODV_29, 0),
-
- GPIO_GROUP(GPIOH_0, 0),
- GPIO_GROUP(GPIOH_1, 0),
- GPIO_GROUP(GPIOH_2, 0),
- GPIO_GROUP(GPIOH_3, 0),
- GPIO_GROUP(GPIOH_4, 0),
- GPIO_GROUP(GPIOH_5, 0),
- GPIO_GROUP(GPIOH_6, 0),
- GPIO_GROUP(GPIOH_7, 0),
- GPIO_GROUP(GPIOH_8, 0),
- GPIO_GROUP(GPIOH_9, 0),
-
- GPIO_GROUP(DIF_0_P, 0),
- GPIO_GROUP(DIF_0_N, 0),
- GPIO_GROUP(DIF_1_P, 0),
- GPIO_GROUP(DIF_1_N, 0),
- GPIO_GROUP(DIF_2_P, 0),
- GPIO_GROUP(DIF_2_N, 0),
- GPIO_GROUP(DIF_3_P, 0),
- GPIO_GROUP(DIF_3_N, 0),
- GPIO_GROUP(DIF_4_P, 0),
- GPIO_GROUP(DIF_4_N, 0),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+
+ GPIO_GROUP(DIF_0_P),
+ GPIO_GROUP(DIF_0_N),
+ GPIO_GROUP(DIF_1_P),
+ GPIO_GROUP(DIF_1_N),
+ GPIO_GROUP(DIF_2_P),
+ GPIO_GROUP(DIF_2_N),
+ GPIO_GROUP(DIF_3_P),
+ GPIO_GROUP(DIF_3_N),
+ GPIO_GROUP(DIF_4_P),
+ GPIO_GROUP(DIF_4_N),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -577,22 +574,22 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
};
static struct meson_pmx_group meson8b_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
+ GPIO_GROUP(GPIO_BSD_EN),
+ GPIO_GROUP(GPIO_TEST_N),
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
@@ -887,30 +884,29 @@ static struct meson_pmx_func meson8b_aobus_functions[] = {
};
static struct meson_bank meson8b_cbus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 97, 118, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 80, 96, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_9, 0), PIN(GPIODV_29, 0), 59, 79, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 14, 23, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 43, 49, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 24, 42, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_21, 97, 118, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", GPIOY_0, GPIOY_14, 80, 96, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_9, GPIODV_29, 59, 79, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 14, 23, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("CARD", CARD_0, CARD_6, 43, 49, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", BOOT_0, BOOT_18, 24, 42, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
/*
* The following bank is not mentionned in the public datasheet
* There is no information whether it can be used with the gpio
* interrupt controller
*/
- BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
+ BANK("DIF", DIF_0_P, DIF_4_N, -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
static struct meson_bank meson8b_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first lastc irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.name = "cbus-banks",
- .pin_base = 0,
.pins = meson8b_cbus_pins,
.groups = meson8b_cbus_groups,
.funcs = meson8b_cbus_functions,
@@ -919,11 +915,11 @@ struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8b_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8b_cbus_functions),
.num_banks = ARRAY_SIZE(meson8b_cbus_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 130,
.pins = meson8b_aobus_pins,
.groups = meson8b_aobus_groups,
.funcs = meson8b_aobus_functions,
@@ -932,4 +928,26 @@ struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8b_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson8b_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson8b-cbus-pinctrl",
+ .data = &meson8b_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8b-aobus-pinctrl",
+ .data = &meson8b_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson8b_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson8b-pinctrl",
+ .of_match_table = meson8b_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson8b_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index d754a9b10e19..bdb8d174efef 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
{
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int reg = OUTPUT_EN;
- unsigned int mask;
+ unsigned int mask, val, ret;
armada_37xx_update_reg(&reg, offset);
mask = BIT(offset);
- return regmap_update_bits(info->regmap, reg, mask, mask);
+ ret = regmap_update_bits(info->regmap, reg, mask, mask);
+
+ if (ret)
+ return ret;
+
+ reg = OUTPUT_VAL;
+ val = value ? mask : 0;
+ regmap_update_bits(info->regmap, reg, mask, val);
+
+ return 0;
}
static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -576,6 +585,19 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
val |= (BIT(d->hwirq % GPIO_PER_REG));
break;
+ case IRQ_TYPE_EDGE_BOTH: {
+ u32 in_val, in_reg = INPUT_VAL;
+
+ armada_37xx_irq_update_reg(&in_reg, d);
+ regmap_read(info->regmap, in_reg, &in_val);
+
+ /* Set initial polarity based on current input level. */
+ if (in_val & d->mask)
+ val |= d->mask; /* falling */
+ else
+ val &= ~d->mask; /* rising */
+ break;
+ }
default:
spin_unlock_irqrestore(&info->irq_lock, flags);
return -EINVAL;
@@ -586,6 +608,40 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
+ u32 pin_idx)
+{
+ u32 reg_idx = pin_idx / GPIO_PER_REG;
+ u32 bit_num = pin_idx % GPIO_PER_REG;
+ u32 p, l, ret;
+ unsigned long flags;
+
+ regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
+
+ spin_lock_irqsave(&info->irq_lock, flags);
+ p = readl(info->base + IRQ_POL + 4 * reg_idx);
+ if ((p ^ l) & (1 << bit_num)) {
+ /*
+ * For the gpios which are used for both-edge irqs, when their
+ * interrupts happen, their input levels are changed,
+ * yet their interrupt polarities are kept in old values, we
+ * should synchronize their interrupt polarities; for example,
+ * at first a gpio's input level is low and its interrupt
+ * polarity control is "Detect rising edge", then the gpio has
+ * a interrupt , its level turns to high, we should change its
+ * polarity control to "Detect falling edge" correspondingly.
+ */
+ p ^= 1 << bit_num;
+ writel(p, info->base + IRQ_POL + 4 * reg_idx);
+ ret = 0;
+ } else {
+ /* Spurious irq */
+ ret = -1;
+ }
+
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+ return ret;
+}
static void armada_37xx_irq_handler(struct irq_desc *desc)
{
@@ -609,6 +665,23 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
u32 hwirq = ffs(status) - 1;
u32 virq = irq_find_mapping(d, hwirq +
i * GPIO_PER_REG);
+ u32 t = irq_get_trigger_type(virq);
+
+ if ((t & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ /* Swap polarity (race with GPIO line) */
+ if (armada_37xx_edge_both_irq_swap_pol(info,
+ hwirq + i * GPIO_PER_REG)) {
+ /*
+ * For spurious irq, which gpio level
+ * is not as expected after incoming
+ * edge, just ack the gpio irq.
+ */
+ writel(1 << hwirq,
+ info->base +
+ IRQ_STATUS + 4 * i);
+ continue;
+ }
+ }
generic_handle_irq(virq);
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 8eaa25c3384f..b4f7f8a458ea 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -49,6 +49,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEEP_HARDWARE_STATE, "sleep hardware state", NULL, false),
PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
+ PCONFDUMP(PIN_CONFIG_SKEW_DELAY, "skew delay", NULL, true),
};
static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
@@ -181,6 +182,7 @@ static const struct pinconf_generic_params dt_params[] = {
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
{ "sleep-hardware-state", PIN_CONFIG_SLEEP_HARDWARE_STATE, 0 },
{ "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
+ { "skew-delay", PIN_CONFIG_SKEW_DELAY, 0 },
};
/**
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 8ed2a90a8d44..61d830c2bc17 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -753,7 +753,7 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
return false;
}
-int amd_gpio_suspend(struct device *dev)
+static int amd_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
@@ -772,7 +772,7 @@ int amd_gpio_suspend(struct device *dev)
return 0;
}
-int amd_gpio_resume(struct device *dev)
+static int amd_gpio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 39e6221e7100..c11b8f14d841 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -13,6 +13,8 @@
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -22,6 +24,19 @@
#define DRIVER_NAME "pinctrl-gemini"
/**
+ * struct gemini_pin_conf - information about configuring a pin
+ * @pin: the pin number
+ * @reg: config register
+ * @mask: the bits affecting the configuration of the pin
+ */
+struct gemini_pin_conf {
+ unsigned int pin;
+ u32 reg;
+ u32 mask;
+};
+
+/**
+ * struct gemini_pmx - state holder for the gemini pin controller
* @dev: a pointer back to containing device
* @virtbase: the offset to the controller in virtual memory
* @map: regmap to access registers
@@ -29,6 +44,8 @@
* @is_3516: whether the SoC/package is the 3516 variant
* @flash_pin: whether the flash pin (extended pins for parallel
* flash) is set
+ * @confs: pin config information
+ * @nconfs: number of pin config information items
*/
struct gemini_pmx {
struct device *dev;
@@ -37,6 +54,8 @@ struct gemini_pmx {
bool is_3512;
bool is_3516;
bool flash_pin;
+ const struct gemini_pin_conf *confs;
+ unsigned int nconfs;
};
/**
@@ -57,6 +76,13 @@ struct gemini_pin_group {
u32 value;
};
+/* Some straight-forward control registers */
+#define GLOBAL_WORD_ID 0x00
+#define GLOBAL_STATUS 0x04
+#define GLOBAL_STATUS_FLPIN BIT(20)
+#define GLOBAL_GMAC_CTRL_SKEW 0x1c
+#define GLOBAL_GMAC0_DATA_SKEW 0x20
+#define GLOBAL_GMAC1_DATA_SKEW 0x24
/*
* Global Miscellaneous Control Register
* This register controls all Gemini pad/pin multiplexing
@@ -69,10 +95,14 @@ struct gemini_pin_group {
* DISABLED again. So you select a flash configuration once, and then
* you are stuck with it.
*/
-#define GLOBAL_WORD_ID 0x00
-#define GLOBAL_STATUS 0x04
-#define GLOBAL_STATUS_FLPIN BIT(20)
#define GLOBAL_MISC_CTRL 0x30
+#define GEMINI_GMAC_IOSEL_MASK GENMASK(28, 27)
+/* Not really used */
+#define GEMINI_GMAC_IOSEL_GMAC0_GMII BIT(28)
+/* Activated with GMAC1 */
+#define GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII BIT(27)
+/* This will be the default */
+#define GEMINI_GMAC_IOSEL_GMAC0_RGMII_GMAC1_GPIO2 0
#define TVC_CLK_PAD_ENABLE BIT(20)
#define PCI_CLK_PAD_ENABLE BIT(17)
#define LPC_CLK_PAD_ENABLE BIT(16)
@@ -86,8 +116,8 @@ struct gemini_pin_group {
#define NAND_PADS_DISABLE BIT(2)
#define PFLASH_PADS_DISABLE BIT(1)
#define SFLASH_PADS_DISABLE BIT(0)
-#define PADS_MASK (GENMASK(9, 0) | BIT(16) | BIT(17) | BIT(20))
-#define PADS_MAXBIT 20
+#define PADS_MASK (GENMASK(9, 0) | BIT(16) | BIT(17) | BIT(20) | BIT(27))
+#define PADS_MAXBIT 27
/* Ordered by bit index */
static const char * const gemini_padgroups[] = {
@@ -106,6 +136,8 @@ static const char * const gemini_padgroups[] = {
"PCI CLK",
NULL, NULL,
"TVC CLK",
+ NULL, NULL, NULL, NULL, NULL,
+ "GMAC1",
};
static const struct pinctrl_pin_desc gemini_3512_pins[] = {
@@ -493,9 +525,12 @@ static const unsigned int usb_3512_pins[] = {
};
/* GMII, ethernet pins */
-static const unsigned int gmii_3512_pins[] = {
- 311, 240, 258, 276, 294, 312, 241, 259, 277, 295, 313, 242, 260, 278, 296,
- 315, 297, 279, 261, 243, 316, 298, 280, 262, 244, 317, 299, 281
+static const unsigned int gmii_gmac0_3512_pins[] = {
+ 240, 241, 242, 258, 259, 260, 276, 277, 278, 294, 295, 311, 312, 313
+};
+
+static const unsigned int gmii_gmac1_3512_pins[] = {
+ 243, 244, 261, 262, 279, 280, 281, 296, 297, 298, 299, 315, 316, 317
};
static const unsigned int pci_3512_pins[] = {
@@ -645,10 +680,10 @@ static const unsigned int gpio1c_3512_pins[] = {
/* The GPIO1D (28-31) pins overlap with LCD and TVC */
static const unsigned int gpio1d_3512_pins[] = { 246, 319, 301, 283 };
-/* The GPIO2A (0-3) pins overlap with GMII and extended parallel flash */
+/* The GPIO2A (0-3) pins overlap with GMII GMAC1 and extended parallel flash */
static const unsigned int gpio2a_3512_pins[] = { 315, 297, 279, 261 };
-/* The GPIO2B (4-7) pins overlap with GMII, extended parallel flash and LCD */
+/* The GPIO2B (4-7) pins overlap with GMII GMAC1, extended parallel flash and LCD */
static const unsigned int gpio2b_3512_pins[] = { 262, 244, 317, 299 };
/* The GPIO2C (8-31) pins overlap with PCI */
@@ -715,9 +750,16 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.num_pins = ARRAY_SIZE(usb_3512_pins),
},
{
- .name = "gmiigrp",
- .pins = gmii_3512_pins,
- .num_pins = ARRAY_SIZE(gmii_3512_pins),
+ .name = "gmii_gmac0_grp",
+ .pins = gmii_gmac0_3512_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac0_3512_pins),
+ },
+ {
+ .name = "gmii_gmac1_grp",
+ .pins = gmii_gmac1_3512_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac1_3512_pins),
+ /* Bring out RGMII on the GMAC1 pins */
+ .value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "pcigrp",
@@ -931,14 +973,15 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.name = "gpio2agrp",
.pins = gpio2a_3512_pins,
.num_pins = ARRAY_SIZE(gpio2a_3512_pins),
- /* Conflict with GMII and extended parallel flash */
+ .mask = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
+ /* Conflict with GMII GMAC1 and extended parallel flash */
},
{
.name = "gpio2bgrp",
.pins = gpio2b_3512_pins,
.num_pins = ARRAY_SIZE(gpio2b_3512_pins),
- /* Conflict with GMII, extended parallel flash and LCD */
- .mask = LCD_PADS_ENABLE,
+ /* Conflict with GMII GMAC1, extended parallel flash and LCD */
+ .mask = LCD_PADS_ENABLE | GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "gpio2cgrp",
@@ -1418,9 +1461,12 @@ static const unsigned int usb_3516_pins[] = {
};
/* GMII, ethernet pins */
-static const unsigned int gmii_3516_pins[] = {
- 306, 307, 308, 309, 310, 325, 326, 327, 328, 329, 330, 345, 346, 347,
- 348, 349, 350, 351, 367, 368, 369, 370, 371, 386, 387, 389, 390, 391
+static const unsigned int gmii_gmac0_3516_pins[] = {
+ 306, 307, 325, 326, 327, 328, 345, 346, 347, 348, 367, 368, 386, 387
+};
+
+static const unsigned int gmii_gmac1_3516_pins[] = {
+ 308, 309, 310, 329, 330, 349, 350, 351, 369, 370, 371, 389, 390, 391
};
static const unsigned int pci_3516_pins[] = {
@@ -1562,10 +1608,10 @@ static const unsigned int gpio1c_3516_pins[] = {
/* The GPIO1D (28-31) pins overlap with TVC */
static const unsigned int gpio1d_3516_pins[] = { 353, 311, 394, 374 };
-/* The GPIO2A (0-3) pins overlap with GMII and extended parallel flash */
+/* The GPIO2A (0-3) pins overlap with GMII GMAC1 and extended parallel flash */
static const unsigned int gpio2a_3516_pins[] = { 308, 369, 389, 329 };
-/* The GPIO2B (4-7) pins overlap with GMII, extended parallel flash and LCD */
+/* The GPIO2B (4-7) pins overlap with GMII GMAC1, extended parallel flash and LCD */
static const unsigned int gpio2b_3516_pins[] = { 391, 351, 310, 371 };
/* The GPIO2C (8-31) pins overlap with PCI */
@@ -1637,9 +1683,16 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.num_pins = ARRAY_SIZE(usb_3516_pins),
},
{
- .name = "gmiigrp",
- .pins = gmii_3516_pins,
- .num_pins = ARRAY_SIZE(gmii_3516_pins),
+ .name = "gmii_gmac0_grp",
+ .pins = gmii_gmac0_3516_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac0_3516_pins),
+ },
+ {
+ .name = "gmii_gmac1_grp",
+ .pins = gmii_gmac1_3516_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac1_3516_pins),
+ /* Bring out RGMII on the GMAC1 pins */
+ .value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "pcigrp",
@@ -1838,14 +1891,15 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.name = "gpio2agrp",
.pins = gpio2a_3516_pins,
.num_pins = ARRAY_SIZE(gpio2a_3516_pins),
- /* Conflict with GMII and extended parallel flash */
+ .mask = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
+ /* Conflict with GMII GMAC1 and extended parallel flash */
},
{
.name = "gpio2bgrp",
.pins = gpio2b_3516_pins,
.num_pins = ARRAY_SIZE(gpio2b_3516_pins),
- /* Conflict with GMII, extended parallel flash and LCD */
- .mask = LCD_PADS_ENABLE,
+ /* Conflict with GMII GMAC1, extended parallel flash and LCD */
+ .mask = LCD_PADS_ENABLE | GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "gpio2cgrp",
@@ -1918,73 +1972,13 @@ static void gemini_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, " " DRIVER_NAME);
}
-static int gemini_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **map,
- unsigned int *reserved_maps,
- unsigned int *num_maps)
-{
- int ret;
- const char *function = NULL;
- const char *group;
- struct property *prop;
-
- ret = of_property_read_string(np, "function", &function);
- if (ret < 0)
- return ret;
-
- ret = of_property_count_strings(np, "groups");
- if (ret < 0)
- return ret;
-
- ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
- num_maps, ret);
- if (ret < 0)
- return ret;
-
- of_property_for_each_string(np, "groups", prop, group) {
- ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps,
- num_maps, group, function);
- if (ret < 0)
- return ret;
- pr_debug("ADDED FUNCTION %s <-> GROUP %s\n",
- function, group);
- }
-
- return 0;
-}
-
-static int gemini_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- unsigned int *num_maps)
-{
- unsigned int reserved_maps = 0;
- struct device_node *np;
- int ret;
-
- *map = NULL;
- *num_maps = 0;
-
- for_each_child_of_node(np_config, np) {
- ret = gemini_pinctrl_dt_subnode_to_map(pctldev, np, map,
- &reserved_maps, num_maps);
- if (ret < 0) {
- pinctrl_utils_free_map(pctldev, *map, *num_maps);
- return ret;
- }
- }
-
- return 0;
-};
-
static const struct pinctrl_ops gemini_pctrl_ops = {
.get_groups_count = gemini_get_groups_count,
.get_group_name = gemini_get_group_name,
.get_group_pins = gemini_get_group_pins,
.pin_dbg_show = gemini_pin_dbg_show,
- .dt_node_to_map = gemini_pinctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
};
/**
@@ -2008,7 +2002,7 @@ static const char * const icegrps[] = { "icegrp" };
static const char * const idegrps[] = { "idegrp" };
static const char * const satagrps[] = { "satagrp" };
static const char * const usbgrps[] = { "usbgrp" };
-static const char * const gmiigrps[] = { "gmiigrp" };
+static const char * const gmiigrps[] = { "gmii_gmac0_grp", "gmii_gmac1_grp" };
static const char * const pcigrps[] = { "pcigrp" };
static const char * const lpcgrps[] = { "lpcgrp" };
static const char * const lcdgrps[] = { "lcdgrp" };
@@ -2074,6 +2068,16 @@ static const struct gemini_pmx_func gemini_pmx_functions[] = {
.num_groups = ARRAY_SIZE(satagrps),
},
{
+ .name = "usb",
+ .groups = usbgrps,
+ .num_groups = ARRAY_SIZE(usbgrps),
+ },
+ {
+ .name = "gmii",
+ .groups = gmiigrps,
+ .num_groups = ARRAY_SIZE(gmiigrps),
+ },
+ {
.name = "pci",
.groups = pcigrps,
.num_groups = ARRAY_SIZE(pcigrps),
@@ -2251,10 +2255,155 @@ static const struct pinmux_ops gemini_pmx_ops = {
.set_mux = gemini_pmx_set_mux,
};
+#define GEMINI_CFGPIN(_n, _r, _lb, _hb) { \
+ .pin = _n, \
+ .reg = _r, \
+ .mask = GENMASK(_hb, _lb) \
+}
+
+static const struct gemini_pin_conf gemini_confs_3512[] = {
+ GEMINI_CFGPIN(259, GLOBAL_GMAC_CTRL_SKEW, 0, 3), /* GMAC0 RXDV */
+ GEMINI_CFGPIN(277, GLOBAL_GMAC_CTRL_SKEW, 4, 7), /* GMAC0 RXC */
+ GEMINI_CFGPIN(241, GLOBAL_GMAC_CTRL_SKEW, 8, 11), /* GMAC0 TXEN */
+ GEMINI_CFGPIN(312, GLOBAL_GMAC_CTRL_SKEW, 12, 15), /* GMAC0 TXC */
+ GEMINI_CFGPIN(298, GLOBAL_GMAC_CTRL_SKEW, 16, 19), /* GMAC1 RXDV */
+ GEMINI_CFGPIN(280, GLOBAL_GMAC_CTRL_SKEW, 20, 23), /* GMAC1 RXC */
+ GEMINI_CFGPIN(316, GLOBAL_GMAC_CTRL_SKEW, 24, 27), /* GMAC1 TXEN */
+ GEMINI_CFGPIN(243, GLOBAL_GMAC_CTRL_SKEW, 28, 31), /* GMAC1 TXC */
+ GEMINI_CFGPIN(295, GLOBAL_GMAC0_DATA_SKEW, 0, 3), /* GMAC0 RXD0 */
+ GEMINI_CFGPIN(313, GLOBAL_GMAC0_DATA_SKEW, 4, 7), /* GMAC0 RXD1 */
+ GEMINI_CFGPIN(242, GLOBAL_GMAC0_DATA_SKEW, 8, 11), /* GMAC0 RXD2 */
+ GEMINI_CFGPIN(260, GLOBAL_GMAC0_DATA_SKEW, 12, 15), /* GMAC0 RXD3 */
+ GEMINI_CFGPIN(294, GLOBAL_GMAC0_DATA_SKEW, 16, 19), /* GMAC0 TXD0 */
+ GEMINI_CFGPIN(276, GLOBAL_GMAC0_DATA_SKEW, 20, 23), /* GMAC0 TXD1 */
+ GEMINI_CFGPIN(258, GLOBAL_GMAC0_DATA_SKEW, 24, 27), /* GMAC0 TXD2 */
+ GEMINI_CFGPIN(240, GLOBAL_GMAC0_DATA_SKEW, 28, 31), /* GMAC0 TXD3 */
+ GEMINI_CFGPIN(262, GLOBAL_GMAC1_DATA_SKEW, 0, 3), /* GMAC1 RXD0 */
+ GEMINI_CFGPIN(244, GLOBAL_GMAC1_DATA_SKEW, 4, 7), /* GMAC1 RXD1 */
+ GEMINI_CFGPIN(317, GLOBAL_GMAC1_DATA_SKEW, 8, 11), /* GMAC1 RXD2 */
+ GEMINI_CFGPIN(299, GLOBAL_GMAC1_DATA_SKEW, 12, 15), /* GMAC1 RXD3 */
+ GEMINI_CFGPIN(261, GLOBAL_GMAC1_DATA_SKEW, 16, 19), /* GMAC1 TXD0 */
+ GEMINI_CFGPIN(279, GLOBAL_GMAC1_DATA_SKEW, 20, 23), /* GMAC1 TXD1 */
+ GEMINI_CFGPIN(297, GLOBAL_GMAC1_DATA_SKEW, 24, 27), /* GMAC1 TXD2 */
+ GEMINI_CFGPIN(315, GLOBAL_GMAC1_DATA_SKEW, 28, 31), /* GMAC1 TXD3 */
+};
+
+static const struct gemini_pin_conf gemini_confs_3516[] = {
+ GEMINI_CFGPIN(347, GLOBAL_GMAC_CTRL_SKEW, 0, 3), /* GMAC0 RXDV */
+ GEMINI_CFGPIN(386, GLOBAL_GMAC_CTRL_SKEW, 4, 7), /* GMAC0 RXC */
+ GEMINI_CFGPIN(307, GLOBAL_GMAC_CTRL_SKEW, 8, 11), /* GMAC0 TXEN */
+ GEMINI_CFGPIN(327, GLOBAL_GMAC_CTRL_SKEW, 12, 15), /* GMAC0 TXC */
+ GEMINI_CFGPIN(309, GLOBAL_GMAC_CTRL_SKEW, 16, 19), /* GMAC1 RXDV */
+ GEMINI_CFGPIN(390, GLOBAL_GMAC_CTRL_SKEW, 20, 23), /* GMAC1 RXC */
+ GEMINI_CFGPIN(370, GLOBAL_GMAC_CTRL_SKEW, 24, 27), /* GMAC1 TXEN */
+ GEMINI_CFGPIN(350, GLOBAL_GMAC_CTRL_SKEW, 28, 31), /* GMAC1 TXC */
+ GEMINI_CFGPIN(367, GLOBAL_GMAC0_DATA_SKEW, 0, 3), /* GMAC0 RXD0 */
+ GEMINI_CFGPIN(348, GLOBAL_GMAC0_DATA_SKEW, 4, 7), /* GMAC0 RXD1 */
+ GEMINI_CFGPIN(387, GLOBAL_GMAC0_DATA_SKEW, 8, 11), /* GMAC0 RXD2 */
+ GEMINI_CFGPIN(328, GLOBAL_GMAC0_DATA_SKEW, 12, 15), /* GMAC0 RXD3 */
+ GEMINI_CFGPIN(306, GLOBAL_GMAC0_DATA_SKEW, 16, 19), /* GMAC0 TXD0 */
+ GEMINI_CFGPIN(325, GLOBAL_GMAC0_DATA_SKEW, 20, 23), /* GMAC0 TXD1 */
+ GEMINI_CFGPIN(346, GLOBAL_GMAC0_DATA_SKEW, 24, 27), /* GMAC0 TXD2 */
+ GEMINI_CFGPIN(326, GLOBAL_GMAC0_DATA_SKEW, 28, 31), /* GMAC0 TXD3 */
+ GEMINI_CFGPIN(391, GLOBAL_GMAC1_DATA_SKEW, 0, 3), /* GMAC1 RXD0 */
+ GEMINI_CFGPIN(351, GLOBAL_GMAC1_DATA_SKEW, 4, 7), /* GMAC1 RXD1 */
+ GEMINI_CFGPIN(310, GLOBAL_GMAC1_DATA_SKEW, 8, 11), /* GMAC1 RXD2 */
+ GEMINI_CFGPIN(371, GLOBAL_GMAC1_DATA_SKEW, 12, 15), /* GMAC1 RXD3 */
+ GEMINI_CFGPIN(329, GLOBAL_GMAC1_DATA_SKEW, 16, 19), /* GMAC1 TXD0 */
+ GEMINI_CFGPIN(389, GLOBAL_GMAC1_DATA_SKEW, 20, 23), /* GMAC1 TXD1 */
+ GEMINI_CFGPIN(369, GLOBAL_GMAC1_DATA_SKEW, 24, 27), /* GMAC1 TXD2 */
+ GEMINI_CFGPIN(308, GLOBAL_GMAC1_DATA_SKEW, 28, 31), /* GMAC1 TXD3 */
+};
+
+static const struct gemini_pin_conf *gemini_get_pin_conf(struct gemini_pmx *pmx,
+ unsigned int pin)
+{
+ const struct gemini_pin_conf *retconf;
+ int i;
+
+ for (i = 0; i < pmx->nconfs; i++) {
+ retconf = &pmx->confs[i];
+ if (retconf->pin == pin)
+ return retconf;
+ }
+ return NULL;
+}
+
+static int gemini_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct gemini_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct gemini_pin_conf *conf;
+ u32 val;
+
+ switch (param) {
+ case PIN_CONFIG_SKEW_DELAY:
+ conf = gemini_get_pin_conf(pmx, pin);
+ if (!conf)
+ return -ENOTSUPP;
+ regmap_read(pmx->map, conf->reg, &val);
+ val &= conf->mask;
+ val >>= (ffs(conf->mask) - 1);
+ *config = pinconf_to_config_packed(PIN_CONFIG_SKEW_DELAY, val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int gemini_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct gemini_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const struct gemini_pin_conf *conf;
+ enum pin_config_param param;
+ u32 arg;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_SKEW_DELAY:
+ if (arg > 0xf)
+ return -EINVAL;
+ conf = gemini_get_pin_conf(pmx, pin);
+ if (!conf) {
+ dev_err(pmx->dev,
+ "invalid pin for skew delay %d\n", pin);
+ return -ENOTSUPP;
+ }
+ arg <<= (ffs(conf->mask) - 1);
+ dev_dbg(pmx->dev,
+ "set pin %d to skew delay mask %08x, val %08x\n",
+ pin, conf->mask, arg);
+ regmap_update_bits(pmx->map, conf->reg, conf->mask, arg);
+ break;
+ default:
+ dev_err(pmx->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return ret;
+}
+
+static const struct pinconf_ops gemini_pinconf_ops = {
+ .pin_config_get = gemini_pinconf_get,
+ .pin_config_set = gemini_pinconf_set,
+ .is_generic = true,
+};
+
static struct pinctrl_desc gemini_pmx_desc = {
.name = DRIVER_NAME,
.pctlops = &gemini_pctrl_ops,
.pmxops = &gemini_pmx_ops,
+ .confops = &gemini_pinconf_ops,
.owner = THIS_MODULE,
};
@@ -2297,11 +2446,15 @@ static int gemini_pmx_probe(struct platform_device *pdev)
val &= 0xffff;
if (val == 0x3512) {
pmx->is_3512 = true;
+ pmx->confs = gemini_confs_3512;
+ pmx->nconfs = ARRAY_SIZE(gemini_confs_3512);
gemini_pmx_desc.pins = gemini_3512_pins;
gemini_pmx_desc.npins = ARRAY_SIZE(gemini_3512_pins);
dev_info(dev, "detected 3512 chip variant\n");
} else if (val == 0x3516) {
pmx->is_3516 = true;
+ pmx->confs = gemini_confs_3516;
+ pmx->nconfs = ARRAY_SIZE(gemini_confs_3516);
gemini_pmx_desc.pins = gemini_3516_pins;
gemini_pmx_desc.npins = ARRAY_SIZE(gemini_3516_pins);
dev_info(dev, "detected 3516 chip variant\n");
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index d84761822243..372ddf386bdb 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -717,7 +717,7 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
{},
};
-int ingenic_pinctrl_probe(struct platform_device *pdev)
+static int ingenic_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_pinctrl *jzpc;
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index b8d2180a2bea..a7f37063518e 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -420,11 +420,9 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
MAX77620_REG_GPIO0 + pin,
MAX77620_CNFG_GPIO_DRV_MASK,
val);
- if (ret < 0) {
- dev_err(dev, "Reg 0x%02x update failed %d\n",
- MAX77620_REG_GPIO0 + pin, ret);
- return ret;
- }
+ if (ret)
+ goto report_update_failure;
+
mpci->pin_info[pin].drv_type = val ?
MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
break;
@@ -435,11 +433,9 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
MAX77620_REG_GPIO0 + pin,
MAX77620_CNFG_GPIO_DRV_MASK,
val);
- if (ret < 0) {
- dev_err(dev, "Reg 0x%02x update failed %d\n",
- MAX77620_REG_GPIO0 + pin, ret);
- return ret;
- }
+ if (ret)
+ goto report_update_failure;
+
mpci->pin_info[pin].drv_type = val ?
MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
break;
@@ -536,6 +532,11 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
}
return 0;
+
+report_update_failure:
+ dev_err(dev, "Reg 0x%02x update failed %d\n",
+ MAX77620_REG_GPIO0 + pin, ret);
+ return ret;
}
static const struct pinconf_ops max77620_pinconf_ops = {
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 59c899c205fb..4a6ea159c65d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -25,6 +25,7 @@
#define MCP_TYPE_008 2
#define MCP_TYPE_017 3
#define MCP_TYPE_S18 4
+#define MCP_TYPE_018 5
#define MCP_MAX_DEV_PER_CS 8
@@ -278,8 +279,7 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct mcp23s08 *mcp = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u32 arg, mask;
- u16 val;
+ u32 arg;
int ret = 0;
int i;
@@ -289,8 +289,6 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
- val = arg ? 0xFFFF : 0x0000;
- mask = BIT(pin);
ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
break;
default:
@@ -837,6 +835,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23017";
break;
+
+ case MCP_TYPE_018:
+ mcp->regmap = devm_regmap_init_i2c(data, &mcp23x17_regmap);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23018";
+ break;
#endif /* CONFIG_I2C */
default:
@@ -883,7 +888,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (mirror)
status |= IOCON_MIRROR | (IOCON_MIRROR << 8);
- if (type == MCP_TYPE_S18)
+ if (type == MCP_TYPE_S18 || type == MCP_TYPE_018)
status |= IOCON_INTCC | (IOCON_INTCC << 8);
ret = mcp_write(mcp, MCP_IOCON, status);
@@ -964,6 +969,10 @@ static const struct of_device_id mcp23s08_i2c_of_match[] = {
.compatible = "microchip,mcp23017",
.data = (void *) MCP_TYPE_017,
},
+ {
+ .compatible = "microchip,mcp23018",
+ .data = (void *) MCP_TYPE_018,
+ },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
{
.compatible = "mcp,mcp23008",
@@ -1013,6 +1022,7 @@ static int mcp230xx_probe(struct i2c_client *client,
static const struct i2c_device_id mcp230xx_id[] = {
{ "mcp23008", MCP_TYPE_008 },
{ "mcp23017", MCP_TYPE_017 },
+ { "mcp23018", MCP_TYPE_018 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index b5cb7858ffdc..2ba17548ad5b 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -884,6 +884,24 @@ static struct rockchip_mux_route_data rk3228_mux_route_data[] = {
},
};
+static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
+ {
+ /* edphdmi_cecinoutt1 */
+ .bank_num = 7,
+ .pin = 16,
+ .func = 2,
+ .route_offset = 0x264,
+ .route_val = BIT(16 + 12) | BIT(12),
+ }, {
+ /* edphdmi_cecinout */
+ .bank_num = 7,
+ .pin = 23,
+ .func = 4,
+ .route_offset = 0x264,
+ .route_val = BIT(16 + 12),
+ },
+};
+
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
{
/* uart2dbg_rxm0 */
@@ -900,12 +918,19 @@ static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
.route_offset = 0x50,
.route_val = BIT(16) | BIT(16 + 1) | BIT(0),
}, {
- /* gmac-m1-optimized_rxd0 */
+ /* gmac-m1_rxd0 */
.bank_num = 1,
.pin = 11,
.func = 2,
.route_offset = 0x50,
- .route_val = BIT(16 + 2) | BIT(16 + 10) | BIT(2) | BIT(10),
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* gmac-m1-optimized_rxd3 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 10) | BIT(10),
}, {
/* pdm_sdi0m0 */
.bank_num = 2,
@@ -3391,6 +3416,8 @@ static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
.type = RK3288,
.grf_mux_offset = 0x0,
.pmu_mux_offset = 0x84,
+ .iomux_routes = rk3288_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3288_mux_route_data),
.pull_calc_reg = rk3288_calc_pull_reg_and_bit,
.drv_calc_reg = rk3288_calc_drv_reg_and_bit,
};
@@ -3456,8 +3483,8 @@ static struct rockchip_pin_bank rk3399_pin_banks[] = {
DRV_TYPE_IO_1V8_ONLY,
DRV_TYPE_IO_DEFAULT,
DRV_TYPE_IO_DEFAULT,
- 0x0,
- 0x8,
+ 0x80,
+ 0x88,
-1,
-1,
PULL_TYPE_IO_1V8_ONLY,
@@ -3473,10 +3500,10 @@ static struct rockchip_pin_bank rk3399_pin_banks[] = {
DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
- 0x20,
- 0x28,
- 0x30,
- 0x38
+ 0xa0,
+ 0xa8,
+ 0xb0,
+ 0xb8
),
PIN_BANK_DRV_FLAGS_PULL_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 04d058706b80..717c0f4449a0 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -303,6 +303,134 @@ static const struct rza1_pinmux_conf rza1h_pmx_conf = {
};
/* ----------------------------------------------------------------------------
+ * RZ/A1L (r7s72102) pinmux flags
+ */
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p1[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p3[] = {
+ { .pin = 0, .func = 2 },
+ { .pin = 1, .func = 2 },
+ { .pin = 2, .func = 2 },
+ { .pin = 4, .func = 2 },
+ { .pin = 5, .func = 2 },
+ { .pin = 10, .func = 2 },
+ { .pin = 11, .func = 2 },
+ { .pin = 12, .func = 2 },
+ { .pin = 13, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p4[] = {
+ { .pin = 1, .func = 4 },
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p5[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+ { .pin = 0, .func = 2 },
+ { .pin = 1, .func = 2 },
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p6[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p7[] = {
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+ { .pin = 5, .func = 2 },
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+ { .pin = 2, .func = 3 },
+ { .pin = 3, .func = 3 },
+ { .pin = 5, .func = 3 },
+ { .pin = 6, .func = 3 },
+ { .pin = 7, .func = 3 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p9[] = {
+ { .pin = 1, .func = 2 },
+ { .pin = 0, .func = 3 },
+ { .pin = 1, .func = 3 },
+ { .pin = 3, .func = 3 },
+ { .pin = 4, .func = 3 },
+ { .pin = 5, .func = 3 },
+};
+
+static const struct rza1_swio_pin rza1l_swio_pins[] = {
+ { .port = 2, .pin = 8, .func = 2, .input = 0 },
+ { .port = 5, .pin = 6, .func = 3, .input = 0 },
+ { .port = 6, .pin = 6, .func = 3, .input = 0 },
+ { .port = 6, .pin = 10, .func = 3, .input = 0 },
+ { .port = 7, .pin = 10, .func = 2, .input = 0 },
+ { .port = 8, .pin = 2, .func = 3, .input = 0 },
+};
+
+static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = {
+ [1] = { ARRAY_SIZE(rza1l_bidir_pins_p1), rza1l_bidir_pins_p1 },
+ [3] = { ARRAY_SIZE(rza1l_bidir_pins_p3), rza1l_bidir_pins_p3 },
+ [4] = { ARRAY_SIZE(rza1l_bidir_pins_p4), rza1l_bidir_pins_p4 },
+ [5] = { ARRAY_SIZE(rza1l_bidir_pins_p4), rza1l_bidir_pins_p5 },
+ [6] = { ARRAY_SIZE(rza1l_bidir_pins_p6), rza1l_bidir_pins_p6 },
+ [7] = { ARRAY_SIZE(rza1l_bidir_pins_p7), rza1l_bidir_pins_p7 },
+ [9] = { ARRAY_SIZE(rza1l_bidir_pins_p9), rza1l_bidir_pins_p9 },
+};
+
+static const struct rza1_swio_entry rza1l_swio_entries[] = {
+ [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins },
+};
+
+/* RZ/A1L (r7s72102x) pinmux flags table */
+static const struct rza1_pinmux_conf rza1l_pmx_conf = {
+ .bidir_entries = rza1l_bidir_entries,
+ .swio_entries = rza1l_swio_entries,
+};
+
+/* ----------------------------------------------------------------------------
* RZ/A1 types
*/
/**
@@ -1283,9 +1411,15 @@ static int rza1_pinctrl_probe(struct platform_device *pdev)
static const struct of_device_id rza1_pinctrl_of_match[] = {
{
+ /* RZ/A1H, RZ/A1M */
.compatible = "renesas,r7s72100-ports",
.data = &rza1h_pmx_conf,
},
+ {
+ /* RZ/A1L */
+ .compatible = "renesas,r7s72102-ports",
+ .data = &rza1l_pmx_conf,
+ },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b8b3d932cd73..e6cd8de793e2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -873,13 +873,13 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
int i = 0, nconfs = 0;
unsigned long *settings = NULL, *s = NULL;
struct pcs_conf_vals *conf = NULL;
- struct pcs_conf_type prop2[] = {
+ static const struct pcs_conf_type prop2[] = {
{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
{ "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
};
- struct pcs_conf_type prop4[] = {
+ static const struct pcs_conf_type prop4[] = {
{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
{ "pinctrl-single,bias-pulldown", PIN_CONFIG_BIAS_PULL_DOWN, },
{ "pinctrl-single,input-schmitt-enable",
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 7db4f6a6eb17..fb242c542dc9 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1087,7 +1087,7 @@ static bool sx150x_reg_volatile(struct device *dev, unsigned int reg)
return reg == pctl->data->reg_irq_src || reg == pctl->data->reg_data;
}
-const struct regmap_config sx150x_regmap_config = {
+static const struct regmap_config sx150x_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index c2c0bab04257..3e66e0d10010 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -453,6 +453,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad = pctldev->desc->pins[pin].drv_data;
+ pad->is_enabled = true;
for (i = 0; i < nconfs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
@@ -600,6 +601,10 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
}
+ val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_EN_CTL, val);
+
return ret;
}
@@ -1032,6 +1037,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */
{ .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */
{ .compatible = "qcom,pm8994-gpio" }, /* 22 GPIO's */
+ { .compatible = "qcom,pmi8994-gpio" }, /* 10 GPIO's */
{ .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */
{ .compatible = "qcom,spmi-gpio" }, /* Generic */
{ },
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 0357f9701eb9..ecfb90059eeb 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -29,7 +29,7 @@ config PINCTRL_EXYNOS5440
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX
+ depends on ARCH_S3C24XX && OF
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 0c5e952461fd..cf4ae4bc9115 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -24,6 +24,7 @@
#include <linux/of_device.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_device.h>
+#include <linux/psci.h>
#include <linux/slab.h>
#include "core.h"
@@ -175,19 +176,19 @@ void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
BUG();
}
-u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width)
+u32 sh_pfc_read(struct sh_pfc *pfc, u32 reg)
{
- return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width);
+ return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32);
}
-void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width, u32 data)
+void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data)
{
if (pfc->info->unlock_reg)
sh_pfc_write_raw_reg(
sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
~data);
- sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width, data);
+ sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32, data);
}
static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
@@ -389,15 +390,20 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return 0;
}
-const struct sh_pfc_bias_info *
-sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
- unsigned int num, unsigned int pin)
+const struct pinmux_bias_reg *
+sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit)
{
- unsigned int i;
+ unsigned int i, j;
- for (i = 0; i < num; i++)
- if (info[i].pin == pin)
- return &info[i];
+ for (i = 0; pfc->info->bias_regs[i].puen; i++) {
+ for (j = 0; j < ARRAY_SIZE(pfc->info->bias_regs[i].pins); j++) {
+ if (pfc->info->bias_regs[i].pins[j] == pin) {
+ *bit = j;
+ return &pfc->info->bias_regs[i];
+ }
+ }
+ }
WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
@@ -567,9 +573,99 @@ static const struct of_device_id sh_pfc_of_table[] = {
};
#endif
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
+static void sh_pfc_nop_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+}
+
+static void sh_pfc_save_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+ pfc->saved_regs[idx] = sh_pfc_read(pfc, reg);
+}
+
+static void sh_pfc_restore_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+ sh_pfc_write(pfc, reg, pfc->saved_regs[idx]);
+}
+
+static unsigned int sh_pfc_walk_regs(struct sh_pfc *pfc,
+ void (*do_reg)(struct sh_pfc *pfc, u32 reg, unsigned int idx))
+{
+ unsigned int i, n = 0;
+
+ if (pfc->info->cfg_regs)
+ for (i = 0; pfc->info->cfg_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->cfg_regs[i].reg, n++);
+
+ if (pfc->info->drive_regs)
+ for (i = 0; pfc->info->drive_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->drive_regs[i].reg, n++);
+
+ if (pfc->info->bias_regs)
+ for (i = 0; pfc->info->bias_regs[i].puen; i++) {
+ do_reg(pfc, pfc->info->bias_regs[i].puen, n++);
+ if (pfc->info->bias_regs[i].pud)
+ do_reg(pfc, pfc->info->bias_regs[i].pud, n++);
+ }
+
+ if (pfc->info->ioctrl_regs)
+ for (i = 0; pfc->info->ioctrl_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->ioctrl_regs[i].reg, n++);
+
+ return n;
+}
+
+static int sh_pfc_suspend_init(struct sh_pfc *pfc)
+{
+ unsigned int n;
+
+ /* This is the best we can do to check for the presence of PSCI */
+ if (!psci_ops.cpu_suspend)
+ return 0;
+
+ n = sh_pfc_walk_regs(pfc, sh_pfc_nop_reg);
+ if (!n)
+ return 0;
+
+ pfc->saved_regs = devm_kmalloc_array(pfc->dev, n,
+ sizeof(*pfc->saved_regs),
+ GFP_KERNEL);
+ if (!pfc->saved_regs)
+ return -ENOMEM;
+
+ dev_dbg(pfc->dev, "Allocated space to save %u regs\n", n);
+ return 0;
+}
+
+static int sh_pfc_suspend_noirq(struct device *dev)
+{
+ struct sh_pfc *pfc = dev_get_drvdata(dev);
+
+ if (pfc->saved_regs)
+ sh_pfc_walk_regs(pfc, sh_pfc_save_reg);
+ return 0;
+}
+
+static int sh_pfc_resume_noirq(struct device *dev)
+{
+ struct sh_pfc *pfc = dev_get_drvdata(dev);
+
+ if (pfc->saved_regs)
+ sh_pfc_walk_regs(pfc, sh_pfc_restore_reg);
+ return 0;
+}
+
+static const struct dev_pm_ops sh_pfc_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_pfc_suspend_noirq, sh_pfc_resume_noirq)
+};
+#define DEV_PM_OPS &sh_pfc_pm
+#else
+static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+
static int sh_pfc_probe(struct platform_device *pdev)
{
- const struct platform_device_id *platid = platform_get_device_id(pdev);
#ifdef CONFIG_OF
struct device_node *np = pdev->dev.of_node;
#endif
@@ -582,10 +678,7 @@ static int sh_pfc_probe(struct platform_device *pdev)
info = of_device_get_match_data(&pdev->dev);
else
#endif
- info = platid ? (const void *)platid->driver_data : NULL;
-
- if (info == NULL)
- return -ENODEV;
+ info = (const void *)platform_get_device_id(pdev)->driver_data;
pfc = devm_kzalloc(&pdev->dev, sizeof(*pfc), GFP_KERNEL);
if (pfc == NULL)
@@ -609,6 +702,10 @@ static int sh_pfc_probe(struct platform_device *pdev)
info = pfc->info;
}
+ ret = sh_pfc_suspend_init(pfc);
+ if (ret)
+ return ret;
+
/* Enable dummy states for those platforms without pinctrl support */
if (!of_have_populated_dt())
pinctrl_provide_dummies();
@@ -683,7 +780,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_SHX3
{ "pfc-shx3", (kernel_ulong_t)&shx3_pinmux_info },
#endif
- { "sh-pfc", 0 },
{ },
};
@@ -693,6 +789,7 @@ static struct platform_driver sh_pfc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(sh_pfc_of_table),
+ .pm = DEV_PM_OPS,
},
};
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 6d598dd63720..5af8ee26c03e 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -26,15 +26,14 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
u32 sh_pfc_read_raw_reg(void __iomem *mapped_reg, unsigned int reg_width);
void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
u32 data);
-u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width);
-void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
- u32 data);
+u32 sh_pfc_read(struct sh_pfc *pfc, u32 reg);
+void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data);
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
-const struct sh_pfc_bias_info *
-sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
- unsigned int num, unsigned int pin);
+const struct pinmux_bias_reg *
+sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit);
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 6b5422766f13..946d9be50b62 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -139,12 +139,12 @@ static int gpio_pin_request(struct gpio_chip *gc, unsigned offset)
if (idx < 0 || pfc->info->pins[idx].enum_id == 0)
return -EINVAL;
- return pinctrl_request_gpio(offset);
+ return pinctrl_gpio_request(offset);
}
static void gpio_pin_free(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_free_gpio(offset);
+ return pinctrl_gpio_free(offset);
}
static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index c3af9ebee4af..00d61d175249 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2912,189 +2912,230 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-#define PUPR0 0x100
-#define PUPR1 0x104
-#define PUPR2 0x108
-#define PUPR3 0x10c
-#define PUPR4 0x110
-#define PUPR5 0x114
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(0, 6), PUPR0, 0 }, /* A0 */
- { RCAR_GP_PIN(0, 7), PUPR0, 1 }, /* A1 */
- { RCAR_GP_PIN(0, 8), PUPR0, 2 }, /* A2 */
- { RCAR_GP_PIN(0, 9), PUPR0, 3 }, /* A3 */
- { RCAR_GP_PIN(0, 10), PUPR0, 4 }, /* A4 */
- { RCAR_GP_PIN(0, 11), PUPR0, 5 }, /* A5 */
- { RCAR_GP_PIN(0, 12), PUPR0, 6 }, /* A6 */
- { RCAR_GP_PIN(0, 13), PUPR0, 7 }, /* A7 */
- { RCAR_GP_PIN(0, 14), PUPR0, 8 }, /* A8 */
- { RCAR_GP_PIN(0, 15), PUPR0, 9 }, /* A9 */
- { RCAR_GP_PIN(0, 16), PUPR0, 10 }, /* A10 */
- { RCAR_GP_PIN(0, 17), PUPR0, 11 }, /* A11 */
- { RCAR_GP_PIN(0, 18), PUPR0, 12 }, /* A12 */
- { RCAR_GP_PIN(0, 19), PUPR0, 13 }, /* A13 */
- { RCAR_GP_PIN(0, 20), PUPR0, 14 }, /* A14 */
- { RCAR_GP_PIN(0, 21), PUPR0, 15 }, /* A15 */
- { RCAR_GP_PIN(0, 22), PUPR0, 16 }, /* A16 */
- { RCAR_GP_PIN(0, 23), PUPR0, 17 }, /* A17 */
- { RCAR_GP_PIN(0, 24), PUPR0, 18 }, /* A18 */
- { RCAR_GP_PIN(0, 25), PUPR0, 19 }, /* A19 */
- { RCAR_GP_PIN(0, 26), PUPR0, 20 }, /* A20 */
- { RCAR_GP_PIN(0, 27), PUPR0, 21 }, /* A21 */
- { RCAR_GP_PIN(0, 28), PUPR0, 22 }, /* A22 */
- { RCAR_GP_PIN(0, 29), PUPR0, 23 }, /* A23 */
- { RCAR_GP_PIN(0, 30), PUPR0, 24 }, /* A24 */
- { RCAR_GP_PIN(0, 31), PUPR0, 25 }, /* A25 */
- { RCAR_GP_PIN(1, 3), PUPR0, 26 }, /* /EX_CS0 */
- { RCAR_GP_PIN(1, 4), PUPR0, 27 }, /* /EX_CS1 */
- { RCAR_GP_PIN(1, 5), PUPR0, 28 }, /* /EX_CS2 */
- { RCAR_GP_PIN(1, 6), PUPR0, 29 }, /* /EX_CS3 */
- { RCAR_GP_PIN(1, 7), PUPR0, 30 }, /* /EX_CS4 */
- { RCAR_GP_PIN(1, 8), PUPR0, 31 }, /* /EX_CS5 */
-
- { RCAR_GP_PIN(0, 0), PUPR1, 0 }, /* /PRESETOUT */
- { RCAR_GP_PIN(0, 5), PUPR1, 1 }, /* /BS */
- { RCAR_GP_PIN(1, 0), PUPR1, 2 }, /* RD//WR */
- { RCAR_GP_PIN(1, 1), PUPR1, 3 }, /* /WE0 */
- { RCAR_GP_PIN(1, 2), PUPR1, 4 }, /* /WE1 */
- { RCAR_GP_PIN(1, 11), PUPR1, 5 }, /* EX_WAIT0 */
- { RCAR_GP_PIN(1, 9), PUPR1, 6 }, /* DREQ0 */
- { RCAR_GP_PIN(1, 10), PUPR1, 7 }, /* DACK0 */
- { RCAR_GP_PIN(1, 12), PUPR1, 8 }, /* IRQ0 */
- { RCAR_GP_PIN(1, 13), PUPR1, 9 }, /* IRQ1 */
-
- { RCAR_GP_PIN(1, 22), PUPR2, 0 }, /* DU0_DR0 */
- { RCAR_GP_PIN(1, 23), PUPR2, 1 }, /* DU0_DR1 */
- { RCAR_GP_PIN(1, 24), PUPR2, 2 }, /* DU0_DR2 */
- { RCAR_GP_PIN(1, 25), PUPR2, 3 }, /* DU0_DR3 */
- { RCAR_GP_PIN(1, 26), PUPR2, 4 }, /* DU0_DR4 */
- { RCAR_GP_PIN(1, 27), PUPR2, 5 }, /* DU0_DR5 */
- { RCAR_GP_PIN(1, 28), PUPR2, 6 }, /* DU0_DR6 */
- { RCAR_GP_PIN(1, 29), PUPR2, 7 }, /* DU0_DR7 */
- { RCAR_GP_PIN(1, 30), PUPR2, 8 }, /* DU0_DG0 */
- { RCAR_GP_PIN(1, 31), PUPR2, 9 }, /* DU0_DG1 */
- { RCAR_GP_PIN(2, 0), PUPR2, 10 }, /* DU0_DG2 */
- { RCAR_GP_PIN(2, 1), PUPR2, 11 }, /* DU0_DG3 */
- { RCAR_GP_PIN(2, 2), PUPR2, 12 }, /* DU0_DG4 */
- { RCAR_GP_PIN(2, 3), PUPR2, 13 }, /* DU0_DG5 */
- { RCAR_GP_PIN(2, 4), PUPR2, 14 }, /* DU0_DG6 */
- { RCAR_GP_PIN(2, 5), PUPR2, 15 }, /* DU0_DG7 */
- { RCAR_GP_PIN(2, 6), PUPR2, 16 }, /* DU0_DB0 */
- { RCAR_GP_PIN(2, 7), PUPR2, 17 }, /* DU0_DB1 */
- { RCAR_GP_PIN(2, 8), PUPR2, 18 }, /* DU0_DB2 */
- { RCAR_GP_PIN(2, 9), PUPR2, 19 }, /* DU0_DB3 */
- { RCAR_GP_PIN(2, 10), PUPR2, 20 }, /* DU0_DB4 */
- { RCAR_GP_PIN(2, 11), PUPR2, 21 }, /* DU0_DB5 */
- { RCAR_GP_PIN(2, 12), PUPR2, 22 }, /* DU0_DB6 */
- { RCAR_GP_PIN(2, 13), PUPR2, 23 }, /* DU0_DB7 */
- { RCAR_GP_PIN(2, 14), PUPR2, 24 }, /* DU0_DOTCLKIN */
- { RCAR_GP_PIN(2, 15), PUPR2, 25 }, /* DU0_DOTCLKOUT0 */
- { RCAR_GP_PIN(2, 17), PUPR2, 26 }, /* DU0_HSYNC */
- { RCAR_GP_PIN(2, 18), PUPR2, 27 }, /* DU0_VSYNC */
- { RCAR_GP_PIN(2, 19), PUPR2, 28 }, /* DU0_EXODDF */
- { RCAR_GP_PIN(2, 20), PUPR2, 29 }, /* DU0_DISP */
- { RCAR_GP_PIN(2, 21), PUPR2, 30 }, /* DU0_CDE */
- { RCAR_GP_PIN(2, 16), PUPR2, 31 }, /* DU0_DOTCLKOUT1 */
-
- { RCAR_GP_PIN(3, 24), PUPR3, 0 }, /* VI0_CLK */
- { RCAR_GP_PIN(3, 25), PUPR3, 1 }, /* VI0_CLKENB */
- { RCAR_GP_PIN(3, 26), PUPR3, 2 }, /* VI0_FIELD */
- { RCAR_GP_PIN(3, 27), PUPR3, 3 }, /* /VI0_HSYNC */
- { RCAR_GP_PIN(3, 28), PUPR3, 4 }, /* /VI0_VSYNC */
- { RCAR_GP_PIN(3, 29), PUPR3, 5 }, /* VI0_DATA0 */
- { RCAR_GP_PIN(3, 30), PUPR3, 6 }, /* VI0_DATA1 */
- { RCAR_GP_PIN(3, 31), PUPR3, 7 }, /* VI0_DATA2 */
- { RCAR_GP_PIN(4, 0), PUPR3, 8 }, /* VI0_DATA3 */
- { RCAR_GP_PIN(4, 1), PUPR3, 9 }, /* VI0_DATA4 */
- { RCAR_GP_PIN(4, 2), PUPR3, 10 }, /* VI0_DATA5 */
- { RCAR_GP_PIN(4, 3), PUPR3, 11 }, /* VI0_DATA6 */
- { RCAR_GP_PIN(4, 4), PUPR3, 12 }, /* VI0_DATA7 */
- { RCAR_GP_PIN(4, 5), PUPR3, 13 }, /* VI0_G2 */
- { RCAR_GP_PIN(4, 6), PUPR3, 14 }, /* VI0_G3 */
- { RCAR_GP_PIN(4, 7), PUPR3, 15 }, /* VI0_G4 */
- { RCAR_GP_PIN(4, 8), PUPR3, 16 }, /* VI0_G5 */
- { RCAR_GP_PIN(4, 21), PUPR3, 17 }, /* VI1_DATA12 */
- { RCAR_GP_PIN(4, 22), PUPR3, 18 }, /* VI1_DATA13 */
- { RCAR_GP_PIN(4, 23), PUPR3, 19 }, /* VI1_DATA14 */
- { RCAR_GP_PIN(4, 24), PUPR3, 20 }, /* VI1_DATA15 */
- { RCAR_GP_PIN(4, 9), PUPR3, 21 }, /* ETH_REF_CLK */
- { RCAR_GP_PIN(4, 10), PUPR3, 22 }, /* ETH_TXD0 */
- { RCAR_GP_PIN(4, 11), PUPR3, 23 }, /* ETH_TXD1 */
- { RCAR_GP_PIN(4, 12), PUPR3, 24 }, /* ETH_CRS_DV */
- { RCAR_GP_PIN(4, 13), PUPR3, 25 }, /* ETH_TX_EN */
- { RCAR_GP_PIN(4, 14), PUPR3, 26 }, /* ETH_RX_ER */
- { RCAR_GP_PIN(4, 15), PUPR3, 27 }, /* ETH_RXD0 */
- { RCAR_GP_PIN(4, 16), PUPR3, 28 }, /* ETH_RXD1 */
- { RCAR_GP_PIN(4, 17), PUPR3, 29 }, /* ETH_MDC */
- { RCAR_GP_PIN(4, 18), PUPR3, 30 }, /* ETH_MDIO */
- { RCAR_GP_PIN(4, 19), PUPR3, 31 }, /* ETH_LINK */
-
- { RCAR_GP_PIN(3, 6), PUPR4, 0 }, /* SSI_SCK012 */
- { RCAR_GP_PIN(3, 7), PUPR4, 1 }, /* SSI_WS012 */
- { RCAR_GP_PIN(3, 10), PUPR4, 2 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(3, 9), PUPR4, 3 }, /* SSI_SDATA1 */
- { RCAR_GP_PIN(3, 8), PUPR4, 4 }, /* SSI_SDATA2 */
- { RCAR_GP_PIN(3, 2), PUPR4, 5 }, /* SSI_SCK34 */
- { RCAR_GP_PIN(3, 3), PUPR4, 6 }, /* SSI_WS34 */
- { RCAR_GP_PIN(3, 5), PUPR4, 7 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(3, 4), PUPR4, 8 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(2, 31), PUPR4, 9 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(3, 0), PUPR4, 10 }, /* SSI_WS5 */
- { RCAR_GP_PIN(3, 1), PUPR4, 11 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(2, 28), PUPR4, 12 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(2, 29), PUPR4, 13 }, /* SSI_WS6 */
- { RCAR_GP_PIN(2, 30), PUPR4, 14 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(2, 24), PUPR4, 15 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(2, 25), PUPR4, 16 }, /* SSI_WS78 */
- { RCAR_GP_PIN(2, 27), PUPR4, 17 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(2, 26), PUPR4, 18 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(3, 23), PUPR4, 19 }, /* TCLK0 */
- { RCAR_GP_PIN(3, 11), PUPR4, 20 }, /* SD0_CLK */
- { RCAR_GP_PIN(3, 12), PUPR4, 21 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 13), PUPR4, 22 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 14), PUPR4, 23 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 15), PUPR4, 24 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 16), PUPR4, 25 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 17), PUPR4, 26 }, /* SD0_CD */
- { RCAR_GP_PIN(3, 18), PUPR4, 27 }, /* SD0_WP */
- { RCAR_GP_PIN(2, 22), PUPR4, 28 }, /* AUDIO_CLKA */
- { RCAR_GP_PIN(2, 23), PUPR4, 29 }, /* AUDIO_CLKB */
- { RCAR_GP_PIN(1, 14), PUPR4, 30 }, /* IRQ2 */
- { RCAR_GP_PIN(1, 15), PUPR4, 31 }, /* IRQ3 */
-
- { RCAR_GP_PIN(0, 1), PUPR5, 0 }, /* PENC0 */
- { RCAR_GP_PIN(0, 2), PUPR5, 1 }, /* PENC1 */
- { RCAR_GP_PIN(0, 3), PUPR5, 2 }, /* USB_OVC0 */
- { RCAR_GP_PIN(0, 4), PUPR5, 3 }, /* USB_OVC1 */
- { RCAR_GP_PIN(1, 16), PUPR5, 4 }, /* SCIF_CLK */
- { RCAR_GP_PIN(1, 17), PUPR5, 5 }, /* TX0 */
- { RCAR_GP_PIN(1, 18), PUPR5, 6 }, /* RX0 */
- { RCAR_GP_PIN(1, 19), PUPR5, 7 }, /* SCK0 */
- { RCAR_GP_PIN(1, 20), PUPR5, 8 }, /* /CTS0 */
- { RCAR_GP_PIN(1, 21), PUPR5, 9 }, /* /RTS0 */
- { RCAR_GP_PIN(3, 19), PUPR5, 10 }, /* HSPI_CLK0 */
- { RCAR_GP_PIN(3, 20), PUPR5, 11 }, /* /HSPI_CS0 */
- { RCAR_GP_PIN(3, 21), PUPR5, 12 }, /* HSPI_RX0 */
- { RCAR_GP_PIN(3, 22), PUPR5, 13 }, /* HSPI_TX0 */
- { RCAR_GP_PIN(4, 20), PUPR5, 14 }, /* ETH_MAGIC */
- { RCAR_GP_PIN(4, 25), PUPR5, 15 }, /* AVS1 */
- { RCAR_GP_PIN(4, 26), PUPR5, 16 }, /* AVS2 */
+#define PIN_NONE U16_MAX
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUPR0", 0x100, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 6), /* A0 */
+ [ 1] = RCAR_GP_PIN(0, 7), /* A1 */
+ [ 2] = RCAR_GP_PIN(0, 8), /* A2 */
+ [ 3] = RCAR_GP_PIN(0, 9), /* A3 */
+ [ 4] = RCAR_GP_PIN(0, 10), /* A4 */
+ [ 5] = RCAR_GP_PIN(0, 11), /* A5 */
+ [ 6] = RCAR_GP_PIN(0, 12), /* A6 */
+ [ 7] = RCAR_GP_PIN(0, 13), /* A7 */
+ [ 8] = RCAR_GP_PIN(0, 14), /* A8 */
+ [ 9] = RCAR_GP_PIN(0, 15), /* A9 */
+ [10] = RCAR_GP_PIN(0, 16), /* A10 */
+ [11] = RCAR_GP_PIN(0, 17), /* A11 */
+ [12] = RCAR_GP_PIN(0, 18), /* A12 */
+ [13] = RCAR_GP_PIN(0, 19), /* A13 */
+ [14] = RCAR_GP_PIN(0, 20), /* A14 */
+ [15] = RCAR_GP_PIN(0, 21), /* A15 */
+ [16] = RCAR_GP_PIN(0, 22), /* A16 */
+ [17] = RCAR_GP_PIN(0, 23), /* A17 */
+ [18] = RCAR_GP_PIN(0, 24), /* A18 */
+ [19] = RCAR_GP_PIN(0, 25), /* A19 */
+ [20] = RCAR_GP_PIN(0, 26), /* A20 */
+ [21] = RCAR_GP_PIN(0, 27), /* A21 */
+ [22] = RCAR_GP_PIN(0, 28), /* A22 */
+ [23] = RCAR_GP_PIN(0, 29), /* A23 */
+ [24] = RCAR_GP_PIN(0, 30), /* A24 */
+ [25] = RCAR_GP_PIN(0, 31), /* A25 */
+ [26] = RCAR_GP_PIN(1, 3), /* /EX_CS0 */
+ [27] = RCAR_GP_PIN(1, 4), /* /EX_CS1 */
+ [28] = RCAR_GP_PIN(1, 5), /* /EX_CS2 */
+ [29] = RCAR_GP_PIN(1, 6), /* /EX_CS3 */
+ [30] = RCAR_GP_PIN(1, 7), /* /EX_CS4 */
+ [31] = RCAR_GP_PIN(1, 8), /* /EX_CS5 */
+ } },
+ { PINMUX_BIAS_REG("PUPR1", 0x104, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* /PRESETOUT */
+ [ 1] = RCAR_GP_PIN(0, 5), /* /BS */
+ [ 2] = RCAR_GP_PIN(1, 0), /* RD//WR */
+ [ 3] = RCAR_GP_PIN(1, 1), /* /WE0 */
+ [ 4] = RCAR_GP_PIN(1, 2), /* /WE1 */
+ [ 5] = RCAR_GP_PIN(1, 11), /* EX_WAIT0 */
+ [ 6] = RCAR_GP_PIN(1, 9), /* DREQ0 */
+ [ 7] = RCAR_GP_PIN(1, 10), /* DACK0 */
+ [ 8] = RCAR_GP_PIN(1, 12), /* IRQ0 */
+ [ 9] = RCAR_GP_PIN(1, 13), /* IRQ1 */
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUPR2", 0x108, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(1, 22), /* DU0_DR0 */
+ [ 1] = RCAR_GP_PIN(1, 23), /* DU0_DR1 */
+ [ 2] = RCAR_GP_PIN(1, 24), /* DU0_DR2 */
+ [ 3] = RCAR_GP_PIN(1, 25), /* DU0_DR3 */
+ [ 4] = RCAR_GP_PIN(1, 26), /* DU0_DR4 */
+ [ 5] = RCAR_GP_PIN(1, 27), /* DU0_DR5 */
+ [ 6] = RCAR_GP_PIN(1, 28), /* DU0_DR6 */
+ [ 7] = RCAR_GP_PIN(1, 29), /* DU0_DR7 */
+ [ 8] = RCAR_GP_PIN(1, 30), /* DU0_DG0 */
+ [ 9] = RCAR_GP_PIN(1, 31), /* DU0_DG1 */
+ [10] = RCAR_GP_PIN(2, 0), /* DU0_DG2 */
+ [11] = RCAR_GP_PIN(2, 1), /* DU0_DG3 */
+ [12] = RCAR_GP_PIN(2, 2), /* DU0_DG4 */
+ [13] = RCAR_GP_PIN(2, 3), /* DU0_DG5 */
+ [14] = RCAR_GP_PIN(2, 4), /* DU0_DG6 */
+ [15] = RCAR_GP_PIN(2, 5), /* DU0_DG7 */
+ [16] = RCAR_GP_PIN(2, 6), /* DU0_DB0 */
+ [17] = RCAR_GP_PIN(2, 7), /* DU0_DB1 */
+ [18] = RCAR_GP_PIN(2, 8), /* DU0_DB2 */
+ [19] = RCAR_GP_PIN(2, 9), /* DU0_DB3 */
+ [20] = RCAR_GP_PIN(2, 10), /* DU0_DB4 */
+ [21] = RCAR_GP_PIN(2, 11), /* DU0_DB5 */
+ [22] = RCAR_GP_PIN(2, 12), /* DU0_DB6 */
+ [23] = RCAR_GP_PIN(2, 13), /* DU0_DB7 */
+ [24] = RCAR_GP_PIN(2, 14), /* DU0_DOTCLKIN */
+ [25] = RCAR_GP_PIN(2, 15), /* DU0_DOTCLKOUT0 */
+ [26] = RCAR_GP_PIN(2, 17), /* DU0_HSYNC */
+ [27] = RCAR_GP_PIN(2, 18), /* DU0_VSYNC */
+ [28] = RCAR_GP_PIN(2, 19), /* DU0_EXODDF */
+ [29] = RCAR_GP_PIN(2, 20), /* DU0_DISP */
+ [30] = RCAR_GP_PIN(2, 21), /* DU0_CDE */
+ [31] = RCAR_GP_PIN(2, 16), /* DU0_DOTCLKOUT1 */
+ } },
+ { PINMUX_BIAS_REG("PUPR3", 0x10c, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 24), /* VI0_CLK */
+ [ 1] = RCAR_GP_PIN(3, 25), /* VI0_CLKENB */
+ [ 2] = RCAR_GP_PIN(3, 26), /* VI0_FIELD */
+ [ 3] = RCAR_GP_PIN(3, 27), /* /VI0_HSYNC */
+ [ 4] = RCAR_GP_PIN(3, 28), /* /VI0_VSYNC */
+ [ 5] = RCAR_GP_PIN(3, 29), /* VI0_DATA0 */
+ [ 6] = RCAR_GP_PIN(3, 30), /* VI0_DATA1 */
+ [ 7] = RCAR_GP_PIN(3, 31), /* VI0_DATA2 */
+ [ 8] = RCAR_GP_PIN(4, 0), /* VI0_DATA3 */
+ [ 9] = RCAR_GP_PIN(4, 1), /* VI0_DATA4 */
+ [10] = RCAR_GP_PIN(4, 2), /* VI0_DATA5 */
+ [11] = RCAR_GP_PIN(4, 3), /* VI0_DATA6 */
+ [12] = RCAR_GP_PIN(4, 4), /* VI0_DATA7 */
+ [13] = RCAR_GP_PIN(4, 5), /* VI0_G2 */
+ [14] = RCAR_GP_PIN(4, 6), /* VI0_G3 */
+ [15] = RCAR_GP_PIN(4, 7), /* VI0_G4 */
+ [16] = RCAR_GP_PIN(4, 8), /* VI0_G5 */
+ [17] = RCAR_GP_PIN(4, 21), /* VI1_DATA12 */
+ [18] = RCAR_GP_PIN(4, 22), /* VI1_DATA13 */
+ [19] = RCAR_GP_PIN(4, 23), /* VI1_DATA14 */
+ [20] = RCAR_GP_PIN(4, 24), /* VI1_DATA15 */
+ [21] = RCAR_GP_PIN(4, 9), /* ETH_REF_CLK */
+ [22] = RCAR_GP_PIN(4, 10), /* ETH_TXD0 */
+ [23] = RCAR_GP_PIN(4, 11), /* ETH_TXD1 */
+ [24] = RCAR_GP_PIN(4, 12), /* ETH_CRS_DV */
+ [25] = RCAR_GP_PIN(4, 13), /* ETH_TX_EN */
+ [26] = RCAR_GP_PIN(4, 14), /* ETH_RX_ER */
+ [27] = RCAR_GP_PIN(4, 15), /* ETH_RXD0 */
+ [28] = RCAR_GP_PIN(4, 16), /* ETH_RXD1 */
+ [29] = RCAR_GP_PIN(4, 17), /* ETH_MDC */
+ [30] = RCAR_GP_PIN(4, 18), /* ETH_MDIO */
+ [31] = RCAR_GP_PIN(4, 19), /* ETH_LINK */
+ } },
+ { PINMUX_BIAS_REG("PUPR4", 0x110, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 6), /* SSI_SCK012 */
+ [ 1] = RCAR_GP_PIN(3, 7), /* SSI_WS012 */
+ [ 2] = RCAR_GP_PIN(3, 10), /* SSI_SDATA0 */
+ [ 3] = RCAR_GP_PIN(3, 9), /* SSI_SDATA1 */
+ [ 4] = RCAR_GP_PIN(3, 8), /* SSI_SDATA2 */
+ [ 5] = RCAR_GP_PIN(3, 2), /* SSI_SCK34 */
+ [ 6] = RCAR_GP_PIN(3, 3), /* SSI_WS34 */
+ [ 7] = RCAR_GP_PIN(3, 5), /* SSI_SDATA3 */
+ [ 8] = RCAR_GP_PIN(3, 4), /* SSI_SDATA4 */
+ [ 9] = RCAR_GP_PIN(2, 31), /* SSI_SCK5 */
+ [10] = RCAR_GP_PIN(3, 0), /* SSI_WS5 */
+ [11] = RCAR_GP_PIN(3, 1), /* SSI_SDATA5 */
+ [12] = RCAR_GP_PIN(2, 28), /* SSI_SCK6 */
+ [13] = RCAR_GP_PIN(2, 29), /* SSI_WS6 */
+ [14] = RCAR_GP_PIN(2, 30), /* SSI_SDATA6 */
+ [15] = RCAR_GP_PIN(2, 24), /* SSI_SCK78 */
+ [16] = RCAR_GP_PIN(2, 25), /* SSI_WS78 */
+ [17] = RCAR_GP_PIN(2, 27), /* SSI_SDATA7 */
+ [18] = RCAR_GP_PIN(2, 26), /* SSI_SDATA8 */
+ [19] = RCAR_GP_PIN(3, 23), /* TCLK0 */
+ [20] = RCAR_GP_PIN(3, 11), /* SD0_CLK */
+ [21] = RCAR_GP_PIN(3, 12), /* SD0_CMD */
+ [22] = RCAR_GP_PIN(3, 13), /* SD0_DAT0 */
+ [23] = RCAR_GP_PIN(3, 14), /* SD0_DAT1 */
+ [24] = RCAR_GP_PIN(3, 15), /* SD0_DAT2 */
+ [25] = RCAR_GP_PIN(3, 16), /* SD0_DAT3 */
+ [26] = RCAR_GP_PIN(3, 17), /* SD0_CD */
+ [27] = RCAR_GP_PIN(3, 18), /* SD0_WP */
+ [28] = RCAR_GP_PIN(2, 22), /* AUDIO_CLKA */
+ [29] = RCAR_GP_PIN(2, 23), /* AUDIO_CLKB */
+ [30] = RCAR_GP_PIN(1, 14), /* IRQ2 */
+ [31] = RCAR_GP_PIN(1, 15), /* IRQ3 */
+ } },
+ { PINMUX_BIAS_REG("PUPR5", 0x114, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 1), /* PENC0 */
+ [ 1] = RCAR_GP_PIN(0, 2), /* PENC1 */
+ [ 2] = RCAR_GP_PIN(0, 3), /* USB_OVC0 */
+ [ 3] = RCAR_GP_PIN(0, 4), /* USB_OVC1 */
+ [ 4] = RCAR_GP_PIN(1, 16), /* SCIF_CLK */
+ [ 5] = RCAR_GP_PIN(1, 17), /* TX0 */
+ [ 6] = RCAR_GP_PIN(1, 18), /* RX0 */
+ [ 7] = RCAR_GP_PIN(1, 19), /* SCK0 */
+ [ 8] = RCAR_GP_PIN(1, 20), /* /CTS0 */
+ [ 9] = RCAR_GP_PIN(1, 21), /* /RTS0 */
+ [10] = RCAR_GP_PIN(3, 19), /* HSPI_CLK0 */
+ [11] = RCAR_GP_PIN(3, 20), /* /HSPI_CS0 */
+ [12] = RCAR_GP_PIN(3, 21), /* HSPI_RX0 */
+ [13] = RCAR_GP_PIN(3, 22), /* HSPI_TX0 */
+ [14] = RCAR_GP_PIN(4, 20), /* ETH_MAGIC */
+ [15] = RCAR_GP_PIN(4, 25), /* AVS1 */
+ [16] = RCAR_GP_PIN(4, 26), /* AVS2 */
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
void __iomem *addr;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- addr = pfc->windows->virt + info->reg;
+ addr = pfc->windows->virt + reg->puen;
- if (ioread32(addr) & BIT(info->bit))
+ if (ioread32(addr) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_DISABLE;
@@ -3103,21 +3144,20 @@ static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
void __iomem *addr;
+ unsigned int bit;
u32 value;
- u32 bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- addr = pfc->windows->virt + info->reg;
- bit = BIT(info->bit);
+ addr = pfc->windows->virt + reg->puen;
- value = ioread32(addr) & ~bit;
+ value = ioread32(addr) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- value |= bit;
+ value |= BIT(bit);
iowrite32(value, addr);
}
@@ -3144,6 +3184,7 @@ const struct sh_pfc_soc_info r8a7778_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index a0ed220071f5..333a3470e842 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -5097,6 +5097,7 @@ static const struct sh_pfc_soc_operations r8a7794_pinmux_ops = {
#ifdef CONFIG_PINCTRL_PFC_R8A7745
const struct sh_pfc_soc_info r8a7745_pinmux_info = {
.name = "r8a77450_pfc",
+ .ops = &r8a7794_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 95fd0994893a..1d4d84f34d60 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -1443,12 +1443,13 @@ static const u16 pinmux_data[] = {
};
/*
- * R8A7795 has 8 banks with 32 PGIOS in each => 256 GPIOs.
+ * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs.
* Physical layout rows: A - AW, cols: 1 - 39.
*/
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -3774,6 +3775,23 @@ static const unsigned int usb2_mux[] = {
USB2_PWEN_MARK, USB2_OVC_MARK,
};
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+/* - USB31 ------------------------------------------------------------------ */
+static const unsigned int usb31_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int usb31_mux[] = {
+ USB31_PWEN_MARK, USB31_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
SH_PFC_PIN_GROUP(audio_clk_a_b),
@@ -4080,6 +4098,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(usb30),
+ SH_PFC_PIN_GROUP(usb31),
};
static const char * const audio_clk_groups[] = {
@@ -4537,6 +4557,14 @@ static const char * const usb2_groups[] = {
"usb2",
};
+static const char * const usb30_groups[] = {
+ "usb30",
+};
+
+static const char * const usb31_groups[] = {
+ "usb31",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
@@ -4588,6 +4616,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(usb31),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5393,12 +5423,21 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -5409,242 +5448,261 @@ static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { PIN_NUMBER('F', 1), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST# */
- { PIN_A_NUMBER('R', 8), PU3, 1 }, /* DU_DOTCLKIN3 */
- { PIN_A_NUMBER('R', 7), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = PIN_NUMBER('F', 1), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N_A26 */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 7), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST# */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR*/
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* USB31_PWEN */
+ [ 6] = RCAR_GP_PIN(6, 31), /* USB31_OVC */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -5653,28 +5711,24 @@ static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
static const struct sh_pfc_soc_operations r8a7795es1_pinmux_ops = {
@@ -5699,6 +5753,8 @@ const struct sh_pfc_soc_info r8a7795es1_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 8b35772cda98..d1cec6d12e81 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1508,12 +1508,13 @@ static const u16 pinmux_data[] = {
};
/*
- * R8A7795 has 8 banks with 32 PGIOS in each => 256 GPIOs.
+ * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs.
* Physical layout rows: A - AW, cols: 1 - 39.
*/
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -1572,6 +1573,127 @@ static const struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+ AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+ AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+ AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+ AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+ AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+ AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+ AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+ AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+ AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+ AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+ AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+ AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+ AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+ AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+ AUDIO_CLKOUT2_B_MARK,
+};
+static const unsigned int audio_clkout3_a_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+ AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+ AUDIO_CLKOUT3_B_MARK,
+};
+
/* - EtherAVB --------------------------------------------------------------- */
static const unsigned int avb_link_pins[] = {
/* AVB_LINK */
@@ -1659,6 +1781,221 @@ static const unsigned int avb_avtp_capture_b_mux[] = {
AVB_AVTP_CAPTURE_B_MARK,
};
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif0_ctrl_a_mux[] = {
+ RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+static const unsigned int drif0_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif0_data0_a_mux[] = {
+ RIF0_D0_A_MARK,
+};
+static const unsigned int drif0_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif0_data1_a_mux[] = {
+ RIF0_D1_A_MARK,
+};
+static const unsigned int drif0_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int drif0_ctrl_b_mux[] = {
+ RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+static const unsigned int drif0_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int drif0_data0_b_mux[] = {
+ RIF0_D0_B_MARK,
+};
+static const unsigned int drif0_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int drif0_data1_b_mux[] = {
+ RIF0_D1_B_MARK,
+};
+static const unsigned int drif0_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int drif0_ctrl_c_mux[] = {
+ RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+static const unsigned int drif0_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int drif0_data0_c_mux[] = {
+ RIF0_D0_C_MARK,
+};
+static const unsigned int drif0_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int drif0_data1_c_mux[] = {
+ RIF0_D1_C_MARK,
+};
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif1_ctrl_a_mux[] = {
+ RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+static const unsigned int drif1_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif1_data0_a_mux[] = {
+ RIF1_D0_A_MARK,
+};
+static const unsigned int drif1_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif1_data1_a_mux[] = {
+ RIF1_D1_A_MARK,
+};
+static const unsigned int drif1_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int drif1_ctrl_b_mux[] = {
+ RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+static const unsigned int drif1_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int drif1_data0_b_mux[] = {
+ RIF1_D0_B_MARK,
+};
+static const unsigned int drif1_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int drif1_data1_b_mux[] = {
+ RIF1_D1_B_MARK,
+};
+static const unsigned int drif1_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int drif1_ctrl_c_mux[] = {
+ RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+static const unsigned int drif1_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 6),
+};
+static const unsigned int drif1_data0_c_mux[] = {
+ RIF1_D0_C_MARK,
+};
+static const unsigned int drif1_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int drif1_data1_c_mux[] = {
+ RIF1_D1_C_MARK,
+};
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif2_ctrl_a_mux[] = {
+ RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+static const unsigned int drif2_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif2_data0_a_mux[] = {
+ RIF2_D0_A_MARK,
+};
+static const unsigned int drif2_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif2_data1_a_mux[] = {
+ RIF2_D1_A_MARK,
+};
+static const unsigned int drif2_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int drif2_ctrl_b_mux[] = {
+ RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+static const unsigned int drif2_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int drif2_data0_b_mux[] = {
+ RIF2_D0_B_MARK,
+};
+static const unsigned int drif2_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int drif2_data1_b_mux[] = {
+ RIF2_D1_B_MARK,
+};
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif3_ctrl_a_mux[] = {
+ RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+static const unsigned int drif3_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif3_data0_a_mux[] = {
+ RIF3_D0_A_MARK,
+};
+static const unsigned int drif3_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif3_data1_a_mux[] = {
+ RIF3_D1_A_MARK,
+};
+static const unsigned int drif3_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int drif3_ctrl_b_mux[] = {
+ RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+static const unsigned int drif3_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int drif3_data0_b_mux[] = {
+ RIF3_D0_B_MARK,
+};
+static const unsigned int drif3_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int drif3_data1_b_mux[] = {
+ RIF3_D1_B_MARK,
+};
+
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -1740,6 +2077,308 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int hscif2_data_a_mux[] = {
+ HRX2_A_MARK, HTX2_A_MARK,
+};
+static const unsigned int hscif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int hscif2_clk_a_mux[] = {
+ HSCK2_A_MARK,
+};
+static const unsigned int hscif2_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int hscif2_ctrl_a_mux[] = {
+ HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int hscif2_data_b_mux[] = {
+ HRX2_B_MARK, HTX2_B_MARK,
+};
+static const unsigned int hscif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif2_clk_b_mux[] = {
+ HSCK2_B_MARK,
+};
+static const unsigned int hscif2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int hscif2_ctrl_b_mux[] = {
+ HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+
+static const unsigned int hscif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+static const unsigned int hscif2_data_c_mux[] = {
+ HRX2_C_MARK, HTX2_C_MARK,
+};
+static const unsigned int hscif2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 24),
+};
+static const unsigned int hscif2_clk_c_mux[] = {
+ HSCK2_C_MARK,
+};
+static const unsigned int hscif2_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int hscif2_ctrl_c_mux[] = {
+ HRTS2_N_C_MARK, HCTS2_N_C_MARK,
+};
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+static const unsigned int hscif3_data_c_mux[] = {
+ HRX3_C_MARK, HTX3_C_MARK,
+};
+static const unsigned int hscif3_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int hscif3_data_d_mux[] = {
+ HRX3_D_MARK, HTX3_D_MARK,
+};
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif4_data_a_mux[] = {
+ HRX4_A_MARK, HTX4_A_MARK,
+};
+static const unsigned int hscif4_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_clk_mux[] = {
+ HSCK4_MARK,
+};
+static const unsigned int hscif4_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int hscif4_ctrl_mux[] = {
+ HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_data_b_mux[] = {
+ HRX4_B_MARK, HTX4_B_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+ SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+ SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SDA2_B_MARK, SCL2_B_MARK,
+};
+static const unsigned int i2c6_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+ SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+ SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+ SDA6_C_MARK, SCL6_C_MARK,
+};
+
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MSIOF0 ----------------------------------------------------------------- */
static const unsigned int msiof0_clk_pins[] = {
/* SCK */
@@ -2750,6 +3389,390 @@ static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 2),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK,
+ SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DAT0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DAT0_MARK, SD1_DAT1_MARK,
+ SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CLK_MARK, SD1_CMD_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 14),
+};
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 15),
+};
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 2),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi2_data8_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+ SD2_DAT4_MARK, SD2_DAT5_MARK,
+ SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_a_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int sdhi2_cd_a_mux[] = {
+ SD2_CD_A_MARK,
+};
+static const unsigned int sdhi2_cd_b_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int sdhi2_cd_b_mux[] = {
+ SD2_CD_B_MARK,
+};
+static const unsigned int sdhi2_wp_a_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 14),
+};
+static const unsigned int sdhi2_wp_a_mux[] = {
+ SD2_WP_A_MARK,
+};
+static const unsigned int sdhi2_wp_b_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(5, 11),
+};
+static const unsigned int sdhi2_wp_b_mux[] = {
+ SD2_WP_B_MARK,
+};
+static const unsigned int sdhi2_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sdhi2_ds_mux[] = {
+ SD2_DS_MARK,
+};
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 9),
+};
+static const unsigned int sdhi3_data1_mux[] = {
+ SD3_DAT0_MARK,
+};
+static const unsigned int sdhi3_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int sdhi3_data4_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+static const unsigned int sdhi3_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_data8_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+ SD3_DAT4_MARK, SD3_DAT5_MARK,
+ SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+static const unsigned int sdhi3_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+static const unsigned int sdhi3_ctrl_mux[] = {
+ SD3_CLK_MARK, SD3_CMD_MARK,
+};
+static const unsigned int sdhi3_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int sdhi3_cd_mux[] = {
+ SD3_CD_MARK,
+};
+static const unsigned int sdhi3_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_wp_mux[] = {
+ SD3_WP_MARK,
+};
+static const unsigned int sdhi3_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int sdhi3_ds_mux[] = {
+ SD3_DS_MARK,
+};
+
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+ SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+ SSI_SCK01239_MARK, SSI_WS01239_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+ SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+ SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+ SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+ SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+ SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+ SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+ SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+ SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi349_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi349_ctrl_mux[] = {
+ SSI_SCK349_MARK, SSI_WS349_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+ SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+ SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+ SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+ SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+ SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+ SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+ SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+ SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+ SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+ SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+ SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+ SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+ SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
/* PWEN, OVC */
@@ -2783,7 +3806,33 @@ static const unsigned int usb2_ch3_mux[] = {
USB2_CH3_PWEN_MARK, USB2_CH3_OVC_MARK,
};
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
SH_PFC_PIN_GROUP(avb_link),
SH_PFC_PIN_GROUP(avb_magic),
SH_PFC_PIN_GROUP(avb_phy_int),
@@ -2794,6 +3843,36 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -2802,6 +3881,47 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
SH_PFC_PIN_GROUP(msiof0_clk),
SH_PFC_PIN_GROUP(msiof0_sync),
SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -2943,10 +4063,82 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
SH_PFC_PIN_GROUP(usb2_ch3),
+ SH_PFC_PIN_GROUP(usb30),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a_a",
+ "audio_clk_a_b",
+ "audio_clk_a_c",
+ "audio_clk_b_a",
+ "audio_clk_b_b",
+ "audio_clk_c_a",
+ "audio_clk_c_b",
+ "audio_clkout_a",
+ "audio_clkout_b",
+ "audio_clkout_c",
+ "audio_clkout_d",
+ "audio_clkout1_a",
+ "audio_clkout1_b",
+ "audio_clkout2_a",
+ "audio_clkout2_b",
+ "audio_clkout3_a",
+ "audio_clkout3_b",
};
static const char * const avb_groups[] = {
@@ -2962,6 +4154,48 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_b",
};
+static const char * const drif0_groups[] = {
+ "drif0_ctrl_a",
+ "drif0_data0_a",
+ "drif0_data1_a",
+ "drif0_ctrl_b",
+ "drif0_data0_b",
+ "drif0_data1_b",
+ "drif0_ctrl_c",
+ "drif0_data0_c",
+ "drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+ "drif1_ctrl_a",
+ "drif1_data0_a",
+ "drif1_data1_a",
+ "drif1_ctrl_b",
+ "drif1_data0_b",
+ "drif1_data1_b",
+ "drif1_ctrl_c",
+ "drif1_data0_c",
+ "drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+ "drif2_ctrl_a",
+ "drif2_data0_a",
+ "drif2_data1_a",
+ "drif2_ctrl_b",
+ "drif2_data0_b",
+ "drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+ "drif3_ctrl_a",
+ "drif3_data0_a",
+ "drif3_data1_a",
+ "drif3_ctrl_b",
+ "drif3_data0_b",
+ "drif3_data1_b",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -2973,6 +4207,74 @@ static const char * const du_groups[] = {
"du_disp",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data_a",
+ "hscif2_clk_a",
+ "hscif2_ctrl_a",
+ "hscif2_data_b",
+ "hscif2_clk_b",
+ "hscif2_ctrl_b",
+ "hscif2_data_c",
+ "hscif2_clk_c",
+ "hscif2_ctrl_c",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_b",
+ "hscif3_data_c",
+ "hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+ "hscif4_data_a",
+ "hscif4_clk",
+ "hscif4_ctrl",
+ "hscif4_data_b",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+ "i2c6_c",
+};
+
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
static const char * const msiof0_groups[] = {
"msiof0_clk",
"msiof0_sync",
@@ -3168,6 +4470,72 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_data8",
+ "sdhi2_ctrl",
+ "sdhi2_cd_a",
+ "sdhi2_wp_a",
+ "sdhi2_cd_b",
+ "sdhi2_wp_b",
+ "sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+ "sdhi3_data1",
+ "sdhi3_data4",
+ "sdhi3_data8",
+ "sdhi3_ctrl",
+ "sdhi3_cd",
+ "sdhi3_wp",
+ "sdhi3_ds",
+};
+
+static const char * const ssi_groups[] = {
+ "ssi0_data",
+ "ssi01239_ctrl",
+ "ssi1_data_a",
+ "ssi1_data_b",
+ "ssi1_ctrl_a",
+ "ssi1_ctrl_b",
+ "ssi2_data_a",
+ "ssi2_data_b",
+ "ssi2_ctrl_a",
+ "ssi2_ctrl_b",
+ "ssi3_data",
+ "ssi349_ctrl",
+ "ssi4_data",
+ "ssi4_ctrl",
+ "ssi5_data",
+ "ssi5_ctrl",
+ "ssi6_data",
+ "ssi6_ctrl",
+ "ssi7_data",
+ "ssi78_ctrl",
+ "ssi8_data",
+ "ssi9_data_a",
+ "ssi9_data_b",
+ "ssi9_ctrl_a",
+ "ssi9_ctrl_b",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -3184,9 +4552,27 @@ static const char * const usb2_ch3_groups[] = {
"usb2_ch3",
};
+static const char * const usb30_groups[] = {
+ "usb30",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
@@ -3205,10 +4591,16 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
SH_PFC_FUNCTION(usb2_ch3),
+ SH_PFC_FUNCTION(usb30),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4021,11 +5413,20 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -4036,242 +5437,261 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { PIN_NUMBER('F', 1), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST# */
- { PIN_A_NUMBER('R', 8), PU3, 1 }, /* DU_DOTCLKIN3 */
- { PIN_A_NUMBER('R', 7), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB2_CH3_OVC */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB2_CH3_PWEN */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = PIN_NUMBER('F', 1), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 7), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST# */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR*/
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* USB2_CH3_PWEN */
+ [ 6] = RCAR_GP_PIN(6, 31), /* USB2_CH3_OVC */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -4280,28 +5700,24 @@ static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
static const struct soc_device_attribute r8a7795es1[] = {
@@ -4340,6 +5756,8 @@ const struct sh_pfc_soc_info r8a7795_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 200e1f4f6db9..73ed9c74c137 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -495,7 +495,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL1_1 FM(SEL_PWM2_0) FM(SEL_PWM2_1)
#define MOD_SEL1_0 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
-/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+/* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
#define MOD_SEL2_31 FM(I2C_SEL_5_0) FM(I2C_SEL_5_1)
#define MOD_SEL2_30 FM(I2C_SEL_3_0) FM(I2C_SEL_3_1)
#define MOD_SEL2_29 FM(I2C_SEL_0_0) FM(I2C_SEL_0_1)
@@ -1518,6 +1518,7 @@ static const u16 pinmux_data[] = {
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -2392,6 +2393,50 @@ static const unsigned int i2c6_c_mux[] = {
SDA6_C_MARK, SCL6_C_MARK,
};
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MSIOF0 ----------------------------------------------------------------- */
static const unsigned int msiof0_clk_pins[] = {
/* SCK */
@@ -3922,6 +3967,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c6_a),
SH_PFC_PIN_GROUP(i2c6_b),
SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
SH_PFC_PIN_GROUP(msiof0_clk),
SH_PFC_PIN_GROUP(msiof0_sync),
SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -4286,6 +4337,15 @@ static const char * const i2c6_groups[] = {
"i2c6_c",
};
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
static const char * const msiof0_groups[] = {
"msiof0_clk",
"msiof0_sync",
@@ -4580,6 +4640,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
@@ -5416,11 +5477,20 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -5431,242 +5501,261 @@ static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* GP7_03 */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { RCAR_GP_PIN(1, 28), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST */
- /* bit 1 n/a on M3*/
- { PIN_A_NUMBER('R', 8), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* GP6_31 */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* GP6_30 */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = RCAR_GP_PIN(1, 28), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* GP7_03 */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_NONE,
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR*/
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* GP6_30 */
+ [ 6] = RCAR_GP_PIN(6, 31), /* GP6_31 */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -5675,28 +5764,24 @@ static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7796_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
static const struct sh_pfc_soc_operations r8a7796_pinmux_ops = {
@@ -5721,6 +5806,8 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
index 4f5ee1d7317d..89b7541ab1ed 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
@@ -198,8 +198,8 @@
#define GPSR6_0 FM(QSPI0_SPCLK)
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
-#define IP0_3_0 FM(IRQ0_A) FM(MSIOF2_SYNC_B) FM(USB0_IDIN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_7_4 FM(MSIOF2_SCK) F_(0, 0) FM(USB0_IDPU) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_3_0 FM(IRQ0_A) FM(MSIOF2_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4 FM(MSIOF2_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_11_8 FM(MSIOF2_TXD) FM(SCL3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_15_12 FM(MSIOF2_RXD) FM(SDA3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_19_16 FM(MLB_CLK) FM(MSIOF2_SYNC_A) FM(SCK5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -522,10 +522,8 @@ static const u16 pinmux_data[] = {
/* IPSR0 */
PINMUX_IPSR_MSEL(IP0_3_0, IRQ0_A, SEL_IRQ_0_0),
PINMUX_IPSR_MSEL(IP0_3_0, MSIOF2_SYNC_B, SEL_MSIOF2_1),
- PINMUX_IPSR_GPSR(IP0_3_0, USB0_IDIN),
PINMUX_IPSR_GPSR(IP0_7_4, MSIOF2_SCK),
- PINMUX_IPSR_GPSR(IP0_7_4, USB0_IDPU),
PINMUX_IPSR_GPSR(IP0_11_8, MSIOF2_TXD),
PINMUX_IPSR_MSEL(IP0_11_8, SCL3_A, SEL_I2C3_0),
@@ -936,6 +934,129 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - AUDIO CLOCK ------------------------------------------------------------- */
+static const unsigned int audio_clk_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(4, 1),
+};
+static const unsigned int audio_clk_a_mux[] = {
+ AUDIO_CLKA_MARK,
+};
+static const unsigned int audio_clk_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(2, 27),
+};
+static const unsigned int audio_clk_b_mux[] = {
+ AUDIO_CLKB_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int audio_clkout_mux[] = {
+ AUDIO_CLKOUT_MARK,
+};
+static const unsigned int audio_clkout1_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int audio_clkout1_mux[] = {
+ AUDIO_CLKOUT1_MARK,
+};
+
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdc_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int avb0_mdc_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_mii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0,
+ * AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0,
+ * AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ * AVB0_TXCREFCLK
+ */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int avb0_mii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK, AVB0_TD0_MARK,
+ AVB0_TD1_MARK, AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK, AVB0_RD0_MARK,
+ AVB0_RD1_MARK, AVB0_RD2_MARK, AVB0_RD3_MARK,
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_a_pins[] = {
+ /* AVB0_AVTP_PPS_A */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb0_avtp_pps_a_mux[] = {
+ AVB0_AVTP_PPS_A_MARK,
+};
+static const unsigned int avb0_avtp_match_a_pins[] = {
+ /* AVB0_AVTP_MATCH_A */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb0_avtp_match_a_mux[] = {
+ AVB0_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb0_avtp_capture_a_pins[] = {
+ /* AVB0_AVTP_CAPTURE_A */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb0_avtp_capture_a_mux[] = {
+ AVB0_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb0_avtp_pps_b_pins[] = {
+ /* AVB0_AVTP_PPS_B */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int avb0_avtp_pps_b_mux[] = {
+ AVB0_AVTP_PPS_B_MARK,
+};
+static const unsigned int avb0_avtp_match_b_pins[] = {
+ /* AVB0_AVTP_MATCH_B */
+ RCAR_GP_PIN(4, 18),
+};
+static const unsigned int avb0_avtp_match_b_mux[] = {
+ AVB0_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb0_avtp_capture_b_pins[] = {
+ /* AVB0_AVTP_CAPTURE_B */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int avb0_avtp_capture_b_mux[] = {
+ AVB0_AVTP_CAPTURE_B_MARK,
+};
+
/* - I2C -------------------------------------------------------------------- */
static const unsigned int i2c0_pins[] = {
/* SCL, SDA */
@@ -1018,6 +1139,118 @@ static const unsigned int mmc_ctrl_mux[] = {
MMC_CLK_MARK, MMC_CMD_MARK,
};
+/* - PWM0 ------------------------------------------------------------------ */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 1),
+};
+
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+static const unsigned int pwm0_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 18),
+};
+
+static const unsigned int pwm0_b_mux[] = {
+ PWM0_B_MARK,
+};
+
+static const unsigned int pwm0_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 29),
+};
+
+static const unsigned int pwm0_c_mux[] = {
+ PWM0_C_MARK,
+};
+
+/* - PWM1 ------------------------------------------------------------------ */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 19),
+};
+
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+static const unsigned int pwm1_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 30),
+};
+
+static const unsigned int pwm1_c_mux[] = {
+ PWM1_C_MARK,
+};
+
+/* - PWM2 ------------------------------------------------------------------ */
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 22),
+};
+
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+static const unsigned int pwm2_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 31),
+};
+
+static const unsigned int pwm2_c_mux[] = {
+ PWM2_C_MARK,
+};
+
+/* - PWM3 ------------------------------------------------------------------ */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 27),
+};
+
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+static const unsigned int pwm3_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(4, 0),
+};
+
+static const unsigned int pwm3_c_mux[] = {
+ PWM3_C_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
/* RX, TX */
@@ -1202,7 +1435,75 @@ static const unsigned int scif_clk_mux[] = {
SCIF_CLK_MARK,
};
+/* - SSI ---------------------------------------------------------------*/
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(4, 3),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi34_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 4),
+};
+static const unsigned int ssi34_ctrl_mux[] = {
+ SSI_SCK34_MARK, SSI_WS34_MARK,
+};
+static const unsigned int ssi4_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 5), RCAR_GP_PIN(4, 7),
+};
+static const unsigned int ssi4_ctrl_a_mux[] = {
+ SSI_SCK4_A_MARK, SSI_WS4_A_MARK,
+};
+static const unsigned int ssi4_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int ssi4_data_a_mux[] = {
+ SSI_SDATA4_A_MARK,
+};
+static const unsigned int ssi4_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 20),
+};
+static const unsigned int ssi4_ctrl_b_mux[] = {
+ SSI_SCK4_B_MARK, SSI_WS4_B_MARK,
+};
+static const unsigned int ssi4_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int ssi4_data_b_mux[] = {
+ SSI_SDATA4_B_MARK,
+};
+
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+};
+static const unsigned int usb0_mux[] = {
+ USB0_PWEN_MARK, USB0_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(audio_clkout1),
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdc),
+ SH_PFC_PIN_GROUP(avb0_mii),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps_b),
+ SH_PFC_PIN_GROUP(avb0_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture_b),
SH_PFC_PIN_GROUP(i2c0),
SH_PFC_PIN_GROUP(i2c1),
SH_PFC_PIN_GROUP(i2c2_a),
@@ -1213,6 +1514,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(mmc_data4),
SH_PFC_PIN_GROUP(mmc_data8),
SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(pwm0_a),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm0_c),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm1_c),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm2_c),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm3_c),
SH_PFC_PIN_GROUP(scif0_data_a),
SH_PFC_PIN_GROUP(scif0_clk_a),
SH_PFC_PIN_GROUP(scif0_data_b),
@@ -1238,6 +1551,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_data_b),
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi4_data_a),
+ SH_PFC_PIN_GROUP(ssi4_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi4_data_b),
+ SH_PFC_PIN_GROUP(usb0),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a",
+ "audio_clk_b",
+ "audio_clkout",
+ "audio_clkout1",
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdc",
+ "avb0_mii",
+ "avb0_avtp_pps_a",
+ "avb0_avtp_match_a",
+ "avb0_avtp_capture_a",
+ "avb0_avtp_pps_b",
+ "avb0_avtp_match_b",
+ "avb0_avtp_capture_b",
};
static const char * const i2c0_groups[] = {
@@ -1264,6 +1605,30 @@ static const char * const mmc_groups[] = {
"mmc_ctrl",
};
+static const char * const pwm0_groups[] = {
+ "pwm0_a",
+ "pwm0_b",
+ "pwm0_c",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+ "pwm1_c",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+ "pwm2_c",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+ "pwm3_c",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_clk_a",
@@ -1310,12 +1675,31 @@ static const char * const scif_clk_groups[] = {
"scif_clk",
};
+static const char * const ssi_groups[] = {
+ "ssi3_data",
+ "ssi34_ctrl",
+ "ssi4_ctrl_a",
+ "ssi4_data_a",
+ "ssi4_ctrl_b",
+ "ssi4_data_b",
+};
+
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb0),
SH_PFC_FUNCTION(i2c0),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -1323,6 +1707,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(usb0),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 5c9d79981e6d..736634aee500 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -513,7 +513,7 @@ static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
return -EINVAL;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, reg, 32);
+ val = sh_pfc_read(pfc, reg);
spin_unlock_irqrestore(&pfc->lock, flags);
val = (val >> offset) & GENMASK(size - 1, 0);
@@ -550,11 +550,11 @@ static int sh_pfc_pinconf_set_drive_strength(struct sh_pfc *pfc,
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, reg, 32);
+ val = sh_pfc_read(pfc, reg);
val &= ~GENMASK(offset + size - 1, offset);
val |= strength << offset;
- sh_pfc_write_reg(pfc, reg, 32, val);
+ sh_pfc_write(pfc, reg, val);
spin_unlock_irqrestore(&pfc->lock, flags);
@@ -645,7 +645,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
return bit;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, pocctrl, 32);
+ val = sh_pfc_read(pfc, pocctrl);
spin_unlock_irqrestore(&pfc->lock, flags);
arg = (val & BIT(bit)) ? 3300 : 1800;
@@ -716,12 +716,12 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
return -EINVAL;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, pocctrl, 32);
+ val = sh_pfc_read(pfc, pocctrl);
if (mV == 3300)
val |= BIT(bit);
else
val &= ~BIT(bit);
- sh_pfc_write_reg(pfc, pocctrl, 32, val);
+ sh_pfc_write(pfc, pocctrl, val);
spin_unlock_irqrestore(&pfc->lock, flags);
break;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 8688b405e081..213108a058fe 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -148,6 +148,21 @@ struct pinmux_drive_reg {
.reg = r, \
.fields =
+struct pinmux_bias_reg {
+ u32 puen; /* Pull-enable or pull-up control register */
+ u32 pud; /* Pull-up/down control register (optional) */
+ const u16 pins[32];
+};
+
+#define PINMUX_BIAS_REG(name1, r1, name2, r2) \
+ .puen = r1, \
+ .pud = r2, \
+ .pins =
+
+struct pinmux_ioctrl_reg {
+ u32 reg;
+};
+
struct pinmux_data_reg {
u32 reg;
u8 reg_width;
@@ -189,12 +204,6 @@ struct sh_pfc_window {
unsigned long size;
};
-struct sh_pfc_bias_info {
- u16 pin;
- u16 reg : 11;
- u16 bit : 5;
-};
-
struct sh_pfc_pin_range;
struct sh_pfc {
@@ -213,6 +222,7 @@ struct sh_pfc {
unsigned int nr_gpio_pins;
struct sh_pfc_chip *gpio;
+ u32 *saved_regs;
};
struct sh_pfc_soc_operations {
@@ -245,6 +255,8 @@ struct sh_pfc_soc_info {
const struct pinmux_cfg_reg *cfg_regs;
const struct pinmux_drive_reg *drive_regs;
+ const struct pinmux_bias_reg *bias_regs;
+ const struct pinmux_ioctrl_reg *ioctrl_regs;
const struct pinmux_data_reg *data_regs;
const u16 *pinmux_data;
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index f5cef6e5fa3e..3abb028f6158 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5860,7 +5860,7 @@ static int atlas7_gpio_request(struct gpio_chip *chip,
if (ret < 0)
return ret;
- if (pinctrl_request_gpio(chip->base + gpio))
+ if (pinctrl_gpio_request(chip->base + gpio))
return -ENODEV;
raw_spin_lock_irqsave(&a7gc->lock, flags);
@@ -5890,7 +5890,7 @@ static void atlas7_gpio_free(struct gpio_chip *chip,
raw_spin_unlock_irqrestore(&a7gc->lock, flags);
- pinctrl_free_gpio(chip->base + gpio);
+ pinctrl_gpio_free(chip->base + gpio);
}
static int atlas7_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 8b14a1f1e671..ca2347d0d579 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -614,7 +614,7 @@ static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset)
struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, offset);
unsigned long flags;
- if (pinctrl_request_gpio(chip->base + offset))
+ if (pinctrl_gpio_request(chip->base + offset))
return -ENODEV;
spin_lock_irqsave(&bank->lock, flags);
@@ -644,7 +644,7 @@ static void sirfsoc_gpio_free(struct gpio_chip *chip, unsigned offset)
spin_unlock_irqrestore(&bank->lock, flags);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static int sirfsoc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 72ae6bccee55..6a0ed8ab33b9 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -204,7 +204,7 @@ static int plgpio_request(struct gpio_chip *chip, unsigned offset)
if (offset >= chip->ngpio)
return -EINVAL;
- ret = pinctrl_request_gpio(gpio);
+ ret = pinctrl_gpio_request(gpio);
if (ret)
return ret;
@@ -242,7 +242,7 @@ err1:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
err0:
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
return ret;
}
@@ -273,7 +273,7 @@ disable_clk:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
}
/* PLGPIO IRQ */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 02b66588cac6..a276c61be217 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -150,12 +150,12 @@ static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index f763d8d62d6e..295e48fc94bc 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1289,6 +1289,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
.npins = ARRAY_SIZE(sun4i_a10_pins),
.irq_banks = 1,
.irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
index 4f2a726bbaeb..f5f77432ce6f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
@@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_FUNCTION(0x3, "uart0")), /* RX */
SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index 97b48336f84a..a78d7b922ef4 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -535,14 +535,16 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data_broken = {
.pins = sun50i_h5_pins,
.npins = ARRAY_SIZE(sun50i_h5_pins),
.irq_banks = 2,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
.pins = sun50i_h5_pins,
.npins = ARRAY_SIZE(sun50i_h5_pins),
.irq_banks = 3,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i.c b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
index 47afd558b114..27ec99e81c4c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
@@ -713,6 +713,7 @@ static const struct sunxi_pinctrl_desc sun5i_pinctrl_data = {
.pins = sun5i_pins,
.npins = ARRAY_SIZE(sun5i_pins),
.irq_banks = 1,
+ .disable_strict_mode = true,
};
static int sun5i_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index 49a1deb97bb7..a00246d3dd49 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -106,6 +106,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun6i_a31_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .disable_strict_mode = true,
};
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 951a25c18815..82ffaf466892 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -965,6 +965,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_pinctrl_data = {
.pins = sun6i_a31_pins,
.npins = ARRAY_SIZE(sun6i_a31_pins),
.irq_banks = 4,
+ .disable_strict_mode = true,
};
static int sun6i_a31_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 67ee6f9b3b68..8a08c4afc6a8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -93,6 +93,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_a23_r_pins),
.pin_base = PL_BASE,
.irq_banks = 1,
+ .disable_strict_mode = true,
};
static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index 721b6935baf3..402fd7d21e7b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -563,6 +563,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_pinctrl_data = {
.pins = sun8i_a23_pins,
.npins = ARRAY_SIZE(sun8i_a23_pins),
.irq_banks = 3,
+ .disable_strict_mode = true,
};
static int sun8i_a23_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index ef1e0bef4099..da387211a75e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -486,6 +486,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
.irq_bank_base = 1,
+ .disable_strict_mode = true,
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
index ebfd9a26628c..b795a199e240 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
@@ -82,7 +82,8 @@ static const struct sunxi_pinctrl_desc sun8i_h3_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_h3_r_pins),
.irq_banks = 1,
.pin_base = PL_BASE,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun8i_h3_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 518a92df4418..d1719a738c20 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -491,7 +491,8 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
.pins = sun8i_h3_pins,
.npins = ARRAY_SIZE(sun8i_h3_pins),
.irq_banks = 2,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index 92a873f73697..c63086c98335 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -152,6 +152,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun9i_a80_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .disable_strict_mode = true,
};
static int sun9i_a80_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index bc14e954d7a2..5553c0eb0f41 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */
SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */
SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
@@ -721,6 +721,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = {
.pins = sun9i_a80_pins,
.npins = ARRAY_SIZE(sun9i_a80_pins),
.irq_banks = 5,
+ .disable_strict_mode = true,
};
static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 52edf3b5988d..4b6cb25bc796 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -696,6 +696,7 @@ static const struct pinmux_ops sunxi_pmx_ops = {
.get_function_groups = sunxi_pmx_get_func_groups,
.set_mux = sunxi_pmx_set_mux,
.gpio_set_direction = sunxi_pmx_gpio_set_direction,
+ .strict = true,
};
static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
@@ -1245,6 +1246,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
struct pinctrl_desc *pctrl_desc;
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
+ struct pinmux_ops *pmxops;
struct resource *res;
int i, ret, last_pin, pin_idx;
struct clk *clk;
@@ -1305,7 +1307,16 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctrl_desc->npins = pctl->ngroups;
pctrl_desc->confops = &sunxi_pconf_ops;
pctrl_desc->pctlops = &sunxi_pctrl_ops;
- pctrl_desc->pmxops = &sunxi_pmx_ops;
+
+ pmxops = devm_kmemdup(&pdev->dev, &sunxi_pmx_ops, sizeof(sunxi_pmx_ops),
+ GFP_KERNEL);
+ if (!pmxops)
+ return -ENOMEM;
+
+ if (desc->disable_strict_mode)
+ pmxops->strict = false;
+
+ pctrl_desc->pmxops = pmxops;
pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
if (IS_ERR(pctl->pctl_dev)) {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 1bfc0d8a55df..11b128f54ed2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -112,6 +112,7 @@ struct sunxi_pinctrl_desc {
unsigned irq_banks;
unsigned irq_bank_base;
bool irq_read_needs_mux;
+ bool disable_strict_mode;
};
struct sunxi_pinctrl_function {
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 5c1b6325d80d..a8a6510183b6 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -575,11 +575,9 @@ static int ti_iodelay_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned long *config)
{
struct ti_iodelay_device *iod;
- struct device *dev;
struct ti_iodelay_pingroup *group;
iod = pinctrl_dev_get_drvdata(pctldev);
- dev = iod->dev;
group = ti_iodelay_get_pingroup(iod, selector);
if (!group)
@@ -693,12 +691,10 @@ static void ti_iodelay_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
unsigned int selector)
{
struct ti_iodelay_device *iod;
- struct device *dev;
struct ti_iodelay_pingroup *group;
int i;
iod = pinctrl_dev_get_drvdata(pctldev);
- dev = iod->dev;
group = ti_iodelay_get_pingroup(iod, selector);
if (!group)
return;
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index f9267fabe6b0..26fda5c53e65 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -204,9 +204,10 @@ static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
const struct pin_desc *desc = pin_desc_get(pctldev, pin);
enum uniphier_pin_drv_type type =
uniphier_pin_get_drv_type(desc->drv_data);
- const unsigned int strength_1bit[] = {4, 8};
- const unsigned int strength_2bit[] = {8, 12, 16, 20};
- const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16};
+ static const unsigned int strength_1bit[] = {4, 8};
+ static const unsigned int strength_2bit[] = {8, 12, 16, 20};
+ static const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12,
+ 14, 16};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
int ret;
@@ -399,9 +400,10 @@ static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
const struct pin_desc *desc = pin_desc_get(pctldev, pin);
enum uniphier_pin_drv_type type =
uniphier_pin_get_drv_type(desc->drv_data);
- const unsigned int strength_1bit[] = {4, 8, -1};
- const unsigned int strength_2bit[] = {8, 12, 16, 20, -1};
- const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16, -1};
+ static const unsigned int strength_1bit[] = {4, 8, -1};
+ static const unsigned int strength_2bit[] = {8, 12, 16, 20, -1};
+ static const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14,
+ 16, -1};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 9c5e359a63de..8a5ecd6277d8 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -472,8 +472,8 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
static const unsigned aout_pins[] = {135, 136, 137, 138, 139, 140, 141, 142};
static const int aout_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index 83341284dc44..3be7967edae0 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -553,8 +553,8 @@ static const struct pinctrl_pin_desc uniphier_ld20_pins[] = {
static const unsigned aout_pins[] = {135, 136, 137, 138, 139, 140, 141, 142};
static const int aout_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rgmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 38,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
index d9f166f0cc86..dbe94a9a0353 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
@@ -776,8 +776,8 @@ static const struct pinctrl_pin_desc uniphier_pxs3_pins[] = {
250, UNIPHIER_PIN_PULL_DOWN),
};
-static const unsigned int emmc_pins[] = {31, 32, 33, 34, 35, 36, 37, 38};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {32, 33, 34, 35, 36, 37, 38};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned int emmc_dat8_pins[] = {39, 40, 41, 42};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned int ether_rgmii_pins[] = {52, 53, 54, 55, 56, 57, 58, 59,
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 09dac11337d1..2c745e8ccad6 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -93,12 +93,31 @@ config ASUS_LAPTOP
config DELL_SMBIOS
tristate
- select DCDBAS
+
+config DELL_SMBIOS_WMI
+ tristate "Dell SMBIOS calling interface (WMI implementation)"
+ depends on ACPI_WMI
+ select DELL_WMI_DESCRIPTOR
+ select DELL_SMBIOS
+ ---help---
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over ACPI-WMI.
+
+ If you have a Dell computer from >2007 you should say Y or M here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
+
+config DELL_SMBIOS_SMM
+ tristate "Dell SMBIOS calling interface (SMM implementation)"
+ depends on DCDBAS
+ select DELL_SMBIOS
---help---
- This module provides common functions for kernel modules using
- Dell SMBIOS.
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over SMI/SMM.
- If you have a Dell laptop, say Y or M here.
+ If you have a Dell computer from <=2017 you should say Y or M here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
config DELL_LAPTOP
tristate "Dell Laptop Extras"
@@ -116,11 +135,12 @@ config DELL_LAPTOP
laptops (except for some models covered by the Compal driver).
config DELL_WMI
- tristate "Dell WMI extras"
+ tristate "Dell WMI notifications"
depends on ACPI_WMI
depends on DMI
depends on INPUT
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ select DELL_WMI_DESCRIPTOR
select DELL_SMBIOS
select INPUT_SPARSEKMAP
---help---
@@ -129,6 +149,10 @@ config DELL_WMI
To compile this driver as a module, choose M here: the module will
be called dell-wmi.
+config DELL_WMI_DESCRIPTOR
+ tristate
+ depends on ACPI_WMI
+
config DELL_WMI_AIO
tristate "WMI Hotkeys for Dell All-In-One series"
depends on ACPI_WMI
@@ -426,7 +450,6 @@ config THINKPAD_ACPI_ALSA_SUPPORT
config THINKPAD_ACPI_DEBUGFACILITIES
bool "Maintainer debug facilities"
depends on THINKPAD_ACPI
- default n
---help---
Enables extra stuff in the thinkpad-acpi which is completely useless
for normal use. Read the driver source to find out what it does.
@@ -437,7 +460,6 @@ config THINKPAD_ACPI_DEBUGFACILITIES
config THINKPAD_ACPI_DEBUG
bool "Verbose debug mode"
depends on THINKPAD_ACPI
- default n
---help---
Enables extra debugging information, at the expense of a slightly
increase in driver size.
@@ -447,7 +469,6 @@ config THINKPAD_ACPI_DEBUG
config THINKPAD_ACPI_UNSAFE_LEDS
bool "Allow control of important LEDs (unsafe)"
depends on THINKPAD_ACPI
- default n
---help---
Overriding LED state on ThinkPads can mask important
firmware alerts (like critical battery condition), or misled
@@ -515,7 +536,6 @@ config SENSORS_HDAPS
tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
depends on INPUT
select INPUT_POLLDEV
- default n
help
This driver provides support for the IBM Hard Drive Active Protection
System (hdaps), which provides an accelerometer and other misc. data.
@@ -658,6 +678,18 @@ config WMI_BMOF
To compile this driver as a module, choose M here: the module will
be called wmi-bmof.
+config INTEL_WMI_THUNDERBOLT
+ tristate "Intel WMI thunderbolt force power driver"
+ depends on ACPI_WMI
+ ---help---
+ Say Y here if you want to be able to use the WMI interface on select
+ systems to force the power control of Intel Thunderbolt controllers.
+ This is useful for updating the firmware when devices are not plugged
+ into the controller.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-thunderbolt.
+
config MSI_WMI
tristate "MSI WMI extras"
depends on ACPI_WMI
@@ -763,7 +795,6 @@ config TOSHIBA_HAPS
config TOSHIBA_WMI
tristate "Toshiba WMI Hotkeys Driver (EXPERIMENTAL)"
- default n
depends on ACPI_WMI
depends on INPUT
select INPUT_SPARSEKMAP
@@ -785,7 +816,6 @@ config ACPI_CMPC
depends on RFKILL || RFKILL=n
select INPUT
select BACKLIGHT_CLASS_DEVICE
- default n
help
Support for Intel Classmate PC ACPI devices, including some
keys as input device, backlight device, tablet and accelerometer
@@ -793,7 +823,7 @@ config ACPI_CMPC
config INTEL_CHT_INT33FE
tristate "Intel Cherry Trail ACPI INT33FE Driver"
- depends on X86 && ACPI && I2C
+ depends on X86 && ACPI && I2C && REGULATOR
---help---
This driver add support for the INT33FE ACPI device found on
some Intel Cherry Trail devices.
@@ -804,6 +834,10 @@ config INTEL_CHT_INT33FE
This driver instantiates i2c-clients for these, so that standard
i2c drivers for these chips can bind to the them.
+ If you enable this driver it is advised to also select
+ CONFIG_TYPEC_FUSB302=m, CONFIG_CHARGER_BQ24190=m and
+ CONFIG_BATTERY_MAX17042=m.
+
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
depends on GPIOLIB && ACPI
@@ -892,7 +926,6 @@ config INTEL_IPS
config INTEL_IMR
bool "Intel Isolated Memory Region support"
- default n
depends on X86_INTEL_QUARK && IOSF_MBI
---help---
This option provides a means to manipulate Isolated Memory Regions.
@@ -1088,7 +1121,6 @@ config INTEL_PUNIT_IPC
config INTEL_TELEMETRY
tristate "Intel SoC Telemetry Driver"
- default n
depends on INTEL_PMC_IPC && INTEL_PUNIT_IPC && X86_64
---help---
This driver provides interfaces to configure and use
@@ -1111,7 +1143,6 @@ config MLX_PLATFORM
config MLX_CPLD_PLATFORM
tristate "Mellanox platform hotplug driver support"
- default n
select HWMON
select I2C
---help---
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index f9e3ae683bbe..c32b34a72467 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -13,8 +13,11 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
+obj-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
+obj-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
@@ -40,6 +43,7 @@ obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
+obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
# toshiba_acpi must link after wmi to ensure that wmi devices are found
# before toshiba_acpi initializes
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index f3796164329e..d4aeac3477f5 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
return;
}
input_report_key(data->idev, KEY_RFKILL, 1);
+ input_sync(data->idev);
input_report_key(data->idev, KEY_RFKILL, 0);
input_sync(data->idev);
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 48e1541dc8d4..a32c5c00e0e7 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -119,6 +119,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
+#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
/* Misc */
#define ASUS_WMI_DEVID_CAMERA 0x00060013
@@ -148,6 +149,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
+#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
@@ -222,10 +224,13 @@ struct asus_wmi {
int tpd_led_wk;
struct led_classdev kbd_led;
int kbd_led_wk;
+ struct led_classdev lightbar_led;
+ int lightbar_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct kbd_led_work;
struct work_struct wlan_led_work;
+ struct work_struct lightbar_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
@@ -567,6 +572,48 @@ static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
}
+static void lightbar_led_update(struct work_struct *work)
+{
+ struct asus_wmi *asus;
+ int ctrl_param;
+
+ asus = container_of(work, struct asus_wmi, lightbar_led_work);
+
+ ctrl_param = asus->lightbar_led_wk;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_LIGHTBAR, ctrl_param, NULL);
+}
+
+static void lightbar_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+
+ asus->lightbar_led_wk = !!value;
+ queue_work(asus->led_workqueue, &asus->lightbar_led_work);
+}
+
+static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ u32 result;
+
+ asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
+
+ return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
+}
+
+static int lightbar_led_presence(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
+
+ return result & ASUS_WMI_DSTS_PRESENCE_BIT;
+}
+
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
@@ -575,6 +622,8 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
led_classdev_unregister(&asus->tpd_led);
if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
led_classdev_unregister(&asus->wlan_led);
+ if (!IS_ERR_OR_NULL(asus->lightbar_led.dev))
+ led_classdev_unregister(&asus->lightbar_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
}
@@ -630,6 +679,20 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
rv = led_classdev_register(&asus->platform_device->dev,
&asus->wlan_led);
+ if (rv)
+ goto error;
+ }
+
+ if (lightbar_led_presence(asus)) {
+ INIT_WORK(&asus->lightbar_led_work, lightbar_led_update);
+
+ asus->lightbar_led.name = "asus::lightbar";
+ asus->lightbar_led.brightness_set = lightbar_led_set;
+ asus->lightbar_led.brightness_get = lightbar_led_get;
+ asus->lightbar_led.max_brightness = 1;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->lightbar_led);
}
error:
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f42159fd2031..cd4725e7e0b5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -35,20 +35,9 @@
#include "dell-rbtn.h"
#include "dell-smbios.h"
-#define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
-#define GLOBAL_MIC_MUTE_ENABLE 0x0364
-#define GLOBAL_MIC_MUTE_DISABLE 0x0365
-#define KBD_LED_AC_TOKEN 0x0451
-
struct quirk_entry {
u8 touchpad_led;
+ u8 kbd_led_levels_off_1;
int needs_kbd_timeouts;
/*
@@ -79,12 +68,17 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
.kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
};
+static struct quirk_entry quirk_dell_latitude_e6410 = {
+ .kbd_led_levels_off_1 = 1,
+};
+
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
}
};
+static struct calling_interface_buffer *buffer;
static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
static struct rfkill *wifi_rfkill;
@@ -280,9 +274,39 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_xps13_9333,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Latitude E6410",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
+ },
+ .driver_data = &quirk_dell_latitude_e6410,
+ },
{ }
};
+void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+{
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ buffer->input[0] = arg0;
+ buffer->input[1] = arg1;
+ buffer->input[2] = arg2;
+ buffer->input[3] = arg3;
+}
+
+int dell_send_request(u16 class, u16 select)
+{
+ int ret;
+
+ buffer->cmd_class = class;
+ buffer->cmd_select = select;
+ ret = dell_smbios_call(buffer);
+ if (ret != 0)
+ return ret;
+ return dell_smbios_error(buffer->output[0]);
+}
+
/*
* Derived from information in smbios-wireless-ctl:
*
@@ -405,7 +429,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
static int dell_rfkill_set(void *data, bool blocked)
{
- struct calling_interface_buffer *buffer;
int disable = blocked ? 1 : 0;
unsigned long radio = (unsigned long)data;
int hwswitch_bit = (unsigned long)data - 1;
@@ -413,20 +436,16 @@ static int dell_rfkill_set(void *data, bool blocked)
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
status = buffer->output[1];
- if (ret != 0)
- goto out;
-
- dell_smbios_clear_buffer();
-
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0x2, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
hwswitch = buffer->output[1];
/* If the hardware switch controls this radio, and the hardware
@@ -435,28 +454,19 @@ static int dell_rfkill_set(void *data, bool blocked)
(status & BIT(0)) && !(status & BIT(16)))
disable = 1;
- dell_smbios_clear_buffer();
-
- buffer->input[0] = (1 | (radio<<8) | (disable << 16));
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
-
- out:
- dell_smbios_release_buffer();
- return dell_smbios_error(ret);
+ dell_set_arguments(1 | (radio<<8) | (disable << 16), 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ return ret;
}
-/* Must be called with the buffer held */
static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
- int status,
- struct calling_interface_buffer *buffer)
+ int status)
{
if (status & BIT(0)) {
/* Has hw-switch, sync sw_state to BIOS */
int block = rfkill_blocked(rfkill);
- dell_smbios_clear_buffer();
- buffer->input[0] = (1 | (radio << 8) | (block << 16));
- dell_smbios_send_request(17, 11);
+ dell_set_arguments(1 | (radio << 8) | (block << 16), 0, 0, 0);
+ dell_send_request(CLASS_INFO, SELECT_RFKILL);
} else {
/* No hw-switch, sync BIOS state to sw_state */
rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
@@ -472,32 +482,23 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
static void dell_rfkill_query(struct rfkill *rfkill, void *data)
{
- struct calling_interface_buffer *buffer;
int radio = ((unsigned long)data & 0xF);
int hwswitch;
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
status = buffer->output[1];
if (ret != 0 || !(status & BIT(0))) {
- dell_smbios_release_buffer();
return;
}
- dell_smbios_clear_buffer();
-
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0x2, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
hwswitch = buffer->output[1];
- dell_smbios_release_buffer();
-
if (ret != 0)
return;
@@ -513,27 +514,23 @@ static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
{
- struct calling_interface_buffer *buffer;
int hwswitch_state;
int hwswitch_ret;
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
status = buffer->output[1];
- dell_smbios_clear_buffer();
-
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- hwswitch_ret = buffer->output[0];
+ dell_set_arguments(0, 0x2, 0, 0);
+ hwswitch_ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (hwswitch_ret)
+ return hwswitch_ret;
hwswitch_state = buffer->output[1];
- dell_smbios_release_buffer();
-
seq_printf(s, "return:\t%d\n", ret);
seq_printf(s, "status:\t0x%X\n", status);
seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
@@ -613,46 +610,36 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
- struct calling_interface_buffer *buffer;
int hwswitch = 0;
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
status = buffer->output[1];
if (ret != 0)
- goto out;
-
- dell_smbios_clear_buffer();
+ return;
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0x2, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
if (ret == 0 && (status & BIT(0)))
hwswitch = buffer->output[1];
if (wifi_rfkill) {
dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
- dell_rfkill_update_sw_state(wifi_rfkill, 1, status, buffer);
+ dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
}
if (bluetooth_rfkill) {
dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
hwswitch);
- dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status,
- buffer);
+ dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
}
if (wwan_rfkill) {
dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
- dell_rfkill_update_sw_state(wwan_rfkill, 3, status, buffer);
+ dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
}
-
- out:
- dell_smbios_release_buffer();
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
@@ -696,7 +683,6 @@ static struct notifier_block dell_laptop_rbtn_notifier = {
static int __init dell_setup_rfkill(void)
{
- struct calling_interface_buffer *buffer;
int status, ret, whitelisted;
const char *product;
@@ -712,11 +698,9 @@ static int __init dell_setup_rfkill(void)
if (!force_rfkill && !whitelisted)
return 0;
- buffer = dell_smbios_get_buffer();
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
status = buffer->output[1];
- dell_smbios_release_buffer();
/* dell wireless info smbios call is not supported */
if (ret != 0)
@@ -869,7 +853,6 @@ static void dell_cleanup_rfkill(void)
static int dell_send_intensity(struct backlight_device *bd)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
@@ -877,24 +860,17 @@ static int dell_send_intensity(struct backlight_device *bd)
if (!token)
return -ENODEV;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- buffer->input[1] = bd->props.brightness;
-
+ dell_set_arguments(token->location, bd->props.brightness, 0, 0);
if (power_supply_is_system_supplied() > 0)
- dell_smbios_send_request(1, 2);
+ ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
else
- dell_smbios_send_request(1, 1);
-
- ret = dell_smbios_error(buffer->output[0]);
+ ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
- dell_smbios_release_buffer();
return ret;
}
static int dell_get_intensity(struct backlight_device *bd)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
@@ -902,20 +878,14 @@ static int dell_get_intensity(struct backlight_device *bd)
if (!token)
return -ENODEV;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
-
+ dell_set_arguments(token->location, 0, 0, 0);
if (power_supply_is_system_supplied() > 0)
- dell_smbios_send_request(0, 2);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
else
- dell_smbios_send_request(0, 1);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
- if (buffer->output[0])
- ret = dell_smbios_error(buffer->output[0]);
- else
+ if (ret == 0)
ret = buffer->output[1];
-
- dell_smbios_release_buffer();
return ret;
}
@@ -1179,20 +1149,13 @@ static DEFINE_MUTEX(kbd_led_mutex);
static int kbd_get_info(struct kbd_info *info)
{
- struct calling_interface_buffer *buffer;
u8 units;
int ret;
- buffer = dell_smbios_get_buffer();
-
- buffer->input[0] = 0x0;
- dell_smbios_send_request(4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smbios_error(ret);
- goto out;
- }
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ if (ret)
+ return ret;
info->modes = buffer->output[1] & 0xFFFF;
info->type = (buffer->output[1] >> 24) & 0xFF;
@@ -1200,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info)
units = (buffer->output[2] >> 8) & 0xFF;
info->levels = (buffer->output[2] >> 16) & 0xFF;
+ if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
+ info->levels--;
+
if (units & BIT(0))
info->seconds = (buffer->output[3] >> 0) & 0xFF;
if (units & BIT(1))
@@ -1209,8 +1175,6 @@ static int kbd_get_info(struct kbd_info *info)
if (units & BIT(3))
info->days = (buffer->output[3] >> 24) & 0xFF;
- out:
- dell_smbios_release_buffer();
return ret;
}
@@ -1269,19 +1233,12 @@ static int kbd_set_level(struct kbd_state *state, u8 level)
static int kbd_get_state(struct kbd_state *state)
{
- struct calling_interface_buffer *buffer;
int ret;
- buffer = dell_smbios_get_buffer();
-
- buffer->input[0] = 0x1;
- dell_smbios_send_request(4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smbios_error(ret);
- goto out;
- }
+ dell_set_arguments(0x1, 0, 0, 0);
+ ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ if (ret)
+ return ret;
state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
if (state->mode_bit != 0)
@@ -1296,31 +1253,27 @@ static int kbd_get_state(struct kbd_state *state)
state->timeout_value_ac = (buffer->output[2] >> 24) & 0x3F;
state->timeout_unit_ac = (buffer->output[2] >> 30) & 0x3;
- out:
- dell_smbios_release_buffer();
return ret;
}
static int kbd_set_state(struct kbd_state *state)
{
- struct calling_interface_buffer *buffer;
int ret;
+ u32 input1;
+ u32 input2;
+
+ input1 = BIT(state->mode_bit) & 0xFFFF;
+ input1 |= (state->triggers & 0xFF) << 16;
+ input1 |= (state->timeout_value & 0x3F) << 24;
+ input1 |= (state->timeout_unit & 0x3) << 30;
+ input2 = state->als_setting & 0xFF;
+ input2 |= (state->level & 0xFF) << 16;
+ input2 |= (state->timeout_value_ac & 0x3F) << 24;
+ input2 |= (state->timeout_unit_ac & 0x3) << 30;
+ dell_set_arguments(0x2, input1, input2, 0);
+ ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = 0x2;
- buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
- buffer->input[1] |= (state->triggers & 0xFF) << 16;
- buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
- buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
- buffer->input[2] = state->als_setting & 0xFF;
- buffer->input[2] |= (state->level & 0xFF) << 16;
- buffer->input[2] |= (state->timeout_value_ac & 0x3F) << 24;
- buffer->input[2] |= (state->timeout_unit_ac & 0x3) << 30;
- dell_smbios_send_request(4, 11);
- ret = buffer->output[0];
- dell_smbios_release_buffer();
-
- return dell_smbios_error(ret);
+ return ret;
}
static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
@@ -1345,7 +1298,6 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
static int kbd_set_token_bit(u8 bit)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
@@ -1356,19 +1308,14 @@ static int kbd_set_token_bit(u8 bit)
if (!token)
return -EINVAL;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- buffer->input[1] = token->value;
- dell_smbios_send_request(1, 0);
- ret = buffer->output[0];
- dell_smbios_release_buffer();
+ dell_set_arguments(token->location, token->value, 0, 0);
+ ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
- return dell_smbios_error(ret);
+ return ret;
}
static int kbd_get_token_bit(u8 bit)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
int val;
@@ -1380,15 +1327,12 @@ static int kbd_get_token_bit(u8 bit)
if (!token)
return -EINVAL;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- dell_smbios_send_request(0, 0);
- ret = buffer->output[0];
+ dell_set_arguments(token->location, 0, 0, 0);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_STD);
val = buffer->output[1];
- dell_smbios_release_buffer();
if (ret)
- return dell_smbios_error(ret);
+ return ret;
return (val == token->value);
}
@@ -2102,7 +2046,6 @@ static struct notifier_block dell_laptop_notifier = {
int dell_micmute_led_set(int state)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
if (state == 0)
@@ -2115,11 +2058,8 @@ int dell_micmute_led_set(int state)
if (!token)
return -ENODEV;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- buffer->input[1] = token->value;
- dell_smbios_send_request(1, 0);
- dell_smbios_release_buffer();
+ dell_set_arguments(token->location, token->value, 0, 0);
+ dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
return state;
}
@@ -2127,7 +2067,6 @@ EXPORT_SYMBOL_GPL(dell_micmute_led_set);
static int __init dell_init(void)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int max_intensity = 0;
int ret;
@@ -2151,6 +2090,13 @@ static int __init dell_init(void)
if (ret)
goto fail_platform_device2;
+ buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto fail_buffer;
+ }
+
+
ret = dell_setup_rfkill();
if (ret) {
@@ -2175,12 +2121,10 @@ static int __init dell_init(void)
token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
if (token) {
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- dell_smbios_send_request(0, 2);
- if (buffer->output[0] == 0)
+ dell_set_arguments(token->location, 0, 0, 0);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
+ if (ret)
max_intensity = buffer->output[3];
- dell_smbios_release_buffer();
}
if (max_intensity) {
@@ -2214,6 +2158,8 @@ static int __init dell_init(void)
fail_get_brightness:
backlight_device_unregister(dell_backlight_device);
fail_backlight:
+ kfree(buffer);
+fail_buffer:
dell_cleanup_rfkill();
fail_rfkill:
platform_device_del(platform_device);
@@ -2233,6 +2179,7 @@ static void __exit dell_exit(void)
touchpad_led_exit();
kbd_led_exit();
backlight_device_unregister(dell_backlight_device);
+ kfree(buffer);
dell_cleanup_rfkill();
if (platform_device) {
platform_device_unregister(platform_device);
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
new file mode 100644
index 000000000000..89f65c4651a0
--- /dev/null
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -0,0 +1,196 @@
+/*
+ * SMI methods for use with dell-smbios
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2017 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include "../../firmware/dcdbas.h"
+#include "dell-smbios.h"
+
+static int da_command_address;
+static int da_command_code;
+static struct calling_interface_buffer *buffer;
+struct platform_device *platform_device;
+static DEFINE_MUTEX(smm_mutex);
+
+static const struct dmi_system_id dell_device_table[] __initconst = {
+ {
+ .ident = "Dell laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/
+ },
+ },
+ {
+ .ident = "Dell Computer Corporation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, dell_device_table);
+
+static void __init parse_da_table(const struct dmi_header *dm)
+{
+ struct calling_interface_structure *table =
+ container_of(dm, struct calling_interface_structure, header);
+
+ /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
+ * 6 bytes of entry
+ */
+ if (dm->length < 17)
+ return;
+
+ da_command_address = table->cmdIOAddress;
+ da_command_code = table->cmdIOCode;
+}
+
+static void __init find_cmd_address(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xda: /* Calling interface */
+ parse_da_table(dm);
+ break;
+ }
+}
+
+int dell_smbios_smm_call(struct calling_interface_buffer *input)
+{
+ struct smi_cmd command;
+ size_t size;
+
+ size = sizeof(struct calling_interface_buffer);
+ command.magic = SMI_CMD_MAGIC;
+ command.command_address = da_command_address;
+ command.command_code = da_command_code;
+ command.ebx = virt_to_phys(buffer);
+ command.ecx = 0x42534931;
+
+ mutex_lock(&smm_mutex);
+ memcpy(buffer, input, size);
+ dcdbas_smi_request(&command);
+ memcpy(input, buffer, size);
+ mutex_unlock(&smm_mutex);
+ return 0;
+}
+
+/* When enabled this indicates that SMM won't work */
+static bool test_wsmt_enabled(void)
+{
+ struct calling_interface_token *wsmt;
+
+ /* if token doesn't exist, SMM will work */
+ wsmt = dell_smbios_find_token(WSMT_EN_TOKEN);
+ if (!wsmt)
+ return false;
+
+ /* If token exists, try to access over SMM but set a dummy return.
+ * - If WSMT disabled it will be overwritten by SMM
+ * - If WSMT enabled then dummy value will remain
+ */
+ buffer->cmd_class = CLASS_TOKEN_READ;
+ buffer->cmd_select = SELECT_TOKEN_STD;
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ buffer->input[0] = wsmt->location;
+ buffer->output[0] = 99;
+ dell_smbios_smm_call(buffer);
+ if (buffer->output[0] == 99)
+ return true;
+
+ return false;
+}
+
+static int __init dell_smbios_smm_init(void)
+{
+ int ret;
+ /*
+ * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
+ * is passed to SMI handler.
+ */
+ buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+ if (!buffer)
+ return -ENOMEM;
+
+ dmi_walk(find_cmd_address, NULL);
+
+ if (test_wsmt_enabled()) {
+ pr_debug("Disabling due to WSMT enabled\n");
+ ret = -ENODEV;
+ goto fail_wsmt;
+ }
+
+ platform_device = platform_device_alloc("dell-smbios", 1);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device_alloc;
+ }
+
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device_add;
+
+ ret = dell_smbios_register_device(&platform_device->dev,
+ &dell_smbios_smm_call);
+ if (ret)
+ goto fail_register;
+
+ return 0;
+
+fail_register:
+ platform_device_del(platform_device);
+
+fail_platform_device_add:
+ platform_device_put(platform_device);
+
+fail_wsmt:
+fail_platform_device_alloc:
+ free_page((unsigned long)buffer);
+ return ret;
+}
+
+static void __exit dell_smbios_smm_exit(void)
+{
+ if (platform_device) {
+ dell_smbios_unregister_device(&platform_device->dev);
+ platform_device_unregister(platform_device);
+ free_page((unsigned long)buffer);
+ }
+}
+
+subsys_initcall(dell_smbios_smm_init);
+module_exit(dell_smbios_smm_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell SMBIOS communications over SMI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
new file mode 100644
index 000000000000..609557aa5868
--- /dev/null
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -0,0 +1,285 @@
+/*
+ * WMI methods for use with dell-smbios
+ *
+ * Copyright (c) 2017 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/wmi.h>
+#include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
+
+static DEFINE_MUTEX(call_mutex);
+static DEFINE_MUTEX(list_mutex);
+static int wmi_supported;
+
+struct misc_bios_flags_structure {
+ struct dmi_header header;
+ u16 flags0;
+} __packed;
+#define FLAG_HAS_ACPI_WMI 0x02
+
+#define DELL_WMI_SMBIOS_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+
+struct wmi_smbios_priv {
+ struct dell_wmi_smbios_buffer *buf;
+ struct list_head list;
+ struct wmi_device *wdev;
+ struct device *child;
+ u32 req_buf_size;
+};
+static LIST_HEAD(wmi_list);
+
+static inline struct wmi_smbios_priv *get_first_smbios_priv(void)
+{
+ return list_first_entry_or_null(&wmi_list,
+ struct wmi_smbios_priv,
+ list);
+}
+
+static int run_smbios_call(struct wmi_device *wdev)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct wmi_smbios_priv *priv;
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ acpi_status status;
+
+ priv = dev_get_drvdata(&wdev->dev);
+ input.length = priv->req_buf_size - sizeof(u64);
+ input.pointer = &priv->buf->std;
+
+ dev_dbg(&wdev->dev, "evaluating: %u/%u [%x,%x,%x,%x]\n",
+ priv->buf->std.cmd_class, priv->buf->std.cmd_select,
+ priv->buf->std.input[0], priv->buf->std.input[1],
+ priv->buf->std.input[2], priv->buf->std.input[3]);
+
+ status = wmidev_evaluate_method(wdev, 0, 1, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ obj = (union acpi_object *)output.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_dbg(&wdev->dev, "received type: %d\n", obj->type);
+ if (obj->type == ACPI_TYPE_INTEGER)
+ dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
+ obj->integer.value);
+ return -EIO;
+ }
+ memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
+ dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
+ priv->buf->std.output[0], priv->buf->std.output[1],
+ priv->buf->std.output[2], priv->buf->std.output[3]);
+
+ return 0;
+}
+
+int dell_smbios_wmi_call(struct calling_interface_buffer *buffer)
+{
+ struct wmi_smbios_priv *priv;
+ size_t difference;
+ size_t size;
+ int ret;
+
+ mutex_lock(&call_mutex);
+ priv = get_first_smbios_priv();
+ if (!priv) {
+ ret = -ENODEV;
+ goto out_wmi_call;
+ }
+
+ size = sizeof(struct calling_interface_buffer);
+ difference = priv->req_buf_size - sizeof(u64) - size;
+
+ memset(&priv->buf->ext, 0, difference);
+ memcpy(&priv->buf->std, buffer, size);
+ ret = run_smbios_call(priv->wdev);
+ memcpy(buffer, &priv->buf->std, size);
+out_wmi_call:
+ mutex_unlock(&call_mutex);
+
+ return ret;
+}
+
+static long dell_smbios_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
+ struct wmi_ioctl_buffer *arg)
+{
+ struct wmi_smbios_priv *priv;
+ int ret = 0;
+
+ switch (cmd) {
+ case DELL_WMI_SMBIOS_CMD:
+ mutex_lock(&call_mutex);
+ priv = dev_get_drvdata(&wdev->dev);
+ if (!priv) {
+ ret = -ENODEV;
+ goto fail_smbios_cmd;
+ }
+ memcpy(priv->buf, arg, priv->req_buf_size);
+ if (dell_smbios_call_filter(&wdev->dev, &priv->buf->std)) {
+ dev_err(&wdev->dev, "Invalid call %d/%d:%8x\n",
+ priv->buf->std.cmd_class,
+ priv->buf->std.cmd_select,
+ priv->buf->std.input[0]);
+ ret = -EFAULT;
+ goto fail_smbios_cmd;
+ }
+ ret = run_smbios_call(priv->wdev);
+ if (ret)
+ goto fail_smbios_cmd;
+ memcpy(arg, priv->buf, priv->req_buf_size);
+fail_smbios_cmd:
+ mutex_unlock(&call_mutex);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static int dell_smbios_wmi_probe(struct wmi_device *wdev)
+{
+ struct wmi_driver *wdriver =
+ container_of(wdev->dev.driver, struct wmi_driver, driver);
+ struct wmi_smbios_priv *priv;
+ u32 hotfix;
+ int count;
+ int ret;
+
+ ret = dell_wmi_get_descriptor_valid();
+ if (ret)
+ return ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct wmi_smbios_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* WMI buffer size will be either 4k or 32k depending on machine */
+ if (!dell_wmi_get_size(&priv->req_buf_size))
+ return -EPROBE_DEFER;
+
+ /* some SMBIOS calls fail unless BIOS contains hotfix */
+ if (!dell_wmi_get_hotfix(&hotfix))
+ return -EPROBE_DEFER;
+ if (!hotfix) {
+ dev_warn(&wdev->dev,
+ "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
+ hotfix);
+ wdriver->filter_callback = NULL;
+ }
+
+ /* add in the length object we will use internally with ioctl */
+ priv->req_buf_size += sizeof(u64);
+ ret = set_required_buffer_size(wdev, priv->req_buf_size);
+ if (ret)
+ return ret;
+
+ count = get_order(priv->req_buf_size);
+ priv->buf = (void *)__get_free_pages(GFP_KERNEL, count);
+ if (!priv->buf)
+ return -ENOMEM;
+
+ /* ID is used by dell-smbios to set priority of drivers */
+ wdev->dev.id = 1;
+ ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
+ if (ret)
+ goto fail_register;
+
+ priv->wdev = wdev;
+ dev_set_drvdata(&wdev->dev, priv);
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+
+ return 0;
+
+fail_register:
+ free_pages((unsigned long)priv->buf, count);
+ return ret;
+}
+
+static int dell_smbios_wmi_remove(struct wmi_device *wdev)
+{
+ struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
+ int count;
+
+ mutex_lock(&call_mutex);
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+ dell_smbios_unregister_device(&wdev->dev);
+ count = get_order(priv->req_buf_size);
+ free_pages((unsigned long)priv->buf, count);
+ mutex_unlock(&call_mutex);
+ return 0;
+}
+
+static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
+ { .guid_string = DELL_WMI_SMBIOS_GUID },
+ { },
+};
+
+static void __init parse_b1_table(const struct dmi_header *dm)
+{
+ struct misc_bios_flags_structure *flags =
+ container_of(dm, struct misc_bios_flags_structure, header);
+
+ /* 4 bytes header, 8 bytes flags */
+ if (dm->length < 12)
+ return;
+ if (dm->handle != 0xb100)
+ return;
+ if ((flags->flags0 & FLAG_HAS_ACPI_WMI))
+ wmi_supported = 1;
+}
+
+static void __init find_b1(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xb1: /* misc bios flags */
+ parse_b1_table(dm);
+ break;
+ }
+}
+
+static struct wmi_driver dell_smbios_wmi_driver = {
+ .driver = {
+ .name = "dell-smbios",
+ },
+ .probe = dell_smbios_wmi_probe,
+ .remove = dell_smbios_wmi_remove,
+ .id_table = dell_smbios_wmi_id_table,
+ .filter_callback = dell_smbios_wmi_filter,
+};
+
+static int __init init_dell_smbios_wmi(void)
+{
+ dmi_walk(find_b1, NULL);
+
+ if (!wmi_supported)
+ return -ENODEV;
+
+ return wmi_driver_register(&dell_smbios_wmi_driver);
+}
+
+static void __exit exit_dell_smbios_wmi(void)
+{
+ wmi_driver_unregister(&dell_smbios_wmi_driver);
+}
+
+module_init(init_dell_smbios_wmi);
+module_exit(exit_dell_smbios_wmi);
+
+MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell SMBIOS communications over WMI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios.c
index 0a5723468bff..6a60db515bda 100644
--- a/drivers/platform/x86/dell-smbios.c
+++ b/drivers/platform/x86/dell-smbios.c
@@ -12,33 +12,119 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/dmi.h>
#include <linux/err.h>
-#include <linux/gfp.h>
#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/io.h>
-#include "../../firmware/dcdbas.h"
#include "dell-smbios.h"
-struct calling_interface_structure {
- struct dmi_header header;
- u16 cmdIOAddress;
- u8 cmdIOCode;
- u32 supportedCmds;
- struct calling_interface_token tokens[];
-} __packed;
-
-static struct calling_interface_buffer *buffer;
-static DEFINE_MUTEX(buffer_mutex);
-
-static int da_command_address;
-static int da_command_code;
+static u32 da_supported_commands;
static int da_num_tokens;
+static struct platform_device *platform_device;
static struct calling_interface_token *da_tokens;
+static struct device_attribute *token_location_attrs;
+static struct device_attribute *token_value_attrs;
+static struct attribute **token_attrs;
+static DEFINE_MUTEX(smbios_mutex);
+
+struct smbios_device {
+ struct list_head list;
+ struct device *device;
+ int (*call_fn)(struct calling_interface_buffer *);
+};
+
+struct smbios_call {
+ u32 need_capability;
+ int cmd_class;
+ int cmd_select;
+};
+
+/* calls that are whitelisted for given capabilities */
+static struct smbios_call call_whitelist[] = {
+ /* generally tokens are allowed, but may be further filtered or
+ * restricted by token blacklist or whitelist
+ */
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_STD},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_AC},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_BAT},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_AC},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT},
+ /* used by userspace: fwupdate */
+ {CAP_SYS_ADMIN, CLASS_ADMIN_PROP, SELECT_ADMIN_PROP},
+ /* used by userspace: fwupd */
+ {CAP_SYS_ADMIN, CLASS_INFO, SELECT_DOCK},
+ {CAP_SYS_ADMIN, CLASS_FLASH_INTERFACE, SELECT_FLASH_INTERFACE},
+};
+
+/* calls that are explicitly blacklisted */
+static struct smbios_call call_blacklist[] = {
+ {0x0000, 01, 07}, /* manufacturing use */
+ {0x0000, 06, 05}, /* manufacturing use */
+ {0x0000, 11, 03}, /* write once */
+ {0x0000, 11, 07}, /* write once */
+ {0x0000, 11, 11}, /* write once */
+ {0x0000, 19, -1}, /* diagnostics */
+ /* handled by kernel: dell-laptop */
+ {0x0000, CLASS_INFO, SELECT_RFKILL},
+ {0x0000, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT},
+};
+
+struct token_range {
+ u32 need_capability;
+ u16 min;
+ u16 max;
+};
+
+/* tokens that are whitelisted for given capabilities */
+static struct token_range token_whitelist[] = {
+ /* used by userspace: fwupdate */
+ {CAP_SYS_ADMIN, CAPSULE_EN_TOKEN, CAPSULE_DIS_TOKEN},
+ /* can indicate to userspace that WMI is needed */
+ {0x0000, WSMT_EN_TOKEN, WSMT_DIS_TOKEN}
+};
+
+/* tokens that are explicitly blacklisted */
+static struct token_range token_blacklist[] = {
+ {0x0000, 0x0058, 0x0059}, /* ME use */
+ {0x0000, 0x00CD, 0x00D0}, /* raid shadow copy */
+ {0x0000, 0x013A, 0x01FF}, /* sata shadow copy */
+ {0x0000, 0x0175, 0x0176}, /* write once */
+ {0x0000, 0x0195, 0x0197}, /* diagnostics */
+ {0x0000, 0x01DC, 0x01DD}, /* manufacturing use */
+ {0x0000, 0x027D, 0x0284}, /* diagnostics */
+ {0x0000, 0x02E3, 0x02E3}, /* manufacturing use */
+ {0x0000, 0x02FF, 0x02FF}, /* manufacturing use */
+ {0x0000, 0x0300, 0x0302}, /* manufacturing use */
+ {0x0000, 0x0325, 0x0326}, /* manufacturing use */
+ {0x0000, 0x0332, 0x0335}, /* fan control */
+ {0x0000, 0x0350, 0x0350}, /* manufacturing use */
+ {0x0000, 0x0363, 0x0363}, /* manufacturing use */
+ {0x0000, 0x0368, 0x0368}, /* manufacturing use */
+ {0x0000, 0x03F6, 0x03F7}, /* manufacturing use */
+ {0x0000, 0x049E, 0x049F}, /* manufacturing use */
+ {0x0000, 0x04A0, 0x04A3}, /* disagnostics */
+ {0x0000, 0x04E6, 0x04E7}, /* manufacturing use */
+ {0x0000, 0x4000, 0x7FFF}, /* internal BIOS use */
+ {0x0000, 0x9000, 0x9001}, /* internal BIOS use */
+ {0x0000, 0xA000, 0xBFFF}, /* write only */
+ {0x0000, 0xEFF0, 0xEFFF}, /* internal BIOS use */
+ /* handled by kernel: dell-laptop */
+ {0x0000, BRIGHTNESS_TOKEN, BRIGHTNESS_TOKEN},
+ {0x0000, KBD_LED_OFF_TOKEN, KBD_LED_AUTO_TOKEN},
+ {0x0000, KBD_LED_AC_TOKEN, KBD_LED_AC_TOKEN},
+ {0x0000, KBD_LED_AUTO_25_TOKEN, KBD_LED_AUTO_75_TOKEN},
+ {0x0000, KBD_LED_AUTO_100_TOKEN, KBD_LED_AUTO_100_TOKEN},
+ {0x0000, GLOBAL_MIC_MUTE_ENABLE, GLOBAL_MIC_MUTE_DISABLE},
+};
+
+static LIST_HEAD(smbios_device_list);
int dell_smbios_error(int value)
{
@@ -55,42 +141,175 @@ int dell_smbios_error(int value)
}
EXPORT_SYMBOL_GPL(dell_smbios_error);
-struct calling_interface_buffer *dell_smbios_get_buffer(void)
+int dell_smbios_register_device(struct device *d, void *call_fn)
{
- mutex_lock(&buffer_mutex);
- dell_smbios_clear_buffer();
- return buffer;
+ struct smbios_device *priv;
+
+ priv = devm_kzalloc(d, sizeof(struct smbios_device), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ get_device(d);
+ priv->device = d;
+ priv->call_fn = call_fn;
+ mutex_lock(&smbios_mutex);
+ list_add_tail(&priv->list, &smbios_device_list);
+ mutex_unlock(&smbios_mutex);
+ dev_dbg(d, "Added device: %s\n", d->driver->name);
+ return 0;
}
-EXPORT_SYMBOL_GPL(dell_smbios_get_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_register_device);
-void dell_smbios_clear_buffer(void)
+void dell_smbios_unregister_device(struct device *d)
{
- memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ struct smbios_device *priv;
+
+ mutex_lock(&smbios_mutex);
+ list_for_each_entry(priv, &smbios_device_list, list) {
+ if (priv->device == d) {
+ list_del(&priv->list);
+ put_device(d);
+ break;
+ }
+ }
+ mutex_unlock(&smbios_mutex);
+ dev_dbg(d, "Remove device: %s\n", d->driver->name);
}
-EXPORT_SYMBOL_GPL(dell_smbios_clear_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_unregister_device);
-void dell_smbios_release_buffer(void)
+int dell_smbios_call_filter(struct device *d,
+ struct calling_interface_buffer *buffer)
{
- mutex_unlock(&buffer_mutex);
+ u16 t = 0;
+ int i;
+
+ /* can't make calls over 30 */
+ if (buffer->cmd_class > 30) {
+ dev_dbg(d, "class too big: %u\n", buffer->cmd_class);
+ return -EINVAL;
+ }
+
+ /* supported calls on the particular system */
+ if (!(da_supported_commands & (1 << buffer->cmd_class))) {
+ dev_dbg(d, "invalid command, supported commands: 0x%8x\n",
+ da_supported_commands);
+ return -EINVAL;
+ }
+
+ /* match against call blacklist */
+ for (i = 0; i < ARRAY_SIZE(call_blacklist); i++) {
+ if (buffer->cmd_class != call_blacklist[i].cmd_class)
+ continue;
+ if (buffer->cmd_select != call_blacklist[i].cmd_select &&
+ call_blacklist[i].cmd_select != -1)
+ continue;
+ dev_dbg(d, "blacklisted command: %u/%u\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return -EINVAL;
+ }
+
+ /* if a token call, find token ID */
+
+ if ((buffer->cmd_class == CLASS_TOKEN_READ ||
+ buffer->cmd_class == CLASS_TOKEN_WRITE) &&
+ buffer->cmd_select < 3) {
+ /* find the matching token ID */
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].location != buffer->input[0])
+ continue;
+ t = da_tokens[i].tokenID;
+ break;
+ }
+
+ /* token call; but token didn't exist */
+ if (!t) {
+ dev_dbg(d, "token at location %04x doesn't exist\n",
+ buffer->input[0]);
+ return -EINVAL;
+ }
+
+ /* match against token blacklist */
+ for (i = 0; i < ARRAY_SIZE(token_blacklist); i++) {
+ if (!token_blacklist[i].min || !token_blacklist[i].max)
+ continue;
+ if (t >= token_blacklist[i].min &&
+ t <= token_blacklist[i].max)
+ return -EINVAL;
+ }
+
+ /* match against token whitelist */
+ for (i = 0; i < ARRAY_SIZE(token_whitelist); i++) {
+ if (!token_whitelist[i].min || !token_whitelist[i].max)
+ continue;
+ if (t < token_whitelist[i].min ||
+ t > token_whitelist[i].max)
+ continue;
+ if (!token_whitelist[i].need_capability ||
+ capable(token_whitelist[i].need_capability)) {
+ dev_dbg(d, "whitelisted token: %x\n", t);
+ return 0;
+ }
+
+ }
+ }
+ /* match against call whitelist */
+ for (i = 0; i < ARRAY_SIZE(call_whitelist); i++) {
+ if (buffer->cmd_class != call_whitelist[i].cmd_class)
+ continue;
+ if (buffer->cmd_select != call_whitelist[i].cmd_select)
+ continue;
+ if (!call_whitelist[i].need_capability ||
+ capable(call_whitelist[i].need_capability)) {
+ dev_dbg(d, "whitelisted capable command: %u/%u\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return 0;
+ }
+ dev_dbg(d, "missing capability %d for %u/%u\n",
+ call_whitelist[i].need_capability,
+ buffer->cmd_class, buffer->cmd_select);
+
+ }
+
+ /* not in a whitelist, only allow processes with capabilities */
+ if (capable(CAP_SYS_RAWIO)) {
+ dev_dbg(d, "Allowing %u/%u due to CAP_SYS_RAWIO\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return 0;
+ }
+
+ return -EACCES;
}
-EXPORT_SYMBOL_GPL(dell_smbios_release_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_call_filter);
-void dell_smbios_send_request(int class, int select)
+int dell_smbios_call(struct calling_interface_buffer *buffer)
{
- struct smi_cmd command;
+ int (*call_fn)(struct calling_interface_buffer *) = NULL;
+ struct device *selected_dev = NULL;
+ struct smbios_device *priv;
+ int ret;
- command.magic = SMI_CMD_MAGIC;
- command.command_address = da_command_address;
- command.command_code = da_command_code;
- command.ebx = virt_to_phys(buffer);
- command.ecx = 0x42534931;
+ mutex_lock(&smbios_mutex);
+ list_for_each_entry(priv, &smbios_device_list, list) {
+ if (!selected_dev || priv->device->id >= selected_dev->id) {
+ dev_dbg(priv->device, "Trying device ID: %d\n",
+ priv->device->id);
+ call_fn = priv->call_fn;
+ selected_dev = priv->device;
+ }
+ }
+
+ if (!selected_dev) {
+ ret = -ENODEV;
+ pr_err("No dell-smbios drivers are loaded\n");
+ goto out_smbios_call;
+ }
- buffer->class = class;
- buffer->select = select;
+ ret = call_fn(buffer);
- dcdbas_smi_request(&command);
+out_smbios_call:
+ mutex_unlock(&smbios_mutex);
+ return ret;
}
-EXPORT_SYMBOL_GPL(dell_smbios_send_request);
+EXPORT_SYMBOL_GPL(dell_smbios_call);
struct calling_interface_token *dell_smbios_find_token(int tokenid)
{
@@ -139,8 +358,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
if (dm->length < 17)
return;
- da_command_address = table->cmdIOAddress;
- da_command_code = table->cmdIOCode;
+ da_supported_commands = table->supportedCmds;
new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
sizeof(struct calling_interface_token),
@@ -156,6 +374,27 @@ static void __init parse_da_table(const struct dmi_header *dm)
da_num_tokens += tokens;
}
+static void zero_duplicates(struct device *dev)
+{
+ int i, j;
+
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].tokenID == 0)
+ continue;
+ for (j = i+1; j < da_num_tokens; j++) {
+ if (da_tokens[j].tokenID == 0)
+ continue;
+ if (da_tokens[i].tokenID == da_tokens[j].tokenID) {
+ dev_dbg(dev, "Zeroing dup token ID %x(%x/%x)\n",
+ da_tokens[j].tokenID,
+ da_tokens[j].location,
+ da_tokens[j].value);
+ da_tokens[j].tokenID = 0;
+ }
+ }
+ }
+}
+
static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
@@ -169,10 +408,160 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
+static int match_attribute(struct device *dev,
+ struct device_attribute *attr)
+{
+ int i;
+
+ for (i = 0; i < da_num_tokens * 2; i++) {
+ if (!token_attrs[i])
+ continue;
+ if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
+ return i/2;
+ }
+ dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
+ return -EINVAL;
+}
+
+static ssize_t location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ i = match_attribute(dev, attr);
+ if (i > 0)
+ return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].location);
+ return 0;
+}
+
+static ssize_t value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ i = match_attribute(dev, attr);
+ if (i > 0)
+ return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].value);
+ return 0;
+}
+
+static struct attribute_group smbios_attribute_group = {
+ .name = "tokens"
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "dell-smbios",
+ },
+};
+
+static int build_tokens_sysfs(struct platform_device *dev)
+{
+ char *location_name;
+ char *value_name;
+ size_t size;
+ int ret;
+ int i, j;
+
+ /* (number of tokens + 1 for null terminated */
+ size = sizeof(struct device_attribute) * (da_num_tokens + 1);
+ token_location_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_location_attrs)
+ return -ENOMEM;
+ token_value_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_value_attrs)
+ goto out_allocate_value;
+
+ /* need to store both location and value + terminator*/
+ size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
+ token_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_attrs)
+ goto out_allocate_attrs;
+
+ for (i = 0, j = 0; i < da_num_tokens; i++) {
+ /* skip empty */
+ if (da_tokens[i].tokenID == 0)
+ continue;
+ /* add location */
+ location_name = kasprintf(GFP_KERNEL, "%04x_location",
+ da_tokens[i].tokenID);
+ if (location_name == NULL)
+ goto out_unwind_strings;
+ sysfs_attr_init(&token_location_attrs[i].attr);
+ token_location_attrs[i].attr.name = location_name;
+ token_location_attrs[i].attr.mode = 0444;
+ token_location_attrs[i].show = location_show;
+ token_attrs[j++] = &token_location_attrs[i].attr;
+
+ /* add value */
+ value_name = kasprintf(GFP_KERNEL, "%04x_value",
+ da_tokens[i].tokenID);
+ if (value_name == NULL)
+ goto loop_fail_create_value;
+ sysfs_attr_init(&token_value_attrs[i].attr);
+ token_value_attrs[i].attr.name = value_name;
+ token_value_attrs[i].attr.mode = 0444;
+ token_value_attrs[i].show = value_show;
+ token_attrs[j++] = &token_value_attrs[i].attr;
+ continue;
+
+loop_fail_create_value:
+ kfree(value_name);
+ goto out_unwind_strings;
+ }
+ smbios_attribute_group.attrs = token_attrs;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &smbios_attribute_group);
+ if (ret)
+ goto out_unwind_strings;
+ return 0;
+
+out_unwind_strings:
+ for (i = i-1; i > 0; i--) {
+ kfree(token_location_attrs[i].attr.name);
+ kfree(token_value_attrs[i].attr.name);
+ }
+ kfree(token_attrs);
+out_allocate_attrs:
+ kfree(token_value_attrs);
+out_allocate_value:
+ kfree(token_location_attrs);
+
+ return -ENOMEM;
+}
+
+static void free_group(struct platform_device *pdev)
+{
+ int i;
+
+ sysfs_remove_group(&pdev->dev.kobj,
+ &smbios_attribute_group);
+ for (i = 0; i < da_num_tokens; i++) {
+ kfree(token_location_attrs[i].attr.name);
+ kfree(token_value_attrs[i].attr.name);
+ }
+ kfree(token_attrs);
+ kfree(token_value_attrs);
+ kfree(token_location_attrs);
+}
+
static int __init dell_smbios_init(void)
{
+ const struct dmi_device *valid;
int ret;
+ valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL);
+ if (!valid) {
+ pr_err("Unable to run on non-Dell system\n");
+ return -ENODEV;
+ }
+
dmi_walk(find_tokens, NULL);
if (!da_tokens) {
@@ -180,27 +569,52 @@ static int __init dell_smbios_init(void)
return -ENODEV;
}
- /*
- * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
- * is passed to SMI handler.
- */
- buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
- if (!buffer) {
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+
+ platform_device = platform_device_alloc("dell-smbios", 0);
+ if (!platform_device) {
ret = -ENOMEM;
- goto fail_buffer;
+ goto fail_platform_device_alloc;
}
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device_add;
+
+ /* duplicate tokens will cause problems building sysfs files */
+ zero_duplicates(&platform_device->dev);
+
+ ret = build_tokens_sysfs(platform_device);
+ if (ret)
+ goto fail_create_group;
return 0;
-fail_buffer:
+fail_create_group:
+ platform_device_del(platform_device);
+
+fail_platform_device_add:
+ platform_device_put(platform_device);
+
+fail_platform_device_alloc:
+ platform_driver_unregister(&platform_driver);
+
+fail_platform_driver:
kfree(da_tokens);
return ret;
}
static void __exit dell_smbios_exit(void)
{
+ mutex_lock(&smbios_mutex);
+ if (platform_device) {
+ free_group(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
+ }
kfree(da_tokens);
- free_page((unsigned long)buffer);
+ mutex_unlock(&smbios_mutex);
}
subsys_initcall(dell_smbios_init);
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index 45cbc2292cd3..138d478d9adc 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -16,17 +16,29 @@
#ifndef _DELL_SMBIOS_H_
#define _DELL_SMBIOS_H_
-struct notifier_block;
+#include <linux/device.h>
+#include <uapi/linux/wmi.h>
-/* This structure will be modified by the firmware when we enter
- * system management mode, hence the volatiles */
+/* Classes and selects used only in kernel drivers */
+#define CLASS_KBD_BACKLIGHT 4
+#define SELECT_KBD_BACKLIGHT 11
-struct calling_interface_buffer {
- u16 class;
- u16 select;
- volatile u32 input[4];
- volatile u32 output[4];
-} __packed;
+/* Tokens used in kernel drivers, any of these
+ * should be filtered from userspace access
+ */
+#define BRIGHTNESS_TOKEN 0x007d
+#define KBD_LED_AC_TOKEN 0x0451
+#define KBD_LED_OFF_TOKEN 0x01E1
+#define KBD_LED_ON_TOKEN 0x01E2
+#define KBD_LED_AUTO_TOKEN 0x01E3
+#define KBD_LED_AUTO_25_TOKEN 0x02EA
+#define KBD_LED_AUTO_50_TOKEN 0x02EB
+#define KBD_LED_AUTO_75_TOKEN 0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
+#define GLOBAL_MIC_MUTE_ENABLE 0x0364
+#define GLOBAL_MIC_MUTE_DISABLE 0x0365
+
+struct notifier_block;
struct calling_interface_token {
u16 tokenID;
@@ -37,12 +49,21 @@ struct calling_interface_token {
};
};
-int dell_smbios_error(int value);
+struct calling_interface_structure {
+ struct dmi_header header;
+ u16 cmdIOAddress;
+ u8 cmdIOCode;
+ u32 supportedCmds;
+ struct calling_interface_token tokens[];
+} __packed;
-struct calling_interface_buffer *dell_smbios_get_buffer(void);
-void dell_smbios_clear_buffer(void);
-void dell_smbios_release_buffer(void);
-void dell_smbios_send_request(int class, int select);
+int dell_smbios_register_device(struct device *d, void *call_fn);
+void dell_smbios_unregister_device(struct device *d);
+
+int dell_smbios_error(int value);
+int dell_smbios_call_filter(struct device *d,
+ struct calling_interface_buffer *buffer);
+int dell_smbios_call(struct calling_interface_buffer *buffer);
struct calling_interface_token *dell_smbios_find_token(int tokenid);
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index 37e646034ef8..1d87237bc731 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -90,7 +90,7 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
struct smo8800_device, miscdev);
u32 data = 0;
- unsigned char byte_data = 0;
+ unsigned char byte_data;
ssize_t retval = 1;
if (count < 1)
@@ -103,7 +103,6 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
if (retval)
return retval;
- byte_data = 1;
retval = 1;
if (data < 255)
diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c
new file mode 100644
index 000000000000..072821aa47fc
--- /dev/null
+++ b/drivers/platform/x86/dell-wmi-descriptor.c
@@ -0,0 +1,213 @@
+/*
+ * Dell WMI descriptor driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+#include "dell-wmi-descriptor.h"
+
+#define DELL_WMI_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
+
+struct descriptor_priv {
+ struct list_head list;
+ u32 interface_version;
+ u32 size;
+ u32 hotfix;
+};
+static int descriptor_valid = -EPROBE_DEFER;
+static LIST_HEAD(wmi_list);
+static DEFINE_MUTEX(list_mutex);
+
+int dell_wmi_get_descriptor_valid(void)
+{
+ if (!wmi_has_guid(DELL_WMI_DESCRIPTOR_GUID))
+ return -ENODEV;
+
+ return descriptor_valid;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_descriptor_valid);
+
+bool dell_wmi_get_interface_version(u32 *version)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *version = priv->interface_version;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_interface_version);
+
+bool dell_wmi_get_size(u32 *size)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *size = priv->size;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_size);
+
+bool dell_wmi_get_hotfix(u32 *hotfix)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *hotfix = priv->hotfix;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
+
+/*
+ * Descriptor buffer is 128 byte long and contains:
+ *
+ * Name Offset Length Value
+ * Vendor Signature 0 4 "DELL"
+ * Object Signature 4 4 " WMI"
+ * WMI Interface Version 8 4 <version>
+ * WMI buffer length 12 4 <length>
+ * WMI hotfix number 16 4 <hotfix>
+ */
+static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
+{
+ union acpi_object *obj = NULL;
+ struct descriptor_priv *priv;
+ u32 *buffer;
+ int ret;
+
+ obj = wmidev_block_query(wdev, 0);
+ if (!obj) {
+ dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+
+ /* Although it's not technically a failure, this would lead to
+ * unexpected behavior
+ */
+ if (obj->buffer.length != 128) {
+ dev_err(&wdev->dev,
+ "Dell descriptor buffer has unexpected length (%d)\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+
+ buffer = (u32 *)obj->buffer.pointer;
+
+ if (strncmp(obj->string.pointer, "DELL WMI", 8) != 0) {
+ dev_err(&wdev->dev, "Dell descriptor buffer has invalid signature (%8ph)\n",
+ buffer);
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+ descriptor_valid = 0;
+
+ if (buffer[2] != 0 && buffer[2] != 1)
+ dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%lu)\n",
+ (unsigned long) buffer[2]);
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct descriptor_priv),
+ GFP_KERNEL);
+
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ priv->interface_version = buffer[2];
+ priv->size = buffer[3];
+ priv->hotfix = buffer[4];
+ ret = 0;
+ dev_set_drvdata(&wdev->dev, priv);
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+
+ dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
+ (unsigned long) priv->interface_version,
+ (unsigned long) priv->size,
+ (unsigned long) priv->hotfix);
+
+out:
+ kfree(obj);
+ return ret;
+}
+
+static int dell_wmi_descriptor_remove(struct wmi_device *wdev)
+{
+ struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+ return 0;
+}
+
+static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
+ { .guid_string = DELL_WMI_DESCRIPTOR_GUID },
+ { },
+};
+
+static struct wmi_driver dell_wmi_descriptor_driver = {
+ .driver = {
+ .name = "dell-wmi-descriptor",
+ },
+ .probe = dell_wmi_descriptor_probe,
+ .remove = dell_wmi_descriptor_remove,
+ .id_table = dell_wmi_descriptor_id_table,
+};
+
+module_wmi_driver(dell_wmi_descriptor_driver);
+
+MODULE_ALIAS("wmi:" DELL_WMI_DESCRIPTOR_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell WMI descriptor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-wmi-descriptor.h b/drivers/platform/x86/dell-wmi-descriptor.h
new file mode 100644
index 000000000000..a6123a4d06a7
--- /dev/null
+++ b/drivers/platform/x86/dell-wmi-descriptor.h
@@ -0,0 +1,28 @@
+/*
+ * Dell WMI descriptor driver
+ *
+ * Copyright (c) 2017 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DELL_WMI_DESCRIPTOR_H_
+#define _DELL_WMI_DESCRIPTOR_H_
+
+#include <linux/wmi.h>
+
+/* possible return values:
+ * -ENODEV: Descriptor GUID missing from WMI bus
+ * -EPROBE_DEFER: probing for dell-wmi-descriptor not yet run
+ * 0: valid descriptor, successfully probed
+ * < 0: invalid descriptor, don't probe dependent devices
+ */
+int dell_wmi_get_descriptor_valid(void);
+
+bool dell_wmi_get_interface_version(u32 *version);
+bool dell_wmi_get_size(u32 *size);
+bool dell_wmi_get_hotfix(u32 *hotfix);
+
+#endif
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 28d9f8696081..fb25b20df316 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -39,6 +39,7 @@
#include <linux/wmi.h>
#include <acpi/video.h>
#include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -46,12 +47,10 @@ MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
#define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492"
-#define DELL_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
static bool wmi_requires_smbios_request;
MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
-MODULE_ALIAS("wmi:"DELL_DESCRIPTOR_GUID);
struct dell_wmi_priv {
struct input_dev *input_dev;
@@ -619,78 +618,6 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev)
}
/*
- * Descriptor buffer is 128 byte long and contains:
- *
- * Name Offset Length Value
- * Vendor Signature 0 4 "DELL"
- * Object Signature 4 4 " WMI"
- * WMI Interface Version 8 4 <version>
- * WMI buffer length 12 4 4096
- */
-static int dell_wmi_check_descriptor_buffer(struct wmi_device *wdev)
-{
- struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
- union acpi_object *obj = NULL;
- struct wmi_device *desc_dev;
- u32 *buffer;
- int ret;
-
- desc_dev = wmidev_get_other_guid(wdev, DELL_DESCRIPTOR_GUID);
- if (!desc_dev) {
- dev_err(&wdev->dev, "Dell WMI descriptor does not exist\n");
- return -ENODEV;
- }
-
- obj = wmidev_block_query(desc_dev, 0);
- if (!obj) {
- dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
- ret = -EIO;
- goto out;
- }
-
- if (obj->type != ACPI_TYPE_BUFFER) {
- dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (obj->buffer.length != 128) {
- dev_err(&wdev->dev,
- "Dell descriptor buffer has invalid length (%d)\n",
- obj->buffer.length);
- if (obj->buffer.length < 16) {
- ret = -EINVAL;
- goto out;
- }
- }
-
- buffer = (u32 *)obj->buffer.pointer;
-
- if (buffer[0] != 0x4C4C4544 && buffer[1] != 0x494D5720)
- dev_warn(&wdev->dev, "Dell descriptor buffer has invalid signature (%*ph)\n",
- 8, buffer);
-
- if (buffer[2] != 0 && buffer[2] != 1)
- dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%d)\n",
- buffer[2]);
-
- if (buffer[3] != 4096)
- dev_warn(&wdev->dev, "Dell descriptor buffer has invalid buffer length (%d)\n",
- buffer[3]);
-
- priv->interface_version = buffer[2];
- ret = 0;
-
- dev_info(&wdev->dev, "Detected Dell WMI interface version %u\n",
- priv->interface_version);
-
-out:
- kfree(obj);
- put_device(&desc_dev->dev);
- return ret;
-}
-
-/*
* According to Dell SMBIOS documentation:
*
* 17 3 Application Program Registration
@@ -711,13 +638,18 @@ static int dell_wmi_events_set_enabled(bool enable)
struct calling_interface_buffer *buffer;
int ret;
- buffer = dell_smbios_get_buffer();
+ buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ buffer->cmd_class = CLASS_INFO;
+ buffer->cmd_select = SELECT_APP_REGISTRATION;
buffer->input[0] = 0x10000;
buffer->input[1] = 0x51534554;
buffer->input[3] = enable;
- dell_smbios_send_request(17, 3);
- ret = buffer->output[0];
- dell_smbios_release_buffer();
+ ret = dell_smbios_call(buffer);
+ if (ret == 0)
+ ret = buffer->output[0];
+ kfree(buffer);
return dell_smbios_error(ret);
}
@@ -725,7 +657,11 @@ static int dell_wmi_events_set_enabled(bool enable)
static int dell_wmi_probe(struct wmi_device *wdev)
{
struct dell_wmi_priv *priv;
- int err;
+ int ret;
+
+ ret = dell_wmi_get_descriptor_valid();
+ if (ret)
+ return ret;
priv = devm_kzalloc(
&wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
@@ -733,9 +669,8 @@ static int dell_wmi_probe(struct wmi_device *wdev)
return -ENOMEM;
dev_set_drvdata(&wdev->dev, priv);
- err = dell_wmi_check_descriptor_buffer(wdev);
- if (err)
- return err;
+ if (!dell_wmi_get_interface_version(&priv->interface_version))
+ return -EPROBE_DEFER;
return dell_wmi_input_setup(wdev);
}
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 56a8195096a2..2cfbd3fa5136 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -691,6 +691,7 @@ static enum led_brightness eco_led_get(struct led_classdev *cdev)
static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
struct led_classdev *led;
int result;
@@ -724,12 +725,15 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
}
/*
- * BTNI bit 24 seems to indicate the presence of a radio toggle
- * button in place of a slide switch, and all such machines appear
- * to also have an RF LED. Therefore use bit 24 as an indicator
- * that an RF LED is present.
+ * Some Fujitsu laptops have a radio toggle button in place of a slide
+ * switch and all such machines appear to also have an RF LED. Based on
+ * comparing DSDT tables of four Fujitsu Lifebook models (E744, E751,
+ * S7110, S8420; the first one has a radio toggle button, the other
+ * three have slide switches), bit 17 of flags_supported (the value
+ * returned by method S000 of ACPI device FUJ02E3) seems to indicate
+ * whether given model has a radio toggle button.
*/
- if (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+ if (priv->flags_supported & BIT(17)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index b4ed3dc983d5..b4224389febe 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask)
if (state < 0)
return state;
- return state & 0x1;
+ return !!(state & mask);
}
static int __init hp_wmi_bios_2008_later(void)
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 493d8910a74e..7b12abe86b94 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
+ AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index fe98d4ac0df3..53ab4e0f8962 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1166,6 +1166,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
},
},
+ {
+ .ident = "Lenovo YOGA 920-13IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"),
+ },
+ },
{}
};
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index e34fd70b67af..f470279c4c10 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -226,6 +226,24 @@ wakeup:
return;
}
+ /*
+ * Needed for suspend to work on some platforms that don't expose
+ * the 5-button array, but still send notifies with power button
+ * event code to this device object on power button actions.
+ *
+ * Report the power button press; catch and ignore the button release.
+ */
+ if (!priv->array) {
+ if (event == 0xce) {
+ input_report_key(priv->input_dev, KEY_POWER, 1);
+ input_sync(priv->input_dev);
+ return;
+ }
+
+ if (event == 0xcf)
+ return;
+ }
+
/* 0xC0 is for HID events, other values are for 5 button array */
if (event != 0xc0) {
if (!priv->array ||
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c
new file mode 100644
index 000000000000..c2257bd06f18
--- /dev/null
+++ b/drivers/platform/x86/intel-wmi-thunderbolt.c
@@ -0,0 +1,98 @@
+/*
+ * WMI Thunderbolt driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
+
+static ssize_t force_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ u8 mode;
+
+ input.length = sizeof(u8);
+ input.pointer = &mode;
+ mode = hex_to_bin(buf[0]);
+ if (mode == 0 || mode == 1) {
+ status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1,
+ &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR_WO(force_power);
+
+static struct attribute *tbt_attrs[] = {
+ &dev_attr_force_power.attr,
+ NULL
+};
+
+static const struct attribute_group tbt_attribute_group = {
+ .attrs = tbt_attrs,
+};
+
+static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&wdev->dev.kobj, &tbt_attribute_group);
+ kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
+ return ret;
+}
+
+static int intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
+{
+ sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
+ kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
+ return 0;
+}
+
+static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
+ { .guid_string = INTEL_WMI_THUNDERBOLT_GUID },
+ { },
+};
+
+static struct wmi_driver intel_wmi_thunderbolt_driver = {
+ .driver = {
+ .name = "intel-wmi-thunderbolt",
+ },
+ .probe = intel_wmi_thunderbolt_probe,
+ .remove = intel_wmi_thunderbolt_remove,
+ .id_table = intel_wmi_thunderbolt_id_table,
+};
+
+module_wmi_driver(intel_wmi_thunderbolt_driver);
+
+MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index da706e2c4232..380ef7ec094f 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -24,6 +24,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define EXPECTED_PTYPE 4
@@ -34,6 +35,42 @@ struct cht_int33fe_data {
struct i2c_client *pi3usb30532;
};
+/*
+ * Grrr I severly dislike buggy BIOS-es. At least one BIOS enumerates
+ * the max17047 both through the INT33FE ACPI device (it is right there
+ * in the resources table) as well as through a separate MAX17047 device.
+ *
+ * These helpers are used to work around this by checking if an i2c-client
+ * for the max17047 has already been registered.
+ */
+static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
+{
+ struct i2c_client **max17047 = data;
+ struct acpi_device *adev;
+ const char *hid;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return 0;
+
+ hid = acpi_device_hid(adev);
+
+ /* The MAX17047 ACPI node doesn't have an UID, so we don't check that */
+ if (strcmp(hid, "MAX17047"))
+ return 0;
+
+ *max17047 = to_i2c_client(dev);
+ return 1;
+}
+
+static struct i2c_client *cht_int33fe_find_max17047(void)
+{
+ struct i2c_client *max17047 = NULL;
+
+ i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
+ return max17047;
+}
+
static const char * const max17047_suppliers[] = { "bq24190-charger" };
static const struct property_entry max17047_props[] = {
@@ -41,14 +78,25 @@ static const struct property_entry max17047_props[] = {
{ }
};
+static const struct property_entry fusb302_props[] = {
+ PROPERTY_ENTRY_STRING("fcs,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_U32("fcs,max-sink-microvolt", 12000000),
+ PROPERTY_ENTRY_U32("fcs,max-sink-microamp", 3000000),
+ PROPERTY_ENTRY_U32("fcs,max-sink-microwatt", 36000000),
+ { }
+};
+
static int cht_int33fe_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_board_info board_info;
struct cht_int33fe_data *data;
+ struct i2c_client *max17047;
+ struct regulator *regulator;
unsigned long long ptyp;
acpi_status status;
int fusb302_irq;
+ int ret;
status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
if (ACPI_FAILURE(status)) {
@@ -63,6 +111,34 @@ static int cht_int33fe_probe(struct i2c_client *client)
if (ptyp != EXPECTED_PTYPE)
return -ENODEV;
+ /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */
+ if (!acpi_dev_present("INT34D3", "1", 3)) {
+ dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n",
+ EXPECTED_PTYPE);
+ return -ENODEV;
+ }
+
+ /*
+ * We expect the WC PMIC to be paired with a TI bq24292i charger-IC.
+ * We check for the bq24292i vbus regulator here, this has 2 purposes:
+ * 1) The bq24292i allows charging with up to 12V, setting the fusb302's
+ * max-snk voltage to 12V with another charger-IC is not good.
+ * 2) For the fusb302 driver to get the bq24292i vbus regulator, the
+ * regulator-map, which is part of the bq24292i regulator_init_data,
+ * must be registered before the fusb302 is instantiated, otherwise
+ * it will end up with a dummy-regulator.
+ * Note "cht_wc_usb_typec_vbus" comes from the regulator_init_data
+ * which is defined in i2c-cht-wc.c from where the bq24292i i2c-client
+ * gets instantiated. We use regulator_get_optional here so that we
+ * don't end up getting a dummy-regulator ourselves.
+ */
+ regulator = regulator_get_optional(dev, "cht_wc_usb_typec_vbus");
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ return (ret == -ENODEV) ? -EPROBE_DEFER : ret;
+ }
+ regulator_put(regulator);
+
/* The FUSB302 uses the irq at index 1 and is the only irq user */
fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1);
if (fusb302_irq < 0) {
@@ -75,16 +151,31 @@ static int cht_int33fe_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
- board_info.properties = max17047_props;
-
- data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
- if (!data->max17047)
- return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
+ /* Work around BIOS bug, see comment on cht_int33fe_find_max17047 */
+ max17047 = cht_int33fe_find_max17047();
+ if (max17047) {
+ /* Pre-existing i2c-client for the max17047, add device-props */
+ ret = device_add_properties(&max17047->dev, max17047_props);
+ if (ret)
+ return ret;
+ /* And re-probe to get the new device-props applied. */
+ ret = device_reprobe(&max17047->dev);
+ if (ret)
+ dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
+ } else {
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ board_info.dev_name = "max17047";
+ board_info.properties = max17047_props;
+ data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
+ if (!data->max17047)
+ return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
+ }
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "fusb302", I2C_NAME_SIZE);
+ strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ board_info.dev_name = "fusb302";
+ board_info.properties = fusb302_props;
board_info.irq = fusb302_irq;
data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
@@ -92,6 +183,7 @@ static int cht_int33fe_probe(struct i2c_client *client)
goto out_unregister_max17047;
memset(&board_info, 0, sizeof(board_info));
+ board_info.dev_name = "pi3usb30532";
strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
@@ -106,7 +198,8 @@ out_unregister_fusb302:
i2c_unregister_device(data->fusb302);
out_unregister_max17047:
- i2c_unregister_device(data->max17047);
+ if (data->max17047)
+ i2c_unregister_device(data->max17047);
return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
}
@@ -117,7 +210,8 @@ static int cht_int33fe_remove(struct i2c_client *i2c)
i2c_unregister_device(data->pi3usb30532);
i2c_unregister_device(data->fusb302);
- i2c_unregister_device(data->max17047);
+ if (data->max17047)
+ i2c_unregister_device(data->max17047);
return 0;
}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 58dcee562d64..a0c95853fd3f 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -10,10 +10,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -259,8 +255,6 @@ static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */
/* Per-SKU limits */
struct ips_mcp_limits {
- int cpu_family;
- int cpu_model; /* includes extended model... */
int mcp_power_limit; /* mW units */
int core_power_limit;
int mch_power_limit;
@@ -295,11 +289,14 @@ static struct ips_mcp_limits ips_ulv_limits = {
};
struct ips_driver {
- struct pci_dev *dev;
- void *regmap;
+ struct device *dev;
+ void __iomem *regmap;
+ int irq;
+
struct task_struct *monitor;
struct task_struct *adjust;
struct dentry *debug_root;
+ struct timer_list timer;
/* Average CPU core temps (all averages in .01 degrees C for precision) */
u16 ctv1_avg_temp;
@@ -594,7 +591,7 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
return;
if (!ips->gpu_turbo_disable())
- dev_err(&ips->dev->dev, "failed to disable graphics turbo\n");
+ dev_err(ips->dev, "failed to disable graphics turbo\n");
else
ips->__gpu_turbo_on = false;
}
@@ -649,8 +646,7 @@ static bool cpu_exceeded(struct ips_driver *ips, int cpu)
spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
if (ret)
- dev_info(&ips->dev->dev,
- "CPU power or thermal limit exceeded\n");
+ dev_info(ips->dev, "CPU power or thermal limit exceeded\n");
return ret;
}
@@ -769,7 +765,7 @@ static int ips_adjust(void *data)
struct ips_driver *ips = data;
unsigned long flags;
- dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n");
+ dev_dbg(ips->dev, "starting ips-adjust thread\n");
/*
* Adjust CPU and GPU clamps every 5s if needed. Doing it more
@@ -816,7 +812,7 @@ sleep:
schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
} while (!kthread_should_stop());
- dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
+ dev_dbg(ips->dev, "ips-adjust thread stopped\n");
return 0;
}
@@ -942,9 +938,10 @@ static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
return avg;
}
-static void monitor_timeout(unsigned long arg)
+static void monitor_timeout(struct timer_list *t)
{
- wake_up_process((struct task_struct *)arg);
+ struct ips_driver *ips = from_timer(ips, t, timer);
+ wake_up_process(ips->monitor);
}
/**
@@ -961,7 +958,6 @@ static void monitor_timeout(unsigned long arg)
static int ips_monitor(void *data)
{
struct ips_driver *ips = data;
- struct timer_list timer;
unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
int i;
u32 *cpu_samples, *mchp_samples, old_cpu_power;
@@ -976,7 +972,7 @@ static int ips_monitor(void *data)
mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
!cpu_samples || !mchp_samples) {
- dev_err(&ips->dev->dev,
+ dev_err(ips->dev,
"failed to allocate sample array, ips disabled\n");
kfree(mcp_samples);
kfree(ctv1_samples);
@@ -1049,8 +1045,7 @@ static int ips_monitor(void *data)
schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
last_sample_period = IPS_SAMPLE_PERIOD;
- setup_deferrable_timer_on_stack(&timer, monitor_timeout,
- (unsigned long)current);
+ timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
do {
u32 cpu_val, mch_val;
u16 val;
@@ -1097,7 +1092,8 @@ static int ips_monitor(void *data)
ITV_ME_SEQNO_SHIFT;
if (cur_seqno == last_seqno &&
time_after(jiffies, seqno_timestamp + HZ)) {
- dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n");
+ dev_warn(ips->dev,
+ "ME failed to update for more than 1s, likely hung\n");
} else {
seqno_timestamp = get_jiffies_64();
last_seqno = cur_seqno;
@@ -1107,7 +1103,7 @@ static int ips_monitor(void *data)
expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);
__set_current_state(TASK_INTERRUPTIBLE);
- mod_timer(&timer, expire);
+ mod_timer(&ips->timer, expire);
schedule();
/* Calculate actual sample period for power averaging */
@@ -1116,10 +1112,9 @@ static int ips_monitor(void *data)
last_sample_period = 1;
} while (!kthread_should_stop());
- del_timer_sync(&timer);
- destroy_timer_on_stack(&timer);
+ del_timer_sync(&ips->timer);
- dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n");
+ dev_dbg(ips->dev, "ips-monitor thread stopped\n");
return 0;
}
@@ -1128,17 +1123,17 @@ static int ips_monitor(void *data)
#define THM_DUMPW(reg) \
{ \
u16 val = thm_readw(reg); \
- dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \
+ dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \
}
#define THM_DUMPL(reg) \
{ \
u32 val = thm_readl(reg); \
- dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \
+ dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \
}
#define THM_DUMPQ(reg) \
{ \
u64 val = thm_readq(reg); \
- dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \
+ dev_dbg(ips->dev, #reg ": 0x%016x\n", val); \
}
static void dump_thermal_info(struct ips_driver *ips)
@@ -1146,7 +1141,7 @@ static void dump_thermal_info(struct ips_driver *ips)
u16 ptl;
ptl = thm_readw(THM_PTL);
- dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl);
+ dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl);
THM_DUMPW(THM_CTA);
THM_DUMPW(THM_TRC);
@@ -1175,8 +1170,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
if (!tses && !tes)
return IRQ_NONE;
- dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses);
- dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes);
+ dev_info(ips->dev, "TSES: 0x%02x\n", tses);
+ dev_info(ips->dev, "TES: 0x%02x\n", tes);
/* STS update from EC? */
if (tes & 1) {
@@ -1214,8 +1209,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
/* Thermal trip */
if (tses) {
- dev_warn(&ips->dev->dev,
- "thermal trip occurred, tses: 0x%04x\n", tses);
+ dev_warn(ips->dev, "thermal trip occurred, tses: 0x%04x\n",
+ tses);
thm_writeb(THM_TSES, tses);
}
@@ -1330,8 +1325,7 @@ static void ips_debugfs_init(struct ips_driver *ips)
ips->debug_root = debugfs_create_dir("ips", NULL);
if (!ips->debug_root) {
- dev_err(&ips->dev->dev,
- "failed to create debugfs entries: %ld\n",
+ dev_err(ips->dev, "failed to create debugfs entries: %ld\n",
PTR_ERR(ips->debug_root));
return;
}
@@ -1345,8 +1339,7 @@ static void ips_debugfs_init(struct ips_driver *ips)
ips->debug_root, node,
&ips_debugfs_ops);
if (!ent) {
- dev_err(&ips->dev->dev,
- "failed to create debug file: %ld\n",
+ dev_err(ips->dev, "failed to create debug file: %ld\n",
PTR_ERR(ent));
goto err_cleanup;
}
@@ -1373,8 +1366,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
u16 tdp;
if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
- dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n");
- goto out;
+ dev_info(ips->dev, "Non-IPS CPU detected.\n");
+ return NULL;
}
rdmsrl(IA32_MISC_ENABLE, misc_en);
@@ -1395,8 +1388,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
limits = &ips_ulv_limits;
else {
- dev_info(&ips->dev->dev, "No CPUID match found.\n");
- goto out;
+ dev_info(ips->dev, "No CPUID match found.\n");
+ return NULL;
}
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
@@ -1404,12 +1397,12 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
/* Sanity check TDP against CPU */
if (limits->core_power_limit != (tdp / 8) * 1000) {
- dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
+ dev_info(ips->dev,
+ "CPU TDP doesn't match expected value (found %d, expected %d)\n",
tdp / 8, limits->core_power_limit / 1000);
limits->core_power_limit = (tdp / 8) * 1000;
}
-out:
return limits;
}
@@ -1459,7 +1452,7 @@ ips_gpu_turbo_enabled(struct ips_driver *ips)
{
if (!ips->gpu_busy && late_i915_load) {
if (ips_get_i915_syms(ips)) {
- dev_info(&ips->dev->dev,
+ dev_info(ips->dev,
"i915 driver attached, reenabling gpu turbo\n");
ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
}
@@ -1480,8 +1473,7 @@ ips_link_to_i915_driver(void)
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
static const struct pci_device_id ips_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
{ 0, }
};
@@ -1517,62 +1509,45 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (dmi_check_system(ips_blacklist))
return -ENODEV;
- ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
+ ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL);
if (!ips)
return -ENOMEM;
- pci_set_drvdata(dev, ips);
- ips->dev = dev;
+ spin_lock_init(&ips->turbo_status_lock);
+ ips->dev = &dev->dev;
ips->limits = ips_detect_cpu(ips);
if (!ips->limits) {
dev_info(&dev->dev, "IPS not supported on this CPU\n");
- ret = -ENXIO;
- goto error_free;
+ return -ENXIO;
}
- spin_lock_init(&ips->turbo_status_lock);
-
- ret = pci_enable_device(dev);
+ ret = pcim_enable_device(dev);
if (ret) {
dev_err(&dev->dev, "can't enable PCI device, aborting\n");
- goto error_free;
+ return ret;
}
- if (!pci_resource_start(dev, 0)) {
- dev_err(&dev->dev, "TBAR not assigned, aborting\n");
- ret = -ENXIO;
- goto error_free;
- }
-
- ret = pci_request_regions(dev, "ips thermal sensor");
+ ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev));
if (ret) {
- dev_err(&dev->dev, "thermal resource busy, aborting\n");
- goto error_free;
- }
-
-
- ips->regmap = ioremap(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
- if (!ips->regmap) {
dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
- ret = -EBUSY;
- goto error_release;
+ return ret;
}
+ ips->regmap = pcim_iomap_table(dev)[0];
+
+ pci_set_drvdata(dev, ips);
tse = thm_readb(THM_TSE);
if (tse != TSE_EN) {
dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
- ret = -ENXIO;
- goto error_unmap;
+ return -ENXIO;
}
trc = thm_readw(THM_TRC);
trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
if ((trc & trc_required_mask) != trc_required_mask) {
dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
- ret = -ENXIO;
- goto error_unmap;
+ return -ENXIO;
}
if (trc & TRC_CORE2_EN)
@@ -1602,20 +1577,23 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
rdmsrl(PLATFORM_INFO, platform_info);
if (!(platform_info & PLATFORM_TDP)) {
dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
- ret = -ENODEV;
- goto error_unmap;
+ return -ENODEV;
}
/*
* IRQ handler for ME interaction
* Note: don't use MSI here as the PCH has bugs.
*/
- pci_disable_msi(dev);
- ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips",
- ips);
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+ if (ret < 0)
+ return ret;
+
+ ips->irq = pci_irq_vector(dev, 0);
+
+ ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips);
if (ret) {
dev_err(&dev->dev, "request irq failed, aborting\n");
- goto error_unmap;
+ return ret;
}
/* Enable aux, hot & critical interrupts */
@@ -1672,13 +1650,8 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
error_thread_cleanup:
kthread_stop(ips->adjust);
error_free_irq:
- free_irq(ips->dev->irq, ips);
-error_unmap:
- iounmap(ips->regmap);
-error_release:
- pci_release_regions(dev);
-error_free:
- kfree(ips);
+ free_irq(ips->irq, ips);
+ pci_free_irq_vectors(dev);
return ret;
}
@@ -1709,27 +1682,20 @@ static void ips_remove(struct pci_dev *dev)
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
- free_irq(ips->dev->irq, ips);
+ free_irq(ips->irq, ips);
+ pci_free_irq_vectors(dev);
if (ips->adjust)
kthread_stop(ips->adjust);
if (ips->monitor)
kthread_stop(ips->monitor);
- iounmap(ips->regmap);
- pci_release_regions(dev);
- kfree(ips);
dev_dbg(&dev->dev, "IPS driver removed\n");
}
-static void ips_shutdown(struct pci_dev *dev)
-{
-}
-
static struct pci_driver ips_pci_driver = {
.name = "intel ips",
.id_table = ips_id_table,
.probe = ips_probe,
.remove = ips_remove,
- .shutdown = ips_shutdown,
};
module_pci_driver(ips_pci_driver);
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
index 73299beff5b3..60f4e3ddbe9f 100644
--- a/drivers/platform/x86/intel_ips.h
+++ b/drivers/platform/x86/intel_ips.h
@@ -10,10 +10,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index a47a41fc10ad..b5b890127479 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_IFACE
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index 0d4c3808a6d8..f378621b5fe9 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -15,9 +15,8 @@
* Telemetry Framework provides platform related PM and performance statistics.
* This file provides the core telemetry API implementation.
*/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <asm/intel_telemetry.h>
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index d4fc42b4cbeb..4249e8267bbc 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -21,14 +21,12 @@
* /sys/kernel/debug/telemetry/ioss_race_verbosity: Write and Change Tracing
* Verbosity via firmware
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
+#include <linux/device.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
+#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
@@ -76,8 +74,6 @@
#define TELEM_IOSS_DX_D0IX_EVTS 25
#define TELEM_IOSS_PG_EVTS 30
-#define TELEM_EVT_LEN(x) (sizeof(x)/sizeof((x)[0]))
-
#define TELEM_DEBUGFS_CPU(model, data) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data}
@@ -304,13 +300,13 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
.ioss_d0ix_data = telem_apl_ioss_d0ix_data,
.ioss_pg_data = telem_apl_ioss_pg_data,
- .pss_idle_evts = TELEM_EVT_LEN(telem_apl_pss_idle_data),
- .pcs_idle_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_idle_blkd_data),
- .pcs_s0ix_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_s0ix_blkd_data),
- .pss_ltr_evts = TELEM_EVT_LEN(telem_apl_pss_ltr_data),
- .pss_wakeup_evts = TELEM_EVT_LEN(telem_apl_pss_wakeup),
- .ioss_d0ix_evts = TELEM_EVT_LEN(telem_apl_ioss_d0ix_data),
- .ioss_pg_evts = TELEM_EVT_LEN(telem_apl_ioss_pg_data),
+ .pss_idle_evts = ARRAY_SIZE(telem_apl_pss_idle_data),
+ .pcs_idle_blkd_evts = ARRAY_SIZE(telem_apl_pcs_idle_blkd_data),
+ .pcs_s0ix_blkd_evts = ARRAY_SIZE(telem_apl_pcs_s0ix_blkd_data),
+ .pss_ltr_evts = ARRAY_SIZE(telem_apl_pss_ltr_data),
+ .pss_wakeup_evts = ARRAY_SIZE(telem_apl_pss_wakeup),
+ .ioss_d0ix_evts = ARRAY_SIZE(telem_apl_ioss_d0ix_data),
+ .ioss_pg_evts = ARRAY_SIZE(telem_apl_ioss_pg_data),
.pstates_id = TELEM_APL_PSS_PSTATES_ID,
.pss_idle_id = TELEM_APL_PSS_IDLE_ID,
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index e0424d5a795a..2f889d6c270e 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -16,15 +16,9 @@
* It used the PUNIT and PMC IPC interfaces for configuring the counters.
* The accumulated results are fetched from SRAM.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
+
#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/pci.h>
-#include <linux/suspend.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
@@ -256,7 +250,7 @@ static int telemetry_check_evtid(enum telemetry_unit telem_unit,
break;
default:
- pr_err("Unknown Telemetry action Specified %d\n", action);
+ pr_err("Unknown Telemetry action specified %d\n", action);
return -EINVAL;
}
@@ -659,7 +653,7 @@ static int telemetry_setup(struct platform_device *pdev)
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_RESET);
if (ret) {
- dev_err(&pdev->dev, "TELEMTRY Setup Failed\n");
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed\n");
return ret;
}
return 0;
@@ -685,7 +679,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_UPDATE);
if (ret)
- pr_err("TELEMTRY Config Failed\n");
+ pr_err("TELEMETRY Config Failed\n");
return ret;
}
@@ -822,7 +816,7 @@ static int telemetry_plt_reset_events(void)
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_RESET);
if (ret)
- pr_err("TELEMTRY Reset Failed\n");
+ pr_err("TELEMETRY Reset Failed\n");
return ret;
}
@@ -885,7 +879,7 @@ static int telemetry_plt_add_events(u8 num_pss_evts, u8 num_ioss_evts,
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_ADD);
if (ret)
- pr_err("TELEMTRY ADD Failed\n");
+ pr_err("TELEMETRY ADD Failed\n");
return ret;
}
@@ -1195,7 +1189,7 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
ret = telemetry_set_pltdata(&telm_pltops, telm_conf);
if (ret) {
- dev_err(&pdev->dev, "TELEMTRY Set Pltops Failed.\n");
+ dev_err(&pdev->dev, "TELEMETRY Set Pltops Failed.\n");
goto out;
}
@@ -1210,7 +1204,7 @@ out:
iounmap(telm_conf->pss_config.regmap);
if (telm_conf->ioss_config.regmap)
iounmap(telm_conf->ioss_config.regmap);
- dev_err(&pdev->dev, "TELEMTRY Setup Failed.\n");
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
return ret;
}
@@ -1234,7 +1228,6 @@ static struct platform_driver telemetry_soc_driver = {
static int __init telemetry_module_init(void)
{
- pr_info(DRIVER_NAME ": version %s loaded\n", DRIVER_VERSION);
return platform_driver_register(&telemetry_soc_driver);
}
diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c
index 4f60d8e32a0a..d4ea01805879 100644
--- a/drivers/platform/x86/intel_turbo_max_3.c
+++ b/drivers/platform/x86/intel_turbo_max_3.c
@@ -125,6 +125,7 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_X),
+ ICPU(INTEL_FAM6_SKYLAKE_X),
{}
};
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 4f3de2a8c4df..504256c3660d 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -216,8 +216,8 @@ static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"),
};
-struct platform_device *mlxplat_dev;
-struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
+static struct platform_device *mlxplat_dev;
+static struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
index bc98ef95514a..9b9e1f39bbfb 100644
--- a/drivers/platform/x86/peaq-wmi.c
+++ b/drivers/platform/x86/peaq-wmi.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -64,8 +65,23 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
}
}
+/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
+static const struct dmi_system_id peaq_dmi_table[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+ },
+ },
+ {}
+};
+
static int __init peaq_wmi_init(void)
{
+ /* WMI GUID is not unique, also check for a DMI match */
+ if (!dmi_check_system(peaq_dmi_table))
+ return -ENODEV;
+
if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
return -ENODEV;
@@ -86,9 +102,6 @@ static int __init peaq_wmi_init(void)
static void __exit peaq_wmi_exit(void)
{
- if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
- return;
-
input_unregister_polled_device(peaq_poll_dev);
}
diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c
index 1157a7b646d6..266535c2a72f 100644
--- a/drivers/platform/x86/silead_dmi.c
+++ b/drivers/platform/x86/silead_dmi.c
@@ -58,6 +58,7 @@ static const struct property_entry dexp_ursus_7w_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -72,6 +73,7 @@ static const struct property_entry surftab_wintron70_st70416_6_props[] = {
PROPERTY_ENTRY_STRING("firmware-name",
"gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -83,6 +85,8 @@ static const struct silead_ts_dmi_data surftab_wintron70_st70416_6_data = {
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl1680-gp-electronic-t701.fw"),
{ }
@@ -114,6 +118,7 @@ static const struct property_entry pov_mobii_wintab_p800w_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -136,6 +141,36 @@ static const struct silead_ts_dmi_data itworks_tw891_data = {
.properties = itworks_tw891_props,
};
+static const struct property_entry chuwi_hi8_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct silead_ts_dmi_data chuwi_hi8_pro_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi8_pro_props,
+};
+
+static const struct property_entry digma_citi_e200_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1686-digma_citi_e200.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct silead_ts_dmi_data digma_citi_e200_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = digma_citi_e200_props,
+};
+
static const struct dmi_system_id silead_ts_dmi_table[] = {
{
/* CUBE iwork8 Air */
@@ -219,6 +254,23 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TW891"),
},
},
+ {
+ /* Chuwi Hi8 Pro */
+ .driver_data = (void *)&chuwi_hi8_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
+ },
+ },
+ {
+ /* Digma Citi E200 */
+ .driver_data = (void *)&digma_citi_e200_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Digma"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
{ },
};
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index a16cea2be9c3..935121814c97 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -363,7 +363,7 @@ static int sony_laptop_input_keycode_map[] = {
};
/* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(unsigned long unused)
+static void do_sony_laptop_release_key(struct timer_list *unused)
{
struct sony_laptop_keypress kp;
unsigned long flags;
@@ -470,7 +470,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
goto err_dec_users;
}
- setup_timer(&sony_laptop_input.release_key_timer,
+ timer_setup(&sony_laptop_input.release_key_timer,
do_sony_laptop_release_key, 0);
/* input keys */
@@ -1627,7 +1627,7 @@ static const struct rfkill_ops sony_rfkill_ops = {
static int sony_nc_setup_rfkill(struct acpi_device *device,
enum sony_nc_rfkill nc_type)
{
- int err = 0;
+ int err;
struct rfkill *rfk;
enum rfkill_type type;
const char *name;
@@ -1660,17 +1660,19 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
if (!rfk)
return -ENOMEM;
- if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+ err = sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+ if (err < 0) {
rfkill_destroy(rfk);
- return -1;
+ return err;
}
hwblock = !(result & 0x1);
- if (sony_call_snc_handle(sony_rfkill_handle,
- sony_rfkill_address[nc_type],
- &result) < 0) {
+ err = sony_call_snc_handle(sony_rfkill_handle,
+ sony_rfkill_address[nc_type],
+ &result);
+ if (err < 0) {
rfkill_destroy(rfk);
- return -1;
+ return err;
}
swblock = !(result & 0x2);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 3887dfeafc96..117be48ff4de 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -310,8 +310,7 @@ static struct {
enum {
TP_HOTKEY_TABLET_NONE = 0,
TP_HOTKEY_TABLET_USES_MHKG,
- /* X1 Yoga 2016, seen on BIOS N1FET44W */
- TP_HOTKEY_TABLET_USES_CMMD,
+ TP_HOTKEY_TABLET_USES_GMMS,
} hotkey_tablet;
u32 kbdlight:1;
u32 light:1;
@@ -2044,8 +2043,28 @@ static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
-/* ThinkPad X1 Yoga (2016) */
-#define TP_EC_CMMD_TABLET_MODE 0x6
+enum {
+ TP_ACPI_MULTI_MODE_INVALID = 0,
+ TP_ACPI_MULTI_MODE_UNKNOWN = 1 << 0,
+ TP_ACPI_MULTI_MODE_LAPTOP = 1 << 1,
+ TP_ACPI_MULTI_MODE_TABLET = 1 << 2,
+ TP_ACPI_MULTI_MODE_FLAT = 1 << 3,
+ TP_ACPI_MULTI_MODE_STAND = 1 << 4,
+ TP_ACPI_MULTI_MODE_TENT = 1 << 5,
+ TP_ACPI_MULTI_MODE_STAND_TENT = 1 << 6,
+};
+
+enum {
+ /* The following modes are considered tablet mode for the purpose of
+ * reporting the status to userspace. i.e. in all these modes it makes
+ * sense to disable the laptop input devices such as touchpad and
+ * keyboard.
+ */
+ TP_ACPI_MULTI_MODE_TABLET_LIKE = TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT |
+ TP_ACPI_MULTI_MODE_STAND_TENT,
+};
static int hotkey_get_wlsw(void)
{
@@ -2066,6 +2085,90 @@ static int hotkey_get_wlsw(void)
return (status) ? TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
}
+static int hotkey_gmms_get_tablet_mode(int s, int *has_tablet_mode)
+{
+ int type = (s >> 16) & 0xffff;
+ int value = s & 0xffff;
+ int mode = TP_ACPI_MULTI_MODE_INVALID;
+ int valid_modes = 0;
+
+ if (has_tablet_mode)
+ *has_tablet_mode = 0;
+
+ switch (type) {
+ case 1:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND_TENT;
+ break;
+ case 2:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ case 3:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT;
+ break;
+ case 4:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ case 5:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ default:
+ pr_err("Unknown multi mode status type %d with value 0x%04X, please report this to %s\n",
+ type, value, TPACPI_MAIL);
+ return 0;
+ }
+
+ if (has_tablet_mode && (valid_modes & TP_ACPI_MULTI_MODE_TABLET_LIKE))
+ *has_tablet_mode = 1;
+
+ switch (value) {
+ case 1:
+ mode = TP_ACPI_MULTI_MODE_LAPTOP;
+ break;
+ case 2:
+ mode = TP_ACPI_MULTI_MODE_FLAT;
+ break;
+ case 3:
+ mode = TP_ACPI_MULTI_MODE_TABLET;
+ break;
+ case 4:
+ if (type == 1)
+ mode = TP_ACPI_MULTI_MODE_STAND_TENT;
+ else
+ mode = TP_ACPI_MULTI_MODE_STAND;
+ break;
+ case 5:
+ mode = TP_ACPI_MULTI_MODE_TENT;
+ break;
+ default:
+ if (type == 5 && value == 0xffff) {
+ pr_warn("Multi mode status is undetected, assuming laptop\n");
+ return 0;
+ }
+ }
+
+ if (!(mode & valid_modes)) {
+ pr_err("Unknown/reserved multi mode value 0x%04X for type %d, please report this to %s\n",
+ value, type, TPACPI_MAIL);
+ return 0;
+ }
+
+ return !!(mode & TP_ACPI_MULTI_MODE_TABLET_LIKE);
+}
+
static int hotkey_get_tablet_mode(int *status)
{
int s;
@@ -2077,11 +2180,11 @@ static int hotkey_get_tablet_mode(int *status)
*status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
break;
- case TP_HOTKEY_TABLET_USES_CMMD:
- if (!acpi_evalf(ec_handle, &s, "CMMD", "d"))
+ case TP_HOTKEY_TABLET_USES_GMMS:
+ if (!acpi_evalf(hkey_handle, &s, "GMMS", "dd", 0))
return -EIO;
- *status = (s == TP_EC_CMMD_TABLET_MODE);
+ *status = hotkey_gmms_get_tablet_mode(s, NULL);
break;
default:
break;
@@ -3113,16 +3216,19 @@ static int hotkey_init_tablet_mode(void)
int in_tablet_mode = 0, res;
char *type = NULL;
- if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+ if (acpi_evalf(hkey_handle, &res, "GMMS", "qdd", 0)) {
+ int has_tablet_mode;
+
+ in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
+ &has_tablet_mode);
+ if (has_tablet_mode)
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
+ type = "GMMS";
+ } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
/* For X41t, X60t, X61t Tablets... */
tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG;
in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK);
type = "MHKG";
- } else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) {
- /* For X1 Yoga (2016) */
- tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD;
- in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE;
- type = "CMMD";
}
if (!tp_features.hotkey_tablet)
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 0765b1797d4c..791449a2370f 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -33,17 +33,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/acpi.h>
-#include <linux/slab.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/wmi.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/uuid.h>
+#include <linux/wmi.h>
+#include <uapi/linux/wmi.h>
ACPI_MODULE_NAME("wmi");
MODULE_AUTHOR("Carlos Corbacho");
@@ -69,9 +72,12 @@ struct wmi_block {
struct wmi_device dev;
struct list_head list;
struct guid_block gblock;
+ struct miscdevice char_dev;
+ struct mutex char_mutex;
struct acpi_device *acpi_device;
wmi_notify_handler handler;
void *handler_data;
+ u64 req_buf_size;
bool read_takes_no_args;
};
@@ -188,6 +194,25 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
/*
* Exported WMI functions
*/
+
+/**
+ * set_required_buffer_size - Sets the buffer size needed for performing IOCTL
+ * @wdev: A wmi bus device from a driver
+ * @instance: Instance index
+ *
+ * Allocates memory needed for buffer, stores the buffer size in that memory
+ */
+int set_required_buffer_size(struct wmi_device *wdev, u64 length)
+{
+ struct wmi_block *wblock;
+
+ wblock = container_of(wdev, struct wmi_block, dev);
+ wblock->req_buf_size = length;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(set_required_buffer_size);
+
/**
* wmi_evaluate_method - Evaluate a WMI method
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
@@ -201,6 +226,28 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
acpi_status wmi_evaluate_method(const char *guid_string, u8 instance,
u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
{
+ struct wmi_block *wblock = NULL;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_ERROR;
+ return wmidev_evaluate_method(&wblock->dev, instance, method_id,
+ in, out);
+}
+EXPORT_SYMBOL_GPL(wmi_evaluate_method);
+
+/**
+ * wmidev_evaluate_method - Evaluate a WMI method
+ * @wdev: A wmi bus device from a driver
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * &in: Buffer containing input for the method call
+ * &out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance,
+ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+{
struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
acpi_handle handle;
@@ -209,9 +256,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
union acpi_object params[3];
char method[5] = "WM";
- if (!find_guid(guid_string, &wblock))
- return AE_ERROR;
-
+ wblock = container_of(wdev, struct wmi_block, dev);
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
@@ -246,7 +291,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
return status;
}
-EXPORT_SYMBOL_GPL(wmi_evaluate_method);
+EXPORT_SYMBOL_GPL(wmidev_evaluate_method);
static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
struct acpi_buffer *out)
@@ -348,23 +393,6 @@ union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance)
}
EXPORT_SYMBOL_GPL(wmidev_block_query);
-struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev,
- const char *guid_string)
-{
- struct wmi_block *this_wb = container_of(wdev, struct wmi_block, dev);
- struct wmi_block *other_wb;
-
- if (!find_guid(guid_string, &other_wb))
- return NULL;
-
- if (other_wb->acpi_device != this_wb->acpi_device)
- return NULL;
-
- get_device(&other_wb->dev.dev);
- return &other_wb->dev;
-}
-EXPORT_SYMBOL_GPL(wmidev_get_other_guid);
-
/**
* wmi_set_block - Write to a WMI block
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
@@ -761,6 +789,113 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
return 0;
}
+static int wmi_char_open(struct inode *inode, struct file *filp)
+{
+ const char *driver_name = filp->f_path.dentry->d_iname;
+ struct wmi_block *wblock = NULL;
+ struct wmi_block *next = NULL;
+
+ list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+ if (!wblock->dev.dev.driver)
+ continue;
+ if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+ filp->private_data = wblock;
+ break;
+ }
+ }
+
+ if (!filp->private_data)
+ return -ENODEV;
+
+ return nonseekable_open(inode, filp);
+}
+
+static ssize_t wmi_char_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct wmi_block *wblock = filp->private_data;
+
+ return simple_read_from_buffer(buffer, length, offset,
+ &wblock->req_buf_size,
+ sizeof(wblock->req_buf_size));
+}
+
+static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct wmi_ioctl_buffer __user *input =
+ (struct wmi_ioctl_buffer __user *) arg;
+ struct wmi_block *wblock = filp->private_data;
+ struct wmi_ioctl_buffer *buf = NULL;
+ struct wmi_driver *wdriver = NULL;
+ int ret;
+
+ if (_IOC_TYPE(cmd) != WMI_IOC)
+ return -ENOTTY;
+
+ /* make sure we're not calling a higher instance than exists*/
+ if (_IOC_NR(cmd) >= wblock->gblock.instance_count)
+ return -EINVAL;
+
+ mutex_lock(&wblock->char_mutex);
+ buf = wblock->handler_data;
+ if (get_user(buf->length, &input->length)) {
+ dev_dbg(&wblock->dev.dev, "Read length from user failed\n");
+ ret = -EFAULT;
+ goto out_ioctl;
+ }
+ /* if it's too small, abort */
+ if (buf->length < wblock->req_buf_size) {
+ dev_err(&wblock->dev.dev,
+ "Buffer %lld too small, need at least %lld\n",
+ buf->length, wblock->req_buf_size);
+ ret = -EINVAL;
+ goto out_ioctl;
+ }
+ /* if it's too big, warn, driver will only use what is needed */
+ if (buf->length > wblock->req_buf_size)
+ dev_warn(&wblock->dev.dev,
+ "Buffer %lld is bigger than required %lld\n",
+ buf->length, wblock->req_buf_size);
+
+ /* copy the structure from userspace */
+ if (copy_from_user(buf, input, wblock->req_buf_size)) {
+ dev_dbg(&wblock->dev.dev, "Copy %llu from user failed\n",
+ wblock->req_buf_size);
+ ret = -EFAULT;
+ goto out_ioctl;
+ }
+
+ /* let the driver do any filtering and do the call */
+ wdriver = container_of(wblock->dev.dev.driver,
+ struct wmi_driver, driver);
+ if (!try_module_get(wdriver->driver.owner)) {
+ ret = -EBUSY;
+ goto out_ioctl;
+ }
+ ret = wdriver->filter_callback(&wblock->dev, cmd, buf);
+ module_put(wdriver->driver.owner);
+ if (ret)
+ goto out_ioctl;
+
+ /* return the result (only up to our internal buffer size) */
+ if (copy_to_user(input, buf, wblock->req_buf_size)) {
+ dev_dbg(&wblock->dev.dev, "Copy %llu to user failed\n",
+ wblock->req_buf_size);
+ ret = -EFAULT;
+ }
+
+out_ioctl:
+ mutex_unlock(&wblock->char_mutex);
+ return ret;
+}
+
+static const struct file_operations wmi_fops = {
+ .owner = THIS_MODULE,
+ .read = wmi_char_read,
+ .open = wmi_char_open,
+ .unlocked_ioctl = wmi_ioctl,
+ .compat_ioctl = wmi_ioctl,
+};
static int wmi_dev_probe(struct device *dev)
{
@@ -768,16 +903,63 @@ static int wmi_dev_probe(struct device *dev)
struct wmi_driver *wdriver =
container_of(dev->driver, struct wmi_driver, driver);
int ret = 0;
+ int count;
+ char *buf;
if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
dev_warn(dev, "failed to enable device -- probing anyway\n");
if (wdriver->probe) {
ret = wdriver->probe(dev_to_wdev(dev));
- if (ret != 0 && ACPI_FAILURE(wmi_method_enable(wblock, 0)))
- dev_warn(dev, "failed to disable device\n");
+ if (ret != 0)
+ goto probe_failure;
+ }
+
+ /* driver wants a character device made */
+ if (wdriver->filter_callback) {
+ /* check that required buffer size declared by driver or MOF */
+ if (!wblock->req_buf_size) {
+ dev_err(&wblock->dev.dev,
+ "Required buffer size not set\n");
+ ret = -EINVAL;
+ goto probe_failure;
+ }
+
+ count = get_order(wblock->req_buf_size);
+ wblock->handler_data = (void *)__get_free_pages(GFP_KERNEL,
+ count);
+ if (!wblock->handler_data) {
+ ret = -ENOMEM;
+ goto probe_failure;
+ }
+
+ buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto probe_string_failure;
+ }
+ sprintf(buf, "wmi/%s", wdriver->driver.name);
+ wblock->char_dev.minor = MISC_DYNAMIC_MINOR;
+ wblock->char_dev.name = buf;
+ wblock->char_dev.fops = &wmi_fops;
+ wblock->char_dev.mode = 0444;
+ ret = misc_register(&wblock->char_dev);
+ if (ret) {
+ dev_warn(dev, "failed to register char dev: %d", ret);
+ ret = -ENOMEM;
+ goto probe_misc_failure;
+ }
}
+ return 0;
+
+probe_misc_failure:
+ kfree(buf);
+probe_string_failure:
+ kfree(wblock->handler_data);
+probe_failure:
+ if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
+ dev_warn(dev, "failed to disable device\n");
return ret;
}
@@ -788,6 +970,13 @@ static int wmi_dev_remove(struct device *dev)
container_of(dev->driver, struct wmi_driver, driver);
int ret = 0;
+ if (wdriver->filter_callback) {
+ misc_deregister(&wblock->char_dev);
+ kfree(wblock->char_dev.name);
+ free_pages((unsigned long)wblock->handler_data,
+ get_order(wblock->req_buf_size));
+ }
+
if (wdriver->remove)
ret = wdriver->remove(dev_to_wdev(dev));
@@ -844,6 +1033,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
if (gblock->flags & ACPI_WMI_METHOD) {
wblock->dev.dev.type = &wmi_type_method;
+ mutex_init(&wblock->char_mutex);
goto out_init;
}
@@ -1145,7 +1335,7 @@ static int acpi_wmi_remove(struct platform_device *device)
acpi_remove_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
wmi_free_devices(acpi_device);
- device_unregister((struct device *)dev_get_drvdata(&device->dev));
+ device_destroy(&wmi_bus_class, MKDEV(0, 0));
return 0;
}
@@ -1199,7 +1389,7 @@ static int acpi_wmi_probe(struct platform_device *device)
return 0;
err_remove_busdev:
- device_unregister(wmi_bus_dev);
+ device_destroy(&wmi_bus_class, MKDEV(0, 0));
err_remove_notify_handler:
acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY,
@@ -1264,8 +1454,8 @@ err_unreg_class:
static void __exit acpi_wmi_exit(void)
{
platform_driver_unregister(&acpi_wmi_driver);
- class_unregister(&wmi_bus_class);
bus_unregister(&wmi_bus_type);
+ class_unregister(&wmi_bus_class);
}
subsys_initcall(acpi_wmi_init);
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index 436b4e4e71a1..04735649052a 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -39,7 +39,7 @@ static struct timer_list ktimer;
* The kernel timer
*/
-static void pps_ktimer_event(unsigned long ptr)
+static void pps_ktimer_event(struct timer_list *unused)
{
struct pps_event_time ts;
@@ -85,7 +85,7 @@ static int __init pps_ktimer_init(void)
return -ENOMEM;
}
- setup_timer(&ktimer, pps_ktimer_event, 0);
+ timer_setup(&ktimer, pps_ktimer_event, 0);
mod_timer(&ktimer, jiffies + HZ);
dev_info(pps->dev, "ktimer PPS source registered\n");
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index 2b1b212c219e..c67dd11e08b1 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -178,8 +178,11 @@ static int __init ptp_kvm_init(void)
{
long ret;
+ if (!kvm_para_available())
+ return -ENODEV;
+
clock_pair_gpa = slow_virt_to_phys(&clock_pair);
- hv_clock = pvclock_pvti_cpu0_va();
+ hv_clock = pvclock_get_pvti_cpu0_va();
if (!hv_clock)
return -ENODEV;
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 75db585a2a94..acd3ce8ecf3f 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -37,11 +37,20 @@ struct atmel_tcb_pwm_device {
unsigned period; /* PWM period expressed in clk cycles */
};
+struct atmel_tcb_channel {
+ u32 enabled;
+ u32 cmr;
+ u32 ra;
+ u32 rb;
+ u32 rc;
+};
+
struct atmel_tcb_pwm_chip {
struct pwm_chip chip;
spinlock_t lock;
struct atmel_tc *tc;
struct atmel_tcb_pwm_device *pwms[NPWM];
+ struct atmel_tcb_channel bkup[NPWM / 2];
};
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
@@ -175,12 +184,15 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
* Use software trigger to apply the new setting.
* If both PWM devices in this group are disabled we stop the clock.
*/
- if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC)))
+ if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
__raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
regs + ATMEL_TC_REG(group, CCR));
- else
+ tcbpwmc->bkup[group].enabled = 1;
+ } else {
__raw_writel(ATMEL_TC_SWTRG, regs +
ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 0;
+ }
spin_unlock(&tcbpwmc->lock);
}
@@ -263,6 +275,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Use software trigger to apply the new setting */
__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
regs + ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 1;
spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -445,10 +458,56 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_tcb_pwm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
+ chan->ra = readl(base + ATMEL_TC_REG(i, RA));
+ chan->rb = readl(base + ATMEL_TC_REG(i, RB));
+ chan->rc = readl(base + ATMEL_TC_REG(i, RC));
+ }
+ return 0;
+}
+
+static int atmel_tcb_pwm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
+ writel(chan->ra, base + ATMEL_TC_REG(i, RA));
+ writel(chan->rb, base + ATMEL_TC_REG(i, RB));
+ writel(chan->rc, base + ATMEL_TC_REG(i, RC));
+ if (chan->enabled) {
+ writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+ base + ATMEL_TC_REG(i, CCR));
+ }
+ }
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
+ atmel_tcb_pwm_resume);
+
static struct platform_driver atmel_tcb_pwm_driver = {
.driver = {
.name = "atmel-tcb-pwm",
.of_match_table = atmel_tcb_pwm_dt_ids,
+ .pm = &atmel_tcb_pwm_pm_ops,
},
.probe = atmel_tcb_pwm_probe,
.remove = atmel_tcb_pwm_remove,
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 2fb30deee345..815f5333bb8f 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
+#define IMG_PWM_PM_TIMEOUT 1000 /* ms */
+
/*
* PWM period is specified with a timebase register,
* in number of step periods. The PWM duty cycle is also
@@ -52,6 +55,8 @@
*/
#define MIN_TMBASE_STEPS 16
+#define IMG_PWM_NPWM 4
+
struct img_pwm_soc_data {
u32 max_timebase;
};
@@ -66,6 +71,8 @@ struct img_pwm_chip {
int max_period_ns;
int min_period_ns;
const struct img_pwm_soc_data *data;
+ u32 suspend_ctrl_cfg;
+ u32 suspend_ch_cfg[IMG_PWM_NPWM];
};
static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -92,6 +99,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long mul, output_clk_hz, input_clk_hz;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
unsigned int max_timebase = pwm_chip->data->max_timebase;
+ int ret;
if (period_ns < pwm_chip->min_period_ns ||
period_ns > pwm_chip->max_period_ns) {
@@ -123,6 +131,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
+
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
@@ -133,6 +145,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
(timebase << PWM_CH_CFG_TMBASE_SHIFT);
img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
+
return 0;
}
@@ -140,6 +155,11 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
u32 val;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+ int ret;
+
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val |= BIT(pwm->hwpwm);
@@ -160,6 +180,9 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~BIT(pwm->hwpwm);
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
}
static const struct pwm_ops img_pwm_ops = {
@@ -182,6 +205,37 @@ static const struct of_device_id img_pwm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+static int img_pwm_runtime_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pwm_chip->pwm_clk);
+ clk_disable_unprepare(pwm_chip->sys_clk);
+
+ return 0;
+}
+
+static int img_pwm_runtime_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(pwm_chip->sys_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable sys clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(pwm_chip->pwm_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable pwm clock\n");
+ clk_disable_unprepare(pwm_chip->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int img_pwm_probe(struct platform_device *pdev)
{
int ret;
@@ -224,23 +278,20 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pwm->pwm_clk);
}
- ret = clk_prepare_enable(pwm->sys_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(pwm->pwm_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
- goto disable_sysclk;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_pwm_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
}
clk_rate = clk_get_rate(pwm->pwm_clk);
if (!clk_rate) {
dev_err(&pdev->dev, "pwm clock has no frequency\n");
ret = -EINVAL;
- goto disable_pwmclk;
+ goto err_suspend;
}
/* The maximum input clock divider is 512 */
@@ -255,21 +306,23 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &img_pwm_ops;
pwm->chip.base = -1;
- pwm->chip.npwm = 4;
+ pwm->chip.npwm = IMG_PWM_NPWM;
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
- goto disable_pwmclk;
+ goto err_suspend;
}
platform_set_drvdata(pdev, pwm);
return 0;
-disable_pwmclk:
- clk_disable_unprepare(pwm->pwm_clk);
-disable_sysclk:
- clk_disable_unprepare(pwm->sys_clk);
+err_suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return ret;
}
@@ -278,6 +331,11 @@ static int img_pwm_remove(struct platform_device *pdev)
struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
u32 val;
unsigned int i;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < pwm_chip->chip.npwm; i++) {
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
@@ -285,15 +343,79 @@ static int img_pwm_remove(struct platform_device *pdev)
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
}
- clk_disable_unprepare(pwm_chip->pwm_clk);
- clk_disable_unprepare(pwm_chip->sys_clk);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
return pwmchip_remove(&pwm_chip->chip);
}
+#ifdef CONFIG_PM_SLEEP
+static int img_pwm_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ pwm_chip->suspend_ch_cfg[i] = img_pwm_readl(pwm_chip,
+ PWM_CH_CFG(i));
+
+ pwm_chip->suspend_ctrl_cfg = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int img_pwm_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ img_pwm_writel(pwm_chip, PWM_CH_CFG(i),
+ pwm_chip->suspend_ch_cfg[i]);
+
+ img_pwm_writel(pwm_chip, PWM_CTRL_CFG, pwm_chip->suspend_ctrl_cfg);
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ if (pwm_chip->suspend_ctrl_cfg & BIT(i))
+ regmap_update_bits(pwm_chip->periph_regs,
+ PERIP_PWM_PDM_CONTROL,
+ PERIP_PWM_PDM_CONTROL_CH_MASK <<
+ PERIP_PWM_PDM_CONTROL_CH_SHIFT(i),
+ 0);
+
+ if (pm_runtime_status_suspended(dev))
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops img_pwm_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_pwm_runtime_suspend,
+ img_pwm_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_pwm_suspend, img_pwm_resume)
+};
+
static struct platform_driver img_pwm_driver = {
.driver = {
.name = "img-pwm",
+ .pm = &img_pwm_pm_ops,
.of_match_table = img_pwm_of_match,
},
.probe = img_pwm_probe,
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b52f3afb2ba1..f5d97e0ad52b 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -40,11 +41,19 @@ enum {
MTK_CLK_PWM3,
MTK_CLK_PWM4,
MTK_CLK_PWM5,
+ MTK_CLK_PWM6,
+ MTK_CLK_PWM7,
+ MTK_CLK_PWM8,
MTK_CLK_MAX,
};
-static const char * const mtk_pwm_clk_name[] = {
- "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5"
+static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
+ "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6", "pwm7",
+ "pwm8"
+};
+
+struct mtk_pwm_platform_data {
+ unsigned int num_pwms;
};
/**
@@ -59,6 +68,10 @@ struct mtk_pwm_chip {
struct clk *clks[MTK_CLK_MAX];
};
+static const unsigned int mtk_pwm_reg_offset[] = {
+ 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
+};
+
static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct mtk_pwm_chip, chip);
@@ -103,14 +116,14 @@ static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
unsigned int offset)
{
- return readl(chip->regs + 0x10 + (num * 0x40) + offset);
+ return readl(chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
{
- writel(value, chip->regs + 0x10 + (num * 0x40) + offset);
+ writel(value, chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -185,6 +198,7 @@ static const struct pwm_ops mtk_pwm_ops = {
static int mtk_pwm_probe(struct platform_device *pdev)
{
+ const struct mtk_pwm_platform_data *data;
struct mtk_pwm_chip *pc;
struct resource *res;
unsigned int i;
@@ -194,15 +208,22 @@ static int mtk_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
+ data = of_device_get_match_data(&pdev->dev);
+ if (data == NULL)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- for (i = 0; i < MTK_CLK_MAX; i++) {
+ for (i = 0; i < data->num_pwms + 2; i++) {
pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
- if (IS_ERR(pc->clks[i]))
+ if (IS_ERR(pc->clks[i])) {
+ dev_err(&pdev->dev, "clock: %s fail: %ld\n",
+ mtk_pwm_clk_name[i], PTR_ERR(pc->clks[i]));
return PTR_ERR(pc->clks[i]);
+ }
}
platform_set_drvdata(pdev, pc);
@@ -210,7 +231,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &mtk_pwm_ops;
pc->chip.base = -1;
- pc->chip.npwm = 5;
+ pc->chip.npwm = data->num_pwms;
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@@ -228,9 +249,23 @@ static int mtk_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+static const struct mtk_pwm_platform_data mt2712_pwm_data = {
+ .num_pwms = 8,
+};
+
+static const struct mtk_pwm_platform_data mt7622_pwm_data = {
+ .num_pwms = 6,
+};
+
+static const struct mtk_pwm_platform_data mt7623_pwm_data = {
+ .num_pwms = 5,
+};
+
static const struct of_device_id mtk_pwm_of_match[] = {
- { .compatible = "mediatek,mt7623-pwm" },
- { }
+ { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
+ { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
+ { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+ { },
};
MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 9793b296108f..1ac9e4384142 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -219,8 +219,7 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
unsigned int i;
for (i = 0; i < priv->chip.npwm; i++)
- if (pwm_is_enabled(&priv->chip.pwms[i]))
- pwm_disable(&priv->chip.pwms[i]);
+ pwm_disable(&priv->chip.pwms[i]);
return pwmchip_remove(&priv->chip);
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 6d23f1d1c9b7..334199c58f1d 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -368,14 +368,15 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
struct sun4i_pwm_chip *pwm;
struct resource *res;
int ret;
- const struct of_device_id *match;
-
- match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (!pwm)
return -ENOMEM;
+ pwm->data = of_device_get_match_data(&pdev->dev);
+ if (!pwm->data)
+ return -ENODEV;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pwm->base))
@@ -385,7 +386,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
- pwm->data = match->data;
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
pwm->chip.base = -1;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5beb0c361076..ec4bc1515f0d 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -876,10 +876,10 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
* offset within the internal buffer specified by handle parameter.
*/
if (xfer->loc_addr) {
- unsigned long offset;
+ unsigned int offset;
long pinned;
- offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
+ offset = lower_32_bits(offset_in_page(xfer->loc_addr));
nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
page_list = kmalloc_array(nr_pages,
@@ -889,11 +889,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
goto err_req;
}
- pinned = get_user_pages_unlocked(
+ pinned = get_user_pages_fast(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages,
- page_list,
- dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0);
+ nr_pages, dir == DMA_FROM_DEVICE, page_list);
if (pinned != nr_pages) {
if (pinned < 0) {
@@ -961,9 +959,10 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
nents = dma_map_sg(chan->device->dev,
req->sgt.sgl, req->sgt.nents, dir);
- if (nents == -EFAULT) {
+ if (nents == 0) {
rmcd_error("Failed to map SG list");
- return -EFAULT;
+ ret = -EFAULT;
+ goto err_pg;
}
ret = do_dma_request(req, xfer, sync, nents);
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index e67b923b1ca6..4931ed790428 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -458,7 +458,7 @@ static void idtg2_remove(struct rio_dev *rdev)
idtg2_sysfs(rdev, false);
}
-static struct rio_device_id idtg2_id_table[] = {
+static const struct rio_device_id idtg2_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)},
diff --git a/drivers/rapidio/switches/idt_gen3.c b/drivers/rapidio/switches/idt_gen3.c
index c5923a547bed..85a3908294d9 100644
--- a/drivers/rapidio/switches/idt_gen3.c
+++ b/drivers/rapidio/switches/idt_gen3.c
@@ -348,7 +348,7 @@ static void idtg3_shutdown(struct rio_dev *rdev)
}
}
-static struct rio_device_id idtg3_id_table[] = {
+static const struct rio_device_id idtg3_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)},
{ 0, } /* terminate list */
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index 7fbb60d31796..4058ce2c76fa 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -168,7 +168,7 @@ static void idtcps_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
-static struct rio_device_id idtcps_id_table[] = {
+static const struct rio_device_id idtcps_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)},
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index 8a43561b9d17..1214628b7ded 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -169,7 +169,7 @@ static void tsi568_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
-static struct rio_device_id tsi568_id_table[] = {
+static const struct rio_device_id tsi568_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
{ 0, } /* terminate list */
};
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 2700d15f7584..9f063e214836 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -336,7 +336,7 @@ static void tsi57x_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
-static struct rio_device_id tsi57x_id_table[] = {
+static const struct rio_device_id tsi57x_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)},
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 5324dc9e6d6e..7b12e880d1ea 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -228,11 +228,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
int i, ret;
unsigned int val;
- if (tps65217_chip_id(tps) != TPS65217) {
- dev_err(&pdev->dev, "Invalid tps chip version\n");
- return -ENODEV;
- }
-
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65217_NUM_REGULATOR, GFP_KERNEL);
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index bf04479456a0..b609e1d3654b 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -28,7 +28,6 @@ config OMAP_REMOTEPROC
depends on OMAP_IOMMU
select MAILBOX
select OMAP2PLUS_MBOX
- select RPMSG_VIRTIO
help
Say y here to support OMAP's remote processors (dual M3
and DSP on OMAP4) via the remote processor framework.
@@ -58,7 +57,6 @@ config DA8XX_REMOTEPROC
tristate "DA8xx/OMAP-L13x remoteproc support"
depends on ARCH_DAVINCI_DA8XX
depends on DMA_CMA
- select RPMSG_VIRTIO
help
Say y here to support DA8xx/OMAP-L13x remote processors via the
remote processor framework.
@@ -79,7 +77,6 @@ config DA8XX_REMOTEPROC
config KEYSTONE_REMOTEPROC
tristate "Keystone Remoteproc support"
depends on ARCH_KEYSTONE
- select RPMSG_VIRTIO
help
Say Y here here to support Keystone remote processors (DSP)
via the remote processor framework.
@@ -135,7 +132,6 @@ config ST_REMOTEPROC
depends on ARCH_STI
select MAILBOX
select STI_MBOX
- select RPMSG_VIRTIO
help
Say y here to support ST's adjunct processors via the remote
processor framework.
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 2d3d5ac92c06..8a3fa2bcc9f6 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -32,6 +32,7 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
+#include <linux/iopoll.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
@@ -64,6 +65,8 @@
#define QDSP6SS_RESET_REG 0x014
#define QDSP6SS_GFMUX_CTL_REG 0x020
#define QDSP6SS_PWR_CTL_REG 0x030
+#define QDSP6SS_MEM_PWR_CTL 0x0B0
+#define QDSP6SS_STRAP_ACC 0x110
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
@@ -92,6 +95,15 @@
#define QDSS_BHS_ON BIT(21)
#define QDSS_LDO_BYP BIT(22)
+/* QDSP6v56 parameters */
+#define QDSP6v56_LDO_BYP BIT(25)
+#define QDSP6v56_BHS_ON BIT(24)
+#define QDSP6v56_CLAMP_WL BIT(21)
+#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
+#define HALT_CHECK_MAX_LOOPS 200
+#define QDSP6SS_XO_CBCR 0x0038
+#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+
struct reg_info {
struct regulator *reg;
int uV;
@@ -110,6 +122,8 @@ struct rproc_hexagon_res {
struct qcom_mss_reg_res *active_supply;
char **proxy_clk_names;
char **active_clk_names;
+ int version;
+ bool need_mem_protection;
};
struct q6v5 {
@@ -154,6 +168,16 @@ struct q6v5 {
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
+ bool need_mem_protection;
+ int mpss_perm;
+ int mba_perm;
+ int version;
+};
+
+enum {
+ MSS_MSM8916,
+ MSS_MSM8974,
+ MSS_MSM8996,
};
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
@@ -289,6 +313,26 @@ static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
return &table;
}
+static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
+ bool remote_owner, phys_addr_t addr,
+ size_t size)
+{
+ struct qcom_scm_vmperm next;
+
+ if (!qproc->need_mem_protection)
+ return 0;
+ if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
+ return 0;
+ if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
+ return 0;
+
+ next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
+ next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
+
+ return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
+ current_perm, &next, 1);
+}
+
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
@@ -353,33 +397,98 @@ static int q6v5proc_reset(struct q6v5 *qproc)
{
u32 val;
int ret;
+ int i;
- /* Assert resets, stop core */
- val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
- val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
- writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
- /* Enable power block headswitch, and wait for it to stabilize */
- val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= QDSS_BHS_ON | QDSS_LDO_BYP;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- udelay(1);
-
- /*
- * Turn on memories. L2 banks should be done individually
- * to minimize inrush current.
- */
- val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
- Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_L2DATA_SLP_NRET_N_2;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_L2DATA_SLP_NRET_N_1;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_L2DATA_SLP_NRET_N_0;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ if (qproc->version == MSS_MSM8996) {
+ /* Override the ACC value if required */
+ writel(QDSP6SS_ACC_OVERRIDE_VAL,
+ qproc->reg_base + QDSP6SS_STRAP_ACC);
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* BHS require xo cbcr to be enabled */
+ val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+ val |= 0x1;
+ writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+ /* Read CLKOFF bit to go low indicating CLK is enabled */
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(qproc->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSP6v56_BHS_ON;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /* Put LDO in bypass mode */
+ val |= QDSP6v56_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Deassert QDSP6 compiler memory clamp */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val &= ~QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Turn on L1, L2, ETB and JU memories 1 at a time */
+ val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+ for (i = 19; i >= 0; i--) {
+ val |= BIT(i);
+ writel(val, qproc->reg_base +
+ QDSP6SS_MEM_PWR_CTL);
+ /*
+ * Read back value to ensure the write is done then
+ * wait for 1us for both memory peripheral and data
+ * array to turn on.
+ */
+ val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+ udelay(1);
+ }
+ /* Remove word line clamp */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val &= ~QDSP6v56_CLAMP_WL;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ } else {
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ udelay(1);
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
/* Remove IO clamp */
val &= ~Q6SS_CLAMP_IO;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
@@ -451,6 +560,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
+ int mdata_perm;
+ int xferop_ret;
void *ptr;
int ret;
@@ -462,6 +573,17 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
memcpy(ptr, fw->data, fw->size);
+ /* Hypervisor mapping to access metadata by modem */
+ mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
+ ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
+ true, phys, fw->size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to metadata failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto free_dma_attrs;
+ }
+
writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
@@ -471,6 +593,14 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
else if (ret < 0)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
+ /* Metadata authentication done, remove modem access */
+ xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
+ false, phys, fw->size);
+ if (xferop_ret)
+ dev_warn(qproc->dev,
+ "mdt buffer not reclaimed system may become unstable\n");
+
+free_dma_attrs:
dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);
return ret < 0 ? ret : 0;
@@ -504,7 +634,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
bool relocate = false;
char seg_name[10];
ssize_t offset;
- size_t size;
+ size_t size = 0;
void *ptr;
int ret;
int i;
@@ -542,7 +672,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
}
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
-
+ /* Load firmware segments */
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
@@ -575,18 +705,24 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
memset(ptr + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
-
- size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
- if (!size) {
- boot_addr = relocate ? qproc->mpss_phys : min_addr;
- writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
- writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
- }
-
size += phdr->p_memsz;
- writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
}
+ /* Transfer ownership of modem ddr region to q6 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mpss memory failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto release_firmware;
+ }
+
+ boot_addr = relocate ? qproc->mpss_phys : min_addr;
+ writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "MPSS authentication timed out\n");
@@ -602,6 +738,7 @@ release_firmware:
static int q6v5_start(struct rproc *rproc)
{
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int xfermemop_ret;
int ret;
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
@@ -637,11 +774,22 @@ static int q6v5_start(struct rproc *rproc)
goto assert_reset;
}
+ /* Assign MBA image access in DDR to q6 */
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mba memory failed: %d\n",
+ xfermemop_ret);
+ goto disable_active_clks;
+ }
+
writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
ret = q6v5proc_reset(qproc);
if (ret)
- goto halt_axi_ports;
+ goto reclaim_mba;
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
@@ -658,16 +806,22 @@ static int q6v5_start(struct rproc *rproc)
ret = q6v5_mpss_load(qproc);
if (ret)
- goto halt_axi_ports;
+ goto reclaim_mpss;
ret = wait_for_completion_timeout(&qproc->start_done,
msecs_to_jiffies(5000));
if (ret == 0) {
dev_err(qproc->dev, "start timed out\n");
ret = -ETIMEDOUT;
- goto halt_axi_ports;
+ goto reclaim_mpss;
}
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret)
+ dev_err(qproc->dev,
+ "Failed to reclaim mba buffer system may become unstable\n");
qproc->running = true;
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
@@ -677,12 +831,30 @@ static int q6v5_start(struct rproc *rproc)
return 0;
+reclaim_mpss:
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, qproc->mpss_phys,
+ qproc->mpss_size);
+ WARN_ON(xfermemop_ret);
+
halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+reclaim_mba:
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret) {
+ dev_err(qproc->dev,
+ "Failed to reclaim mba buffer, system may become unstable\n");
+ }
+
+disable_active_clks:
q6v5_clk_disable(qproc->dev, qproc->active_clks,
qproc->active_clk_count);
+
assert_reset:
reset_control_assert(qproc->mss_restart);
disable_vdd:
@@ -702,6 +874,7 @@ static int q6v5_stop(struct rproc *rproc)
{
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
+ u32 val;
qproc->running = false;
@@ -718,6 +891,20 @@ static int q6v5_stop(struct rproc *rproc)
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+ if (qproc->version == MSS_MSM8996) {
+ /*
+ * To avoid high MX current during LPASS/MSS restart.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
+ QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
+
+
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
+ qproc->mpss_phys, qproc->mpss_size);
+ WARN_ON(ret);
reset_control_assert(qproc->mss_restart);
q6v5_clk_disable(qproc->dev, qproc->active_clks,
@@ -1017,6 +1204,8 @@ static int q6v5_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
+ qproc->version = desc->version;
+ qproc->need_mem_protection = desc->need_mem_protection;
ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
if (ret < 0)
goto free_rproc;
@@ -1038,7 +1227,8 @@ static int q6v5_probe(struct platform_device *pdev)
ret = PTR_ERR(qproc->state);
goto free_rproc;
}
-
+ qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
+ qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
@@ -1067,6 +1257,24 @@ static int q6v5_remove(struct platform_device *pdev)
return 0;
}
+static const struct rproc_hexagon_res msm8996_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "pnoc",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ "gpll0_mss_clk",
+ NULL
+ },
+ .need_mem_protection = true,
+ .version = MSS_MSM8996,
+};
+
static const struct rproc_hexagon_res msm8916_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
@@ -1094,6 +1302,8 @@ static const struct rproc_hexagon_res msm8916_mss = {
"mem",
NULL
},
+ .need_mem_protection = false,
+ .version = MSS_MSM8916,
};
static const struct rproc_hexagon_res msm8974_mss = {
@@ -1131,12 +1341,15 @@ static const struct rproc_hexagon_res msm8974_mss = {
"mem",
NULL
},
+ .need_mem_protection = false,
+ .version = MSS_MSM8974,
};
static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
+ { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 1c122e230cec..a20488336aa0 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -155,6 +155,132 @@ static const struct file_operations rproc_recovery_ops = {
.llseek = generic_file_llseek,
};
+/* Expose resource table content via debugfs */
+static int rproc_rsc_table_show(struct seq_file *seq, void *p)
+{
+ static const char * const types[] = {"carveout", "devmem", "trace", "vdev"};
+ struct rproc *rproc = seq->private;
+ struct resource_table *table = rproc->table_ptr;
+ struct fw_rsc_carveout *c;
+ struct fw_rsc_devmem *d;
+ struct fw_rsc_trace *t;
+ struct fw_rsc_vdev *v;
+ int i, j;
+
+ if (!table) {
+ seq_puts(seq, "No resource table found\n");
+ return 0;
+ }
+
+ for (i = 0; i < table->num; i++) {
+ int offset = table->offset[i];
+ struct fw_rsc_hdr *hdr = (void *)table + offset;
+ void *rsc = (void *)hdr + sizeof(*hdr);
+
+ switch (hdr->type) {
+ case RSC_CARVEOUT:
+ c = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", c->da);
+ seq_printf(seq, " Physical Address 0x%x\n", c->pa);
+ seq_printf(seq, " Length 0x%x Bytes\n", c->len);
+ seq_printf(seq, " Flags 0x%x\n", c->flags);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", c->reserved);
+ seq_printf(seq, " Name %s\n\n", c->name);
+ break;
+ case RSC_DEVMEM:
+ d = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", d->da);
+ seq_printf(seq, " Physical Address 0x%x\n", d->pa);
+ seq_printf(seq, " Length 0x%x Bytes\n", d->len);
+ seq_printf(seq, " Flags 0x%x\n", d->flags);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", d->reserved);
+ seq_printf(seq, " Name %s\n\n", d->name);
+ break;
+ case RSC_TRACE:
+ t = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", t->da);
+ seq_printf(seq, " Length 0x%x Bytes\n", t->len);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", t->reserved);
+ seq_printf(seq, " Name %s\n\n", t->name);
+ break;
+ case RSC_VDEV:
+ v = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+
+ seq_printf(seq, " ID %d\n", v->id);
+ seq_printf(seq, " Notify ID %d\n", v->notifyid);
+ seq_printf(seq, " Device features 0x%x\n", v->dfeatures);
+ seq_printf(seq, " Guest features 0x%x\n", v->gfeatures);
+ seq_printf(seq, " Config length 0x%x\n", v->config_len);
+ seq_printf(seq, " Status 0x%x\n", v->status);
+ seq_printf(seq, " Number of vrings %d\n", v->num_of_vrings);
+ seq_printf(seq, " Reserved (should be zero) [%d][%d]\n\n",
+ v->reserved[0], v->reserved[1]);
+
+ for (j = 0; j < v->num_of_vrings; j++) {
+ seq_printf(seq, " Vring %d\n", j);
+ seq_printf(seq, " Device Address 0x%x\n", v->vring[j].da);
+ seq_printf(seq, " Alignment %d\n", v->vring[j].align);
+ seq_printf(seq, " Number of buffers %d\n", v->vring[j].num);
+ seq_printf(seq, " Notify ID %d\n", v->vring[j].notifyid);
+ seq_printf(seq, " Physical Address 0x%x\n\n",
+ v->vring[j].pa);
+ }
+ break;
+ default:
+ seq_printf(seq, "Unknown resource type found: %d [hdr: %p]\n",
+ hdr->type, hdr);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rproc_rsc_table_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rproc_rsc_table_show, inode->i_private);
+}
+
+static const struct file_operations rproc_rsc_table_ops = {
+ .open = rproc_rsc_table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Expose carveout content via debugfs */
+static int rproc_carveouts_show(struct seq_file *seq, void *p)
+{
+ struct rproc *rproc = seq->private;
+ struct rproc_mem_entry *carveout;
+
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ seq_puts(seq, "Carveout memory entry:\n");
+ seq_printf(seq, "\tVirtual address: %p\n", carveout->va);
+ seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
+ seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
+ seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len);
+ }
+
+ return 0;
+}
+
+static int rproc_carveouts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rproc_carveouts_show, inode->i_private);
+}
+
+static const struct file_operations rproc_carveouts_ops = {
+ .open = rproc_carveouts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void rproc_remove_trace_file(struct dentry *tfile)
{
debugfs_remove(tfile);
@@ -198,6 +324,10 @@ void rproc_create_debug_dir(struct rproc *rproc)
rproc, &rproc_name_ops);
debugfs_create_file("recovery", 0400, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
+ debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
+ rproc, &rproc_rsc_table_ops);
+ debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
+ rproc, &rproc_carveouts_ops);
}
void __init rproc_init_debugfs(void)
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index e2baecbb9dd3..7fc77696bb1e 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -28,6 +28,12 @@ config RESET_ATH79
This enables the ATH79 reset controller driver that supports the
AR71xx SoC reset controller.
+config RESET_AXS10X
+ bool "AXS10x Reset Driver" if COMPILE_TEST
+ default ARC_PLAT_AXS10X
+ help
+ This enables the reset controller driver for AXS10x.
+
config RESET_BERLIN
bool "Berlin Reset Driver" if COMPILE_TEST
default ARCH_BERLIN
@@ -75,21 +81,21 @@ config RESET_PISTACHIO
help
This enables the reset driver for ImgTec Pistachio SoCs.
-config RESET_SOCFPGA
- bool "SoCFPGA Reset Driver" if COMPILE_TEST
- default ARCH_SOCFPGA
+config RESET_SIMPLE
+ bool "Simple Reset Controller Driver" if COMPILE_TEST
+ default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX
help
- This enables the reset controller driver for Altera SoCFPGAs.
+ This enables a simple reset controller driver for reset lines that
+ that can be asserted and deasserted by toggling bits in a contiguous,
+ exclusive register space.
-config RESET_STM32
- bool "STM32 Reset Driver" if COMPILE_TEST
- default ARCH_STM32
- help
- This enables the RCC reset controller driver for STM32 MCUs.
+ Currently this driver supports Altera SoCFPGAs, the RCC reset
+ controller in STM32 MCUs, Allwinner SoCs, and ZTE's zx2967 family.
config RESET_SUNXI
bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
default ARCH_SUNXI
+ select RESET_SIMPLE
help
This enables the reset driver for Allwinner SoCs.
@@ -121,12 +127,6 @@ config RESET_UNIPHIER
Say Y if you want to control reset signals provided by System Control
block, Media I/O block, Peripheral Block.
-config RESET_ZX2967
- bool "ZTE ZX2967 Reset Driver"
- depends on ARCH_ZX || COMPILE_TEST
- help
- This enables the reset controller driver for ZTE's zx2967 family.
-
config RESET_ZYNQ
bool "ZYNQ Reset Driver" if COMPILE_TEST
default ARCH_ZYNQ
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index c1fd702ac57c..132c24f5ddb5 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ARCH_STI) += sti/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
@@ -13,12 +14,10 @@ obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
-obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
-obj-$(CONFIG_RESET_STM32) += reset-stm32.o
+obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
-obj-$(CONFIG_RESET_ZX2967) += reset-zx2967.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/reset-axs10x.c b/drivers/reset/reset-axs10x.c
new file mode 100644
index 000000000000..afb298e46bd9
--- /dev/null
+++ b/drivers/reset/reset-axs10x.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2017 Synopsys.
+ *
+ * Synopsys AXS10x reset driver.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#define to_axs10x_rst(p) container_of((p), struct axs10x_rst, rcdev)
+
+#define AXS10X_MAX_RESETS 32
+
+struct axs10x_rst {
+ void __iomem *regs_rst;
+ spinlock_t lock;
+ struct reset_controller_dev rcdev;
+};
+
+static int axs10x_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct axs10x_rst *rst = to_axs10x_rst(rcdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rst->lock, flags);
+ writel(BIT(id), rst->regs_rst);
+ spin_unlock_irqrestore(&rst->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops axs10x_reset_ops = {
+ .reset = axs10x_reset_reset,
+};
+
+static int axs10x_reset_probe(struct platform_device *pdev)
+{
+ struct axs10x_rst *rst;
+ struct resource *mem;
+
+ rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
+ if (!rst)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rst->regs_rst = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rst->regs_rst))
+ return PTR_ERR(rst->regs_rst);
+
+ spin_lock_init(&rst->lock);
+
+ rst->rcdev.owner = THIS_MODULE;
+ rst->rcdev.ops = &axs10x_reset_ops;
+ rst->rcdev.of_node = pdev->dev.of_node;
+ rst->rcdev.nr_resets = AXS10X_MAX_RESETS;
+
+ return devm_reset_controller_register(&pdev->dev, &rst->rcdev);
+}
+
+static const struct of_device_id axs10x_reset_dt_match[] = {
+ { .compatible = "snps,axs10x-reset" },
+ { },
+};
+
+static struct platform_driver axs10x_reset_driver = {
+ .probe = axs10x_reset_probe,
+ .driver = {
+ .name = "axs10x-reset",
+ .of_match_table = axs10x_reset_dt_match,
+ },
+};
+builtin_platform_driver(axs10x_reset_driver);
+
+MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys AXS10x reset driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index a8b915eb8b58..c419a3753d00 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -62,13 +62,16 @@
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/of_device.h>
#define REG_COUNT 8
#define BITS_PER_REG 32
+#define LEVEL_OFFSET 0x7c
struct meson_reset {
void __iomem *reg_base;
struct reset_controller_dev rcdev;
+ spinlock_t lock;
};
static int meson_reset_reset(struct reset_controller_dev *rcdev,
@@ -80,26 +83,68 @@ static int meson_reset_reset(struct reset_controller_dev *rcdev,
unsigned int offset = id % BITS_PER_REG;
void __iomem *reg_addr = data->reg_base + (bank << 2);
- if (bank >= REG_COUNT)
- return -EINVAL;
-
writel(BIT(offset), reg_addr);
return 0;
}
-static const struct reset_control_ops meson_reset_ops = {
+static int meson_reset_level(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct meson_reset *data =
+ container_of(rcdev, struct meson_reset, rcdev);
+ unsigned int bank = id / BITS_PER_REG;
+ unsigned int offset = id % BITS_PER_REG;
+ void __iomem *reg_addr = data->reg_base + LEVEL_OFFSET + (bank << 2);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(reg_addr);
+ if (assert)
+ writel(reg & ~BIT(offset), reg_addr);
+ else
+ writel(reg | BIT(offset), reg_addr);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int meson_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson_reset_level(rcdev, id, true);
+}
+
+static int meson_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson_reset_level(rcdev, id, false);
+}
+
+static const struct reset_control_ops meson_reset_meson8_ops = {
.reset = meson_reset_reset,
};
+static const struct reset_control_ops meson_reset_gx_ops = {
+ .reset = meson_reset_reset,
+ .assert = meson_reset_assert,
+ .deassert = meson_reset_deassert,
+};
+
static const struct of_device_id meson_reset_dt_ids[] = {
- { .compatible = "amlogic,meson8b-reset", },
- { .compatible = "amlogic,meson-gxbb-reset", },
+ { .compatible = "amlogic,meson8b-reset",
+ .data = &meson_reset_meson8_ops, },
+ { .compatible = "amlogic,meson-gxbb-reset",
+ .data = &meson_reset_gx_ops, },
{ /* sentinel */ },
};
static int meson_reset_probe(struct platform_device *pdev)
{
+ const struct reset_control_ops *ops;
struct meson_reset *data;
struct resource *res;
@@ -107,6 +152,10 @@ static int meson_reset_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(data->reg_base))
@@ -114,9 +163,11 @@ static int meson_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG;
- data->rcdev.ops = &meson_reset_ops;
+ data->rcdev.ops = ops;
data->rcdev.of_node = pdev->dev.of_node;
return devm_reset_controller_register(&pdev->dev, &data->rcdev);
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
new file mode 100644
index 000000000000..2d4f362ef025
--- /dev/null
+++ b/drivers/reset/reset-simple.c
@@ -0,0 +1,186 @@
+/*
+ * Simple Reset Controller Driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * Based on Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#include "reset-simple.h"
+
+static inline struct reset_simple_data *
+to_reset_simple_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct reset_simple_data, rcdev);
+}
+
+static int reset_simple_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct reset_simple_data *data = to_reset_simple_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * reg_width));
+ if (assert ^ data->active_low)
+ reg |= BIT(offset);
+ else
+ reg &= ~BIT(offset);
+ writel(reg, data->membase + (bank * reg_width));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int reset_simple_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return reset_simple_update(rcdev, id, true);
+}
+
+static int reset_simple_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return reset_simple_update(rcdev, id, false);
+}
+
+static int reset_simple_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct reset_simple_data *data = to_reset_simple_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+ u32 reg;
+
+ reg = readl(data->membase + (bank * reg_width));
+
+ return !(reg & BIT(offset)) ^ !data->status_active_low;
+}
+
+const struct reset_control_ops reset_simple_ops = {
+ .assert = reset_simple_assert,
+ .deassert = reset_simple_deassert,
+ .status = reset_simple_status,
+};
+
+/**
+ * struct reset_simple_devdata - simple reset controller properties
+ * @reg_offset: offset between base address and first reset register.
+ * @nr_resets: number of resets. If not set, default to resource size in bits.
+ * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
+ * are set to assert the reset.
+ * @status_active_low: if true, bits read back as cleared while the reset is
+ * asserted. Otherwise, bits read back as set while the
+ * reset is asserted.
+ */
+struct reset_simple_devdata {
+ u32 reg_offset;
+ u32 nr_resets;
+ bool active_low;
+ bool status_active_low;
+};
+
+#define SOCFPGA_NR_BANKS 8
+
+static const struct reset_simple_devdata reset_simple_socfpga = {
+ .reg_offset = 0x10,
+ .nr_resets = SOCFPGA_NR_BANKS * 32,
+ .status_active_low = true,
+};
+
+static const struct reset_simple_devdata reset_simple_active_low = {
+ .active_low = true,
+ .status_active_low = true,
+};
+
+static const struct of_device_id reset_simple_dt_ids[] = {
+ { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga },
+ { .compatible = "st,stm32-rcc", },
+ { .compatible = "allwinner,sun6i-a31-clock-reset",
+ .data = &reset_simple_active_low },
+ { .compatible = "zte,zx296718-reset",
+ .data = &reset_simple_active_low },
+ { /* sentinel */ },
+};
+
+static int reset_simple_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct reset_simple_devdata *devdata;
+ struct reset_simple_data *data;
+ void __iomem *membase;
+ struct resource *res;
+ u32 reg_offset = 0;
+
+ devdata = of_device_get_match_data(dev);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ membase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(membase))
+ return PTR_ERR(membase);
+
+ spin_lock_init(&data->lock);
+ data->membase = membase;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
+ data->rcdev.ops = &reset_simple_ops;
+ data->rcdev.of_node = dev->of_node;
+
+ if (devdata) {
+ reg_offset = devdata->reg_offset;
+ if (devdata->nr_resets)
+ data->rcdev.nr_resets = devdata->nr_resets;
+ data->active_low = devdata->active_low;
+ data->status_active_low = devdata->status_active_low;
+ }
+
+ if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") &&
+ of_property_read_u32(dev->of_node, "altr,modrst-offset",
+ &reg_offset)) {
+ dev_warn(dev,
+ "missing altr,modrst-offset property, assuming 0x%x!\n",
+ reg_offset);
+ }
+
+ data->membase += reg_offset;
+
+ return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static struct platform_driver reset_simple_driver = {
+ .probe = reset_simple_probe,
+ .driver = {
+ .name = "simple-reset",
+ .of_match_table = reset_simple_dt_ids,
+ },
+};
+builtin_platform_driver(reset_simple_driver);
diff --git a/drivers/reset/reset-simple.h b/drivers/reset/reset-simple.h
new file mode 100644
index 000000000000..8a496022baef
--- /dev/null
+++ b/drivers/reset/reset-simple.h
@@ -0,0 +1,45 @@
+/*
+ * Simple Reset Controller ops
+ *
+ * Based on Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RESET_SIMPLE_H__
+#define __RESET_SIMPLE_H__
+
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct reset_simple_data - driver data for simple reset controllers
+ * @lock: spinlock to protect registers during read-modify-write cycles
+ * @membase: memory mapped I/O register range
+ * @rcdev: reset controller device base structure
+ * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
+ * are set to assert the reset. Note that this says nothing about
+ * the voltage level of the actual reset line.
+ * @status_active_low: if true, bits read back as cleared while the reset is
+ * asserted. Otherwise, bits read back as set while the
+ * reset is asserted.
+ */
+struct reset_simple_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+ bool active_low;
+ bool status_active_low;
+};
+
+extern const struct reset_control_ops reset_simple_ops;
+
+#endif /* __RESET_SIMPLE_H__ */
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
deleted file mode 100644
index 3907bbc9c6cf..000000000000
--- a/drivers/reset/reset-socfpga.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Socfpga Reset Controller Driver
- *
- * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
- *
- * based on
- * Allwinner SoCs Reset Controller driver
- *
- * Copyright 2013 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#define BANK_INCREMENT 4
-#define NR_BANKS 8
-
-struct socfpga_reset_data {
- spinlock_t lock;
- void __iomem *membase;
- struct reset_controller_dev rcdev;
-};
-
-static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct socfpga_reset_data *data = container_of(rcdev,
- struct socfpga_reset_data,
- rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * BANK_INCREMENT));
- writel(reg | BIT(offset), data->membase + (bank * BANK_INCREMENT));
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct socfpga_reset_data *data = container_of(rcdev,
- struct socfpga_reset_data,
- rcdev);
-
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * BANK_INCREMENT));
- writel(reg & ~BIT(offset), data->membase + (bank * BANK_INCREMENT));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int socfpga_reset_status(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct socfpga_reset_data *data = container_of(rcdev,
- struct socfpga_reset_data, rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- u32 reg;
-
- reg = readl(data->membase + (bank * BANK_INCREMENT));
-
- return !(reg & BIT(offset));
-}
-
-static const struct reset_control_ops socfpga_reset_ops = {
- .assert = socfpga_reset_assert,
- .deassert = socfpga_reset_deassert,
- .status = socfpga_reset_status,
-};
-
-static int socfpga_reset_probe(struct platform_device *pdev)
-{
- struct socfpga_reset_data *data;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- u32 modrst_offset;
-
- /*
- * The binding was mainlined without the required property.
- * Do not continue, when we encounter an old DT.
- */
- if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
- dev_err(&pdev->dev, "%pOF missing #reset-cells property\n",
- pdev->dev.of_node);
- return -EINVAL;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->membase))
- return PTR_ERR(data->membase);
-
- if (of_property_read_u32(np, "altr,modrst-offset", &modrst_offset)) {
- dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n");
- modrst_offset = 0x10;
- }
- data->membase += modrst_offset;
-
- spin_lock_init(&data->lock);
-
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
- data->rcdev.ops = &socfpga_reset_ops;
- data->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(dev, &data->rcdev);
-}
-
-static const struct of_device_id socfpga_reset_dt_ids[] = {
- { .compatible = "altr,rst-mgr", },
- { /* sentinel */ },
-};
-
-static struct platform_driver socfpga_reset_driver = {
- .probe = socfpga_reset_probe,
- .driver = {
- .name = "socfpga-reset",
- .of_match_table = socfpga_reset_dt_ids,
- },
-};
-builtin_platform_driver(socfpga_reset_driver);
diff --git a/drivers/reset/reset-stm32.c b/drivers/reset/reset-stm32.c
deleted file mode 100644
index 3a7c8527e66a..000000000000
--- a/drivers/reset/reset-stm32.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) Maxime Coquelin 2015
- * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
- * License terms: GNU General Public License (GPL), version 2
- *
- * Heavily based on sunxi driver from Maxime Ripard.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-struct stm32_reset_data {
- spinlock_t lock;
- void __iomem *membase;
- struct reset_controller_dev rcdev;
-};
-
-static int stm32_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct stm32_reset_data *data = container_of(rcdev,
- struct stm32_reset_data,
- rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * 4));
- writel(reg | BIT(offset), data->membase + (bank * 4));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct stm32_reset_data *data = container_of(rcdev,
- struct stm32_reset_data,
- rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * 4));
- writel(reg & ~BIT(offset), data->membase + (bank * 4));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static const struct reset_control_ops stm32_reset_ops = {
- .assert = stm32_reset_assert,
- .deassert = stm32_reset_deassert,
-};
-
-static const struct of_device_id stm32_reset_dt_ids[] = {
- { .compatible = "st,stm32-rcc", },
- { /* sentinel */ },
-};
-
-static int stm32_reset_probe(struct platform_device *pdev)
-{
- struct stm32_reset_data *data;
- struct resource *res;
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->membase))
- return PTR_ERR(data->membase);
-
- spin_lock_init(&data->lock);
-
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = resource_size(res) * 8;
- data->rcdev.ops = &stm32_reset_ops;
- data->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(&pdev->dev, &data->rcdev);
-}
-
-static struct platform_driver stm32_reset_driver = {
- .probe = stm32_reset_probe,
- .driver = {
- .name = "stm32-rcc-reset",
- .of_match_table = stm32_reset_dt_ids,
- },
-};
-builtin_platform_driver(stm32_reset_driver);
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 2c7dd1fd08df..db9a1a75523f 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -22,64 +22,11 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-struct sunxi_reset_data {
- spinlock_t lock;
- void __iomem *membase;
- struct reset_controller_dev rcdev;
-};
-
-static int sunxi_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct sunxi_reset_data *data = container_of(rcdev,
- struct sunxi_reset_data,
- rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * reg_width));
- writel(reg & ~BIT(offset), data->membase + (bank * reg_width));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int sunxi_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct sunxi_reset_data *data = container_of(rcdev,
- struct sunxi_reset_data,
- rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * reg_width));
- writel(reg | BIT(offset), data->membase + (bank * reg_width));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static const struct reset_control_ops sunxi_reset_ops = {
- .assert = sunxi_reset_assert,
- .deassert = sunxi_reset_deassert,
-};
+#include "reset-simple.h"
static int sunxi_reset_init(struct device_node *np)
{
- struct sunxi_reset_data *data;
+ struct reset_simple_data *data;
struct resource res;
resource_size_t size;
int ret;
@@ -108,8 +55,9 @@ static int sunxi_reset_init(struct device_node *np)
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = size * 8;
- data->rcdev.ops = &sunxi_reset_ops;
+ data->rcdev.ops = &reset_simple_ops;
data->rcdev.of_node = np;
+ data->active_low = true;
return reset_controller_register(&data->rcdev);
@@ -122,6 +70,8 @@ err_alloc:
* These are the reset controller we need to initialize early on in
* our system, before we can even think of using a regular device
* driver for it.
+ * The controllers that we can register through the regular device
+ * model are handled by the simple reset driver directly.
*/
static const struct of_device_id sunxi_early_reset_dt_ids[] __initconst = {
{ .compatible = "allwinner,sun6i-a31-ahb1-reset", },
@@ -135,45 +85,3 @@ void __init sun6i_reset_init(void)
for_each_matching_node(np, sunxi_early_reset_dt_ids)
sunxi_reset_init(np);
}
-
-/*
- * And these are the controllers we can register through the regular
- * device model.
- */
-static const struct of_device_id sunxi_reset_dt_ids[] = {
- { .compatible = "allwinner,sun6i-a31-clock-reset", },
- { /* sentinel */ },
-};
-
-static int sunxi_reset_probe(struct platform_device *pdev)
-{
- struct sunxi_reset_data *data;
- struct resource *res;
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->membase))
- return PTR_ERR(data->membase);
-
- spin_lock_init(&data->lock);
-
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = resource_size(res) * 8;
- data->rcdev.ops = &sunxi_reset_ops;
- data->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(&pdev->dev, &data->rcdev);
-}
-
-static struct platform_driver sunxi_reset_driver = {
- .probe = sunxi_reset_probe,
- .driver = {
- .name = "sunxi-reset",
- .of_match_table = sunxi_reset_dt_ids,
- },
-};
-builtin_platform_driver(sunxi_reset_driver);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index bda2dd196ae5..e8bb023ff15e 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -58,6 +58,7 @@ static const struct uniphier_reset_data uniphier_ld4_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
+ UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, MIO, RLE) */
UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */
UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */
@@ -76,6 +77,7 @@ static const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
+ UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, RLE) */
UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */
UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */
@@ -92,6 +94,7 @@ static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
+ UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC, MIO) */
UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */
UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */
@@ -102,6 +105,7 @@ static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
+ UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */
UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */
UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
@@ -114,6 +118,20 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
UNIPHIER_RESET_END,
};
+static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
+ UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
+ UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
+ UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */
+ UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */
+ UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */
+ UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */
+ UNIPHIER_RESETX(20, 0x200c, 17), /* USB31-PHY0 */
+ UNIPHIER_RESETX(21, 0x200c, 19), /* USB31-PHY1 */
+ UNIPHIER_RESET_END,
+};
+
/* Media I/O reset data */
#define UNIPHIER_MIO_RESET_SD(id, ch) \
UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0)
@@ -359,6 +377,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-reset",
.data = uniphier_ld20_sys_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-reset",
+ .data = uniphier_pxs3_sys_reset_data,
+ },
/* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-ld4-mio-reset",
@@ -392,6 +414,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
+ },
/* Peripheral reset */
{
.compatible = "socionext,uniphier-ld4-peri-reset",
@@ -421,6 +447,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
/* Analog signal amplifiers reset */
{
.compatible = "socionext,uniphier-ld11-adamv-reset",
diff --git a/drivers/reset/reset-zx2967.c b/drivers/reset/reset-zx2967.c
deleted file mode 100644
index 4f319f7753d4..000000000000
--- a/drivers/reset/reset-zx2967.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * ZTE's zx2967 family reset controller driver
- *
- * Copyright (C) 2017 ZTE Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
-
-struct zx2967_reset {
- void __iomem *reg_base;
- spinlock_t lock;
- struct reset_controller_dev rcdev;
-};
-
-static int zx2967_reset_act(struct reset_controller_dev *rcdev,
- unsigned long id, bool assert)
-{
- struct zx2967_reset *reset = NULL;
- int bank = id / 32;
- int offset = id % 32;
- u32 reg;
- unsigned long flags;
-
- reset = container_of(rcdev, struct zx2967_reset, rcdev);
-
- spin_lock_irqsave(&reset->lock, flags);
-
- reg = readl_relaxed(reset->reg_base + (bank * 4));
- if (assert)
- reg &= ~BIT(offset);
- else
- reg |= BIT(offset);
- writel_relaxed(reg, reset->reg_base + (bank * 4));
-
- spin_unlock_irqrestore(&reset->lock, flags);
-
- return 0;
-}
-
-static int zx2967_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return zx2967_reset_act(rcdev, id, true);
-}
-
-static int zx2967_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return zx2967_reset_act(rcdev, id, false);
-}
-
-static const struct reset_control_ops zx2967_reset_ops = {
- .assert = zx2967_reset_assert,
- .deassert = zx2967_reset_deassert,
-};
-
-static int zx2967_reset_probe(struct platform_device *pdev)
-{
- struct zx2967_reset *reset;
- struct resource *res;
-
- reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
- if (!reset)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reset->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(reset->reg_base))
- return PTR_ERR(reset->reg_base);
-
- spin_lock_init(&reset->lock);
-
- reset->rcdev.owner = THIS_MODULE;
- reset->rcdev.nr_resets = resource_size(res) * 8;
- reset->rcdev.ops = &zx2967_reset_ops;
- reset->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
-}
-
-static const struct of_device_id zx2967_reset_dt_ids[] = {
- { .compatible = "zte,zx296718-reset", },
- {},
-};
-
-static struct platform_driver zx2967_reset_driver = {
- .probe = zx2967_reset_probe,
- .driver = {
- .name = "zx2967-reset",
- .of_match_table = zx2967_reset_dt_ids,
- },
-};
-builtin_platform_driver(zx2967_reset_driver);
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 0fe6eac46512..65a9f6b892f0 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -47,7 +47,8 @@ config RPMSG_QCOM_SMD
platforms.
config RPMSG_VIRTIO
- tristate
+ tristate "Virtio RPMSG bus driver"
+ depends on HAS_DMA
select RPMSG
select VIRTIO
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5dcc9bf1c5bc..40d76d2a5eff 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -227,6 +227,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
init_completion(&channel->open_req);
init_completion(&channel->open_ack);
+ init_completion(&channel->intent_req_comp);
INIT_LIST_HEAD(&channel->done_intents);
INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
@@ -1148,19 +1149,38 @@ static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
{
struct glink_channel *channel = to_glink_channel(rpdev->ept);
- struct glink_core_rx_intent *intent;
+ struct device_node *np = rpdev->dev.of_node;
struct qcom_glink *glink = channel->glink;
- int num_intents = glink->intentless ? 0 : 5;
+ struct glink_core_rx_intent *intent;
+ const struct property *prop = NULL;
+ __be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
+ int num_intents;
+ int num_groups = 1;
+ __be32 *val = defaults;
+ int size;
+
+ if (glink->intentless)
+ return 0;
+
+ prop = of_find_property(np, "qcom,intents", NULL);
+ if (prop) {
+ val = prop->value;
+ num_groups = prop->length / sizeof(u32) / 2;
+ }
/* Channel is now open, advertise base set of intents */
- while (num_intents--) {
- intent = qcom_glink_alloc_intent(glink, channel, SZ_1K, true);
- if (!intent)
- break;
+ while (num_groups--) {
+ size = be32_to_cpup(val++);
+ num_intents = be32_to_cpup(val++);
+ while (num_intents--) {
+ intent = qcom_glink_alloc_intent(glink, channel, size,
+ true);
+ if (!intent)
+ break;
- qcom_glink_advertise_intent(glink, channel, intent);
+ qcom_glink_advertise_intent(glink, channel, intent);
+ }
}
-
return 0;
}
@@ -1237,11 +1257,16 @@ static int __qcom_glink_send(struct glink_channel *channel,
spin_lock_irqsave(&channel->intent_lock, flags);
idr_for_each_entry(&channel->riids, tmp, iid) {
if (tmp->size >= len && !tmp->in_use) {
- tmp->in_use = true;
- intent = tmp;
- break;
+ if (!intent)
+ intent = tmp;
+ else if (intent->size > tmp->size)
+ intent = tmp;
+ if (intent->size == len)
+ break;
}
}
+ if (intent)
+ intent->in_use = true;
spin_unlock_irqrestore(&channel->intent_lock, flags);
/* We found an available intent */
@@ -1551,6 +1576,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
idr_init(&glink->rcids);
glink->mbox_client.dev = dev;
+ glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
if (IS_ERR(glink->mbox_chan)) {
if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
@@ -1616,3 +1642,6 @@ void qcom_glink_native_unregister(struct qcom_glink *glink)
device_unregister(glink->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_unregister);
+
+MODULE_DESCRIPTION("Qualcomm GLINK driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e0e58f3b1420..b59a31b079a5 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -433,6 +433,19 @@ config RTC_DRV_PCF85063
This driver can also be built as a module. If so, the module
will be called rtc-pcf85063.
+config RTC_DRV_PCF85363
+ tristate "NXP PCF85363"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the PCF85363 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85363.
+
+ The nvmem interface will be named pcf85363-#, where # is the
+ zero-based instance number.
+
config RTC_DRV_PCF8563
tristate "Philips PCF8563/Epson RTC8564"
help
@@ -1174,6 +1187,17 @@ config RTC_DRV_WM8350
This driver can also be built as a module. If so, the module
will be called "rtc-wm8350".
+config RTC_DRV_SC27XX
+ tristate "Spreadtrum SC27xx RTC"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ If you say Y here you will get support for the RTC subsystem
+ of the Spreadtrum SC27xx series PMICs. The SC27xx series PMICs
+ includes the SC2720, SC2721, SC2723, SC2730 and SC2731 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sc27xx.
+
config RTC_DRV_SPEAR
tristate "SPEAR ST RTC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -1706,14 +1730,24 @@ config RTC_DRV_MOXART
will be called rtc-moxart
config RTC_DRV_MT6397
- tristate "Mediatek Real Time Clock driver"
+ tristate "MediaTek PMIC based RTC"
depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
help
- This selects the Mediatek(R) RTC driver. RTC is part of Mediatek
+ This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
- Mediatek(R) RTC driver.
+ MediaTek(R) RTC driver.
+
+ If you want to use MediaTek(R) RTC interface, select Y or M here.
- If you want to use Mediatek(R) RTC interface, select Y or M here.
+config RTC_DRV_MT7622
+ tristate "MediaTek SoC based RTC"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for the real time clock built in the MediaTek
+ SoCs.
+
+ This drive can also be built as a module. If so, the module
+ will be called rtc-mt7622.
config RTC_DRV_XGENE
tristate "APM X-Gene RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0bf1fc02b82c..f2f50c11dc38 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
+obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
@@ -114,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
+obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
@@ -144,6 +146,7 @@ obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
+obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8cec9a02c0b8..672b192f8153 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
}
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (!next) {
+ if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
@@ -1004,6 +1004,10 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
* to compensate for differences in the actual clock rate due to temperature,
* the crystal, capacitor, etc.
*
+ * The adjustment applied is as follows:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t0 is the measured length of 1 RTC second with offset = 0
+ *
* Kernel interface to adjust an rtc clock offset.
* Return 0 on success, or a negative number on error.
* If the rtc offset is not setable (or not implemented), return -EINVAL
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index fea9a60b06cf..b033bc556f5d 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -614,12 +614,12 @@ static int abx80x_probe(struct i2c_client *client,
if (err)
return err;
- rtc = devm_rtc_device_register(&client->dev, "abx8xx",
- &abx80x_rtc_ops, THIS_MODULE);
-
+ rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &abx80x_rtc_ops;
+
i2c_set_clientdata(client, rtc);
if (client->irq > 0) {
@@ -646,10 +646,14 @@ static int abx80x_probe(struct i2c_client *client,
err = devm_add_action_or_reset(&client->dev,
rtc_calib_remove_sysfs_group,
&client->dev);
- if (err)
+ if (err) {
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n",
err);
+ return err;
+ }
+
+ err = rtc_register_device(rtc);
return err;
}
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 21f355c37eab..1e4978c96ffd 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -28,6 +28,8 @@
#define RTC_IRQ_AL_EN BIT(0)
#define RTC_IRQ_FREQ_EN BIT(1)
#define RTC_IRQ_FREQ_1HZ BIT(2)
+#define RTC_CCR 0x18
+#define RTC_CCR_MODE BIT(15)
#define RTC_TIME 0xC
#define RTC_ALARM1 0x10
@@ -343,18 +345,117 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The information given in the Armada 388 functional spec is complex.
+ * They give two different formulas for calculating the offset value,
+ * but when considering "Offset" as an 8-bit signed integer, they both
+ * reduce down to (we shall rename "Offset" as "val" here):
+ *
+ * val = (f_ideal / f_measured - 1) / resolution where f_ideal = 32768
+ *
+ * Converting to time, f = 1/t:
+ * val = (t_measured / t_ideal - 1) / resolution where t_ideal = 1/32768
+ *
+ * => t_measured / t_ideal = val * resolution + 1
+ *
+ * "offset" in the RTC interface is defined as:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t is the desired period, t0 is the measured period with a zero
+ * offset, which is t_measured above. With t0 = t_measured and t = t_ideal,
+ * offset = (t_ideal / t_measured - 1) / 1e-9
+ *
+ * => t_ideal / t_measured = offset * 1e-9 + 1
+ *
+ * so:
+ *
+ * offset * 1e-9 + 1 = 1 / (val * resolution + 1)
+ *
+ * We want "resolution" to be an integer, so resolution = R * 1e-9, giving
+ * offset = 1e18 / (val * R + 1e9) - 1e9
+ * val = (1e18 / (offset + 1e9) - 1e9) / R
+ * with a common transformation:
+ * f(x) = 1e18 / (x + 1e9) - 1e9
+ * offset = f(val * R)
+ * val = f(offset) / R
+ *
+ * Armada 38x supports two modes, fine mode (954ppb) and coarse mode (3815ppb).
+ */
+static long armada38x_ppb_convert(long ppb)
+{
+ long div = ppb + 1000000000L;
+
+ return div_s64(1000000000000000000LL + div / 2, div) - 1000000000L;
+}
+
+static int armada38x_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr, flags;
+ long ppb_cor;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+ ccr = rtc->data->read_rtc_reg(rtc, RTC_CCR);
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ ppb_cor = (ccr & RTC_CCR_MODE ? 3815 : 954) * (s8)ccr;
+ /* ppb_cor + 1000000000L can never be zero */
+ *offset = armada38x_ppb_convert(ppb_cor);
+
+ return 0;
+}
+
+static int armada38x_rtc_set_offset(struct device *dev, long offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr = 0;
+ long ppb_cor, off;
+
+ /*
+ * The maximum ppb_cor is -128 * 3815 .. 127 * 3815, but we
+ * need to clamp the input. This equates to -484270 .. 488558.
+ * Not only is this to stop out of range "off" but also to
+ * avoid the division by zero in armada38x_ppb_convert().
+ */
+ offset = clamp(offset, -484270L, 488558L);
+
+ ppb_cor = armada38x_ppb_convert(offset);
+
+ /*
+ * Use low update mode where possible, which gives a better
+ * resolution of correction.
+ */
+ off = DIV_ROUND_CLOSEST(ppb_cor, 954);
+ if (off > 127 || off < -128) {
+ ccr = RTC_CCR_MODE;
+ off = DIV_ROUND_CLOSEST(ppb_cor, 3815);
+ }
+
+ /*
+ * Armada 388 requires a bit pattern in bits 14..8 depending on
+ * the sign bit: { 0, ~S, S, S, S, S, S }
+ */
+ ccr |= (off & 0x3fff) ^ 0x2000;
+ rtc_delayed_write(ccr, rtc, RTC_CCR);
+
+ return 0;
+}
+
static const struct rtc_class_ops armada38x_rtc_ops = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
.set_alarm = armada38x_rtc_set_alarm,
.alarm_irq_enable = armada38x_rtc_alarm_irq_enable,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct rtc_class_ops armada38x_rtc_ops_noirq = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct armada38x_rtc_data armada38x_data = {
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e221b78b6f10..de81ecedd571 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -42,8 +42,6 @@
#define at91_rtc_write(field, val) \
writel_relaxed((val), at91_rtc_regs + field)
-#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
-
struct at91_rtc_config {
bool use_shadow_imr;
};
@@ -51,7 +49,6 @@ struct at91_rtc_config {
static const struct at91_rtc_config *at91_rtc_config;
static DECLARE_COMPLETION(at91_rtc_updated);
static DECLARE_COMPLETION(at91_rtc_upd_rdy);
-static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
static DEFINE_SPINLOCK(at91_rtc_lock);
@@ -131,8 +128,7 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
/*
* The Calendar Alarm register does not have a field for
- * the year - so these will return an invalid value. When an
- * alarm is set, at91_alarm_year will store the current year.
+ * the year - so these will return an invalid value.
*/
tm->tm_year = bcd2bin(date & AT91_RTC_CENT) * 100; /* century */
tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8); /* year */
@@ -208,15 +204,14 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
- tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
- tm->tm_year = at91_alarm_year - 1900;
+ tm->tm_year = -1;
alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
? 1 : 0;
- dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ dev_dbg(dev, "%s(): %02d-%02d %02d:%02d:%02d %sabled\n", __func__,
+ tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ alrm->enabled ? "en" : "dis");
return 0;
}
@@ -230,8 +225,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
- at91_alarm_year = tm.tm_year;
-
tm.tm_mon = alrm->time.tm_mon;
tm.tm_mday = alrm->time.tm_mday;
tm.tm_hour = alrm->time.tm_hour;
@@ -255,7 +248,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
}
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
+ tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
return 0;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 00efe24a6063..215eac68ae2d 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -71,9 +71,9 @@ static void rtc_uie_task(struct work_struct *work)
if (num)
rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
-static void rtc_uie_timer(unsigned long data)
+static void rtc_uie_timer(struct timer_list *t)
{
- struct rtc_device *rtc = (struct rtc_device *)data;
+ struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
unsigned long flags;
spin_lock_irqsave(&rtc->irq_lock, flags);
@@ -460,7 +460,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
INIT_WORK(&rtc->uie_task, rtc_uie_task);
- setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+ timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
#endif
cdev_init(&rtc->char_dev, &rtc_dev_fops);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 72b22935eb62..d8df2e9e14ad 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -514,56 +514,43 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x,
spi_message_add_tail(x, m);
}
-static ssize_t
-ds1305_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = DS1305_NVRAM + off;
msg_init(&m, x, &addr, count, NULL, buf);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "read", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static ssize_t
-ds1305_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = (DS1305_WRITE | DS1305_NVRAM) + off;
msg_init(&m, x, &addr, count, buf, NULL);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "write", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static struct bin_attribute nvram = {
- .attr.name = "nvram",
- .attr.mode = S_IRUGO | S_IWUSR,
- .read = ds1305_nvram_read,
- .write = ds1305_nvram_write,
- .size = DS1305_NVRAM_LEN,
+static struct nvmem_config ds1305_nvmem_cfg = {
+ .name = "ds1305_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = DS1305_NVRAM_LEN,
+ .reg_read = ds1305_nvram_read,
+ .reg_write = ds1305_nvram_write,
};
/*----------------------------------------------------------------------*/
@@ -708,10 +695,19 @@ static int ds1305_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
- ds1305->rtc = devm_rtc_device_register(&spi->dev, "ds1305",
- &ds1305_ops, THIS_MODULE);
+ ds1305->rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(ds1305->rtc)) {
- status = PTR_ERR(ds1305->rtc);
+ return PTR_ERR(ds1305->rtc);
+ }
+
+ ds1305->rtc->ops = &ds1305_ops;
+
+ ds1305_nvmem_cfg.priv = ds1305;
+ ds1305->rtc->nvmem_config = &ds1305_nvmem_cfg;
+ ds1305->rtc->nvram_old_abi = true;
+
+ status = rtc_register_device(ds1305->rtc);
+ if (status) {
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
return status;
}
@@ -734,12 +730,6 @@ static int ds1305_probe(struct spi_device *spi)
}
}
- /* export NVRAM */
- status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
- if (status < 0) {
- dev_err(&spi->dev, "register nvram --> %d\n", status);
- }
-
return 0;
}
@@ -747,8 +737,6 @@ static int ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
-
/* carefully shut down irq and workqueue, if present */
if (spi->irq) {
set_bit(FLAG_EXITING, &ds1305->flags);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index e7d9215c9201..923dde912f60 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -325,6 +325,10 @@ static const struct of_device_id ds1307_of_match[] = {
.compatible = "isil,isl12057",
.data = (void *)ds_1337
},
+ {
+ .compatible = "epson,rx8130",
+ .data = (void *)rx_8130
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ds1307_of_match);
@@ -348,6 +352,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = {
{ .id = "PT7C4338", .driver_data = ds_1307 },
{ .id = "RX8025", .driver_data = rx_8025 },
{ .id = "ISL12057", .driver_data = ds_1337 },
+ { .id = "RX8130", .driver_data = rx_8130 },
{ }
};
MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
@@ -787,8 +792,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
* Alarm support for mcp794xx devices.
*/
-#define MCP794XX_REG_WEEKDAY 0x3
-#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7
#define MCP794XX_REG_CONTROL 0x07
# define MCP794XX_BIT_ALM0_EN 0x10
# define MCP794XX_BIT_ALM1_EN 0x20
@@ -877,15 +880,38 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * We may have a random RTC weekday, therefore calculate alarm weekday based
+ * on current weekday we read from the RTC timekeeping regs
+ */
+static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm)
+{
+ struct rtc_time tm_now;
+ int days_now, days_alarm, ret;
+
+ ret = ds1307_get_time(dev, &tm_now);
+ if (ret)
+ return ret;
+
+ days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60);
+ days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60);
+
+ return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1;
+}
+
static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char regs[10];
- int ret;
+ int wday, ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
+ wday = mcp794xx_alm_weekday(dev, &t->time);
+ if (wday < 0)
+ return wday;
+
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
@@ -902,7 +928,7 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
regs[3] = bin2bcd(t->time.tm_sec);
regs[4] = bin2bcd(t->time.tm_min);
regs[5] = bin2bcd(t->time.tm_hour);
- regs[6] = bin2bcd(t->time.tm_wday + 1);
+ regs[6] = wday;
regs[7] = bin2bcd(t->time.tm_mday);
regs[8] = bin2bcd(t->time.tm_mon + 1);
@@ -1354,14 +1380,12 @@ static int ds1307_probe(struct i2c_client *client,
{
struct ds1307 *ds1307;
int err = -ENODEV;
- int tmp, wday;
+ int tmp;
const struct chip_desc *chip;
bool want_irq;
bool ds1307_can_wakeup_device = false;
unsigned char regs[8];
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
- struct rtc_time tm;
- unsigned long timestamp;
u8 trickle_charger_setup = 0;
ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL);
@@ -1641,25 +1665,6 @@ read_rtc:
bin2bcd(tmp));
}
- /*
- * Some IPs have weekday reset value = 0x1 which might not correct
- * hence compute the wday using the current date/month/year values
- */
- ds1307_get_time(ds1307->dev, &tm);
- wday = tm.tm_wday;
- timestamp = rtc_tm_to_time64(&tm);
- rtc_time64_to_tm(timestamp, &tm);
-
- /*
- * Check if reset wday is different from the computed wday
- * If different then set the wday which we computed using
- * timestamp
- */
- if (wday != tm.tm_wday)
- regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY,
- MCP794XX_REG_WEEKDAY_WDAY_MASK,
- tm.tm_wday + 1);
-
if (want_irq || ds1307_can_wakeup_device) {
device_set_wakeup_capable(ds1307->dev, true);
set_bit(HAS_ALARM, &ds1307->flags);
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index aa0d2c6f1edc..4d5b007d7fc6 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -216,9 +216,16 @@ static int ds1390_probe(struct spi_device *spi)
return res;
}
+static const struct of_device_id ds1390_of_match[] = {
+ { .compatible = "dallas,ds1390" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ds1390_of_match);
+
static struct spi_driver ds1390_driver = {
.driver = {
.name = "rtc-ds1390",
+ .of_match_table = of_match_ptr(ds1390_of_match),
},
.probe = ds1390_probe,
};
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1b2dcb58c0ab..1e95312a6f2e 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -398,42 +398,37 @@ static const struct rtc_class_ops ds1511_rtc_ops = {
.alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
};
-static ssize_t
-ds1511_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *ba,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_read(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- *buf++ = rtc_read(DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ *(char *)buf++ = rtc_read(DS1511_RAMDATA);
- return count;
+ return 0;
}
-static ssize_t
-ds1511_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- rtc_write(*buf++, DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ rtc_write(*(char *)buf++, DS1511_RAMDATA);
- return count;
+ return 0;
}
-static struct bin_attribute ds1511_nvram_attr = {
- .attr = {
- .name = "nvram",
- .mode = S_IRUGO | S_IWUSR,
- },
+static struct nvmem_config ds1511_nvmem_cfg = {
+ .name = "ds1511_nvram",
+ .word_size = 1,
+ .stride = 1,
.size = DS1511_RAM_MAX,
- .read = ds1511_nvram_read,
- .write = ds1511_nvram_write,
+ .reg_read = ds1511_nvram_read,
+ .reg_write = ds1511_nvram_write,
};
static int ds1511_rtc_probe(struct platform_device *pdev)
@@ -477,11 +472,20 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &ds1511_rtc_ops, THIS_MODULE);
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(pdata->rtc))
return PTR_ERR(pdata->rtc);
+ pdata->rtc->ops = &ds1511_rtc_ops;
+
+ ds1511_nvmem_cfg.priv = &pdev->dev;
+ pdata->rtc->nvmem_config = &ds1511_nvmem_cfg;
+ pdata->rtc->nvram_old_abi = true;
+
+ ret = rtc_register_device(pdata->rtc);
+ if (ret)
+ return ret;
+
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
@@ -496,26 +500,6 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
}
}
- ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (ret)
- dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n",
- ds1511_nvram_attr.attr.name);
-
- return 0;
-}
-
-static int ds1511_rtc_remove(struct platform_device *pdev)
-{
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (pdata->irq > 0) {
- /*
- * disable the alarm interrupt
- */
- rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
- rtc_read(RTC_CMD1);
- }
return 0;
}
@@ -524,7 +508,6 @@ MODULE_ALIAS("platform:ds1511");
static struct platform_driver ds1511_rtc_driver = {
.probe = ds1511_rtc_probe,
- .remove = ds1511_rtc_remove,
.driver = {
.name = "ds1511",
},
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 64989afffa3d..ff65a7d2b9c9 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -82,7 +82,7 @@ static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int timeout = 1000;
+ int timeout = 10000;
do {
ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
@@ -94,7 +94,7 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int ret, timeout = 1000;
+ int ret, timeout = 10000;
ret = jz4740_rtc_wait_write_ready(rtc);
if (ret != 0)
@@ -368,7 +368,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
if (ret) {
- dev_err(&pdev->dev, "Could not write write to RTC registers\n");
+ dev_err(&pdev->dev, "Could not write to RTC registers\n");
return ret;
}
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index f4c070ea8384..c90fba3ed861 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -154,6 +154,8 @@ struct m41t80_data {
struct rtc_device *rtc;
#ifdef CONFIG_COMMON_CLK
struct clk_hw sqw;
+ unsigned long freq;
+ unsigned int sqwe;
#endif
};
@@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
#ifdef CONFIG_COMMON_CLK
#define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
-static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long m41t80_decode_freq(int setting)
+{
+ return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
+ M41T80_SQW_MAX_FREQ >> setting;
+}
+
+static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
struct i2c_client *client = m41t80->client;
int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
M41T80_REG_WDAY : M41T80_REG_SQW;
int ret = i2c_smbus_read_byte_data(client, reg_sqw);
- unsigned long val = M41T80_SQW_MAX_FREQ;
if (ret < 0)
return 0;
+ return m41t80_decode_freq(ret >> 4);
+}
- ret >>= 4;
- if (ret == 0)
- val = 0;
- else if (ret > 1)
- val = val / (1 << ret);
-
- return val;
+static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return sqw_to_m41t80_data(hw)->freq;
}
static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- int i, freq = M41T80_SQW_MAX_FREQ;
-
- if (freq <= rate)
- return freq;
-
- for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
- freq /= 1 << i;
- if (freq <= rate)
- return freq;
- }
-
- return 0;
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ return M41T80_SQW_MAX_FREQ;
+ if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ return M41T80_SQW_MAX_FREQ / 4;
+ if (!rate)
+ return 0;
+ return 1 << ilog2(rate);
}
static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
M41T80_REG_WDAY : M41T80_REG_SQW;
int reg, ret, val = 0;
- if (rate) {
- if (!is_power_of_2(rate))
- return -EINVAL;
- val = ilog2(rate);
- if (val == ilog2(M41T80_SQW_MAX_FREQ))
- val = 1;
- else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
- val = ilog2(M41T80_SQW_MAX_FREQ) - val;
- else
- return -EINVAL;
- }
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ val = 1;
+ else if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ val = 2;
+ else if (rate)
+ val = 15 - ilog2(rate);
reg = i2c_smbus_read_byte_data(client, reg_sqw);
if (reg < 0)
@@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
reg = (reg & 0x0f) | (val << 4);
ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
- if (ret < 0)
- return ret;
-
- return -EINVAL;
+ if (!ret)
+ m41t80->freq = m41t80_decode_freq(val);
+ return ret;
}
static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
@@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
else
ret &= ~M41T80_ALMON_SQWE;
- return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ if (!ret)
+ m41t80->sqwe = enable;
+ return ret;
}
static int m41t80_sqw_prepare(struct clk_hw *hw)
@@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw)
static int m41t80_sqw_is_prepared(struct clk_hw *hw)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
- struct i2c_client *client = m41t80->client;
- int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
-
- if (ret < 0)
- return ret;
-
- return !!(ret & M41T80_ALMON_SQWE);
+ return sqw_to_m41t80_data(hw)->sqwe;
}
static const struct clk_ops m41t80_sqw_ops = {
@@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
init.parent_names = NULL;
init.num_parents = 0;
m41t80->sqw.init = &init;
+ m41t80->freq = m41t80_get_freq(m41t80);
/* optional override of the clockname */
of_property_read_string(node, "clock-output-names", &init.name);
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 02af045305dd..d9aea9b6d9cd 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -163,35 +163,30 @@ static const struct rtc_class_ops m48t86_rtc_ops = {
.proc = m48t86_rtc_proc,
};
-static ssize_t m48t86_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- buf[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
+ ((u8 *)buf)[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static ssize_t m48t86_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- m48t86_writeb(dev, buf[i], M48T86_NVRAM(off + i));
+ m48t86_writeb(dev, ((u8 *)buf)[i], M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static BIN_ATTR(nvram, 0644, m48t86_nvram_read, m48t86_nvram_write,
- M48T86_NVRAM_LEN);
-
/*
* The RTC is an optional feature at purchase time on some Technologic Systems
* boards. Verify that it actually exists by checking if the last two bytes
@@ -223,11 +218,21 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
return false;
}
+static struct nvmem_config m48t86_nvmem_cfg = {
+ .name = "m48t86_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = M48T86_NVRAM_LEN,
+ .reg_read = m48t86_nvram_read,
+ .reg_write = m48t86_nvram_write,
+};
+
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
struct resource *res;
unsigned char reg;
+ int err;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -254,25 +259,25 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- info->rtc = devm_rtc_device_register(&pdev->dev, "m48t86",
- &m48t86_rtc_ops, THIS_MODULE);
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc))
return PTR_ERR(info->rtc);
+ info->rtc->ops = &m48t86_rtc_ops;
+
+ m48t86_nvmem_cfg.priv = &pdev->dev;
+ info->rtc->nvmem_config = &m48t86_nvmem_cfg;
+ info->rtc->nvram_old_abi = true;
+
+ err = rtc_register_device(info->rtc);
+ if (err)
+ return err;
+
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
dev_info(&pdev->dev, "battery %s\n",
(reg & M48T86_D_VRT) ? "ok" : "exhausted");
- if (device_create_bin_file(&pdev->dev, &bin_attr_nvram))
- dev_err(&pdev->dev, "failed to create nvram sysfs entry\n");
-
- return 0;
-}
-
-static int m48t86_rtc_remove(struct platform_device *pdev)
-{
- device_remove_bin_file(&pdev->dev, &bin_attr_nvram);
return 0;
}
@@ -281,7 +286,6 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.name = "rtc-m48t86",
},
.probe = m48t86_rtc_probe,
- .remove = m48t86_rtc_remove,
};
module_platform_driver(m48t86_rtc_platform_driver);
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
new file mode 100644
index 000000000000..d79b9ae4d237
--- /dev/null
+++ b/drivers/rtc/rtc-mt7622.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for MediaTek SoC based RTC
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define MTK_RTC_DEV KBUILD_MODNAME
+
+#define MTK_RTC_PWRCHK1 0x4
+#define RTC_PWRCHK1_MAGIC 0xc6
+
+#define MTK_RTC_PWRCHK2 0x8
+#define RTC_PWRCHK2_MAGIC 0x9a
+
+#define MTK_RTC_KEY 0xc
+#define RTC_KEY_MAGIC 0x59
+
+#define MTK_RTC_PROT1 0x10
+#define RTC_PROT1_MAGIC 0xa3
+
+#define MTK_RTC_PROT2 0x14
+#define RTC_PROT2_MAGIC 0x57
+
+#define MTK_RTC_PROT3 0x18
+#define RTC_PROT3_MAGIC 0x67
+
+#define MTK_RTC_PROT4 0x1c
+#define RTC_PROT4_MAGIC 0xd2
+
+#define MTK_RTC_CTL 0x20
+#define RTC_RC_STOP BIT(0)
+
+#define MTK_RTC_DEBNCE 0x2c
+#define RTC_DEBNCE_MASK GENMASK(2, 0)
+
+#define MTK_RTC_INT 0x30
+#define RTC_INT_AL_STA BIT(4)
+
+/*
+ * Ranges from 0x40 to 0x78 provide RTC time setup for year, month,
+ * day of month, day of week, hour, minute and second.
+ */
+#define MTK_RTC_TREG(_t, _f) (0x40 + (0x4 * (_f)) + ((_t) * 0x20))
+
+#define MTK_RTC_AL_CTL 0x7c
+#define RTC_AL_EN BIT(0)
+#define RTC_AL_ALL GENMASK(7, 0)
+
+/*
+ * The offset is used in the translation for the year between in struct
+ * rtc_time and in hardware register MTK_RTC_TREG(x,MTK_YEA)
+ */
+#define MTK_RTC_TM_YR_OFFSET 100
+
+/*
+ * The lowest value for the valid tm_year. RTC hardware would take incorrectly
+ * tm_year 100 as not a leap year and thus it is also required being excluded
+ * from the valid options.
+ */
+#define MTK_RTC_TM_YR_L (MTK_RTC_TM_YR_OFFSET + 1)
+
+/*
+ * The most year the RTC can hold is 99 and the next to 99 in year register
+ * would be wraparound to 0, for MT7622.
+ */
+#define MTK_RTC_HW_YR_LIMIT 99
+
+/* The highest value for the valid tm_year */
+#define MTK_RTC_TM_YR_H (MTK_RTC_TM_YR_OFFSET + MTK_RTC_HW_YR_LIMIT)
+
+/* Simple macro helps to check whether the hardware supports the tm_year */
+#define MTK_RTC_TM_YR_VALID(_y) ((_y) >= MTK_RTC_TM_YR_L && \
+ (_y) <= MTK_RTC_TM_YR_H)
+
+/* Types of the function the RTC provides are time counter and alarm. */
+enum {
+ MTK_TC,
+ MTK_AL,
+};
+
+/* Indexes are used for the pointer to relevant registers in MTK_RTC_TREG */
+enum {
+ MTK_YEA,
+ MTK_MON,
+ MTK_DOM,
+ MTK_DOW,
+ MTK_HOU,
+ MTK_MIN,
+ MTK_SEC
+};
+
+struct mtk_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+};
+
+static void mtk_w32(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ writel_relaxed(val, rtc->base + reg);
+}
+
+static u32 mtk_r32(struct mtk_rtc *rtc, u32 reg)
+{
+ return readl_relaxed(rtc->base + reg);
+}
+
+static void mtk_rmw(struct mtk_rtc *rtc, u32 reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = mtk_r32(rtc, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(rtc, reg, val);
+}
+
+static void mtk_set(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, 0, val);
+}
+
+static void mtk_clr(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, val, 0);
+}
+
+static void mtk_rtc_hw_init(struct mtk_rtc *hw)
+{
+ /* The setup of the init sequence is for allowing RTC got to work */
+ mtk_w32(hw, MTK_RTC_PWRCHK1, RTC_PWRCHK1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PWRCHK2, RTC_PWRCHK2_MAGIC);
+ mtk_w32(hw, MTK_RTC_KEY, RTC_KEY_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT1, RTC_PROT1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT2, RTC_PROT2_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT3, RTC_PROT3_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT4, RTC_PROT4_MAGIC);
+ mtk_rmw(hw, MTK_RTC_DEBNCE, RTC_DEBNCE_MASK, 0);
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+}
+
+static void mtk_rtc_get_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year, mon, mday, wday, hour, min, sec;
+
+ /*
+ * Read again until the field of the second is not changed which
+ * ensures all fields in the consistent state. Note that MTK_SEC must
+ * be read first. In this way, it guarantees the others remain not
+ * changed when the results for two MTK_SEC consecutive reads are same.
+ */
+ do {
+ sec = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC));
+ min = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN));
+ hour = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU));
+ wday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW));
+ mday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM));
+ mon = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MON));
+ year = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA));
+ } while (sec != mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC)));
+
+ tm->tm_sec = sec;
+ tm->tm_min = min;
+ tm->tm_hour = hour;
+ tm->tm_wday = wday;
+ tm->tm_mday = mday;
+ tm->tm_mon = mon - 1;
+
+ /* Rebase to the absolute year which userspace queries */
+ tm->tm_year = year + MTK_RTC_TM_YR_OFFSET;
+}
+
+static void mtk_rtc_set_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year;
+
+ /* Rebase to the relative year which RTC hardware requires */
+ year = tm->tm_year - MTK_RTC_TM_YR_OFFSET;
+
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA), year);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MON), tm->tm_mon + 1);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW), tm->tm_wday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM), tm->tm_mday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU), tm->tm_hour);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN), tm->tm_min);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC), tm->tm_sec);
+}
+
+static irqreturn_t mtk_rtc_alarmirq(int irq, void *id)
+{
+ struct mtk_rtc *hw = (struct mtk_rtc *)id;
+ u32 irq_sta;
+
+ irq_sta = mtk_r32(hw, MTK_RTC_INT);
+ if (irq_sta & RTC_INT_AL_STA) {
+ /* Stop alarm also implicitly disables the alarm interrupt */
+ mtk_w32(hw, MTK_RTC_AL_CTL, 0);
+ rtc_update_irq(hw->rtc, 1, RTC_IRQF | RTC_AF);
+
+ /* Ack alarm interrupt status */
+ mtk_w32(hw, MTK_RTC_INT, RTC_INT_AL_STA);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mtk_rtc_gettime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ mtk_rtc_get_alarm_or_time(hw, tm, MTK_TC);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mtk_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (!MTK_RTC_TM_YR_VALID(tm->tm_year))
+ return -EINVAL;
+
+ /* Stop time counter before setting a new one*/
+ mtk_set(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ mtk_rtc_set_alarm_or_time(hw, tm, MTK_TC);
+
+ /* Restart the time counter */
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ return 0;
+}
+
+static int mtk_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ mtk_rtc_get_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ wkalrm->enabled = !!(mtk_r32(hw, MTK_RTC_AL_CTL) & RTC_AL_EN);
+ wkalrm->pending = !!(mtk_r32(hw, MTK_RTC_INT) & RTC_INT_AL_STA);
+
+ return 0;
+}
+
+static int mtk_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ if (!MTK_RTC_TM_YR_VALID(alrm_tm->tm_year))
+ return -EINVAL;
+
+ /*
+ * Stop the alarm also implicitly including disables interrupt before
+ * setting a new one.
+ */
+ mtk_clr(hw, MTK_RTC_AL_CTL, RTC_AL_EN);
+
+ /*
+ * Avoid contention between mtk_rtc_setalarm and IRQ handler so that
+ * disabling the interrupt and awaiting for pending IRQ handler to
+ * complete.
+ */
+ synchronize_irq(hw->irq);
+
+ mtk_rtc_set_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ /* Restart the alarm with the new setup */
+ mtk_w32(hw, MTK_RTC_AL_CTL, RTC_AL_ALL);
+
+ return 0;
+}
+
+static const struct rtc_class_ops mtk_rtc_ops = {
+ .read_time = mtk_rtc_gettime,
+ .set_time = mtk_rtc_settime,
+ .read_alarm = mtk_rtc_getalarm,
+ .set_alarm = mtk_rtc_setalarm,
+};
+
+static const struct of_device_id mtk_rtc_match[] = {
+ { .compatible = "mediatek,mt7622-rtc" },
+ { .compatible = "mediatek,soc-rtc" },
+ {},
+};
+
+static int mtk_rtc_probe(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw;
+ struct resource *res;
+ int ret;
+
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base))
+ return PTR_ERR(hw->base);
+
+ hw->clk = devm_clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock\n");
+ return PTR_ERR(hw->clk);
+ }
+
+ ret = clk_prepare_enable(hw->clk);
+ if (ret)
+ return ret;
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ ret = hw->irq;
+ goto err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, hw->irq, mtk_rtc_alarmirq,
+ 0, dev_name(&pdev->dev), hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't request IRQ\n");
+ goto err;
+ }
+
+ mtk_rtc_hw_init(hw);
+
+ device_init_wakeup(&pdev->dev, true);
+
+ hw->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &mtk_rtc_ops, THIS_MODULE);
+ if (IS_ERR(hw->rtc)) {
+ ret = PTR_ERR(hw->rtc);
+ dev_err(&pdev->dev, "Unable to register device\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ clk_disable_unprepare(hw->clk);
+
+ return ret;
+}
+
+static int mtk_rtc_remove(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(hw->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_rtc_suspend(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static int mtk_rtc_resume(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_rtc_pm_ops, mtk_rtc_suspend, mtk_rtc_resume);
+
+#define MTK_RTC_PM_OPS (&mtk_rtc_pm_ops)
+#else /* CONFIG_PM */
+#define MTK_RTC_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver mtk_rtc_driver = {
+ .probe = mtk_rtc_probe,
+ .remove = mtk_rtc_remove,
+ .driver = {
+ .name = MTK_RTC_DEV,
+ .of_match_table = mtk_rtc_match,
+ .pm = MTK_RTC_PM_OPS,
+ },
+};
+
+module_platform_driver(mtk_rtc_driver);
+
+MODULE_DESCRIPTION("MediaTek SoC based RTC Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 13f7cd11c07e..1d666ac9ef70 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -70,6 +70,10 @@
#define OMAP_RTC_COMP_MSB_REG 0x50
#define OMAP_RTC_OSC_REG 0x54
+#define OMAP_RTC_SCRATCH0_REG 0x60
+#define OMAP_RTC_SCRATCH1_REG 0x64
+#define OMAP_RTC_SCRATCH2_REG 0x68
+
#define OMAP_RTC_KICK0_REG 0x6c
#define OMAP_RTC_KICK1_REG 0x70
@@ -667,6 +671,45 @@ static struct pinctrl_desc rtc_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static int omap_rtc_scratch_read(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ for (i = 0; i < bytes / 4; i++)
+ val[i] = rtc_readl(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4));
+
+ return 0;
+}
+
+static int omap_rtc_scratch_write(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ rtc->type->unlock(rtc);
+ for (i = 0; i < bytes / 4; i++)
+ rtc_writel(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4), val[i]);
+ rtc->type->lock(rtc);
+
+ return 0;
+}
+
+static struct nvmem_config omap_rtc_nvmem_config = {
+ .name = "omap_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = OMAP_RTC_KICK0_REG - OMAP_RTC_SCRATCH0_REG,
+ .reg_read = omap_rtc_scratch_read,
+ .reg_write = omap_rtc_scratch_write,
+};
+
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
@@ -797,13 +840,16 @@ static int omap_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, true);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &omap_rtc_ops, THIS_MODULE);
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
goto err;
}
+ rtc->rtc->ops = &omap_rtc_ops;
+ omap_rtc_nvmem_config.priv = rtc;
+ rtc->rtc->nvmem_config = &omap_rtc_nvmem_config;
+
/* handle periodic and alarm irqs */
ret = devm_request_irq(&pdev->dev, rtc->irq_timer, rtc_irq, 0,
dev_name(&rtc->rtc->dev), rtc);
@@ -830,9 +876,14 @@ static int omap_rtc_probe(struct platform_device *pdev)
rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
if (IS_ERR(rtc->pctldev)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
- return PTR_ERR(rtc->pctldev);
+ ret = PTR_ERR(rtc->pctldev);
+ goto err;
}
+ ret = rtc_register_device(rtc->rtc);
+ if (ret)
+ goto err;
+
return 0;
err:
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 28c48b3c1946..c312af0db729 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -35,6 +35,9 @@
#define REG_MONTHS 0x08
#define REG_YEARS 0x09
+#define REG_OFFSET 0x0e
+#define REG_OFFSET_MODE BIT(7)
+
struct pcf8523 {
struct rtc_device *rtc;
};
@@ -272,10 +275,47 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
#define pcf8523_rtc_ioctl NULL
#endif
+static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+ u8 value;
+ s8 val;
+
+ err = pcf8523_read(client, REG_OFFSET, &value);
+ if (err < 0)
+ return err;
+
+ /* sign extend the 7-bit offset value */
+ val = value << 1;
+ *offset = (value & REG_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
+
+ return 0;
+}
+
+static int pcf8523_rtc_set_offset(struct device *dev, long offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ long reg_m0, reg_m1;
+ u8 value;
+
+ reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L);
+ reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L);
+
+ if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset))
+ value = reg_m0 & 0x7f;
+ else
+ value = (reg_m1 & 0x7f) | REG_OFFSET_MODE;
+
+ return pcf8523_write(client, REG_OFFSET, value);
+}
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
.ioctl = pcf8523_rtc_ioctl,
+ .read_offset = pcf8523_rtc_read_offset,
+ .set_offset = pcf8523_rtc_set_offset,
};
static int pcf8523_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
new file mode 100644
index 000000000000..ea04e9f0930b
--- /dev/null
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -0,0 +1,220 @@
+/*
+ * drivers/rtc/rtc-pcf85363.c
+ *
+ * Driver for NXP PCF85363 real-time clock.
+ *
+ * Copyright (C) 2017 Eric Nelson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based loosely on rtc-8583 by Russell King, Wolfram Sang and Juergen Beisert
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/*
+ * Date/Time registers
+ */
+#define DT_100THS 0x00
+#define DT_SECS 0x01
+#define DT_MINUTES 0x02
+#define DT_HOURS 0x03
+#define DT_DAYS 0x04
+#define DT_WEEKDAYS 0x05
+#define DT_MONTHS 0x06
+#define DT_YEARS 0x07
+
+/*
+ * Alarm registers
+ */
+#define DT_SECOND_ALM1 0x08
+#define DT_MINUTE_ALM1 0x09
+#define DT_HOUR_ALM1 0x0a
+#define DT_DAY_ALM1 0x0b
+#define DT_MONTH_ALM1 0x0c
+#define DT_MINUTE_ALM2 0x0d
+#define DT_HOUR_ALM2 0x0e
+#define DT_WEEKDAY_ALM2 0x0f
+#define DT_ALARM_EN 0x10
+
+/*
+ * Time stamp registers
+ */
+#define DT_TIMESTAMP1 0x11
+#define DT_TIMESTAMP2 0x17
+#define DT_TIMESTAMP3 0x1d
+#define DT_TS_MODE 0x23
+
+/*
+ * control registers
+ */
+#define CTRL_OFFSET 0x24
+#define CTRL_OSCILLATOR 0x25
+#define CTRL_BATTERY 0x26
+#define CTRL_PIN_IO 0x27
+#define CTRL_FUNCTION 0x28
+#define CTRL_INTA_EN 0x29
+#define CTRL_INTB_EN 0x2a
+#define CTRL_FLAGS 0x2b
+#define CTRL_RAMBYTE 0x2c
+#define CTRL_WDOG 0x2d
+#define CTRL_STOP_EN 0x2e
+#define CTRL_RESETS 0x2f
+#define CTRL_RAM 0x40
+
+#define NVRAM_SIZE 0x40
+
+static struct i2c_driver pcf85363_driver;
+
+struct pcf85363 {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct nvmem_config nvmem_cfg;
+ struct regmap *regmap;
+};
+
+static int pcf85363_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int ret, len = sizeof(buf);
+
+ /* read the RTC date and time registers all at once */
+ ret = regmap_bulk_read(pcf85363->regmap, DT_100THS, buf, len);
+ if (ret) {
+ dev_err(dev, "%s: error %d\n", __func__, ret);
+ return ret;
+ }
+
+ tm->tm_year = bcd2bin(buf[DT_YEARS]);
+ /* adjust for 1900 base of rtc_time */
+ tm->tm_year += 100;
+
+ tm->tm_wday = buf[DT_WEEKDAYS] & 7;
+ buf[DT_SECS] &= 0x7F;
+ tm->tm_sec = bcd2bin(buf[DT_SECS]);
+ buf[DT_MINUTES] &= 0x7F;
+ tm->tm_min = bcd2bin(buf[DT_MINUTES]);
+ tm->tm_hour = bcd2bin(buf[DT_HOURS]);
+ tm->tm_mday = bcd2bin(buf[DT_DAYS]);
+ tm->tm_mon = bcd2bin(buf[DT_MONTHS]) - 1;
+
+ return 0;
+}
+
+static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int len = sizeof(buf);
+
+ buf[DT_100THS] = 0;
+ buf[DT_SECS] = bin2bcd(tm->tm_sec);
+ buf[DT_MINUTES] = bin2bcd(tm->tm_min);
+ buf[DT_HOURS] = bin2bcd(tm->tm_hour);
+ buf[DT_DAYS] = bin2bcd(tm->tm_mday);
+ buf[DT_WEEKDAYS] = tm->tm_wday;
+ buf[DT_MONTHS] = bin2bcd(tm->tm_mon + 1);
+ buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
+
+ return regmap_bulk_write(pcf85363->regmap, DT_100THS,
+ buf, len);
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = pcf85363_rtc_read_time,
+ .set_time = pcf85363_rtc_set_time,
+};
+
+static int pcf85363_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_read(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static int pcf85363_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_write(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int pcf85363_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pcf85363 *pcf85363;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ pcf85363 = devm_kzalloc(&client->dev, sizeof(struct pcf85363),
+ GFP_KERNEL);
+ if (!pcf85363)
+ return -ENOMEM;
+
+ pcf85363->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(pcf85363->regmap)) {
+ dev_err(&client->dev, "regmap allocation failed\n");
+ return PTR_ERR(pcf85363->regmap);
+ }
+
+ pcf85363->dev = &client->dev;
+ i2c_set_clientdata(client, pcf85363);
+
+ pcf85363->rtc = devm_rtc_allocate_device(pcf85363->dev);
+ if (IS_ERR(pcf85363->rtc))
+ return PTR_ERR(pcf85363->rtc);
+
+ pcf85363->nvmem_cfg.name = "pcf85363-";
+ pcf85363->nvmem_cfg.word_size = 1;
+ pcf85363->nvmem_cfg.stride = 1;
+ pcf85363->nvmem_cfg.size = NVRAM_SIZE;
+ pcf85363->nvmem_cfg.reg_read = pcf85363_nvram_read;
+ pcf85363->nvmem_cfg.reg_write = pcf85363_nvram_write;
+ pcf85363->nvmem_cfg.priv = pcf85363;
+ pcf85363->rtc->nvmem_config = &pcf85363->nvmem_cfg;
+ pcf85363->rtc->ops = &rtc_ops;
+
+ return rtc_register_device(pcf85363->rtc);
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "nxp,pcf85363" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver pcf85363_driver = {
+ .driver = {
+ .name = "pcf85363",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = pcf85363_probe,
+};
+
+module_i2c_driver(pcf85363_driver);
+
+MODULE_AUTHOR("Eric Nelson");
+MODULE_DESCRIPTION("pcf85363 I2C RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index cea6ea4df970..3efc86c25d27 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -387,7 +387,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
if (err)
return err;
- return pcf8563_set_alarm_mode(client, 1);
+ return pcf8563_set_alarm_mode(client, !!tm->enabled);
}
static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
@@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
return 0;
buf &= PCF8563_REG_CLKO_F_MASK;
- return clkout_rates[ret];
+ return clkout_rates[buf];
}
static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e1687e19c59f..82eb7da2c478 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -308,10 +308,9 @@ static int pl031_remove(struct amba_device *adev)
dev_pm_clear_wake_irq(&adev->dev);
device_init_wakeup(&adev->dev, false);
- free_irq(adev->irq[0], ldata);
+ if (adev->irq[0])
+ free_irq(adev->irq[0], ldata);
rtc_device_unregister(ldata->rtc);
- iounmap(ldata->base);
- kfree(ldata);
amba_release_regions(adev);
return 0;
@@ -322,25 +321,28 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
int ret;
struct pl031_local *ldata;
struct pl031_vendor_data *vendor = id->data;
- struct rtc_class_ops *ops = &vendor->ops;
+ struct rtc_class_ops *ops;
unsigned long time, data;
ret = amba_request_regions(adev, NULL);
if (ret)
goto err_req;
- ldata = kzalloc(sizeof(struct pl031_local), GFP_KERNEL);
- if (!ldata) {
+ ldata = devm_kzalloc(&adev->dev, sizeof(struct pl031_local),
+ GFP_KERNEL);
+ ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops),
+ GFP_KERNEL);
+ if (!ldata || !ops) {
ret = -ENOMEM;
goto out;
}
- ldata->vendor = vendor;
-
- ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
+ ldata->vendor = vendor;
+ ldata->base = devm_ioremap(&adev->dev, adev->res.start,
+ resource_size(&adev->res));
if (!ldata->base) {
ret = -ENOMEM;
- goto out_no_remap;
+ goto out;
}
amba_set_drvdata(adev, ldata);
@@ -373,28 +375,32 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ if (!adev->irq[0]) {
+ /* When there's no interrupt, no point in exposing the alarm */
+ ops->read_alarm = NULL;
+ ops->set_alarm = NULL;
+ ops->alarm_irq_enable = NULL;
+ }
+
device_init_wakeup(&adev->dev, true);
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
- goto out_no_rtc;
+ goto out;
}
- if (request_irq(adev->irq[0], pl031_interrupt,
- vendor->irqflags, "rtc-pl031", ldata)) {
- ret = -EIO;
- goto out_no_irq;
+ if (adev->irq[0]) {
+ ret = request_irq(adev->irq[0], pl031_interrupt,
+ vendor->irqflags, "rtc-pl031", ldata);
+ if (ret)
+ goto out_no_irq;
+ dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
- dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
return 0;
out_no_irq:
rtc_device_unregister(ldata->rtc);
-out_no_rtc:
- iounmap(ldata->base);
-out_no_remap:
- kfree(ldata);
out:
amba_release_regions(adev);
err_req:
@@ -446,7 +452,7 @@ static struct pl031_vendor_data stv2_pl031 = {
.irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
};
-static struct amba_id pl031_ids[] = {
+static const struct amba_id pl031_ids[] = {
{
.id = 0x00041031,
.mask = 0x000fffff,
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index aa09771de04f..3d6174eb32f6 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -282,13 +282,13 @@ static int rv3029_eeprom_read(struct device *dev, u8 reg,
static int rv3029_eeprom_write(struct device *dev, u8 reg,
u8 const buf[], size_t len)
{
- int ret, err;
+ int ret;
size_t i;
u8 tmp;
- err = rv3029_eeprom_enter(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_enter(dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < len; i++, reg++) {
ret = rv3029_read_regs(dev, reg, &tmp, 1);
@@ -304,11 +304,11 @@ static int rv3029_eeprom_write(struct device *dev, u8 reg,
break;
}
- err = rv3029_eeprom_exit(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_exit(dev);
+ if (ret < 0)
+ return ret;
- return ret;
+ return 0;
}
static int rv3029_eeprom_update_bits(struct device *dev,
@@ -876,6 +876,8 @@ static const struct i2c_device_id rv3029_id[] = {
MODULE_DEVICE_TABLE(i2c, rv3029_id);
static const struct of_device_id rv3029_of_match[] = {
+ { .compatible = "microcrystal,rv3029" },
+ /* Backward compatibility only, do not use compatibles below: */
{ .compatible = "rv3029" },
{ .compatible = "rv3029c2" },
{ .compatible = "mc,rv3029c2" },
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 1ed3403ff8ac..5c5938ab3d86 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -24,7 +24,6 @@
#define RX8010_MDAY 0x14
#define RX8010_MONTH 0x15
#define RX8010_YEAR 0x16
-#define RX8010_YEAR 0x16
#define RX8010_RESV17 0x17
#define RX8010_ALMIN 0x18
#define RX8010_ALHOUR 0x19
@@ -36,7 +35,7 @@
#define RX8010_CTRL 0x1F
/* 0x20 to 0x2F are user registers */
#define RX8010_RESV30 0x30
-#define RX8010_RESV31 0x32
+#define RX8010_RESV31 0x31
#define RX8010_IRQ 0x32
#define RX8010_EXT_WADA BIT(3)
@@ -248,7 +247,7 @@ static int rx8010_init_client(struct i2c_client *client)
rx8010->ctrlreg = (ctrl[1] & ~RX8010_CTRL_TEST);
- return err;
+ return 0;
}
static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -277,7 +276,7 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
- return err;
+ return 0;
}
static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
new file mode 100644
index 000000000000..d544d5268757
--- /dev/null
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define SPRD_RTC_SEC_CNT_VALUE 0x0
+#define SPRD_RTC_MIN_CNT_VALUE 0x4
+#define SPRD_RTC_HOUR_CNT_VALUE 0x8
+#define SPRD_RTC_DAY_CNT_VALUE 0xc
+#define SPRD_RTC_SEC_CNT_UPD 0x10
+#define SPRD_RTC_MIN_CNT_UPD 0x14
+#define SPRD_RTC_HOUR_CNT_UPD 0x18
+#define SPRD_RTC_DAY_CNT_UPD 0x1c
+#define SPRD_RTC_SEC_ALM_UPD 0x20
+#define SPRD_RTC_MIN_ALM_UPD 0x24
+#define SPRD_RTC_HOUR_ALM_UPD 0x28
+#define SPRD_RTC_DAY_ALM_UPD 0x2c
+#define SPRD_RTC_INT_EN 0x30
+#define SPRD_RTC_INT_RAW_STS 0x34
+#define SPRD_RTC_INT_CLR 0x38
+#define SPRD_RTC_INT_MASK_STS 0x3C
+#define SPRD_RTC_SEC_ALM_VALUE 0x40
+#define SPRD_RTC_MIN_ALM_VALUE 0x44
+#define SPRD_RTC_HOUR_ALM_VALUE 0x48
+#define SPRD_RTC_DAY_ALM_VALUE 0x4c
+#define SPRD_RTC_SPG_VALUE 0x50
+#define SPRD_RTC_SPG_UPD 0x54
+#define SPRD_RTC_SEC_AUXALM_UPD 0x60
+#define SPRD_RTC_MIN_AUXALM_UPD 0x64
+#define SPRD_RTC_HOUR_AUXALM_UPD 0x68
+#define SPRD_RTC_DAY_AUXALM_UPD 0x6c
+
+/* BIT & MASK definition for SPRD_RTC_INT_* registers */
+#define SPRD_RTC_SEC_EN BIT(0)
+#define SPRD_RTC_MIN_EN BIT(1)
+#define SPRD_RTC_HOUR_EN BIT(2)
+#define SPRD_RTC_DAY_EN BIT(3)
+#define SPRD_RTC_ALARM_EN BIT(4)
+#define SPRD_RTC_HRS_FORMAT_EN BIT(5)
+#define SPRD_RTC_AUXALM_EN BIT(6)
+#define SPRD_RTC_SPG_UPD_EN BIT(7)
+#define SPRD_RTC_SEC_UPD_EN BIT(8)
+#define SPRD_RTC_MIN_UPD_EN BIT(9)
+#define SPRD_RTC_HOUR_UPD_EN BIT(10)
+#define SPRD_RTC_DAY_UPD_EN BIT(11)
+#define SPRD_RTC_ALMSEC_UPD_EN BIT(12)
+#define SPRD_RTC_ALMMIN_UPD_EN BIT(13)
+#define SPRD_RTC_ALMHOUR_UPD_EN BIT(14)
+#define SPRD_RTC_ALMDAY_UPD_EN BIT(15)
+#define SPRD_RTC_INT_MASK GENMASK(15, 0)
+
+#define SPRD_RTC_TIME_INT_MASK \
+ (SPRD_RTC_SEC_UPD_EN | SPRD_RTC_MIN_UPD_EN | \
+ SPRD_RTC_HOUR_UPD_EN | SPRD_RTC_DAY_UPD_EN)
+
+#define SPRD_RTC_ALMTIME_INT_MASK \
+ (SPRD_RTC_ALMSEC_UPD_EN | SPRD_RTC_ALMMIN_UPD_EN | \
+ SPRD_RTC_ALMHOUR_UPD_EN | SPRD_RTC_ALMDAY_UPD_EN)
+
+#define SPRD_RTC_ALM_INT_MASK \
+ (SPRD_RTC_SEC_EN | SPRD_RTC_MIN_EN | \
+ SPRD_RTC_HOUR_EN | SPRD_RTC_DAY_EN | \
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN)
+
+/* second/minute/hour/day values mask definition */
+#define SPRD_RTC_SEC_MASK GENMASK(5, 0)
+#define SPRD_RTC_MIN_MASK GENMASK(5, 0)
+#define SPRD_RTC_HOUR_MASK GENMASK(4, 0)
+#define SPRD_RTC_DAY_MASK GENMASK(15, 0)
+
+/* alarm lock definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_ALMLOCK_MASK GENMASK(7, 0)
+#define SPRD_RTC_ALM_UNLOCK 0xa5
+#define SPRD_RTC_ALM_LOCK (~SPRD_RTC_ALM_UNLOCK & \
+ SPRD_RTC_ALMLOCK_MASK)
+
+/* SPG values definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_POWEROFF_ALM_FLAG BIT(8)
+#define SPRD_RTC_POWER_RESET_FLAG BIT(9)
+
+/* timeout of synchronizing time and alarm registers (us) */
+#define SPRD_RTC_POLL_TIMEOUT 200000
+#define SPRD_RTC_POLL_DELAY_US 20000
+
+struct sprd_rtc {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ struct device *dev;
+ u32 base;
+ int irq;
+ bool valid;
+};
+
+/*
+ * The Spreadtrum RTC controller has 3 groups registers, including time, normal
+ * alarm and auxiliary alarm. The time group registers are used to set RTC time,
+ * the normal alarm registers are used to set normal alarm, and the auxiliary
+ * alarm registers are used to set auxiliary alarm. Both alarm event and
+ * auxiliary alarm event can wake up system from deep sleep, but only alarm
+ * event can power up system from power down status.
+ */
+enum sprd_rtc_reg_types {
+ SPRD_RTC_TIME,
+ SPRD_RTC_ALARM,
+ SPRD_RTC_AUX_ALARM,
+};
+
+static int sprd_rtc_clear_alarm_ints(struct sprd_rtc *rtc)
+{
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALM_INT_MASK);
+}
+
+static int sprd_rtc_disable_ints(struct sprd_rtc *rtc)
+{
+ int ret;
+
+ ret = regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_INT_MASK, 0);
+ if (ret)
+ return ret;
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_INT_MASK);
+}
+
+static int sprd_rtc_lock_alarm(struct sprd_rtc *rtc, bool lock)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ val &= ~(SPRD_RTC_ALMLOCK_MASK | SPRD_RTC_POWEROFF_ALM_FLAG);
+ if (lock)
+ val |= SPRD_RTC_ALM_LOCK;
+ else
+ val |= SPRD_RTC_ALM_UNLOCK | SPRD_RTC_POWEROFF_ALM_FLAG;
+
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_SPG_UPD, val);
+ if (ret)
+ return ret;
+
+ /* wait until the SPG value is updated successfully */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_get_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t *secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg;
+ u32 val, sec, min, hour, day;
+ int ret;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_VALUE;
+ min_reg = SPRD_RTC_MIN_CNT_VALUE;
+ hour_reg = SPRD_RTC_HOUR_CNT_VALUE;
+ day_reg = SPRD_RTC_DAY_CNT_VALUE;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_VALUE;
+ min_reg = SPRD_RTC_MIN_ALM_VALUE;
+ hour_reg = SPRD_RTC_HOUR_ALM_VALUE;
+ day_reg = SPRD_RTC_DAY_ALM_VALUE;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(rtc->regmap, rtc->base + sec_reg, &val);
+ if (ret)
+ return ret;
+
+ sec = val & SPRD_RTC_SEC_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + min_reg, &val);
+ if (ret)
+ return ret;
+
+ min = val & SPRD_RTC_MIN_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + hour_reg, &val);
+ if (ret)
+ return ret;
+
+ hour = val & SPRD_RTC_HOUR_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + day_reg, &val);
+ if (ret)
+ return ret;
+
+ day = val & SPRD_RTC_DAY_MASK;
+ *secs = (((time64_t)(day * 24) + hour) * 60 + min) * 60 + sec;
+ return 0;
+}
+
+static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg, sts_mask;
+ u32 sec, min, hour, day, val;
+ int ret, rem;
+
+ /* convert seconds to RTC time format */
+ day = div_s64_rem(secs, 86400, &rem);
+ hour = rem / 3600;
+ rem -= hour * 3600;
+ min = rem / 60;
+ sec = rem - min * 60;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_UPD;
+ min_reg = SPRD_RTC_MIN_CNT_UPD;
+ hour_reg = SPRD_RTC_HOUR_CNT_UPD;
+ day_reg = SPRD_RTC_DAY_CNT_UPD;
+ sts_mask = SPRD_RTC_TIME_INT_MASK;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_UPD;
+ min_reg = SPRD_RTC_MIN_ALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_ALM_UPD;
+ day_reg = SPRD_RTC_DAY_ALM_UPD;
+ sts_mask = SPRD_RTC_ALMTIME_INT_MASK;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ sts_mask = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(rtc->regmap, rtc->base + sec_reg, sec);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + min_reg, min);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + hour_reg, hour);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + day_reg, day);
+ if (ret)
+ return ret;
+
+ if (type == SPRD_RTC_AUX_ALARM)
+ return 0;
+
+ /*
+ * Since the time and normal alarm registers are put in always-power-on
+ * region supplied by VDDRTC, then these registers changing time will
+ * be very long, about 125ms. Thus here we should wait until all
+ * values are updated successfully.
+ */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ ((val & sts_mask) == sts_mask),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret < 0) {
+ dev_err(rtc->dev, "set time/alarm values timeout\n");
+ return ret;
+ }
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ sts_mask);
+}
+
+static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_AUXALM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ int ret;
+
+ /* clear the auxiliary alarm interrupt status */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_AUX_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_AUXALM_EN);
+ } else {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN, 0);
+ }
+
+ return ret;
+}
+
+static int sprd_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+
+ if (!rtc->valid) {
+ dev_warn(dev, "RTC values are invalid\n");
+ return -EINVAL;
+ }
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_TIME, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(tm);
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs);
+ if (ret)
+ return ret;
+
+ if (!rtc->valid) {
+ /*
+ * Set SPRD_RTC_POWER_RESET_FLAG to indicate now RTC has valid
+ * time values.
+ */
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_SPG_UPD,
+ SPRD_RTC_POWER_RESET_FLAG,
+ SPRD_RTC_POWER_RESET_FLAG);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS,
+ val, (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n",
+ ret);
+ return ret;
+ }
+
+ rtc->valid = true;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+ u32 val;
+
+ /*
+ * If aie_timer is enabled, we should get the normal alarm time.
+ * Otherwise we should get auxiliary alarm time.
+ */
+ if (rtc->rtc && rtc->rtc->aie_timer.enabled == 0)
+ return sprd_rtc_read_aux_alarm(dev, alrm);
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_ALARM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_ALARM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ struct rtc_time aie_time =
+ rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires);
+ int ret;
+
+ /*
+ * We have 2 groups alarms: normal alarm and auxiliary alarm. Since
+ * both normal alarm event and auxiliary alarm event can wake up system
+ * from deep sleep, but only alarm event can power up system from power
+ * down status. Moreover we do not need to poll about 125ms when
+ * updating auxiliary alarm registers. Thus we usually set auxiliary
+ * alarm when wake up system from deep sleep, and for other scenarios,
+ * we should set normal alarm with polling status.
+ *
+ * So here we check if the alarm time is set by aie_timer, if yes, we
+ * should set normal alarm, if not, we should set auxiliary alarm which
+ * means it is just a wake event.
+ */
+ if (!rtc->rtc->aie_timer.enabled || rtc_tm_sub(&aie_time, &alrm->time))
+ return sprd_rtc_set_aux_alarm(dev, alrm);
+
+ /* clear the alarm interrupt status firstly */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ /* unlock the alarm to enable the alarm function. */
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN, 0);
+
+ /*
+ * Lock the alarm function in case fake alarm event will power
+ * up systems.
+ */
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
+static int sprd_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
+
+ if (enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, 0);
+
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
+static const struct rtc_class_ops sprd_rtc_ops = {
+ .read_time = sprd_rtc_read_time,
+ .set_time = sprd_rtc_set_time,
+ .read_alarm = sprd_rtc_read_alarm,
+ .set_alarm = sprd_rtc_set_alarm,
+ .alarm_irq_enable = sprd_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t sprd_rtc_handler(int irq, void *dev_id)
+{
+ struct sprd_rtc *rtc = dev_id;
+ int ret;
+
+ ret = sprd_rtc_clear_alarm_ints(rtc);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If the SPRD_RTC_POWER_RESET_FLAG was not set, which means the RTC has
+ * been powered down, so the RTC time values are invalid.
+ */
+ rtc->valid = (val & SPRD_RTC_POWER_RESET_FLAG) ? true : false;
+ return 0;
+}
+
+static int sprd_rtc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct sprd_rtc *rtc;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!rtc->regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(node, "reg", &rtc->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RTC base address\n");
+ return ret;
+ }
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ if (rtc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get RTC irq number\n");
+ return rtc->irq;
+ }
+
+ rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rtc);
+
+ /* clear all RTC interrupts and disable all RTC interrupts */
+ ret = sprd_rtc_disable_ints(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to disable RTC interrupts\n");
+ return ret;
+ }
+
+ /* check if RTC time values are valid */
+ ret = sprd_rtc_check_power_down(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to check RTC time values\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ sprd_rtc_handler,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ pdev->name, rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request RTC irq\n");
+ return ret;
+ }
+
+ rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &sprd_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
+
+ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+}
+
+static int sprd_rtc_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, 0);
+ return 0;
+}
+
+static const struct of_device_id sprd_rtc_of_match[] = {
+ { .compatible = "sprd,sc2731-rtc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_rtc_of_match);
+
+static struct platform_driver sprd_rtc_driver = {
+ .driver = {
+ .name = "sprd-rtc",
+ .of_match_table = sprd_rtc_of_match,
+ },
+ .probe = sprd_rtc_probe,
+ .remove = sprd_rtc_remove,
+};
+module_platform_driver(sprd_rtc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum RTC Device Driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index e364550eb9a7..92ff2edb86a6 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -72,9 +72,10 @@ since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf)
retval = rtc_read_time(to_rtc_device(dev), &tm);
if (retval == 0) {
- unsigned long time;
- rtc_tm_to_time(&tm, &time);
- retval = sprintf(buf, "%lu\n", time);
+ time64_t time;
+
+ time = rtc_tm_to_time64(&tm);
+ retval = sprintf(buf, "%lld\n", time);
}
return retval;
@@ -132,7 +133,7 @@ static ssize_t
wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
- unsigned long alarm;
+ time64_t alarm;
struct rtc_wkalrm alm;
/* Don't show disabled alarms. For uniformity, RTC alarms are
@@ -145,8 +146,8 @@ wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
*/
retval = rtc_read_alarm(to_rtc_device(dev), &alm);
if (retval == 0 && alm.enabled) {
- rtc_tm_to_time(&alm.time, &alarm);
- retval = sprintf(buf, "%lu\n", alarm);
+ alarm = rtc_tm_to_time64(&alm.time);
+ retval = sprintf(buf, "%lld\n", alarm);
}
return retval;
@@ -157,8 +158,8 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
ssize_t retval;
- unsigned long now, alarm;
- unsigned long push = 0;
+ time64_t now, alarm;
+ time64_t push = 0;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
const char *buf_ptr;
@@ -170,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
retval = rtc_read_time(rtc, &alm.time);
if (retval < 0)
return retval;
- rtc_tm_to_time(&alm.time, &now);
+ now = rtc_tm_to_time64(&alm.time);
buf_ptr = buf;
if (*buf_ptr == '+') {
@@ -181,7 +182,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
} else
adjust = 1;
}
- retval = kstrtoul(buf_ptr, 0, &alarm);
+ retval = kstrtos64(buf_ptr, 0, &alarm);
if (retval)
return retval;
if (adjust) {
@@ -197,7 +198,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
return retval;
if (alm.enabled) {
if (push) {
- rtc_tm_to_time(&alm.time, &push);
+ push = rtc_tm_to_time64(&alm.time);
alarm += push;
} else
return -EBUSY;
@@ -212,7 +213,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
*/
alarm = now + 300;
}
- rtc_time_to_tm(alarm, &alm.time);
+ rtc_time64_to_tm(alarm, &alm.time);
retval = rtc_set_alarm(rtc, &alm);
return (retval < 0) ? retval : n;
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 65b432a096fe..0c34d3b81279 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -52,6 +52,7 @@ struct xgene_rtc_dev {
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
+ unsigned int irq_enabled;
};
static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -104,15 +105,19 @@ static int xgene_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
return 0;
}
+static int xgene_rtc_alarm_irq_enabled(struct device *dev)
+{
+ struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
+
+ return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 1 : 0;
+}
+
static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
- unsigned long rtc_time;
unsigned long alarm_time;
- rtc_time = readl(pdata->csr_base + RTC_CCVR);
rtc_tm_to_time(&alrm->time, &alarm_time);
-
pdata->alarm_time = alarm_time;
writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR);
@@ -180,12 +185,18 @@ static int xgene_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Couldn't get the clock for RTC\n");
return -ENODEV;
}
- clk_prepare_enable(pdata->clk);
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret)
+ return ret;
/* Turn on the clock and the crystal */
writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
- device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+ clk_disable_unprepare(pdata->clk);
+ return ret;
+ }
pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&xgene_rtc_ops, THIS_MODULE);
@@ -210,45 +221,55 @@ static int xgene_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int xgene_rtc_suspend(struct device *dev)
+static int __maybe_unused xgene_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
irq = platform_get_irq(pdev, 0);
+
+ /*
+ * If this RTC alarm will be used for waking the system up,
+ * don't disable it of course. Else we just disable the alarm
+ * and await suspension.
+ */
if (device_may_wakeup(&pdev->dev)) {
if (!enable_irq_wake(irq))
pdata->irq_wake = 1;
} else {
+ pdata->irq_enabled = xgene_rtc_alarm_irq_enabled(dev);
xgene_rtc_alarm_irq_enable(dev, 0);
- clk_disable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
}
-
return 0;
}
-static int xgene_rtc_resume(struct device *dev)
+static int __maybe_unused xgene_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
+ int rc;
irq = platform_get_irq(pdev, 0);
+
if (device_may_wakeup(&pdev->dev)) {
if (pdata->irq_wake) {
disable_irq_wake(irq);
pdata->irq_wake = 0;
}
} else {
- clk_enable(pdata->clk);
- xgene_rtc_alarm_irq_enable(dev, 1);
+ rc = clk_prepare_enable(pdata->clk);
+ if (rc) {
+ dev_err(dev, "Unable to enable clock error %d\n", rc);
+ return rc;
+ }
+ xgene_rtc_alarm_irq_enable(dev, pdata->irq_enabled);
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(xgene_rtc_pm_ops, xgene_rtc_suspend, xgene_rtc_resume);
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index e5225ad9c5b1..2fdab400c1fe 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the S/390 specific device drivers
#
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 31f014b57bfc..bc27d716aa6b 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
comment "S/390 block device drivers"
depends on S390 && BLOCK
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 29f35e29d480..d4e8dff673cc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -70,8 +71,8 @@ static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
-static void dasd_device_timeout(unsigned long);
-static void dasd_block_timeout(unsigned long);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
@@ -119,9 +120,7 @@ struct dasd_device *dasd_alloc_device(void)
(void (*)(unsigned long)) dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
- init_timer(&device->timer);
- device->timer.function = dasd_device_timeout;
- device->timer.data = (unsigned long) device;
+ timer_setup(&device->timer, dasd_device_timeout, 0);
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
INIT_WORK(&device->reload_device, do_reload_device);
@@ -163,9 +162,7 @@ struct dasd_block *dasd_alloc_block(void)
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
- init_timer(&block->timer);
- block->timer.function = dasd_block_timeout;
- block->timer.data = (unsigned long) block;
+ timer_setup(&block->timer, dasd_block_timeout, 0);
spin_lock_init(&block->profile.lock);
return block;
@@ -762,7 +759,7 @@ static void dasd_profile_end_add_data(struct dasd_profile_info *data,
/* in case of an overflow, reset the whole profile */
if (data->dasd_io_reqs == UINT_MAX) {
memset(data, 0, sizeof(*data));
- getnstimeofday(&data->starttod);
+ ktime_get_real_ts64(&data->starttod);
}
data->dasd_io_reqs++;
data->dasd_io_sects += sectors;
@@ -897,7 +894,7 @@ void dasd_profile_reset(struct dasd_profile *profile)
return;
}
memset(data, 0, sizeof(*data));
- getnstimeofday(&data->starttod);
+ ktime_get_real_ts64(&data->starttod);
spin_unlock_bh(&profile->lock);
}
@@ -914,7 +911,7 @@ int dasd_profile_on(struct dasd_profile *profile)
kfree(data);
return 0;
}
- getnstimeofday(&data->starttod);
+ ktime_get_real_ts64(&data->starttod);
profile->data = data;
spin_unlock_bh(&profile->lock);
return 0;
@@ -998,8 +995,8 @@ static void dasd_stats_array(struct seq_file *m, unsigned int *array)
static void dasd_stats_seq_print(struct seq_file *m,
struct dasd_profile_info *data)
{
- seq_printf(m, "start_time %ld.%09ld\n",
- data->starttod.tv_sec, data->starttod.tv_nsec);
+ seq_printf(m, "start_time %lld.%09ld\n",
+ (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
@@ -1560,12 +1557,12 @@ EXPORT_SYMBOL(dasd_start_IO);
* The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
* DASD_CQR_QUEUED for 2) and 3).
*/
-static void dasd_device_timeout(unsigned long ptr)
+static void dasd_device_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_device *device;
- device = (struct dasd_device *) ptr;
+ device = from_timer(device, t, timer);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
@@ -2628,12 +2625,12 @@ EXPORT_SYMBOL(dasd_cancel_req);
* is waiting for something that may not come reliably, (e.g. a state
* change interrupt)
*/
-static void dasd_block_timeout(unsigned long ptr)
+static void dasd_block_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_block *block;
- block = (struct dasd_block *) ptr;
+ block = from_timer(block, t, timer);
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index c95a4784c191..e7cd28ff1984 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 98fb28e49d2c..f035c2f25d35 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 8eafcd5fa004..a2edf2a7ace9 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -530,10 +531,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
- if (startpriv->uid.type != UA_BASE_DEVICE) {
+ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
pfxdata->validity.verify_base = 1;
- if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
- pfxdata->validity.hyper_pav = 1;
+
+ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+ pfxdata->validity.verify_base = 1;
+ pfxdata->validity.hyper_pav = 1;
}
rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
@@ -3414,10 +3417,12 @@ static int prepare_itcw(struct itcw *itcw,
pfxdata.validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
- if (startpriv->uid.type != UA_BASE_DEVICE) {
+ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+ pfxdata.validity.verify_base = 1;
+
+ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
pfxdata.validity.verify_base = 1;
- if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
- pfxdata.validity.hyper_pav = 1;
+ pfxdata.validity.hyper_pav = 1;
}
switch (cmd) {
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 6168ccdb389c..a6b132f7e869 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index b095a23bcc0c..96709b1a7bf8 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -441,7 +441,7 @@ struct dasd_profile_info {
unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
/* new data */
- struct timespec starttod; /* time of start or last reset */
+ struct timespec64 starttod; /* time of start or last reset */
unsigned int dasd_io_alias; /* requests using an alias */
unsigned int dasd_io_tpm; /* requests using transport mode */
unsigned int dasd_read_reqs; /* total number of read requests */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 7abb240847c0..6aaefb780436 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dcssblk.c -- the S/390 block driver for dcss memory
*
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index eb51893c74a4..b4130c7880d8 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block driver for s390 storage class memory.
*
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 571a0709e1e5..2a6334ca750e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xpram.c -- the S/390 expanded memory RAM-disk
*
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 97c4c9fdd53d..ab0b243a947d 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
comment "S/390 character device drivers"
depends on S390
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 353f0bebcf8c..8c9d412b6d33 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -282,9 +282,9 @@ static void raw3215_start_io(struct raw3215_info *raw)
/*
* Function to start a delayed output after RAW3215_TIMEOUT seconds
*/
-static void raw3215_timeout(unsigned long __data)
+static void raw3215_timeout(struct timer_list *t)
{
- struct raw3215_info *raw = (struct raw3215_info *) __data;
+ struct raw3215_info *raw = from_timer(raw, t, timer);
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -670,7 +670,7 @@ static struct raw3215_info *raw3215_alloc_info(void)
return NULL;
}
- setup_timer(&info->timer, raw3215_timeout, (unsigned long)info);
+ timer_setup(&info->timer, raw3215_timeout, 0);
init_waitqueue_head(&info->empty_wait);
tasklet_init(&info->tlet, raw3215_wakeup, (unsigned long)info);
tty_port_init(&info->port);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index be3e3c1206c2..fd2146bcc0ad 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -69,7 +69,7 @@ static struct con3270 *condev;
#define CON_UPDATE_STATUS 4 /* Update status line. */
#define CON_UPDATE_ALL 8 /* Recreate screen. */
-static void con3270_update(struct con3270 *);
+static void con3270_update(struct timer_list *);
/*
* Setup timeout for a device. On timeout trigger an update.
@@ -205,8 +205,9 @@ con3270_write_callback(struct raw3270_request *rq, void *data)
* Update console display.
*/
static void
-con3270_update(struct con3270 *cp)
+con3270_update(struct timer_list *t)
{
+ struct con3270 *cp = from_timer(cp, t, timer);
struct raw3270_request *wrq;
char wcc, prolog[6];
unsigned long flags;
@@ -552,7 +553,7 @@ con3270_flush(void)
con3270_update_status(cp);
while (cp->update_flags != 0) {
spin_unlock_irqrestore(&cp->view.lock, flags);
- con3270_update(cp);
+ con3270_update(&cp->timer);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
}
@@ -623,8 +624,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
- setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
- (unsigned long) condev);
+ timer_setup(&condev->timer, con3270_update, 0);
tasklet_init(&condev->readlet,
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
diff --git a/drivers/s390/char/defkeymap.map b/drivers/s390/char/defkeymap.map
index 353b3f268824..f4c095612a02 100644
--- a/drivers/s390/char/defkeymap.map
+++ b/drivers/s390/char/defkeymap.map
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Default keymap for 3270 (ebcdic codepage 037).
keymaps 0-1,4-5
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index c4518168fd02..61822480a2a0 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - fullscreen driver.
*
diff --git a/drivers/s390/char/hmcdrv_mod.c b/drivers/s390/char/hmcdrv_mod.c
index 251a318a9b75..1447d0887225 100644
--- a/drivers/s390/char/hmcdrv_mod.c
+++ b/drivers/s390/char/hmcdrv_mod.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HMC Drive DVD Module
*
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 027ac6ae5eea..bf4ab4efed73 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for reading z/VM *MONITOR service records.
*
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 571a7e352755..76c158c41510 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for writing z/VM *MONITOR service records.
*
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 5d4f053d7c38..f8cd2935fbfd 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - core functions.
*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 41d8aa96801f..e4e2df7a478e 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -136,6 +136,7 @@ static enum sclp_suspend_state_t {
#define SCLP_BUSY_INTERVAL 10
#define SCLP_RETRY_INTERVAL 30
+static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
@@ -154,25 +155,32 @@ __sclp_queue_read_req(void)
/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
-__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
- unsigned long data)
+__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
del_timer(&sclp_request_timer);
- sclp_request_timer.function = function;
- sclp_request_timer.data = data;
+ sclp_request_timer.function = cb;
sclp_request_timer.expires = jiffies + time;
add_timer(&sclp_request_timer);
}
-/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+static void sclp_request_timeout_restart(struct timer_list *unused)
+{
+ sclp_request_timeout(true);
+}
+
+static void sclp_request_timeout_normal(struct timer_list *unused)
+{
+ sclp_request_timeout(false);
+}
+
+/* Request timeout handler. Restart the request queue. If force_restart,
* force restart of running request. */
-static void
-sclp_request_timeout(unsigned long data)
+static void sclp_request_timeout(bool force_restart)
{
unsigned long flags;
spin_lock_irqsave(&sclp_lock, flags);
- if (data) {
+ if (force_restart) {
if (sclp_running_state == sclp_running_state_running) {
/* Break running state and queue NOP read event request
* to get a defined interface state. */
@@ -181,7 +189,7 @@ sclp_request_timeout(unsigned long data)
}
} else {
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
- sclp_request_timeout, 0);
+ sclp_request_timeout_normal);
}
spin_unlock_irqrestore(&sclp_lock, flags);
sclp_process_queue();
@@ -239,7 +247,7 @@ out:
* invokes callback. This timer can be set per request in situations where
* waiting too long would be harmful to the system, e.g. during SE reboot.
*/
-static void sclp_req_queue_timeout(unsigned long data)
+static void sclp_req_queue_timeout(struct timer_list *unused)
{
unsigned long flags, expires_next;
struct sclp_req *req;
@@ -276,12 +284,12 @@ __sclp_start_request(struct sclp_req *req)
req->status = SCLP_REQ_RUNNING;
sclp_running_state = sclp_running_state_running;
__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
- sclp_request_timeout, 1);
+ sclp_request_timeout_restart);
return 0;
} else if (rc == -EBUSY) {
/* Try again later */
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
- sclp_request_timeout, 0);
+ sclp_request_timeout_normal);
return 0;
}
/* Request failed */
@@ -315,7 +323,7 @@ sclp_process_queue(void)
/* Cannot abort already submitted request - could still
* be active at the SCLP */
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
- sclp_request_timeout, 0);
+ sclp_request_timeout_normal);
break;
}
do_post:
@@ -558,7 +566,7 @@ sclp_sync_wait(void)
if (timer_pending(&sclp_request_timer) &&
get_tod_clock_fast() > timeout &&
del_timer(&sclp_request_timer))
- sclp_request_timer.function(sclp_request_timer.data);
+ sclp_request_timer.function(&sclp_request_timer);
cpu_relax();
}
local_irq_disable();
@@ -915,7 +923,7 @@ static void sclp_check_handler(struct ext_code ext_code,
/* Initial init mask request timed out. Modify request state to failed. */
static void
-sclp_check_timeout(unsigned long data)
+sclp_check_timeout(struct timer_list *unused)
{
unsigned long flags;
@@ -954,7 +962,7 @@ sclp_check_interface(void)
sclp_init_req.status = SCLP_REQ_RUNNING;
sclp_running_state = sclp_running_state_running;
__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
- sclp_check_timeout, 0);
+ sclp_check_timeout);
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
@@ -1159,9 +1167,8 @@ sclp_init(void)
INIT_LIST_HEAD(&sclp_req_queue);
INIT_LIST_HEAD(&sclp_reg_list);
list_add(&sclp_state_change_event.list, &sclp_reg_list);
- init_timer(&sclp_request_timer);
- init_timer(&sclp_queue_timer);
- sclp_queue_timer.function = sclp_req_queue_timeout;
+ timer_setup(&sclp_request_timer, NULL, 0);
+ timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
/* Check interface */
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface();
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 19c25427f27f..ee6f3b563728 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Enable Asynchronous Notification via SCLP.
*
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 7027e61a6931..8966a1c1b548 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -125,7 +125,7 @@ static void sclp_console_sync_queue(void)
* temporary write buffer without further waiting on a final new line.
*/
static void
-sclp_console_timeout(unsigned long data)
+sclp_console_timeout(struct timer_list *unused)
{
sclp_conbuf_emit();
}
@@ -211,7 +211,6 @@ sclp_console_write(struct console *console, const char *message,
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
!timer_pending(&sclp_con_timer)) {
- setup_timer(&sclp_con_timer, sclp_console_timeout, 0UL);
mod_timer(&sclp_con_timer, jiffies + HZ / 10);
}
out:
@@ -332,7 +331,7 @@ sclp_console_init(void)
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
sclp_conbuf = NULL;
- init_timer(&sclp_con_timer);
+ timer_setup(&sclp_con_timer, sclp_console_timeout, 0);
/* Set output format */
if (MACHINE_IS_VM)
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 1cceefdc03e0..9f7b87d6d434 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -151,7 +151,7 @@ __sclp_ttybuf_emit(struct sclp_buffer *buffer)
* temporary write buffer.
*/
static void
-sclp_tty_timeout(unsigned long data)
+sclp_tty_timeout(struct timer_list *unused)
{
unsigned long flags;
struct sclp_buffer *buf;
@@ -218,7 +218,6 @@ static int sclp_tty_write_string(const unsigned char *str, int count, int may_fa
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
- setup_timer(&sclp_tty_timer, sclp_tty_timeout, 0UL);
mod_timer(&sclp_tty_timer, jiffies + HZ / 10);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
@@ -526,7 +525,7 @@ sclp_tty_init(void)
}
INIT_LIST_HEAD(&sclp_tty_outqueue);
spin_lock_init(&sclp_tty_lock);
- init_timer(&sclp_tty_timer);
+ timer_setup(&sclp_tty_timer, sclp_tty_timeout, 0);
sclp_ttybuf = NULL;
sclp_tty_buffer_count = 0;
if (MACHINE_IS_VM) {
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index e84395d71389..3f9a6ef650fa 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -357,7 +357,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
* Emit buffer after having waited long enough for more data to arrive.
*/
static void
-sclp_vt220_timeout(unsigned long data)
+sclp_vt220_timeout(struct timer_list *unused)
{
sclp_vt220_emit_current();
}
@@ -454,8 +454,6 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
/* Setup timer to output current console buffer after some time */
if (sclp_vt220_current_request != NULL &&
!timer_pending(&sclp_vt220_timer) && do_schedule) {
- sclp_vt220_timer.function = sclp_vt220_timeout;
- sclp_vt220_timer.data = 0UL;
sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
add_timer(&sclp_vt220_timer);
}
@@ -699,7 +697,7 @@ static int __init __sclp_vt220_init(int num_pages)
spin_lock_init(&sclp_vt220_lock);
INIT_LIST_HEAD(&sclp_vt220_empty);
INIT_LIST_HEAD(&sclp_vt220_outqueue);
- init_timer(&sclp_vt220_timer);
+ timer_setup(&sclp_vt220_timer, sclp_vt220_timeout, 0);
tty_port_init(&sclp_vt220_port);
sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0;
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index de69f0ddc321..6d73ee3f827a 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* tape device discipline for 3480/3490 tapes.
*
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index e352047ed9f7..37e65a05517f 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* tape device discipline for 3590 tapes.
*
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index e7d23048d3f0..a07102472ce9 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004
*
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 9dd4534823b3..8d3370da2dfc 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* basic function of the tape device driver
*
@@ -32,7 +33,7 @@
static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
-static void tape_long_busy_timeout(unsigned long data);
+static void tape_long_busy_timeout(struct timer_list *t);
/*
* One list to contain all tape devices of all disciplines, so
@@ -381,8 +382,7 @@ tape_generic_online(struct tape_device *device,
return -EINVAL;
}
- init_timer(&device->lb_timeout);
- device->lb_timeout.function = tape_long_busy_timeout;
+ timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);
/* Let the discipline have a go at the device. */
device->discipline = discipline;
@@ -867,18 +867,16 @@ tape_delayed_next_request(struct work_struct *work)
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
-static void tape_long_busy_timeout(unsigned long data)
+static void tape_long_busy_timeout(struct timer_list *t)
{
+ struct tape_device *device = from_timer(device, t, lb_timeout);
struct tape_request *request;
- struct tape_device *device;
- device = (struct tape_device *) data;
spin_lock_irq(get_ccwdev_lock(device->cdev));
request = list_entry(device->req_queue.next, struct tape_request, list);
BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
__tape_start_next_request(device);
- device->lb_timeout.data = 0UL;
tape_put_device(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
@@ -1157,7 +1155,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if (req->status == TAPE_REQUEST_LONG_BUSY) {
DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
if (del_timer(&device->lb_timeout)) {
- device->lb_timeout.data = 0UL;
tape_put_device(device);
__tape_start_next_request(device);
}
@@ -1212,8 +1209,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case TAPE_IO_PENDING:
break;
case TAPE_IO_LONG_BUSY:
- device->lb_timeout.data =
- (unsigned long) tape_get_device(device);
device->lb_timeout.expires = jiffies +
LONG_BUSY_TIMEOUT * HZ;
DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index e5ebe2fbee23..1c98023cffd4 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - tty functions.
*
@@ -118,7 +119,7 @@ struct tty3270 {
#define TTY_UPDATE_STATUS 8 /* Update status line. */
#define TTY_UPDATE_ALL 16 /* Recreate screen. */
-static void tty3270_update(struct tty3270 *);
+static void tty3270_update(struct timer_list *);
static void tty3270_resize_work(struct work_struct *work);
/*
@@ -361,8 +362,9 @@ tty3270_write_callback(struct raw3270_request *rq, void *data)
* Update 3270 display.
*/
static void
-tty3270_update(struct tty3270 *tp)
+tty3270_update(struct timer_list *t)
{
+ struct tty3270 *tp = from_timer(tp, t, timer);
static char invalid_sba[2] = { 0xff, 0xff };
struct raw3270_request *wrq;
unsigned long updated;
@@ -748,8 +750,7 @@ tty3270_alloc_view(void)
goto out_reset;
tty_port_init(&tp->port);
- setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
- (unsigned long) tp);
+ timer_setup(&tp->timer, tty3270_update, 0);
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 62559dc0169f..069b9ef08206 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* character device driver for reading z/VM system service records
*
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index fa90ef05afc0..52aa89424318 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index aaed778f67c4..4369662cfff5 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* zcore module to export memory content and register sets for creating system
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
@@ -7,7 +8,6 @@
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
- * License: GPL
*/
#define KMSG_COMPONENT "zdump"
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
index 95e25c1df922..140e3e4ee2fd 100644
--- a/drivers/s390/cio/blacklist.h
+++ b/drivers/s390/cio/blacklist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e2f7b6e93efd..bfec1485ca23 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bus driver for ccwgroup
*
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index f4166f80c4d4..5c94a3aec4dd 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 7b0b295b2313..c08fc5a8df0c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- channel subsystem call
*
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 8e7e19b9e92c..0015729d917d 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for s390 chsc subchannels
*
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 89216174fcbb..987bf9a8c9f7 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- low level i/o calls
*
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 7d59230e88bb..5e495c62cfa7 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Linux on zSeries Channel Measurement Facility support
*
@@ -7,20 +8,6 @@
* Cornelia Huck <cornelia.huck@de.ibm.com>
*
* original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "cio"
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index d3e504c3c362..0f11dce6e224 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver for channel subsystem
*
@@ -5,8 +6,6 @@
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
*/
#define KMSG_COMPONENT "cio"
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e5c32f4b5287..75a245f38e2e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* bus driver for ccw devices
*
@@ -5,8 +6,6 @@
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * License: GPL
*/
#define KMSG_COMPONENT "cio"
@@ -142,7 +141,7 @@ static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
-static void recovery_func(unsigned long data);
+static void recovery_func(struct timer_list *unused);
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -194,7 +193,7 @@ int __init io_subchannel_init(void)
{
int ret;
- setup_timer(&recovery_timer, recovery_func, 0);
+ timer_setup(&recovery_timer, recovery_func, 0);
ret = bus_register(&ccw_bus_type);
if (ret)
return ret;
@@ -726,7 +725,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
INIT_WORK(&priv->todo_work, ccw_device_todo);
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
- init_timer(&priv->timer);
+ timer_setup(&priv->timer, ccw_device_timeout, 0);
atomic_set(&priv->onoff, 0);
cdev->ccwlock = sch->lock;
@@ -1271,7 +1270,7 @@ static void recovery_work_func(struct work_struct *unused)
static DECLARE_WORK(recovery_work, recovery_work_func);
-static void recovery_func(unsigned long data)
+static void recovery_func(struct timer_list *unused)
{
/*
* We can't do our recovery in softirq context and it's not
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index b37c22adcc7a..f5c427ec24b1 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -4,6 +4,7 @@
#include <asm/ccwdev.h>
#include <linux/atomic.h>
+#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
@@ -134,6 +135,7 @@ int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
+void ccw_device_timeout(struct timer_list *t);
void ccw_device_set_timeout(struct ccw_device *, int);
void ccw_device_schedule_recovery(void);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index f98ea674c3d8..1319122e9d12 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* finite state machine for device handling
*
@@ -91,12 +92,12 @@ static void ccw_timeout_log(struct ccw_device *cdev)
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
-static void
-ccw_device_timeout(unsigned long data)
+void
+ccw_device_timeout(struct timer_list *t)
{
- struct ccw_device *cdev;
+ struct ccw_device_private *priv = from_timer(priv, t, timer);
+ struct ccw_device *cdev = priv->cdev;
- cdev = (struct ccw_device *) data;
spin_lock_irq(cdev->ccwlock);
if (timeout_log_enabled)
ccw_timeout_log(cdev);
@@ -118,8 +119,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
if (mod_timer(&cdev->private->timer, jiffies + expires))
return;
}
- cdev->private->timer.function = ccw_device_timeout;
- cdev->private->timer.data = (unsigned long) cdev;
cdev->private->timer.expires = jiffies + expires;
add_timer(&cdev->private->timer);
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index cf8c4ac6323a..1caf6a398760 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
*/
#include <linux/export.h>
#include <linux/init.h>
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index d14795f7110b..53468ae64b99 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for s390 eadm subchannels
*
@@ -94,9 +95,10 @@ static int eadm_subchannel_clear(struct subchannel *sch)
return 0;
}
-static void eadm_subchannel_timeout(unsigned long data)
+static void eadm_subchannel_timeout(struct timer_list *t)
{
- struct subchannel *sch = (struct subchannel *) data;
+ struct eadm_private *private = from_timer(private, t, timer);
+ struct subchannel *sch = private->sch;
spin_lock_irq(sch->lock);
EADM_LOG(1, "timeout");
@@ -118,8 +120,6 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
if (mod_timer(&private->timer, jiffies + expires))
return;
}
- private->timer.function = eadm_subchannel_timeout;
- private->timer.data = (unsigned long) sch;
private->timer.expires = jiffies + expires;
add_timer(&private->timer);
}
@@ -224,7 +224,7 @@ static int eadm_subchannel_probe(struct subchannel *sch)
return -ENOMEM;
INIT_LIST_HEAD(&private->head);
- init_timer(&private->timer);
+ timer_setup(&private->timer, eadm_subchannel_timeout, 0);
spin_lock_irq(sch->lock);
set_eadm_private(sch, private);
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
index c592087be0f1..77fde9f5ea8b 100644
--- a/drivers/s390/cio/isc.c
+++ b/drivers/s390/cio/isc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions for registration of I/O interruption subclasses on s390.
*
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 29d6b5222f1c..a6f7c2986b94 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -393,7 +393,7 @@ int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
-void qdio_outbound_timer(unsigned long data);
+void qdio_outbound_timer(struct timer_list *t);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index a4ad39ba3873..95b0efe28afb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
@@ -430,8 +431,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
q->qdio_error = QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
- if ((!q->is_input_q &&
- (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
+ q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
@@ -535,7 +536,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_ERROR:
process_buffer_error(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- atomic_sub(count, &q->nr_buf_used);
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
break;
@@ -894,9 +896,9 @@ void qdio_outbound_processing(unsigned long data)
__qdio_outbound_processing(q);
}
-void qdio_outbound_timer(unsigned long data)
+void qdio_outbound_timer(struct timer_list *t)
{
- struct qdio_q *q = (struct qdio_q *)data;
+ struct qdio_q *q = from_timer(q, t, u.out.timer);
qdio_tasklet_schedule(q);
}
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866a9ded..98f3cfdc0d02 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* qdio queue initialization
*
@@ -252,8 +253,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
tasklet_init(&q->tasklet, qdio_outbound_processing,
(unsigned long) q);
- setup_timer(&q->u.out.timer, (void(*)(unsigned long))
- &qdio_outbound_timer, (unsigned long)q);
+ timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
}
}
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 1fa53ecdc2aa..6bca1d5455d4 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Recognize and maintain s390 storage class memory.
*
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 82f05c4b8c52..ea6a2d0b2894 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* VFIO based Physical Subchannel device driver
*
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8b5658b0bec3..48d55dc9e986 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2006, 2012
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
* Holger Dengler <hd@linux.vnet.ibm.com>
*
* Adjunct processor bus.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "ap"
@@ -374,13 +361,13 @@ void ap_wait(enum ap_wait wait)
/**
* ap_request_timeout(): Handling of request timeouts
- * @data: Holds the AP device.
+ * @t: timer making this callback
*
* Handles request timeouts.
*/
-void ap_request_timeout(unsigned long data)
+void ap_request_timeout(struct timer_list *t)
{
- struct ap_queue *aq = (struct ap_queue *) data;
+ struct ap_queue *aq = from_timer(aq, t, timeout);
if (ap_suspend_flag)
return;
@@ -1203,7 +1190,7 @@ out:
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
-static void ap_config_timeout(unsigned long ptr)
+static void ap_config_timeout(struct timer_list *unused)
{
if (ap_suspend_flag)
return;
@@ -1306,7 +1293,7 @@ int __init ap_module_init(void)
goto out_bus;
/* Setup the AP bus rescan timer. */
- setup_timer(&ap_config_timer, ap_config_timeout, 0);
+ timer_setup(&ap_config_timer, ap_config_timeout, 0);
/*
* Setup the high resultion poll timer.
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 3a0e19d87e7c..e0827eaa42f1 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2006, 2012
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
* Holger Dengler <hd@linux.vnet.ibm.com>
*
* Adjunct processor bus header file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _AP_BUS_H_
@@ -241,7 +228,7 @@ void ap_flush_queue(struct ap_queue *aq);
void *ap_airq_ptr(void);
void ap_wait(enum ap_wait wait);
-void ap_request_timeout(unsigned long data);
+void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index a550d40921e7..ba3a2e13b0eb 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -634,7 +634,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
INIT_LIST_HEAD(&aq->list);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
- setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
+ timer_setup(&aq->timeout, ap_request_timeout, 0);
return aq;
}
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 8dda5bb34a2f..e7c2e4f9529a 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pkey device driver
*
* Copyright IBM Corp. 2017
* Author(s): Harald Freudenberger
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
*/
#define KMSG_COMPONENT "pkey"
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index b5f4006198b9..ce15f101ee28 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -10,20 +11,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -218,8 +205,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
weight += atomic_read(&zq->load);
pref_weight += atomic_read(&pref_zq->load);
if (weight == pref_weight)
- return &zq->queue->total_request_count >
- &pref_zq->queue->total_request_count;
+ return zq->queue->total_request_count >
+ pref_zq->queue->total_request_count;
return weight > pref_weight;
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 73541a798db7..9fff8912f6e3 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -10,20 +11,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_API_H_
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index f85dacf1c284..233e1e695208 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -10,16 +11,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 12cff6262566..011d61d8a4ae 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -7,20 +8,6 @@
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_CCA_KEY_H_
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index b97c5d5ee5a4..e701194d3611 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -9,20 +10,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 0dce4b9af184..c3c116777c93 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -7,20 +8,6 @@
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_CEX2A_H_
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index e2eebc775a37..f305538334ad 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
* Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 13df60209ed3..01598d83c60a 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -7,20 +8,6 @@
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_ERROR_H_
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index db5bde47dfb0..afe1b2bcd7ec 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -9,20 +10,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "zcrypt"
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 5cc280318ee7..0a36545cfb8e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -8,20 +9,6 @@
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_MSGTYPE50_H_
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 785620d30504..f54bef4a928e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -9,20 +10,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "zcrypt"
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 7a0d5b57821f..d314f4525518 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -8,20 +9,6 @@
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_MSGTYPE6_H_
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 600604782b65..159b0a0dd211 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -9,20 +10,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index eacafc8962f2..d678a3af83a7 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -8,20 +9,6 @@
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_PCIXCC_H_
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 4742be0eec24..720434e18007 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
@@ -10,16 +11,6 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index b2837b1c70b7..a782a207ad31 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "S/390 network device drivers"
depends on NETDEVICES && S390
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index be9f17218531..7ce98b70cad3 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2009
* Author(s):
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 8c14c6c3ad3d..eb07862bd36a 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* A generic FSM based on fsm used in isdn4linux
*
@@ -129,8 +130,9 @@ fsm_getstate_str(fsm_instance *fi)
}
static void
-fsm_expire_timer(fsm_timer *this)
+fsm_expire_timer(struct timer_list *t)
{
+ fsm_timer *this = from_timer(this, t, tl);
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
this->fi->name, this);
@@ -142,13 +144,11 @@ void
fsm_settimer(fsm_instance *fi, fsm_timer *this)
{
this->fi = fi;
- this->tl.function = (void *)fsm_expire_timer;
- this->tl.data = (long)this;
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
this);
#endif
- init_timer(&this->tl);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
}
void
@@ -170,7 +170,7 @@ fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
this->fi->name, this, millisec);
#endif
- setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
@@ -189,7 +189,7 @@ fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
#endif
del_timer(&this->tl);
- setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index e131a03262ad..92ae84a927fc 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Linux for S/390 Lan Channel Station Network Driver
*
@@ -7,20 +8,6 @@
* Rewritten by
* Frank Pavlic <fpavlic@de.ibm.com> and
* Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "lcs"
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index b9c7c1e61da2..5ce2424ca729 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IUCV network driver
*
@@ -18,21 +19,6 @@
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#define KMSG_COMPONENT "netiucv"
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 9cd569ef43ec..badf42acbf95 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -565,9 +565,9 @@ enum qeth_cq {
};
struct qeth_ipato {
- int enabled;
- int invert4;
- int invert6;
+ bool enabled;
+ bool invert4;
+ bool invert6;
struct list_head entries;
};
@@ -987,6 +987,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_recover_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 49b9efeba1bd..6c815207f4f5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -19,6 +20,11 @@
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
@@ -1474,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card)
qeth_set_intial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
- card->ipato.enabled = 0;
- card->ipato.invert4 = 0;
- card->ipato.invert6 = 0;
+ card->ipato.enabled = false;
+ card->ipato.invert4 = false;
+ card->ipato.invert6 = false;
/* init QDIO stuff */
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
@@ -6438,6 +6444,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ /* GSO segmentation builds skbs with
+ * a (small) linear part for the headers, and
+ * page frags for the data.
+ * Compared to a linear skb, the header-only part consumes an
+ * additional buffer element. This reduces buffer utilization, and
+ * hurts throughput. So compress small segments into one element.
+ */
+ if (netif_needs_gso(skb, features)) {
+ /* match skb_segment(): */
+ unsigned int doffset = skb->data - skb_mac_header(skb);
+ unsigned int hsize = skb_shinfo(skb)->gso_size;
+ unsigned int hroom = skb_headroom(skb);
+
+ /* linearize only if resulting skb allocations are order-0: */
+ if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+ features &= ~NETIF_F_SG;
+ }
+
+ return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
static int __init qeth_core_init(void)
{
int rc;
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index b22ed2a57acd..ae81534de912 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d2537c09126d..5863ea170ff2 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -960,6 +961,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_stop = qeth_l2_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
+ .ndo_features_check = qeth_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -1010,6 +1012,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->hw_features = NETIF_F_SG;
card->dev->vlan_features = NETIF_F_SG;
+ card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1028,8 +1031,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
- card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
- PAGE_SIZE;
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
netif_carrier_off(card->dev);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 194ae9b577cc..e5833837b799 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_update_ipato(struct qeth_card *card);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index aadd384316a3..ef0961e18686 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -163,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
}
}
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
- struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+ struct qeth_ipaddr *addr)
{
struct qeth_ipato_entry *ipatoe;
u8 addr_bits[128] = {0, };
@@ -173,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
if (!card->ipato.enabled)
return 0;
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ return 0;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
(addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -289,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
addr->ref_counter = 1;
- if (addr->type == QETH_IP_TYPE_NORMAL &&
- qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
@@ -604,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
/*
* IP address takeover related functions
*/
+
+/**
+ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ unsigned int i;
+
+ hash_for_each(card->ip_htable, i, addr, hnode) {
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ continue;
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+ addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+ else
+ addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
+}
+
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
@@ -615,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
kfree(ipatoe);
}
+ qeth_l3_update_ipato(card);
spin_unlock_bh(&card->ip_lock);
}
@@ -639,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
}
- if (!rc)
+ if (!rc) {
list_add_tail(&new->entry, &card->ipato.entries);
+ qeth_l3_update_ipato(card);
+ }
spin_unlock_bh(&card->ip_lock);
@@ -663,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
(proto == QETH_PROT_IPV4)? 4:16) &&
(ipatoe->mask_bits == mask_bits)) {
list_del(&ipatoe->entry);
+ qeth_l3_update_ipato(card);
kfree(ipatoe);
}
}
@@ -1376,6 +1403,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
memcpy(tmp->mac, buf, sizeof(tmp->mac));
+ tmp->is_multicast = 1;
ipm = qeth_l3_ip_from_hash(card, tmp);
if (ipm) {
@@ -2917,6 +2945,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_stop = qeth_l3_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
+ .ndo_features_check = qeth_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2957,6 +2986,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->vlan_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
+ card->dev->features |= NETIF_F_SG;
}
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2984,8 +3014,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
netif_keep_dst(card->dev);
- card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
- PAGE_SIZE;
+ netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+ PAGE_SIZE);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index bd12fdf678be..6ea2b528a64e 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- struct qeth_ipaddr *addr;
- int i, rc = 0;
+ bool enable;
+ int rc = 0;
if (!card)
return -EINVAL;
@@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
}
if (sysfs_streq(buf, "toggle")) {
- card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
- } else if (sysfs_streq(buf, "1")) {
- card->ipato.enabled = 1;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if ((addr->type == QETH_IP_TYPE_NORMAL) &&
- qeth_l3_is_addr_covered_by_ipato(card, addr))
- addr->set_flags |=
- QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else if (sysfs_streq(buf, "0")) {
- card->ipato.enabled = 0;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if (addr->set_flags &
- QETH_IPA_SETIP_TAKEOVER_FLAG)
- addr->set_flags &=
- ~QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else
+ enable = !card->ipato.enabled;
+ } else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.enabled != enable) {
+ card->ipato.enabled = enable;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert4 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert4 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert4;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert4 != invert) {
+ card->ipato.invert4 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert6 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert6 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert6;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert6 != invert) {
+ card->ipato.invert6 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index a851d34c642b..3b0c8b8a7634 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IUCV special message driver
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c
index 32515a201bbc..0a263999f7ae 100644
--- a/drivers/s390/net/smsgiucv_app.c
+++ b/drivers/s390/net/smsgiucv_app.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Deliver z/VM CP special messages (SMSG) as uevents.
*
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index 9259039e886d..9dda431ec8f3 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the S/390 specific device drivers
#
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 84752152d41f..a3a8c8d9d717 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index cbb8156bf5e0..1d91a32db08e 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -564,21 +564,24 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
* zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
* @data: ERP action (from timer data)
*/
-void zfcp_erp_timeout_handler(unsigned long data)
+void zfcp_erp_timeout_handler(struct timer_list *t)
{
- struct zfcp_erp_action *act = (struct zfcp_erp_action *) data;
+ struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+ struct zfcp_erp_action *act = fsf_req->erp_action;
+
zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
-static void zfcp_erp_memwait_handler(unsigned long data)
+static void zfcp_erp_memwait_handler(struct timer_list *t)
{
- zfcp_erp_notify((struct zfcp_erp_action *)data, 0);
+ struct zfcp_erp_action *act = from_timer(act, t, timer);
+
+ zfcp_erp_notify(act, 0);
}
static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
{
- setup_timer(&erp_action->timer, zfcp_erp_memwait_handler,
- (unsigned long) erp_action);
+ timer_setup(&erp_action->timer, zfcp_erp_memwait_handler, 0);
erp_action->timer.expires = jiffies + HZ;
add_timer(&erp_action->timer);
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8ca2ab7deaa9..bf8ea4df2bb8 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -69,7 +69,7 @@ extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
-extern void zfcp_erp_timeout_handler(unsigned long);
+extern void zfcp_erp_timeout_handler(struct timer_list *t);
/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 00fb98f7b2cd..b12cb81ad8a2 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -21,9 +21,11 @@
struct kmem_cache *zfcp_fsf_qtcb_cache;
-static void zfcp_fsf_request_timeout_handler(unsigned long data)
+static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
- struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+ struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"fsrth_1");
@@ -33,7 +35,6 @@ static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
unsigned long timeout)
{
fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
- fsf_req->timer.data = (unsigned long) fsf_req->adapter;
fsf_req->timer.expires = jiffies + timeout;
add_timer(&fsf_req->timer);
}
@@ -42,7 +43,6 @@ static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
BUG_ON(!fsf_req->erp_action);
fsf_req->timer.function = zfcp_erp_timeout_handler;
- fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
fsf_req->timer.expires = jiffies + 30 * HZ;
add_timer(&fsf_req->timer);
}
@@ -692,7 +692,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
adapter->req_no++;
INIT_LIST_HEAD(&req->list);
- init_timer(&req->timer);
+ timer_setup(&req->timer, NULL, 0);
init_completion(&req->completion);
req->adapter = adapter;
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
index f68af1f317f1..2dc4d9aab634 100644
--- a/drivers/s390/virtio/Makefile
+++ b/drivers/s390/virtio/Makefile
@@ -1,9 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for kvm guest drivers on s390
#
# Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index b18fe2014cf2..ba2e0856d22c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ccw based virtio transport
*
* Copyright IBM Corp. 2012, 2014
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*/
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index f32765d3cbd8..5c8ed7350a04 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -22,7 +22,6 @@
#include <asm/display7seg.h>
-#define D7S_MINOR 193
#define DRIVER_NAME "d7s"
#define PFX DRIVER_NAME ": "
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 5402b85b0bdc..2dbc8330d7d3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1175,7 +1175,7 @@ static void asd_start_scb_timers(struct list_head *list)
struct asd_ascb *ascb;
list_for_each_entry(ascb, list, list) {
if (!ascb->uldd_timer) {
- ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout;
+ ascb->timer.function = asd_ascb_timedout;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 4637119c09d8..2a01702d5ba7 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -42,7 +42,7 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
ascb->tasklet_complete = tasklet_complete;
ascb->uldd_timer = 1;
- ascb->timer.function = (TIMER_FUNC_TYPE)timed_out;
+ ascb->timer.function = timed_out;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index af032c46ec0e..21f6421536a0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -101,7 +101,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
-static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
@@ -837,10 +837,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
@@ -930,10 +928,8 @@ static int arcmsr_resume(struct pci_dev *pdev)
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
return 0;
controller_stop:
@@ -3459,9 +3455,9 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
}
}
-static void arcmsr_request_device_map(unsigned long pacb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
arcmsr_hbaA_request_device_map(acb);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 24388795ee9a..f4775ca70bab 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2318,9 +2318,9 @@ DEF_SCSI_QCMD(fas216_noqueue_command)
* Error handler timeout function. Indicate that we timed out,
* and wake up any error handler process so it can continue.
*/
-static void fas216_eh_timer(unsigned long data)
+static void fas216_eh_timer(struct timer_list *t)
{
- FAS216_Info *info = (FAS216_Info *)data;
+ FAS216_Info *info = from_timer(info, t, eh_timer);
fas216_log(info, LOG_ERROR, "error handling timed out\n");
@@ -2849,9 +2849,7 @@ int fas216_init(struct Scsi_Host *host)
info->rst_dev_status = -1;
info->rst_bus_status = -1;
init_waitqueue_head(&info->eh_wait);
- init_timer(&info->eh_timer);
- info->eh_timer.data = (unsigned long)info;
- info->eh_timer.function = fas216_eh_timer;
+ timer_setup(&info->eh_timer, fas216_eh_timer, 0);
spin_lock_init(&info->host_lock);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index be96aa1e5077..b3cfdd5f4d1c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5279,7 +5279,7 @@ static void beiscsi_hw_health_check(struct timer_list *t)
if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
return;
/* modify this timer to check TPE */
- phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check;
+ phba->hw_check.function = beiscsi_hw_tpe_check;
}
mod_timer(&phba->hw_check,
@@ -5367,7 +5367,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
* Timer function gets modified for TPE detection.
* Always reinit to do health check first.
*/
- phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check;
+ phba->hw_check.function = beiscsi_hw_health_check;
mod_timer(&phba->hw_check,
jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
return 0;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 5caf5f3ff642..cf0466686804 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -692,9 +692,9 @@ ext:
}
void
-bfad_bfa_tmo(unsigned long data)
+bfad_bfa_tmo(struct timer_list *t)
{
- struct bfad_s *bfad = (struct bfad_s *) data;
+ struct bfad_s *bfad = from_timer(bfad, t, hal_tmo);
unsigned long flags;
struct list_head doneq;
@@ -719,9 +719,7 @@ bfad_bfa_tmo(unsigned long data)
void
bfad_init_timer(struct bfad_s *bfad)
{
- init_timer(&bfad->hal_tmo);
- bfad->hal_tmo.function = bfad_bfa_tmo;
- bfad->hal_tmo.data = (unsigned long)bfad;
+ timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
mod_timer(&bfad->hal_tmo,
jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index cfcfff48e8e1..4fe980a6441f 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -314,7 +314,7 @@ int bfad_setup_intr(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
-void bfad_bfa_tmo(unsigned long data);
+void bfad_bfa_tmo(struct timer_list *t);
void bfad_init_timer(struct bfad_s *bfad);
int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 59a2dfbcbc69..a8ae1a019eea 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -14,8 +14,8 @@
*/
#include "bnx2fc.h"
-static void bnx2fc_upld_timer(unsigned long data);
-static void bnx2fc_ofld_timer(unsigned long data);
+static void bnx2fc_upld_timer(struct timer_list *t);
+static void bnx2fc_ofld_timer(struct timer_list *t);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
struct fcoe_port *port,
struct fc_rport_priv *rdata);
@@ -27,10 +27,10 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
-static void bnx2fc_upld_timer(unsigned long data)
+static void bnx2fc_upld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
@@ -40,10 +40,10 @@ static void bnx2fc_upld_timer(unsigned long data)
wake_up_interruptible(&tgt->upld_wait);
}
-static void bnx2fc_ofld_timer(unsigned long data)
+static void bnx2fc_ofld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
/* NOTE: This function should never be called, as
@@ -65,7 +65,7 @@ static void bnx2fc_ofld_timer(unsigned long data)
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->ofld_wait,
@@ -277,7 +277,7 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->upld_wait,
(test_bit(
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index babd79361a46..bf07735275a4 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -586,8 +586,8 @@ static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock);
if (rpl->status == CPL_ERR_CONN_EXIST &&
- csk->retry_timer.function != (TIMER_FUNC_TYPE)act_open_retry_timer) {
- csk->retry_timer.function = (TIMER_FUNC_TYPE)act_open_retry_timer;
+ csk->retry_timer.function != act_open_retry_timer) {
+ csk->retry_timer.function = act_open_retry_timer;
mod_timer(&csk->retry_timer, jiffies + HZ / 2);
} else
cxgbi_sock_fail_act_open(csk,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 266eddf17a99..406e94312d4e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -963,8 +963,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
spin_lock_bh(&csk->lock);
if (status == CPL_ERR_CONN_EXIST &&
- csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) {
- csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer;
+ csk->retry_timer.function != csk_act_open_retry_timer) {
+ csk->retry_timer.function = csk_act_open_retry_timer;
mod_timer(&csk->retry_timer, jiffies + HZ / 2);
} else
cxgbi_sock_fail_act_open(csk,
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 617802855233..38b3a9c84fd1 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3393,12 +3393,6 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
goto out;
}
- if (unlikely(!access_ok(is_write ? VERIFY_READ : VERIFY_WRITE,
- ubuf, ulen))) {
- rc = -EFAULT;
- goto out;
- }
-
buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
if (unlikely(!buf)) {
rc = -ENOMEM;
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 81f226be3e3b..4eb14301a497 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -1631,23 +1631,21 @@ void esas2r_adapter_tasklet(unsigned long context)
}
}
-static void esas2r_timer_callback(unsigned long context);
+static void esas2r_timer_callback(struct timer_list *t);
void esas2r_kickoff_timer(struct esas2r_adapter *a)
{
- init_timer(&a->timer);
+ timer_setup(&a->timer, esas2r_timer_callback, 0);
- a->timer.function = esas2r_timer_callback;
- a->timer.data = (unsigned long)a;
a->timer.expires = jiffies +
msecs_to_jiffies(100);
add_timer(&a->timer);
}
-static void esas2r_timer_callback(unsigned long context)
+static void esas2r_timer_callback(struct timer_list *t)
{
- struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+ struct esas2r_adapter *a = from_timer(a, t, timer);
set_bit(AF2_TIMER_TICK, &a->flags2);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index fff6f1851dc1..097f37de6ce9 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -49,7 +49,7 @@
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
-static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_timeout(struct timer_list *);
static void fcoe_ctlr_timer_work(struct work_struct *);
static void fcoe_ctlr_recv_work(struct work_struct *);
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
@@ -156,7 +156,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
mutex_init(&fip->ctlr_mutex);
spin_lock_init(&fip->ctlr_lock);
fip->flogi_oxid = FC_XID_UNKNOWN;
- setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+ timer_setup(&fip->timer, fcoe_ctlr_timeout, 0);
INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
skb_queue_head_init(&fip->fip_recv_list);
@@ -1786,9 +1786,9 @@ unlock:
* fcoe_ctlr_timeout() - FIP timeout handler
* @arg: The FCoE controller that timed out
*/
-static void fcoe_ctlr_timeout(unsigned long arg)
+static void fcoe_ctlr_timeout(struct timer_list *t)
{
- struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+ struct fcoe_ctlr *fip = from_timer(fip, t, timer);
schedule_work(&fip->timer_work);
}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index aacadbf20b69..e52599f44170 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -407,18 +407,18 @@ static int fnic_notify_set(struct fnic *fnic)
return err;
}
-static void fnic_notify_timer(unsigned long data)
+static void fnic_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, notify_timer);
fnic_handle_link_event(fnic);
mod_timer(&fnic->notify_timer,
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
-static void fnic_fip_notify_timer(unsigned long data)
+static void fnic_fip_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, fip_timer);
fnic_handle_fip_timer(fnic);
}
@@ -777,8 +777,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
fnic->set_vlan = fnic_set_vlan;
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
- setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
- (unsigned long)fnic);
+ timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
@@ -809,8 +808,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notify timer when using MSI interrupts */
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
- setup_timer(&fnic->notify_timer,
- fnic_notify_timer, (unsigned long)fnic);
+ timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);
/* allocate RQ buffers and post them to RQ*/
for (i = 0; i < fnic->rq_count; i++) {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 61a85ff8e459..5f503cb09508 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -839,7 +839,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
}
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -1451,7 +1451,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->dev = device;
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index d02c2a791981..5d3467fd728d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1268,7 +1268,7 @@ static void link_timeout_enable_link(struct timer_list *t)
}
}
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}
@@ -1289,13 +1289,13 @@ static void link_timeout_disable_link(struct timer_list *t)
}
}
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link;
+ hisi_hba->timer.function = link_timeout_enable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
}
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&hisi_hba->timer);
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d53429371127..cc0187965eee 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -997,7 +997,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
ipr_cmd->done = done;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+ ipr_cmd->timer.function = timeout_func;
add_timer(&ipr_cmd->timer);
@@ -8312,7 +8312,7 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
ipr_cmd->done = ipr_reset_ioa_job;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_reset_timer_done;
+ ipr_cmd->timer.function = ipr_reset_timer_done;
add_timer(&ipr_cmd->timer);
}
@@ -8397,7 +8397,7 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
}
ipr_cmd->timer.expires = jiffies + stage_time * HZ;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
@@ -8468,7 +8468,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
}
ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 1a4e701a8449..4fae253d4f3d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1214,7 +1214,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
fsp->seq_ptr = seq;
fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+ fsp->timer.function = fc_fcp_timeout;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
@@ -1307,7 +1307,7 @@ static void fc_lun_reset_send(struct timer_list *t)
return;
if (fc_fcp_lock_pkt(fsp))
return;
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_lun_reset_send;
+ fsp->timer.function = fc_lun_reset_send;
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
fc_fcp_unlock_pkt(fsp);
}
@@ -1445,7 +1445,7 @@ static void fc_fcp_timeout(struct timer_list *t)
if (fsp->lp->qfull) {
FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
fsp->timer_delay);
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+ fsp->timer.function = fc_fcp_timeout;
fc_fcp_timer_set(fsp, fsp->timer_delay);
goto unlock;
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index c7f21661b3cd..3183d63de4da 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -92,7 +92,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
task->task_done = smp_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)smp_task_timedout;
+ task->slow_task->timer.function = smp_task_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 91795eb56206..58476b728c57 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -919,7 +919,7 @@ void sas_task_abort(struct sas_task *task)
return;
if (!del_timer(&slow->timer))
return;
- slow->timer.function((TIMER_DATA_TYPE)&slow->timer);
+ slow->timer.function(&slow->timer);
return;
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index cff1c37b8d2e..cff43bd9f675 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1310,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = mvs_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)mvs_tmf_timedout;
+ task->slow_task->timer.function = mvs_tmf_timedout;
task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -2020,7 +2020,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
tmp | PHYEV_SIG_FIS);
if (phy->timer.function == NULL) {
- phy->timer.function = (TIMER_FUNC_TYPE)mvs_sig_time_out;
+ phy->timer.function = mvs_sig_time_out;
phy->timer.expires = jiffies + 5*HZ;
add_timer(&phy->timer);
}
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 5b93ed810f6e..dc4e801b2cef 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8093,9 +8093,9 @@ irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ncr53c8xx_timeout(unsigned long npref)
+static void ncr53c8xx_timeout(struct timer_list *t)
{
- struct ncb *np = (struct ncb *) npref;
+ struct ncb *np = from_timer(np, t, timer);
unsigned long flags;
struct scsi_cmnd *done_list;
@@ -8357,9 +8357,7 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
if (!np->scripth0)
goto attach_error;
- init_timer(&np->timer);
- np->timer.data = (unsigned long) np;
- np->timer.function = ncr53c8xx_timeout;
+ timer_setup(&np->timer, ncr53c8xx_timeout, 0);
/* Try to map the controller chip to virtual and physical memory. */
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 0e294e80c169..947d6017d004 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -695,7 +695,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
task->task_proto = dev->tproto;
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = pm8001_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -781,7 +781,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
task->dev = dev;
task->task_proto = dev->tproto;
task->task_done = pm8001_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
add_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4f9f115fb6a0..e58be98430b0 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -604,7 +604,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_bist_done;
+ cmd->timer.function = pmcraid_bist_done;
add_timer(&cmd->timer);
}
@@ -636,7 +636,7 @@ static void pmcraid_reset_alert_done(struct timer_list *t)
/* restart timer if some more time is available to wait */
cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
}
}
@@ -673,7 +673,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
*/
cmd->time_left = PMCRAID_RESET_TIMEOUT;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
iowrite32(DOORBELL_IOA_RESET_ALERT,
@@ -923,7 +923,7 @@ static void pmcraid_send_cmd(
if (timeout_func) {
/* setup timeout handler */
cmd->timer.expires = jiffies + timeout;
- cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+ cmd->timer.function = timeout_func;
add_timer(&cmd->timer);
}
@@ -1951,7 +1951,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
cmd->cmd_done = pmcraid_ioa_reset;
cmd->timer.expires = jiffies +
msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_timeout_handler;
+ cmd->timer.function = pmcraid_timeout_handler;
if (!timer_pending(&cmd->timer))
add_timer(&cmd->timer);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 94e402ed30f6..b141d7641a2e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4920,11 +4920,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
/* Try to fault in all of the necessary pages */
/* rw==READ means read from drive, write into memory area */
- res = get_user_pages_unlocked(
- uaddr,
- nr_pages,
- pages,
- rw == READ ? FOLL_WRITE : 0); /* don't force */
+ res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d32e3ba8863e..791a2182de53 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -565,9 +565,9 @@ static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
/*
* Linux entry point of the timer handler
*/
-static void sym53c8xx_timer(unsigned long npref)
+static void sym53c8xx_timer(struct timer_list *t)
{
- struct sym_hcb *np = (struct sym_hcb *)npref;
+ struct sym_hcb *np = from_timer(np, t, s.timer);
unsigned long flags;
spin_lock_irqsave(np->s.host->host_lock, flags);
@@ -1351,9 +1351,7 @@ static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
/*
* Start the timer daemon
*/
- init_timer(&np->s.timer);
- np->s.timer.data = (unsigned long) np;
- np->s.timer.function = sym53c8xx_timer;
+ timer_setup(&np->s.timer, sym53c8xx_timer, 0);
np->s.lasttime=0;
sym_timer (np);
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 36dec140ea0d..deecb16e7256 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
-obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
+obj-y += mediatek/
obj-$(CONFIG_ARCH_MESON) += amlogic/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-y += renesas/
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index 22acf064531f..b04f6e4aedbc 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -9,4 +9,25 @@ config MESON_GX_SOCINFO
Say yes to support decoding of Amlogic Meson GX SoC family
information about the type, package and version.
+config MESON_GX_PM_DOMAINS
+ bool "Amlogic Meson GX Power Domains driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on PM && OF
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Say yes to expose Amlogic Meson GX Power Domains as
+ Generic Power Domains.
+
+config MESON_MX_SOCINFO
+ bool "Amlogic Meson MX SoC Information driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
+ select SOC_BUS
+ help
+ Say yes to support decoding of Amlogic Meson6, Meson8,
+ Meson8b and Meson8m2 SoC family information about the type
+ and version.
+
endmenu
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
index 3e85fc462c21..8fa321893928 100644
--- a/drivers/soc/amlogic/Makefile
+++ b/drivers/soc/amlogic/Makefile
@@ -1 +1,3 @@
obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
+obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
+obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
new file mode 100644
index 000000000000..2bdeebc48901
--- /dev/null
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+/* AO Offsets */
+
+#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
+
+#define GEN_PWR_VPU_HDMI BIT(8)
+#define GEN_PWR_VPU_HDMI_ISO BIT(9)
+
+/* HHI Offsets */
+
+#define HHI_MEM_PD_REG0 (0x40 << 2)
+#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
+#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
+
+struct meson_gx_pwrc_vpu {
+ struct generic_pm_domain genpd;
+ struct regmap *regmap_ao;
+ struct regmap *regmap_hhi;
+ struct reset_control *rstc;
+ struct clk *vpu_clk;
+ struct clk *vapb_clk;
+};
+
+static inline
+struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct meson_gx_pwrc_vpu, genpd);
+}
+
+static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
+ udelay(20);
+
+ /* Power Down Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x2 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x2 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), BIT(i));
+ udelay(5);
+ }
+ udelay(20);
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
+
+ msleep(20);
+
+ clk_disable_unprepare(pd->vpu_clk);
+ clk_disable_unprepare(pd->vapb_clk);
+
+ return 0;
+}
+
+static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd)
+{
+ int ret;
+
+ ret = clk_prepare_enable(pd->vpu_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(pd->vapb_clk);
+ if (ret)
+ clk_disable_unprepare(pd->vpu_clk);
+
+ return ret;
+}
+
+static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int ret;
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, 0);
+ udelay(20);
+
+ /* Power Up Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x2 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x2 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), 0);
+ udelay(5);
+ }
+ udelay(20);
+
+ ret = reset_control_assert(pd->rstc);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, 0);
+
+ ret = reset_control_deassert(pd->rstc);
+ if (ret)
+ return ret;
+
+ ret = meson_gx_pwrc_vpu_setup_clk(pd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd)
+{
+ u32 reg;
+
+ regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg);
+
+ return (reg & GEN_PWR_VPU_HDMI);
+}
+
+static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
+ .genpd = {
+ .name = "vpu_hdmi",
+ .power_off = meson_gx_pwrc_vpu_power_off,
+ .power_on = meson_gx_pwrc_vpu_power_on,
+ },
+};
+
+static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap_ao, *regmap_hhi;
+ struct reset_control *rstc;
+ struct clk *vpu_clk;
+ struct clk *vapb_clk;
+ bool powered_off;
+ int ret;
+
+ regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ if (IS_ERR(regmap_ao)) {
+ dev_err(&pdev->dev, "failed to get regmap\n");
+ return PTR_ERR(regmap_ao);
+ }
+
+ regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "amlogic,hhi-sysctrl");
+ if (IS_ERR(regmap_hhi)) {
+ dev_err(&pdev->dev, "failed to get HHI regmap\n");
+ return PTR_ERR(regmap_hhi);
+ }
+
+ rstc = devm_reset_control_array_get(&pdev->dev, false, false);
+ if (IS_ERR(rstc)) {
+ dev_err(&pdev->dev, "failed to get reset lines\n");
+ return PTR_ERR(rstc);
+ }
+
+ vpu_clk = devm_clk_get(&pdev->dev, "vpu");
+ if (IS_ERR(vpu_clk)) {
+ dev_err(&pdev->dev, "vpu clock request failed\n");
+ return PTR_ERR(vpu_clk);
+ }
+
+ vapb_clk = devm_clk_get(&pdev->dev, "vapb");
+ if (IS_ERR(vapb_clk)) {
+ dev_err(&pdev->dev, "vapb clock request failed\n");
+ return PTR_ERR(vapb_clk);
+ }
+
+ vpu_hdmi_pd.regmap_ao = regmap_ao;
+ vpu_hdmi_pd.regmap_hhi = regmap_hhi;
+ vpu_hdmi_pd.rstc = rstc;
+ vpu_hdmi_pd.vpu_clk = vpu_clk;
+ vpu_hdmi_pd.vapb_clk = vapb_clk;
+
+ powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+
+ /* If already powered, sync the clock states */
+ if (!powered_off) {
+ ret = meson_gx_pwrc_vpu_setup_clk(&vpu_hdmi_pd);
+ if (ret)
+ return ret;
+ }
+
+ pm_genpd_init(&vpu_hdmi_pd.genpd, &pm_domain_always_on_gov,
+ powered_off);
+
+ return of_genpd_add_provider_simple(pdev->dev.of_node,
+ &vpu_hdmi_pd.genpd);
+}
+
+static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
+{
+ meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
+}
+
+static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
+ { .compatible = "amlogic,meson-gx-pwrc-vpu" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson_gx_pwrc_vpu_driver = {
+ .probe = meson_gx_pwrc_vpu_probe,
+ .shutdown = meson_gx_pwrc_vpu_shutdown,
+ .driver = {
+ .name = "meson_gx_pwrc_vpu",
+ .of_match_table = meson_gx_pwrc_vpu_match_table,
+ },
+};
+builtin_platform_driver(meson_gx_pwrc_vpu_driver);
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 89f4cf507be6..f2d8c3c53ea4 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -20,8 +20,8 @@
#define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8
#define SOCINFO_MAJOR GENMASK(31, 24)
-#define SOCINFO_MINOR GENMASK(23, 16)
-#define SOCINFO_PACK GENMASK(15, 8)
+#define SOCINFO_PACK GENMASK(23, 16)
+#define SOCINFO_MINOR GENMASK(15, 8)
#define SOCINFO_MISC GENMASK(7, 0)
static const struct meson_gx_soc_id {
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
new file mode 100644
index 000000000000..7bfff5ff22a2
--- /dev/null
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define MESON_SOCINFO_MAJOR_VER_MESON6 0x16
+#define MESON_SOCINFO_MAJOR_VER_MESON8 0x19
+#define MESON_SOCINFO_MAJOR_VER_MESON8B 0x1b
+
+#define MESON_MX_ASSIST_HW_REV 0x14c
+
+#define MESON_MX_ANALOG_TOP_METAL_REVISION 0x0
+
+#define MESON_MX_BOOTROM_MISC_VER 0x4
+
+static const char *meson_mx_socinfo_revision(unsigned int major_ver,
+ unsigned int misc_ver,
+ unsigned int metal_rev)
+{
+ unsigned int minor_ver;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ minor_ver = 0xa;
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ major_ver = 0x1d;
+
+ if (metal_rev == 0x11111111 || metal_rev == 0x11111112)
+ minor_ver = 0xa;
+ else if (metal_rev == 0x11111113)
+ minor_ver = 0xb;
+ else if (metal_rev == 0x11111133)
+ minor_ver = 0xc;
+ else
+ minor_ver = 0xd;
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ if (metal_rev == 0x11111111)
+ minor_ver = 0xa;
+ else
+ minor_ver = 0xb;
+
+ break;
+
+ default:
+ minor_ver = 0x0;
+ break;
+ }
+
+ return kasprintf(GFP_KERNEL, "Rev%X (%x - 0:%X)", minor_ver, major_ver,
+ misc_ver);
+}
+
+static const char *meson_mx_socinfo_soc_id(unsigned int major_ver,
+ unsigned int metal_rev)
+{
+ const char *soc_id;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ soc_id = "Meson6 (AML8726-MX)";
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ soc_id = "Meson8m2 (S812)";
+ else
+ soc_id = "Meson8 (S802)";
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ soc_id = "Meson8b (S805)";
+ break;
+
+ default:
+ soc_id = "Unknown";
+ break;
+ }
+
+ return kstrdup_const(soc_id, GFP_KERNEL);
+}
+
+static const struct of_device_id meson_mx_socinfo_analog_top_ids[] = {
+ { .compatible = "amlogic,meson8-analog-top", },
+ { .compatible = "amlogic,meson8b-analog-top", },
+ { /* sentinel */ }
+};
+
+int __init meson_mx_socinfo_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ struct regmap *assist_regmap, *bootrom_regmap, *analog_top_regmap;
+ unsigned int major_ver, misc_ver, metal_rev = 0;
+ int ret;
+
+ assist_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-assist");
+ if (IS_ERR(assist_regmap))
+ return PTR_ERR(assist_regmap);
+
+ bootrom_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-bootrom");
+ if (IS_ERR(bootrom_regmap))
+ return PTR_ERR(bootrom_regmap);
+
+ np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
+ if (np) {
+ analog_top_regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(analog_top_regmap))
+ return PTR_ERR(analog_top_regmap);
+
+ ret = regmap_read(analog_top_regmap,
+ MESON_MX_ANALOG_TOP_METAL_REVISION,
+ &metal_rev);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(assist_regmap, MESON_MX_ASSIST_HW_REV, &major_ver);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(bootrom_regmap, MESON_MX_BOOTROM_MISC_VER,
+ &misc_ver);
+ if (ret < 0)
+ return ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Amlogic Meson";
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->revision = meson_mx_socinfo_revision(major_ver, misc_ver,
+ metal_rev);
+ soc_dev_attr->soc_id = meson_mx_socinfo_soc_id(major_ver, metal_rev);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree_const(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ dev_info(soc_device_to_device(soc_dev), "Amlogic %s %s detected\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+}
+device_initcall(meson_mx_socinfo_init);
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index c1363c83c352..4dd03b099c89 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -72,6 +72,8 @@ static const struct at91_soc __initconst socs[] = {
"sama5d21", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH,
"sama5d22", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D225C_D1M_EXID_MATCH,
+ "sama5d225c 16MiB SiP", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH,
"sama5d23", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH,
@@ -84,10 +86,16 @@ static const struct at91_soc __initconst socs[] = {
"sama5d27", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH,
"sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D1G_EXID_MATCH,
+ "sama5d27c 128MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D5M_EXID_MATCH,
+ "sama5d27c 64MiB SiP", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH,
"sama5d28", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH,
"sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_D1G_EXID_MATCH,
+ "sama5d28c 128MiB SiP", "sama5d2"),
AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
"sama5d31", "sama5d3"),
AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index a90bd5b0ef8f..94cd5d1ab502 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -64,14 +64,18 @@ at91_soc_init(const struct at91_soc *socs);
#define SAMA5D2_CIDR_MATCH 0x0a5c08c0
#define SAMA5D21CU_EXID_MATCH 0x0000005a
+#define SAMA5D225C_D1M_EXID_MATCH 0x00000053
#define SAMA5D22CU_EXID_MATCH 0x00000059
#define SAMA5D22CN_EXID_MATCH 0x00000069
#define SAMA5D23CU_EXID_MATCH 0x00000058
#define SAMA5D24CX_EXID_MATCH 0x00000004
#define SAMA5D24CU_EXID_MATCH 0x00000014
#define SAMA5D26CU_EXID_MATCH 0x00000012
+#define SAMA5D27C_D1G_EXID_MATCH 0x00000033
+#define SAMA5D27C_D5M_EXID_MATCH 0x00000032
#define SAMA5D27CU_EXID_MATCH 0x00000011
#define SAMA5D27CN_EXID_MATCH 0x00000021
+#define SAMA5D28C_D1G_EXID_MATCH 0x00000013
#define SAMA5D28CU_EXID_MATCH 0x00000010
#define SAMA5D28CN_EXID_MATCH 0x00000020
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 49f1e2a75d61..055a845ed979 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -20,4 +20,6 @@ config SOC_BRCMSTB
If unsure, say N.
+source "drivers/soc/bcm/brcmstb/Kconfig"
+
endmenu
diff --git a/drivers/soc/bcm/brcmstb/Kconfig b/drivers/soc/bcm/brcmstb/Kconfig
new file mode 100644
index 000000000000..d36f6e03c1a6
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/Kconfig
@@ -0,0 +1,10 @@
+if SOC_BRCMSTB
+
+config BRCMSTB_PM
+ bool "Support suspend/resume for STB platforms"
+ default y
+ depends on PM
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ select ARM_CPU_SUSPEND if ARM
+
+endif # SOC_BRCMSTB
diff --git a/drivers/soc/bcm/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile
index 9120b2715d3e..01687c26535b 100644
--- a/drivers/soc/bcm/brcmstb/Makefile
+++ b/drivers/soc/bcm/brcmstb/Makefile
@@ -1 +1,2 @@
obj-y += common.o biuctrl.o
+obj-$(CONFIG_BRCMSTB_PM) += pm/
diff --git a/drivers/soc/bcm/brcmstb/pm/Makefile b/drivers/soc/bcm/brcmstb/pm/Makefile
new file mode 100644
index 000000000000..08bbd244ef11
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ARM) += s2-arm.o pm-arm.o
+AFLAGS_s2-arm.o := -march=armv7-a
+obj-$(CONFIG_BMIPS_GENERIC) += s2-mips.o s3-mips.o pm-mips.o
diff --git a/drivers/soc/bcm/brcmstb/pm/aon_defs.h b/drivers/soc/bcm/brcmstb/pm/aon_defs.h
new file mode 100644
index 000000000000..fb936abd847d
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/aon_defs.h
@@ -0,0 +1,113 @@
+/*
+ * Always ON (AON) register interface between bootloader and Linux
+ *
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMSTB_AON_DEFS_H__
+#define __BRCMSTB_AON_DEFS_H__
+
+#include <linux/compiler.h>
+
+/* Magic number in upper 16-bits */
+#define BRCMSTB_S3_MAGIC_MASK 0xffff0000
+#define BRCMSTB_S3_MAGIC_SHORT 0x5AFE0000
+
+enum {
+ /* Restore random key for AES memory verification (off = fixed key) */
+ S3_FLAG_LOAD_RANDKEY = (1 << 0),
+
+ /* Scratch buffer page table is present */
+ S3_FLAG_SCRATCH_BUFFER_TABLE = (1 << 1),
+
+ /* Skip all memory verification */
+ S3_FLAG_NO_MEM_VERIFY = (1 << 2),
+
+ /*
+ * Modification of this bit reserved for bootloader only.
+ * 1=PSCI started Linux, 0=Direct jump to Linux.
+ */
+ S3_FLAG_PSCI_BOOT = (1 << 3),
+
+ /*
+ * Modification of this bit reserved for bootloader only.
+ * 1=64 bit boot, 0=32 bit boot.
+ */
+ S3_FLAG_BOOTED64 = (1 << 4),
+};
+
+#define BRCMSTB_HASH_LEN (128 / 8) /* 128-bit hash */
+
+#define AON_REG_MAGIC_FLAGS 0x00
+#define AON_REG_CONTROL_LOW 0x04
+#define AON_REG_CONTROL_HIGH 0x08
+#define AON_REG_S3_HASH 0x0c /* hash of S3 params */
+#define AON_REG_CONTROL_HASH_LEN 0x1c
+#define AON_REG_PANIC 0x20
+
+#define BRCMSTB_S3_MAGIC 0x5AFEB007
+#define BRCMSTB_PANIC_MAGIC 0x512E115E
+#define BOOTLOADER_SCRATCH_SIZE 64
+#define BRCMSTB_DTU_STATE_MAP_ENTRIES (8*1024)
+#define BRCMSTB_DTU_CONFIG_ENTRIES (512)
+#define BRCMSTB_DTU_COUNT (2)
+
+#define IMAGE_DESCRIPTORS_BUFSIZE (2 * 1024)
+#define S3_BOOTLOADER_RESERVED (S3_FLAG_PSCI_BOOT | S3_FLAG_BOOTED64)
+
+struct brcmstb_bootloader_dtu_table {
+ uint32_t dtu_state_map[BRCMSTB_DTU_STATE_MAP_ENTRIES];
+ uint32_t dtu_config[BRCMSTB_DTU_CONFIG_ENTRIES];
+};
+
+/*
+ * Bootloader utilizes a custom parameter block left in DRAM for handling S3
+ * warm resume
+ */
+struct brcmstb_s3_params {
+ /* scratch memory for bootloader */
+ uint8_t scratch[BOOTLOADER_SCRATCH_SIZE];
+
+ uint32_t magic; /* BRCMSTB_S3_MAGIC */
+ uint64_t reentry; /* PA */
+
+ /* descriptors */
+ uint32_t hash[BRCMSTB_HASH_LEN / 4];
+
+ /*
+ * If 0, then ignore this parameter (there is only one set of
+ * descriptors)
+ *
+ * If non-0, then a second set of descriptors is stored at:
+ *
+ * descriptors + desc_offset_2
+ *
+ * The MAC result of both descriptors is XOR'd and stored in @hash
+ */
+ uint32_t desc_offset_2;
+
+ /*
+ * (Physical) address of a brcmstb_bootloader_scratch_table, for
+ * providing a large DRAM buffer to the bootloader
+ */
+ uint64_t buffer_table;
+
+ uint32_t spare[70];
+
+ uint8_t descriptors[IMAGE_DESCRIPTORS_BUFSIZE];
+ /*
+ * Must be last member of struct. See brcmstb_pm_s3_finish() for reason.
+ */
+ struct brcmstb_bootloader_dtu_table dtu[BRCMSTB_DTU_COUNT];
+} __packed;
+
+#endif /* __BRCMSTB_AON_DEFS_H__ */
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
new file mode 100644
index 000000000000..dcf8c8065508
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -0,0 +1,822 @@
+/*
+ * ARM-specific support for Broadcom STB S2/S3/S5 power management
+ *
+ * S2: clock gate CPUs and as many peripherals as possible
+ * S3: power off all of the chip except the Always ON (AON) island; keep DDR is
+ * self-refresh
+ * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
+ * treat this mode like a soft power-off, with wakeup allowed from AON
+ *
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "brcmstb-pm: " fmt
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/suspend.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+
+#include <asm/fncpy.h>
+#include <asm/setup.h>
+#include <asm/suspend.h>
+
+#include "pm.h"
+#include "aon_defs.h"
+
+#define SHIMPHY_DDR_PAD_CNTRL 0x8c
+
+/* Method #0 */
+#define SHIMPHY_PAD_PLL_SEQUENCE BIT(8)
+#define SHIMPHY_PAD_GATE_PLL_S3 BIT(9)
+
+/* Method #1 */
+#define PWRDWN_SEQ_NO_SEQUENCING 0
+#define PWRDWN_SEQ_HOLD_CHANNEL 1
+#define PWRDWN_SEQ_RESET_PLL 2
+#define PWRDWN_SEQ_POWERDOWN_PLL 3
+
+#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK 0x00f00000
+#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT 20
+
+#define DDR_FORCE_CKE_RST_N BIT(3)
+#define DDR_PHY_RST_N BIT(2)
+#define DDR_PHY_CKE BIT(1)
+
+#define DDR_PHY_NO_CHANNEL 0xffffffff
+
+#define MAX_NUM_MEMC 3
+
+struct brcmstb_memc {
+ void __iomem *ddr_phy_base;
+ void __iomem *ddr_shimphy_base;
+ void __iomem *ddr_ctrl;
+};
+
+struct brcmstb_pm_control {
+ void __iomem *aon_ctrl_base;
+ void __iomem *aon_sram;
+ struct brcmstb_memc memcs[MAX_NUM_MEMC];
+
+ void __iomem *boot_sram;
+ size_t boot_sram_len;
+
+ bool support_warm_boot;
+ size_t pll_status_offset;
+ int num_memc;
+
+ struct brcmstb_s3_params *s3_params;
+ dma_addr_t s3_params_pa;
+ int s3entry_method;
+ u32 warm_boot_offset;
+ u32 phy_a_standby_ctrl_offs;
+ u32 phy_b_standby_ctrl_offs;
+ bool needs_ddr_pad;
+ struct platform_device *pdev;
+};
+
+enum bsp_initiate_command {
+ BSP_CLOCK_STOP = 0x00,
+ BSP_GEN_RANDOM_KEY = 0x4A,
+ BSP_RESTORE_RANDOM_KEY = 0x55,
+ BSP_GEN_FIXED_KEY = 0x63,
+};
+
+#define PM_INITIATE 0x01
+#define PM_INITIATE_SUCCESS 0x00
+#define PM_INITIATE_FAIL 0xfe
+
+static struct brcmstb_pm_control ctrl;
+
+static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
+ void __iomem *ddr_phy_pll_status);
+
+static int brcmstb_init_sram(struct device_node *dn)
+{
+ void __iomem *sram;
+ struct resource res;
+ int ret;
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret)
+ return ret;
+
+ /* Uncached, executable remapping of SRAM */
+ sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
+ if (!sram)
+ return -ENOMEM;
+
+ ctrl.boot_sram = sram;
+ ctrl.boot_sram_len = resource_size(&res);
+
+ return 0;
+}
+
+static const struct of_device_id sram_dt_ids[] = {
+ { .compatible = "mmio-sram" },
+ { /* sentinel */ }
+};
+
+static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ int ret;
+ int timeo = 1000 * 1000; /* 1 second */
+
+ writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
+ (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
+
+ /* Go! */
+ writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);
+
+ /*
+ * If firmware doesn't support the 'ack', then just assume it's done
+ * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
+ */
+ if (of_machine_is_compatible("brcm,bcm74371a0")) {
+ (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
+ mdelay(10);
+ return 0;
+ }
+
+ for (;;) {
+ ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
+ if (!(ret & PM_INITIATE))
+ break;
+ if (timeo <= 0) {
+ pr_err("error: timeout waiting for BSP (%x)\n", ret);
+ break;
+ }
+ timeo -= 50;
+ udelay(50);
+ }
+
+ return (ret & 0xff) != PM_INITIATE_SUCCESS;
+}
+
+static int brcmstb_pm_handshake(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ u32 tmp;
+ int ret;
+
+ /* BSP power handshake, v1 */
+ tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
+ tmp &= ~1UL;
+ writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
+ (void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
+
+ ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
+ if (ret)
+ pr_err("BSP handshake failed\n");
+
+ /*
+ * HACK: BSP may have internal race on the CLOCK_STOP command.
+ * Avoid touching the BSP for a few milliseconds.
+ */
+ mdelay(3);
+
+ return ret;
+}
+
+static inline void shimphy_set(u32 value, u32 mask)
+{
+ int i;
+
+ if (!ctrl.needs_ddr_pad)
+ return;
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
+ SHIMPHY_DDR_PAD_CNTRL);
+ tmp = value | (tmp & mask);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
+ SHIMPHY_DDR_PAD_CNTRL);
+ }
+ wmb(); /* Complete sequence in order. */
+}
+
+static inline void ddr_ctrl_set(bool warmboot)
+{
+ int i;
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
+ ctrl.warm_boot_offset);
+ if (warmboot)
+ tmp |= 1;
+ else
+ tmp &= ~1; /* Cold boot */
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
+ ctrl.warm_boot_offset);
+ }
+ /* Complete sequence in order */
+ wmb();
+}
+
+static inline void s3entry_method0(void)
+{
+ shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
+ 0xffffffff);
+}
+
+static inline void s3entry_method1(void)
+{
+ /*
+ * S3 Entry Sequence
+ * -----------------
+ * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
+ * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
+ */
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+
+ ddr_ctrl_set(true);
+}
+
+static inline void s5entry_method1(void)
+{
+ int i;
+
+ /*
+ * S5 Entry Sequence
+ * -----------------
+ * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
+ * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
+ * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
+ * DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
+ */
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+
+ ddr_ctrl_set(false);
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ /* Step 3: Channel A (RST_N = CKE = 0) */
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_a_standby_ctrl_offs);
+ tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_a_standby_ctrl_offs);
+
+ /* Step 3: Channel B? */
+ if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_b_standby_ctrl_offs);
+ tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_b_standby_ctrl_offs);
+ }
+ }
+ /* Must complete */
+ wmb();
+}
+
+/*
+ * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
+ * into a low-power mode
+ */
+static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+
+ if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
+ s5entry_method1();
+
+ /* pm_start_pwrdn transition 0->1 */
+ writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);
+
+ if (!onewrite) {
+ (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
+
+ writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
+ (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
+ }
+ wfi();
+}
+
+/* Support S5 cold boot out of "poweroff" */
+static void brcmstb_pm_poweroff(void)
+{
+ brcmstb_pm_handshake();
+
+ /* Clear magic S3 warm-boot value */
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+ (void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+
+ /* Skip wait-for-interrupt signal; just use a countdown */
+ writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
+ (void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
+
+ if (ctrl.s3entry_method == 1) {
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+ ddr_ctrl_set(false);
+ brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
+ return; /* We should never actually get here */
+ }
+
+ brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
+}
+
+static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
+{
+ unsigned int size = ALIGN(len, FNCPY_ALIGN);
+
+ if (ctrl.boot_sram_len < size) {
+ pr_err("standby code will not fit in SRAM\n");
+ return NULL;
+ }
+
+ return fncpy(ctrl.boot_sram, fn, size);
+}
+
+/*
+ * S2 suspend/resume picks up where we left off, so we must execute carefully
+ * from SRAM, in order to allow DDR to come back up safely before we continue.
+ */
+static int brcmstb_pm_s2(void)
+{
+ /* A previous S3 can set a value hazardous to S2, so make sure. */
+ if (ctrl.s3entry_method == 1) {
+ shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+ ddr_ctrl_set(false);
+ }
+
+ brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
+ brcmstb_pm_do_s2_sz);
+ if (!brcmstb_pm_do_s2_sram)
+ return -EINVAL;
+
+ return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
+ ctrl.memcs[0].ddr_phy_base +
+ ctrl.pll_status_offset);
+}
+
+/*
+ * This function is called on a new stack, so don't allow inlining (which will
+ * generate stack references on the old stack). It cannot be made static because
+ * it is referenced from brcmstb_pm_s3()
+ */
+noinline int brcmstb_pm_s3_finish(void)
+{
+ struct brcmstb_s3_params *params = ctrl.s3_params;
+ dma_addr_t params_pa = ctrl.s3_params_pa;
+ phys_addr_t reentry = virt_to_phys(&cpu_resume);
+ enum bsp_initiate_command cmd;
+ u32 flags;
+
+ /*
+ * Clear parameter structure, but not DTU area, which has already been
+ * filled in. We know DTU is a the end, so we can just subtract its
+ * size.
+ */
+ memset(params, 0, sizeof(*params) - sizeof(params->dtu));
+
+ flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+
+ flags &= S3_BOOTLOADER_RESERVED;
+ flags |= S3_FLAG_NO_MEM_VERIFY;
+ flags |= S3_FLAG_LOAD_RANDKEY;
+
+ /* Load random / fixed key */
+ if (flags & S3_FLAG_LOAD_RANDKEY)
+ cmd = BSP_GEN_RANDOM_KEY;
+ else
+ cmd = BSP_GEN_FIXED_KEY;
+ if (do_bsp_initiate_command(cmd)) {
+ pr_info("key loading failed\n");
+ return -EIO;
+ }
+
+ params->magic = BRCMSTB_S3_MAGIC;
+ params->reentry = reentry;
+
+ /* No more writes to DRAM */
+ flush_cache_all();
+
+ flags |= BRCMSTB_S3_MAGIC_SHORT;
+
+ writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+ writel_relaxed(lower_32_bits(params_pa),
+ ctrl.aon_sram + AON_REG_CONTROL_LOW);
+ writel_relaxed(upper_32_bits(params_pa),
+ ctrl.aon_sram + AON_REG_CONTROL_HIGH);
+
+ switch (ctrl.s3entry_method) {
+ case 0:
+ s3entry_method0();
+ brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
+ break;
+ case 1:
+ s3entry_method1();
+ brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Must have been interrupted from wfi()? */
+ return -EINTR;
+}
+
+static int brcmstb_pm_do_s3(unsigned long sp)
+{
+ unsigned long save_sp;
+ int ret;
+
+ asm volatile (
+ "mov %[save], sp\n"
+ "mov sp, %[new]\n"
+ "bl brcmstb_pm_s3_finish\n"
+ "mov %[ret], r0\n"
+ "mov %[new], sp\n"
+ "mov sp, %[save]\n"
+ : [save] "=&r" (save_sp), [ret] "=&r" (ret)
+ : [new] "r" (sp)
+ );
+
+ return ret;
+}
+
+static int brcmstb_pm_s3(void)
+{
+ void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;
+
+ return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
+}
+
+static int brcmstb_pm_standby(bool deep_standby)
+{
+ int ret;
+
+ if (brcmstb_pm_handshake())
+ return -EIO;
+
+ if (deep_standby)
+ ret = brcmstb_pm_s3();
+ else
+ ret = brcmstb_pm_s2();
+ if (ret)
+ pr_err("%s: standby failed\n", __func__);
+
+ return ret;
+}
+
+static int brcmstb_pm_enter(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ ret = brcmstb_pm_standby(false);
+ break;
+ case PM_SUSPEND_MEM:
+ ret = brcmstb_pm_standby(true);
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmstb_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return true;
+ case PM_SUSPEND_MEM:
+ return ctrl.support_warm_boot;
+ default:
+ return false;
+ }
+}
+
+static const struct platform_suspend_ops brcmstb_pm_ops = {
+ .enter = brcmstb_pm_enter,
+ .valid = brcmstb_pm_valid,
+};
+
+static const struct of_device_id aon_ctrl_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-aon-ctrl" },
+ {}
+};
+
+struct ddr_phy_ofdata {
+ bool supports_warm_boot;
+ size_t pll_status_offset;
+ int s3entry_method;
+ u32 warm_boot_offset;
+ u32 phy_a_standby_ctrl_offs;
+ u32 phy_b_standby_ctrl_offs;
+};
+
+static struct ddr_phy_ofdata ddr_phy_71_1 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x0c,
+ .s3entry_method = 1,
+ .warm_boot_offset = 0x2c,
+ .phy_a_standby_ctrl_offs = 0x198,
+ .phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
+};
+
+static struct ddr_phy_ofdata ddr_phy_72_0 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x10,
+ .s3entry_method = 1,
+ .warm_boot_offset = 0x40,
+ .phy_a_standby_ctrl_offs = 0x2a4,
+ .phy_b_standby_ctrl_offs = 0x8a4
+};
+
+static struct ddr_phy_ofdata ddr_phy_225_1 = {
+ .supports_warm_boot = false,
+ .pll_status_offset = 0x4,
+ .s3entry_method = 0
+};
+
+static struct ddr_phy_ofdata ddr_phy_240_1 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x4,
+ .s3entry_method = 0
+};
+
+static const struct of_device_id ddr_phy_dt_ids[] = {
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v71.1",
+ .data = &ddr_phy_71_1,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v72.0",
+ .data = &ddr_phy_72_0,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v225.1",
+ .data = &ddr_phy_225_1,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v240.1",
+ .data = &ddr_phy_240_1,
+ },
+ {
+ /* Same as v240.1, for the registers we care about */
+ .compatible = "brcm,brcmstb-ddr-phy-v240.2",
+ .data = &ddr_phy_240_1,
+ },
+ {}
+};
+
+struct ddr_seq_ofdata {
+ bool needs_ddr_pad;
+ u32 warm_boot_offset;
+};
+
+static const struct ddr_seq_ofdata ddr_seq_b22 = {
+ .needs_ddr_pad = false,
+ .warm_boot_offset = 0x2c,
+};
+
+static const struct ddr_seq_ofdata ddr_seq = {
+ .needs_ddr_pad = true,
+};
+
+static const struct of_device_id ddr_shimphy_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
+ {}
+};
+
+static const struct of_device_id brcmstb_memc_of_match[] = {
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr",
+ .data = &ddr_seq,
+ },
+ {},
+};
+
+static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
+ int index, const void **ofdata)
+{
+ struct device_node *dn;
+ const struct of_device_id *match;
+
+ dn = of_find_matching_node_and_match(NULL, matches, &match);
+ if (!dn)
+ return ERR_PTR(-EINVAL);
+
+ if (ofdata)
+ *ofdata = match->data;
+
+ return of_io_request_and_map(dn, index, dn->full_name);
+}
+
+static int brcmstb_pm_panic_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block brcmstb_pm_panic_nb = {
+ .notifier_call = brcmstb_pm_panic_notify,
+};
+
+static int brcmstb_pm_probe(struct platform_device *pdev)
+{
+ const struct ddr_phy_ofdata *ddr_phy_data;
+ const struct ddr_seq_ofdata *ddr_seq_data;
+ const struct of_device_id *of_id = NULL;
+ struct device_node *dn;
+ void __iomem *base;
+ int ret, i;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+ return PTR_ERR(base);
+ }
+ ctrl.aon_ctrl_base = base;
+
+ /* AON SRAM registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
+ if (IS_ERR(base)) {
+ /* Assume standard offset */
+ ctrl.aon_sram = ctrl.aon_ctrl_base +
+ AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ } else {
+ ctrl.aon_sram = base;
+ }
+
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
+
+ /* DDR PHY registers */
+ base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
+ (const void **)&ddr_phy_data);
+ if (IS_ERR(base)) {
+ pr_err("error mapping DDR PHY\n");
+ return PTR_ERR(base);
+ }
+ ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
+ ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
+ /* Only need DDR PHY 0 for now? */
+ ctrl.memcs[0].ddr_phy_base = base;
+ ctrl.s3entry_method = ddr_phy_data->s3entry_method;
+ ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
+ ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
+ /*
+ * Slightly grosss to use the phy ver to get a memc,
+ * offset but that is the only versioned things so far
+ * we can test for.
+ */
+ ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;
+
+ /* DDR SHIM-PHY registers */
+ for_each_matching_node(dn, ddr_shimphy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
+ pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ break;
+ }
+
+ base = of_io_request_and_map(dn, 0, dn->full_name);
+ if (IS_ERR(base)) {
+ if (!ctrl.support_warm_boot)
+ break;
+
+ pr_err("error mapping DDR SHIMPHY %d\n", i);
+ return PTR_ERR(base);
+ }
+ ctrl.memcs[i].ddr_shimphy_base = base;
+ ctrl.num_memc++;
+ }
+
+ /* Sequencer DRAM Param and Control Registers */
+ i = 0;
+ for_each_matching_node(dn, brcmstb_memc_of_match) {
+ base = of_iomap(dn, 0);
+ if (!base) {
+ pr_err("error mapping DDR Sequencer %d\n", i);
+ return -ENOMEM;
+ }
+
+ of_id = of_match_node(brcmstb_memc_of_match, dn);
+ if (!of_id) {
+ iounmap(base);
+ return -EINVAL;
+ }
+
+ ddr_seq_data = of_id->data;
+ ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
+ /* Adjust warm boot offset based on the DDR sequencer */
+ if (ddr_seq_data->warm_boot_offset)
+ ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;
+
+ ctrl.memcs[i].ddr_ctrl = base;
+ i++;
+ }
+
+ pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
+ ctrl.support_warm_boot, ctrl.s3entry_method,
+ ctrl.warm_boot_offset);
+
+ dn = of_find_matching_node(NULL, sram_dt_ids);
+ if (!dn) {
+ pr_err("SRAM not found\n");
+ return -EINVAL;
+ }
+
+ ret = brcmstb_init_sram(dn);
+ if (ret) {
+ pr_err("error setting up SRAM for PM\n");
+ return ret;
+ }
+
+ ctrl.pdev = pdev;
+
+ ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
+ if (!ctrl.s3_params)
+ return -ENOMEM;
+ ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
+ sizeof(*ctrl.s3_params),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
+ pr_err("error mapping DMA memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &brcmstb_pm_panic_nb);
+
+ pm_power_off = brcmstb_pm_poweroff;
+ suspend_set_ops(&brcmstb_pm_ops);
+
+ return 0;
+
+out:
+ kfree(ctrl.s3_params);
+
+ pr_warn("PM: initialization failed with code %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver brcmstb_pm_driver = {
+ .driver = {
+ .name = "brcmstb-pm",
+ .of_match_table = aon_ctrl_dt_ids,
+ },
+};
+
+static int __init brcmstb_pm_init(void)
+{
+ return platform_driver_probe(&brcmstb_pm_driver,
+ brcmstb_pm_probe);
+}
+module_init(brcmstb_pm_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-mips.c b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
new file mode 100644
index 000000000000..9300b5f62e56
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
@@ -0,0 +1,461 @@
+/*
+ * MIPS-specific support for Broadcom STB S2/S3/S5 power management
+ *
+ * Copyright (C) 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <asm/bmips.h>
+#include <asm/tlbflush.h>
+
+#include "pm.h"
+
+#define S2_NUM_PARAMS 6
+#define MAX_NUM_MEMC 3
+
+/* S3 constants */
+#define MAX_GP_REGS 16
+#define MAX_CP0_REGS 32
+#define NUM_MEMC_CLIENTS 128
+#define AON_CTRL_RAM_SIZE 128
+#define BRCMSTB_S3_MAGIC 0x5AFEB007
+
+#define CLEAR_RESET_MASK 0x01
+
+/* Index each CP0 register that needs to be saved */
+#define CONTEXT 0
+#define USER_LOCAL 1
+#define PGMK 2
+#define HWRENA 3
+#define COMPARE 4
+#define STATUS 5
+#define CONFIG 6
+#define MODE 7
+#define EDSP 8
+#define BOOT_VEC 9
+#define EBASE 10
+
+struct brcmstb_memc {
+ void __iomem *ddr_phy_base;
+ void __iomem *arb_base;
+};
+
+struct brcmstb_pm_control {
+ void __iomem *aon_ctrl_base;
+ void __iomem *aon_sram_base;
+ void __iomem *timers_base;
+ struct brcmstb_memc memcs[MAX_NUM_MEMC];
+ int num_memc;
+};
+
+struct brcm_pm_s3_context {
+ u32 cp0_regs[MAX_CP0_REGS];
+ u32 memc0_rts[NUM_MEMC_CLIENTS];
+ u32 sc_boot_vec;
+};
+
+struct brcmstb_mem_transfer;
+
+struct brcmstb_mem_transfer {
+ struct brcmstb_mem_transfer *next;
+ void *src;
+ void *dst;
+ dma_addr_t pa_src;
+ dma_addr_t pa_dst;
+ u32 len;
+ u8 key;
+ u8 mode;
+ u8 src_remapped;
+ u8 dst_remapped;
+ u8 src_dst_remapped;
+};
+
+#define AON_SAVE_SRAM(base, idx, val) \
+ __raw_writel(val, base + (idx << 2))
+
+/* Used for saving registers in asm */
+u32 gp_regs[MAX_GP_REGS];
+
+#define BSP_CLOCK_STOP 0x00
+#define PM_INITIATE 0x01
+
+static struct brcmstb_pm_control ctrl;
+
+static void brcm_pm_save_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Generic MIPS */
+ ctx->cp0_regs[CONTEXT] = read_c0_context();
+ ctx->cp0_regs[USER_LOCAL] = read_c0_userlocal();
+ ctx->cp0_regs[PGMK] = read_c0_pagemask();
+ ctx->cp0_regs[HWRENA] = read_c0_cache();
+ ctx->cp0_regs[COMPARE] = read_c0_compare();
+ ctx->cp0_regs[STATUS] = read_c0_status();
+
+ /* Broadcom specific */
+ ctx->cp0_regs[CONFIG] = read_c0_brcm_config();
+ ctx->cp0_regs[MODE] = read_c0_brcm_mode();
+ ctx->cp0_regs[EDSP] = read_c0_brcm_edsp();
+ ctx->cp0_regs[BOOT_VEC] = read_c0_brcm_bootvec();
+ ctx->cp0_regs[EBASE] = read_c0_ebase();
+
+ ctx->sc_boot_vec = bmips_read_zscm_reg(0xa0);
+}
+
+static void brcm_pm_restore_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Restore cp0 state */
+ bmips_write_zscm_reg(0xa0, ctx->sc_boot_vec);
+
+ /* Generic MIPS */
+ write_c0_context(ctx->cp0_regs[CONTEXT]);
+ write_c0_userlocal(ctx->cp0_regs[USER_LOCAL]);
+ write_c0_pagemask(ctx->cp0_regs[PGMK]);
+ write_c0_cache(ctx->cp0_regs[HWRENA]);
+ write_c0_compare(ctx->cp0_regs[COMPARE]);
+ write_c0_status(ctx->cp0_regs[STATUS]);
+
+ /* Broadcom specific */
+ write_c0_brcm_config(ctx->cp0_regs[CONFIG]);
+ write_c0_brcm_mode(ctx->cp0_regs[MODE]);
+ write_c0_brcm_edsp(ctx->cp0_regs[EDSP]);
+ write_c0_brcm_bootvec(ctx->cp0_regs[BOOT_VEC]);
+ write_c0_ebase(ctx->cp0_regs[EBASE]);
+}
+
+static void brcmstb_pm_handshake(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ u32 tmp;
+
+ /* BSP power handshake, v1 */
+ tmp = __raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+ tmp &= ~1UL;
+ __raw_writel(tmp, base + AON_CTRL_HOST_MISC_CMDS);
+ (void)__raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+
+ __raw_writel(0, base + AON_CTRL_PM_INITIATE);
+ (void)__raw_readl(base + AON_CTRL_PM_INITIATE);
+ __raw_writel(BSP_CLOCK_STOP | PM_INITIATE,
+ base + AON_CTRL_PM_INITIATE);
+ /*
+ * HACK: BSP may have internal race on the CLOCK_STOP command.
+ * Avoid touching the BSP for a few milliseconds.
+ */
+ mdelay(3);
+}
+
+static void brcmstb_pm_s5(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+
+ brcmstb_pm_handshake();
+
+ /* Clear magic s3 warm-boot value */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, 0);
+
+ /* Set the countdown */
+ __raw_writel(0x10, base + AON_CTRL_PM_CPU_WAIT_COUNT);
+ (void)__raw_readl(base + AON_CTRL_PM_CPU_WAIT_COUNT);
+
+ /* Prepare to S5 cold boot */
+ __raw_writel(PM_COLD_CONFIG, base + AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __raw_writel((PM_COLD_CONFIG | PM_PWR_DOWN), base +
+ AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __asm__ __volatile__(
+ " wait\n"
+ : : : "memory");
+}
+
+static int brcmstb_pm_s3(void)
+{
+ struct brcm_pm_s3_context s3_context;
+ void __iomem *memc_arb_base;
+ unsigned long flags;
+ u32 tmp;
+ int i;
+
+ /* Prepare for s3 */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, BRCMSTB_S3_MAGIC);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 1, (u32)&s3_reentry);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 2, 0);
+
+ /* Clear RESET_HISTORY */
+ tmp = __raw_readl(ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+ tmp &= ~CLEAR_RESET_MASK;
+ __raw_writel(tmp, ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+
+ local_irq_save(flags);
+
+ /* Inhibit DDR_RSTb pulse for both MMCs*/
+ for (i = 0; i < ctrl.num_memc; i++) {
+ tmp = __raw_readl(ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+
+ tmp &= ~0x0f;
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ tmp |= (0x05 | BIT(5));
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ }
+
+ /* Save CP0 context */
+ brcm_pm_save_cp0_context(&s3_context);
+
+ /* Save RTS(skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ s3_context.memc0_rts[i] = __raw_readl(memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* Save I/O context */
+ local_flush_tlb_all();
+ _dma_cache_wback_inv(0, ~0);
+
+ brcm_pm_do_s3(ctrl.aon_ctrl_base, current_cpu_data.dcache.linesz);
+
+ /* CPU reconfiguration */
+ local_flush_tlb_all();
+ bmips_cpu_setup();
+ cpumask_clear(&bmips_booted_mask);
+
+ /* Restore RTS (skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ __raw_writel(s3_context.memc0_rts[i], memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* restore CP0 context */
+ brcm_pm_restore_cp0_context(&s3_context);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int brcmstb_pm_s2(void)
+{
+ /*
+ * We need to pass 6 arguments to an assembly function. Lets avoid the
+ * stack and pass arguments in a explicit 4 byte array. The assembly
+ * code assumes all arguments are 4 bytes and arguments are ordered
+ * like so:
+ *
+ * 0: AON_CTRl base register
+ * 1: DDR_PHY base register
+ * 2: TIMERS base resgister
+ * 3: I-Cache line size
+ * 4: Restart vector address
+ * 5: Restart vector size
+ */
+ u32 s2_params[6];
+
+ /* Prepare s2 parameters */
+ s2_params[0] = (u32)ctrl.aon_ctrl_base;
+ s2_params[1] = (u32)ctrl.memcs[0].ddr_phy_base;
+ s2_params[2] = (u32)ctrl.timers_base;
+ s2_params[3] = (u32)current_cpu_data.icache.linesz;
+ s2_params[4] = (u32)BMIPS_WARM_RESTART_VEC;
+ s2_params[5] = (u32)(bmips_smp_int_vec_end -
+ bmips_smp_int_vec);
+
+ /* Drop to standby */
+ brcm_pm_do_s2(s2_params);
+
+ return 0;
+}
+
+static int brcmstb_pm_standby(bool deep_standby)
+{
+ brcmstb_pm_handshake();
+
+ /* Send IRQs to BMIPS_WARM_RESTART_VEC */
+ clear_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+ set_c0_status(ST0_BEV);
+ irq_disable_hazard();
+
+ if (deep_standby)
+ brcmstb_pm_s3();
+ else
+ brcmstb_pm_s2();
+
+ /* Send IRQs to normal runtime vectors */
+ clear_c0_status(ST0_BEV);
+ irq_disable_hazard();
+ set_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+
+ return 0;
+}
+
+static int brcmstb_pm_enter(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ ret = brcmstb_pm_standby(false);
+ break;
+ case PM_SUSPEND_MEM:
+ ret = brcmstb_pm_standby(true);
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmstb_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return true;
+ case PM_SUSPEND_MEM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct platform_suspend_ops brcmstb_pm_ops = {
+ .enter = brcmstb_pm_enter,
+ .valid = brcmstb_pm_valid,
+};
+
+static const struct of_device_id aon_ctrl_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-aon-ctrl" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id ddr_phy_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-ddr-phy" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id arb_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-memc-arb" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id timers_ids[] = {
+ { .compatible = "brcm,brcmstb-timers" },
+ { /* sentinel */ }
+};
+
+static inline void __iomem *brcmstb_ioremap_node(struct device_node *dn,
+ int index)
+{
+ return of_io_request_and_map(dn, index, dn->full_name);
+}
+
+static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
+ int index, const void **ofdata)
+{
+ struct device_node *dn;
+ const struct of_device_id *match;
+
+ dn = of_find_matching_node_and_match(NULL, matches, &match);
+ if (!dn)
+ return ERR_PTR(-EINVAL);
+
+ if (ofdata)
+ *ofdata = match->data;
+
+ return brcmstb_ioremap_node(dn, index);
+}
+
+static int brcmstb_pm_init(void)
+{
+ struct device_node *dn;
+ void __iomem *base;
+ int i;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+ goto aon_err;
+ }
+ ctrl.aon_ctrl_base = base;
+
+ /* AON SRAM registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_SRAM\n");
+ goto sram_err;
+ }
+ ctrl.aon_sram_base = base;
+
+ ctrl.num_memc = 0;
+ /* Map MEMC DDR PHY registers */
+ for_each_matching_node(dn, ddr_phy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
+ pr_warn("Too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ break;
+ }
+ base = brcmstb_ioremap_node(dn, 0);
+ if (IS_ERR(base))
+ goto ddr_err;
+
+ ctrl.memcs[i].ddr_phy_base = base;
+ ctrl.num_memc++;
+ }
+
+ /* MEMC ARB registers */
+ base = brcmstb_ioremap_match(arb_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping MEMC ARB\n");
+ goto ddr_err;
+ }
+ ctrl.memcs[0].arb_base = base;
+
+ /* Timer registers */
+ base = brcmstb_ioremap_match(timers_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping timers\n");
+ goto tmr_err;
+ }
+ ctrl.timers_base = base;
+
+ /* s3 cold boot aka s5 */
+ pm_power_off = brcmstb_pm_s5;
+
+ suspend_set_ops(&brcmstb_pm_ops);
+
+ return 0;
+
+tmr_err:
+ iounmap(ctrl.memcs[0].arb_base);
+ddr_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_phy_base);
+
+ iounmap(ctrl.aon_sram_base);
+sram_err:
+ iounmap(ctrl.aon_ctrl_base);
+aon_err:
+ return PTR_ERR(base);
+}
+arch_initcall(brcmstb_pm_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm.h b/drivers/soc/bcm/brcmstb/pm/pm.h
new file mode 100644
index 000000000000..b7d35ac70e60
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm.h
@@ -0,0 +1,89 @@
+/*
+ * Definitions for Broadcom STB power management / Always ON (AON) block
+ *
+ * Copyright © 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMSTB_PM_H__
+#define __BRCMSTB_PM_H__
+
+#define AON_CTRL_RESET_CTRL 0x00
+#define AON_CTRL_PM_CTRL 0x04
+#define AON_CTRL_PM_STATUS 0x08
+#define AON_CTRL_PM_CPU_WAIT_COUNT 0x10
+#define AON_CTRL_PM_INITIATE 0x88
+#define AON_CTRL_HOST_MISC_CMDS 0x8c
+#define AON_CTRL_SYSTEM_DATA_RAM_OFS 0x200
+
+/* MIPS PM constants */
+/* MEMC0 offsets */
+#define DDR40_PHY_CONTROL_REGS_0_PLL_STATUS 0x10
+#define DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL 0xa4
+
+/* TIMER offsets */
+#define TIMER_TIMER1_CTRL 0x0c
+#define TIMER_TIMER1_STAT 0x1c
+
+/* TIMER defines */
+#define RESET_TIMER 0x0
+#define START_TIMER 0xbfffffff
+#define TIMER_MASK 0x3fffffff
+
+/* PM_CTRL bitfield (Method #0) */
+#define PM_FAST_PWRDOWN (1 << 6)
+#define PM_WARM_BOOT (1 << 5)
+#define PM_DEEP_STANDBY (1 << 4)
+#define PM_CPU_PWR (1 << 3)
+#define PM_USE_CPU_RDY (1 << 2)
+#define PM_PLL_PWRDOWN (1 << 1)
+#define PM_PWR_DOWN (1 << 0)
+
+/* PM_CTRL bitfield (Method #1) */
+#define PM_DPHY_STANDBY_CLEAR (1 << 20)
+#define PM_MIN_S3_WIDTH_TIMER_BYPASS (1 << 7)
+
+#define PM_S2_COMMAND (PM_PLL_PWRDOWN | PM_USE_CPU_RDY | PM_PWR_DOWN)
+
+/* Method 0 bitmasks */
+#define PM_COLD_CONFIG (PM_PLL_PWRDOWN | PM_DEEP_STANDBY)
+#define PM_WARM_CONFIG (PM_COLD_CONFIG | PM_USE_CPU_RDY | PM_WARM_BOOT)
+
+/* Method 1 bitmask */
+#define M1_PM_WARM_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_WARM_BOOT | PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
+#define M1_PM_COLD_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_MIPS
+extern const unsigned long brcmstb_pm_do_s2_sz;
+extern asmlinkage int brcmstb_pm_do_s2(void __iomem *aon_ctrl_base,
+ void __iomem *ddr_phy_pll_status);
+#else
+/* s2 asm */
+extern asmlinkage int brcm_pm_do_s2(u32 *s2_params);
+
+/* s3 asm */
+extern asmlinkage int brcm_pm_do_s3(void __iomem *aon_ctrl_base,
+ int dcache_linesz);
+extern int s3_reentry;
+#endif /* CONFIG_MIPS */
+
+#endif
+
+#endif /* __BRCMSTB_PM_H__ */
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-arm.S b/drivers/soc/bcm/brcmstb/pm/s2-arm.S
new file mode 100644
index 000000000000..1d472d564638
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s2-arm.S
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include "pm.h"
+
+ .text
+ .align 3
+
+#define AON_CTRL_REG r10
+#define DDR_PHY_STATUS_REG r11
+
+/*
+ * r0: AON_CTRL base address
+ * r1: DDRY PHY PLL status register address
+ */
+ENTRY(brcmstb_pm_do_s2)
+ stmfd sp!, {r4-r11, lr}
+ mov AON_CTRL_REG, r0
+ mov DDR_PHY_STATUS_REG, r1
+
+ /* Flush memory transactions */
+ dsb
+
+ /* Cache DDR_PHY_STATUS_REG translation */
+ ldr r0, [DDR_PHY_STATUS_REG]
+
+ /* power down request */
+ ldr r0, =PM_S2_COMMAND
+ ldr r1, =0
+ str r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+
+ /* Wait for interrupt */
+ wfi
+ nop
+
+ /* Bring MEMC back up */
+1: ldr r0, [DDR_PHY_STATUS_REG]
+ ands r0, #1
+ beq 1b
+
+ /* Power-up handshake */
+ ldr r0, =1
+ str r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
+
+ ldr r0, =0
+ str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+
+ /* Return to caller */
+ ldr r0, =0
+ ldmfd sp!, {r4-r11, pc}
+
+ ENDPROC(brcmstb_pm_do_s2)
+
+ /* Place literal pool here */
+ .ltorg
+
+ENTRY(brcmstb_pm_do_s2_sz)
+ .word . - brcmstb_pm_do_s2
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-mips.S b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
new file mode 100644
index 000000000000..27a14bc46043
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+
+/*
+ * a0: u32 params array
+ */
+LEAF(brcm_pm_do_s2)
+
+ subu sp, 64
+ sw ra, 0(sp)
+ sw s0, 4(sp)
+ sw s1, 8(sp)
+ sw s2, 12(sp)
+ sw s3, 16(sp)
+ sw s4, 20(sp)
+ sw s5, 24(sp)
+ sw s6, 28(sp)
+ sw s7, 32(sp)
+
+ /*
+ * Dereference the params array
+ * s0: AON_CTRL base register
+ * s1: DDR_PHY base register
+ * s2: TIMERS base register
+ * s3: I-Cache line size
+ * s4: Restart vector address
+ * s5: Restart vector size
+ */
+ move t0, a0
+
+ lw s0, 0(t0)
+ lw s1, 4(t0)
+ lw s2, 8(t0)
+ lw s3, 12(t0)
+ lw s4, 16(t0)
+ lw s5, 20(t0)
+
+ /* Lock this asm section into the I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x1c, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Lock the interrupt vector into the I-cache */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x1c, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ sync
+
+ /* Power down request */
+ li t0, PM_S2_COMMAND
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+ sw t0, AON_CTRL_PM_CTRL(s0)
+ lw t0, AON_CTRL_PM_CTRL(s0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+ /* Save cp0 sr for restoring later */
+ move s6, t0
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+ /* Wait for memc0 */
+1: lw t0, DDR40_PHY_CONTROL_REGS_0_PLL_STATUS(s1)
+ andi t0, 1
+ beqz t0, 1b
+ nop
+
+ /* 1ms delay needed for stable recovery */
+ /* Use TIMER1 to count 1 ms */
+ li t0, RESET_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ li t0, START_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ /* Prepare delay */
+ li t0, TIMER_MASK
+ lw t1, TIMER_TIMER1_STAT(s2)
+ and t1, t0
+ /* 1ms delay */
+ addi t1, 27000
+
+ /* Wait for the timer value to exceed t1 */
+1: lw t0, TIMER_TIMER1_STAT(s2)
+ sgtu t2, t1, t0
+ bnez t2, 1b
+ nop
+
+ /* Power back up */
+ li t1, 1
+ sw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+ lw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+
+ /* Unlock I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x00, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Unlock interrupt vector */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x00, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ /* Restore cp0 sr */
+ sync
+ nop
+ mtc0 s6, CP0_STATUS
+ nop
+
+ /* Set return value to success */
+ li v0, 0
+
+ /* Return to caller */
+ lw s7, 32(sp)
+ lw s6, 28(sp)
+ lw s5, 24(sp)
+ lw s4, 20(sp)
+ lw s3, 16(sp)
+ lw s2, 12(sp)
+ lw s1, 8(sp)
+ lw s0, 4(sp)
+ lw ra, 0(sp)
+ addiu sp, 64
+
+ jr ra
+ nop
+END(brcm_pm_do_s2)
+
+ .globl asm_end
+asm_end:
+ nop
+
diff --git a/drivers/soc/bcm/brcmstb/pm/s3-mips.S b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
new file mode 100644
index 000000000000..1242308a8868
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+ .global s3_reentry
+
+/*
+ * a0: AON_CTRL base register
+ * a1: D-Cache line size
+ */
+LEAF(brcm_pm_do_s3)
+
+ /* Get the address of s3_context */
+ la t0, gp_regs
+ sw ra, 0(t0)
+ sw s0, 4(t0)
+ sw s1, 8(t0)
+ sw s2, 12(t0)
+ sw s3, 16(t0)
+ sw s4, 20(t0)
+ sw s5, 24(t0)
+ sw s6, 28(t0)
+ sw s7, 32(t0)
+ sw gp, 36(t0)
+ sw sp, 40(t0)
+ sw fp, 44(t0)
+
+ /* Save CP0 Status */
+ mfc0 t1, CP0_STATUS
+ sw t1, 48(t0)
+
+ /* Write-back gp registers - cache will be gone */
+ addiu t1, a1, -1
+ not t1
+ and t0, t1
+
+ /* Flush at least 64 bytes */
+ addiu t2, t0, 64
+ and t2, t1
+
+1: cache 0x17, 0(t0)
+ bne t0, t2, 1b
+ addu t0, a1
+
+ /* Drop to deep standby */
+ li t1, PM_WARM_CONFIG
+ sw zero, AON_CTRL_PM_CTRL(a0)
+ lw zero, AON_CTRL_PM_CTRL(a0)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ li t1, (PM_WARM_CONFIG | PM_PWR_DOWN)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+s3_reentry:
+
+ /* Clear call/return stack */
+ li t0, (0x06 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ /* Clear jump target buffer */
+ li t0, (0x04 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ sync
+ nop
+
+ /* Setup mmu defaults */
+ mtc0 zero, CP0_WIRED
+ mtc0 zero, CP0_ENTRYHI
+ li k0, PM_DEFAULT_MASK
+ mtc0 k0, CP0_PAGEMASK
+
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+ nop
+
+ /* Restore general purpose registers */
+ la t0, gp_regs
+ lw fp, 44(t0)
+ lw sp, 40(t0)
+ lw gp, 36(t0)
+ lw s7, 32(t0)
+ lw s6, 28(t0)
+ lw s5, 24(t0)
+ lw s4, 20(t0)
+ lw s3, 16(t0)
+ lw s2, 12(t0)
+ lw s1, 8(t0)
+ lw s0, 4(t0)
+ lw ra, 0(t0)
+
+ /* Restore CP0 status */
+ lw t1, 48(t0)
+ mtc0 t1, CP0_STATUS
+
+ /* Return to caller */
+ li v0, 0
+ jr ra
+ nop
+
+END(brcm_pm_do_s3)
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 6af7a11f09a5..d89a6a80c8ef 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -213,6 +213,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1021a-dcfg", },
{ .compatible = "fsl,ls1043a-dcfg", },
{ .compatible = "fsl,ls2080a-dcfg", },
+ { .compatible = "fsl,ls1088a-dcfg", },
{}
};
MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index 757033c0586c..fb4e6bf0a0c4 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,6 +1,6 @@
menuconfig FSL_DPAA
bool "Freescale DPAA 1.x support"
- depends on FSL_SOC_BOOKE
+ depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
select GENERIC_ALLOCATOR
help
The Freescale Data Path Acceleration Architecture (DPAA) is a set of
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
index 363982b83ab5..811312ad526f 100644
--- a/drivers/soc/fsl/qbman/Makefile
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
bman_portal.o qman_portal.o \
- bman.o qman.o
+ bman.o qman.o dpaa_sys.o
obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
bman-test-y = bman_test.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index a3d6d7cfa929..f9485cedc648 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -35,6 +35,27 @@
/* Portal register assists */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IER 0x3e40
+#define BM_REG_ISDR 0x3e80
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+#else
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH 0x0000
#define BM_REG_RCR_CI_CINH 0x0004
@@ -53,6 +74,7 @@
#define BM_CL_RCR 0x1000
#define BM_CL_RCR_PI_CENA 0x3000
#define BM_CL_RCR_CI_CENA 0x3100
+#endif
/*
* Portal modes.
@@ -154,7 +176,8 @@ struct bm_mc {
};
struct bm_addr {
- void __iomem *ce; /* cache-enabled */
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* Same as above but for direct access */
void __iomem *ci; /* cache-inhibited */
};
@@ -167,12 +190,12 @@ struct bm_portal {
/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+ return ioread32be(p->addr.ci + offset);
}
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
- __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+ iowrite32be(val, p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -188,7 +211,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
struct bman_portal {
@@ -408,7 +431,7 @@ static int bm_mc_init(struct bm_portal *portal)
mc->cr = portal->addr.ce + BM_CL_CR;
mc->rr = portal->addr.ce + BM_CL_RR0;
- mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+ mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
0 : 1;
mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
@@ -466,7 +489,7 @@ static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
* its command is submitted and completed. This includes the valid-bit,
* in case you were wondering...
*/
- if (!__raw_readb(&rr->verb)) {
+ if (!rr->verb) {
dpaa_invalidate_touch_ro(rr);
return NULL;
}
@@ -512,8 +535,9 @@ static int bman_create_portal(struct bman_portal *portal,
* config, everything that follows depends on it and "config" is more
* for (de)reference...
*/
- p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
- p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
dev_err(c->dev, "RCR initialisation failed\n");
goto fail_rcr;
@@ -607,7 +631,7 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
unsigned long irqflags;
local_irq_save(irqflags);
- set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ p->irq_sources |= bits & BM_PIRQ_VISIBLE;
bm_out(&p->p, BM_REG_IER, p->irq_sources);
local_irq_restore(irqflags);
return 0;
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index eaa9585c7347..05c42235dd41 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -201,6 +201,21 @@ static int fsl_bman_probe(struct platform_device *pdev)
return -ENODEV;
}
+ /*
+ * If FBPR memory wasn't defined using the qbman compatible string
+ * try using the of_reserved_mem_device method
+ */
+ if (!fbpr_a) {
+ ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+
+ dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
bm_set_memory(fbpr_a, fbpr_sz);
err_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 39b39c8f1399..2f71f7df3465 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -91,7 +91,6 @@ static int bman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct bm_portal_config *pcfg;
struct resource *addr_phys[2];
- void __iomem *va;
int irq, cpu;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
@@ -123,23 +122,21 @@ static int bman_portal_probe(struct platform_device *pdev)
}
pcfg->irq = irq;
- va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va) {
- dev_err(dev, "ioremap::CE failed\n");
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
goto err_ioremap1;
}
- pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
- va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
- _PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va) {
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
}
- pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
spin_lock(&bman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) {
@@ -164,9 +161,9 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0;
err_portal_init:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+ iounmap(pcfg->addr_virt_ci);
err_ioremap2:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+ memunmap(pcfg->addr_virt_ce);
err_ioremap1:
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
index f6896a2f6d90..751ce90383b7 100644
--- a/drivers/soc/fsl/qbman/bman_priv.h
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -46,11 +46,9 @@ extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
extern struct gen_pool *bm_bpalloc;
struct bm_portal_config {
- /*
- * Corenet portal addresses;
- * [0]==cache-enabled, [1]==cache-inhibited.
- */
- void __iomem *addr_virt[2];
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
/* Allow these to be joined in lists */
struct list_head list;
struct device *dev;
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
new file mode 100644
index 000000000000..9436aa83ff1b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -0,0 +1,78 @@
+/* Copyright 2017 NXP Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "dpaa_sys.h"
+
+/*
+ * Initialize a devices private memory region
+ */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size)
+{
+ int ret;
+ struct device_node *mem_node;
+ u64 size64;
+
+ ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
+ if (ret) {
+ dev_err(dev,
+ "of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
+ idx, ret);
+ return -ENODEV;
+ }
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (mem_node) {
+ ret = of_property_read_u64(mem_node, "size", &size64);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource fails 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ *size = size64;
+ } else {
+ dev_err(dev, "No memory-region found for index %d\n", idx);
+ return -ENODEV;
+ }
+
+ if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+ dev_err(dev, "DMA Alloc memory failed\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Disassociate the reserved memory area from the device
+ * because a device can only have one DMA memory area. This
+ * should be fine since the memory is allocated and initialized
+ * and only ever accessed by the QBMan device from now on
+ */
+ of_reserved_mem_device_release(dev);
+ return 0;
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index 2ce394aa4c95..9f379000da85 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -44,23 +44,21 @@
#include <linux/prefetch.h>
#include <linux/genalloc.h>
#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>
/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
#define DPAA_PORTAL_CE 0
#define DPAA_PORTAL_CI 1
-#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
-#error "Unsupported Cacheline Size"
-#endif
-
static inline void dpaa_flush(void *p)
{
+ /*
+ * Only PPC needs to flush the cache currently - on ARM the mapping
+ * is non cacheable
+ */
#ifdef CONFIG_PPC
flush_dcache_range((unsigned long)p, (unsigned long)p+64);
-#elif defined(CONFIG_ARM32)
- __cpuc_flush_dcache_area(p, 64);
-#elif defined(CONFIG_ARM64)
- __flush_dcache_area(p, 64);
#endif
}
@@ -102,4 +100,15 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
/* Offset applied to genalloc pools due to zero being an error return */
#define DPAA_GENALLOC_OFF 0x80000000
+/* Initialize the devices private memory region */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size);
+
+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WC
+#endif
+
#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 18eefc3f1abe..e4f5bb056fd2 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -41,6 +41,43 @@
/* Portal register assists */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IER 0x3640
+#define QM_REG_ISDR 0x3680
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+#else
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH 0x0000
#define QM_REG_EQCR_CI_CINH 0x0004
@@ -75,6 +112,7 @@
#define QM_CL_CR 0x3800
#define QM_CL_RR0 0x3900
#define QM_CL_RR1 0x3940
+#endif
/*
* BTW, the drivers (and h/w programming model) already obtain the required
@@ -300,7 +338,8 @@ struct qm_mc {
};
struct qm_addr {
- void __iomem *ce; /* cache-enabled */
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* same value as above but for direct access */
void __iomem *ci; /* cache-inhibited */
};
@@ -321,12 +360,12 @@ struct qm_portal {
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+ return ioread32be(p->addr.ci + offset);
}
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
- __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+ iowrite32be(val, p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -342,7 +381,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
/* --- EQCR API --- */
@@ -646,11 +685,7 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
*/
dpaa_invalidate_touch_ro(res);
#endif
- /*
- * when accessing 'verb', use __raw_readb() to ensure that compiler
- * inlining doesn't try to optimise out "excess reads".
- */
- if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
if (!dqrr->pi)
dqrr->vbit ^= QM_DQRR_VERB_VBIT;
@@ -777,11 +812,8 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
DPAA_ASSERT(mr->pmode == qm_mr_pvb);
- /*
- * when accessing 'verb', use __raw_readb() to ensure that compiler
- * inlining doesn't try to optimise out "excess reads".
- */
- if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+
+ if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
if (!mr->pi)
mr->vbit ^= QM_MR_VERB_VBIT;
@@ -822,7 +854,7 @@ static inline int qm_mc_init(struct qm_portal *portal)
mc->cr = portal->addr.ce + QM_CL_CR;
mc->rr = portal->addr.ce + QM_CL_RR0;
- mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+ mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
? 0 : 1;
mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
@@ -880,7 +912,7 @@ static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
* its command is submitted and completed. This includes the valid-bit,
* in case you were wondering...
*/
- if (!__raw_readb(&rr->verb)) {
+ if (!rr->verb) {
dpaa_invalidate_touch_ro(rr);
return NULL;
}
@@ -909,12 +941,12 @@ static inline int qm_mc_result_timeout(struct qm_portal *portal,
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
- set_bits(mask, &fq->flags);
+ fq->flags |= mask;
}
static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
- clear_bits(mask, &fq->flags);
+ fq->flags &= ~mask;
}
static inline int fq_isset(struct qman_fq *fq, u32 mask)
@@ -1084,11 +1116,7 @@ loop:
* entries well before the ring has been fully consumed, so
* we're being *really* paranoid here.
*/
- u64 now, then = jiffies;
-
- do {
- now = jiffies;
- } while ((then + 10000) > now);
+ msleep(1);
msg = qm_mr_current(p);
if (!msg)
return 0;
@@ -1124,8 +1152,9 @@ static int qman_create_portal(struct qman_portal *portal,
* config, everything that follows depends on it and "config" is more
* for (de)reference
*/
- p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
- p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
/*
* If CI-stashing is used, the current defaults use a threshold of 3,
* and stash with high-than-DQRR priority.
@@ -1566,7 +1595,7 @@ void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
unsigned long irqflags;
local_irq_save(irqflags);
- set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+ p->irq_sources |= bits & QM_PIRQ_VISIBLE;
qm_out(&p->p, QM_REG_IER, p->irq_sources);
local_irq_restore(irqflags);
}
@@ -1589,7 +1618,7 @@ void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
*/
local_irq_save(irqflags);
bits &= QM_PIRQ_VISIBLE;
- clear_bits(bits, &p->irq_sources);
+ p->irq_sources &= ~bits;
qm_out(&p->p, QM_REG_IER, p->irq_sources);
ier = qm_in(&p->p, QM_REG_IER);
/*
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 835ce947ffca..79cba58387a5 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
}
/*
- * Ideally we would use the DMA API to turn rmem->base into a DMA address
- * (especially if iommu translations ever get involved). Unfortunately, the
- * DMA API currently does not allow mapping anything that is not backed with
- * a struct page.
+ * QMan needs two global memory areas initialized at boot time:
+ * 1) FQD: Frame Queue Descriptors used to manage frame queues
+ * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved memory framework
+ * and the addresses and sizes are initialized when the QMan device is probed
*/
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;
+#ifdef CONFIG_PPC
+/*
+ * Support for PPC Device Tree backward compatibility when compatible
+ * string is set to fsl-qman-fqd and fsl-qman-pfdr
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+ if (!tmpp)
+ return -ENOMEM;
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
static int qman_fqd(struct reserved_mem *rmem)
{
fqd_a = rmem->base;
fqd_sz = rmem->size;
WARN_ON(!(fqd_a && fqd_sz));
-
return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
@@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem *rmem)
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+#endif
+
static unsigned int qm_get_fqid_maxcnt(void)
{
return fqd_sz / 64;
}
-/*
- * Flush this memory range from data cache so that QMAN originated
- * transactions for this memory region could be marked non-coherent.
- */
-static int zero_priv_mem(struct device *dev, struct device_node *node,
- phys_addr_t addr, size_t sz)
-{
- /* map as cacheable, non-guarded */
- void __iomem *tmpp = ioremap_prot(addr, sz, 0);
-
- if (!tmpp)
- return -ENOMEM;
-
- memset_io(tmpp, 0, sz);
- flush_dcache_range((unsigned long)tmpp,
- (unsigned long)tmpp + sz);
- iounmap(tmpp);
-
- return 0;
-}
-
static void log_edata_bits(struct device *dev, u32 bit_count)
{
u32 i, j, mask = 0xffffffff;
@@ -717,6 +719,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
qman_ip_rev = QMAN_REV30;
else if (major == 3 && minor == 1)
qman_ip_rev = QMAN_REV31;
+ else if (major == 3 && minor == 2)
+ qman_ip_rev = QMAN_REV32;
else {
dev_err(dev, "Unknown QMan version\n");
return -ENODEV;
@@ -727,10 +731,41 @@ static int fsl_qman_probe(struct platform_device *pdev)
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
}
- ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
- WARN_ON(ret);
- if (ret)
- return -ENODEV;
+ if (fqd_a) {
+#ifdef CONFIG_PPC
+ /*
+ * For PPC backward DT compatibility
+ * FQD memory MUST be zero'd by software
+ */
+ zero_priv_mem(fqd_a, fqd_sz);
+#else
+ WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+ } else {
+ /*
+ * Order of memory regions is assumed as FQD followed by PFDR
+ * in order to ensure allocations from the correct regions the
+ * driver initializes then allocates each piece in order
+ */
+ ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+ if (!pfdr_a) {
+ /* Setup PFDR memory */
+ ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
ret = qman_init_ccsr(dev);
if (ret) {
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index cbacdf4f98ed..a120002b630e 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -224,7 +224,6 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- void __iomem *va;
int irq, cpu, err;
u32 val;
@@ -262,23 +261,21 @@ static int qman_portal_probe(struct platform_device *pdev)
}
pcfg->irq = irq;
- va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va) {
- dev_err(dev, "ioremap::CE failed\n");
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
goto err_ioremap1;
}
- pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
- va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
- _PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va) {
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
}
- pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
pcfg->pools = qm_get_pools_sdqcr();
spin_lock(&qman_lock);
@@ -310,9 +307,9 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0;
err_portal_init:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+ iounmap(pcfg->addr_virt_ci);
err_ioremap2:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+ memunmap(pcfg->addr_virt_ce);
err_ioremap1:
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5fe9faf6232e..75a8f905f8f7 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -28,8 +28,6 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "dpaa_sys.h"
#include <soc/fsl/qman.h>
@@ -155,11 +153,9 @@ static inline void qman_cgrs_xor(struct qman_cgrs *dest,
void qman_init_cgr_all(void);
struct qm_portal_config {
- /*
- * Corenet portal addresses;
- * [0]==cache-enabled, [1]==cache-inhibited.
- */
- void __iomem *addr_virt[2];
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
struct device *dev;
struct iommu_domain *iommu_domain;
/* Allow these to be joined in lists */
@@ -187,6 +183,7 @@ struct qm_portal_config {
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
index d5f8cb2260dc..41bdbc48cade 100644
--- a/drivers/soc/fsl/qbman/qman_test.h
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -30,7 +30,5 @@
#include "qman_priv.h"
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
int qman_test_stash(void);
int qman_test_api(void);
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 609bb3424c14..a7d0667338f2 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -1,9 +1,11 @@
#
# MediaTek SoC drivers
#
+menu "MediaTek SoC drivers"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
- depends on ARCH_MEDIATEK || COMPILE_TEST
select REGMAP
help
Say yes here to add support for the MediaTek INFRACFG controller. The
@@ -12,7 +14,6 @@ config MTK_INFRACFG
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
- depends on ARCH_MEDIATEK
depends on RESET_CONTROLLER
select REGMAP
help
@@ -22,7 +23,6 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
- depends on ARCH_MEDIATEK || COMPILE_TEST
default ARCH_MEDIATEK
select REGMAP
select MTK_INFRACFG
@@ -30,3 +30,5 @@ config MTK_SCPSYS
help
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+
+endmenu
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index c2048382830f..e9e054a15b7d 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -70,6 +70,12 @@
PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \
PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE)
+/* Group of bits used for shown slave capability */
+#define PWRAP_SLV_CAP_SPI BIT(0)
+#define PWRAP_SLV_CAP_DUALIO BIT(1)
+#define PWRAP_SLV_CAP_SECURITY BIT(2)
+#define HAS_CAP(_c, _x) (((_c) & (_x)) == (_x))
+
/* defines for slave device wrapper registers */
enum dew_regs {
PWRAP_DEW_BASE,
@@ -208,6 +214,36 @@ enum pwrap_regs {
PWRAP_ADC_RDATA_ADDR1,
PWRAP_ADC_RDATA_ADDR2,
+ /* MT7622 only regs */
+ PWRAP_EINT_STA0_ADR,
+ PWRAP_EINT_STA1_ADR,
+ PWRAP_STA,
+ PWRAP_CLR,
+ PWRAP_DVFS_ADR8,
+ PWRAP_DVFS_WDATA8,
+ PWRAP_DVFS_ADR9,
+ PWRAP_DVFS_WDATA9,
+ PWRAP_DVFS_ADR10,
+ PWRAP_DVFS_WDATA10,
+ PWRAP_DVFS_ADR11,
+ PWRAP_DVFS_WDATA11,
+ PWRAP_DVFS_ADR12,
+ PWRAP_DVFS_WDATA12,
+ PWRAP_DVFS_ADR13,
+ PWRAP_DVFS_WDATA13,
+ PWRAP_DVFS_ADR14,
+ PWRAP_DVFS_WDATA14,
+ PWRAP_DVFS_ADR15,
+ PWRAP_DVFS_WDATA15,
+ PWRAP_EXT_CK,
+ PWRAP_ADC_RDATA_ADDR,
+ PWRAP_GPS_STA,
+ PWRAP_SW_RST,
+ PWRAP_DVFS_STEP_CTRL0,
+ PWRAP_DVFS_STEP_CTRL1,
+ PWRAP_DVFS_STEP_CTRL2,
+ PWRAP_SPI2_CTRL,
+
/* MT8135 only regs */
PWRAP_CSHEXT,
PWRAP_EVENT_IN_EN,
@@ -330,6 +366,118 @@ static int mt2701_regs[] = {
[PWRAP_ADC_RDATA_ADDR2] = 0x154,
};
+static int mt7622_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xC,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2C,
+ [PWRAP_EINT_STA0_ADR] = 0x30,
+ [PWRAP_EINT_STA1_ADR] = 0x34,
+ [PWRAP_STA] = 0x38,
+ [PWRAP_CLR] = 0x3C,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4C,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5C,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6C,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7C,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8C,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9C,
+ [PWRAP_WACS2_CMD] = 0xA0,
+ [PWRAP_WACS2_RDATA] = 0xA4,
+ [PWRAP_WACS2_VLDCLR] = 0xA8,
+ [PWRAP_INT_EN] = 0xAC,
+ [PWRAP_INT_FLG_RAW] = 0xB0,
+ [PWRAP_INT_FLG] = 0xB4,
+ [PWRAP_INT_CLR] = 0xB8,
+ [PWRAP_SIG_ADR] = 0xBC,
+ [PWRAP_SIG_MODE] = 0xC0,
+ [PWRAP_SIG_VALUE] = 0xC4,
+ [PWRAP_SIG_ERRVAL] = 0xC8,
+ [PWRAP_CRC_EN] = 0xCC,
+ [PWRAP_TIMER_EN] = 0xD0,
+ [PWRAP_TIMER_STA] = 0xD4,
+ [PWRAP_WDT_UNIT] = 0xD8,
+ [PWRAP_WDT_SRC_EN] = 0xDC,
+ [PWRAP_WDT_FLG] = 0xE0,
+ [PWRAP_DEBUG_INT_SEL] = 0xE4,
+ [PWRAP_DVFS_ADR0] = 0xE8,
+ [PWRAP_DVFS_WDATA0] = 0xEC,
+ [PWRAP_DVFS_ADR1] = 0xF0,
+ [PWRAP_DVFS_WDATA1] = 0xF4,
+ [PWRAP_DVFS_ADR2] = 0xF8,
+ [PWRAP_DVFS_WDATA2] = 0xFC,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10C,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11C,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_DVFS_ADR8] = 0x128,
+ [PWRAP_DVFS_WDATA8] = 0x12C,
+ [PWRAP_DVFS_ADR9] = 0x130,
+ [PWRAP_DVFS_WDATA9] = 0x134,
+ [PWRAP_DVFS_ADR10] = 0x138,
+ [PWRAP_DVFS_WDATA10] = 0x13C,
+ [PWRAP_DVFS_ADR11] = 0x140,
+ [PWRAP_DVFS_WDATA11] = 0x144,
+ [PWRAP_DVFS_ADR12] = 0x148,
+ [PWRAP_DVFS_WDATA12] = 0x14C,
+ [PWRAP_DVFS_ADR13] = 0x150,
+ [PWRAP_DVFS_WDATA13] = 0x154,
+ [PWRAP_DVFS_ADR14] = 0x158,
+ [PWRAP_DVFS_WDATA14] = 0x15C,
+ [PWRAP_DVFS_ADR15] = 0x160,
+ [PWRAP_DVFS_WDATA15] = 0x164,
+ [PWRAP_SPMINF_STA] = 0x168,
+ [PWRAP_CIPHER_KEY_SEL] = 0x16C,
+ [PWRAP_CIPHER_IV_SEL] = 0x170,
+ [PWRAP_CIPHER_EN] = 0x174,
+ [PWRAP_CIPHER_RDY] = 0x178,
+ [PWRAP_CIPHER_MODE] = 0x17C,
+ [PWRAP_CIPHER_SWRST] = 0x180,
+ [PWRAP_DCM_EN] = 0x184,
+ [PWRAP_DCM_DBC_PRD] = 0x188,
+ [PWRAP_EXT_CK] = 0x18C,
+ [PWRAP_ADC_CMD_ADDR] = 0x190,
+ [PWRAP_PWRAP_ADC_CMD] = 0x194,
+ [PWRAP_ADC_RDATA_ADDR] = 0x198,
+ [PWRAP_GPS_STA] = 0x19C,
+ [PWRAP_SW_RST] = 0x1A0,
+ [PWRAP_DVFS_STEP_CTRL0] = 0x238,
+ [PWRAP_DVFS_STEP_CTRL1] = 0x23C,
+ [PWRAP_DVFS_STEP_CTRL2] = 0x240,
+ [PWRAP_SPI2_CTRL] = 0x244,
+};
+
static int mt8173_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -487,18 +635,31 @@ static int mt8135_regs[] = {
enum pmic_type {
PMIC_MT6323,
+ PMIC_MT6380,
PMIC_MT6397,
};
enum pwrap_type {
PWRAP_MT2701,
+ PWRAP_MT7622,
PWRAP_MT8135,
PWRAP_MT8173,
};
+struct pmic_wrapper;
struct pwrap_slv_type {
const u32 *dew_regs;
enum pmic_type type;
+ const struct regmap_config *regmap;
+ /* Flags indicating the capability for the target slave */
+ u32 caps;
+ /*
+ * pwrap operations are highly associated with the PMIC types,
+ * so the pointers added increases flexibility allowing determination
+ * which type is used by the detection through device tree.
+ */
+ int (*pwrap_read)(struct pmic_wrapper *wrp, u32 adr, u32 *rdata);
+ int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata);
};
struct pmic_wrapper {
@@ -522,7 +683,7 @@ struct pmic_wrapper_type {
u32 int_en_all;
u32 spi_w;
u32 wdt_src;
- int has_bridge:1;
+ unsigned int has_bridge:1;
int (*init_reg_clock)(struct pmic_wrapper *wrp);
int (*init_soc_specific)(struct pmic_wrapper *wrp);
};
@@ -593,7 +754,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
} while (1);
}
-static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
int ret;
@@ -603,14 +764,54 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
return ret;
}
- pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
- PWRAP_WACS2_CMD);
+ pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ if (ret)
+ return ret;
+
+ *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
+ return 0;
+}
+
+static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ int ret, msb;
+
+ *rdata = 0;
+ for (msb = 0; msb < 2; msb++) {
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, ((msb << 30) | (adr << 16)),
+ PWRAP_WACS2_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ if (ret)
+ return ret;
+
+ *rdata += (PWRAP_GET_WACS_RDATA(pwrap_readl(wrp,
+ PWRAP_WACS2_RDATA)) << (16 * msb));
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+ }
return 0;
}
static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
+ return wrp->slave->pwrap_read(wrp, adr, rdata);
+}
+
+static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
int ret;
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
@@ -619,19 +820,46 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
return ret;
}
- pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
+ pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
+ PWRAP_WACS2_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
- if (ret)
- return ret;
+ return 0;
+}
- *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ int ret, msb, rdata;
- pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+ for (msb = 0; msb < 2; msb++) {
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, (1 << 31) | (msb << 30) | (adr << 16) |
+ ((wdata >> (msb * 16)) & 0xffff),
+ PWRAP_WACS2_CMD);
+
+ /*
+ * The pwrap_read operation is the requirement of hardware used
+ * for the synchronization between two successive 16-bit
+ * pwrap_writel operations composing one 32-bit bus writing.
+ * Otherwise, we'll find the result fails on the lower 16-bit
+ * pwrap writing.
+ */
+ if (!msb)
+ pwrap_read(wrp, adr, &rdata);
+ }
return 0;
}
+static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ return wrp->slave->pwrap_write(wrp, adr, wdata);
+}
+
static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata)
{
return pwrap_read(context, adr, rdata);
@@ -711,23 +939,75 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
return 0;
}
-static int pwrap_mt8135_init_reg_clock(struct pmic_wrapper *wrp)
+static int pwrap_init_dual_io(struct pmic_wrapper *wrp)
{
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
+ int ret;
+ u32 rdata;
+
+ /* Enable dual IO mode */
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
+
+ /* Check IDLE & INIT_DONE in advance */
+ ret = pwrap_wait_for_state(wrp,
+ pwrap_is_fsm_idle_and_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_DIO_EN);
+
+ /* Read Test */
+ pwrap_read(wrp,
+ wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata);
+ if (rdata != PWRAP_DEW_READ_TEST_VAL) {
+ dev_err(wrp->dev,
+ "Read failed on DIO mode: 0x%04x!=0x%04x\n",
+ PWRAP_DEW_READ_TEST_VAL, rdata);
+ return -EFAULT;
+ }
return 0;
}
-static int pwrap_mt8173_init_reg_clock(struct pmic_wrapper *wrp)
+/*
+ * pwrap_init_chip_select_ext is used to configure CS extension time for each
+ * phase during data transactions on the pwrap bus.
+ */
+static void pwrap_init_chip_select_ext(struct pmic_wrapper *wrp, u8 hext_write,
+ u8 hext_read, u8 lext_start,
+ u8 lext_end)
{
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ /*
+ * After finishing a write and read transaction, extends CS high time
+ * to be at least xT of BUS CLK as hext_write and hext_read specifies
+ * respectively.
+ */
+ pwrap_writel(wrp, hext_write, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, hext_read, PWRAP_CSHEXT_READ);
+
+ /*
+ * Extends CS low time after CSL and before CSH command to be at
+ * least xT of BUS CLK as lext_start and lext_end specifies
+ * respectively.
+ */
+ pwrap_writel(wrp, lext_start, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, lext_end, PWRAP_CSLEXT_END);
+}
+
+static int pwrap_common_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ switch (wrp->master->type) {
+ case PWRAP_MT8173:
+ pwrap_init_chip_select_ext(wrp, 0, 4, 2, 2);
+ break;
+ case PWRAP_MT8135:
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ pwrap_init_chip_select_ext(wrp, 0, 4, 0, 0);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -737,20 +1017,16 @@ static int pwrap_mt2701_init_reg_clock(struct pmic_wrapper *wrp)
switch (wrp->slave->type) {
case PMIC_MT6397:
pwrap_writel(wrp, 0xc, PWRAP_RDDMY);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ pwrap_init_chip_select_ext(wrp, 4, 0, 2, 2);
break;
case PMIC_MT6323:
pwrap_writel(wrp, 0x8, PWRAP_RDDMY);
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO],
0x8);
- pwrap_writel(wrp, 0x5, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ pwrap_init_chip_select_ext(wrp, 5, 0, 2, 2);
+ break;
+ default:
break;
}
@@ -794,6 +1070,9 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
case PWRAP_MT8173:
pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
break;
+ case PWRAP_MT7622:
+ pwrap_writel(wrp, 0, PWRAP_CIPHER_EN);
+ break;
}
/* Config cipher mode @PMIC */
@@ -815,6 +1094,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
0x1);
break;
+ default:
+ break;
}
/* wait for cipher data ready@AP */
@@ -827,7 +1108,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
/* wait for cipher data ready@PMIC */
ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready);
if (ret) {
- dev_err(wrp->dev, "timeout waiting for cipher data ready@PMIC\n");
+ dev_err(wrp->dev,
+ "timeout waiting for cipher data ready@PMIC\n");
return ret;
}
@@ -854,6 +1136,30 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
return 0;
}
+static int pwrap_init_security(struct pmic_wrapper *wrp)
+{
+ int ret;
+
+ /* Enable encryption */
+ ret = pwrap_init_cipher(wrp);
+ if (ret)
+ return ret;
+
+ /* Signature checking - using CRC */
+ if (pwrap_write(wrp,
+ wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1))
+ return -EFAULT;
+
+ pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
+ pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
+ pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL],
+ PWRAP_SIG_ADR);
+ pwrap_writel(wrp,
+ wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ return 0;
+}
+
static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp)
{
/* enable pwrap events and pwrap bridge in AP side */
@@ -911,10 +1217,18 @@ static int pwrap_mt2701_init_soc_specific(struct pmic_wrapper *wrp)
return 0;
}
+static int pwrap_mt7622_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ pwrap_writel(wrp, 0, PWRAP_STAUPD_PRD);
+ /* enable 2wire SPI master */
+ pwrap_writel(wrp, 0x8000000, PWRAP_SPI2_CTRL);
+
+ return 0;
+}
+
static int pwrap_init(struct pmic_wrapper *wrp)
{
int ret;
- u32 rdata;
reset_control_reset(wrp->rstc);
if (wrp->rstc_bridge)
@@ -926,10 +1240,12 @@ static int pwrap_init(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
}
- /* Reset SPI slave */
- ret = pwrap_reset_spislave(wrp);
- if (ret)
- return ret;
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Reset SPI slave */
+ ret = pwrap_reset_spislave(wrp);
+ if (ret)
+ return ret;
+ }
pwrap_writel(wrp, 1, PWRAP_WRAP_EN);
@@ -941,45 +1257,26 @@ static int pwrap_init(struct pmic_wrapper *wrp)
if (ret)
return ret;
- /* Setup serial input delay */
- ret = pwrap_init_sidly(wrp);
- if (ret)
- return ret;
-
- /* Enable dual IO mode */
- pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
-
- /* Check IDLE & INIT_DONE in advance */
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
- if (ret) {
- dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
- return ret;
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Setup serial input delay */
+ ret = pwrap_init_sidly(wrp);
+ if (ret)
+ return ret;
}
- pwrap_writel(wrp, 1, PWRAP_DIO_EN);
-
- /* Read Test */
- pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata);
- if (rdata != PWRAP_DEW_READ_TEST_VAL) {
- dev_err(wrp->dev, "Read test failed after switch to DIO mode: 0x%04x != 0x%04x\n",
- PWRAP_DEW_READ_TEST_VAL, rdata);
- return -EFAULT;
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_DUALIO)) {
+ /* Enable dual I/O mode */
+ ret = pwrap_init_dual_io(wrp);
+ if (ret)
+ return ret;
}
- /* Enable encryption */
- ret = pwrap_init_cipher(wrp);
- if (ret)
- return ret;
-
- /* Signature checking - using CRC */
- if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1))
- return -EFAULT;
-
- pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
- pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
- pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL],
- PWRAP_SIG_ADR);
- pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SECURITY)) {
+ /* Enable security on bus */
+ ret = pwrap_init_security(wrp);
+ if (ret)
+ return ret;
+ }
if (wrp->master->type == PWRAP_MT8135)
pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN);
@@ -1023,7 +1320,7 @@ static irqreturn_t pwrap_interrupt(int irqno, void *dev_id)
return IRQ_HANDLED;
}
-static const struct regmap_config pwrap_regmap_config = {
+static const struct regmap_config pwrap_regmap_config16 = {
.reg_bits = 16,
.val_bits = 16,
.reg_stride = 2,
@@ -1032,14 +1329,42 @@ static const struct regmap_config pwrap_regmap_config = {
.max_register = 0xffff,
};
+static const struct regmap_config pwrap_regmap_config32 = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_read = pwrap_regmap_read,
+ .reg_write = pwrap_regmap_write,
+ .max_register = 0xffff,
+};
+
static const struct pwrap_slv_type pmic_mt6323 = {
.dew_regs = mt6323_regs,
.type = PMIC_MT6323,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
+static const struct pwrap_slv_type pmic_mt6380 = {
+ .dew_regs = NULL,
+ .type = PMIC_MT6380,
+ .regmap = &pwrap_regmap_config32,
+ .caps = 0,
+ .pwrap_read = pwrap_read32,
+ .pwrap_write = pwrap_write32,
};
static const struct pwrap_slv_type pmic_mt6397 = {
.dew_regs = mt6397_regs,
.type = PMIC_MT6397,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
};
static const struct of_device_id of_slave_match_tbl[] = {
@@ -1047,6 +1372,12 @@ static const struct of_device_id of_slave_match_tbl[] = {
.compatible = "mediatek,mt6323",
.data = &pmic_mt6323,
}, {
+ /* The MT6380 PMIC only implements a regulator, so we bind it
+ * directly instead of using a MFD.
+ */
+ .compatible = "mediatek,mt6380-regulator",
+ .data = &pmic_mt6380,
+ }, {
.compatible = "mediatek,mt6397",
.data = &pmic_mt6397,
}, {
@@ -1067,6 +1398,18 @@ static const struct pmic_wrapper_type pwrap_mt2701 = {
.init_soc_specific = pwrap_mt2701_init_soc_specific,
};
+static const struct pmic_wrapper_type pwrap_mt7622 = {
+ .regs = mt7622_regs,
+ .type = PWRAP_MT7622,
+ .arb_en_all = 0xff,
+ .int_en_all = ~(u32)BIT(31),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt7622_init_soc_specific,
+};
+
static const struct pmic_wrapper_type pwrap_mt8135 = {
.regs = mt8135_regs,
.type = PWRAP_MT8135,
@@ -1075,7 +1418,7 @@ static const struct pmic_wrapper_type pwrap_mt8135 = {
.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
.has_bridge = 1,
- .init_reg_clock = pwrap_mt8135_init_reg_clock,
+ .init_reg_clock = pwrap_common_init_reg_clock,
.init_soc_specific = pwrap_mt8135_init_soc_specific,
};
@@ -1087,7 +1430,7 @@ static const struct pmic_wrapper_type pwrap_mt8173 = {
.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
.wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
.has_bridge = 0,
- .init_reg_clock = pwrap_mt8173_init_reg_clock,
+ .init_reg_clock = pwrap_common_init_reg_clock,
.init_soc_specific = pwrap_mt8173_init_soc_specific,
};
@@ -1096,6 +1439,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt2701-pwrap",
.data = &pwrap_mt2701,
}, {
+ .compatible = "mediatek,mt7622-pwrap",
+ .data = &pwrap_mt7622,
+ }, {
.compatible = "mediatek,mt8135-pwrap",
.data = &pwrap_mt8135,
}, {
@@ -1159,23 +1505,27 @@ static int pwrap_probe(struct platform_device *pdev)
if (IS_ERR(wrp->bridge_base))
return PTR_ERR(wrp->bridge_base);
- wrp->rstc_bridge = devm_reset_control_get(wrp->dev, "pwrap-bridge");
+ wrp->rstc_bridge = devm_reset_control_get(wrp->dev,
+ "pwrap-bridge");
if (IS_ERR(wrp->rstc_bridge)) {
ret = PTR_ERR(wrp->rstc_bridge);
- dev_dbg(wrp->dev, "cannot get pwrap-bridge reset: %d\n", ret);
+ dev_dbg(wrp->dev,
+ "cannot get pwrap-bridge reset: %d\n", ret);
return ret;
}
}
wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
if (IS_ERR(wrp->clk_spi)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_spi));
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_spi));
return PTR_ERR(wrp->clk_spi);
}
wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
if (IS_ERR(wrp->clk_wrap)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_wrap));
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_wrap));
return PTR_ERR(wrp->clk_wrap);
}
@@ -1220,12 +1570,13 @@ static int pwrap_probe(struct platform_device *pdev)
pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH,
- "mt-pmic-pwrap", wrp);
+ ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
+ IRQF_TRIGGER_HIGH,
+ "mt-pmic-pwrap", wrp);
if (ret)
goto err_out2;
- wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, &pwrap_regmap_config);
+ wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap);
if (IS_ERR(wrp->regmap)) {
ret = PTR_ERR(wrp->regmap);
goto err_out2;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b00bccddcd3b..b81374bb6713 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -35,6 +35,17 @@ config QCOM_PM
modes. It interface with various system drivers to put the cores in
low power modes.
+config QCOM_RMTFS_MEM
+ tristate "Qualcomm Remote Filesystem memory driver"
+ depends on ARCH_QCOM
+ help
+ The Qualcomm remote filesystem memory driver is used for allocating
+ and exposing regions of shared memory with remote processors for the
+ purpose of exchanging sector-data between the remote filesystem
+ service and its clients.
+
+ Say y here if you intend to boot the modem remoteproc.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index fab44666b214..40c56f67e94a 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
new file mode 100644
index 000000000000..ce35ff748adf
--- /dev/null
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/qcom_scm.h>
+
+#define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1)
+
+static dev_t qcom_rmtfs_mem_major;
+
+struct qcom_rmtfs_mem {
+ struct device dev;
+ struct cdev cdev;
+
+ void *base;
+ phys_addr_t addr;
+ phys_addr_t size;
+
+ unsigned int client_id;
+};
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ if (attr == &dev_attr_phys_addr)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
+ if (attr == &dev_attr_size)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->size);
+ if (attr == &dev_attr_client_id)
+ return sprintf(buf, "%d\n", rmtfs_mem->client_id);
+
+ return -EINVAL;
+}
+
+static struct attribute *qcom_rmtfs_mem_attrs[] = {
+ &dev_attr_phys_addr.attr,
+ &dev_attr_size.attr,
+ &dev_attr_client_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(qcom_rmtfs_mem);
+
+static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
+ struct qcom_rmtfs_mem,
+ cdev);
+
+ get_device(&rmtfs_mem->dev);
+ filp->private_data = rmtfs_mem;
+
+ return 0;
+}
+static ssize_t qcom_rmtfs_mem_read(struct file *filp,
+ char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static ssize_t qcom_rmtfs_mem_write(struct file *filp,
+ const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct file_operations qcom_rmtfs_mem_fops = {
+ .owner = THIS_MODULE,
+ .open = qcom_rmtfs_mem_open,
+ .read = qcom_rmtfs_mem_read,
+ .write = qcom_rmtfs_mem_write,
+ .release = qcom_rmtfs_mem_release,
+ .llseek = default_llseek,
+};
+
+static void qcom_rmtfs_mem_release_device(struct device *dev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ kfree(rmtfs_mem);
+}
+
+static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct reserved_mem *rmem;
+ struct qcom_rmtfs_mem *rmtfs_mem;
+ u32 client_id;
+ int ret;
+
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "qcom,client-id", &client_id);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
+ return ret;
+
+ }
+
+ rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
+ if (!rmtfs_mem)
+ return -ENOMEM;
+
+ rmtfs_mem->addr = rmem->base;
+ rmtfs_mem->client_id = client_id;
+ rmtfs_mem->size = rmem->size;
+
+ device_initialize(&rmtfs_mem->dev);
+ rmtfs_mem->dev.parent = &pdev->dev;
+ rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+
+ rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
+ rmtfs_mem->size, MEMREMAP_WC);
+ if (IS_ERR(rmtfs_mem->base)) {
+ dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
+ ret = PTR_ERR(rmtfs_mem->base);
+ goto put_device;
+ }
+
+ cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
+ rmtfs_mem->cdev.owner = THIS_MODULE;
+
+ dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
+ rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
+
+ ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
+ goto put_device;
+ }
+
+ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
+
+ dev_set_drvdata(&pdev->dev, rmtfs_mem);
+
+ return 0;
+
+put_device:
+ put_device(&rmtfs_mem->dev);
+
+ return ret;
+}
+
+static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
+
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
+ { .compatible = "qcom,rmtfs-mem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);
+
+static struct platform_driver qcom_rmtfs_mem_driver = {
+ .probe = qcom_rmtfs_mem_probe,
+ .remove = qcom_rmtfs_mem_remove,
+ .driver = {
+ .name = "qcom_rmtfs_mem",
+ .of_match_table = qcom_rmtfs_mem_of_match,
+ },
+};
+
+static int qcom_rmtfs_mem_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
+ QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&qcom_rmtfs_mem_driver);
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
+ unregister_chrdev_region(qcom_rmtfs_mem_major,
+ QCOM_RMTFS_MEM_DEV_MAX);
+ }
+
+ return ret;
+}
+module_init(qcom_rmtfs_mem_init);
+
+static void qcom_rmtfs_mem_exit(void)
+{
+ platform_driver_unregister(&qcom_rmtfs_mem_driver);
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+}
+module_exit(qcom_rmtfs_mem_exit);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 18ec52f2078a..0b94d62fad2b 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -52,8 +52,13 @@
*
* Items in the non-cached region are allocated from the start of the partition
* while items in the cached region are allocated from the end. The free area
- * is hence the region between the cached and non-cached offsets.
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
*
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
*
* To synchronize allocations in the shared memory heaps a remote spinlock must
* be held - currently lock number 3 of the sfpb or tcsr is used for this on all
@@ -62,13 +67,13 @@
*/
/*
- * Item 3 of the global heap contains an array of versions for the various
- * software components in the SoC. We verify that the boot loader version is
- * what the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ * The version member of the smem header contains an array of versions for the
+ * various software components in the SoC. We verify that the boot loader
+ * version is a valid version as a sanity check.
*/
-#define SMEM_ITEM_VERSION 3
-#define SMEM_MASTER_SBL_VERSION_INDEX 7
-#define SMEM_EXPECTED_VERSION 11
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_GLOBAL_HEAP_VERSION 11
+#define SMEM_GLOBAL_PART_VERSION 12
/*
* The first 8 items are only to be allocated by the boot loader while
@@ -82,8 +87,11 @@
/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS 0
+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST 0xfffe
+
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 9
+#define SMEM_HOST_COUNT 10
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
@@ -140,6 +148,7 @@ struct smem_header {
* @flags: flags for the partition (currently unused)
* @host0: first processor/host with access to this partition
* @host1: second processor/host with access to this partition
+ * @cacheline: alignment for "cached" entries
* @reserved: reserved entries for later use
*/
struct smem_ptable_entry {
@@ -148,7 +157,8 @@ struct smem_ptable_entry {
__le32 flags;
__le16 host0;
__le16 host1;
- __le32 reserved[8];
+ __le32 cacheline;
+ __le32 reserved[7];
};
/**
@@ -213,6 +223,24 @@ struct smem_private_entry {
#define SMEM_PRIVATE_CANARY 0xa5a5
/**
+ * struct smem_info - smem region info located after the table of contents
+ * @magic: magic number, must be SMEM_INFO_MAGIC
+ * @size: size of the smem region
+ * @base_addr: base address of the smem region
+ * @reserved: for now reserved entry
+ * @num_items: highest accepted item number
+ */
+struct smem_info {
+ u8 magic[4];
+ __le32 size;
+ __le32 base_addr;
+ __le32 reserved;
+ __le16 num_items;
+};
+
+static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
+
+/**
* struct smem_region - representation of a chunk of memory used for smem
* @aux_base: identifier of aux_mem base
* @virt_base: virtual base address of memory with this aux_mem identifier
@@ -228,8 +256,12 @@ struct smem_region {
* struct qcom_smem - device data for the smem device
* @dev: device pointer
* @hwlock: reference to a hwspinlock
+ * @global_partition: pointer to global partition when in use
+ * @global_cacheline: cacheline size for global partition
* @partitions: list of pointers to partitions affecting the current
* processor/host
+ * @cacheline: list of cacheline sizes for each host
+ * @item_count: max accepted item number
* @num_regions: number of @regions
* @regions: list of the memory regions defining the shared memory
*/
@@ -238,21 +270,33 @@ struct qcom_smem {
struct hwspinlock *hwlock;
+ struct smem_partition_header *global_partition;
+ size_t global_cacheline;
struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+ size_t cacheline[SMEM_HOST_COUNT];
+ u32 item_count;
unsigned num_regions;
struct smem_region regions[0];
};
static struct smem_private_entry *
-phdr_to_last_private_entry(struct smem_partition_header *phdr)
+phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
return p + le32_to_cpu(phdr->offset_free_uncached);
}
-static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+ size_t cacheline)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
+}
+
+static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
@@ -260,7 +304,7 @@ static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
}
static struct smem_private_entry *
-phdr_to_first_private_entry(struct smem_partition_header *phdr)
+phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
@@ -268,7 +312,7 @@ phdr_to_first_private_entry(struct smem_partition_header *phdr)
}
static struct smem_private_entry *
-private_entry_next(struct smem_private_entry *e)
+uncached_entry_next(struct smem_private_entry *e)
{
void *p = e;
@@ -276,13 +320,28 @@ private_entry_next(struct smem_private_entry *e)
le32_to_cpu(e->size);
}
-static void *entry_to_item(struct smem_private_entry *e)
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *uncached_entry_to_item(struct smem_private_entry *e)
{
void *p = e;
return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}
+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size);
+}
+
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;
@@ -290,32 +349,30 @@ static struct qcom_smem *__smem;
#define HWSPINLOCK_TIMEOUT 1000
static int qcom_smem_alloc_private(struct qcom_smem *smem,
- unsigned host,
+ struct smem_partition_header *phdr,
unsigned item,
size_t size)
{
- struct smem_partition_header *phdr;
struct smem_private_entry *hdr, *end;
size_t alloc_size;
void *cached;
- phdr = smem->partitions[host];
- hdr = phdr_to_first_private_entry(phdr);
- end = phdr_to_last_private_entry(phdr);
- cached = phdr_to_first_cached_entry(phdr);
+ hdr = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+ cached = phdr_to_last_cached_entry(phdr);
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
- "Found invalid canary in host %d partition\n",
- host);
+ "Found invalid canary in hosts %d:%d partition\n",
+ phdr->host0, phdr->host1);
return -EINVAL;
}
if (le16_to_cpu(hdr->item) == item)
return -EEXIST;
- hdr = private_entry_next(hdr);
+ hdr = uncached_entry_next(hdr);
}
/* Check that we don't grow into the cached region */
@@ -346,11 +403,8 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
unsigned item,
size_t size)
{
- struct smem_header *header;
struct smem_global_entry *entry;
-
- if (WARN_ON(item >= SMEM_ITEM_COUNT))
- return -EINVAL;
+ struct smem_header *header;
header = smem->regions[0].virt_base;
entry = &header->toc[item];
@@ -389,6 +443,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
*/
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
+ struct smem_partition_header *phdr;
unsigned long flags;
int ret;
@@ -401,16 +456,24 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
return -EINVAL;
}
+ if (WARN_ON(item >= __smem->item_count))
+ return -EINVAL;
+
ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
HWSPINLOCK_TIMEOUT,
&flags);
if (ret)
return ret;
- if (host < SMEM_HOST_COUNT && __smem->partitions[host])
- ret = qcom_smem_alloc_private(__smem, host, item, size);
- else
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ } else {
ret = qcom_smem_alloc_global(__smem, item, size);
+ }
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -428,9 +491,6 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
u32 aux_base;
unsigned i;
- if (WARN_ON(item >= SMEM_ITEM_COUNT))
- return ERR_PTR(-EINVAL);
-
header = smem->regions[0].virt_base;
entry = &header->toc[item];
if (!entry->allocated)
@@ -452,37 +512,58 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
}
static void *qcom_smem_get_private(struct qcom_smem *smem,
- unsigned host,
+ struct smem_partition_header *phdr,
+ size_t cacheline,
unsigned item,
size_t *size)
{
- struct smem_partition_header *phdr;
struct smem_private_entry *e, *end;
- phdr = smem->partitions[host];
- e = phdr_to_first_private_entry(phdr);
- end = phdr_to_last_private_entry(phdr);
+ e = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
while (e < end) {
- if (e->canary != SMEM_PRIVATE_CANARY) {
- dev_err(smem->dev,
- "Found invalid canary in host %d partition\n",
- host);
- return ERR_PTR(-EINVAL);
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL)
+ *size = le32_to_cpu(e->size) -
+ le16_to_cpu(e->padding_data);
+
+ return uncached_entry_to_item(e);
}
+ e = uncached_entry_next(e);
+ }
+
+ /* Item was not found in the uncached list, search the cached list */
+
+ e = phdr_to_first_cached_entry(phdr, cacheline);
+ end = phdr_to_last_cached_entry(phdr);
+
+ while (e > end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
if (le16_to_cpu(e->item) == item) {
if (size != NULL)
*size = le32_to_cpu(e->size) -
le16_to_cpu(e->padding_data);
- return entry_to_item(e);
+ return cached_entry_to_item(e);
}
- e = private_entry_next(e);
+ e = cached_entry_next(e, cacheline);
}
return ERR_PTR(-ENOENT);
+
+invalid_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
+ phdr->host0, phdr->host1);
+
+ return ERR_PTR(-EINVAL);
}
/**
@@ -496,23 +577,35 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
*/
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
+ struct smem_partition_header *phdr;
unsigned long flags;
+ size_t cacheln;
int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
if (!__smem)
return ptr;
+ if (WARN_ON(item >= __smem->item_count))
+ return ERR_PTR(-EINVAL);
+
ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
HWSPINLOCK_TIMEOUT,
&flags);
if (ret)
return ERR_PTR(ret);
- if (host < SMEM_HOST_COUNT && __smem->partitions[host])
- ptr = qcom_smem_get_private(__smem, host, item, size);
- else
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ cacheln = __smem->cacheline[host];
+ ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ cacheln = __smem->global_cacheline;
+ ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ } else {
ptr = qcom_smem_get_global(__smem, item, size);
+ }
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -541,6 +634,10 @@ int qcom_smem_get_free_space(unsigned host)
phdr = __smem->partitions[host];
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
} else {
header = __smem->regions[0].virt_base;
ret = le32_to_cpu(header->available);
@@ -552,44 +649,131 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
+ struct smem_header *header;
__le32 *versions;
- size_t size;
- versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
- if (IS_ERR(versions)) {
- dev_err(smem->dev, "Unable to read the version item\n");
- return -ENOENT;
- }
-
- if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
- dev_err(smem->dev, "Version item is too small\n");
- return -EINVAL;
- }
+ header = smem->regions[0].virt_base;
+ versions = header->version;
return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}
-static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
- unsigned local_host)
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
- struct smem_partition_header *header;
- struct smem_ptable_entry *entry;
struct smem_ptable *ptable;
- unsigned remote_host;
- u32 version, host0, host1;
- int i;
+ u32 version;
ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
- return 0;
+ return ERR_PTR(-ENOENT);
version = le32_to_cpu(ptable->version);
if (version != 1) {
dev_err(smem->dev,
"Unsupported partition header version %d\n", version);
+ return ERR_PTR(-EINVAL);
+ }
+ return ptable;
+}
+
+static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ struct smem_info *info;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR_OR_NULL(ptable))
+ return SMEM_ITEM_COUNT;
+
+ info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
+ return SMEM_ITEM_COUNT;
+
+ return le16_to_cpu(info->num_items);
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry = NULL;
+ struct smem_ptable *ptable;
+ u32 host0, host1, size;
+ int i;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
+
+ if (host0 == SMEM_GLOBAL_HOST && host0 == host1)
+ break;
+ }
+
+ if (!entry) {
+ dev_err(smem->dev, "Missing entry for global partition\n");
+ return -EINVAL;
+ }
+
+ if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "Invalid entry for global partition\n");
+ return -EINVAL;
+ }
+
+ if (smem->global_partition) {
+ dev_err(smem->dev, "Already found the global partition\n");
+ return -EINVAL;
+ }
+
+ header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ host0 = le16_to_cpu(header->host0);
+ host1 = le16_to_cpu(header->host1);
+
+ if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+ dev_err(smem->dev, "Global partition has invalid magic\n");
+ return -EINVAL;
+ }
+
+ if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
+ dev_err(smem->dev, "Global partition hosts are invalid\n");
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "Global partition has invalid size\n");
return -EINVAL;
}
+ size = le32_to_cpu(header->offset_free_uncached);
+ if (size > le32_to_cpu(header->size)) {
+ dev_err(smem->dev,
+ "Global partition has invalid free pointer\n");
+ return -EINVAL;
+ }
+
+ smem->global_partition = header;
+ smem->global_cacheline = le32_to_cpu(entry->cacheline);
+
+ return 0;
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+ unsigned int local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ unsigned int remote_host;
+ u32 host0, host1;
+ int i;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
entry = &ptable->entry[i];
host0 = le16_to_cpu(entry->host0);
@@ -646,7 +830,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL;
}
- if (header->size != entry->size) {
+ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
dev_err(smem->dev,
"Partition %d has invalid size\n", i);
return -EINVAL;
@@ -659,6 +843,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
}
smem->partitions[remote_host] = header;
+ smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
}
return 0;
@@ -729,13 +914,23 @@ static int qcom_smem_probe(struct platform_device *pdev)
}
version = qcom_smem_get_sbl_version(smem);
- if (version >> 16 != SMEM_EXPECTED_VERSION) {
+ switch (version >> 16) {
+ case SMEM_GLOBAL_PART_VERSION:
+ ret = qcom_smem_set_global_partition(smem);
+ if (ret < 0)
+ return ret;
+ smem->item_count = qcom_smem_get_item_count(smem);
+ break;
+ case SMEM_GLOBAL_HEAP_VERSION:
+ smem->item_count = SMEM_ITEM_COUNT;
+ break;
+ default:
dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
return -EINVAL;
}
ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
- if (ret < 0)
+ if (ret < 0 && ret != -ENOENT)
return ret;
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 567414cb42ba..09550b1da56d 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -3,7 +3,8 @@ config SOC_RENESAS
default y if ARCH_RENESAS
select SOC_BUS
select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \
- ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77995
+ ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77970 || \
+ ARCH_R8A77995
select SYSC_R8A7743 if ARCH_R8A7743
select SYSC_R8A7745 if ARCH_R8A7745
select SYSC_R8A7779 if ARCH_R8A7779
@@ -13,6 +14,7 @@ config SOC_RENESAS
select SYSC_R8A7794 if ARCH_R8A7794
select SYSC_R8A7795 if ARCH_R8A7795
select SYSC_R8A7796 if ARCH_R8A7796
+ select SYSC_R8A77970 if ARCH_R8A77970
select SYSC_R8A77995 if ARCH_R8A77995
if SOC_RENESAS
@@ -54,6 +56,10 @@ config SYSC_R8A7796
bool "R-Car M3-W System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A77970
+ bool "R-Car V3M System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A77995
bool "R-Car D3 System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 763c03d80436..845d62a08ce1 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o
obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o
obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o
obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
# Family
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
new file mode 100644
index 000000000000..8c614164718e
--- /dev/null
+++ b/drivers/soc/renesas/r8a77970-sysc.c
@@ -0,0 +1,39 @@
+/*
+ * Renesas R-Car V3M System Controller
+ *
+ * Copyright (C) 2017 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77970-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77970_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca53-scu", 0x140, 0, R8A77970_PD_CA53_SCU, R8A77970_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A77970_PD_CA53_CPU0, R8A77970_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A77970_PD_CA53_CPU1, R8A77970_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON },
+ { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON },
+ { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_ALWAYS_ON },
+ { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A2IR0 },
+ { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A2IR0 },
+ { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A2IR0 },
+ { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_ALWAYS_ON },
+ { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A2SC0 },
+};
+
+const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
+ .areas = r8a77970_areas,
+ .num_areas = ARRAY_SIZE(r8a77970_areas),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index baa47014e96b..3316b028f231 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -41,6 +41,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
/* R-Car Gen3 is handled like R-Car Gen2 */
{ .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen2 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index c8406e81640f..55a47e509e49 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -284,6 +284,9 @@ static const struct of_device_id rcar_sysc_matches[] = {
#ifdef CONFIG_SYSC_R8A7796
{ .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A77970
+ { .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A77995
{ .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 2f524922c4d2..9d9daf9eb91b 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -58,6 +58,7 @@ extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
extern const struct rcar_sysc_info r8a7795_sysc_info;
extern const struct rcar_sysc_info r8a7796_sysc_info;
+extern const struct rcar_sysc_info r8a77970_sysc_info;
extern const struct rcar_sysc_info r8a77995_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 90d6b7a4340a..9f4ee2567c72 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -144,6 +144,11 @@ static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
.id = 0x52,
};
+static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x54,
+};
+
static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
.family = &fam_rcar_gen3,
.id = 0x58,
@@ -204,6 +209,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7796
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
#endif
+#ifdef CONFIG_ARCH_R8A77970
+ { .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
+#endif
#ifdef CONFIG_ARCH_R8A77995
{ .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
#endif
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index bd4a76f27bc2..938f8ccfcb74 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -60,12 +60,6 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode)
if (pmu_data->powerdown_conf_extra)
pmu_data->powerdown_conf_extra(mode);
-
- if (pmu_data->pmu_config_extra) {
- for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
- pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
- pmu_data->pmu_config_extra[i].offset);
- }
}
/*
@@ -89,9 +83,6 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
.compatible = "samsung,exynos4210-pmu",
.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
}, {
- .compatible = "samsung,exynos4212-pmu",
- .data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
- }, {
.compatible = "samsung,exynos4412-pmu",
.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
}, {
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 40d4229abfb5..86b3f2f8966d 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -23,7 +23,6 @@ struct exynos_pmu_conf {
struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
- const struct exynos_pmu_conf *pmu_config_extra;
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
@@ -36,7 +35,6 @@ extern void __iomem *pmu_base_addr;
/* list of all exported SoC specific data */
extern const struct exynos_pmu_data exynos3250_pmu_data;
extern const struct exynos_pmu_data exynos4210_pmu_data;
-extern const struct exynos_pmu_data exynos4212_pmu_data;
extern const struct exynos_pmu_data exynos4412_pmu_data;
extern const struct exynos_pmu_data exynos5250_pmu_data;
extern const struct exynos_pmu_data exynos5420_pmu_data;
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
index bc4fa73bed11..5dbfe4e31f4c 100644
--- a/drivers/soc/samsung/exynos4-pmu.c
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -90,7 +90,7 @@ static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
{ PMU_TABLE_END,},
};
-static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
+static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
{ S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
@@ -195,10 +195,6 @@ static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
{ S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
{ S5P_CMU_SYSCLK_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
{ S5P_CMU_SYSCLK_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
- { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
{ S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
@@ -212,11 +208,6 @@ const struct exynos_pmu_data exynos4210_pmu_data = {
.pmu_config = exynos4210_pmu_config,
};
-const struct exynos_pmu_data exynos4212_pmu_data = {
- .pmu_config = exynos4x12_pmu_config,
-};
-
const struct exynos_pmu_data exynos4412_pmu_data = {
- .pmu_config = exynos4x12_pmu_config,
- .pmu_config_extra = exynos4412_pmu_config,
+ .pmu_config = exynos4412_pmu_config,
};
diff --git a/drivers/soc/tegra/powergate-bpmp.c b/drivers/soc/tegra/powergate-bpmp.c
index 8fc356039401..82c7e27cd8bb 100644
--- a/drivers/soc/tegra/powergate-bpmp.c
+++ b/drivers/soc/tegra/powergate-bpmp.c
@@ -42,6 +42,7 @@ static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp,
{
struct mrq_pg_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = CMD_PG_SET_STATE;
@@ -53,7 +54,13 @@ static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp,
@@ -80,6 +87,8 @@ static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return PG_STATE_OFF;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
return response.get_state.state;
}
@@ -106,6 +115,8 @@ static int tegra_bpmp_powergate_get_max_id(struct tegra_bpmp *bpmp)
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
return response.get_max_id.max_id;
}
@@ -132,7 +143,7 @@ static char *tegra_bpmp_powergate_get_name(struct tegra_bpmp *bpmp,
msg.rx.size = sizeof(response);
err = tegra_bpmp_transfer(bpmp, &msg);
- if (err < 0)
+ if (err < 0 || msg.rx.ret < 0)
return NULL;
return kstrdup(response.get_name.name, GFP_KERNEL);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 77fe55ce790c..d65345312527 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -79,6 +79,7 @@
#define A3700_SPI_BYTE_LEN BIT(5)
#define A3700_SPI_CLK_PRESCALE BIT(0)
#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS (0x10)
#define A3700_SPI_WFIFO_THRS_BIT 28
#define A3700_SPI_RFIFO_THRS_BIT 24
@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
+ /* For prescaler values over 15, we can only set it by steps of 2.
+ * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to
+ * 30. We only use this range from 16 to 30.
+ */
+ if (prescale > 15)
+ prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f95da364c283..669470971023 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
- spin_lock_irq(&as->lock);
if (as->use_dma) {
atmel_spi_stop_dma(master);
atmel_spi_release_dma(master);
}
+ spin_lock_irq(&as->lock);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2ce875764ca6..0835a8d88fb8 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets SPCMD */
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
- /* Enables SPI function in master mode */
- rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
return 0;
}
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index c5cd635c28f3..41410031f8e9 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -525,7 +525,7 @@ err_free_master:
static int sun4i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index bc7100b93dfc..e0b9fe1d0e37 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
while (remaining_words) {
int n_words, tx_words, rx_words;
u32 sr;
+ int stalled;
n_words = min(remaining_words, xspi->buffer_size);
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
+ stalled = 10;
while (rx_words) {
+ if (rx_words == n_words && !(stalled--) &&
+ !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+ (sr & XSPI_SR_RX_EMPTY_MASK)) {
+ dev_err(&spi->dev,
+ "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+ xspi_init_hw(xspi);
+ return -EIO;
+ }
+
if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
xilinx_spi_rx(xspi);
rx_words--;
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index d79090ed7f9c..2035835b62dc 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u32 tmp;
- int rc;
+ int rc = 0;
memcpy(&tmp, in, sizeof(u32));
if (tmp != CC_EXPORT_MAGIC) {
@@ -1778,9 +1778,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
}
in += sizeof(u32);
- rc = ssi_hash_init(state, ctx);
- if (rc)
- goto out;
+ /* call init() to allocate bufs if the user hasn't */
+ if (!state->digest_buff) {
+ rc = ssi_hash_init(state, ctx);
+ if (rc)
+ goto out;
+ }
dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 2d62a8c57332..ae6ed96d7874 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -361,3 +361,8 @@ static struct comedi_driver ni_atmio_driver = {
.detach = ni_atmio_detach,
};
module_comedi_driver(ni_atmio_driver);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index 609332b3e15b..c462b1c046cd 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
@@ -293,9 +293,9 @@ static void gb_operation_work(struct work_struct *work)
gb_operation_put(operation);
}
-static void gb_operation_timeout(unsigned long arg)
+static void gb_operation_timeout(struct timer_list *t)
{
- struct gb_operation *operation = (void *)arg;
+ struct gb_operation *operation = from_timer(operation, t, timer);
if (gb_operation_result_set(operation, -ETIMEDOUT)) {
/*
@@ -540,8 +540,7 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
goto err_request;
}
- setup_timer(&operation->timer, gb_operation_timeout,
- (unsigned long)operation);
+ timer_setup(&operation->timer, gb_operation_timeout, 0);
}
operation->flags = op_flags;
diff --git a/drivers/staging/irda/include/net/irda/timer.h b/drivers/staging/irda/include/net/irda/timer.h
index a6635f0afae9..6dab15f5dae1 100644
--- a/drivers/staging/irda/include/net/irda/timer.h
+++ b/drivers/staging/irda/include/net/irda/timer.h
@@ -75,7 +75,7 @@ struct lap_cb;
static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
void (*callback)(struct timer_list *))
{
- ptimer->function = (TIMER_FUNC_TYPE) callback;
+ ptimer->function = callback;
/* Set new value for timer (update or add timer).
* We use mod_timer() because it's more efficient and also
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index f8ea523863ba..986c2a40d978 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1683,10 +1683,10 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
case SOCKNAL_RX_LNET_PAYLOAD:
last_rcv = conn->ksnc_rx_deadline -
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
- CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
+ CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port,
- conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
+ iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left,
cfs_duration_sec(cfs_time_sub(cfs_time_current(),
last_rcv)));
lnet_finalize(conn->ksnc_peer->ksnp_ni,
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 35a7b396def4..d50ebdf863fa 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -358,11 +358,7 @@ struct ksock_conn {
__u8 ksnc_rx_scheduled; /* being progressed */
__u8 ksnc_rx_state; /* what is being read */
int ksnc_rx_nob_left; /* # bytes to next hdr/body */
- int ksnc_rx_nob_wanted;/* bytes actually wanted */
- int ksnc_rx_niov; /* # iovec frags */
- struct kvec *ksnc_rx_iov; /* the iovec frags */
- int ksnc_rx_nkiov; /* # page frags */
- struct bio_vec *ksnc_rx_kiov; /* the page frags */
+ struct iov_iter ksnc_rx_to; /* copy destination */
union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
* data
@@ -701,8 +697,7 @@ int ksocknal_lib_setup_sock(struct socket *so);
int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
void ksocknal_lib_eager_ack(struct ksock_conn *conn);
-int ksocknal_lib_recv_iov(struct ksock_conn *conn);
-int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
+int ksocknal_lib_recv(struct ksock_conn *conn);
int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
int *rxmem, int *nagle);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index a5f2ecb966fa..27c56d5ae4e5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -250,66 +250,16 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
}
static int
-ksocknal_recv_iov(struct ksock_conn *conn)
+ksocknal_recv_iter(struct ksock_conn *conn)
{
- struct kvec *iov = conn->ksnc_rx_iov;
int nob;
int rc;
- LASSERT(conn->ksnc_rx_niov > 0);
-
- /*
- * Never touch conn->ksnc_rx_iov or change connection
- * status inside ksocknal_lib_recv_iov
- */
- rc = ksocknal_lib_recv_iov(conn);
-
- if (rc <= 0)
- return rc;
-
- /* received something... */
- nob = rc;
-
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
-
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
-
- do {
- LASSERT(conn->ksnc_rx_niov > 0);
-
- if (nob < (int)iov->iov_len) {
- iov->iov_len -= nob;
- iov->iov_base += nob;
- return -EAGAIN;
- }
-
- nob -= iov->iov_len;
- conn->ksnc_rx_iov = ++iov;
- conn->ksnc_rx_niov--;
- } while (nob);
-
- return rc;
-}
-
-static int
-ksocknal_recv_kiov(struct ksock_conn *conn)
-{
- struct bio_vec *kiov = conn->ksnc_rx_kiov;
- int nob;
- int rc;
-
- LASSERT(conn->ksnc_rx_nkiov > 0);
-
/*
- * Never touch conn->ksnc_rx_kiov or change connection
- * status inside ksocknal_lib_recv_iov
+ * Never touch conn->ksnc_rx_to or change connection
+ * status inside ksocknal_lib_recv
*/
- rc = ksocknal_lib_recv_kiov(conn);
+ rc = ksocknal_lib_recv(conn);
if (rc <= 0)
return rc;
@@ -323,22 +273,11 @@ ksocknal_recv_kiov(struct ksock_conn *conn)
mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
- conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
- do {
- LASSERT(conn->ksnc_rx_nkiov > 0);
-
- if (nob < (int)kiov->bv_len) {
- kiov->bv_offset += nob;
- kiov->bv_len -= nob;
- return -EAGAIN;
- }
-
- nob -= kiov->bv_len;
- conn->ksnc_rx_kiov = ++kiov;
- conn->ksnc_rx_nkiov--;
- } while (nob);
+ iov_iter_advance(&conn->ksnc_rx_to, nob);
+ if (iov_iter_count(&conn->ksnc_rx_to))
+ return -EAGAIN;
return 1;
}
@@ -348,7 +287,7 @@ ksocknal_receive(struct ksock_conn *conn)
{
/*
* Return 1 on success, 0 on EOF, < 0 on error.
- * Caller checks ksnc_rx_nob_wanted to determine
+ * Caller checks ksnc_rx_to to determine
* progress/completion.
*/
int rc;
@@ -365,11 +304,7 @@ ksocknal_receive(struct ksock_conn *conn)
}
for (;;) {
- if (conn->ksnc_rx_niov)
- rc = ksocknal_recv_iov(conn);
- else
- rc = ksocknal_recv_kiov(conn);
-
+ rc = ksocknal_recv_iter(conn);
if (rc <= 0) {
/* error/EOF or partial receive */
if (rc == -EAGAIN) {
@@ -383,7 +318,7 @@ ksocknal_receive(struct ksock_conn *conn)
/* Completed a fragment */
- if (!conn->ksnc_rx_nob_wanted) {
+ if (!iov_iter_count(&conn->ksnc_rx_to)) {
rc = 1;
break;
}
@@ -1051,6 +986,7 @@ int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
+ struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
int nob;
unsigned int niov;
@@ -1071,32 +1007,26 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
case KSOCK_PROTO_V2:
case KSOCK_PROTO_V3:
conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
-
- conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
+ kvec->iov_base = &conn->ksnc_msg;
+ kvec->iov_len = offsetof(struct ksock_msg, ksm_u);
conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
- conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+ 1, offsetof(struct ksock_msg, ksm_u));
break;
case KSOCK_PROTO_V1:
/* Receiving bare struct lnet_hdr */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
+ kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
+ kvec->iov_len = sizeof(struct lnet_hdr);
conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
-
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+ 1, sizeof(struct lnet_hdr));
break;
default:
LBUG();
}
- conn->ksnc_rx_niov = 1;
-
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_csum = ~0;
return 1;
}
@@ -1107,15 +1037,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
*/
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
skipped = 0;
niov = 0;
do {
nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
- conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
- conn->ksnc_rx_iov[niov].iov_len = nob;
+ kvec[niov].iov_base = ksocknal_slop_buffer;
+ kvec[niov].iov_len = nob;
niov++;
skipped += nob;
nob_to_skip -= nob;
@@ -1123,16 +1052,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
} while (nob_to_skip && /* mustn't overflow conn's rx iov */
niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
- conn->ksnc_rx_niov = niov;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_nob_wanted = skipped;
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped);
return 0;
}
static int
ksocknal_process_receive(struct ksock_conn *conn)
{
+ struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
struct lnet_hdr *lhdr;
struct lnet_process_id *id;
int rc;
@@ -1146,7 +1073,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
- if (conn->ksnc_rx_nob_wanted) {
+ if (iov_iter_count(&conn->ksnc_rx_to)) {
rc = ksocknal_receive(conn);
if (rc <= 0) {
@@ -1171,7 +1098,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
return (!rc ? -ESHUTDOWN : rc);
}
- if (conn->ksnc_rx_nob_wanted) {
+ if (iov_iter_count(&conn->ksnc_rx_to)) {
/* short read */
return -EAGAIN;
}
@@ -1234,16 +1161,13 @@ ksocknal_process_receive(struct ksock_conn *conn)
}
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);
+ kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
+ kvec->iov_len = sizeof(struct ksock_lnet_msg);
- conn->ksnc_rx_niov = 1;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+ 1, sizeof(struct ksock_lnet_msg));
goto again; /* read lnet header now */
@@ -1345,26 +1269,9 @@ ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
LASSERT(to->nr_segs <= LNET_MAX_IOV);
conn->ksnc_cookie = msg;
- conn->ksnc_rx_nob_wanted = iov_iter_count(to);
conn->ksnc_rx_nob_left = rlen;
- if (to->type & ITER_KVEC) {
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
- conn->ksnc_rx_niov =
- lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
- to->nr_segs, to->kvec,
- to->iov_offset, iov_iter_count(to));
- } else {
- conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
- conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
- conn->ksnc_rx_nkiov =
- lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
- to->nr_segs, to->bvec,
- to->iov_offset, iov_iter_count(to));
- }
+ conn->ksnc_rx_to = *to;
LASSERT(conn->ksnc_rx_scheduled);
@@ -2329,12 +2236,12 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
+ CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port,
conn->ksnc_rx_state,
- conn->ksnc_rx_nob_wanted,
+ iov_iter_count(&conn->ksnc_rx_to),
conn->ksnc_rx_nob_left);
return conn;
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 970140f09258..cb28dd2baf2f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -162,94 +162,39 @@ ksocknal_lib_eager_ack(struct ksock_conn *conn)
sizeof(opt));
}
-int
-ksocknal_lib_recv_iov(struct ksock_conn *conn)
+static int lustre_csum(struct kvec *v, void *context)
{
- unsigned int niov = conn->ksnc_rx_niov;
- struct kvec *iov = conn->ksnc_rx_iov;
- struct msghdr msg = {
- .msg_flags = 0
- };
- int nob;
- int i;
- int rc;
- int fragnob;
- int sum;
- __u32 saved_csum;
-
- LASSERT(niov > 0);
-
- for (nob = i = 0; i < niov; i++)
- nob += iov[i].iov_len;
-
- LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, niov, nob);
- rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
-
- saved_csum = 0;
- if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
- saved_csum = conn->ksnc_msg.ksm_csum;
- conn->ksnc_msg.ksm_csum = 0;
- }
-
- if (saved_csum) {
- /* accumulate checksum */
- for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- LASSERT(i < niov);
-
- fragnob = iov[i].iov_len;
- if (fragnob > sum)
- fragnob = sum;
-
- conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
- iov[i].iov_base,
- fragnob);
- }
- conn->ksnc_msg.ksm_csum = saved_csum;
- }
-
- return rc;
+ struct ksock_conn *conn = context;
+ conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
+ v->iov_base, v->iov_len);
+ return 0;
}
int
-ksocknal_lib_recv_kiov(struct ksock_conn *conn)
+ksocknal_lib_recv(struct ksock_conn *conn)
{
- unsigned int niov = conn->ksnc_rx_nkiov;
- struct bio_vec *kiov = conn->ksnc_rx_kiov;
- struct msghdr msg = {
- .msg_flags = 0
- };
- int nob;
- int i;
+ struct msghdr msg = { .msg_iter = conn->ksnc_rx_to };
+ __u32 saved_csum;
int rc;
- void *base;
- int sum;
- int fragnob;
- for (nob = i = 0; i < niov; i++)
- nob += kiov[i].bv_len;
-
- LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
- iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, kiov, niov, nob);
rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
+ if (rc <= 0)
+ return rc;
- if (conn->ksnc_msg.ksm_csum) {
- for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- LASSERT(i < niov);
-
- base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
- fragnob = kiov[i].bv_len;
- if (fragnob > sum)
- fragnob = sum;
+ saved_csum = conn->ksnc_msg.ksm_csum;
+ if (!saved_csum)
+ return rc;
- conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
- base, fragnob);
+ /* header is included only in V2 - V3 checksums only the bulk data */
+ if (!(conn->ksnc_rx_to.type & ITER_BVEC) &&
+ conn->ksnc_proto != &ksocknal_protocol_v2x)
+ return rc;
+
+ /* accumulate checksum */
+ conn->ksnc_msg.ksm_csum = 0;
+ iov_iter_for_each_range(&conn->ksnc_rx_to, rc, lustre_csum, conn);
+ conn->ksnc_msg.ksm_csum = saved_csum;
- kunmap(kiov[i].bv_page);
- }
- }
return rc;
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 27848cd69564..68d16ffec980 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -890,7 +890,7 @@ lnet_return_rx_credits_locked(struct lnet_msg *msg)
*/
LASSERT(msg->msg_kiov);
- rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
+ rb = container_of(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
rbp = rb->rb_pool;
msg->msg_kiov = NULL;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 539a26444f31..7d49d4865298 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -71,16 +71,12 @@ lnet_sock_ioctl(int cmd, unsigned long arg)
}
sock_filp = sock_alloc_file(sock, 0, NULL);
- if (IS_ERR(sock_filp)) {
- sock_release(sock);
- rc = PTR_ERR(sock_filp);
- goto out;
- }
+ if (IS_ERR(sock_filp))
+ return PTR_ERR(sock_filp);
rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
fput(sock_filp);
-out:
return rc;
}
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
index 3c83aa31e2c2..5a5d1811ffbe 100644
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -700,9 +700,9 @@ lnet_delay_rule_daemon(void *arg)
}
static void
-delay_timer_cb(unsigned long arg)
+delay_timer_cb(struct timer_list *t)
{
- struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+ struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer);
spin_lock_bh(&delay_dd.dd_lock);
if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
@@ -762,7 +762,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
}
- setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
+ timer_setup(&rule->dl_timer, delay_timer_cb, 0);
spin_lock_init(&rule->dl_lock);
INIT_LIST_HEAD(&rule->dl_msg_list);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 2d6e64dea266..938b859b6650 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1016,7 +1016,7 @@ static bool file_is_noatime(const struct file *file)
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
return false;
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 65ac5128f005..8666f1e81ade 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -313,11 +313,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
sbi->ll_flags |= LL_SBI_ACL;
} else {
LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
sbi->ll_flags &= ~LL_SBI_ACL;
}
@@ -660,7 +660,7 @@ void ll_kill_super(struct super_block *sb)
struct ll_sb_info *sbi;
/* not init sb ?*/
- if (!(sb->s_flags & MS_ACTIVE))
+ if (!(sb->s_flags & SB_ACTIVE))
return;
sbi = ll_s2sbi(sb);
@@ -2039,8 +2039,8 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
int err;
__u32 read_only;
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
- read_only = *flags & MS_RDONLY;
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+ read_only = *flags & SB_RDONLY;
err = obd_set_info_async(NULL, sbi->ll_md_exp,
sizeof(KEY_READ_ONLY),
KEY_READ_ONLY, sizeof(read_only),
@@ -2053,9 +2053,9 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
}
if (read_only)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
else
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (sbi->ll_flags & LL_SBI_VERBOSE)
LCONSOLE_WARN("Remounted %s %s\n", profilenm,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 9e538a59f09d..03e55bca4ada 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1152,7 +1152,7 @@ static int mdc_read_page_remote(void *data, struct page *page0)
}
for (npages = 1; npages < max_pages; npages++) {
- page = page_cache_alloc_cold(inode->i_mapping);
+ page = page_cache_alloc(inode->i_mapping);
if (!page)
break;
page_pool[npages] = page;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 23cdb7c4476c..63be6e7273f3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -329,11 +329,11 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
return -1;
}
-static void ptlrpc_at_timer(unsigned long castmeharder)
+static void ptlrpc_at_timer(struct timer_list *t)
{
struct ptlrpc_service_part *svcpt;
- svcpt = (struct ptlrpc_service_part *)castmeharder;
+ svcpt = from_timer(svcpt, t, scp_at_timer);
svcpt->scp_at_check = 1;
svcpt->scp_at_checktime = cfs_time_current();
@@ -506,8 +506,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
if (!array->paa_reqs_count)
goto free_reqs_array;
- setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
- (unsigned long)svcpt);
+ timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0);
/* At SOW, service time should be quick; 10s seems generous. If client
* timeout is less than this, we'll be sending an early reply.
@@ -926,7 +925,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
- ptlrpc_at_timer((unsigned long)svcpt);
+ ptlrpc_at_timer(&svcpt->scp_at_timer);
} else {
mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
CDEBUG(D_INFO, "armed %s at %+ds\n",
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index 8eb13c3ba29c..27f078749148 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -1,9 +1,10 @@
menuconfig INTEL_ATOMISP
- bool "Enable support to Intel MIPI camera drivers"
- depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI
- help
- Enable support for the Intel ISP2 camera interfaces and MIPI
- sensor drivers.
+ bool "Enable support to Intel MIPI camera drivers"
+ depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI
+ select COMMON_CLK
+ help
+ Enable support for the Intel ISP2 camera interfaces and MIPI
+ sensor drivers.
if INTEL_ATOMISP
source "drivers/staging/media/atomisp/pci/Kconfig"
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index 737452cbf8a0..255ce3630c2a 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -36,13 +36,23 @@
there are any specific things that can be done to fold in support for
multiple firmware versions.
+8. Switch to V4L2 async API to set up sensor, lens and flash devices.
+ Control those devices using V4L2 sub-device API without custom
+ extensions.
-Limitations:
+9. Switch to standard V4L2 sub-device API for sensor and lens. In
+ particular, the user space API needs to support V4L2 controls as
+ defined in the V4L2 spec and references to atomisp must be removed from
+ these drivers.
+
+10. Use LED flash API for flash LED drivers such as LM3554 (which already
+ has a LED class driver).
-1. Currently the patch only support some camera sensors
- gc2235/gc0310/0v2680/ov2722/ov5693/mt9m114...
+11. Switch from videobuf1 to videobuf2. Videobuf1 is being removed!
+
+Limitations:
-2. To test the patches, you also need the ISP firmware
+1. To test the patches, you also need the ISP firmware
for BYT:/lib/firmware/shisp_2400b0_v21.bin
for CHT:/lib/firmware/shisp_2401a0_v21.bin
@@ -51,14 +61,14 @@ Limitations:
device but can also be extracted from the upgrade kit if you've managed
to lose them somehow.
-3. Without a 3A libary the capture behaviour is not very good. To take a good
+2. Without a 3A libary the capture behaviour is not very good. To take a good
picture, you need tune ISP parameters by IOCTL functions or use a 3A libary
such as libxcam.
-4. The driver is intended to drive the PCI exposed versions of the device.
+3. The driver is intended to drive the PCI exposed versions of the device.
It will not detect those devices enumerated via ACPI as a field of the
i915 GPU driver.
-5. The driver supports only v2 of the IPU/Camera. It will not work with the
+4. The driver supports only v2 of the IPU/Camera. It will not work with the
versions of the hardware in other SoCs.
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index b80d29d53e65..db054d3c7ed6 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -3,104 +3,96 @@
#
source "drivers/staging/media/atomisp/i2c/ov5693/Kconfig"
-source "drivers/staging/media/atomisp/i2c/imx/Kconfig"
-config VIDEO_OV2722
+config VIDEO_ATOMISP_OV2722
tristate "OVT ov2722 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the OVT
- OV2722 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ OV2722 raw camera.
- OVT is a 2M raw sensor.
+ OVT is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_GC2235
+config VIDEO_ATOMISP_GC2235
tristate "Galaxy gc2235 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the OVT
- GC2235 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ GC2235 raw camera.
- GC2235 is a 2M raw sensor.
+ GC2235 is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_OV8858
+config VIDEO_ATOMISP_OV8858
tristate "Omnivision ov8858 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2 && VIDEO_ATOMISP
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
- ov8858 RAW sensor.
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ ov8858 RAW sensor.
OV8858 is a 8M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_MSRLIST_HELPER
+config VIDEO_ATOMISP_MSRLIST_HELPER
tristate "Helper library to load, parse and apply large register lists."
depends on I2C
---help---
- This is a helper library to be used from a sensor driver to load, parse
- and apply large register lists.
+ This is a helper library to be used from a sensor driver to load, parse
+ and apply large register lists.
- To compile this driver as a module, choose M here: the
- module will be called libmsrlisthelper.
+ To compile this driver as a module, choose M here: the
+ module will be called libmsrlisthelper.
-config VIDEO_MT9M114
+config VIDEO_ATOMISP_MT9M114
tristate "Aptina mt9m114 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
- mt9m114 1.3 Mpixel camera.
+ This is a Video4Linux2 sensor-level driver for the Micron
+ mt9m114 1.3 Mpixel camera.
- mt9m114 is video camera sensor.
+ mt9m114 is video camera sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_AP1302
- tristate "AP1302 external ISP support"
- depends on I2C && VIDEO_V4L2
- select REGMAP_I2C
- ---help---
- This is a Video4Linux2 sensor-level driver for the external
- ISP AP1302.
-
- AP1302 is an exteral ISP.
-
- It currently only works with the atomisp driver.
-
-config VIDEO_GC0310
+config VIDEO_ATOMISP_GC0310
tristate "GC0310 sensor support"
- depends on I2C && VIDEO_V4L2
- ---help---
- This is a Video4Linux2 sensor-level driver for the Galaxycore
- GC0310 0.3MP sensor.
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Galaxycore
+ GC0310 0.3MP sensor.
-config VIDEO_OV2680
+config VIDEO_ATOMISP_OV2680
tristate "Omnivision OV2680 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
- OV2680 raw camera.
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ OV2680 raw camera.
- ov2680 is a 2M raw sensor.
+ ov2680 is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
#
# Kconfig for flash drivers
#
-config VIDEO_LM3554
+config VIDEO_ATOMISP_LM3554
tristate "LM3554 flash light driver"
+ depends on ACPI
depends on VIDEO_V4L2 && I2C
---help---
- This is a Video4Linux2 sub-dev driver for the LM3554
- flash light driver.
-
- To compile this driver as a module, choose M here: the
- module will be called lm3554
-
+ This is a Video4Linux2 sub-dev driver for the LM3554
+ flash light driver.
+ To compile this driver as a module, choose M here: the
+ module will be called lm3554
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 041a041718d2..99ea35c043fd 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -3,22 +3,19 @@
# Makefile for sensor drivers
#
-obj-$(CONFIG_VIDEO_IMX) += imx/
-obj-$(CONFIG_VIDEO_OV5693) += ov5693/
-obj-$(CONFIG_VIDEO_MT9M114) += mt9m114.o
-obj-$(CONFIG_VIDEO_GC2235) += gc2235.o
-obj-$(CONFIG_VIDEO_OV2722) += ov2722.o
-obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
-obj-$(CONFIG_VIDEO_GC0310) += gc0310.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += ov5693/
+obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2680) += atomisp-ov2680.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o
-obj-$(CONFIG_VIDEO_MSRLIST_HELPER) += libmsrlisthelper.o
-
-obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
+obj-$(CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER) += atomisp-libmsrlisthelper.o
# Makefile for flash drivers
#
-obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
+obj-$(CONFIG_VIDEO_ATOMISP_LM3554) += atomisp-lm3554.o
# HACK! While this driver is in bad shape, don't enable several warnings
# that would be otherwise enabled with W=1
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.c b/drivers/staging/media/atomisp/i2c/ap1302.c
deleted file mode 100644
index 2f772a020c8b..000000000000
--- a/drivers/staging/media/atomisp/i2c/ap1302.c
+++ /dev/null
@@ -1,1255 +0,0 @@
-/*
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "../include/linux/atomisp.h"
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include "ap1302.h"
-
-#define to_ap1302_device(sub_dev) \
- container_of(sub_dev, struct ap1302_device, sd)
-
-/* Static definitions */
-static struct regmap_config ap1302_reg16_config = {
- .reg_bits = 16,
- .val_bits = 16,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .val_format_endian = REGMAP_ENDIAN_BIG,
-};
-
-static struct regmap_config ap1302_reg32_config = {
- .reg_bits = 16,
- .val_bits = 32,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .val_format_endian = REGMAP_ENDIAN_BIG,
-};
-
-static enum ap1302_contexts ap1302_cntx_mapping[] = {
- CONTEXT_PREVIEW, /* Invalid atomisp run mode */
- CONTEXT_VIDEO, /* ATOMISP_RUN_MODE_VIDEO */
- CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_STILL_CAPTURE */
- CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE */
- CONTEXT_PREVIEW, /* ATOMISP_RUN_MODE_PREVIEW */
-};
-
-static struct ap1302_res_struct ap1302_preview_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static struct ap1302_res_struct ap1302_snapshot_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static struct ap1302_res_struct ap1302_video_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static enum ap1302_contexts stream_to_context[] = {
- CONTEXT_SNAPSHOT,
- CONTEXT_PREVIEW,
- CONTEXT_PREVIEW,
- CONTEXT_VIDEO
-};
-
-static u16 aux_stream_config[CONTEXT_NUM][CONTEXT_NUM] = {
- {0, 0, 0}, /* Preview: No aux streams. */
- {1, 0, 2}, /* Snapshot: 1 for postview. 2 for video */
- {1, 0, 0}, /* Video: 1 for preview. */
-};
-
-static struct ap1302_context_info context_info[] = {
- {CNTX_WIDTH, AP1302_REG16, "width"},
- {CNTX_HEIGHT, AP1302_REG16, "height"},
- {CNTX_ROI_X0, AP1302_REG16, "roi_x0"},
- {CNTX_ROI_X1, AP1302_REG16, "roi_x1"},
- {CNTX_ROI_Y0, AP1302_REG16, "roi_y0"},
- {CNTX_ROI_Y1, AP1302_REG16, "roi_y1"},
- {CNTX_ASPECT, AP1302_REG16, "aspect"},
- {CNTX_LOCK, AP1302_REG16, "lock"},
- {CNTX_ENABLE, AP1302_REG16, "enable"},
- {CNTX_OUT_FMT, AP1302_REG16, "out_fmt"},
- {CNTX_SENSOR_MODE, AP1302_REG16, "sensor_mode"},
- {CNTX_MIPI_CTRL, AP1302_REG16, "mipi_ctrl"},
- {CNTX_MIPI_II_CTRL, AP1302_REG16, "mipi_ii_ctrl"},
- {CNTX_LINE_TIME, AP1302_REG32, "line_time"},
- {CNTX_MAX_FPS, AP1302_REG16, "max_fps"},
- {CNTX_AE_USG, AP1302_REG16, "ae_usg"},
- {CNTX_AE_UPPER_ET, AP1302_REG32, "ae_upper_et"},
- {CNTX_AE_MAX_ET, AP1302_REG32, "ae_max_et"},
- {CNTX_SS, AP1302_REG16, "ss"},
- {CNTX_S1_SENSOR_MODE, AP1302_REG16, "s1_sensor_mode"},
- {CNTX_HINF_CTRL, AP1302_REG16, "hinf_ctrl"},
-};
-
-/* This array stores the description list for metadata.
- The metadata contains exposure settings and face
- detection results. */
-static u16 ap1302_ss_list[] = {
- 0xb01c, /* From 0x0186 with size 0x1C are exposure settings. */
- 0x0186,
- 0xb002, /* 0x71c0 is for F-number */
- 0x71c0,
- 0xb010, /* From 0x03dc with size 0x10 are face general infos. */
- 0x03dc,
- 0xb0a0, /* From 0x03e4 with size 0xa0 are face detail infos. */
- 0x03e4,
- 0xb020, /* From 0x0604 with size 0x20 are smile rate infos. */
- 0x0604,
- 0x0000
-};
-
-/* End of static definitions */
-
-static int ap1302_i2c_read_reg(struct v4l2_subdev *sd,
- u16 reg, u16 len, void *val)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (len == AP1302_REG16)
- ret = regmap_read(dev->regmap16, reg, val);
- else if (len == AP1302_REG32)
- ret = regmap_read(dev->regmap32, reg, val);
- else
- ret = -EINVAL;
- if (ret) {
- dev_dbg(&client->dev, "Read reg failed. reg=0x%04X\n", reg);
- return ret;
- }
- if (len == AP1302_REG16)
- dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%04X\n",
- reg, *(u16 *)val);
- else
- dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%08X\n",
- reg, *(u32 *)val);
- return ret;
-}
-
-static int ap1302_i2c_write_reg(struct v4l2_subdev *sd,
- u16 reg, u16 len, u32 val)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- if (len == AP1302_REG16)
- ret = regmap_write(dev->regmap16, reg, val);
- else if (len == AP1302_REG32)
- ret = regmap_write(dev->regmap32, reg, val);
- else
- ret = -EINVAL;
- if (ret) {
- dev_dbg(&client->dev, "Write reg failed. reg=0x%04X\n", reg);
- return ret;
- }
- if (len == AP1302_REG16)
- dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%04X\n",
- reg, (u16)val);
- else
- dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%08X\n",
- reg, (u32)val);
- return ret;
-}
-
-static u16
-ap1302_calculate_context_reg_addr(enum ap1302_contexts context, u16 offset)
-{
- u16 reg_addr;
- /* The register offset is defined according to preview/video registers.
- Preview and video context have the same register definition.
- But snapshot context does not have register S1_SENSOR_MODE.
- When setting snapshot registers, if the offset exceeds
- S1_SENSOR_MODE, the actual offset needs to minus 2. */
- if (context == CONTEXT_SNAPSHOT) {
- if (offset == CNTX_S1_SENSOR_MODE)
- return 0;
- if (offset > CNTX_S1_SENSOR_MODE)
- offset -= 2;
- }
- if (context == CONTEXT_PREVIEW)
- reg_addr = REG_PREVIEW_BASE + offset;
- else if (context == CONTEXT_VIDEO)
- reg_addr = REG_VIDEO_BASE + offset;
- else
- reg_addr = REG_SNAPSHOT_BASE + offset;
- return reg_addr;
-}
-
-static int ap1302_read_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context, u16 offset, u16 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
- if (reg_addr == 0)
- return -EINVAL;
- return ap1302_i2c_read_reg(sd, reg_addr, len,
- ((u8 *)&dev->cntx_config[context]) + offset);
-}
-
-static int ap1302_write_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context, u16 offset, u16 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
- if (reg_addr == 0)
- return -EINVAL;
- return ap1302_i2c_write_reg(sd, reg_addr, len,
- *(u32 *)(((u8 *)&dev->cntx_config[context]) + offset));
-}
-
-static int ap1302_dump_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- int i;
- dev_dbg(&client->dev, "Dump registers for context[%d]:\n", context);
- for (i = 0; i < ARRAY_SIZE(context_info); i++) {
- struct ap1302_context_info *info = &context_info[i];
- u8 *var = (u8 *)&dev->cntx_config[context] + info->offset;
- /* Snapshot context does not have s1_sensor_mode register. */
- if (context == CONTEXT_SNAPSHOT &&
- info->offset == CNTX_S1_SENSOR_MODE)
- continue;
- ap1302_read_context_reg(sd, context, info->offset, info->len);
- if (info->len == AP1302_REG16)
- dev_dbg(&client->dev, "context.%s = 0x%04X (%d)\n",
- info->name, *(u16 *)var, *(u16 *)var);
- else
- dev_dbg(&client->dev, "context.%s = 0x%08X (%d)\n",
- info->name, *(u32 *)var, *(u32 *)var);
- }
- return 0;
-}
-
-static int ap1302_request_firmware(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- ret = request_firmware(&dev->fw, "ap1302_fw.bin", &client->dev);
- if (ret)
- dev_err(&client->dev,
- "ap1302_request_firmware failed. ret=%d\n", ret);
- return ret;
-}
-
-/* When loading firmware, host writes firmware data from address 0x8000.
- When the address reaches 0x9FFF, the next address should return to 0x8000.
- This function handles this address window and load firmware data to AP1302.
- win_pos indicates the offset within this window. Firmware loading procedure
- may call this function several times. win_pos records the current position
- that has been written to.*/
-static int ap1302_write_fw_window(struct v4l2_subdev *sd,
- u16 *win_pos, const u8 *buf, u32 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- u32 pos;
- u32 sub_len;
- for (pos = 0; pos < len; pos += sub_len) {
- if (len - pos < AP1302_FW_WINDOW_SIZE - *win_pos)
- sub_len = len - pos;
- else
- sub_len = AP1302_FW_WINDOW_SIZE - *win_pos;
- ret = regmap_raw_write(dev->regmap16,
- *win_pos + AP1302_FW_WINDOW_OFFSET,
- buf + pos, sub_len);
- if (ret)
- return ret;
- *win_pos += sub_len;
- if (*win_pos >= AP1302_FW_WINDOW_SIZE)
- *win_pos = 0;
- }
- return 0;
-}
-
-static int ap1302_load_firmware(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- const struct ap1302_firmware *fw;
- const u8 *fw_data;
- u16 reg_val = 0;
- u16 win_pos = 0;
- int ret;
-
- dev_info(&client->dev, "Start to load firmware.\n");
- if (!dev->fw) {
- dev_err(&client->dev, "firmware not requested.\n");
- return -EINVAL;
- }
- fw = (const struct ap1302_firmware *) dev->fw->data;
- if (dev->fw->size != (sizeof(*fw) + fw->total_size)) {
- dev_err(&client->dev, "firmware size does not match.\n");
- return -EINVAL;
- }
- /* The fw binary contains a header of struct ap1302_firmware.
- Following the header is the bootdata of AP1302.
- The bootdata pointer can be referenced as &fw[1]. */
- fw_data = (u8 *)&fw[1];
-
- /* Clear crc register. */
- ret = ap1302_i2c_write_reg(sd, REG_SIP_CRC, AP1302_REG16, 0xFFFF);
- if (ret)
- return ret;
-
- /* Load FW data for PLL init stage. */
- ret = ap1302_write_fw_window(sd, &win_pos, fw_data, fw->pll_init_size);
- if (ret)
- return ret;
-
- /* Write 2 to bootdata_stage register to apply basic_init_hp
- settings and enable PLL. */
- ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
- AP1302_REG16, 0x0002);
- if (ret)
- return ret;
-
- /* Wait 1ms for PLL to lock. */
- msleep(20);
-
- /* Load the rest of bootdata content. */
- ret = ap1302_write_fw_window(sd, &win_pos, fw_data + fw->pll_init_size,
- fw->total_size - fw->pll_init_size);
- if (ret)
- return ret;
-
- /* Check crc. */
- ret = ap1302_i2c_read_reg(sd, REG_SIP_CRC, AP1302_REG16, &reg_val);
- if (ret)
- return ret;
- if (reg_val != fw->crc) {
- dev_err(&client->dev,
- "crc does not match. T:0x%04X F:0x%04X\n",
- fw->crc, reg_val);
- return -EAGAIN;
- }
-
- /* Write 0xFFFF to bootdata_stage register to indicate AP1302 that
- the whole bootdata content has been loaded. */
- ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
- AP1302_REG16, 0xFFFF);
- if (ret)
- return ret;
- dev_info(&client->dev, "Load firmware successfully.\n");
-
- return 0;
-}
-
-static int __ap1302_s_power(struct v4l2_subdev *sd, int on, int load_fw)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret, i;
- u16 ss_ptr;
-
- dev_info(&client->dev, "ap1302_s_power is called.\n");
- ret = dev->platform_data->power_ctrl(sd, on);
- if (ret) {
- dev_err(&client->dev,
- "ap1302_s_power error. on=%d ret=%d\n", on, ret);
- return ret;
- }
- dev->power_on = on;
- if (!on || !load_fw)
- return 0;
- /* Load firmware after power on. */
- ret = ap1302_load_firmware(sd);
- if (ret) {
- dev_err(&client->dev,
- "ap1302_load_firmware failed. ret=%d\n", ret);
- return ret;
- }
- ret = ap1302_i2c_read_reg(sd, REG_SS_HEAD_PT0, AP1302_REG16, &ss_ptr);
- if (ret)
- return ret;
- for (i = 0; i < ARRAY_SIZE(ap1302_ss_list); i++) {
- ret = ap1302_i2c_write_reg(sd, ss_ptr + i * 2,
- AP1302_REG16, ap1302_ss_list[i]);
- if (ret)
- return ret;
- }
- return ret;
-}
-
-static int ap1302_s_power(struct v4l2_subdev *sd, int on)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
- ret = __ap1302_s_power(sd, on, 1);
- dev->sys_activated = 0;
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int ap1302_s_config(struct v4l2_subdev *sd, void *pdata)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct camera_mipi_info *mipi_info;
- u16 reg_val = 0;
- int ret;
-
- dev_info(&client->dev, "ap1302_s_config is called.\n");
- if (pdata == NULL)
- return -ENODEV;
-
- dev->platform_data = pdata;
-
- mutex_lock(&dev->input_lock);
-
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret)
- goto fail_power;
- }
-
- ret = __ap1302_s_power(sd, 1, 0);
- if (ret)
- goto fail_power;
-
- /* Detect for AP1302 */
- ret = ap1302_i2c_read_reg(sd, REG_CHIP_VERSION, AP1302_REG16, &reg_val);
- if (ret || (reg_val != AP1302_CHIP_ID)) {
- dev_err(&client->dev,
- "Chip version does no match. ret=%d ver=0x%04x\n",
- ret, reg_val);
- goto fail_config;
- }
- dev_info(&client->dev, "AP1302 Chip ID is 0x%X\n", reg_val);
-
- /* Detect revision for AP1302 */
- ret = ap1302_i2c_read_reg(sd, REG_CHIP_REV, AP1302_REG16, &reg_val);
- if (ret)
- goto fail_config;
- dev_info(&client->dev, "AP1302 Chip Rev is 0x%X\n", reg_val);
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_config;
-
- mipi_info = v4l2_get_subdev_hostdata(sd);
- if (!mipi_info)
- goto fail_config;
- dev->num_lanes = mipi_info->num_lanes;
-
- ret = __ap1302_s_power(sd, 0, 0);
- if (ret)
- goto fail_power;
-
- mutex_unlock(&dev->input_lock);
-
- return ret;
-
-fail_config:
- __ap1302_s_power(sd, 0, 0);
-fail_power:
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "ap1302_s_config failed\n");
- return ret;
-}
-
-static enum ap1302_contexts ap1302_get_context(struct v4l2_subdev *sd)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- return dev->cur_context;
-}
-
-static int ap1302_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->index)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_UYVY8_1X16;
-
- return 0;
-}
-
-static int ap1302_match_resolution(struct ap1302_context_res *res,
- struct v4l2_mbus_framefmt *fmt)
-{
- s32 w0, h0, mismatch, distance;
- s32 w1 = fmt->width;
- s32 h1 = fmt->height;
- s32 min_distance = INT_MAX;
- s32 i, idx = -1;
-
- if (w1 == 0 || h1 == 0)
- return -1;
-
- for (i = 0; i < res->res_num; i++) {
- w0 = res->res_table[i].width;
- h0 = res->res_table[i].height;
- if (w0 < w1 || h0 < h1)
- continue;
- mismatch = abs(w0 * h1 - w1 * h0) * 8192 / w1 / h0;
- if (mismatch > 8192 * AP1302_MAX_RATIO_MISMATCH / 100)
- continue;
- distance = (w0 * h1 + w1 * h0) * 8192 / w1 / h1;
- if (distance < min_distance) {
- min_distance = distance;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static s32 ap1302_try_mbus_fmt_locked(struct v4l2_subdev *sd,
- enum ap1302_contexts context,
- struct v4l2_mbus_framefmt *fmt)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct ap1302_res_struct *res_table;
- s32 res_num, idx = -1;
-
- res_table = dev->cntx_res[context].res_table;
- res_num = dev->cntx_res[context].res_num;
-
- if ((fmt->width <= res_table[res_num - 1].width) &&
- (fmt->height <= res_table[res_num - 1].height))
- idx = ap1302_match_resolution(&dev->cntx_res[context], fmt);
- if (idx == -1)
- idx = res_num - 1;
-
- fmt->width = res_table[idx].width;
- fmt->height = res_table[idx].height;
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
- return idx;
-}
-
-
-static int ap1302_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- s32 cur_res;
- if (format->pad)
- return -EINVAL;
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- res_table = dev->cntx_res[context].res_table;
- cur_res = dev->cntx_res[context].cur_res;
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
- fmt->width = res_table[cur_res].width;
- fmt->height = res_table[cur_res].height;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int ap1302_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct atomisp_input_stream_info *stream_info =
- (struct atomisp_input_stream_info *)fmt->reserved;
- enum ap1302_contexts context, main_context;
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
- mutex_lock(&dev->input_lock);
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- context = ap1302_get_context(sd);
- ap1302_try_mbus_fmt_locked(sd, context, fmt);
- cfg->try_fmt = *fmt;
- mutex_unlock(&dev->input_lock);
- return 0;
- }
- context = stream_to_context[stream_info->stream];
- dev_dbg(&client->dev, "ap1302_set_mbus_fmt. stream=%d context=%d\n",
- stream_info->stream, context);
- dev->cntx_res[context].cur_res =
- ap1302_try_mbus_fmt_locked(sd, context, fmt);
- dev->cntx_config[context].width = fmt->width;
- dev->cntx_config[context].height = fmt->height;
- ap1302_write_context_reg(sd, context, CNTX_WIDTH, AP1302_REG16);
- ap1302_write_context_reg(sd, context, CNTX_HEIGHT, AP1302_REG16);
- ap1302_read_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
- dev->cntx_config[context].out_fmt &= ~OUT_FMT_TYPE_MASK;
- dev->cntx_config[context].out_fmt |= AP1302_FMT_UYVY422;
- ap1302_write_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
-
- main_context = ap1302_get_context(sd);
- if (context == main_context) {
- ap1302_read_context_reg(sd, context,
- CNTX_MIPI_CTRL, AP1302_REG16);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (context << MIPI_CTRL_IMGVC_OFFSET);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSVC_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (context << MIPI_CTRL_SSVC_OFFSET);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSTYPE_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (0x12 << MIPI_CTRL_SSTYPE_OFFSET);
- ap1302_write_context_reg(sd, context,
- CNTX_MIPI_CTRL, AP1302_REG16);
- ap1302_read_context_reg(sd, context,
- CNTX_SS, AP1302_REG16);
- dev->cntx_config[context].ss = AP1302_SS_CTRL;
- ap1302_write_context_reg(sd, context,
- CNTX_SS, AP1302_REG16);
- } else {
- /* Configure aux stream */
- ap1302_read_context_reg(sd, context,
- CNTX_MIPI_II_CTRL, AP1302_REG16);
- dev->cntx_config[context].mipi_ii_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
- dev->cntx_config[context].mipi_ii_ctrl |=
- (context << MIPI_CTRL_IMGVC_OFFSET);
- ap1302_write_context_reg(sd, context,
- CNTX_MIPI_II_CTRL, AP1302_REG16);
- if (stream_info->enable) {
- ap1302_read_context_reg(sd, main_context,
- CNTX_OUT_FMT, AP1302_REG16);
- dev->cntx_config[context].out_fmt |=
- (aux_stream_config[main_context][context]
- << OUT_FMT_IIS_OFFSET);
- ap1302_write_context_reg(sd, main_context,
- CNTX_OUT_FMT, AP1302_REG16);
- }
- }
- stream_info->ch_id = context;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-
-static int ap1302_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- u32 cur_res;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- res_table = dev->cntx_res[context].res_table;
- cur_res = dev->cntx_res[context].cur_res;
- interval->interval.denominator = res_table[cur_res].fps;
- interval->interval.numerator = 1;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int ap1302_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- int index = fse->index;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- if (index >= dev->cntx_res[context].res_num) {
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
- res_table = dev->cntx_res[context].res_table;
- fse->min_width = res_table[index].width;
- fse->min_height = res_table[index].height;
- fse->max_width = res_table[index].width;
- fse->max_height = res_table[index].height;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-
-static int ap1302_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- *frames = 0;
- return 0;
-}
-
-static int ap1302_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- enum ap1302_contexts context;
- u32 reg_val;
- int ret;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- dev_dbg(&client->dev, "ap1302_s_stream. context=%d enable=%d\n",
- context, enable);
- /* Switch context */
- ap1302_i2c_read_reg(sd, REG_CTRL,
- AP1302_REG16, &reg_val);
- reg_val &= ~CTRL_CNTX_MASK;
- reg_val |= (context<<CTRL_CNTX_OFFSET);
- ap1302_i2c_write_reg(sd, REG_CTRL,
- AP1302_REG16, reg_val);
- /* Select sensor */
- ap1302_i2c_read_reg(sd, REG_SENSOR_SELECT,
- AP1302_REG16, &reg_val);
- reg_val &= ~SENSOR_SELECT_MASK;
- reg_val |= (AP1302_SENSOR_PRI<<SENSOR_SELECT_OFFSET);
- ap1302_i2c_write_reg(sd, REG_SENSOR_SELECT,
- AP1302_REG16, reg_val);
- if (enable) {
- dev_info(&client->dev, "Start stream. context=%d\n", context);
- ap1302_dump_context_reg(sd, context);
- if (!dev->sys_activated) {
- reg_val = AP1302_SYS_ACTIVATE;
- dev->sys_activated = 1;
- } else {
- reg_val = AP1302_SYS_SWITCH;
- }
- } else {
- dev_info(&client->dev, "Stop stream. context=%d\n", context);
- reg_val = AP1302_SYS_SWITCH;
- }
- ret = ap1302_i2c_write_reg(sd, REG_SYS_START, AP1302_REG16, reg_val);
- if (ret)
- dev_err(&client->dev,
- "AP1302 set stream failed. enable=%d\n", enable);
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static u16 ap1302_ev_values[] = {0xfd00, 0xfe80, 0x0, 0x180, 0x300};
-
-static int ap1302_set_exposure_off(struct v4l2_subdev *sd, s32 val)
-{
- val -= AP1302_MIN_EV;
- return ap1302_i2c_write_reg(sd, REG_AE_BV_OFF, AP1302_REG16,
- ap1302_ev_values[val]);
-}
-
-static u16 ap1302_wb_values[] = {
- 0, /* V4L2_WHITE_BALANCE_MANUAL */
- 0xf, /* V4L2_WHITE_BALANCE_AUTO */
- 0x2, /* V4L2_WHITE_BALANCE_INCANDESCENT */
- 0x4, /* V4L2_WHITE_BALANCE_FLUORESCENT */
- 0x5, /* V4L2_WHITE_BALANCE_FLUORESCENT_H */
- 0x1, /* V4L2_WHITE_BALANCE_HORIZON */
- 0x5, /* V4L2_WHITE_BALANCE_DAYLIGHT */
- 0xf, /* V4L2_WHITE_BALANCE_FLASH */
- 0x6, /* V4L2_WHITE_BALANCE_CLOUDY */
- 0x6, /* V4L2_WHITE_BALANCE_SHADE */
-};
-
-static int ap1302_set_wb_mode(struct v4l2_subdev *sd, s32 val)
-{
- int ret = 0;
- u16 reg_val;
-
- ret = ap1302_i2c_read_reg(sd, REG_AWB_CTRL, AP1302_REG16, &reg_val);
- if (ret)
- return ret;
- reg_val &= ~AWB_CTRL_MODE_MASK;
- reg_val |= ap1302_wb_values[val] << AWB_CTRL_MODE_OFFSET;
- if (val == V4L2_WHITE_BALANCE_FLASH)
- reg_val |= AWB_CTRL_FLASH_MASK;
- else
- reg_val &= ~AWB_CTRL_FLASH_MASK;
- ret = ap1302_i2c_write_reg(sd, REG_AWB_CTRL, AP1302_REG16, reg_val);
- return ret;
-}
-
-static int ap1302_set_zoom(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_DZ_TGT_FCT, AP1302_REG16,
- val * 4 + 0x100);
- return 0;
-}
-
-static u16 ap1302_sfx_values[] = {
- 0x00, /* V4L2_COLORFX_NONE */
- 0x03, /* V4L2_COLORFX_BW */
- 0x0d, /* V4L2_COLORFX_SEPIA */
- 0x07, /* V4L2_COLORFX_NEGATIVE */
- 0x04, /* V4L2_COLORFX_EMBOSS */
- 0x0f, /* V4L2_COLORFX_SKETCH */
- 0x08, /* V4L2_COLORFX_SKY_BLUE */
- 0x09, /* V4L2_COLORFX_GRASS_GREEN */
- 0x0a, /* V4L2_COLORFX_SKIN_WHITEN */
- 0x00, /* V4L2_COLORFX_VIVID */
- 0x00, /* V4L2_COLORFX_AQUA */
- 0x00, /* V4L2_COLORFX_ART_FREEZE */
- 0x00, /* V4L2_COLORFX_SILHOUETTE */
- 0x10, /* V4L2_COLORFX_SOLARIZATION */
- 0x02, /* V4L2_COLORFX_ANTIQUE */
- 0x00, /* V4L2_COLORFX_SET_CBCR */
-};
-
-static int ap1302_set_special_effect(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_SFX_MODE, AP1302_REG16,
- ap1302_sfx_values[val]);
- return 0;
-}
-
-static u16 ap1302_scene_mode_values[] = {
- 0x00, /* V4L2_SCENE_MODE_NONE */
- 0x07, /* V4L2_SCENE_MODE_BACKLIGHT */
- 0x0a, /* V4L2_SCENE_MODE_BEACH_SNOW */
- 0x06, /* V4L2_SCENE_MODE_CANDLE_LIGHT */
- 0x00, /* V4L2_SCENE_MODE_DAWN_DUSK */
- 0x00, /* V4L2_SCENE_MODE_FALL_COLORS */
- 0x0d, /* V4L2_SCENE_MODE_FIREWORKS */
- 0x02, /* V4L2_SCENE_MODE_LANDSCAPE */
- 0x05, /* V4L2_SCENE_MODE_NIGHT */
- 0x0c, /* V4L2_SCENE_MODE_PARTY_INDOOR */
- 0x01, /* V4L2_SCENE_MODE_PORTRAIT */
- 0x03, /* V4L2_SCENE_MODE_SPORTS */
- 0x0e, /* V4L2_SCENE_MODE_SUNSET */
- 0x0b, /* V4L2_SCENE_MODE_TEXT */
-};
-
-static int ap1302_set_scene_mode(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_SCENE_CTRL, AP1302_REG16,
- ap1302_scene_mode_values[val]);
- return 0;
-}
-
-static u16 ap1302_flicker_values[] = {
- 0x0, /* OFF */
- 0x3201, /* 50HZ */
- 0x3c01, /* 60HZ */
- 0x2 /* AUTO */
-};
-
-static int ap1302_set_flicker_freq(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_FLICK_CTRL, AP1302_REG16,
- ap1302_flicker_values[val]);
- return 0;
-}
-
-static int ap1302_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ap1302_device *dev = container_of(
- ctrl->handler, struct ap1302_device, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_RUN_MODE:
- dev->cur_context = ap1302_cntx_mapping[ctrl->val];
- break;
- case V4L2_CID_EXPOSURE:
- ap1302_set_exposure_off(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
- ap1302_set_wb_mode(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_ZOOM_ABSOLUTE:
- ap1302_set_zoom(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_COLORFX:
- ap1302_set_special_effect(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_SCENE_MODE:
- ap1302_set_scene_mode(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- ap1302_set_flicker_freq(&dev->sd, ctrl->val);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ap1302_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- u32 reg_val;
-
- if (reg->size != AP1302_REG16 &&
- reg->size != AP1302_REG32)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- if (dev->power_on)
- ret = ap1302_i2c_read_reg(sd, reg->reg, reg->size, &reg_val);
- else
- ret = -EIO;
- mutex_unlock(&dev->input_lock);
- if (ret)
- return ret;
-
- reg->val = reg_val;
-
- return 0;
-}
-
-static int ap1302_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
-
- if (reg->size != AP1302_REG16 &&
- reg->size != AP1302_REG32)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- if (dev->power_on)
- ret = ap1302_i2c_write_reg(sd, reg->reg, reg->size, reg->val);
- else
- ret = -EIO;
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static long ap1302_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- long ret = 0;
- switch (cmd) {
- case VIDIOC_DBG_G_REGISTER:
- ret = ap1302_g_register(sd, arg);
- break;
- case VIDIOC_DBG_S_REGISTER:
- ret = ap1302_s_register(sd, arg);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = ap1302_s_ctrl,
-};
-
-static const char * const ctrl_run_mode_menu[] = {
- NULL,
- "Video",
- "Still capture",
- "Continuous capture",
- "Preview",
-};
-
-static const struct v4l2_ctrl_config ctrls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_RUN_MODE,
- .name = "Run Mode",
- .type = V4L2_CTRL_TYPE_MENU,
- .min = 1,
- .def = 4,
- .max = 4,
- .qmenu = ctrl_run_mode_menu,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE,
- .name = "Exposure",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = AP1302_MIN_EV,
- .def = 0,
- .max = AP1302_MAX_EV,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
- .name = "White Balance",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 9,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ZOOM_ABSOLUTE,
- .name = "Zoom Absolute",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 1024,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_COLORFX,
- .name = "Color Special Effect",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 15,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_SCENE_MODE,
- .name = "Scene Mode",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 13,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .name = "Light frequency filter",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 3,
- .max = 3,
- .step = 1,
- },
-};
-
-static const struct v4l2_subdev_sensor_ops ap1302_sensor_ops = {
- .g_skip_frames = ap1302_g_skip_frames,
-};
-
-static const struct v4l2_subdev_video_ops ap1302_video_ops = {
- .s_stream = ap1302_s_stream,
- .g_frame_interval = ap1302_g_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops ap1302_core_ops = {
- .s_power = ap1302_s_power,
- .ioctl = ap1302_ioctl,
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ap1302_g_register,
- .s_register = ap1302_s_register,
-#endif
-};
-
-static const struct v4l2_subdev_pad_ops ap1302_pad_ops = {
- .enum_mbus_code = ap1302_enum_mbus_code,
- .enum_frame_size = ap1302_enum_frame_size,
- .get_fmt = ap1302_get_fmt,
- .set_fmt = ap1302_set_fmt,
-};
-
-static const struct v4l2_subdev_ops ap1302_ops = {
- .core = &ap1302_core_ops,
- .pad = &ap1302_pad_ops,
- .video = &ap1302_video_ops,
- .sensor = &ap1302_sensor_ops
-};
-
-static int ap1302_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ap1302_device *dev = to_ap1302_device(sd);
-
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
- release_firmware(dev->fw);
-
- media_entity_cleanup(&dev->sd.entity);
- dev->platform_data->csi_cfg(sd, 0);
- v4l2_device_unregister_subdev(sd);
-
- return 0;
-}
-
-static int ap1302_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ap1302_device *dev;
- int ret;
- unsigned int i;
-
- dev_info(&client->dev, "ap1302 probe called.\n");
-
- /* allocate device & init sub device */
- dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- mutex_init(&dev->input_lock);
-
- v4l2_i2c_subdev_init(&(dev->sd), client, &ap1302_ops);
-
- ret = ap1302_request_firmware(&(dev->sd));
- if (ret) {
- dev_err(&client->dev, "Cannot request ap1302 firmware.\n");
- goto out_free;
- }
-
- dev->regmap16 = devm_regmap_init_i2c(client, &ap1302_reg16_config);
- if (IS_ERR(dev->regmap16)) {
- ret = PTR_ERR(dev->regmap16);
- dev_err(&client->dev,
- "Failed to allocate 16bit register map: %d\n", ret);
- return ret;
- }
-
- dev->regmap32 = devm_regmap_init_i2c(client, &ap1302_reg32_config);
- if (IS_ERR(dev->regmap32)) {
- ret = PTR_ERR(dev->regmap32);
- dev_err(&client->dev,
- "Failed to allocate 32bit register map: %d\n", ret);
- return ret;
- }
-
- if (client->dev.platform_data) {
- ret = ap1302_s_config(&dev->sd, client->dev.platform_data);
- if (ret)
- goto out_free;
- }
-
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- dev->cntx_res[CONTEXT_PREVIEW].res_num = ARRAY_SIZE(ap1302_preview_res);
- dev->cntx_res[CONTEXT_PREVIEW].res_table = ap1302_preview_res;
- dev->cntx_res[CONTEXT_SNAPSHOT].res_num =
- ARRAY_SIZE(ap1302_snapshot_res);
- dev->cntx_res[CONTEXT_SNAPSHOT].res_table = ap1302_snapshot_res;
- dev->cntx_res[CONTEXT_VIDEO].res_num = ARRAY_SIZE(ap1302_video_res);
- dev->cntx_res[CONTEXT_VIDEO].res_table = ap1302_video_res;
-
- ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ctrls));
- if (ret) {
- ap1302_remove(client);
- return ret;
- }
-
- for (i = 0; i < ARRAY_SIZE(ctrls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler, &ctrls[i], NULL);
-
- if (dev->ctrl_handler.error) {
- ap1302_remove(client);
- return dev->ctrl_handler.error;
- }
-
- /* Use same lock for controls as for everything else. */
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = &dev->ctrl_handler;
- v4l2_ctrl_handler_setup(&dev->ctrl_handler);
-
- dev->run_mode = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_RUN_MODE);
- v4l2_ctrl_s_ctrl(dev->run_mode, ATOMISP_RUN_MODE_PREVIEW);
-
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret)
- ap1302_remove(client);
- return ret;
-out_free:
- v4l2_device_unregister_subdev(&dev->sd);
- return ret;
-}
-
-static const struct i2c_device_id ap1302_id[] = {
- {AP1302_NAME, 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, ap1302_id);
-
-static struct i2c_driver ap1302_driver = {
- .driver = {
- .name = AP1302_NAME,
- },
- .probe = ap1302_probe,
- .remove = ap1302_remove,
- .id_table = ap1302_id,
-};
-
-module_i2c_driver(ap1302_driver);
-
-MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
-MODULE_DESCRIPTION("AP1302 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h
deleted file mode 100644
index 4d0b181a9671..000000000000
--- a/drivers/staging/media/atomisp/i2c/ap1302.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __AP1302_H__
-#define __AP1302_H__
-
-#include "../include/linux/atomisp_platform.h"
-#include <linux/regmap.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-
-#define AP1302_NAME "ap1302"
-#define AP1302_CHIP_ID 0x265
-#define AP1302_I2C_MAX_LEN 65534
-#define AP1302_FW_WINDOW_OFFSET 0x8000
-#define AP1302_FW_WINDOW_SIZE 0x2000
-
-#define AP1302_REG16 2
-#define AP1302_REG32 4
-
-#define REG_CHIP_VERSION 0x0000
-#define REG_CHIP_REV 0x0050
-#define REG_MF_ID 0x0004
-#define REG_ERROR 0x0006
-#define REG_CTRL 0x1000
-#define REG_DZ_TGT_FCT 0x1010
-#define REG_SFX_MODE 0x1016
-#define REG_SS_HEAD_PT0 0x1174
-#define REG_AE_BV_OFF 0x5014
-#define REG_AE_BV_BIAS 0x5016
-#define REG_AWB_CTRL 0x5100
-#define REG_FLICK_CTRL 0x5440
-#define REG_SCENE_CTRL 0x5454
-#define REG_BOOTDATA_STAGE 0x6002
-#define REG_SENSOR_SELECT 0x600C
-#define REG_SYS_START 0x601A
-#define REG_SIP_CRC 0xF052
-
-#define REG_PREVIEW_BASE 0x2000
-#define REG_SNAPSHOT_BASE 0x3000
-#define REG_VIDEO_BASE 0x4000
-#define CNTX_WIDTH 0x00
-#define CNTX_HEIGHT 0x02
-#define CNTX_ROI_X0 0x04
-#define CNTX_ROI_Y0 0x06
-#define CNTX_ROI_X1 0x08
-#define CNTX_ROI_Y1 0x0A
-#define CNTX_ASPECT 0x0C
-#define CNTX_LOCK 0x0E
-#define CNTX_ENABLE 0x10
-#define CNTX_OUT_FMT 0x12
-#define CNTX_SENSOR_MODE 0x14
-#define CNTX_MIPI_CTRL 0x16
-#define CNTX_MIPI_II_CTRL 0x18
-#define CNTX_LINE_TIME 0x1C
-#define CNTX_MAX_FPS 0x20
-#define CNTX_AE_USG 0x22
-#define CNTX_AE_UPPER_ET 0x24
-#define CNTX_AE_MAX_ET 0x28
-#define CNTX_SS 0x2C
-#define CNTX_S1_SENSOR_MODE 0x2E
-#define CNTX_HINF_CTRL 0x30
-
-#define CTRL_CNTX_MASK 0x03
-#define CTRL_CNTX_OFFSET 0x00
-#define HINF_CTRL_LANE_MASK 0x07
-#define HINF_CTRL_LANE_OFFSET 0x00
-#define MIPI_CTRL_IMGVC_MASK 0xC0
-#define MIPI_CTRL_IMGVC_OFFSET 0x06
-#define MIPI_CTRL_IMGTYPE_AUTO 0x3F
-#define MIPI_CTRL_SSVC_MASK 0xC000
-#define MIPI_CTRL_SSVC_OFFSET 0x0E
-#define MIPI_CTRL_SSTYPE_MASK 0x3F00
-#define MIPI_CTRL_SSTYPE_OFFSET 0x08
-#define OUT_FMT_IIS_MASK 0x30
-#define OUT_FMT_IIS_OFFSET 0x08
-#define OUT_FMT_SS_MASK 0x1000
-#define OUT_FMT_SS_OFFSET 0x12
-#define OUT_FMT_TYPE_MASK 0xFF
-#define SENSOR_SELECT_MASK 0x03
-#define SENSOR_SELECT_OFFSET 0x00
-#define AWB_CTRL_MODE_MASK 0x0F
-#define AWB_CTRL_MODE_OFFSET 0x00
-#define AWB_CTRL_FLASH_MASK 0x100
-
-#define AP1302_FMT_UYVY422 0x50
-
-#define AP1302_SYS_ACTIVATE 0x8010
-#define AP1302_SYS_SWITCH 0x8140
-#define AP1302_SENSOR_PRI 0x01
-#define AP1302_SENSOR_SEC 0x02
-#define AP1302_SS_CTRL 0x31
-
-#define AP1302_MAX_RATIO_MISMATCH 10 /* Unit in percentage */
-#define AP1302_MAX_EV 2
-#define AP1302_MIN_EV -2
-
-enum ap1302_contexts {
- CONTEXT_PREVIEW = 0,
- CONTEXT_SNAPSHOT,
- CONTEXT_VIDEO,
- CONTEXT_NUM
-};
-
-/* The context registers are defined according to preview/video registers.
- Preview and video context have the same register definition.
- But snapshot context does not have register S1_SENSOR_MODE.
- When setting snapshot registers, if the offset exceeds
- S1_SENSOR_MODE, the actual offset needs to minus 2. */
-struct ap1302_context_config {
- u16 width;
- u16 height;
- u16 roi_x0;
- u16 roi_y0;
- u16 roi_x1;
- u16 roi_y1;
- u16 aspect_factor;
- u16 lock;
- u16 enable;
- u16 out_fmt;
- u16 sensor_mode;
- u16 mipi_ctrl;
- u16 mipi_ii_ctrl;
- u16 padding;
- u32 line_time;
- u16 max_fps;
- u16 ae_usg;
- u32 ae_upper_et;
- u32 ae_max_et;
- u16 ss;
- u16 s1_sensor_mode;
- u16 hinf_ctrl;
- u32 reserved;
-};
-
-struct ap1302_res_struct {
- u16 width;
- u16 height;
- u16 fps;
-};
-
-struct ap1302_context_res {
- u32 res_num;
- u32 cur_res;
- struct ap1302_res_struct *res_table;
-};
-
-struct ap1302_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct camera_sensor_platform_data *platform_data;
- const struct firmware *fw;
- struct mutex input_lock; /* serialize sensor's ioctl */
- struct v4l2_mbus_framefmt format;
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *run_mode;
- struct ap1302_context_config cntx_config[CONTEXT_NUM];
- struct ap1302_context_res cntx_res[CONTEXT_NUM];
- enum ap1302_contexts cur_context;
- unsigned int num_lanes;
- struct regmap *regmap16;
- struct regmap *regmap32;
- bool sys_activated;
- bool power_on;
-};
-
-struct ap1302_firmware {
- u32 crc;
- u32 pll_init_size;
- u32 total_size;
- u32 reserved;
-};
-
-struct ap1302_context_info {
- u16 offset;
- u16 len;
- char *name;
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 35ed51ffe944..e70d8afcc229 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -738,10 +737,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
/* The upstream module driver (written to Crystal
* Cove) had this logic to pulse the rails low first.
@@ -772,10 +767,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* GPIO0 == "reset" (active low), GPIO1 == "power down" */
if (flag) {
/* Pulse reset, then release power down */
@@ -1165,13 +1156,6 @@ static int gc0310_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
* power on sequqence needed by the module
@@ -1216,9 +1200,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1362,9 +1343,6 @@ static int gc0310_remove(struct i2c_client *client)
struct gc0310_device *dev = to_gc0310_sensor(sd);
dev_dbg(&client->dev, "gc0310_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_device_unregister_subdev(sd);
@@ -1375,8 +1353,7 @@ static int gc0310_remove(struct i2c_client *client)
return 0;
}
-static int gc0310_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gc0310_probe(struct i2c_client *client)
{
struct gc0310_device *dev;
int ret;
@@ -1385,10 +1362,8 @@ static int gc0310_probe(struct i2c_client *client,
pr_info("%s S\n", __func__);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1457,33 +1432,17 @@ static const struct acpi_device_id gc0310_acpi_match[] = {
{"INT0310"},
{},
};
-
MODULE_DEVICE_TABLE(acpi, gc0310_acpi_match);
-MODULE_DEVICE_TABLE(i2c, gc0310_id);
static struct i2c_driver gc0310_driver = {
.driver = {
- .name = GC0310_NAME,
- .acpi_match_table = ACPI_PTR(gc0310_acpi_match),
+ .name = "gc0310",
+ .acpi_match_table = gc0310_acpi_match,
},
- .probe = gc0310_probe,
+ .probe_new = gc0310_probe,
.remove = gc0310_remove,
- .id_table = gc0310_id,
};
-
-static int init_gc0310(void)
-{
- return i2c_add_driver(&gc0310_driver);
-}
-
-static void exit_gc0310(void)
-{
-
- i2c_del_driver(&gc0310_driver);
-}
-
-module_init(init_gc0310);
-module_exit(exit_gc0310);
+module_i2c_driver(gc0310_driver);
MODULE_AUTHOR("Lai, Angie <angie.lai@intel.com>");
MODULE_DESCRIPTION("A low-level driver for GalaxyCore GC0310 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index e43d31ea9676..85da5fe24033 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include "../include/linux/atomisp_gmin_platform.h"
@@ -548,10 +547,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v1p8_ctrl(sd, 1);
usleep_range(60, 90);
@@ -572,10 +567,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
ret |= dev->platform_data->gpio1_ctrl(sd, !flag);
usleep_range(60, 90);
return dev->platform_data->gpio0_ctrl(sd, flag);
@@ -906,13 +897,6 @@ static int gc2235_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
* power on sequqence needed by the module
@@ -956,9 +940,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1101,9 +1082,6 @@ static int gc2235_remove(struct i2c_client *client)
struct gc2235_device *dev = to_gc2235_sensor(sd);
dev_dbg(&client->dev, "gc2235_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_device_unregister_subdev(sd);
@@ -1114,8 +1092,7 @@ static int gc2235_remove(struct i2c_client *client)
return 0;
}
-static int gc2235_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gc2235_probe(struct i2c_client *client)
{
struct gc2235_device *dev;
void *gcpdev;
@@ -1123,10 +1100,8 @@ static int gc2235_probe(struct i2c_client *client,
unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1187,32 +1162,17 @@ static const struct acpi_device_id gc2235_acpi_match[] = {
{ "INT33F8" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match);
-MODULE_DEVICE_TABLE(i2c, gc2235_id);
+
static struct i2c_driver gc2235_driver = {
.driver = {
- .name = GC2235_NAME,
- .acpi_match_table = ACPI_PTR(gc2235_acpi_match),
+ .name = "gc2235",
+ .acpi_match_table = gc2235_acpi_match,
},
- .probe = gc2235_probe,
+ .probe_new = gc2235_probe,
.remove = gc2235_remove,
- .id_table = gc2235_id,
};
-
-static int init_gc2235(void)
-{
- return i2c_add_driver(&gc2235_driver);
-}
-
-static void exit_gc2235(void)
-{
-
- i2c_del_driver(&gc2235_driver);
-}
-
-module_init(init_gc2235);
-module_exit(exit_gc2235);
+module_i2c_driver(gc2235_driver);
MODULE_AUTHOR("Shuguang Gong <Shuguang.Gong@intel.com>");
MODULE_DESCRIPTION("A low-level driver for GC2235 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
index decb65cfd7c9..81e5ec0c2b64 100644
--- a/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/i2c.h>
diff --git a/drivers/staging/media/atomisp/i2c/lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index 679176f7c542..4fd9f538ac95 100644
--- a/drivers/staging/media/atomisp/i2c/lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -171,10 +167,9 @@ static int lm3554_set_config1(struct lm3554 *flash)
/* -----------------------------------------------------------------------------
* Hardware trigger
*/
-static void lm3554_flash_off_delay(long unsigned int arg)
+static void lm3554_flash_off_delay(struct timer_list *t)
{
- struct v4l2_subdev *sd = i2c_get_clientdata((struct i2c_client *)arg);
- struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554 *flash = from_timer(flash, t, flash_off_delay);
struct lm3554_platform_data *pdata = flash->pdata;
gpio_set_value(pdata->gpio_strobe, 0);
@@ -862,8 +857,7 @@ static void *lm3554_platform_data_func(struct i2c_client *client)
return &platform_data;
}
-static int lm3554_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3554_probe(struct i2c_client *client)
{
int err = 0;
struct lm3554 *flash;
@@ -871,10 +865,8 @@ static int lm3554_probe(struct i2c_client *client,
int ret;
flash = kzalloc(sizeof(*flash), GFP_KERNEL);
- if (!flash) {
- dev_err(&client->dev, "out of memory\n");
+ if (!flash)
return -ENOMEM;
- }
flash->pdata = client->dev.platform_data;
@@ -915,8 +907,7 @@ static int lm3554_probe(struct i2c_client *client,
mutex_init(&flash->power_lock);
- setup_timer(&flash->flash_off_delay, lm3554_flash_off_delay,
- (unsigned long)client);
+ timer_setup(&flash->flash_off_delay, lm3554_flash_off_delay, 0);
err = lm3554_gpio_init(client);
if (err) {
@@ -962,13 +953,6 @@ fail:
return ret;
}
-static const struct i2c_device_id lm3554_id[] = {
- {LM3554_NAME, 0},
- {},
-};
-
-MODULE_DEVICE_TABLE(i2c, lm3554_id);
-
static const struct dev_pm_ops lm3554_pm_ops = {
.suspend = lm3554_suspend,
.resume = lm3554_resume,
@@ -978,32 +962,19 @@ static const struct acpi_device_id lm3554_acpi_match[] = {
{ "INTCF1C" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, lm3554_acpi_match);
static struct i2c_driver lm3554_driver = {
.driver = {
- .name = LM3554_NAME,
+ .name = "lm3554",
.pm = &lm3554_pm_ops,
- .acpi_match_table = ACPI_PTR(lm3554_acpi_match),
+ .acpi_match_table = lm3554_acpi_match,
},
- .probe = lm3554_probe,
+ .probe_new = lm3554_probe,
.remove = lm3554_remove,
- .id_table = lm3554_id,
};
+module_i2c_driver(lm3554_driver);
-static __init int init_lm3554(void)
-{
- return i2c_add_driver(&lm3554_driver);
-}
-
-static __exit void exit_lm3554(void)
-{
- i2c_del_driver(&lm3554_driver);
-}
-
-module_init(init_lm3554);
-module_exit(exit_lm3554);
MODULE_AUTHOR("Jing Tao <jing.tao@intel.com>");
MODULE_DESCRIPTION("LED flash driver for LM3554");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 3c837cb8859c..55882bea2049 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -32,7 +28,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/acpi.h>
#include "../include/linux/atomisp_gmin_platform.h"
#include <media/v4l2-device.h>
@@ -455,10 +450,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v2p8_ctrl(sd, 1);
if (ret == 0) {
@@ -481,10 +472,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* Note: current modules wire only one GPIO signal (RESET#),
* but the schematic wires up two to the connector. BIOS
* versions have been unfortunately inconsistent with which
@@ -1584,13 +1571,6 @@ mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
dev->platform_data =
(struct camera_sensor_platform_data *)platform_data;
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- v4l2_err(client, "mt9m114 platform init err\n");
- return ret;
- }
- }
ret = power_up(sd);
if (ret) {
v4l2_err(client, "mt9m114 power-up err");
@@ -1844,8 +1824,6 @@ static int mt9m114_remove(struct i2c_client *client)
dev = container_of(sd, struct mt9m114_device, sd);
dev->platform_data->csi_cfg(sd, 0);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
@@ -1853,8 +1831,7 @@ static int mt9m114_remove(struct i2c_client *client)
return 0;
}
-static int mt9m114_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mt9m114_probe(struct i2c_client *client)
{
struct mt9m114_device *dev;
int ret = 0;
@@ -1863,10 +1840,8 @@ static int mt9m114_probe(struct i2c_client *client,
/* Setup sensor configuration structure */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops);
pdata = client->dev.platform_data;
@@ -1926,38 +1901,22 @@ static int mt9m114_probe(struct i2c_client *client,
return 0;
}
-MODULE_DEVICE_TABLE(i2c, mt9m114_id);
-
static const struct acpi_device_id mt9m114_acpi_match[] = {
{ "INT33F0" },
{ "CRMT1040" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match);
static struct i2c_driver mt9m114_driver = {
.driver = {
.name = "mt9m114",
- .acpi_match_table = ACPI_PTR(mt9m114_acpi_match),
+ .acpi_match_table = mt9m114_acpi_match,
},
- .probe = mt9m114_probe,
+ .probe_new = mt9m114_probe,
.remove = mt9m114_remove,
- .id_table = mt9m114_id,
};
-
-static __init int init_mt9m114(void)
-{
- return i2c_add_driver(&mt9m114_driver);
-}
-
-static __exit void exit_mt9m114(void)
-{
- i2c_del_driver(&mt9m114_driver);
-}
-
-module_init(init_mt9m114);
-module_exit(exit_mt9m114);
+module_i2c_driver(mt9m114_driver);
MODULE_AUTHOR("Shuguang Gong <Shuguang.gong@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 51b7d61df0f5..cd67d38f183a 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -253,8 +252,8 @@ static int ov2680_write_reg_array(struct i2c_client *client,
if (!__ov2680_write_reg_is_consecutive(client, &ctrl,
next)) {
err = __ov2680_flush_reg_array(client, &ctrl);
- if (err)
- return err;
+ if (err)
+ return err;
}
err = __ov2680_buf_reg_array(client, &ctrl, next);
if (err) {
@@ -399,7 +398,9 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
u16 vts,hts;
int ret,exp_val;
- dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain);
+ dev_dbg(&client->dev,
+ "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",
+ coarse_itg, gain, digitgain);
hts = ov2680_res[dev->fmt_idx].pixels_per_line;
vts = ov2680_res[dev->fmt_idx].lines_per_frame;
@@ -847,10 +848,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret |= dev->platform_data->v1p8_ctrl(sd, 1);
ret |= dev->platform_data->v2p8_ctrl(sd, 1);
@@ -872,10 +869,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* The OV2680 documents only one GPIO input (#XSHUTDN), but
* existing integrations often wire two (reset/power_down)
* because that is the way other sensors work. There is no
@@ -1438,8 +1431,7 @@ static int ov2680_remove(struct i2c_client *client)
return 0;
}
-static int ov2680_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2680_probe(struct i2c_client *client)
{
struct ov2680_device *dev;
int ret;
@@ -1447,10 +1439,8 @@ static int ov2680_probe(struct i2c_client *client,
unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1523,35 +1513,16 @@ static const struct acpi_device_id ov2680_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match);
-
-MODULE_DEVICE_TABLE(i2c, ov2680_id);
static struct i2c_driver ov2680_driver = {
.driver = {
- .owner = THIS_MODULE,
- .name = OV2680_NAME,
- .acpi_match_table = ACPI_PTR(ov2680_acpi_match),
-
+ .name = "ov2680",
+ .acpi_match_table = ov2680_acpi_match,
},
- .probe = ov2680_probe,
+ .probe_new = ov2680_probe,
.remove = ov2680_remove,
- .id_table = ov2680_id,
};
-
-static int init_ov2680(void)
-{
- return i2c_add_driver(&ov2680_driver);
-}
-
-static void exit_ov2680(void)
-{
-
- i2c_del_driver(&ov2680_driver);
-}
-
-module_init(init_ov2680);
-module_exit(exit_ov2680);
+module_i2c_driver(ov2680_driver);
MODULE_AUTHOR("Jacky Wang <Jacky_wang@ovt.com>");
MODULE_DESCRIPTION("A low-level driver for OmniVision 2680 sensors");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index 10094ac56561..4df7eba8d375 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include "../include/linux/atomisp_gmin_platform.h"
@@ -651,10 +650,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v1p8_ctrl(sd, 1);
if (ret == 0) {
@@ -678,10 +673,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* Note: the GPIO order is asymmetric: always RESET#
* before PWDN# when turning it on or off.
*/
@@ -1044,13 +1035,6 @@ static int ov2722_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
@@ -1095,9 +1079,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1241,9 +1222,6 @@ static int ov2722_remove(struct i2c_client *client)
struct ov2722_device *dev = to_ov2722_sensor(sd);
dev_dbg(&client->dev, "ov2722_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister_subdev(sd);
@@ -1276,8 +1254,7 @@ static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
return 0;
}
-static int ov2722_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2722_probe(struct i2c_client *client)
{
struct ov2722_device *dev;
void *ovpdev;
@@ -1285,10 +1262,8 @@ static int ov2722_probe(struct i2c_client *client,
struct acpi_device *adev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1335,38 +1310,21 @@ out_free:
return ret;
}
-MODULE_DEVICE_TABLE(i2c, ov2722_id);
-
static const struct acpi_device_id ov2722_acpi_match[] = {
{ "INT33FB" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match);
static struct i2c_driver ov2722_driver = {
.driver = {
- .name = OV2722_NAME,
- .acpi_match_table = ACPI_PTR(ov2722_acpi_match),
+ .name = "ov2722",
+ .acpi_match_table = ov2722_acpi_match,
},
- .probe = ov2722_probe,
+ .probe_new = ov2722_probe,
.remove = ov2722_remove,
- .id_table = ov2722_id,
};
-
-static int init_ov2722(void)
-{
- return i2c_add_driver(&ov2722_driver);
-}
-
-static void exit_ov2722(void)
-{
-
- i2c_del_driver(&ov2722_driver);
-}
-
-module_init(init_ov2722);
-module_exit(exit_ov2722);
+module_i2c_driver(ov2722_driver);
MODULE_AUTHOR("Wei Liu <wei.liu@intel.com>");
MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
index 7d8a0aeecb6c..c422d0398fc7 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -36,8 +32,6 @@
#include "../include/linux/atomisp_platform.h"
-#define GC0310_NAME "gc0310"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 1
#define I2C_RETRY_COUNT 5
@@ -196,11 +190,6 @@ struct gc0310_write_ctrl {
struct gc0310_write_buffer buffer;
};
-static const struct i2c_device_id gc0310_id[] = {
- {GC0310_NAME, 0},
- {}
-};
-
/*
* Register settings for various resolution
*/
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index a8d6aa9c9a5d..3c30a05c3991 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -33,8 +33,6 @@
#include "../include/linux/atomisp_platform.h"
-#define GC2235_NAME "gc2235"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 0x2
#define I2C_RETRY_COUNT 5
@@ -200,11 +198,6 @@ struct gc2235_write_ctrl {
struct gc2235_write_buffer buffer;
};
-static const struct i2c_device_id gc2235_id[] = {
- {GC2235_NAME, 0},
- {}
-};
-
static struct gc2235_reg const gc2235_stream_on[] = {
{ GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
{ GC2235_8BIT, 0x10, 0x91}, /* start mipi */
diff --git a/drivers/staging/media/atomisp/i2c/imx/Kconfig b/drivers/staging/media/atomisp/i2c/imx/Kconfig
deleted file mode 100644
index a39eeb3b6ad4..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config VIDEO_IMX
- tristate "sony imx sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_MSRLIST_HELPER && m
- ---help---
- This is a Video4Linux2 sensor-level driver for the Sony
- IMX RAW sensor.
-
- It currently depends on internal V4L2 extensions defined in
- atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
deleted file mode 100644
index c1a85e6e27a9..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_IMX) += imx1x5.o
-
-imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o otp_imx.o otp_brcc064_e2prom.o otp_e2prom.o
-
-ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
-obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
-
-# HACK! While this driver is in bad shape, don't enable several warnings
-# that would be otherwise enabled with W=1
-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
-ccflags-y += $(call cc-disable-warning, unused-const-variable)
-ccflags-y += $(call cc-disable-warning, missing-prototypes)
-ccflags-y += $(call cc-disable-warning, missing-declarations)
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c b/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
deleted file mode 100644
index fb74f14cbe5a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-
-#include "ad5816g.h"
-
-struct ad5816g_device ad5816g_dev;
-
-static int ad5816g_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
- buf[0] = reg;
- buf[1] = 0;
-
- msg[0].addr = AD5816G_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &buf[0];
-
- msg[1].addr = AD5816G_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
- return 0;
-}
-
-static int ad5816g_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2];
- buf[0] = reg;
- buf[1] = val;
- msg.addr = AD5816G_VCM_ADDR;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int ad5816g_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3];
- buf[0] = reg;
- buf[1] = (u8)(val >> 8);
- buf[2] = (u8)(val & 0xff);
- msg.addr = AD5816G_VCM_ADDR;
- msg.flags = 0;
- msg.len = 3;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int ad5816g_set_arc_mode(struct i2c_client *client)
-{
- int ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_CONTROL, AD5816G_ARC_EN);
- if (ret)
- return ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_MODE,
- AD5816G_MODE_2_5M_SWITCH_CLOCK);
- if (ret)
- return ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_VCM_FREQ, AD5816G_DEF_FREQ);
- return ret;
-}
-
-int ad5816g_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 ad5816g_id;
-
- /* Enable power */
- ret = ad5816g_dev.platform_data->power_ctrl(sd, 1);
- if (ret)
- return ret;
- /* waiting time AD5816G(vcm) - t1 + t2
- * t1(1ms) -Time from VDD high to first i2c cmd
- * t2(100us) - exit power-down mode time
- */
- usleep_range(1100, 2200);
- /* Detect device */
- ret = ad5816g_i2c_rd8(client, AD5816G_IC_INFO, &ad5816g_id);
- if (ret < 0)
- goto fail_powerdown;
- if (ad5816g_id != AD5816G_ID) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
- ret = ad5816g_set_arc_mode(client);
- if (ret)
- return ret;
-
- /* set the VCM_THRESHOLD */
- ret = ad5816g_i2c_wr8(client, AD5816G_VCM_THRESHOLD,
- AD5816G_DEF_THRESHOLD);
-
- return ret;
-
-fail_powerdown:
- ad5816g_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int ad5816g_vcm_power_down(struct v4l2_subdev *sd)
-{
- return ad5816g_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 data = val & VCM_CODE_MASK;
-
- return ad5816g_i2c_wr16(client, AD5816G_VCM_CODE_MSB, data);
-}
-
-int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, AD5816G_MAX_FOCUS_POS);
- ret = ad5816g_t_focus_vcm(sd, value);
- if (ret == 0) {
- ad5816g_dev.number_of_steps = value - ad5816g_dev.focus;
- ad5816g_dev.focus = value;
- getnstimeofday(&(ad5816g_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
-
- return ad5816g_t_focus_abs(sd, ad5816g_dev.focus + value);
-}
-
-int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(ad5816g_dev.number_of_steps) * DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (ad5816g_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- ad5816g_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = ad5816g_dev.focus - ad5816g_dev.number_of_steps;
- else
- *value = ad5816g_dev.focus;
-
- return 0;
-}
-
-int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.h b/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
deleted file mode 100644
index e1396b00a0e1..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __AD5816G_H__
-#define __AD5816G_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-#include <linux/time.h>
-
-#define AD5816G_VCM_ADDR 0x0e
-
-/* ad5816g device structure */
-struct ad5816g_device {
- const struct camera_af_platform_data *platform_data;
- struct timespec timestamp_t_focus_abs;
- struct timespec focus_time; /* Time when focus was last time set */
- s32 focus; /* Current focus value */
- s16 number_of_steps;
-};
-
-#define AD5816G_INVALID_CONFIG 0xffffffff
-#define AD5816G_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-/* Register Definitions */
-#define AD5816G_IC_INFO 0x00
-#define AD5816G_IC_VERSION 0x01
-#define AD5816G_CONTROL 0x02
-#define AD5816G_VCM_CODE_MSB 0x03
-#define AD5816G_VCM_CODE_LSB 0x04
-#define AD5816G_STATUS 0x05
-#define AD5816G_MODE 0x06
-#define AD5816G_VCM_FREQ 0x07
-#define AD5816G_VCM_THRESHOLD 0x08
-
-/* ARC MODE ENABLE */
-#define AD5816G_ARC_EN 0x02
-/* ARC RES2 MODE */
-#define AD5816G_ARC_RES2 0x01
-/* ARC VCM FREQ - 78.1Hz */
-#define AD5816G_DEF_FREQ 0x7a
-/* ARC VCM THRESHOLD - 0x08 << 1 */
-#define AD5816G_DEF_THRESHOLD 0x64
-#define AD5816G_ID 0x24
-#define VCM_CODE_MASK 0x03ff
-
-#define AD5816G_MODE_2_5M_SWITCH_CLOCK 0x14
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/common.h b/drivers/staging/media/atomisp/i2c/imx/common.h
deleted file mode 100644
index af2e3160df95..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/common.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#define MAX_FPS_OPTIONS_SUPPORTED 3
-#define I2C_MSG_LENGTH 0x2
-#define E2PROM_2ADDR 0x80000000
-#define E2PROM_ADDR_MASK 0x7fffffff
-
-/* Defines for register writes and register array processing */
-#define IMX_BYTE_MAX 32
-#define IMX_SHORT_MAX 16
-#define I2C_RETRY_COUNT 5
-#define IMX_TOK_MASK 0xfff0
-
-enum imx_tok_type {
- IMX_8BIT = 0x0001,
- IMX_16BIT = 0x0002,
- IMX_TOK_TERM = 0xf000, /* terminating token for reg list */
- IMX_TOK_DELAY = 0xfe00 /* delay token for reg list */
-};
-
-/**
- * struct imx_reg - MI sensor register format
- * @type: type of the register
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- *
- * Define a structure for sensor register initialization values
- */
-struct imx_reg {
- enum imx_tok_type type;
- u16 sreg;
- u32 val; /* @set value for read/mod/write, @mask */
-};
-
-struct imx_fps_setting {
- int fps;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- int mipi_freq; /* MIPI lane frequency in kHz */
- const struct imx_reg *regs; /* regs that the fps setting needs */
-};
-
-struct imx_resolution {
- const struct imx_fps_setting fps_options[MAX_FPS_OPTIONS_SUPPORTED];
- u8 *desc;
- const struct imx_reg *regs;
- int res;
- int width;
- int height;
- int fps;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- int mipi_freq; /* MIPI lane frequency in kHz */
- unsigned short skip_frames;
- u8 bin_factor_x;
- u8 bin_factor_y;
- bool used;
-};
-
-#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
-#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
-
-int imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val);
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.c b/drivers/staging/media/atomisp/i2c/imx/drv201.c
deleted file mode 100644
index 221e4875ac49..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/drv201.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-
-#include "drv201.h"
-
-static struct drv201_device drv201_dev;
-
-static int drv201_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
- buf[0] = reg;
- buf[1] = 0;
-
- msg[0].addr = DRV201_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &buf[0];
-
- msg[1].addr = DRV201_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
- return 0;
-}
-
-static int drv201_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2];
- buf[0] = reg;
- buf[1] = val;
- msg.addr = DRV201_VCM_ADDR;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int drv201_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3];
- buf[0] = reg;
- buf[1] = (u8)(val >> 8);
- buf[2] = (u8)(val & 0xff);
- msg.addr = DRV201_VCM_ADDR;
- msg.flags = 0;
- msg.len = 3;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-int drv201_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- /* Enable power */
- ret = drv201_dev.platform_data->power_ctrl(sd, 1);
- if (ret)
- return ret;
- /* Wait for VBAT to stabilize */
- udelay(1);
- /*
- * Jiggle SCL pin to wake up device.
- * Drv201 expect SCL from low to high to wake device up.
- * So the 1st access to i2c would fail.
- * Using following function to wake device up.
- */
- drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
-
- /* Need 100us to transit from SHUTDOWN to STANDBY*/
- usleep_range(WAKEUP_DELAY_US, WAKEUP_DELAY_US * 10);
-
- /* Reset device */
- ret = drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Detect device */
- ret = drv201_i2c_rd8(client, DRV201_CONTROL, &value);
- if (ret < 0)
- goto fail_powerdown;
- if (value != DEFAULT_CONTROL_VAL) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
-
- drv201_dev.focus = DRV201_MAX_FOCUS_POS;
- drv201_dev.initialized = true;
-
- return 0;
-fail_powerdown:
- drv201_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int drv201_vcm_power_down(struct v4l2_subdev *sd)
-{
- return drv201_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 data = val & VCM_CODE_MASK;
-
- if (!drv201_dev.initialized)
- return -ENODEV;
- return drv201_i2c_wr16(client, DRV201_VCM_CURRENT, data);
-}
-
-int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, DRV201_MAX_FOCUS_POS);
- ret = drv201_t_focus_vcm(sd, value);
- if (ret == 0) {
- drv201_dev.number_of_steps = value - drv201_dev.focus;
- drv201_dev.focus = value;
- getnstimeofday(&(drv201_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return drv201_t_focus_abs(sd, drv201_dev.focus + value);
-}
-
-int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(drv201_dev.number_of_steps)*DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (drv201_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- drv201_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = drv201_dev.focus - drv201_dev.number_of_steps;
- else
- *value = drv201_dev.focus;
-
- return 0;
-}
-
-int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.h b/drivers/staging/media/atomisp/i2c/imx/drv201.h
deleted file mode 100644
index 2ef8aafdf675..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/drv201.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRV201_H__
-#define __DRV201_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-#include <linux/time.h>
-
-#define DRV201_VCM_ADDR 0x0e
-
-/* drv201 device structure */
-struct drv201_device {
- const struct camera_af_platform_data *platform_data;
- struct timespec timestamp_t_focus_abs;
- struct timespec focus_time; /* Time when focus was last time set */
- s32 focus; /* Current focus value */
- s16 number_of_steps;
- bool initialized; /* true if drv201 is detected */
-};
-
-#define DRV201_INVALID_CONFIG 0xffffffff
-#define DRV201_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DRV201_CONTROL 2
-#define DRV201_VCM_CURRENT 3
-#define DRV201_STATUS 5
-#define DRV201_MODE 6
-#define DRV201_VCM_FREQ 7
-
-#define DEFAULT_CONTROL_VAL 2
-#define DRV201_RESET 1
-#define WAKEUP_DELAY_US 100
-#define VCM_CODE_MASK 0x03ff
-
-#endif
-
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.c b/drivers/staging/media/atomisp/i2c/imx/dw9714.c
deleted file mode 100644
index f96855454313..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9714.c
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-
-#include "dw9714.h"
-
-static struct dw9714_device dw9714_dev;
-static int dw9714_i2c_write(struct i2c_client *client, u16 data)
-{
- struct i2c_msg msg;
- const int num_msg = 1;
- int ret;
- u16 val;
-
- val = cpu_to_be16(data);
- msg.addr = DW9714_VCM_ADDR;
- msg.flags = 0;
- msg.len = DW9714_16BIT;
- msg.buf = (u8 *)&val;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
-
- return ret == num_msg ? 0 : -EIO;
-}
-
-int dw9714_vcm_power_up(struct v4l2_subdev *sd)
-{
- int ret;
-
- /* Enable power */
- ret = dw9714_dev.platform_data->power_ctrl(sd, 1);
- /* waiting time requested by DW9714A(vcm) */
- usleep_range(12000, 12500);
- return ret;
-}
-
-int dw9714_vcm_power_down(struct v4l2_subdev *sd)
-{
- return dw9714_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = -EINVAL;
- u8 mclk = vcm_step_mclk(dw9714_dev.vcm_settings.step_setting);
- u8 s = vcm_step_s(dw9714_dev.vcm_settings.step_setting);
-
- /*
- * For different mode, VCM_PROTECTION_OFF/ON required by the
- * control procedure. For DW9714_DIRECT/DLC mode, slew value is
- * VCM_DEFAULT_S(0).
- */
- switch (dw9714_dev.vcm_mode) {
- case DW9714_DIRECT:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, DIRECT_VCM);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client,
- vcm_val(val, VCM_DEFAULT_S));
- break;
- case DW9714_LSC:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_dlc_mclk(DLC_DISABLE, mclk));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_tsrc(dw9714_dev.vcm_settings.t_src));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client, vcm_val(val, s));
- break;
- case DW9714_DLC:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_dlc_mclk(DLC_ENABLE, mclk));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_tsrc(dw9714_dev.vcm_settings.t_src));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client,
- vcm_val(val, VCM_DEFAULT_S));
- break;
- }
- return ret;
-}
-
-int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, DW9714_MAX_FOCUS_POS);
- ret = dw9714_t_focus_vcm(sd, value);
- if (ret == 0) {
- dw9714_dev.number_of_steps = value - dw9714_dev.focus;
- dw9714_dev.focus = value;
- getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int dw9714_t_focus_abs_init(struct v4l2_subdev *sd)
-{
- int ret;
-
- ret = dw9714_t_focus_vcm(sd, DW9714_DEFAULT_FOCUS_POS);
- if (ret == 0) {
- dw9714_dev.number_of_steps =
- DW9714_DEFAULT_FOCUS_POS - dw9714_dev.focus;
- dw9714_dev.focus = DW9714_DEFAULT_FOCUS_POS;
- getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
-
- return dw9714_t_focus_abs(sd, dw9714_dev.focus + value);
-}
-
-int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(dw9714_dev.number_of_steps)*DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (dw9714_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- dw9714_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = dw9714_dev.focus - dw9714_dev.number_of_steps;
- else
- *value = dw9714_dev.focus;
-
- return 0;
-}
-
-int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- dw9714_dev.vcm_settings.step_setting = value;
- dw9714_dev.vcm_settings.update = true;
-
- return 0;
-}
-
-int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- dw9714_dev.vcm_settings.t_src = value;
- dw9714_dev.vcm_settings.update = true;
-
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.h b/drivers/staging/media/atomisp/i2c/imx/dw9714.h
deleted file mode 100644
index aee560026b56..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9714.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DW9714_H__
-#define __DW9714_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-
-#define DW9714_VCM_ADDR 0x0c
-
-enum dw9714_tok_type {
- DW9714_8BIT = 0x0001,
- DW9714_16BIT = 0x0002,
-};
-
-struct dw9714_vcm_settings {
- u16 code; /* bit[9:0]: Data[9:0] */
- u8 t_src; /* bit[4:0]: T_SRC[4:0] */
- u8 step_setting; /* bit[3:0]: S[3:0]/bit[5:4]: MCLK[1:0] */
- bool update;
-};
-
-enum dw9714_vcm_mode {
- DW9714_DIRECT = 0x1, /* direct control */
- DW9714_LSC = 0x2, /* linear slope control */
- DW9714_DLC = 0x3, /* dual level control */
-};
-
-/* dw9714 device structure */
-struct dw9714_device {
- struct dw9714_vcm_settings vcm_settings;
- struct timespec timestamp_t_focus_abs;
- enum dw9714_vcm_mode vcm_mode;
- s16 number_of_steps;
- bool initialized; /* true if dw9714 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
-};
-
-#define DW9714_INVALID_CONFIG 0xffffffff
-#define DW9714_MAX_FOCUS_POS 1023
-#define DW9714_DEFAULT_FOCUS_POS 290
-
-
-/* MCLK[1:0] = 01 T_SRC[4:0] = 00001 S[3:0] = 0111 */
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DLC_ENABLE 1
-#define DLC_DISABLE 0
-#define VCM_PROTECTION_OFF 0xeca3
-#define VCM_PROTECTION_ON 0xdc51
-#define VCM_DEFAULT_S 0x0
-
-#define vcm_step_s(a) (u8)(a & 0xf)
-#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
-#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
-#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
-#define vcm_val(data, s) (u16)(data << 4 | s)
-#define DIRECT_VCM vcm_dlc_mclk(0, 0)
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.c b/drivers/staging/media/atomisp/i2c/imx/dw9718.c
deleted file mode 100644
index c02b9f0a2440..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9718.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Support for dw9718 vcm driver.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/delay.h>
-#include "dw9718.h"
-
-static struct dw9718_device dw9718_dev;
-
-static int dw9718_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2] = { reg };
-
- msg[0].addr = DW9718_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = buf;
-
- msg[1].addr = DW9718_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
-
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
-
- return 0;
-}
-
-static int dw9718_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2] = { reg, val};
-
- msg.addr = DW9718_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-static int dw9718_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
-
- msg.addr = DW9718_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- value = clamp(value, 0, DW9718_MAX_FOCUS_POS);
- ret = dw9718_i2c_wr16(client, DW9718_DATA_M, value);
- /*pr_info("%s: value = %d\n", __func__, value);*/
- if (ret < 0)
- return ret;
-
- getnstimeofday(&dw9718_dev.focus_time);
- dw9718_dev.focus = value;
-
- return 0;
-}
-
-int dw9718_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- if (dw9718_dev.power_on)
- return 0;
-
- /* Enable power */
- ret = dw9718_dev.platform_data->power_ctrl(sd, 1);
- if (ret) {
- dev_err(&client->dev, "DW9718_PD power_ctrl failed %d\n", ret);
- return ret;
- }
- /* Wait for VBAT to stabilize */
- udelay(100);
-
- /* Detect device */
- ret = dw9718_i2c_rd8(client, DW9718_SACT, &value);
- if (ret < 0) {
- dev_err(&client->dev, "read DW9718_SACT failed %d\n", ret);
- goto fail_powerdown;
- }
- /*
- * WORKAROUND: for module P8V12F-203 which are used on
- * Cherrytrail Refresh Davis Reef AoB, register SACT is not
- * returning default value as spec. But VCM works as expected and
- * root cause is still under discussion with vendor.
- * workaround here to avoid aborting the power up sequence and just
- * give a warning about this error.
- */
- if (value != DW9718_SACT_DEFAULT_VAL)
- dev_warn(&client->dev, "%s error, incorrect ID\n", __func__);
-
- /* Initialize according to recommended settings */
- ret = dw9718_i2c_wr8(client, DW9718_CONTROL,
- DW9718_CONTROL_SW_LINEAR |
- DW9718_CONTROL_S_SAC4 |
- DW9718_CONTROL_OCP_DISABLE |
- DW9718_CONTROL_UVLO_DISABLE);
- if (ret < 0) {
- dev_err(&client->dev, "write DW9718_CONTROL failed %d\n", ret);
- goto fail_powerdown;
- }
- ret = dw9718_i2c_wr8(client, DW9718_SACT,
- DW9718_SACT_MULT_TWO |
- DW9718_SACT_PERIOD_8_8MS);
- if (ret < 0) {
- dev_err(&client->dev, "write DW9718_SACT failed %d\n", ret);
- goto fail_powerdown;
- }
-
- ret = dw9718_t_focus_abs(sd, dw9718_dev.focus);
- if (ret)
- return ret;
- dw9718_dev.initialized = true;
- dw9718_dev.power_on = 1;
-
- return 0;
-
-fail_powerdown:
- dev_err(&client->dev, "%s error, powerup failed\n", __func__);
- dw9718_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int dw9718_vcm_power_down(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (!dw9718_dev.power_on)
- return 0;
-
- ret = dw9718_dev.platform_data->power_ctrl(sd, 0);
- if (ret) {
- dev_err(&client->dev, "%s power_ctrl failed\n",
- __func__);
- return ret;
- }
- dw9718_dev.power_on = 0;
-
- return 0;
-}
-
-int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- static const struct timespec move_time = {
- .tv_sec = 0,
- .tv_nsec = 60000000
- };
- struct timespec current_time, finish_time, delta_time;
-
- getnstimeofday(&current_time);
- finish_time = timespec_add(dw9718_dev.focus_time, move_time);
- delta_time = timespec_sub(current_time, finish_time);
- if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
- *value = ATOMISP_FOCUS_HP_COMPLETE |
- ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- } else {
- *value = ATOMISP_FOCUS_STATUS_MOVING |
- ATOMISP_FOCUS_HP_IN_PROGRESS;
- }
-
- return 0;
-}
-
-int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return dw9718_t_focus_abs(sd, dw9718_dev.focus + value);
-}
-
-int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- *value = dw9718_dev.focus;
- return 0;
-}
-int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9718_vcm_init(struct v4l2_subdev *sd)
-{
- dw9718_dev.platform_data = camera_get_af_platform_data();
- dw9718_dev.focus = DW9718_DEFAULT_FOCUS_POSITION;
- dw9718_dev.power_on = 0;
- return (NULL == dw9718_dev.platform_data) ? -ENODEV : 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.h b/drivers/staging/media/atomisp/i2c/imx/dw9718.h
deleted file mode 100644
index 4a1040c3149f..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9718.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Support for dw9719 vcm driver.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __DW9718_H__
-#define __DW9718_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-#define DW9718_VCM_ADDR (0x18 >> 1)
-
-/* dw9718 device structure */
-struct dw9718_device {
- struct timespec timestamp_t_focus_abs;
- s16 number_of_steps;
- bool initialized; /* true if dw9718 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
- __u8 power_on;
-};
-
-#define DW9718_MAX_FOCUS_POS 1023
-
-/* Register addresses */
-#define DW9718_PD 0x00
-#define DW9718_CONTROL 0x01
-#define DW9718_DATA_M 0x02
-#define DW9718_DATA_L 0x03
-#define DW9718_SW 0x04
-#define DW9718_SACT 0x05
-#define DW9718_FLAG 0x10
-
-#define DW9718_CONTROL_SW_LINEAR BIT(0)
-#define DW9718_CONTROL_S_SAC4 (BIT(1) | BIT(3))
-#define DW9718_CONTROL_OCP_DISABLE BIT(4)
-#define DW9718_CONTROL_UVLO_DISABLE BIT(5)
-
-#define DW9718_SACT_MULT_TWO 0x00
-#define DW9718_SACT_PERIOD_8_8MS 0x19
-#define DW9718_SACT_DEFAULT_VAL 0x60
-
-#define DW9718_DEFAULT_FOCUS_POSITION 300
-
-#endif /* __DW9718_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.c b/drivers/staging/media/atomisp/i2c/imx/dw9719.c
deleted file mode 100644
index 565237796bb4..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9719.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Support for dw9719 vcm driver.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/delay.h>
-#include "dw9719.h"
-
-static struct dw9719_device dw9719_dev;
-
-static int dw9719_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2] = { reg };
-
- msg[0].addr = DW9719_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = buf;
-
- msg[1].addr = DW9719_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
-
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
-
- return 0;
-}
-
-static int dw9719_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2] = { reg, val };
-
- msg.addr = DW9719_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-static int dw9719_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
-
- msg.addr = DW9719_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-int dw9719_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- /* Enable power */
- ret = dw9719_dev.platform_data->power_ctrl(sd, 1);
- /* waiting time requested by DW9714A(vcm) */
- if (ret)
- return ret;
- /* Wait for VBAT to stabilize */
- udelay(1);
-
- /*
- * Jiggle SCL pin to wake up device.
- */
- ret = dw9719_i2c_wr8(client, DW9719_CONTROL, 1);
- /* Need 100us to transit from SHUTDOWN to STANDBY*/
- usleep_range(100, 1000);
-
- /* Enable the ringing compensation */
- ret = dw9719_i2c_wr8(client, DW9719_CONTROL, DW9719_ENABLE_RINGING);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Use SAC3 mode */
- ret = dw9719_i2c_wr8(client, DW9719_MODE, DW9719_MODE_SAC3);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Set the resonance frequency */
- ret = dw9719_i2c_wr8(client, DW9719_VCM_FREQ, DW9719_DEFAULT_VCM_FREQ);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Detect device */
- ret = dw9719_i2c_rd8(client, DW9719_INFO, &value);
- if (ret < 0)
- goto fail_powerdown;
- if (value != DW9719_ID) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
- dw9719_dev.focus = 0;
- dw9719_dev.initialized = true;
-
- return 0;
-
-fail_powerdown:
- dw9719_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int dw9719_vcm_power_down(struct v4l2_subdev *sd)
-{
- return dw9719_dev.platform_data->power_ctrl(sd, 0);
-}
-
-int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- static const struct timespec move_time = {
-
- .tv_sec = 0,
- .tv_nsec = 60000000
- };
- struct timespec current_time, finish_time, delta_time;
-
- getnstimeofday(&current_time);
- finish_time = timespec_add(dw9719_dev.focus_time, move_time);
- delta_time = timespec_sub(current_time, finish_time);
- if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
- *value = ATOMISP_FOCUS_HP_COMPLETE |
- ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- } else {
- *value = ATOMISP_FOCUS_STATUS_MOVING |
- ATOMISP_FOCUS_HP_IN_PROGRESS;
- }
-
- return 0;
-}
-
-int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- value = clamp(value, 0, DW9719_MAX_FOCUS_POS);
- ret = dw9719_i2c_wr16(client, DW9719_VCM_CURRENT, value);
- if (ret < 0)
- return ret;
-
- getnstimeofday(&dw9719_dev.focus_time);
- dw9719_dev.focus = value;
-
- return 0;
-}
-
-int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return dw9719_t_focus_abs(sd, dw9719_dev.focus + value);
-}
-
-int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- *value = dw9719_dev.focus;
- return 0;
-}
-int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.h b/drivers/staging/media/atomisp/i2c/imx/dw9719.h
deleted file mode 100644
index 711f412aef2a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9719.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Support for dw9719 vcm driver.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __DW9719_H__
-#define __DW9719_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-#define DW9719_VCM_ADDR (0x18 >> 1)
-
-/* dw9719 device structure */
-struct dw9719_device {
- struct timespec timestamp_t_focus_abs;
- s16 number_of_steps;
- bool initialized; /* true if dw9719 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
-};
-
-#define DW9719_INVALID_CONFIG 0xffffffff
-#define DW9719_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DW9719_INFO 0
-#define DW9719_ID 0xF1
-#define DW9719_CONTROL 2
-#define DW9719_VCM_CURRENT 3
-
-#define DW9719_MODE 6
-#define DW9719_VCM_FREQ 7
-
-#define DW9719_MODE_SAC3 0x40
-#define DW9719_DEFAULT_VCM_FREQ 0x04
-#define DW9719_ENABLE_RINGING 0x02
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.c b/drivers/staging/media/atomisp/i2c/imx/imx.c
deleted file mode 100644
index 49ab0af87096..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx.c
+++ /dev/null
@@ -1,2480 +0,0 @@
-/*
- * Support for Sony imx 8MP camera sensor.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <asm/intel-mid.h>
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include "../../include/linux/libmsrlisthelper.h"
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include "imx.h"
-
-/*
- * The imx135 embedded data info:
- * embedded data line num: 2
- * line 0 effective data size(byte): 76
- * line 1 effective data size(byte): 113
- */
-static const uint32_t
- imx135_embedded_effective_size[IMX135_EMBEDDED_DATA_LINE_NUM]
- = {76, 113};
-
-static enum atomisp_bayer_order imx_bayer_order_mapping[] = {
- atomisp_bayer_order_rggb,
- atomisp_bayer_order_grbg,
- atomisp_bayer_order_gbrg,
- atomisp_bayer_order_bggr
-};
-
-static const unsigned int
-IMX227_BRACKETING_LUT_FRAME_ENTRY[IMX_MAX_AE_LUT_LENGTH] = {
- 0x0E10, 0x0E1E, 0x0E2C, 0x0E3A, 0x0E48};
-
-static int
-imx_read_reg(struct i2c_client *client, u16 len, u16 reg, u16 *val)
-{
- struct i2c_msg msg[2];
- u16 data[IMX_SHORT_MAX];
- int ret, i;
- int retry = 0;
-
- if (len > IMX_BYTE_MAX) {
- dev_err(&client->dev, "%s error, invalid data length\n",
- __func__);
- return -EINVAL;
- }
-
- do {
- memset(msg, 0 , sizeof(msg));
- memset(data, 0 , sizeof(data));
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = I2C_MSG_LENGTH;
- msg[0].buf = (u8 *)data;
- /* high byte goes first */
- data[0] = cpu_to_be16(reg);
-
- msg[1].addr = client->addr;
- msg[1].len = len;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = (u8 *)data;
-
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret != 2) {
- dev_err(&client->dev,
- "retrying i2c read from offset 0x%x error %d... %d\n",
- reg, ret, retry);
- msleep(20);
- }
- } while (ret != 2 && retry++ < I2C_RETRY_COUNT);
-
- if (ret != 2)
- return -EIO;
-
- /* high byte comes first */
- if (len == IMX_8BIT) {
- *val = (u8)data[0];
- } else {
- /* 16-bit access is default when len > 1 */
- for (i = 0; i < (len >> 1); i++)
- val[i] = be16_to_cpu(data[i]);
- }
-
- return 0;
-}
-
-static int imx_i2c_write(struct i2c_client *client, u16 len, u8 *data)
-{
- struct i2c_msg msg;
- int ret;
- int retry = 0;
-
- do {
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = len;
- msg.buf = data;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret != 1) {
- dev_err(&client->dev,
- "retrying i2c write transfer... %d\n", retry);
- msleep(20);
- }
- } while (ret != 1 && retry++ < I2C_RETRY_COUNT);
-
- return ret == 1 ? 0 : -EIO;
-}
-
-int
-imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val)
-{
- int ret;
- unsigned char data[4] = {0};
- u16 *wreg = (u16 *)data;
- const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
-
- if (data_length != IMX_8BIT && data_length != IMX_16BIT) {
- v4l2_err(client, "%s error, invalid data_length\n", __func__);
- return -EINVAL;
- }
-
- /* high byte goes out first */
- *wreg = cpu_to_be16(reg);
-
- if (data_length == IMX_8BIT)
- data[2] = (u8)(val);
- else {
- /* IMX_16BIT */
- u16 *wdata = (u16 *)&data[2];
- *wdata = cpu_to_be16(val);
- }
-
- ret = imx_i2c_write(client, len, data);
- if (ret)
- dev_err(&client->dev,
- "write error: wrote 0x%x to offset 0x%x error %d",
- val, reg, ret);
-
- return ret;
-}
-
-/*
- * imx_write_reg_array - Initializes a list of imx registers
- * @client: i2c driver client structure
- * @reglist: list of registers to be written
- *
- * This function initializes a list of registers. When consecutive addresses
- * are found in a row on the list, this function creates a buffer and sends
- * consecutive data in a single i2c_transfer().
- *
- * __imx_flush_reg_array, __imx_buf_reg_array() and
- * __imx_write_reg_is_consecutive() are internal functions to
- * imx_write_reg_array_fast() and should be not used anywhere else.
- *
- */
-
-static int __imx_flush_reg_array(struct i2c_client *client,
- struct imx_write_ctrl *ctrl)
-{
- u16 size;
-
- if (ctrl->index == 0)
- return 0;
-
- size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
- ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
- ctrl->index = 0;
-
- return imx_i2c_write(client, size, (u8 *)&ctrl->buffer);
-}
-
-static int __imx_buf_reg_array(struct i2c_client *client,
- struct imx_write_ctrl *ctrl,
- const struct imx_reg *next)
-{
- int size;
- u16 *data16;
-
- switch (next->type) {
- case IMX_8BIT:
- size = 1;
- ctrl->buffer.data[ctrl->index] = (u8)next->val;
- break;
- case IMX_16BIT:
- size = 2;
- data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
- *data16 = cpu_to_be16((u16)next->val);
- break;
- default:
- return -EINVAL;
- }
-
- /* When first item is added, we need to store its starting address */
- if (ctrl->index == 0)
- ctrl->buffer.addr = next->sreg;
-
- ctrl->index += size;
-
- /*
- * Buffer cannot guarantee free space for u32? Better flush it to avoid
- * possible lack of memory for next item.
- */
- if (ctrl->index + sizeof(u16) >= IMX_MAX_WRITE_BUF_SIZE)
- return __imx_flush_reg_array(client, ctrl);
-
- return 0;
-}
-
-static int
-__imx_write_reg_is_consecutive(struct i2c_client *client,
- struct imx_write_ctrl *ctrl,
- const struct imx_reg *next)
-{
- if (ctrl->index == 0)
- return 1;
-
- return ctrl->buffer.addr + ctrl->index == next->sreg;
-}
-
-static int imx_write_reg_array(struct i2c_client *client,
- const struct imx_reg *reglist)
-{
- const struct imx_reg *next = reglist;
- struct imx_write_ctrl ctrl;
- int err;
-
- ctrl.index = 0;
- for (; next->type != IMX_TOK_TERM; next++) {
- switch (next->type & IMX_TOK_MASK) {
- case IMX_TOK_DELAY:
- err = __imx_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- msleep(next->val);
- break;
-
- default:
- /*
- * If next address is not consecutive, data needs to be
- * flushed before proceed.
- */
- if (!__imx_write_reg_is_consecutive(client, &ctrl,
- next)) {
- err = __imx_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- }
- err = __imx_buf_reg_array(client, &ctrl, next);
- if (err) {
- v4l2_err(client, "%s: write error, aborted\n",
- __func__);
- return err;
- }
- break;
- }
- }
-
- return __imx_flush_reg_array(client, &ctrl);
-}
-
-static int __imx_min_fps_diff(int fps, const struct imx_fps_setting *fps_list)
-{
- int diff = INT_MAX;
- int i;
-
- if (fps == 0)
- return 0;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (abs(fps_list[i].fps - fps) < diff)
- diff = abs(fps_list[i].fps - fps);
- }
-
- return diff;
-}
-
-static int __imx_nearest_fps_index(int fps,
- const struct imx_fps_setting *fps_list)
-{
- int fps_index = 0;
- int i;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (abs(fps_list[i].fps - fps)
- < abs(fps_list[fps_index].fps - fps))
- fps_index = i;
- }
- return fps_index;
-}
-
-/*
- * This is to choose the nearest fps setting above the requested fps
- * fps_list should be in ascendant order.
- */
-static int __imx_above_nearest_fps_index(int fps,
- const struct imx_fps_setting *fps_list)
-{
- int fps_index = 0;
- int i;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (fps <= fps_list[i].fps) {
- fps_index = i;
- break;
- }
- }
-
- return fps_index;
-}
-
-static int imx_get_lanes(struct v4l2_subdev *sd)
-{
- struct camera_mipi_info *imx_info = v4l2_get_subdev_hostdata(sd);
-
- if (!imx_info)
- return -ENOSYS;
- if (imx_info->num_lanes < 1 || imx_info->num_lanes > 4 ||
- imx_info->num_lanes == 3)
- return -EINVAL;
-
- return imx_info->num_lanes;
-}
-
-static int __imx_update_exposure_timing(struct i2c_client *client, u16 exposure,
- u16 llp, u16 fll)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
-
- if (dev->sensor_id != IMX227_ID) {
- /* Increase the VTS to match exposure + margin */
- if (exposure > fll - IMX_INTEGRATION_TIME_MARGIN)
- fll = exposure + IMX_INTEGRATION_TIME_MARGIN;
- }
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->line_length_pixels, llp);
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->frame_length_lines, fll);
- if (ret)
- return ret;
-
- if (exposure)
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->coarse_integration_time, exposure);
-
- return ret;
-}
-
-static int __imx_update_gain(struct v4l2_subdev *sd, u16 gain)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- /* set global gain */
- ret = imx_write_reg(client, IMX_8BIT, dev->reg_addr->global_gain, gain);
- if (ret)
- return ret;
-
- /* set short analog gain */
- if (dev->sensor_id == IMX135_ID)
- ret = imx_write_reg(client, IMX_8BIT, IMX_SHORT_AGC_GAIN, gain);
-
- return ret;
-}
-
-static int __imx_update_digital_gain(struct i2c_client *client, u16 digitgain)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- struct imx_write_buffer digit_gain;
-
- digit_gain.addr = cpu_to_be16(dev->reg_addr->dgc_adj);
- digit_gain.data[0] = (digitgain >> 8) & 0xFF;
- digit_gain.data[1] = digitgain & 0xFF;
-
- if (dev->sensor_id == IMX219_ID) {
- return imx_i2c_write(client, IMX219_DGC_LEN, (u8 *)&digit_gain);
- } else if (dev->sensor_id == IMX227_ID) {
- return imx_i2c_write(client, IMX227_DGC_LEN, (u8 *)&digit_gain);
- } else {
- digit_gain.data[2] = (digitgain >> 8) & 0xFF;
- digit_gain.data[3] = digitgain & 0xFF;
- digit_gain.data[4] = (digitgain >> 8) & 0xFF;
- digit_gain.data[5] = digitgain & 0xFF;
- digit_gain.data[6] = (digitgain >> 8) & 0xFF;
- digit_gain.data[7] = digitgain & 0xFF;
- return imx_i2c_write(client, IMX_DGC_LEN, (u8 *)&digit_gain);
- }
- return 0;
-}
-
-static int imx_set_exposure_gain(struct v4l2_subdev *sd, u16 coarse_itg,
- u16 gain, u16 digitgain)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int lanes = imx_get_lanes(sd);
- unsigned int digitgain_scaled;
- int ret = 0;
-
- /* Validate exposure: cannot exceed VTS-4 where VTS is 16bit */
- coarse_itg = clamp_t(u16, coarse_itg, 0, IMX_MAX_EXPOSURE_SUPPORTED);
-
- /* Validate gain: must not exceed maximum 8bit value */
- gain = clamp_t(u16, gain, 0, IMX_MAX_GLOBAL_GAIN_SUPPORTED);
-
- mutex_lock(&dev->input_lock);
-
- if (dev->sensor_id == IMX227_ID) {
- ret = imx_write_reg_array(client, imx_param_hold);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- }
-
- /* For imx175, setting gain must be delayed by one */
- if ((dev->sensor_id == IMX175_ID) && dev->digital_gain)
- digitgain_scaled = dev->digital_gain;
- else
- digitgain_scaled = digitgain;
- /* imx132 with two lanes needs more gain to saturate at max */
- if (dev->sensor_id == IMX132_ID && lanes > 1) {
- digitgain_scaled *= IMX132_2LANES_GAINFACT;
- digitgain_scaled >>= IMX132_2LANES_GAINFACT_SHIFT;
- }
- /* Validate digital gain: must not exceed 12 bit value*/
- digitgain_scaled = clamp_t(unsigned int, digitgain_scaled,
- 0, IMX_MAX_DIGITAL_GAIN_SUPPORTED);
-
- ret = __imx_update_exposure_timing(client, coarse_itg,
- dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- goto out;
- dev->coarse_itg = coarse_itg;
-
- if (dev->sensor_id == IMX175_ID)
- ret = __imx_update_gain(sd, dev->gain);
- else
- ret = __imx_update_gain(sd, gain);
- if (ret)
- goto out;
- dev->gain = gain;
-
- ret = __imx_update_digital_gain(client, digitgain_scaled);
- if (ret)
- goto out;
- dev->digital_gain = digitgain;
-
-out:
- if (dev->sensor_id == IMX227_ID)
- ret = imx_write_reg_array(client, imx_param_update);
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static long imx_s_exposure(struct v4l2_subdev *sd,
- struct atomisp_exposure *exposure)
-{
- return imx_set_exposure_gain(sd, exposure->integration_time[0],
- exposure->gain[0], exposure->gain[1]);
-}
-
-/* FIXME -To be updated with real OTP reading */
-static int imx_g_priv_int_data(struct v4l2_subdev *sd,
- struct v4l2_private_int_data *priv)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u8 __user *to = priv->data;
- u32 read_size = priv->size;
- int ret;
-
- /* No need to copy data if size is 0 */
- if (!read_size)
- goto out;
-
- if (IS_ERR(dev->otp_data)) {
- dev_err(&client->dev, "OTP data not available");
- return PTR_ERR(dev->otp_data);
- }
- /* Correct read_size value only if bigger than maximum */
- if (read_size > dev->otp_driver->size)
- read_size = dev->otp_driver->size;
-
- ret = copy_to_user(to, dev->otp_data, read_size);
- if (ret) {
- dev_err(&client->dev, "%s: failed to copy OTP data to user\n",
- __func__);
- return -EFAULT;
- }
-out:
- /* Return correct size */
- priv->size = dev->otp_driver->size;
-
- return 0;
-}
-
-static int __imx_init(struct v4l2_subdev *sd, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int lanes = imx_get_lanes(sd);
- int ret;
-
- if (dev->sensor_id == IMX_ID_DEFAULT)
- return 0;
-
- /* The default is no flip at sensor initialization */
- dev->h_flip->cur.val = 0;
- dev->v_flip->cur.val = 0;
- /* Sets the default FPS */
- dev->fps_index = 0;
- dev->curr_res_table = dev->mode_tables->res_preview;
- dev->entries_curr_table = dev->mode_tables->n_res_preview;
-
- ret = imx_write_reg_array(client, dev->mode_tables->init_settings);
- if (ret)
- return ret;
-
- if (dev->sensor_id == IMX132_ID && lanes > 0) {
- static const u8 imx132_rglanesel[] = {
- IMX132_RGLANESEL_1LANE, /* 1 lane */
- IMX132_RGLANESEL_2LANES, /* 2 lanes */
- IMX132_RGLANESEL_1LANE, /* undefined */
- IMX132_RGLANESEL_4LANES, /* 4 lanes */
- };
- ret = imx_write_reg(client, IMX_8BIT,
- IMX132_RGLANESEL, imx132_rglanesel[lanes - 1]);
- }
-
- return ret;
-}
-
-static int imx_init(struct v4l2_subdev *sd, u32 val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
-
- mutex_lock(&dev->input_lock);
- ret = __imx_init(sd, val);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static long imx_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
-
- switch (cmd) {
- case ATOMISP_IOC_S_EXPOSURE:
- return imx_s_exposure(sd, arg);
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
- return imx_g_priv_int_data(sd, arg);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- /* power control */
- ret = dev->platform_data->power_ctrl(sd, 1);
- if (ret)
- goto fail_power;
-
- /* flis clock control */
- ret = dev->platform_data->flisclk_ctrl(sd, 1);
- if (ret)
- goto fail_clk;
-
- /* gpio ctrl */
- ret = dev->platform_data->gpio_ctrl(sd, 1);
- if (ret) {
- dev_err(&client->dev, "gpio failed\n");
- goto fail_gpio;
- }
-
- return 0;
-fail_gpio:
- dev->platform_data->gpio_ctrl(sd, 0);
-fail_clk:
- dev->platform_data->flisclk_ctrl(sd, 0);
-fail_power:
- dev->platform_data->power_ctrl(sd, 0);
- dev_err(&client->dev, "sensor power-up failed\n");
-
- return ret;
-}
-
-static int power_down(struct v4l2_subdev *sd)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- ret = dev->platform_data->flisclk_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "flisclk failed\n");
-
- /* gpio ctrl */
- ret = dev->platform_data->gpio_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "gpio failed\n");
-
- /* power control */
- ret = dev->platform_data->power_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "vprog failed.\n");
-
- return ret;
-}
-
-static int __imx_s_power(struct v4l2_subdev *sd, int on)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
- int r = 0;
-
- if (on == 0) {
- ret = power_down(sd);
- if (dev->vcm_driver && dev->vcm_driver->power_down)
- r = dev->vcm_driver->power_down(sd);
- if (ret == 0)
- ret = r;
- dev->power = 0;
- } else {
- if (dev->vcm_driver && dev->vcm_driver->power_up)
- ret = dev->vcm_driver->power_up(sd);
- if (ret)
- return ret;
- ret = power_up(sd);
- if (!ret) {
- dev->power = 1;
- return __imx_init(sd, 0);
- }
- }
-
- return ret;
-}
-
-static int imx_s_power(struct v4l2_subdev *sd, int on)
-{
- int ret;
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- ret = __imx_s_power(sd, on);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int imx_get_intg_factor(struct i2c_client *client,
- struct camera_mipi_info *info,
- const struct imx_reg *reglist)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- int lanes = imx_get_lanes(sd);
- u32 vt_pix_clk_div;
- u32 vt_sys_clk_div;
- u32 pre_pll_clk_div;
- u32 pll_multiplier;
-
- const int ext_clk_freq_hz = 19200000;
- struct atomisp_sensor_mode_data *buf = &info->data;
- int ret;
- u16 data[IMX_INTG_BUF_COUNT];
-
- u32 vt_pix_clk_freq_mhz;
- u32 coarse_integration_time_min;
- u32 coarse_integration_time_max_margin;
- u32 read_mode;
- u32 div;
-
- if (info == NULL)
- return -EINVAL;
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- ret = imx_read_reg(client, 1, IMX_VT_PIX_CLK_DIV, data);
- if (ret)
- return ret;
- vt_pix_clk_div = data[0] & IMX_MASK_5BIT;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID) {
- static const int rgpltd[] = { 2, 4, 1, 1 };
- ret = imx_read_reg(client, 1, IMX132_208_VT_RGPLTD, data);
- if (ret)
- return ret;
- vt_sys_clk_div = rgpltd[data[0] & IMX_MASK_2BIT];
- } else {
- ret = imx_read_reg(client, 1, IMX_VT_SYS_CLK_DIV, data);
- if (ret)
- return ret;
- vt_sys_clk_div = data[0] & IMX_MASK_2BIT;
- }
- ret = imx_read_reg(client, 1, IMX_PRE_PLL_CLK_DIV, data);
- if (ret)
- return ret;
- pre_pll_clk_div = data[0] & IMX_MASK_4BIT;
-
- ret = imx_read_reg(client, 2,
- (dev->sensor_id == IMX132_ID ||
- dev->sensor_id == IMX219_ID ||
- dev->sensor_id == IMX208_ID) ?
- IMX132_208_219_PLL_MULTIPLIER : IMX_PLL_MULTIPLIER, data);
- if (ret)
- return ret;
- pll_multiplier = data[0] & IMX_MASK_11BIT;
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- ret = imx_read_reg(client, 4, IMX_COARSE_INTG_TIME_MIN, data);
- if (ret)
- return ret;
- coarse_integration_time_min = data[0];
- coarse_integration_time_max_margin = data[1];
-
- /* Get the cropping and output resolution to ISP for this mode. */
- ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_start_h, data);
- if (ret)
- return ret;
- buf->crop_horizontal_start = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->vertical_start_h, data);
- if (ret)
- return ret;
- buf->crop_vertical_start = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_end_h, data);
- if (ret)
- return ret;
- buf->crop_horizontal_end = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->vertical_end_h, data);
- if (ret)
- return ret;
- buf->crop_vertical_end = data[0];
-
- ret = imx_read_reg(client, 2,
- dev->reg_addr->horizontal_output_size_h, data);
- if (ret)
- return ret;
- buf->output_width = data[0];
-
- ret = imx_read_reg(client, 2,
- dev->reg_addr->vertical_output_size_h, data);
- if (ret)
- return ret;
- buf->output_height = data[0];
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
- dev->sensor_id == IMX219_ID)
- read_mode = 0;
- else {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1, IMX227_READ_MODE, data);
- else
- ret = imx_read_reg(client, 1, IMX_READ_MODE, data);
-
- if (ret)
- return ret;
- read_mode = data[0] & IMX_MASK_2BIT;
- }
-
- div = pre_pll_clk_div*vt_sys_clk_div*vt_pix_clk_div;
- if (div == 0)
- return -EINVAL;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID)
- vt_pix_clk_freq_mhz = ext_clk_freq_hz / div;
- else if (dev->sensor_id == IMX227_ID) {
- /* according to IMX227 datasheet:
- * vt_pix_freq_mhz = * num_of_vt_lanes(4) * ivt_pix_clk_freq_mhz
- */
- vt_pix_clk_freq_mhz =
- (u64)4 * ext_clk_freq_hz * pll_multiplier;
- do_div(vt_pix_clk_freq_mhz, div);
- } else
- vt_pix_clk_freq_mhz = 2 * ext_clk_freq_hz / div;
-
- vt_pix_clk_freq_mhz *= pll_multiplier;
- if (dev->sensor_id == IMX132_ID && lanes > 0)
- vt_pix_clk_freq_mhz *= lanes;
-
- dev->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
-
- buf->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
- buf->coarse_integration_time_min = coarse_integration_time_min;
- buf->coarse_integration_time_max_margin =
- coarse_integration_time_max_margin;
-
- buf->fine_integration_time_min = IMX_FINE_INTG_TIME;
- buf->fine_integration_time_max_margin = IMX_FINE_INTG_TIME;
- buf->fine_integration_time_def = IMX_FINE_INTG_TIME;
- buf->frame_length_lines = dev->lines_per_frame;
- buf->line_length_pck = dev->pixels_per_line;
- buf->read_mode = read_mode;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
- dev->sensor_id == IMX219_ID) {
- buf->binning_factor_x = 1;
- buf->binning_factor_y = 1;
- } else {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1, IMX227_BINNING_ENABLE,
- data);
- else
- ret = imx_read_reg(client, 1, IMX_BINNING_ENABLE, data);
-
- if (ret)
- return ret;
- /* 1:binning enabled, 0:disabled */
- if (data[0] == 1) {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1,
- IMX227_BINNING_TYPE, data);
- else
- ret = imx_read_reg(client, 1,
- IMX_BINNING_TYPE, data);
-
- if (ret)
- return ret;
- buf->binning_factor_x = data[0] >> 4 & 0x0f;
- if (!buf->binning_factor_x)
- buf->binning_factor_x = 1;
- buf->binning_factor_y = data[0] & 0xf;
- if (!buf->binning_factor_y)
- buf->binning_factor_y = 1;
- /* WOWRKAROUND, NHD setting for IMX227 should have 4x4
- * binning but the register setting does not reflect
- * this, I am asking vendor why this happens. this is
- * workaround for INTEL BZ 216560.
- */
- if (dev->sensor_id == IMX227_ID) {
- if (dev->curr_res_table[dev->fmt_idx].width ==
- 376 &&
- dev->curr_res_table[dev->fmt_idx].height ==
- 656) {
- buf->binning_factor_x = 4;
- buf->binning_factor_y = 4;
- }
- }
- } else {
- buf->binning_factor_x = 1;
- buf->binning_factor_y = 1;
- }
- }
-
- return 0;
-}
-
-/* This returns the exposure time being used. This should only be used
- for filling in EXIF data, not for actual image processing. */
-static int imx_q_exposure(struct v4l2_subdev *sd, s32 *value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u16 coarse;
- int ret;
-
- /* the fine integration time is currently not calculated */
- ret = imx_read_reg(client, IMX_16BIT,
- dev->reg_addr->coarse_integration_time, &coarse);
- *value = coarse;
-
- return ret;
-}
-
-static int imx_test_pattern(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- if (dev->power == 0)
- return 0;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_R,
- (u16)(dev->tp_r->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GR,
- (u16)(dev->tp_gr->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GB,
- (u16)(dev->tp_gb->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_B,
- (u16)(dev->tp_b->val >> 22));
- if (ret)
- return ret;
-
- return imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_MODE,
- (u16)(dev->tp_mode->val));
-}
-
-static u32 imx_translate_bayer_order(enum atomisp_bayer_order code)
-{
- switch (code) {
- case atomisp_bayer_order_rggb:
- return MEDIA_BUS_FMT_SRGGB10_1X10;
- case atomisp_bayer_order_grbg:
- return MEDIA_BUS_FMT_SGRBG10_1X10;
- case atomisp_bayer_order_bggr:
- return MEDIA_BUS_FMT_SBGGR10_1X10;
- case atomisp_bayer_order_gbrg:
- return MEDIA_BUS_FMT_SGBRG10_1X10;
- }
- return 0;
-}
-
-static int imx_v_flip(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u16 val;
-
- if (dev->power == 0)
- return -EIO;
-
- ret = imx_write_reg_array(client, dev->param_hold);
- if (ret)
- return ret;
-
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- return ret;
- if (value)
- val |= IMX_VFLIP_BIT;
- else
- val &= ~IMX_VFLIP_BIT;
-
- ret = imx_write_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, val);
- if (ret)
- return ret;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info) {
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- }
-
- return imx_write_reg_array(client, dev->param_update);
-}
-
-static int imx_h_flip(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u16 val;
-
- if (dev->power == 0)
- return -EIO;
-
- ret = imx_write_reg_array(client, dev->param_hold);
- if (ret)
- return ret;
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- return ret;
- if (value)
- val |= IMX_HFLIP_BIT;
- else
- val &= ~IMX_HFLIP_BIT;
- ret = imx_write_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, val);
- if (ret)
- return ret;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info) {
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- }
-
- return imx_write_reg_array(client, dev->param_update);
-}
-
-static int imx_g_focal(struct v4l2_subdev *sd, s32 *val)
-{
- *val = (IMX_FOCAL_LENGTH_NUM << 16) | IMX_FOCAL_LENGTH_DEM;
- return 0;
-}
-
-static int imx_g_fnumber(struct v4l2_subdev *sd, s32 *val)
-{
- /*const f number for imx*/
- *val = (IMX_F_NUMBER_DEFAULT_NUM << 16) | IMX_F_NUMBER_DEM;
- return 0;
-}
-
-static int imx_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
-{
- *val = (IMX_F_NUMBER_DEFAULT_NUM << 24) |
- (IMX_F_NUMBER_DEM << 16) |
- (IMX_F_NUMBER_DEFAULT_NUM << 8) | IMX_F_NUMBER_DEM;
- return 0;
-}
-
-static int imx_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- *val = dev->curr_res_table[dev->fmt_idx].bin_factor_x;
-
- return 0;
-}
-
-static int imx_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- *val = dev->curr_res_table[dev->fmt_idx].bin_factor_y;
-
- return 0;
-}
-
-static int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_focus_abs)
- return dev->vcm_driver->t_focus_abs(sd, value);
- return 0;
-}
-
-static int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_focus_rel)
- return dev->vcm_driver->t_focus_rel(sd, value);
- return 0;
-}
-
-static int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->q_focus_status)
- return dev->vcm_driver->q_focus_status(sd, value);
- return 0;
-}
-
-static int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->q_focus_abs)
- return dev->vcm_driver->q_focus_abs(sd, value);
- return 0;
-}
-
-static int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_vcm_slew)
- return dev->vcm_driver->t_vcm_slew(sd, value);
- return 0;
-}
-
-static int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_vcm_timing)
- return dev->vcm_driver->t_vcm_timing(sd, value);
- return 0;
-}
-
-static int imx_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct imx_device *dev = container_of(
- ctrl->handler, struct imx_device, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_TEST_PATTERN:
- ret = imx_test_pattern(&dev->sd);
- break;
- case V4L2_CID_VFLIP:
- dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
- __func__, ctrl->val);
- ret = imx_v_flip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_HFLIP:
- dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
- __func__, ctrl->val);
- ret = imx_h_flip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_FOCUS_ABSOLUTE:
- ret = imx_t_focus_abs(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_FOCUS_RELATIVE:
- ret = imx_t_focus_rel(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_SLEW:
- ret = imx_t_vcm_slew(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_TIMEING:
- ret = imx_t_vcm_timing(&dev->sd, ctrl->val);
- break;
- }
-
- return ret;
-}
-
-static int imx_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct imx_device *dev = container_of(
- ctrl->handler, struct imx_device, ctrl_handler);
- int ret = 0;
- unsigned int val;
-
- switch (ctrl->id) {
- case V4L2_CID_EXPOSURE_ABSOLUTE:
- ret = imx_q_exposure(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_ABSOLUTE:
- ret = imx_q_focus_abs(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_STATUS:
- ret = imx_q_focus_status(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCAL_ABSOLUTE:
- ret = imx_g_focal(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FNUMBER_ABSOLUTE:
- ret = imx_g_fnumber(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FNUMBER_RANGE:
- ret = imx_g_fnumber_range(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_BIN_FACTOR_HORZ:
- ret = imx_g_bin_factor_x(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_BIN_FACTOR_VERT:
- ret = imx_g_bin_factor_y(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_VBLANK:
- ctrl->val = dev->lines_per_frame -
- dev->curr_res_table[dev->fmt_idx].height;
- break;
- case V4L2_CID_HBLANK:
- ctrl->val = dev->pixels_per_line -
- dev->curr_res_table[dev->fmt_idx].width;
- break;
- case V4L2_CID_PIXEL_RATE:
- ctrl->val = dev->vt_pix_clk_freq_mhz;
- break;
- case V4L2_CID_LINK_FREQ:
- val = dev->curr_res_table[dev->fmt_idx].
- fps_options[dev->fps_index].mipi_freq;
- if (val == 0)
- val = dev->curr_res_table[dev->fmt_idx].mipi_freq;
- if (val == 0)
- return -EINVAL;
- ctrl->val = val * 1000; /* To Hz */
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = imx_s_ctrl,
- .g_volatile_ctrl = imx_g_volatile_ctrl
-};
-
-static const struct v4l2_ctrl_config imx_controls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .min = 0x0,
- .max = 0xffff,
- .step = 0x01,
- .def = 0x00,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern",
- .min = 0,
- .max = 0xffff,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_R,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color R",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_GR,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color GR",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_GB,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color GB",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_B,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color B",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip",
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move absolute",
- .min = 0,
- .max = IMX_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_RELATIVE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move relative",
- .min = IMX_MAX_FOCUS_NEG,
- .max = IMX_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_STATUS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus status",
- .min = 0,
- .max = 100, /* allow enum to grow in the future */
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_SLEW,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm slew",
- .min = 0,
- .max = IMX_VCM_SLEW_STEP_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_TIMEING,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm step time",
- .min = 0,
- .max = IMX_VCM_SLEW_TIME_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCAL_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focal length",
- .min = IMX_FOCAL_LENGTH_DEFAULT,
- .max = IMX_FOCAL_LENGTH_DEFAULT,
- .step = 0x01,
- .def = IMX_FOCAL_LENGTH_DEFAULT,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FNUMBER_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "f-number",
- .min = IMX_F_NUMBER_DEFAULT,
- .max = IMX_F_NUMBER_DEFAULT,
- .step = 0x01,
- .def = IMX_F_NUMBER_DEFAULT,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FNUMBER_RANGE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "f-number range",
- .min = IMX_F_NUMBER_RANGE,
- .max = IMX_F_NUMBER_RANGE,
- .step = 0x01,
- .def = IMX_F_NUMBER_RANGE,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_BIN_FACTOR_HORZ,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "horizontal binning factor",
- .min = 0,
- .max = IMX_BIN_FACTOR_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_BIN_FACTOR_VERT,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vertical binning factor",
- .min = 0,
- .max = IMX_BIN_FACTOR_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_LINK_FREQ,
- .name = "Link Frequency",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 1,
- .max = 1500000 * 1000,
- .step = 1,
- .def = 1,
- .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_PIXEL_RATE,
- .name = "Pixel Rate",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HBLANK,
- .name = "Horizontal Blanking",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = SHRT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VBLANK,
- .name = "Vertical Blanking",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = SHRT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HFLIP,
- .name = "Horizontal Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VFLIP,
- .name = "Vertical Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
-};
-
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between resolution and w/h.
- * res->width/height smaller than w/h wouldn't be considered.
- * Returns the value of gap or -1 if fail.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 600
-static int distance(struct imx_resolution const *res, u32 w, u32 h,
- bool keep_ratio)
-{
- unsigned int w_ratio;
- unsigned int h_ratio;
- int match;
- unsigned int allowed_ratio_mismatch = LARGEST_ALLOWED_RATIO_MISMATCH;
-
- if (!keep_ratio)
- allowed_ratio_mismatch = ~0;
-
- if (w == 0)
- return -1;
- w_ratio = (res->width << 13) / w;
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - ((int)8192));
-
- if ((w_ratio < (int)8192) || (h_ratio < (int)8192) ||
- (match > allowed_ratio_mismatch))
- return -1;
-
- return w_ratio + h_ratio;
-}
-
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(struct v4l2_subdev *sd, int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int fps_diff;
- int min_fps_diff = INT_MAX;
- int min_dist = INT_MAX;
- const struct imx_resolution *tmp_res = NULL;
- struct imx_device *dev = to_imx_sensor(sd);
- bool again = 1;
-retry:
- for (i = 0; i < dev->entries_curr_table; i++) {
- tmp_res = &dev->curr_res_table[i];
- dist = distance(tmp_res, w, h, again);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- if (dist == min_dist) {
- fps_diff = __imx_min_fps_diff(dev->targetfps,
- tmp_res->fps_options);
- if (fps_diff < min_fps_diff) {
- min_fps_diff = fps_diff;
- idx = i;
- }
- }
- }
-
- /*
- * FIXME!
- * only IMX135 for Saltbay and IMX227 use this algorithm
- */
- if (idx == -1 && again == true && dev->new_res_sel_method) {
- again = false;
- goto retry;
- }
- return idx;
-}
-
-/* Call with ctrl_handler.lock hold */
-static int __adjust_hvblank(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u16 new_frame_length_lines, new_line_length_pck;
- int ret;
-
- /*
- * No need to adjust h/v blank if not set dbg value
- * Note that there is no other checking on the h/v blank value,
- * as h/v blank can be set to any value above zero for debug purpose
- */
- if (!dev->v_blank->val || !dev->h_blank->val)
- return 0;
-
- new_frame_length_lines = dev->curr_res_table[dev->fmt_idx].height +
- dev->v_blank->val;
- new_line_length_pck = dev->curr_res_table[dev->fmt_idx].width +
- dev->h_blank->val;
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->line_length_pixels, new_line_length_pck);
- if (ret)
- return ret;
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->frame_length_lines, new_frame_length_lines);
- if (ret)
- return ret;
-
- dev->lines_per_frame = new_frame_length_lines;
- dev->pixels_per_line = new_line_length_pck;
-
- return 0;
-}
-
-static int imx_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct imx_resolution *res;
- int lanes = imx_get_lanes(sd);
- int ret;
- u16 data, val;
- int idx;
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info == NULL)
- return -EINVAL;
- if ((fmt->width > imx_max_res[dev->sensor_id].res_max_width)
- || (fmt->height > imx_max_res[dev->sensor_id].res_max_height)) {
- fmt->width = imx_max_res[dev->sensor_id].res_max_width;
- fmt->height = imx_max_res[dev->sensor_id].res_max_height;
- } else {
- idx = nearest_resolution_index(sd, fmt->width, fmt->height);
-
- /*
- * nearest_resolution_index() doesn't return smaller
- * resolutions. If it fails, it means the requested
- * resolution is higher than wecan support. Fallback
- * to highest possible resolution in this case.
- */
- if (idx == -1)
- idx = dev->entries_curr_table - 1;
-
- fmt->width = dev->curr_res_table[idx].width;
- fmt->height = dev->curr_res_table[idx].height;
- }
-
- fmt->code = dev->format.code;
- if(format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
- return 0;
- }
- mutex_lock(&dev->input_lock);
-
- dev->fmt_idx = nearest_resolution_index(sd, fmt->width, fmt->height);
- if (dev->fmt_idx == -1) {
- ret = -EINVAL;
- goto out;
- }
- res = &dev->curr_res_table[dev->fmt_idx];
-
- /* Adjust the FPS selection based on the resolution selected */
- dev->fps_index = __imx_nearest_fps_index(dev->targetfps,
- res->fps_options);
- dev->fps = res->fps_options[dev->fps_index].fps;
- dev->regs = res->fps_options[dev->fps_index].regs;
- if (!dev->regs)
- dev->regs = res->regs;
-
- ret = imx_write_reg_array(client, dev->regs);
- if (ret)
- goto out;
-
- if (dev->sensor_id == IMX132_ID && lanes > 0) {
- static const u8 imx132_rgpltd[] = {
- 2, /* 1 lane: /1 */
- 0, /* 2 lanes: /2 */
- 0, /* undefined */
- 1, /* 4 lanes: /4 */
- };
- ret = imx_write_reg(client, IMX_8BIT, IMX132_208_VT_RGPLTD,
- imx132_rgpltd[lanes - 1]);
- if (ret)
- goto out;
- }
-
- dev->pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
- dev->lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
-
- /* dbg h/v blank time */
- __adjust_hvblank(sd);
-
- ret = __imx_update_exposure_timing(client, dev->coarse_itg,
- dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- goto out;
-
- ret = __imx_update_gain(sd, dev->gain);
- if (ret)
- goto out;
-
- ret = __imx_update_digital_gain(client, dev->digital_gain);
- if (ret)
- goto out;
-
- ret = imx_write_reg_array(client, dev->param_update);
- if (ret)
- goto out;
-
- ret = imx_get_intg_factor(client, imx_info, dev->regs);
- if (ret)
- goto out;
-
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- goto out;
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
-
- /*
- * Fill meta data info. add imx135 metadata setting for RAW10 format
- */
- switch (dev->sensor_id) {
- case IMX135_ID:
- ret = imx_read_reg(client, 2,
- IMX135_OUTPUT_DATA_FORMAT_REG, &data);
- if (ret)
- goto out;
- /*
- * The IMX135 can support various resolutions like
- * RAW6/8/10/12/14.
- * 1.The data format is RAW10:
- * matadata width = current resolution width(pixel) * 10 / 8
- * 2.The data format is RAW6 or RAW8:
- * matadata width = current resolution width(pixel);
- * 3.other data format(RAW12/14 etc):
- * TBD.
- */
- if (data == IMX135_OUTPUT_FORMAT_RAW10)
- /* the data format is RAW10. */
- imx_info->metadata_width = res->width * 10 / 8;
- else
- /* The data format is RAW6/8/12/14/ etc. */
- imx_info->metadata_width = res->width;
-
- imx_info->metadata_height = IMX135_EMBEDDED_DATA_LINE_NUM;
-
- if (imx_info->metadata_effective_width == NULL)
- imx_info->metadata_effective_width =
- imx135_embedded_effective_size;
-
- break;
- case IMX227_ID:
- ret = imx_read_reg(client, 2, IMX227_OUTPUT_DATA_FORMAT_REG,
- &data);
- if (ret)
- goto out;
- if (data == IMX227_OUTPUT_FORMAT_RAW10)
- /* the data format is RAW10. */
- imx_info->metadata_width = res->width * 10 / 8;
- else
- /* The data format is RAW6/8/12/14/ etc. */
- imx_info->metadata_width = res->width;
-
- imx_info->metadata_height = IMX227_EMBEDDED_DATA_LINE_NUM;
-
- if (imx_info->metadata_effective_width == NULL)
- imx_info->metadata_effective_width =
- imx227_embedded_effective_size;
-
- break;
- default:
- imx_info->metadata_width = 0;
- imx_info->metadata_height = 0;
- imx_info->metadata_effective_width = NULL;
- break;
- }
-
-out:
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-
-static int imx_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct imx_device *dev = to_imx_sensor(sd);
-
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- fmt->width = dev->curr_res_table[dev->fmt_idx].width;
- fmt->height = dev->curr_res_table[dev->fmt_idx].height;
- fmt->code = dev->format.code;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int imx_detect(struct i2c_client *client, u16 *id, u8 *revision)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- /* i2c check */
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
- /* check sensor chip ID */
- if (imx_read_reg(client, IMX_16BIT, IMX132_175_208_219_CHIP_ID, id)) {
- v4l2_err(client, "sensor_id = 0x%x\n", *id);
- return -ENODEV;
- }
-
- if (*id == IMX132_ID || *id == IMX175_ID ||
- *id == IMX208_ID || *id == IMX219_ID)
- goto found;
-
- if (imx_read_reg(client, IMX_16BIT, IMX134_135_227_CHIP_ID, id)) {
- v4l2_err(client, "sensor_id = 0x%x\n", *id);
- return -ENODEV;
- }
- if (*id != IMX134_ID && *id != IMX135_ID && *id != IMX227_ID) {
- v4l2_err(client, "no imx sensor found\n");
- return -ENODEV;
- }
-found:
- v4l2_info(client, "sensor_id = 0x%x\n", *id);
-
- /* TODO - need to be updated */
- *revision = 0;
-
- return 0;
-}
-
-static void __imx_print_timing(struct v4l2_subdev *sd)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 width = dev->curr_res_table[dev->fmt_idx].width;
- u16 height = dev->curr_res_table[dev->fmt_idx].height;
-
- dev_dbg(&client->dev, "Dump imx timing in stream on:\n");
- dev_dbg(&client->dev, "width: %d:\n", width);
- dev_dbg(&client->dev, "height: %d:\n", height);
- dev_dbg(&client->dev, "pixels_per_line: %d:\n", dev->pixels_per_line);
- dev_dbg(&client->dev, "line per frame: %d:\n", dev->lines_per_frame);
- dev_dbg(&client->dev, "pix freq: %d:\n", dev->vt_pix_clk_freq_mhz);
- dev_dbg(&client->dev, "init fps: %d:\n", dev->vt_pix_clk_freq_mhz /
- dev->pixels_per_line / dev->lines_per_frame);
- dev_dbg(&client->dev, "HBlank: %d nS:\n",
- 1000 * (dev->pixels_per_line - width) /
- (dev->vt_pix_clk_freq_mhz / 1000000));
- dev_dbg(&client->dev, "VBlank: %d uS:\n",
- (dev->lines_per_frame - height) * dev->pixels_per_line /
- (dev->vt_pix_clk_freq_mhz / 1000000));
-}
-
-/*
- * imx stream on/off
- */
-static int imx_s_stream(struct v4l2_subdev *sd, int enable)
-{
- int ret;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- if (enable) {
- /* Noise reduction & dead pixel applied before streaming */
- if (dev->fw == NULL) {
- dev_warn(&client->dev, "No MSR loaded from library");
- } else {
- ret = apply_msr_data(client, dev->fw);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- }
- ret = imx_test_pattern(sd);
- if (ret) {
- v4l2_err(client, "Configure test pattern failed.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- __imx_print_timing(sd);
- ret = imx_write_reg_array(client, imx_streaming);
- if (ret != 0) {
- v4l2_err(client, "write_reg_array err\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- dev->streaming = 1;
- if (dev->vcm_driver && dev->vcm_driver->t_focus_abs_init)
- dev->vcm_driver->t_focus_abs_init(sd);
- } else {
- ret = imx_write_reg_array(client, imx_soft_standby);
- if (ret != 0) {
- v4l2_err(client, "write_reg_array err\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- dev->streaming = 0;
- dev->targetfps = 0;
- }
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-static int __update_imx_device_settings(struct imx_device *dev, u16 sensor_id)
-{
- /* IMX on other platform is not supported yet */
- return -EINVAL;
-}
-
-static int imx_s_config(struct v4l2_subdev *sd,
- int irq, void *pdata)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 sensor_revision;
- u16 sensor_id;
- int ret;
- if (pdata == NULL)
- return -ENODEV;
-
- dev->platform_data = pdata;
-
- mutex_lock(&dev->input_lock);
-
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "imx platform init err\n");
- return ret;
- }
- }
- /*
- * power off the module first.
- *
- * As first power on by board have undecided state of power/gpio pins.
- */
- ret = __imx_s_power(sd, 0);
- if (ret) {
- v4l2_err(client, "imx power-down err.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
-
- ret = __imx_s_power(sd, 1);
- if (ret) {
- v4l2_err(client, "imx power-up err.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
-
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_csi_cfg;
-
- /* config & detect sensor */
- ret = imx_detect(client, &sensor_id, &sensor_revision);
- if (ret) {
- v4l2_err(client, "imx_detect err s_config.\n");
- goto fail_detect;
- }
-
- dev->sensor_id = sensor_id;
- dev->sensor_revision = sensor_revision;
-
- /* Resolution settings depend on sensor type and platform */
- ret = __update_imx_device_settings(dev, dev->sensor_id);
- if (ret)
- goto fail_detect;
- /* Read sensor's OTP data */
- dev->otp_data = dev->otp_driver->otp_read(sd,
- dev->otp_driver->dev_addr, dev->otp_driver->start_addr,
- dev->otp_driver->size);
-
- /* power off sensor */
- ret = __imx_s_power(sd, 0);
-
- mutex_unlock(&dev->input_lock);
- if (ret)
- v4l2_err(client, "imx power-down err.\n");
-
- return ret;
-
-fail_detect:
- dev->platform_data->csi_cfg(sd, 0);
-fail_csi_cfg:
- __imx_s_power(sd, 0);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "sensor power-gating failed\n");
- return ret;
-}
-
-static int
-imx_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (code->index >= MAX_FMTS)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- code->code = dev->format.code;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int
-imx_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- int index = fse->index;
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- if (index >= dev->entries_curr_table) {
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
- fse->min_width = dev->curr_res_table[index].width;
- fse->min_height = dev->curr_res_table[index].height;
- fse->max_width = dev->curr_res_table[index].width;
- fse->max_height = dev->curr_res_table[index].height;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int
-imx_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- dev->run_mode = param->parm.capture.capturemode;
-
- switch (dev->run_mode) {
- case CI_MODE_VIDEO:
- dev->curr_res_table = dev->mode_tables->res_video;
- dev->entries_curr_table = dev->mode_tables->n_res_video;
- break;
- case CI_MODE_STILL_CAPTURE:
- dev->curr_res_table = dev->mode_tables->res_still;
- dev->entries_curr_table = dev->mode_tables->n_res_still;
- break;
- default:
- dev->curr_res_table = dev->mode_tables->res_preview;
- dev->entries_curr_table = dev->mode_tables->n_res_preview;
- }
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int imx_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- interval->interval.denominator = dev->fps;
- interval->interval.numerator = 1;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int __imx_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct imx_resolution *res =
- &dev->curr_res_table[dev->fmt_idx];
- struct camera_mipi_info *imx_info = NULL;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- unsigned int fps_index;
- int fps;
- int ret = 0;
-
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info == NULL)
- return -EINVAL;
-
- if (!interval->interval.numerator)
- interval->interval.numerator = 1;
-
- fps = interval->interval.denominator / interval->interval.numerator;
-
- if (!fps)
- return -EINVAL;
-
- dev->targetfps = fps;
- /* No need to proceed further if we are not streaming */
- if (!dev->streaming)
- return 0;
-
- /* Ignore if we are already using the required FPS. */
- if (fps == dev->fps)
- return 0;
-
- /*
- * Start here, sensor is already streaming, so adjust fps dynamically
- */
- fps_index = __imx_above_nearest_fps_index(fps, res->fps_options);
- if (fps > res->fps_options[fps_index].fps) {
- /*
- * if does not have high fps setting, not support increase fps
- * by adjust lines per frame.
- */
- dev_err(&client->dev, "Could not support fps: %d.\n", fps);
- return -EINVAL;
- }
-
- if (res->fps_options[fps_index].regs &&
- res->fps_options[fps_index].regs != dev->regs) {
- /*
- * if need a new setting, but the new setting has difference
- * with current setting, not use this one, as may have
- * unexpected result, e.g. PLL, IQ.
- */
- dev_dbg(&client->dev,
- "Sensor is streaming, not apply new sensor setting\n");
- if (fps > res->fps_options[dev->fps_index].fps) {
- /*
- * Does not support increase fps based on low fps
- * setting, as the high fps setting could not be used,
- * and fps requested is above current setting fps.
- */
- dev_warn(&client->dev,
- "Could not support fps: %d, keep current: %d.\n",
- fps, dev->fps);
- return 0;
- }
- } else {
- dev->fps_index = fps_index;
- dev->fps = res->fps_options[dev->fps_index].fps;
- }
-
- /* Update the new frametimings based on FPS */
- pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
- lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
-
- if (fps > res->fps_options[fps_index].fps) {
- /*
- * if does not have high fps setting, not support increase fps
- * by adjust lines per frame.
- */
- dev_warn(&client->dev, "Could not support fps: %d. Use:%d.\n",
- fps, res->fps_options[fps_index].fps);
- goto done;
- }
-
- /* if the new setting does not match exactly */
- if (dev->fps != fps) {
-#define MAX_LINES_PER_FRAME 0xffff
- dev_dbg(&client->dev, "adjusting fps using lines_per_frame\n");
- /*
- * FIXME!
- * 1: check DS on max value of lines_per_frame
- * 2: consider use pixel per line for more range?
- */
- if (dev->lines_per_frame * dev->fps / fps >
- MAX_LINES_PER_FRAME) {
- dev_warn(&client->dev,
- "adjust lines_per_frame out of range, try to use max value.\n");
- lines_per_frame = MAX_LINES_PER_FRAME;
- } else {
- lines_per_frame = lines_per_frame * dev->fps / fps;
- }
- }
-done:
- /* Update the new frametimings based on FPS */
- dev->pixels_per_line = pixels_per_line;
- dev->lines_per_frame = lines_per_frame;
-
- /* Update the new values so that user side knows the current settings */
- ret = __imx_update_exposure_timing(client,
- dev->coarse_itg, dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- return ret;
-
- dev->fps = fps;
-
- ret = imx_get_intg_factor(client, imx_info, dev->regs);
- if (ret)
- return ret;
-
- interval->interval.denominator = res->fps_options[dev->fps_index].fps;
- interval->interval.numerator = 1;
- __imx_print_timing(sd);
-
- return ret;
-}
-
-static int imx_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
- ret = __imx_s_frame_interval(sd, interval);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-static int imx_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- *frames = dev->curr_res_table[dev->fmt_idx].skip_frames;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-static const struct v4l2_subdev_sensor_ops imx_sensor_ops = {
- .g_skip_frames = imx_g_skip_frames,
-};
-
-static const struct v4l2_subdev_video_ops imx_video_ops = {
- .s_stream = imx_s_stream,
- .s_parm = imx_s_parm,
- .g_frame_interval = imx_g_frame_interval,
- .s_frame_interval = imx_s_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops imx_core_ops = {
- .s_power = imx_s_power,
- .ioctl = imx_ioctl,
- .init = imx_init,
-};
-
-static const struct v4l2_subdev_pad_ops imx_pad_ops = {
- .enum_mbus_code = imx_enum_mbus_code,
- .enum_frame_size = imx_enum_frame_size,
- .get_fmt = imx_get_fmt,
- .set_fmt = imx_set_fmt,
-};
-
-static const struct v4l2_subdev_ops imx_ops = {
- .core = &imx_core_ops,
- .video = &imx_video_ops,
- .pad = &imx_pad_ops,
- .sensor = &imx_sensor_ops,
-};
-
-static const struct media_entity_operations imx_entity_ops = {
- .link_setup = NULL,
-};
-
-static int imx_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
-
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
- media_entity_cleanup(&dev->sd.entity);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
- dev->platform_data->csi_cfg(sd, 0);
- v4l2_device_unregister_subdev(sd);
- release_msr_list(client, dev->fw);
- kfree(dev);
-
- return 0;
-}
-
-static int __imx_init_ctrl_handler(struct imx_device *dev)
-{
- struct v4l2_ctrl_handler *hdl;
- int i;
-
- hdl = &dev->ctrl_handler;
-
- v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(imx_controls));
-
- for (i = 0; i < ARRAY_SIZE(imx_controls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler,
- &imx_controls[i], NULL);
-
- dev->pixel_rate = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_PIXEL_RATE);
- dev->h_blank = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_HBLANK);
- dev->v_blank = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_VBLANK);
- dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_LINK_FREQ);
- dev->h_flip = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_HFLIP);
- dev->v_flip = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_VFLIP);
- dev->tp_mode = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN);
- dev->tp_r = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_R);
- dev->tp_gr = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_GR);
- dev->tp_gb = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_GB);
- dev->tp_b = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_B);
-
- if (dev->ctrl_handler.error || dev->pixel_rate == NULL
- || dev->h_blank == NULL || dev->v_blank == NULL
- || dev->h_flip == NULL || dev->v_flip == NULL
- || dev->link_freq == NULL) {
- return dev->ctrl_handler.error;
- }
-
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = hdl;
- v4l2_ctrl_handler_setup(&dev->ctrl_handler);
-
- return 0;
-}
-
-static void imx_update_reg_info(struct imx_device *dev)
-{
- if (dev->sensor_id == IMX219_ID) {
- dev->reg_addr = &imx219_addr;
- dev->param_hold = imx219_param_hold;
- dev->param_update = imx219_param_update;
- } else {
- dev->reg_addr = &imx_addr;
- dev->param_hold = imx_param_hold;
- dev->param_update = imx_param_update;
- }
-}
-
-static int imx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct imx_device *dev;
- struct camera_mipi_info *imx_info = NULL;
- int ret;
- char *msr_file_name = NULL;
-
- /* allocate sensor device & init sub device */
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- v4l2_err(client, "%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- mutex_init(&dev->input_lock);
-
- dev->i2c_id = id->driver_data;
- dev->fmt_idx = 0;
- dev->sensor_id = IMX_ID_DEFAULT;
- dev->vcm_driver = &imx_vcms[IMX_ID_DEFAULT];
- dev->digital_gain = 256;
-
- v4l2_i2c_subdev_init(&(dev->sd), client, &imx_ops);
-
- if (client->dev.platform_data) {
- ret = imx_s_config(&dev->sd, client->irq,
- client->dev.platform_data);
- if (ret)
- goto out_free;
- }
- imx_info = v4l2_get_subdev_hostdata(&dev->sd);
-
- /*
- * sd->name is updated with sensor driver name by the v4l2.
- * change it to sensor name in this case.
- */
- imx_update_reg_info(dev);
- snprintf(dev->sd.name, sizeof(dev->sd.name), "%s%x %d-%04x",
- IMX_SUBDEV_PREFIX, dev->sensor_id,
- i2c_adapter_id(client->adapter), client->addr);
-
- ret = __imx_init_ctrl_handler(dev);
- if (ret)
- goto out_ctrl_handler_free;
-
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- dev->sd.entity.ops = &imx_entity_ops;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret) {
- imx_remove(client);
- return ret;
- }
-
- /* Load the Noise reduction, Dead pixel registers from cpf file*/
- if (dev->platform_data->msr_file_name != NULL)
- msr_file_name = dev->platform_data->msr_file_name();
- if (msr_file_name) {
- ret = load_msr_list(client, msr_file_name, &dev->fw);
- if (ret) {
- imx_remove(client);
- return ret;
- }
- } else {
- dev_warn(&client->dev, "Drvb file not present");
- }
-
- return ret;
-
-out_ctrl_handler_free:
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
-out_free:
- v4l2_device_unregister_subdev(&dev->sd);
- kfree(dev);
- return ret;
-}
-
-static const struct i2c_device_id imx_ids[] = {
- {IMX_NAME_175, IMX175_ID},
- {IMX_NAME_135, IMX135_ID},
- {IMX_NAME_135_FUJI, IMX135_FUJI_ID},
- {IMX_NAME_134, IMX134_ID},
- {IMX_NAME_132, IMX132_ID},
- {IMX_NAME_208, IMX208_ID},
- {IMX_NAME_219, IMX219_ID},
- {IMX_NAME_227, IMX227_ID},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, imx_ids);
-
-static struct i2c_driver imx_driver = {
- .driver = {
- .name = IMX_DRIVER,
- },
- .probe = imx_probe,
- .remove = imx_remove,
- .id_table = imx_ids,
-};
-
-static __init int init_imx(void)
-{
- return i2c_add_driver(&imx_driver);
-}
-
-static __exit void exit_imx(void)
-{
- i2c_del_driver(&imx_driver);
-}
-
-module_init(init_imx);
-module_exit(exit_imx);
-
-MODULE_DESCRIPTION("A low-level driver for Sony IMX sensors");
-MODULE_AUTHOR("Shenbo Huang <shenbo.huang@intel.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h
deleted file mode 100644
index 30beb2a0ed93..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx.h
+++ /dev/null
@@ -1,737 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX_H__
-#define __IMX_H__
-#include "../../include/linux/atomisp_platform.h"
-#include "../../include/linux/atomisp.h"
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/videodev2.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-subdev.h>
-#include "imx175.h"
-#include "imx135.h"
-#include "imx134.h"
-#include "imx132.h"
-#include "imx208.h"
-#include "imx219.h"
-#include "imx227.h"
-
-#define IMX_MCLK 192
-
-/* TODO - This should be added into include/linux/videodev2.h */
-#ifndef V4L2_IDENT_IMX
-#define V4L2_IDENT_IMX 8245
-#endif
-
-#define IMX_MAX_AE_LUT_LENGTH 5
-/*
- * imx System control registers
- */
-#define IMX_MASK_5BIT 0x1F
-#define IMX_MASK_4BIT 0xF
-#define IMX_MASK_3BIT 0x7
-#define IMX_MASK_2BIT 0x3
-#define IMX_MASK_8BIT 0xFF
-#define IMX_MASK_11BIT 0x7FF
-#define IMX_INTG_BUF_COUNT 2
-
-#define IMX_FINE_INTG_TIME 0x1E8
-
-#define IMX_VT_PIX_CLK_DIV 0x0301
-#define IMX_VT_SYS_CLK_DIV 0x0303
-#define IMX_PRE_PLL_CLK_DIV 0x0305
-#define IMX227_IOP_PRE_PLL_CLK_DIV 0x030D
-#define IMX227_PLL_MULTIPLIER 0x0306
-#define IMX227_IOP_PLL_MULTIPLIER 0x030E
-#define IMX227_PLL_MULTI_DRIVE 0x0310
-#define IMX227_OP_PIX_CLK_DIV 0x0309
-#define IMX227_OP_SYS_CLK_DIV 0x030B
-#define IMX_PLL_MULTIPLIER 0x030C
-#define IMX_OP_PIX_DIV 0x0309
-#define IMX_OP_SYS_DIV 0x030B
-#define IMX_FRAME_LENGTH_LINES 0x0340
-#define IMX_LINE_LENGTH_PIXELS 0x0342
-#define IMX_COARSE_INTG_TIME_MIN 0x1004
-#define IMX_COARSE_INTG_TIME_MAX 0x1006
-#define IMX_BINNING_ENABLE 0x0390
-#define IMX227_BINNING_ENABLE 0x0900
-#define IMX_BINNING_TYPE 0x0391
-#define IMX227_BINNING_TYPE 0x0901
-#define IMX_READ_MODE 0x0390
-#define IMX227_READ_MODE 0x0900
-
-#define IMX_HORIZONTAL_START_H 0x0344
-#define IMX_VERTICAL_START_H 0x0346
-#define IMX_HORIZONTAL_END_H 0x0348
-#define IMX_VERTICAL_END_H 0x034a
-#define IMX_HORIZONTAL_OUTPUT_SIZE_H 0x034c
-#define IMX_VERTICAL_OUTPUT_SIZE_H 0x034e
-
-/* Post Divider setting register for imx132 and imx208 */
-#define IMX132_208_VT_RGPLTD 0x30A4
-
-/* Multiplier setting register for imx132, imx208, and imx219 */
-#define IMX132_208_219_PLL_MULTIPLIER 0x0306
-
-#define IMX_COARSE_INTEGRATION_TIME 0x0202
-#define IMX_TEST_PATTERN_MODE 0x0600
-#define IMX_TEST_PATTERN_COLOR_R 0x0602
-#define IMX_TEST_PATTERN_COLOR_GR 0x0604
-#define IMX_TEST_PATTERN_COLOR_B 0x0606
-#define IMX_TEST_PATTERN_COLOR_GB 0x0608
-#define IMX_IMG_ORIENTATION 0x0101
-#define IMX_VFLIP_BIT 2
-#define IMX_HFLIP_BIT 1
-#define IMX_GLOBAL_GAIN 0x0205
-#define IMX_SHORT_AGC_GAIN 0x0233
-#define IMX_DGC_ADJ 0x020E
-#define IMX_DGC_LEN 10
-#define IMX227_DGC_LEN 4
-#define IMX_MAX_EXPOSURE_SUPPORTED 0xfffb
-#define IMX_MAX_GLOBAL_GAIN_SUPPORTED 0x00ff
-#define IMX_MAX_DIGITAL_GAIN_SUPPORTED 0x0fff
-
-#define MAX_FMTS 1
-#define IMX_OTP_DATA_SIZE 1280
-
-#define IMX_SUBDEV_PREFIX "imx"
-#define IMX_DRIVER "imx1x5"
-
-/* Sensor ids from identification register */
-#define IMX_NAME_134 "imx134"
-#define IMX_NAME_135 "imx135"
-#define IMX_NAME_175 "imx175"
-#define IMX_NAME_132 "imx132"
-#define IMX_NAME_208 "imx208"
-#define IMX_NAME_219 "imx219"
-#define IMX_NAME_227 "imx227"
-#define IMX175_ID 0x0175
-#define IMX135_ID 0x0135
-#define IMX134_ID 0x0134
-#define IMX132_ID 0x0132
-#define IMX208_ID 0x0208
-#define IMX219_ID 0x0219
-#define IMX227_ID 0x0227
-
-/* Sensor id based on i2c_device_id table
- * (Fuji module can not be detected based on sensor registers) */
-#define IMX135_FUJI_ID 0x0136
-#define IMX_NAME_135_FUJI "imx135fuji"
-
-/* imx175 - use dw9714 vcm */
-#define IMX175_MERRFLD 0x175
-#define IMX175_VALLEYVIEW 0x176
-#define IMX135_SALTBAY 0x135
-#define IMX135_VICTORIABAY 0x136
-#define IMX132_SALTBAY 0x132
-#define IMX134_VALLEYVIEW 0x134
-#define IMX208_MOFD_PD2 0x208
-#define IMX219_MFV0_PRH 0x219
-#define IMX227_SAND 0x227
-
-/* otp - specific settings */
-#define E2PROM_ADDR 0xa0
-#define E2PROM_LITEON_12P1BA869D_ADDR 0xa0
-#define E2PROM_ABICO_SS89A839_ADDR 0xa8
-#define DEFAULT_OTP_SIZE 1280
-#define IMX135_OTP_SIZE 1280
-#define IMX219_OTP_SIZE 2048
-#define IMX227_OTP_SIZE 2560
-#define E2PROM_LITEON_12P1BA869D_SIZE 544
-
-#define IMX_ID_DEFAULT 0x0000
-#define IMX132_175_208_219_CHIP_ID 0x0000
-#define IMX134_135_CHIP_ID 0x0016
-#define IMX134_135_227_CHIP_ID 0x0016
-
-#define IMX175_RES_WIDTH_MAX 3280
-#define IMX175_RES_HEIGHT_MAX 2464
-#define IMX135_RES_WIDTH_MAX 4208
-#define IMX135_RES_HEIGHT_MAX 3120
-#define IMX132_RES_WIDTH_MAX 1936
-#define IMX132_RES_HEIGHT_MAX 1096
-#define IMX134_RES_WIDTH_MAX 3280
-#define IMX134_RES_HEIGHT_MAX 2464
-#define IMX208_RES_WIDTH_MAX 1936
-#define IMX208_RES_HEIGHT_MAX 1096
-#define IMX219_RES_WIDTH_MAX 3280
-#define IMX219_RES_HEIGHT_MAX 2464
-#define IMX227_RES_WIDTH_MAX 2400
-#define IMX227_RES_HEIGHT_MAX 2720
-
-/* Defines for lens/VCM */
-#define IMX_FOCAL_LENGTH_NUM 369 /*3.69mm*/
-#define IMX_FOCAL_LENGTH_DEM 100
-#define IMX_F_NUMBER_DEFAULT_NUM 22
-#define IMX_F_NUMBER_DEM 10
-#define IMX_INVALID_CONFIG 0xffffffff
-#define IMX_MAX_FOCUS_POS 1023
-#define IMX_MAX_FOCUS_NEG (-1023)
-#define IMX_VCM_SLEW_STEP_MAX 0x3f
-#define IMX_VCM_SLEW_TIME_MAX 0x1f
-
-#define IMX_BIN_FACTOR_MAX 4
-#define IMX_INTEGRATION_TIME_MARGIN 4
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_F_NUMBER_DEFAULT 0x16000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define IMX_F_NUMBER_RANGE 0x160a160a
-
-struct imx_vcm {
- int (*power_up)(struct v4l2_subdev *sd);
- int (*power_down)(struct v4l2_subdev *sd);
- int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value);
- int (*t_focus_abs_init)(struct v4l2_subdev *sd);
- int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value);
- int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value);
- int (*q_focus_abs)(struct v4l2_subdev *sd, s32 *value);
- int (*t_vcm_slew)(struct v4l2_subdev *sd, s32 value);
- int (*t_vcm_timing)(struct v4l2_subdev *sd, s32 value);
-};
-
-struct imx_otp {
- void * (*otp_read)(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
- u32 start_addr;
- u32 size;
- u8 dev_addr;
-};
-
-struct max_res {
- int res_max_width;
- int res_max_height;
-};
-
-struct max_res imx_max_res[] = {
- [IMX175_ID] = {
- .res_max_width = IMX175_RES_WIDTH_MAX,
- .res_max_height = IMX175_RES_HEIGHT_MAX,
- },
- [IMX135_ID] = {
- .res_max_width = IMX135_RES_WIDTH_MAX,
- .res_max_height = IMX135_RES_HEIGHT_MAX,
- },
- [IMX132_ID] = {
- .res_max_width = IMX132_RES_WIDTH_MAX,
- .res_max_height = IMX132_RES_HEIGHT_MAX,
- },
- [IMX134_ID] = {
- .res_max_width = IMX134_RES_WIDTH_MAX,
- .res_max_height = IMX134_RES_HEIGHT_MAX,
- },
- [IMX208_ID] = {
- .res_max_width = IMX208_RES_WIDTH_MAX,
- .res_max_height = IMX208_RES_HEIGHT_MAX,
- },
- [IMX219_ID] = {
- .res_max_width = IMX219_RES_WIDTH_MAX,
- .res_max_height = IMX219_RES_HEIGHT_MAX,
- },
- [IMX227_ID] = {
- .res_max_width = IMX227_RES_WIDTH_MAX,
- .res_max_height = IMX227_RES_HEIGHT_MAX,
- },
-};
-
-struct imx_settings {
- struct imx_reg const *init_settings;
- struct imx_resolution *res_preview;
- struct imx_resolution *res_still;
- struct imx_resolution *res_video;
- int n_res_preview;
- int n_res_still;
- int n_res_video;
-};
-
-struct imx_settings imx_sets[] = {
- [IMX175_MERRFLD] = {
- .init_settings = imx175_init_settings,
- .res_preview = imx175_res_preview,
- .res_still = imx175_res_still,
- .res_video = imx175_res_video,
- .n_res_preview = ARRAY_SIZE(imx175_res_preview),
- .n_res_still = ARRAY_SIZE(imx175_res_still),
- .n_res_video = ARRAY_SIZE(imx175_res_video),
- },
- [IMX175_VALLEYVIEW] = {
- .init_settings = imx175_init_settings,
- .res_preview = imx175_res_preview,
- .res_still = imx175_res_still,
- .res_video = imx175_res_video,
- .n_res_preview = ARRAY_SIZE(imx175_res_preview),
- .n_res_still = ARRAY_SIZE(imx175_res_still),
- .n_res_video = ARRAY_SIZE(imx175_res_video),
- },
- [IMX135_SALTBAY] = {
- .init_settings = imx135_init_settings,
- .res_preview = imx135_res_preview,
- .res_still = imx135_res_still,
- .res_video = imx135_res_video,
- .n_res_preview = ARRAY_SIZE(imx135_res_preview),
- .n_res_still = ARRAY_SIZE(imx135_res_still),
- .n_res_video = ARRAY_SIZE(imx135_res_video),
- },
- [IMX135_VICTORIABAY] = {
- .init_settings = imx135_init_settings,
- .res_preview = imx135_res_preview_mofd,
- .res_still = imx135_res_still_mofd,
- .res_video = imx135_res_video,
- .n_res_preview = ARRAY_SIZE(imx135_res_preview_mofd),
- .n_res_still = ARRAY_SIZE(imx135_res_still_mofd),
- .n_res_video = ARRAY_SIZE(imx135_res_video),
- },
- [IMX132_SALTBAY] = {
- .init_settings = imx132_init_settings,
- .res_preview = imx132_res_preview,
- .res_still = imx132_res_still,
- .res_video = imx132_res_video,
- .n_res_preview = ARRAY_SIZE(imx132_res_preview),
- .n_res_still = ARRAY_SIZE(imx132_res_still),
- .n_res_video = ARRAY_SIZE(imx132_res_video),
- },
- [IMX134_VALLEYVIEW] = {
- .init_settings = imx134_init_settings,
- .res_preview = imx134_res_preview,
- .res_still = imx134_res_still,
- .res_video = imx134_res_video,
- .n_res_preview = ARRAY_SIZE(imx134_res_preview),
- .n_res_still = ARRAY_SIZE(imx134_res_still),
- .n_res_video = ARRAY_SIZE(imx134_res_video),
- },
- [IMX208_MOFD_PD2] = {
- .init_settings = imx208_init_settings,
- .res_preview = imx208_res_preview,
- .res_still = imx208_res_still,
- .res_video = imx208_res_video,
- .n_res_preview = ARRAY_SIZE(imx208_res_preview),
- .n_res_still = ARRAY_SIZE(imx208_res_still),
- .n_res_video = ARRAY_SIZE(imx208_res_video),
- },
- [IMX219_MFV0_PRH] = {
- .init_settings = imx219_init_settings,
- .res_preview = imx219_res_preview,
- .res_still = imx219_res_still,
- .res_video = imx219_res_video,
- .n_res_preview = ARRAY_SIZE(imx219_res_preview),
- .n_res_still = ARRAY_SIZE(imx219_res_still),
- .n_res_video = ARRAY_SIZE(imx219_res_video),
- },
- [IMX227_SAND] = {
- .init_settings = imx227_init_settings,
- .res_preview = imx227_res_preview,
- .res_still = imx227_res_still,
- .res_video = imx227_res_video,
- .n_res_preview = ARRAY_SIZE(imx227_res_preview),
- .n_res_still = ARRAY_SIZE(imx227_res_still),
- .n_res_video = ARRAY_SIZE(imx227_res_video),
- },
-};
-
-struct imx_reg_addr {
- u16 frame_length_lines;
- u16 line_length_pixels;
- u16 horizontal_start_h;
- u16 vertical_start_h;
- u16 horizontal_end_h;
- u16 vertical_end_h;
- u16 horizontal_output_size_h;
- u16 vertical_output_size_h;
- u16 coarse_integration_time;
- u16 img_orientation;
- u16 global_gain;
- u16 dgc_adj;
-};
-
-struct imx_reg_addr imx_addr = {
- IMX_FRAME_LENGTH_LINES,
- IMX_LINE_LENGTH_PIXELS,
- IMX_HORIZONTAL_START_H,
- IMX_VERTICAL_START_H,
- IMX_HORIZONTAL_END_H,
- IMX_VERTICAL_END_H,
- IMX_HORIZONTAL_OUTPUT_SIZE_H,
- IMX_VERTICAL_OUTPUT_SIZE_H,
- IMX_COARSE_INTEGRATION_TIME,
- IMX_IMG_ORIENTATION,
- IMX_GLOBAL_GAIN,
- IMX_DGC_ADJ,
-};
-
-struct imx_reg_addr imx219_addr = {
- IMX219_FRAME_LENGTH_LINES,
- IMX219_LINE_LENGTH_PIXELS,
- IMX219_HORIZONTAL_START_H,
- IMX219_VERTICAL_START_H,
- IMX219_HORIZONTAL_END_H,
- IMX219_VERTICAL_END_H,
- IMX219_HORIZONTAL_OUTPUT_SIZE_H,
- IMX219_VERTICAL_OUTPUT_SIZE_H,
- IMX219_COARSE_INTEGRATION_TIME,
- IMX219_IMG_ORIENTATION,
- IMX219_GLOBAL_GAIN,
- IMX219_DGC_ADJ,
-};
-
-#define v4l2_format_capture_type_entry(_width, _height, \
- _pixelformat, _bytesperline, _colorspace) \
- {\
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,\
- .fmt.pix.width = (_width),\
- .fmt.pix.height = (_height),\
- .fmt.pix.pixelformat = (_pixelformat),\
- .fmt.pix.bytesperline = (_bytesperline),\
- .fmt.pix.colorspace = (_colorspace),\
- .fmt.pix.sizeimage = (_height)*(_bytesperline),\
- }
-
-#define s_output_format_entry(_width, _height, _pixelformat, \
- _bytesperline, _colorspace, _fps) \
- {\
- .v4l2_fmt = v4l2_format_capture_type_entry(_width, \
- _height, _pixelformat, _bytesperline, \
- _colorspace),\
- .fps = (_fps),\
- }
-
-#define s_output_format_reg_entry(_width, _height, _pixelformat, \
- _bytesperline, _colorspace, _fps, _reg_setting) \
- {\
- .s_fmt = s_output_format_entry(_width, _height,\
- _pixelformat, _bytesperline, \
- _colorspace, _fps),\
- .reg_setting = (_reg_setting),\
- }
-
-/* imx device structure */
-struct imx_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct v4l2_mbus_framefmt format;
- struct camera_sensor_platform_data *platform_data;
- struct mutex input_lock; /* serialize sensor's ioctl */
- int fmt_idx;
- int status;
- int streaming;
- int power;
- int run_mode;
- int vt_pix_clk_freq_mhz;
- int fps_index;
- u32 focus;
- u16 sensor_id; /* Sensor id from registers */
- u16 i2c_id; /* Sensor id from i2c_device_id */
- u16 coarse_itg;
- u16 fine_itg;
- u16 digital_gain;
- u16 gain;
- u16 pixels_per_line;
- u16 lines_per_frame;
- u8 targetfps;
- u8 fps;
- const struct imx_reg *regs;
- u8 res;
- u8 type;
- u8 sensor_revision;
- u8 *otp_data;
- struct imx_settings *mode_tables;
- struct imx_vcm *vcm_driver;
- struct imx_otp *otp_driver;
- const struct imx_resolution *curr_res_table;
- unsigned long entries_curr_table;
- const struct firmware *fw;
- struct imx_reg_addr *reg_addr;
- const struct imx_reg *param_hold;
- const struct imx_reg *param_update;
-
- /* used for h/b blank tuning */
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *pixel_rate;
- struct v4l2_ctrl *h_blank;
- struct v4l2_ctrl *v_blank;
- struct v4l2_ctrl *link_freq;
- struct v4l2_ctrl *h_flip;
- struct v4l2_ctrl *v_flip;
-
- /* Test pattern control */
- struct v4l2_ctrl *tp_mode;
- struct v4l2_ctrl *tp_r;
- struct v4l2_ctrl *tp_gr;
- struct v4l2_ctrl *tp_gb;
- struct v4l2_ctrl *tp_b;
-
- /* FIXME! */
- bool new_res_sel_method;
-};
-
-#define to_imx_sensor(x) container_of(x, struct imx_device, sd)
-
-#define IMX_MAX_WRITE_BUF_SIZE 32
-struct imx_write_buffer {
- u16 addr;
- u8 data[IMX_MAX_WRITE_BUF_SIZE];
-};
-
-struct imx_write_ctrl {
- int index;
- struct imx_write_buffer buffer;
-};
-
-static const struct imx_reg imx_soft_standby[] = {
- {IMX_8BIT, 0x0100, 0x00},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_streaming[] = {
- {IMX_8BIT, 0x0100, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_param_hold[] = {
- {IMX_8BIT, 0x0104, 0x01}, /* GROUPED_PARAMETER_HOLD */
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_param_update[] = {
- {IMX_8BIT, 0x0104, 0x00}, /* GROUPED_PARAMETER_HOLD */
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx219_param_hold[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx219_param_update[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-extern int ad5816g_vcm_power_up(struct v4l2_subdev *sd);
-extern int ad5816g_vcm_power_down(struct v4l2_subdev *sd);
-extern int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int drv201_vcm_power_up(struct v4l2_subdev *sd);
-extern int drv201_vcm_power_down(struct v4l2_subdev *sd);
-extern int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9714_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9714_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_t_focus_abs_init(struct v4l2_subdev *sd);
-extern int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9719_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9719_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9718_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9718_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int vcm_power_up(struct v4l2_subdev *sd);
-extern int vcm_power_down(struct v4l2_subdev *sd);
-
-struct imx_vcm imx_vcms[] = {
- [IMX175_MERRFLD] = {
- .power_up = drv201_vcm_power_up,
- .power_down = drv201_vcm_power_down,
- .t_focus_abs = drv201_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = drv201_t_focus_rel,
- .q_focus_status = drv201_q_focus_status,
- .q_focus_abs = drv201_q_focus_abs,
- .t_vcm_slew = drv201_t_vcm_slew,
- .t_vcm_timing = drv201_t_vcm_timing,
- },
- [IMX175_VALLEYVIEW] = {
- .power_up = dw9714_vcm_power_up,
- .power_down = dw9714_vcm_power_down,
- .t_focus_abs = dw9714_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9714_t_focus_rel,
- .q_focus_status = dw9714_q_focus_status,
- .q_focus_abs = dw9714_q_focus_abs,
- .t_vcm_slew = dw9714_t_vcm_slew,
- .t_vcm_timing = dw9714_t_vcm_timing,
- },
- [IMX135_SALTBAY] = {
- .power_up = ad5816g_vcm_power_up,
- .power_down = ad5816g_vcm_power_down,
- .t_focus_abs = ad5816g_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = ad5816g_t_focus_rel,
- .q_focus_status = ad5816g_q_focus_status,
- .q_focus_abs = ad5816g_q_focus_abs,
- .t_vcm_slew = ad5816g_t_vcm_slew,
- .t_vcm_timing = ad5816g_t_vcm_timing,
- },
- [IMX135_VICTORIABAY] = {
- .power_up = dw9719_vcm_power_up,
- .power_down = dw9719_vcm_power_down,
- .t_focus_abs = dw9719_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9719_t_focus_rel,
- .q_focus_status = dw9719_q_focus_status,
- .q_focus_abs = dw9719_q_focus_abs,
- .t_vcm_slew = dw9719_t_vcm_slew,
- .t_vcm_timing = dw9719_t_vcm_timing,
- },
- [IMX134_VALLEYVIEW] = {
- .power_up = dw9714_vcm_power_up,
- .power_down = dw9714_vcm_power_down,
- .t_focus_abs = dw9714_t_focus_abs,
- .t_focus_abs_init = dw9714_t_focus_abs_init,
- .t_focus_rel = dw9714_t_focus_rel,
- .q_focus_status = dw9714_q_focus_status,
- .q_focus_abs = dw9714_q_focus_abs,
- .t_vcm_slew = dw9714_t_vcm_slew,
- .t_vcm_timing = dw9714_t_vcm_timing,
- },
- [IMX219_MFV0_PRH] = {
- .power_up = dw9718_vcm_power_up,
- .power_down = dw9718_vcm_power_down,
- .t_focus_abs = dw9718_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9718_t_focus_rel,
- .q_focus_status = dw9718_q_focus_status,
- .q_focus_abs = dw9718_q_focus_abs,
- .t_vcm_slew = dw9718_t_vcm_slew,
- .t_vcm_timing = dw9718_t_vcm_timing,
- },
- [IMX_ID_DEFAULT] = {
- .power_up = NULL,
- .power_down = NULL,
- .t_focus_abs_init = NULL,
- },
-};
-
-extern void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-struct imx_otp imx_otps[] = {
- [IMX175_MERRFLD] = {
- .otp_read = imx_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX175_VALLEYVIEW] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_ABICO_SS89A839_ADDR,
- .start_addr = E2PROM_2ADDR,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX135_SALTBAY] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX135_VICTORIABAY] = {
- .otp_read = imx_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX134_VALLEYVIEW] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_LITEON_12P1BA869D_ADDR,
- .start_addr = 0,
- .size = E2PROM_LITEON_12P1BA869D_SIZE,
- },
- [IMX132_SALTBAY] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX208_MOFD_PD2] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX219_MFV0_PRH] = {
- .otp_read = brcc064_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = IMX219_OTP_SIZE,
- },
- [IMX227_SAND] = {
- .otp_read = imx227_otp_read,
- .size = IMX227_OTP_SIZE,
- },
- [IMX_ID_DEFAULT] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
-};
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx132.h b/drivers/staging/media/atomisp/i2c/imx/imx132.h
deleted file mode 100644
index 98f047b8a1ba..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx132.h
+++ /dev/null
@@ -1,566 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX132_H__
-#define __IMX132_H__
-#include "common.h"
-
-/********************** registers define ********************************/
-#define IMX132_RGLANESEL 0x3301 /* Number of lanes */
-#define IMX132_RGLANESEL_1LANE 0x01
-#define IMX132_RGLANESEL_2LANES 0x00
-#define IMX132_RGLANESEL_4LANES 0x03
-
-#define IMX132_2LANES_GAINFACT 2096 /* 524/256 * 2^10 */
-#define IMX132_2LANES_GAINFACT_SHIFT 10
-
-/********************** settings for imx from vendor*********************/
-static struct imx_reg imx132_1080p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x14},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0xA3},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x07},
- {IMX_8BIT, 0x034D, 0x90},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1456x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0x04},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0xB3},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x05},
- {IMX_8BIT, 0x034D, 0xB0},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1636x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0xAA},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x0D},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x06},
- {IMX_8BIT, 0x034D, 0x64},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1336x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0x2C},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0x77},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x05},
- {IMX_8BIT, 0x034D, 0x38},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-/********************** settings for imx - reference *********************/
-static struct imx_reg const imx132_init_settings[] = {
- /* sw reset */
- { IMX_8BIT, 0x0100, 0x00 },
- { IMX_8BIT, 0x0103, 0x01 },
- { IMX_TOK_DELAY, 0, 5},
- { IMX_8BIT, 0x0103, 0x00 },
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx132_res_preview[] = {
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-
-struct imx_resolution imx132_res_still[] = {
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-
-struct imx_resolution imx132_res_video[] = {
- {
- .desc = "imx132_1336x1096_30fps",
- .regs = imx132_1336x1096_30fps,
- .width = 1336,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1456x1096_30fps",
- .regs = imx132_1456x1096_30fps,
- .width = 1456,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1636x1096_30fps",
- .regs = imx132_1636x1096_30fps,
- .width = 1636,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx134.h b/drivers/staging/media/atomisp/i2c/imx/imx134.h
deleted file mode 100644
index 9026e8ba5679..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx134.h
+++ /dev/null
@@ -1,2465 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX134_H__
-#define __IMX134_H__
-
-/********************** imx134 setting - version 1 *********************/
-static struct imx_reg const imx134_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Basic settings */
- { IMX_8BIT, 0x0105, 0x01 },
- { IMX_8BIT, 0x0220, 0x01 },
- { IMX_8BIT, 0x3302, 0x11 },
- { IMX_8BIT, 0x3833, 0x20 },
- { IMX_8BIT, 0x3893, 0x00 },
- { IMX_8BIT, 0x3906, 0x08 },
- { IMX_8BIT, 0x3907, 0x01 },
- { IMX_8BIT, 0x391B, 0x01 },
- { IMX_8BIT, 0x3C09, 0x01 },
- { IMX_8BIT, 0x600A, 0x00 },
-
- /* Analog settings */
- { IMX_8BIT, 0x3008, 0xB0 },
- { IMX_8BIT, 0x320A, 0x01 },
- { IMX_8BIT, 0x320D, 0x10 },
- { IMX_8BIT, 0x3216, 0x2E },
- { IMX_8BIT, 0x322C, 0x02 },
- { IMX_8BIT, 0x3409, 0x0C },
- { IMX_8BIT, 0x340C, 0x2D },
- { IMX_8BIT, 0x3411, 0x39 },
- { IMX_8BIT, 0x3414, 0x1E },
- { IMX_8BIT, 0x3427, 0x04 },
- { IMX_8BIT, 0x3480, 0x1E },
- { IMX_8BIT, 0x3484, 0x1E },
- { IMX_8BIT, 0x3488, 0x1E },
- { IMX_8BIT, 0x348C, 0x1E },
- { IMX_8BIT, 0x3490, 0x1E },
- { IMX_8BIT, 0x3494, 0x1E },
- { IMX_8BIT, 0x3511, 0x8F },
- { IMX_8BIT, 0x3617, 0x2D },
-
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 3280x2464 8M 30fps, vendor provide */
-static struct imx_reg const imx134_8M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* clock setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x0C }, /* x_output_size[15:8]: 3280*/
- { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x09 }, /* y_output_size[15:8]:2464 */
- { IMX_8BIT, 0x034F, 0xA0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C },
- { IMX_8BIT, 0x0355, 0xD0 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0xA0 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x0C },
- { IMX_8BIT, 0x3311, 0xD0 },
- { IMX_8BIT, 0x3312, 0x09 },
- { IMX_8BIT, 0x3313, 0xA0 },
- { IMX_8BIT, 0x331C, 0x01 },
- { IMX_8BIT, 0x331D, 0xAE },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global timing setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration time setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/2 binning 30fps 1640x1232, vendor provide */
-static struct imx_reg const imx134_1640_1232_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1640 */
- { IMX_8BIT, 0x034D, 0x68 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1232 */
- { IMX_8BIT, 0x034F, 0xD0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x68 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0xD0 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x68 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0xD0 },
-
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x06 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 820x616, vendor provide */
-static struct imx_reg const imx134_820_616_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:820 */
- { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x03 },
- { IMX_8BIT, 0x0355, 0x34 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x68 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x34 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x68 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 820x552 */
-static struct imx_reg const imx134_820_552_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:128 */
- { IMX_8BIT, 0x0347, 0x80 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3280-1 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2208+128-1 */
- { IMX_8BIT, 0x034B, 0x1F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]: */
- { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x28 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x03 },
- { IMX_8BIT, 0x0355, 0x34 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x28 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x34 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 720x592 */
-static struct imx_reg const imx134_720_592_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:200 */
- { IMX_8BIT, 0x0345, 0xC8 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:40 */
- { IMX_8BIT, 0x0347, 0x28 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:2880+200-1 */
- { IMX_8BIT, 0x0349, 0x07 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2368+40-1 */
- { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]: */
- { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x02 },
- { IMX_8BIT, 0x0355, 0xD0 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x50 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x02 },
- { IMX_8BIT, 0x3311, 0xD0 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x50 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-static struct imx_reg const imx134_752_616_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:136 */
- { IMX_8BIT, 0x0345, 0x88 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3145+134-1 */
- { IMX_8BIT, 0x0349, 0x47 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]: 752*/
- { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
-
- { IMX_8BIT, 0x0354, 0x02 },
- { IMX_8BIT, 0x0355, 0xF0 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x68 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x02 },
- { IMX_8BIT, 0x3311, 0xF0 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x68 },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 1424x1168 */
-static struct imx_reg const imx134_1424_1168_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* binning */
- { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x22 }, /* 34/16=2.125 */
- { IMX_8BIT, 0x4082, 0x00 }, /* ?? */
- { IMX_8BIT, 0x4083, 0x00 }, /* ?? */
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:136 */
- { IMX_8BIT, 0x0345, 0x80 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3145+134-1 */
- { IMX_8BIT, 0x0349, 0x51 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0xB1 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]: 1424*/
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1168 */
- { IMX_8BIT, 0x034F, 0x90 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
-
- { IMX_8BIT, 0x0354, 0x0B },
- { IMX_8BIT, 0x0355, 0xD2 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0xB2 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x05 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x90 },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x05 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x90 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning, 16/35 down scaling, 30fps, dvs */
-static struct imx_reg const imx134_240_196_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /*4x4 binning */
- { IMX_8BIT, 0x0391, 0x44 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x23 }, /* down scaling = 16/35 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
- { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
- { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2104+590-1 */
- { IMX_8BIT, 0x0349, 0x85 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1720+366-1 */
- { IMX_8BIT, 0x034B, 0x25 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x00 }, /* x_output_size[15:8]: 240*/
- { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x00 }, /* y_output_size[15:8]:196 */
- { IMX_8BIT, 0x034F, 0xC4 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x02 }, /* crop_x: 526 */
- { IMX_8BIT, 0x0355, 0x0E },
- { IMX_8BIT, 0x0356, 0x01 }, /* crop_y: 430 */
- { IMX_8BIT, 0x0357, 0xAE },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x00 },
- { IMX_8BIT, 0x3311, 0xF0 },
- { IMX_8BIT, 0x3312, 0x00 },
- { IMX_8BIT, 0x3313, 0xC4 },
-
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
-
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0xF0 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0xC4 },
-
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x0A },
- { IMX_8BIT, 0x0203, 0x88 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/2 binning, 16/38 downscaling, 30fps, dvs */
-static struct imx_reg const imx134_448_366_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* 2x2 binning */
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x26 }, /* down scaling = 16/38 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
- { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
- { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2128+590-1 */
- { IMX_8BIT, 0x0349, 0x9D }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1740+366-1 */
- { IMX_8BIT, 0x034B, 0x39 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x01 }, /* x_output_size[15:8]: 448*/
- { IMX_8BIT, 0x034D, 0xC0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x01 }, /* y_output_size[15:8]:366 */
- { IMX_8BIT, 0x034F, 0x6E }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x04 }, /* crop_x: 1064 */
- { IMX_8BIT, 0x0355, 0x28 },
- { IMX_8BIT, 0x0356, 0x03 }, /* crop_y: 870 */
- { IMX_8BIT, 0x0357, 0x66 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x01 },
- { IMX_8BIT, 0x3311, 0xC0 },
- { IMX_8BIT, 0x3312, 0x01 },
- { IMX_8BIT, 0x3313, 0x6E },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
-
- { IMX_8BIT, 0x4084, 0x01 },
- { IMX_8BIT, 0x4085, 0xC0 },
- { IMX_8BIT, 0x4086, 0x01 },
- { IMX_8BIT, 0x4087, 0x6E },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 2336x1312, 30fps, for 1080p dvs, vendor provide */
-static struct imx_reg const imx134_2336_1312_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x16 }, /* down scaling = 16/22 = 8/11 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:34 */
- { IMX_8BIT, 0x0345, 0x22 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:332 */
- { IMX_8BIT, 0x0347, 0x4C }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3245 */
- { IMX_8BIT, 0x0349, 0xAD }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2135 */
- { IMX_8BIT, 0x034B, 0x57 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:2336 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:1312 */
- { IMX_8BIT, 0x034F, 0x20 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C },
- { IMX_8BIT, 0x0355, 0x8C },
- { IMX_8BIT, 0x0356, 0x07 },
- { IMX_8BIT, 0x0357, 0x0C },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x20 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xEB },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x20 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1920x1080, 30fps, for 720p still capture */
-static struct imx_reg const imx134_1936_1096_30fps_v1[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1A }, /* downscaling 16/26*/
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:64 */
- { IMX_8BIT, 0x0345, 0x40 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:340 */
- { IMX_8BIT, 0x0347, 0x54 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3209 */
- { IMX_8BIT, 0x0349, 0x89 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2121 */
- { IMX_8BIT, 0x034B, 0x49 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
- { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C }, /* crop x:3146 */
- { IMX_8BIT, 0x0355, 0x4A },
- { IMX_8BIT, 0x0356, 0x06 }, /* xrop y:1782 */
- { IMX_8BIT, 0x0357, 0xF6 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x80 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x38 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x1E },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x80 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x38 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1920x1080, 30fps, for 720p still capture, vendor provide */
-static struct imx_reg const imx134_1936_1096_30fps_v2[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1B }, /* downscaling 16/27*/
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optionnal Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:64 */
- { IMX_8BIT, 0x0345, 0x06 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:340 */
- { IMX_8BIT, 0x0347, 0x34 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3209 */
- { IMX_8BIT, 0x0349, 0xC9 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2121 */
- { IMX_8BIT, 0x034B, 0x6F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
- { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C }, /* crop x:3146 */
- { IMX_8BIT, 0x0355, 0xC4 },
- { IMX_8BIT, 0x0356, 0x07 }, /* xrop y:1782 */
- { IMX_8BIT, 0x0357, 0x3A },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 }, /* decide by mode and output size */
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x1E },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1296x736, 30fps, for 720p still capture, vendor provide */
-static struct imx_reg const imx134_1296_736_30fps_v2[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x14 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* OptionnalFunction settig */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:40 */
- { IMX_8BIT, 0x0345, 0x14 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:332 */
- { IMX_8BIT, 0x0347, 0x38 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3239 */
- { IMX_8BIT, 0x0349, 0xBB }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2131 */
- { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]:1280 */
- { IMX_8BIT, 0x034D, 0x10 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:720 */
- { IMX_8BIT, 0x034F, 0xE0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x54 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x98 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x05 },
- { IMX_8BIT, 0x3311, 0x10 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0xE0 },
- { IMX_8BIT, 0x331C, 0x01 },
- { IMX_8BIT, 0x331D, 0x10 },
- { IMX_8BIT, 0x4084, 0x05 },
- { IMX_8BIT, 0x4085, 0x10 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0xE0 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Settin */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1280x720, 30fps, for 720p dvs, vendor provide */
-static struct imx_reg const imx134_1568_880_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* OptionnalFunction settig */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
- { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x70 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Settin */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-static struct imx_reg const imx134_1568_876_60fps_0625[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0x8F },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* OptionnalFunction settig */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x3B }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
- { IMX_8BIT, 0x034F, 0x6C }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x6C },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x6C },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x6F },
- { IMX_8BIT, 0x0831, 0x27 },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x2F },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x2F },
- { IMX_8BIT, 0x0836, 0x9F },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Settin */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-
-/* 4 lane for 720p dvs, vendor provide */
-static struct imx_reg const imx134_1568_880[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* OptionnalFunction settig */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
- { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x70 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-
- /* Integration Time Settin */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-/* 4 lane for 480p dvs, default 60fps, vendor provide */
-static struct imx_reg const imx134_880_592[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1D }, /* downscaling ratio = 16/29 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* OptionnalFunction settig */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:44 */
- { IMX_8BIT, 0x0345, 0x2C }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:160 */
- { IMX_8BIT, 0x0347, 0xA0 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3235 */
- { IMX_8BIT, 0x0349, 0xA3 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2307 */
- { IMX_8BIT, 0x034B, 0x03 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:880 */
- { IMX_8BIT, 0x034D, 0x70 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:592 */
- { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x3C },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x32 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x70 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x50 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
- { IMX_8BIT, 0x4084, 0x03 },
- { IMX_8BIT, 0x4085, 0x70 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0x50 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-
- /* Integration Time Settin */
- { IMX_8BIT, 0x0202, 0x05 },
- { IMX_8BIT, 0x0203, 0x42 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-static struct imx_reg const imx134_2336_1308_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x11 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* OptionnalFunction settig */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x01 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0xD8 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x02 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x44 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0xF7 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x07 }, /* y_addr_end[15:8]:2107 */
- { IMX_8BIT, 0x034B, 0x5F+4 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:876 */
- { IMX_8BIT, 0x034F, 0x1C+4 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x09 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x1C+4 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x1C+4 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xE8 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Settin */
- { IMX_8BIT, 0x0202, 0x05 },
- { IMX_8BIT, 0x0203, 0x42 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-struct imx_resolution imx134_res_preview[] = {
- {
- .desc = "imx134_CIF_30fps",
- .regs = imx134_720_592_30fps,
- .width = 720,
- .height = 592,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_820_552_30fps_preview",
- .regs = imx134_820_552_30fps,
- .width = 820,
- .height = 552,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_820_616_preview_30fps",
- .regs = imx134_820_616_30fps,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_preview_30fps",
- .regs = imx134_1936_1096_30fps_v2,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1640_1232_preview_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_8M_preview_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
-
-struct imx_resolution imx134_res_still[] = {
- {
- .desc = "imx134_CIF_30fps",
- .regs = imx134_1424_1168_30fps,
- .width = 1424,
- .height = 1168,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_VGA_still_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_still_30fps",
- .regs = imx134_1936_1096_30fps_v2,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1640_1232_still_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_8M_still_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- /* WORKAROUND for FW performance limitation */
- .fps = 8,
- .pixels_per_line = 6400,
- .lines_per_frame = 5312,
- },
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
-
-struct imx_resolution imx134_res_video[] = {
- {
- .desc = "imx134_QCIF_DVS_30fps",
- .regs = imx134_240_196_30fps,
- .width = 240,
- .height = 196,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_CIF_DVS_30fps",
- .regs = imx134_448_366_30fps,
- .width = 448,
- .height = 366,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_VGA_30fps",
- .regs = imx134_820_616_30fps,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_480p",
- .regs = imx134_880_592,
- .width = 880,
- .height = 592,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2700,
- },
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1568_880",
- .regs = imx134_1568_880,
- .width = 1568,
- .height = 880,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2700,
- },
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_dvs_30fps",
- .regs = imx134_2336_1312_30fps,
- .width = 2336,
- .height = 1312,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_dvs_60fps",
- .regs = imx134_2336_1308_60fps,
- .width = 2336,
- .height = 1312,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- /*This setting only be used for SDV mode*/
- .desc = "imx134_8M_sdv_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx135.h b/drivers/staging/media/atomisp/i2c/imx/imx135.h
deleted file mode 100644
index 58b43af909f2..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx135.h
+++ /dev/null
@@ -1,3374 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX135_H__
-#define __IMX135_H__
-
-#include "common.h"
-
-#define IMX_SC_CMMN_CHIP_ID_H 0x0016
-#define IMX_SC_CMMN_CHIP_ID_L 0x0017
-
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_F_NUMBER_DEFAULT 0x16000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define IMX_F_NUMBER_RANGE 0x160a160a
-
-#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
-#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
-
-#define IMX135_EMBEDDED_DATA_LINE_NUM 2
-#define IMX135_OUTPUT_DATA_FORMAT_REG 0x0112
-#define IMX135_OUTPUT_FORMAT_RAW10 0x0a0a
-/*
- * We use three different MIPI rates for our modes based on the resolution and
- * FPS requirements. So we have three PLL configurationa and these are based
- * on the EMC friendly MIPI values.
- *
- * Maximum clock: Pix clock @ 360.96MHz MIPI @ 451.2MHz 902.4mbps
- * Reduced clock: Pix clock @ 273.00MHz MIPI @ 342.0MHz 684.0mbps
- * Binning modes: Pix clock @ 335.36MHz MIPI @ 209.6MHz 419.2mbps
- * Global Timing registers are based on the data rates and these are part of
- * the below clock definitions.
- */
-/* MIPI 499.2MHz 998.4mbps PIXCLK: 399.36MHz */
-#define PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x0c}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x02}, \
- {IMX_8BIT, 0x030d, 0x70}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x7f}, \
- {IMX_8BIT, 0x0831, 0x37}, \
- {IMX_8BIT, 0x0832, 0x67}, \
- {IMX_8BIT, 0x0833, 0x3f}, \
- {IMX_8BIT, 0x0834, 0x3f}, \
- {IMX_8BIT, 0x0835, 0x47}, \
- {IMX_8BIT, 0x0836, 0xdf}, \
- {IMX_8BIT, 0x0837, 0x47}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 451.2MHz 902.4mbps PIXCLK: 360.96MHz */
-#define PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x0c}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x02}, \
- {IMX_8BIT, 0x030d, 0x34}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x7f}, \
- {IMX_8BIT, 0x0831, 0x37}, \
- {IMX_8BIT, 0x0832, 0x67}, \
- {IMX_8BIT, 0x0833, 0x3f}, \
- {IMX_8BIT, 0x0834, 0x3f}, \
- {IMX_8BIT, 0x0835, 0x47}, \
- {IMX_8BIT, 0x0836, 0xdf}, \
- {IMX_8BIT, 0x0837, 0x47}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 209.6MHz, 419.2mbps PIXCLK: 335.36 MHz */
-#define PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x06}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x02}, \
- {IMX_8BIT, 0x030c, 0x01}, \
- {IMX_8BIT, 0x030d, 0x06}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x12}, \
- {IMX_8BIT, 0x0830, 0x5f}, \
- {IMX_8BIT, 0x0831, 0x1f}, \
- {IMX_8BIT, 0x0832, 0x3f}, \
- {IMX_8BIT, 0x0833, 0x1f}, \
- {IMX_8BIT, 0x0834, 0x1f}, \
- {IMX_8BIT, 0x0835, 0x17}, \
- {IMX_8BIT, 0x0836, 0x67}, \
- {IMX_8BIT, 0x0837, 0x27}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 342MHz 684mbps PIXCLK: 273.6MHz */
-#define PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x08}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x01}, \
- {IMX_8BIT, 0x030d, 0x1d}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x77}, \
- {IMX_8BIT, 0x0831, 0x2f}, \
- {IMX_8BIT, 0x0832, 0x4f}, \
- {IMX_8BIT, 0x0833, 0x37}, \
- {IMX_8BIT, 0x0834, 0x2f}, \
- {IMX_8BIT, 0x0835, 0x37}, \
- {IMX_8BIT, 0x0836, 0xa7}, \
- {IMX_8BIT, 0x0837, 0x37}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* Basic settings: Applied only once after the sensor power up */
-static struct imx_reg const imx135_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- { IMX_8BIT, 0x0220, 0x01},
- { IMX_8BIT, 0x3008, 0xB0},
- { IMX_8BIT, 0x320A, 0x01},
- { IMX_8BIT, 0x320D, 0x10},
- { IMX_8BIT, 0x3216, 0x2E},
- { IMX_8BIT, 0x3230, 0x0A},
- { IMX_8BIT, 0x3228, 0x05},
- { IMX_8BIT, 0x3229, 0x02},
- { IMX_8BIT, 0x322C, 0x02},
- { IMX_8BIT, 0x3302, 0x10},
- { IMX_8BIT, 0x3390, 0x45},
- { IMX_8BIT, 0x3409, 0x0C},
- { IMX_8BIT, 0x340B, 0xF5},
- { IMX_8BIT, 0x340C, 0x2D},
- { IMX_8BIT, 0x3412, 0x41},
- { IMX_8BIT, 0x3413, 0xAD},
- { IMX_8BIT, 0x3414, 0x1E},
- { IMX_8BIT, 0x3427, 0x04},
- { IMX_8BIT, 0x3480, 0x1E},
- { IMX_8BIT, 0x3484, 0x1E},
- { IMX_8BIT, 0x3488, 0x1E},
- { IMX_8BIT, 0x348C, 0x1E},
- { IMX_8BIT, 0x3490, 0x1E},
- { IMX_8BIT, 0x3494, 0x1E},
- { IMX_8BIT, 0x349C, 0x38},
- { IMX_8BIT, 0x34A3, 0x38},
- { IMX_8BIT, 0x3511, 0x8F},
- { IMX_8BIT, 0x3518, 0x00},
- { IMX_8BIT, 0x3519, 0x94},
- { IMX_8BIT, 0x3833, 0x20},
- { IMX_8BIT, 0x3893, 0x01},
- { IMX_8BIT, 0x38C2, 0x08},
- { IMX_8BIT, 0x38C3, 0x08},
- { IMX_8BIT, 0x3C09, 0x01},
- { IMX_8BIT, 0x4000, 0x0E},
- { IMX_8BIT, 0x4300, 0x00},
- { IMX_8BIT, 0x4316, 0x12},
- { IMX_8BIT, 0x4317, 0x22},
- { IMX_8BIT, 0x4318, 0x00},
- { IMX_8BIT, 0x4319, 0x00},
- { IMX_8BIT, 0x431A, 0x00},
- { IMX_8BIT, 0x4324, 0x03},
- { IMX_8BIT, 0x4325, 0x20},
- { IMX_8BIT, 0x4326, 0x03},
- { IMX_8BIT, 0x4327, 0x84},
- { IMX_8BIT, 0x4328, 0x03},
- { IMX_8BIT, 0x4329, 0x20},
- { IMX_8BIT, 0x432A, 0x03},
- { IMX_8BIT, 0x432B, 0x84},
- { IMX_8BIT, 0x432C, 0x01},
- { IMX_8BIT, 0x4401, 0x3F},
- { IMX_8BIT, 0x4402, 0xFF},
- { IMX_8BIT, 0x4412, 0x3F},
- { IMX_8BIT, 0x4413, 0xFF},
- { IMX_8BIT, 0x441D, 0x28},
- { IMX_8BIT, 0x4444, 0x00},
- { IMX_8BIT, 0x4445, 0x00},
- { IMX_8BIT, 0x4446, 0x3F},
- { IMX_8BIT, 0x4447, 0xFF},
- { IMX_8BIT, 0x4452, 0x00},
- { IMX_8BIT, 0x4453, 0xA0},
- { IMX_8BIT, 0x4454, 0x08},
- { IMX_8BIT, 0x4455, 0x00},
- { IMX_8BIT, 0x4458, 0x18},
- { IMX_8BIT, 0x4459, 0x18},
- { IMX_8BIT, 0x445A, 0x3F},
- { IMX_8BIT, 0x445B, 0x3A},
- { IMX_8BIT, 0x4462, 0x00},
- { IMX_8BIT, 0x4463, 0x00},
- { IMX_8BIT, 0x4464, 0x00},
- { IMX_8BIT, 0x4465, 0x00},
- { IMX_8BIT, 0x446E, 0x01},
- { IMX_8BIT, 0x4500, 0x1F},
- { IMX_8BIT, 0x600a, 0x00},
- { IMX_8BIT, 0x380a, 0x00},
- { IMX_8BIT, 0x380b, 0x00},
- { IMX_8BIT, 0x4103, 0x00},
- { IMX_8BIT, 0x4243, 0x9a},
- { IMX_8BIT, 0x4330, 0x01},
- { IMX_8BIT, 0x4331, 0x90},
- { IMX_8BIT, 0x4332, 0x02},
- { IMX_8BIT, 0x4333, 0x58},
- { IMX_8BIT, 0x4334, 0x03},
- { IMX_8BIT, 0x4335, 0x20},
- { IMX_8BIT, 0x4336, 0x03},
- { IMX_8BIT, 0x4337, 0x84},
- { IMX_8BIT, 0x433C, 0x01},
- { IMX_8BIT, 0x4340, 0x02},
- { IMX_8BIT, 0x4341, 0x58},
- { IMX_8BIT, 0x4342, 0x03},
- { IMX_8BIT, 0x4343, 0x52},
- { IMX_8BIT, 0x4364, 0x0b},
- { IMX_8BIT, 0x4368, 0x00},
- { IMX_8BIT, 0x4369, 0x0f},
- { IMX_8BIT, 0x436a, 0x03},
- { IMX_8BIT, 0x436b, 0xa8},
- { IMX_8BIT, 0x436c, 0x00},
- { IMX_8BIT, 0x436d, 0x00},
- { IMX_8BIT, 0x436e, 0x00},
- { IMX_8BIT, 0x436f, 0x06},
- { IMX_8BIT, 0x4281, 0x21},
- { IMX_8BIT, 0x4282, 0x18},
- { IMX_8BIT, 0x4283, 0x04},
- { IMX_8BIT, 0x4284, 0x08},
- { IMX_8BIT, 0x4287, 0x7f},
- { IMX_8BIT, 0x4288, 0x08},
- { IMX_8BIT, 0x428c, 0x08},
- { IMX_8BIT, 0x4297, 0x00},
- { IMX_8BIT, 0x4299, 0x7E},
- { IMX_8BIT, 0x42A4, 0xFB},
- { IMX_8BIT, 0x42A5, 0x7E},
- { IMX_8BIT, 0x42A6, 0xDF},
- { IMX_8BIT, 0x42A7, 0xB7},
- { IMX_8BIT, 0x42AF, 0x03},
- { IMX_8BIT, 0x4207, 0x03},
- { IMX_8BIT, 0x4218, 0x00},
- { IMX_8BIT, 0x421B, 0x20},
- { IMX_8BIT, 0x421F, 0x04},
- { IMX_8BIT, 0x4222, 0x02},
- { IMX_8BIT, 0x4223, 0x22},
- { IMX_8BIT, 0x422E, 0x54},
- { IMX_8BIT, 0x422F, 0xFB},
- { IMX_8BIT, 0x4230, 0xFF},
- { IMX_8BIT, 0x4231, 0xFE},
- { IMX_8BIT, 0x4232, 0xFF},
- { IMX_8BIT, 0x4235, 0x58},
- { IMX_8BIT, 0x4236, 0xF7},
- { IMX_8BIT, 0x4237, 0xFD},
- { IMX_8BIT, 0x4239, 0x4E},
- { IMX_8BIT, 0x423A, 0xFC},
- { IMX_8BIT, 0x423B, 0xFD},
- { IMX_8BIT, 0x4300, 0x00},
- { IMX_8BIT, 0x4316, 0x12},
- { IMX_8BIT, 0x4317, 0x22},
- { IMX_8BIT, 0x4318, 0x00},
- { IMX_8BIT, 0x4319, 0x00},
- { IMX_8BIT, 0x431A, 0x00},
- { IMX_8BIT, 0x4324, 0x03},
- { IMX_8BIT, 0x4325, 0x20},
- { IMX_8BIT, 0x4326, 0x03},
- { IMX_8BIT, 0x4327, 0x84},
- { IMX_8BIT, 0x4328, 0x03},
- { IMX_8BIT, 0x4329, 0x20},
- { IMX_8BIT, 0x432A, 0x03},
- { IMX_8BIT, 0x432B, 0x20},
- { IMX_8BIT, 0x432C, 0x01},
- { IMX_8BIT, 0x432D, 0x01},
- { IMX_8BIT, 0x4338, 0x02},
- { IMX_8BIT, 0x4339, 0x00},
- { IMX_8BIT, 0x433A, 0x00},
- { IMX_8BIT, 0x433B, 0x02},
- { IMX_8BIT, 0x435A, 0x03},
- { IMX_8BIT, 0x435B, 0x84},
- { IMX_8BIT, 0x435E, 0x01},
- { IMX_8BIT, 0x435F, 0xFF},
- { IMX_8BIT, 0x4360, 0x01},
- { IMX_8BIT, 0x4361, 0xF4},
- { IMX_8BIT, 0x4362, 0x03},
- { IMX_8BIT, 0x4363, 0x84},
- { IMX_8BIT, 0x437B, 0x01},
- { IMX_8BIT, 0x4400, 0x00}, /* STATS off ISP do not support STATS*/
- { IMX_8BIT, 0x4401, 0x3F},
- { IMX_8BIT, 0x4402, 0xFF},
- { IMX_8BIT, 0x4404, 0x13},
- { IMX_8BIT, 0x4405, 0x26},
- { IMX_8BIT, 0x4406, 0x07},
- { IMX_8BIT, 0x4408, 0x20},
- { IMX_8BIT, 0x4409, 0xE5},
- { IMX_8BIT, 0x440A, 0xFB},
- { IMX_8BIT, 0x440C, 0xF6},
- { IMX_8BIT, 0x440D, 0xEA},
- { IMX_8BIT, 0x440E, 0x20},
- { IMX_8BIT, 0x4410, 0x00},
- { IMX_8BIT, 0x4411, 0x00},
- { IMX_8BIT, 0x4412, 0x3F},
- { IMX_8BIT, 0x4413, 0xFF},
- { IMX_8BIT, 0x4414, 0x1F},
- { IMX_8BIT, 0x4415, 0xFF},
- { IMX_8BIT, 0x4416, 0x20},
- { IMX_8BIT, 0x4417, 0x00},
- { IMX_8BIT, 0x4418, 0x1F},
- { IMX_8BIT, 0x4419, 0xFF},
- { IMX_8BIT, 0x441A, 0x20},
- { IMX_8BIT, 0x441B, 0x00},
- { IMX_8BIT, 0x441D, 0x40},
- { IMX_8BIT, 0x441E, 0x1E},
- { IMX_8BIT, 0x441F, 0x38},
- { IMX_8BIT, 0x4420, 0x01},
- { IMX_8BIT, 0x4444, 0x00},
- { IMX_8BIT, 0x4445, 0x00},
- { IMX_8BIT, 0x4446, 0x1D},
- { IMX_8BIT, 0x4447, 0xF9},
- { IMX_8BIT, 0x4452, 0x00},
- { IMX_8BIT, 0x4453, 0xA0},
- { IMX_8BIT, 0x4454, 0x08},
- { IMX_8BIT, 0x4455, 0x00},
- { IMX_8BIT, 0x4456, 0x0F},
- { IMX_8BIT, 0x4457, 0xFF},
- { IMX_8BIT, 0x4458, 0x18},
- { IMX_8BIT, 0x4459, 0x18},
- { IMX_8BIT, 0x445A, 0x3F},
- { IMX_8BIT, 0x445B, 0x3A},
- { IMX_8BIT, 0x445C, 0x00},
- { IMX_8BIT, 0x445D, 0x28},
- { IMX_8BIT, 0x445E, 0x01},
- { IMX_8BIT, 0x445F, 0x90},
- { IMX_8BIT, 0x4460, 0x00},
- { IMX_8BIT, 0x4461, 0x60},
- { IMX_8BIT, 0x4462, 0x00},
- { IMX_8BIT, 0x4463, 0x00},
- { IMX_8BIT, 0x4464, 0x00},
- { IMX_8BIT, 0x4465, 0x00},
- { IMX_8BIT, 0x446C, 0x00},
- { IMX_8BIT, 0x446D, 0x00},
- { IMX_8BIT, 0x446E, 0x00},
- { IMX_8BIT, 0x452A, 0x02},
- { IMX_8BIT, 0x0712, 0x01},
- { IMX_8BIT, 0x0713, 0x00},
- { IMX_8BIT, 0x0714, 0x01},
- { IMX_8BIT, 0x0715, 0x00},
- { IMX_8BIT, 0x0716, 0x01},
- { IMX_8BIT, 0x0717, 0x00},
- { IMX_8BIT, 0x0718, 0x01},
- { IMX_8BIT, 0x0719, 0x00},
- { IMX_8BIT, 0x4500, 0x1F },
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- { IMX_8BIT, 0x0205, 0x00},
- { IMX_8BIT, 0x020E, 0x01},
- { IMX_8BIT, 0x020F, 0x00},
- { IMX_8BIT, 0x0210, 0x02},
- { IMX_8BIT, 0x0211, 0x00},
- { IMX_8BIT, 0x0212, 0x02},
- { IMX_8BIT, 0x0213, 0x00},
- { IMX_8BIT, 0x0214, 0x01},
- { IMX_8BIT, 0x0215, 0x00},
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00},
- { IMX_8BIT, 0x0231, 0x00},
- { IMX_8BIT, 0x0233, 0x00},
- { IMX_8BIT, 0x0234, 0x00},
- { IMX_8BIT, 0x0235, 0x40},
- { IMX_8BIT, 0x0238, 0x00},
- { IMX_8BIT, 0x0239, 0x04},
- { IMX_8BIT, 0x023B, 0x00},
- { IMX_8BIT, 0x023C, 0x01},
- { IMX_8BIT, 0x33B0, 0x04},
- { IMX_8BIT, 0x33B1, 0x00},
- { IMX_8BIT, 0x33B3, 0x00},
- { IMX_8BIT, 0x33B4, 0x01},
- { IMX_8BIT, 0x3800, 0x00},
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0}
-};
-
-/********* Preview, continuous capture and still modes *****************/
-
-static struct imx_reg const imx135_13m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size Setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 0, 4207,3119 4208x3120 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6F},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x2F},
- {IMX_8BIT, 0x034C, 0x10},
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x0C},
- {IMX_8BIT, 0x034F, 0x30},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4208x3120 */
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x30},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x0C},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/* 13MP reduced pixel clock MIPI 342MHz is EMC friendly*/
-static struct imx_reg const imx135_13m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size Setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6F},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x2F},
- {IMX_8BIT, 0x034C, 0x10},
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x0C},
- {IMX_8BIT, 0x034F, 0x30},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x30},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x0C},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_10m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x78},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6f},
- {IMX_8BIT, 0x034A, 0x0a},
- {IMX_8BIT, 0x034B, 0xb7},
- {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0x40},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x40},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0x40},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x68},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_10m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x78},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6f},
- {IMX_8BIT, 0x034A, 0x0a},
- {IMX_8BIT, 0x034B, 0xb7},
- {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0x40},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x40},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0x40},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x68},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 8.5 DS from (3:2)8m cropped setting.
- *
- * The 8m(3:2) cropped setting is 2992x2448 effective res.
- * The ISP effect cropped setting should be 1408x1152 effect res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 368x304
- * cropped region is 3128x2584
- */
-static struct imx_reg const imx135_368x304_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x88}, /* 136/16=8.5 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x1C}, /* 540 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x0C}, /* 268 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0x53}, /* 3667 */
- {IMX_8BIT, 0x034A, 0x0B}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0x23}, /* 2851 */
- {IMX_8BIT, 0x034C, 0x01}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x70}, /* 368 */
- {IMX_8BIT, 0x034E, 0x01}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x30}, /* 304 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out siz same as the size after crop */
- {IMX_8BIT, 0x0355, 0x38},
- {IMX_8BIT, 0x0356, 0x0A},
- {IMX_8BIT, 0x0357, 0x18},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x01}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x01},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xD0},
- {IMX_8BIT, 0x4084, 0x01}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x70},
- {IMX_8BIT, 0x4086, 0x01},
- {IMX_8BIT, 0x4087, 0x30},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 1/4 binning from 8m cropped setting.
- *
- * The 8m cropped setting is 3264x2448 effective res.
- * The xga cropped setting should be 816x612 effect res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 832x628
- * cropped region is 3328x2512
- */
-static struct imx_reg const imx135_xga_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x44},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
-/* {IMX_8BIT, 0x4203, 0xFF}, */
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0xB8}, /* 440 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x30}, /* 304 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xB7}, /* 4207-440=3767 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xFF}, /* 3119-304=2815 */
- {IMX_8BIT, 0x034C, 0x03}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x40}, /* 832 */
- {IMX_8BIT, 0x034E, 0x02}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x74}, /* 628 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x03}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x40},
- {IMX_8BIT, 0x0356, 0x02},
- {IMX_8BIT, 0x0357, 0x74},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x03}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x40},
- {IMX_8BIT, 0x3312, 0x02},
- {IMX_8BIT, 0x3313, 0x74},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0x21},
- {IMX_8BIT, 0x4084, 0x03}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x40},
- {IMX_8BIT, 0x4086, 0x02},
- {IMX_8BIT, 0x4087, 0x74},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 28/16 DS from (16:9)8m cropped setting.
- *
- * The 8m(16:9) cropped setting is 3360x1890 effective res.
- * - this is larger then the expected 3264x1836 FOV
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1936x1096
- * cropped region is 3388x1918
- */
-static struct imx_reg const imx135_1936x1096_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x1C}, /* 28/16 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x9A}, /* 410 */
- {IMX_8BIT, 0x0346, 0x02}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x58}, /* 600 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xD5}, /* 3797 */
- {IMX_8BIT, 0x034A, 0x09}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xD5}, /* 2517 */
- {IMX_8BIT, 0x034C, 0x07}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x90}, /* 1936 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x48}, /* 1096 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0D}, /* Cut out siz same as the size after crop */
- {IMX_8BIT, 0x0355, 0x3C},
- {IMX_8BIT, 0x0356, 0x07},
- {IMX_8BIT, 0x0357, 0x7E},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x07}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x48},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x07}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x48},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 2.125 DS from (3:2)8m cropped setting.
- *
- * The 8m(3:2) cropped setting is 2992x2448 effective res.
- * The ISP effect cropped setting should be 1408x1152 effect res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1424x1168
- * cropped region is 3026x2482
- */
-static struct imx_reg const imx135_1424x1168_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x22}, /* 34/16=2.125 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x4E}, /* 590 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x3E}, /* 318 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0x1F}, /* 3615 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xEF}, /* 2799 */
- {IMX_8BIT, 0x034C, 0x05}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x90}, /* 1424 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x90}, /* 1168 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0B}, /* Cut out siz same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD2},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0xB2},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x05}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x90},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x05}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x90},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 1/2 binning from 8m cropped setting.
- *
- * The 8m cropped setting is 3264x2448 effective res.
- * The 2m cropped setting should be 1632x1224 effect res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1648x1240
- * cropped region is 3296x2480
- */
-static struct imx_reg const imx135_2m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0xC8}, /* 464(1D0) -> 456(1C8)*/
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x40}, /* 320 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xA7}, /* 4207-456=3751 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xEF}, /* 3119-320=2799 */
- {IMX_8BIT, 0x034C, 0x06}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x70}, /* 1648 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0xD8}, /* 1240 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x06}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x04},
- {IMX_8BIT, 0x0357, 0xD8},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x06}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0xD8},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * 8M Cropped 16:9 setting
- *
- * Effect res: 3264x1836
- * Sensor out: 3280x1852
- */
-static struct imx_reg const imx135_6m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xD0},
- {IMX_8BIT, 0x0346, 0x02}, /* 634 */
- {IMX_8BIT, 0x0347, 0x7A},
- {IMX_8BIT, 0x0348, 0x0E},
- {IMX_8BIT, 0x0349, 0x9F},
- {IMX_8BIT, 0x034A, 0x09}, /* 2485 */
- {IMX_8BIT, 0x034B, 0xB5},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07}, /* 1852 */
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD0},
- {IMX_8BIT, 0x0356, 0x07},
- {IMX_8BIT, 0x0357, 0x3C},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xD0},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x48},
- {IMX_8BIT, 0x0348, 0x0E},
- {IMX_8BIT, 0x0349, 0x9F},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0xE7},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09}, /* 2464 */
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD0},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0xA0},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_scaled_from_12m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x14},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x1B},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x08},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x09},
- {IMX_8BIT, 0x4087, 0xA0},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_scaled_from_12m_for_mipi342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x14},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x1B},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x08},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C}, /* Resize IMG Hand V size-> Scaling related?*/
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x09},
- {IMX_8BIT, 0x4087, 0xA0},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_6m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x94},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0x9F},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07},
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x0C},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x0C},
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C},
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x07},
- {IMX_8BIT, 0x4087, 0x3C},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_6m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x94},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0x9F},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07},
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x0C},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x0C},
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C},
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x07},
- {IMX_8BIT, 0x4087, 0x3C},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * FOV is: 3280x2464, larger then 3264x2448.
- * Sensor output: 336x256
- * Cropping region: 3444x2624
- */
-static struct imx_reg const imx135_336x256[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* 2x binning */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x52}, /* scaling: 82/16 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* x_start: 374 */
- {IMX_8BIT, 0x0345, 0x76},
- {IMX_8BIT, 0x0346, 0x00}, /* y_start: 248 */
- {IMX_8BIT, 0x0347, 0xF8},
- {IMX_8BIT, 0x0348, 0x0E}, /* x_end: 3817 */
- {IMX_8BIT, 0x0349, 0xE9},
- {IMX_8BIT, 0x034A, 0x0B}, /* y_end: 2871 */
- {IMX_8BIT, 0x034B, 0x37},
- {IMX_8BIT, 0x034C, 0x01}, /* x_out: 336 */
- {IMX_8BIT, 0x034D, 0x50},
- {IMX_8BIT, 0x034E, 0x01}, /* y_out: 256 */
- {IMX_8BIT, 0x034F, 0x00},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x06}, /* dig x_out: 1722 */
- {IMX_8BIT, 0x0355, 0xBA},
- {IMX_8BIT, 0x0356, 0x05}, /* dig y_out: 1312 */
- {IMX_8BIT, 0x0357, 0x20},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x01}, /* ?: x_out */
- {IMX_8BIT, 0x3311, 0x50},
- {IMX_8BIT, 0x3312, 0x01}, /* ?: y_out */
- {IMX_8BIT, 0x3313, 0x00},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0x4E},
- {IMX_8BIT, 0x4084, 0x01}, /* ?: x_out */
- {IMX_8BIT, 0x4085, 0x50},
- {IMX_8BIT, 0x4086, 0x01}, /* ?: y_out */
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_1m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x1F},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x58},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x28},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x17},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x07},
- {IMX_8BIT, 0x034C, 0x04},
- {IMX_8BIT, 0x034D, 0x10},
- {IMX_8BIT, 0x034E, 0x03},
- {IMX_8BIT, 0x034F, 0x10},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x07},
- {IMX_8BIT, 0x0355, 0xE0},
- {IMX_8BIT, 0x0356, 0x05},
- {IMX_8BIT, 0x0357, 0xF0},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x04},
- {IMX_8BIT, 0x3311, 0x10},
- {IMX_8BIT, 0x3312, 0x03},
- {IMX_8BIT, 0x3313, 0x10},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0x4E},
- {IMX_8BIT, 0x4084, 0x04},
- {IMX_8BIT, 0x4085, 0x10},
- {IMX_8BIT, 0x4086, 0x03},
- {IMX_8BIT, 0x4087, 0x10},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_3m_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01}, /* Binning */
- {IMX_8BIT, 0x0391, 0x22}, /* 2x2 binning */
- {IMX_8BIT, 0x0392, 0x00}, /* average */
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x28},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x08},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x47},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x27},
- {IMX_8BIT, 0x034C, 0x08},
- {IMX_8BIT, 0x034D, 0x10},
- {IMX_8BIT, 0x034E, 0x06},
- {IMX_8BIT, 0x034F, 0x10},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x08},
- {IMX_8BIT, 0x0355, 0x10},
- {IMX_8BIT, 0x0356, 0x06},
- {IMX_8BIT, 0x0357, 0x10},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x08},
- {IMX_8BIT, 0x3311, 0x10},
- {IMX_8BIT, 0x3312, 0x06},
- {IMX_8BIT, 0x3313, 0x10},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/* 1080P 1936x1104 */
-static struct imx_reg const imx135_1080p_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x11},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x2E},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x84},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x41},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0xAF},
- {IMX_8BIT, 0x034C, 0x07},
- {IMX_8BIT, 0x034D, 0x90},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x50},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x08},
- {IMX_8BIT, 0x0355, 0x0A},
- {IMX_8BIT, 0x0356, 0x04},
- {IMX_8BIT, 0x0357, 0x96},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x07},
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x50},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x00},
- {IMX_8BIT, 0x4084, 0x07},
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x50},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static const struct imx_reg imx135_1080p_nodvs_fullfov_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 168,464,4039,2655: 3872x2192 */
- { IMX_8BIT, 0x0345, 0xA8 },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0xD0 },
- { IMX_8BIT, 0x0348, 0x0F },
- { IMX_8BIT, 0x0349, 0xC7 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x5F },
- { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x034D, 0x90 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0x48 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x0355, 0x90 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x48 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P NODVS 1936x1096 */
-static const struct imx_reg imx135_1080p_nodvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x11 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,396,4161,2727: 4116x2332 */
- { IMX_8BIT, 0x0345, 0x2E },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x8C },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0xA7 },
- { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x034D, 0x90 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0x48 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1166 */
- { IMX_8BIT, 0x0355, 0x0A },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x8E },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P 10%DVS 2104x1184 */
-static const struct imx_reg imx135_1080p_10_dvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 0,376,4207,2743: 4208x2368 */
- { IMX_8BIT, 0x0345, 0x00 },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x78 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x6F },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0xB7 },
- { IMX_8BIT, 0x034C, 0x08 }, /* 2104 x 1184 */
- { IMX_8BIT, 0x034D, 0x38 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0xA0 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2104 x 1184 */
- { IMX_8BIT, 0x0355, 0x38 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0xA0 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x08 },
- { IMX_8BIT, 0x3311, 0x38 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0xA0 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx135_720pdvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x15 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
- { IMX_8BIT, 0x0345, 0x2E },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x03 },
- { IMX_8BIT, 0x034F, 0x70 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /*2058 x 1156 */
- { IMX_8BIT, 0x0355, 0x0A },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x84 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
- { IMX_8BIT, 0x4084, 0x06 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x03 },
- { IMX_8BIT, 0x4087, 0x70 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/******************* Video Modes ******************/
-
-/* 1080P DVS 2336x1320 */
-static const struct imx_reg imx135_2336x1320_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1C },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 60,404,4147,2715: 4088x2312 */
- { IMX_8BIT, 0x0345, 0x3C },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x33 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x09 }, /*2336 x 1320 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x05 },
- { IMX_8BIT, 0x034F, 0x28 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0F }, /* 4088x2312 */
- { IMX_8BIT, 0x0355, 0xF8 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0x08 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xE2 },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x28 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P DVS 2336x1320 Cropped */
-static const struct imx_reg imx135_2336x1320_cropped_mipi499[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1C },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x03 }, /* 936,900,3271,2219: 2336x1320 */
- { IMX_8BIT, 0x0345, 0xA8 },
- { IMX_8BIT, 0x0346, 0x03 },
- { IMX_8BIT, 0x0347, 0x84 },
- { IMX_8BIT, 0x0348, 0x0C },
- { IMX_8BIT, 0x0349, 0xC7 },
- { IMX_8BIT, 0x034A, 0x08 },
- { IMX_8BIT, 0x034B, 0xAB },
- { IMX_8BIT, 0x034C, 0x09 }, /* 2336 x 1320 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x05 },
- { IMX_8BIT, 0x034F, 0x28 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x09 }, /* 2336 x 1320 */
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x28 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x00 },
- { IMX_8BIT, 0x331D, 0xB4 },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x28 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 720P DVS 1568 x 880 */
-static const struct imx_reg imx135_720p_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x15 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
- { IMX_8BIT, 0x0345, 0x2e },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x03 },
- { IMX_8BIT, 0x034F, 0x70 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
- { IMX_8BIT, 0x0355, 0x0a },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x84 },
- { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
- { IMX_8BIT, 0x331D, 0xd6 }, /* TODO! */
- { IMX_8BIT, 0x4084, 0x06 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x03 },
- { IMX_8BIT, 0x4087, 0x70 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* wvga: H : 1640 V : 1024 */
-static const struct imx_reg imx135_wvga_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x22 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x14 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 },
- {IMX_8BIT, 0x0345, 0x36 },
- {IMX_8BIT, 0x0346, 0x01 },
- {IMX_8BIT, 0x0347, 0x18 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x39 },
- {IMX_8BIT, 0x034A, 0x0B },
- {IMX_8BIT, 0x034B, 0x17 },
- {IMX_8BIT, 0x034C, 0x06 },
- {IMX_8BIT, 0x034D, 0x68 },
- {IMX_8BIT, 0x034E, 0x04 },
- {IMX_8BIT, 0x034F, 0x00 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x08 },
- {IMX_8BIT, 0x0355, 0x02 },
- {IMX_8BIT, 0x0356, 0x05 },
- {IMX_8BIT, 0x0357, 0x00 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x06 },
- {IMX_8BIT, 0x3311, 0x68 },
- {IMX_8BIT, 0x3312, 0x04 },
- {IMX_8BIT, 0x3313, 0x00 },
- {IMX_8BIT, 0x331C, 0x01 },
- {IMX_8BIT, 0x331D, 0xBD },
- {IMX_8BIT, 0x4084, 0x06 },
- {IMX_8BIT, 0x4085, 0x68 },
- {IMX_8BIT, 0x4086, 0x04 },
- {IMX_8BIT, 0x4087, 0x00 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 480P 1036 x 696 */
-static const struct imx_reg imx135_480p_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 },/* No scal */
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x2784*/
- {IMX_8BIT, 0x0345, 0x20 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0xA8 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x4F },
- {IMX_8BIT, 0x034A, 0x0B },
- {IMX_8BIT, 0x034B, 0x88 },
- {IMX_8BIT, 0x034C, 0x04 }, /* 1036 * 696 */
- {IMX_8BIT, 0x034D, 0x0C },
- {IMX_8BIT, 0x034E, 0x02 },
- {IMX_8BIT, 0x034F, 0xB8 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1036x696 */
- {IMX_8BIT, 0x0355, 0x0C },
- {IMX_8BIT, 0x0356, 0x02 },
- {IMX_8BIT, 0x0357, 0xB8 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x04 },
- {IMX_8BIT, 0x3311, 0x0C },
- {IMX_8BIT, 0x3312, 0x02 },
- {IMX_8BIT, 0x3313, 0xB8 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x04 },
- {IMX_8BIT, 0x4085, 0x0C },
- {IMX_8BIT, 0x4086, 0x02 },
- {IMX_8BIT, 0x4087, 0xB8 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 480P DVS 936 x 602 */
-static const struct imx_reg imx135_480p_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x23 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 56,244,4151,2877: 4096x2634 */
- { IMX_8BIT, 0x0345, 0x38 },
- { IMX_8BIT, 0x0346, 0x00 },
- { IMX_8BIT, 0x0347, 0xf4 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x37 },
- { IMX_8BIT, 0x034A, 0x0b },
- { IMX_8BIT, 0x034B, 0x3d },
- { IMX_8BIT, 0x034C, 0x03 }, /* 936 x 602 */
- { IMX_8BIT, 0x034D, 0xa8 },
- { IMX_8BIT, 0x034E, 0x02 },
- { IMX_8BIT, 0x034F, 0x5a },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
- { IMX_8BIT, 0x0355, 0x00 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x25 },
- { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0xa8 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x5a },
- { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
- { IMX_8BIT, 0x331D, 0xd6 },
- { IMX_8BIT, 0x4084, 0x03 },
- { IMX_8BIT, 0x4085, 0xa8 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0x5a },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 1036 V : 780 */
-static const struct imx_reg imx135_vga_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x3120*/
- {IMX_8BIT, 0x0345, 0x20 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x00 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x4F },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x2F },
- {IMX_8BIT, 0x034C, 0x04 }, /* 1036x780 */
- {IMX_8BIT, 0x034D, 0x0C },
- {IMX_8BIT, 0x034E, 0x03 },
- {IMX_8BIT, 0x034F, 0x0C },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1036x780 */
- {IMX_8BIT, 0x0355, 0x0C },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x0C },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x04 },
- {IMX_8BIT, 0x3311, 0x0C },
- {IMX_8BIT, 0x3312, 0x03 },
- {IMX_8BIT, 0x3313, 0x0C },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x04 },
- {IMX_8BIT, 0x4085, 0x0C },
- {IMX_8BIT, 0x4086, 0x03 },
- {IMX_8BIT, 0x4087, 0x0C },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 820 V : 616 */
-static const struct imx_reg imx135_vga_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x14 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4104x3080*/
- {IMX_8BIT, 0x0345, 0x34 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x3B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x03 }, /* 820x616 */
- {IMX_8BIT, 0x034D, 0x34 },
- {IMX_8BIT, 0x034E, 0x02 },
- {IMX_8BIT, 0x034F, 0x68 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1026x770 */
- {IMX_8BIT, 0x0355, 0x02 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x03 },
- {IMX_8BIT, 0x3311, 0x34 },
- {IMX_8BIT, 0x3312, 0x02 },
- {IMX_8BIT, 0x3313, 0x68 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x03 },
- {IMX_8BIT, 0x4085, 0x34 },
- {IMX_8BIT, 0x4086, 0x02 },
- {IMX_8BIT, 0x4087, 0x68 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 436 V : 360 */
-static const struct imx_reg imx135_436x360_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x22 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 212,0,3995,3119 3784x3120 */
- {IMX_8BIT, 0x0345, 0xD4 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x00 },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x9B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x2F },
-
- {IMX_8BIT, 0x034C, 0x01 }, /* 436x360 */
- {IMX_8BIT, 0x034D, 0xB4 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x68 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x12 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x0C },
-
- {IMX_8BIT, 0x0354, 0x03 }, /* 928x768 crop from 946x780*/
- {IMX_8BIT, 0x0355, 0xA0 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x00 },
-
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0xB4 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x68 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0xB4 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x68 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* QVGA: H : 408 V : 308 */
-static const struct imx_reg imx135_qvga__dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x28 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 64,20,4143,3099 4080x3080 */
- {IMX_8BIT, 0x0345, 0x40 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x2F },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x01 }, /* 408x308 */
- {IMX_8BIT, 0x034D, 0x98 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x34 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 1020x770 */
- {IMX_8BIT, 0x0355, 0xFC },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0x98 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x34 },
- {IMX_8BIT, 0x331C, 0x01 },
- {IMX_8BIT, 0x331D, 0x68 },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0x98 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x34 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* CIF H : 368 V : 304 */
-static const struct imx_reg imx135_cif_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x28 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01 }, /* 264,42,3943,3081 3680x3040 */
- {IMX_8BIT, 0x0345, 0x08 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x2a },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x67 },
- {IMX_8BIT, 0x034A, 0x0c },
- {IMX_8BIT, 0x034B, 0x09 },
- {IMX_8BIT, 0x034C, 0x01 }, /* 368x304 */
- {IMX_8BIT, 0x034D, 0x70 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x30 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 920x760 */
- {IMX_8BIT, 0x0355, 0x98 },
- {IMX_8BIT, 0x0356, 0x02 },
- {IMX_8BIT, 0x0357, 0xf8 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0x70 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x30 },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0x70 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x30 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* CIF H : 1888 V : 1548 */
-static const struct imx_reg imx135_cif_binning_1888x1548[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x22 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 264,42, 3776x3096 */
- {IMX_8BIT, 0x0345, 0xD8 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x0C },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x97 },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x23 },
- {IMX_8BIT, 0x034C, 0x07 }, /* 1888x1548 */
- {IMX_8BIT, 0x034D, 0x60 },
- {IMX_8BIT, 0x034E, 0x06 },
- {IMX_8BIT, 0x034F, 0x0C },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x07 }, /* 1888x1548 */
- {IMX_8BIT, 0x0355, 0x60 },
- {IMX_8BIT, 0x0356, 0x06 },
- {IMX_8BIT, 0x0357, 0x0C },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x07 },
- {IMX_8BIT, 0x3311, 0x60 },
- {IMX_8BIT, 0x3312, 0x06 },
- {IMX_8BIT, 0x3313, 0x0C },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x07 },
- {IMX_8BIT, 0x4085, 0x60 },
- {IMX_8BIT, 0x4086, 0x06 },
- {IMX_8BIT, 0x4087, 0x0C },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* QCIF H : 216 V : 176 */
-static const struct imx_reg imx135_qcif_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x46 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 212,20,3995,3099 3784x3080 */
- {IMX_8BIT, 0x0345, 0xD4 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x9B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x00 }, /* 216x176 */
- {IMX_8BIT, 0x034D, 0xD8 },
- {IMX_8BIT, 0x034E, 0x00 },
- {IMX_8BIT, 0x034F, 0xB0 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 946x770 */
- {IMX_8BIT, 0x0355, 0xB2 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x00 },
- {IMX_8BIT, 0x3311, 0xD8 },
- {IMX_8BIT, 0x3312, 0x00 },
- {IMX_8BIT, 0x3313, 0xB0 },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x00 },
- {IMX_8BIT, 0x4085, 0xD8 },
- {IMX_8BIT, 0x4086, 0x00 },
- {IMX_8BIT, 0x4087, 0xB0 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/*
- * ISP Scaling is now supported in offine capture use cases. Because of that
- * we need only few modes to cover the different aspect ratios from the
- * sensor and the ISP will scale it based on the requested resolution from HAL.
- *
- * There is a performance impact when continuous view finder option is chose
- * for resolutions above 8MP. So 8MP and 6MP resolution are kept, so that lower
- * than these take 8MP or 6MP espectively for down scaling based on the
- * aspect ratio.
- */
-struct imx_resolution imx135_res_preview_mofd[] = {
- {
- .desc = "imx135_cif_binning_preview",
- .regs = imx135_cif_binning,
- .width = 368,
- .height = 304,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9114,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_binning_preview",
- .regs = imx135_vga_binning,
- .width = 1036,
- .height = 780,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_preview",
- .regs = imx135_480p_binning,
- .width = 1036,
- .height = 696,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1080p_binning_preview",
- .regs = imx135_1080p_binning,
- .width = 1936,
- .height = 1104,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_3m__cont_cap",
- .regs = imx135_3m_binning,
- .width = 2064,
- .height = 1552,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_6m_cont_cap",
- .regs = imx135_6m,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Binning Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_scaled_from_12m__cont_cap",
- .regs = imx135_8m_scaled_from_12m,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 24,
- .pixels_per_line = 4572,
- .lines_per_frame = 3280,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_10m__cont_cap",
- .regs = imx135_10m,
- .width = 4208,
- .height = 2368,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2632,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_13m__cont_cap",
- .regs = imx135_13m,
- .width = 4208,
- .height = 3120,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 24,
- .pixels_per_line = 4572,
- .lines_per_frame = 3290,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-struct imx_resolution imx135_res_preview[] = {
- {
- .desc = "imx135_xga_cropped_video",
- .regs = imx135_xga_cropped,
- .width = 832,
- .height = 628,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_2m_cropped_video",
- .regs = imx135_2m_cropped,
- .width = 1648,
- .height = 1240,
- .fps_options = {
- { /* Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1936x1096_cropped",
- .regs = imx135_1936x1096_cropped,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- { /* Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-/*
- * ISP Scaling is now supported in online capture use cases. Because of that
- * we need only few modes to cover the different aspect ratios from the
- * sensor and the ISP will scale it based on the requested resolution from HAL.
- *
- * There is a performance impact when continuous view finder option is chose
- * for resolutions above 8MP. So 8MP and 6MP resolution are kept, so that lower
- * than these take 8MP or 6MP espectively for down scaling based on the
- * aspect ratio.
- */
-struct imx_resolution imx135_res_still_mofd[] = {
- {
- .desc = "imx135_cif_binning_still",
- .regs = imx135_cif_binning_1888x1548,
- .width = 1888,
- .height = 1548,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_binning_preview",
- .regs = imx135_vga_binning,
- .width = 1036,
- .height = 780,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_preview",
- .regs = imx135_480p_binning,
- .width = 1036,
- .height = 696,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1080p_binning_still",
- .regs = imx135_1080p_binning,
- .width = 1936,
- .height = 1104,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 15,
- .pixels_per_line = 9114,
- .lines_per_frame = 2453,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_3m__still",
- .regs = imx135_3m_binning,
- .width = 2064,
- .height = 1552,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 15,
- .pixels_per_line = 9114,
- .lines_per_frame = 2453,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_6m_for_mipi_342_still",
- .regs = imx135_6m_for_mipi_342,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 11,
- .pixels_per_line = 9114,
- .lines_per_frame = 2664,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_8m_scaled_from_12m_for_mipi342_still",
- .regs = imx135_8m_scaled_from_12m_for_mipi342,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 8,
- .pixels_per_line = 7672,
- .lines_per_frame = 4458,
- },
- { /* Pixel clock: 273.6MHz */
- .fps = 15,
- .pixels_per_line = 5500,
- .lines_per_frame = 3314,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_10m_for_mipi_342_still",
- .regs = imx135_10m_for_mipi_342,
- .width = 4208,
- .height = 2368,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 11,
- .pixels_per_line = 9144,
- .lines_per_frame = 2664,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_13m_still",
- .regs = imx135_13m_for_mipi_342,
- .width = 4208,
- .height = 3120,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 5,
- .pixels_per_line = 9144,
- .lines_per_frame = 5990,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
-};
-
-struct imx_resolution imx135_res_still[] = {
- {
- .desc = "imx135_qvga",
- .regs = imx135_336x256,
- .width = 336,
- .height = 256,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_cif",
- .regs = imx135_368x304_cropped,
- .width = 368,
- .height = 304,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_xga_cropped_video",
- .regs = imx135_xga_cropped,
- .width = 832,
- .height = 628,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_2M_for_11:9",
- .regs = imx135_1424x1168_cropped,
- .width = 1424,
- .height = 1168,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_2m_cropped_video",
- .regs = imx135_2m_cropped,
- .width = 1648,
- .height = 1240,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 15,
- .pixels_per_line = 6466,
- .lines_per_frame = 3710,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_6m_cropped_video",
- .regs = imx135_6m_cropped,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 8,
- .pixels_per_line = 8850,
- .lines_per_frame = 5080,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 8,
- .pixels_per_line = 8850,
- .lines_per_frame = 5080,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-/*
- * ISP scaling is not supported in case of video modes. So we need to have
- * separate sensor mode for video use cases
- */
-struct imx_resolution imx135_res_video[] = {
- /* For binning modes pix clock is 335.36 MHz. */
- {
- .desc = "imx135_qcif_dvs_binning_video",
- .regs = imx135_qcif_dvs_binning,
- .width = 216,
- .height = 176,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_cif_binning_video",
- .regs = imx135_cif_binning,
- .width = 368,
- .height = 304,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_qvga__dvs_binning_video",
- .regs = imx135_qvga__dvs_binning,
- .width = 408,
- .height = 308,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_436x360_binning_video",
- .regs = imx135_436x360_binning,
- .width = 436,
- .height = 360,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_dvs_binning_video",
- .regs = imx135_vga_dvs_binning,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_dvs_binning_video",
- .regs = imx135_480p_dvs_binning,
- .width = 936,
- .height = 602,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_720P_dvs_video",
- .regs = imx135_720pdvs_max_clock,
- .width = 1568,
- .height = 880,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 5850,
- .lines_per_frame = 2000,
- },
- {/* Pixel Clock : 360.96 MHz */
- .fps = 60,
- .pixels_per_line = 4572,
- .lines_per_frame = 1310,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_wvga_dvs_binning_video",
- .regs = imx135_wvga_dvs_binning,
- .width = 1640,
- .height = 1024,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1936_1096_fullfov_max_clock",
- .regs = imx135_1080p_nodvs_max_clock,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 5850,
- .lines_per_frame = 2000,
- },
- {/* Pixel Clock : 360.96 MHz */
- .fps = 60,
- .pixels_per_line = 4572,
- .lines_per_frame = 1310,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_1080P_dvs_video",
- .regs = imx135_2336x1320_max_clock,
- .width = 2336,
- .height = 1320,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2632,
- .regs = imx135_2336x1320_max_clock,
- .mipi_freq = 451200,
- },
- {/* Pixel Clock : 399.36MHz */
- .fps = 60,
- .pixels_per_line = 4754,
- .lines_per_frame = 1400,
- .regs = imx135_2336x1320_cropped_mipi499,
- .mipi_freq = 499200,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_6m_cont_cap",
- .regs = imx135_6m,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Binning Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx175.h b/drivers/staging/media/atomisp/i2c/imx/imx175.h
deleted file mode 100644
index 5e082088cb37..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx175.h
+++ /dev/null
@@ -1,1960 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX175_H__
-#define __IMX175_H__
-#include "common.h"
-
-/************************** settings for imx *************************/
-static struct imx_reg const imx_STILL_8M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_8M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_3M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_3M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-static struct imx_reg const imx_STILL_5M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_5M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_6M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_6M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_2M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x8C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_2M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x8C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_PREVIEW_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x03},
- {IMX_8BIT, 0x33D5, 0x34},
- {IMX_8BIT, 0x33D6, 0x02},
- {IMX_8BIT, 0x33D7, 0x68},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_WIDE_PREVIEW_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0D}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x70}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x10}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x00}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x14}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x8C}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xBC}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x68},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0xBC},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/*****************************video************************/
-static struct imx_reg const imx_1080p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x06}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x4C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xA4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xC6}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xDB}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x42}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xEA}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x61}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x09}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x05}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x20}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_1080p_no_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x08}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xD5}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xD0}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0F}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x3C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_1080p_no_dvs_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x08}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xD5}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xA6}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-/*****************************video************************/
-static struct imx_reg const imx_720p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xD7}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x3E}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xEE}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x65}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x70}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x18}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_480p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xD4}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0xC8}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xF1}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0xDB}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x50}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x15}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_720p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x14}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x28}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_720p_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x08}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xCA}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x38}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_WVGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0xD0}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0xCF}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x00}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-static struct imx_reg const imx_CIF_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xDB}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x30}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x94}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VGA_strong_dvs_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x9E}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x1C}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xB6}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_QVGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x03}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x38}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x68}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x09}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x97}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x37}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x98}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x34}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x01},
- {IMX_8BIT, 0x33D5, 0x98},
- {IMX_8BIT, 0x33D6, 0x01},
- {IMX_8BIT, 0x33D7, 0x34},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_QCIF_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x04}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xB8}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x03}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x70}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x08}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x06}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x2F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x00}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD8}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x00}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xB0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x00},
- {IMX_8BIT, 0x33D5, 0xD8},
- {IMX_8BIT, 0x33D6, 0x00},
- {IMX_8BIT, 0x33D7, 0xB0},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx175_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0103, 0x01},
- /* misc control */
- {IMX_8BIT, 0x3020, 0x10},
- {IMX_8BIT, 0x302D, 0x02},
- {IMX_8BIT, 0x302F, 0x80},
- {IMX_8BIT, 0x3032, 0xA3},
- {IMX_8BIT, 0x3033, 0x20},
- {IMX_8BIT, 0x3034, 0x24},
- {IMX_8BIT, 0x3041, 0x15},
- {IMX_8BIT, 0x3042, 0x87},
- {IMX_8BIT, 0x3050, 0x35},
- {IMX_8BIT, 0x3056, 0x57},
- {IMX_8BIT, 0x305D, 0x41},
- {IMX_8BIT, 0x3097, 0x69},
- {IMX_8BIT, 0x3109, 0x41},
- {IMX_8BIT, 0x3148, 0x3F},
- {IMX_8BIT, 0x330F, 0x07},
- /* csi & inck */
- {IMX_8BIT, 0x3364, 0x00},
- {IMX_8BIT, 0x3368, 0x13},
- {IMX_8BIT, 0x3369, 0x33},
- /* znr */
- {IMX_8BIT, 0x4100, 0x0E},
- {IMX_8BIT, 0x4104, 0x32},
- {IMX_8BIT, 0x4105, 0x32},
- {IMX_8BIT, 0x4108, 0x01},
- {IMX_8BIT, 0x4109, 0x7C},
- {IMX_8BIT, 0x410A, 0x00},
- {IMX_8BIT, 0x410B, 0x00},
- GROUPED_PARAMETER_HOLD_DISABLE,
- {IMX_TOK_TERM, 0, 0}
-};
-/* TODO settings of preview/still/video will be updated with new use case */
-struct imx_resolution imx175_res_preview[] = {
- {
- .desc = "CIF_strong_dvs_30fps",
- .regs = imx_CIF_strong_dvs_30fps,
- .width = 368,
- .height = 304,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
-
- },
- {
- .desc = "VGA_strong_dvs_30fps",
- .regs = imx_VGA_strong_dvs_30fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "WIDE_PREVIEW_30fps",
- .regs = imx_WIDE_PREVIEW_30fps,
- .width = 1640,
- .height = 956,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1000,
- .lines_per_frame = 0x0D70,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "STILL_720p_30fps",
- .regs = imx_STILL_720p_30fps,
- .width = 1568,
- .height = 876,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1428,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "STILL_2M_30fps",
- .regs = imx_STILL_2M_30fps,
- .width = 1640,
- .height = 1232,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "1080p_strong_dvs_30fps",
- .regs = imx_1080p_no_dvs_30fps,
- .width = 1940,
- .height = 1092,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0F3C,
- .lines_per_frame = 0x07D0,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "STILL_3M_30fps",
- .regs = imx_STILL_3M_30fps,
- .width = 2064,
- .height = 1552,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_5M_30fps",
- .regs = imx_STILL_5M_30fps,
- .width = 2576,
- .height = 1936,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_6M_30fps",
- .regs = imx_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
-};
-
-struct imx_resolution imx175_res_still[] = {
- {
- .desc = "CIF_strong_dvs_30fps",
- .regs = imx_CIF_strong_dvs_30fps,
- .width = 368,
- .height = 304,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261000,
- },
- {
- .desc = "VGA_strong_dvs_15fps",
- .regs = imx_VGA_strong_dvs_15fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1C86,
- .lines_per_frame = 0x079E,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "imx_STILL_720p_15fps",
- .regs = imx_STILL_720p_15fps,
- .width = 1568,
- .height = 876,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1838,
- .lines_per_frame = 0x08CA,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "STILL_2M_15fps",
- .regs = imx_STILL_2M_15fps,
- .width = 1640,
- .height = 1232,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "1080p_strong_dvs_15fps",
- .regs = imx_1080p_no_dvs_15fps,
- .width = 1940,
- .height = 1092,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x189C,
- .lines_per_frame = 0x09A6,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "STILL_3M_15fps",
- .regs = imx_STILL_3M_15fps,
- .width = 2064,
- .height = 1552,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_5M_15fps",
- .regs = imx_STILL_5M_15fps,
- .width = 2576,
- .height = 1936,
- .fps = 15,
- .pixels_per_line = 0x1646, /* consistent with regs arrays */
- .lines_per_frame = 0x0BB8, /* consistent with regs arrays */
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_6M_15fps",
- .regs = imx_STILL_6M_15fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_8M_15fps",
- .regs = imx_STILL_8M_15fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
-};
-
-struct imx_resolution imx175_res_video[] = {
- {
- .desc = "QCIF_strong_dvs_30fps",
- .regs = imx_QCIF_strong_dvs_30fps,
- .width = 216,
- .height = 176,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D70,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "QVGA_strong_dvs_30fps",
- .regs = imx_QVGA_strong_dvs_30fps,
- .width = 408,
- .height = 308,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D70,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "VGA_strong_dvs_30fps",
- .regs = imx_VGA_strong_dvs_30fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1194,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "720p_strong_dvs_30fps",
- .regs = imx_720p_strong_dvs_30fps,
- .width = 1552,
- .height = 880,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- .fps = 60,
- .pixels_per_line = 0xD70,
- .lines_per_frame = 0x444,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "480p_strong_dvs_30fps",
- .regs = imx_480p_strong_dvs_30fps,
- .width = 880,
- .height = 592,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "WVGA_strong_dvs_30fps",
- .regs = imx_WVGA_strong_dvs_30fps,
- .width = 1640,
- .height = 1024,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "1080p_strong_dvs_30fps",
- .regs = imx_1080p_strong_dvs_30fps,
- .width = 2320,
- .height = 1312,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11C6,
- .lines_per_frame = 0x06A4,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx208.h b/drivers/staging/media/atomisp/i2c/imx/imx208.h
deleted file mode 100644
index fed387f42f99..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx208.h
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX208_H__
-#define __IMX208_H__
-#include "common.h"
-
-/********************** settings for imx from vendor*********************/
-static struct imx_reg imx208_1080p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x07}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x8F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x47}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x90}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0x48}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_1296x736_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0xB4}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x93}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xE0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_1296x976_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x3C}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x0B}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_336x256_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x78}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x24}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x23}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x50}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0x00}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x03}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x03}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x66}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_192x160_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0xE4}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x47}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x63}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x00}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0xC0}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x00}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x03}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x05}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x03}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x05}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x11}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x74}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-/********************** settings for imx - reference *********************/
-static struct imx_reg const imx208_init_settings[] = {
- { IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx208_res_preview[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-
-struct imx_resolution imx208_res_still[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-
-struct imx_resolution imx208_res_video[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx219.h b/drivers/staging/media/atomisp/i2c/imx/imx219.h
deleted file mode 100644
index bbd515bf7279..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx219.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX219_H__
-#define __IMX219_H__
-#include "common.h"
-
-#define IMX219_FRAME_LENGTH_LINES 0x0160
-#define IMX219_LINE_LENGTH_PIXELS 0x0162
-#define IMX219_HORIZONTAL_START_H 0x0164
-#define IMX219_VERTICAL_START_H 0x0168
-#define IMX219_HORIZONTAL_END_H 0x0166
-#define IMX219_VERTICAL_END_H 0x016A
-#define IMX219_HORIZONTAL_OUTPUT_SIZE_H 0x016c
-#define IMX219_VERTICAL_OUTPUT_SIZE_H 0x016E
-#define IMX219_COARSE_INTEGRATION_TIME 0x015A
-#define IMX219_IMG_ORIENTATION 0x0172
-#define IMX219_GLOBAL_GAIN 0x0157
-#define IMX219_DGC_ADJ 0x0158
-
-#define IMX219_DGC_LEN 4
-
-/************************** settings for imx *************************/
-static struct imx_reg const imx219_STILL_8M_30fps[] = {
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x0114, 0x03}, /*CSI_LANE_MODE[1:0}*/
- {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
- {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
- {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
- {IMX_8BIT, 0x0160, 0x0A}, /*FRM_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0161, 0x94}, /*FRM_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x0168, 0x00}, /*Y_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0169, 0x00}, /*Y_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x016A, 0x09}, /*Y_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x016B, 0x9F}, /*Y_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x016E, 0x09}, /*Y_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016F, 0xA0}, /*Y_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
- {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
- {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
- {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
- {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
- {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
- {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
- {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
- {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
- {IMX_8BIT, 0x0307, 0x49}, /*PLL_VT_MPY[7:0]*/
- {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
- {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
- {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
- {IMX_8BIT, 0x030D, 0x4C}, /*PLL_OP_MPY[7:0]*/
- {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
- {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
- {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx219_STILL_6M_30fps[] = {
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x0114, 0x03}, /*CSI_LANE_MODE[1:0}*/
- {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
- {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
- {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
- {IMX_8BIT, 0x0160, 0x07}, /*FRM_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0161, 0x64}, /*FRM_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x0168, 0x01}, /*Y_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0169, 0x32}, /*Y_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x016A, 0x08}, /*Y_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x016B, 0x6D}, /*Y_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x016E, 0x07}, /*Y_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016F, 0x3C}, /*Y_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
- {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
- {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
- {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
- {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
- {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
- {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
- {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
- {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
- {IMX_8BIT, 0x0307, 0x33}, /*PLL_VT_MPY[7:0]*/
- {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
- {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
- {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
- {IMX_8BIT, 0x030D, 0x36}, /*PLL_OP_MPY[7:0]*/
- {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
- {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
- {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx219_init_settings[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx219_res_preview[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx219_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0A94,
- },
- {
- }
- },
- .mipi_freq = 365000,
- },
-};
-
-struct imx_resolution imx219_res_still[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx219_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0A94,
- },
- {
- }
- },
- .mipi_freq = 365000,
- },
-};
-
-struct imx_resolution imx219_res_video[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx227.h b/drivers/staging/media/atomisp/i2c/imx/imx227.h
deleted file mode 100644
index 795fe017d01b..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx227.h
+++ /dev/null
@@ -1,727 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX227_H__
-#define __IMX227_H__
-
-#include "common.h"
-
-#define IMX227_EMBEDDED_DATA_LINE_NUM 2
-#define IMX227_OUTPUT_DATA_FORMAT_REG 0x0112
-#define IMX227_OUTPUT_FORMAT_RAW10 0x0a0a
-
-/* AE Bracketing Registers */
-#define IMX227_BRACKETING_LUT_MODE_BIT_CONTINUE_STREAMING 0x1
-#define IMX227_BRACKETING_LUT_MODE_BIT_LOOP_MODE 0x2
-
-#define IMX227_BRACKETING_LUT_CONTROL 0x0E00
-#define IMX227_BRACKETING_LUT_MODE 0x0E01
-#define IMX227_BRACKETING_LUT_ENTRY_CONTROL 0x0E02
-
-/*
- * The imx135 embedded data info:
- * embedded data line num: 2
- * line 0 effective data size(byte): 76
- * line 1 effective data size(byte): 113
- */
-static const uint32_t
-imx227_embedded_effective_size[IMX227_EMBEDDED_DATA_LINE_NUM] = {160, 62};
-
-/************************** settings for imx *************************/
-/* Full Output Mode */
-static struct imx_reg const imx_STILL_6_5M_25fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd0}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 4:3 Output Mode */
-static struct imx_reg const imx_STILL_5_5M_3X4_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0xb0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x08},
- {IMX_8BIT, 0x0349, 0xaf},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x9f},
- {IMX_8BIT, 0x034c, 0x08},
- {IMX_8BIT, 0x034d, 0x00},
- {IMX_8BIT, 0x034e, 0x0a},
- {IMX_8BIT, 0x034f, 0xa0},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd8}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Square Output Mode */
-static struct imx_reg const imx_STILL_5_7M_1X1_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0xa0},
- {IMX_8BIT, 0x0348, 0x09},
- {IMX_8BIT, 0x0349, 0x5f},
- {IMX_8BIT, 0x034a, 0x09},
- {IMX_8BIT, 0x034b, 0xff},
- {IMX_8BIT, 0x034c, 0x09},
- {IMX_8BIT, 0x034d, 0x60},
- {IMX_8BIT, 0x034e, 0x09},
- {IMX_8BIT, 0x034f, 0x60},
-
- {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd4}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Full Frame 1080P Mode (use ISP scaler)*/
-static struct imx_reg const imx_VIDEO_4M_9X16_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Cropped 1080P Mode */
-static struct imx_reg const imx_VIDEO_2M_9X16_45fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x02},
- {IMX_8BIT, 0x0345, 0x8a},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x88},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0xd1},
- {IMX_8BIT, 0x034a, 0x09},
- {IMX_8BIT, 0x034b, 0x17},
- {IMX_8BIT, 0x034c, 0x04},
- {IMX_8BIT, 0x034d, 0x48},
- {IMX_8BIT, 0x034e, 0x07},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x04},
- {IMX_8BIT, 0x040d, 0x48},
- {IMX_8BIT, 0x040e, 0x07},
- {IMX_8BIT, 0x040f, 0x90},
-
- {IMX_8BIT, 0x0900, 0x00},
- {IMX_8BIT, 0x0901, 0x00},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
-
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Moment mode */
-static struct imx_reg const imx_VIDEO_1_3M_3X4_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd9}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* High Speed 3:4 mode */
-static struct imx_reg const imx_VIDEO_VGA_3X4_120fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x9004, 0xca}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-/* Binned 720P mode */
-static struct imx_reg const imx_VIDEO_1M_9X16_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xd0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x40},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x8f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x5f},
- {IMX_8BIT, 0x034c, 0x02},
- {IMX_8BIT, 0x034d, 0xe0},
- {IMX_8BIT, 0x034e, 0x05},
- {IMX_8BIT, 0x034f, 0x10},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x02},
- {IMX_8BIT, 0x040d, 0xe0},
- {IMX_8BIT, 0x040e, 0x05},
- {IMX_8BIT, 0x040f, 0x10},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Binned 496x868 mode */
-static struct imx_reg const imx_VIDEO_496x868_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x02},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0xec},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x08},
- {IMX_8BIT, 0x034b, 0xb3},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0xf0},
- {IMX_8BIT, 0x034e, 0x03},
- {IMX_8BIT, 0x034f, 0x64},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0xf0},
- {IMX_8BIT, 0x040e, 0x03},
- {IMX_8BIT, 0x040f, 0x64},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-/* Hangout mode */
-static struct imx_reg const imx_PREVIEW_374X652_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x30},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x6f},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0x78},
- {IMX_8BIT, 0x034e, 0x02},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x03},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x03},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x02},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0x76},
- {IMX_8BIT, 0x040e, 0x02},
- {IMX_8BIT, 0x040f, 0x8c},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VIDEO_NHD_9X16_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x30},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x6f},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0x78},
- {IMX_8BIT, 0x034e, 0x02},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x03},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x03},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0x78},
- {IMX_8BIT, 0x040e, 0x02},
- {IMX_8BIT, 0x040f, 0x90},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-static struct imx_reg const imx227_init_settings[] = {
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0306, 0x00},
- {IMX_8BIT, 0x0307, 0xBB},
- {IMX_8BIT, 0x030E, 0x03},
- {IMX_8BIT, 0x030F, 0x0D},
- {IMX_8BIT, 0x463b, 0x30},
- {IMX_8BIT, 0x463e, 0x05},
- {IMX_8BIT, 0x4612, 0x66},
- {IMX_8BIT, 0x4815, 0x65},
- {IMX_8BIT, 0x4991, 0x00},
- {IMX_8BIT, 0x4992, 0x01},
- {IMX_8BIT, 0x4993, 0xff},
- {IMX_8BIT, 0x458b, 0x00},
- {IMX_8BIT, 0x452a, 0x02},
- {IMX_8BIT, 0x4a7c, 0x00},
- {IMX_8BIT, 0x4a7d, 0x1c},
- {IMX_8BIT, 0x4a7e, 0x00},
- {IMX_8BIT, 0x4a7f, 0x17},
- {IMX_8BIT, 0x462C, 0x2E},
- {IMX_8BIT, 0x461B, 0x28},
- {IMX_8BIT, 0x4663, 0x29},
- {IMX_8BIT, 0x461A, 0x7C},
- {IMX_8BIT, 0x4619, 0x28},
- {IMX_8BIT, 0x4667, 0x22},
- {IMX_8BIT, 0x466B, 0x23},
- {IMX_8BIT, 0x40AD, 0xFF},
- {IMX_8BIT, 0x40BE, 0x00},
- {IMX_8BIT, 0x40BF, 0x6E},
- {IMX_8BIT, 0x40CE, 0x00},
- {IMX_8BIT, 0x40CF, 0x0A},
- {IMX_8BIT, 0x40CA, 0x00},
- {IMX_8BIT, 0x40CB, 0x1F},
- {IMX_8BIT, 0x4D16, 0x00},
- {IMX_8BIT, 0x6204, 0x01},
- {IMX_8BIT, 0x6209, 0x00},
- {IMX_8BIT, 0x621F, 0x01},
- {IMX_8BIT, 0x621E, 0x10},
- GROUPED_PARAMETER_HOLD_DISABLE,
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* TODO settings of preview/still/video will be updated with new use case */
-struct imx_resolution imx227_res_preview[] = {
- {
- .desc = "imx_PREVIEW_374X652_30fps",
- .regs = imx_PREVIEW_374X652_30fps,
- .width = 374,
- .height = 652,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C0A,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_496x868_30fps",
- .regs = imx_VIDEO_496x868_30fps,
- .width = 496,
- .height = 868,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_STILL_5_5M_3X4_30fps",
- .regs = imx_STILL_5_5M_3X4_30fps,
- .width = 2048,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0ED8,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_5_7M_1X1_30fps",
- .regs = imx_STILL_5_7M_1X1_30fps,
- .width = 2400,
- .height = 2400,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0A1E,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_6_5M_25fps",
- .regs = imx_STILL_6_5M_25fps,
- .width = 2400,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 25,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0C24,
- },
- {
- }
- },
- }
-};
-
-struct imx_resolution imx227_res_still[] = {
- {
- .desc = "imx_STILL_5_5M_3X4_30fps",
- .regs = imx_STILL_5_5M_3X4_30fps,
- .width = 2048,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 6,
- .pixels_per_line = 0x2130,
- .lines_per_frame = 0x1A22,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0ED8,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_5_7M_1X1_30fps",
- .regs = imx_STILL_5_7M_1X1_30fps,
- .width = 2400,
- .height = 2400,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 6,
- .pixels_per_line = 0x266E,
- .lines_per_frame = 0x1704,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0A1E,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_6_5M_25fps",
- .regs = imx_STILL_6_5M_25fps,
- .width = 2400,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 25,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0C24,
- },
- {
- }
- },
- },
-};
-
-struct imx_resolution imx227_res_video[] = {
- {
- .desc = "imx_VIDEO_4M_9X16_30fps",
- .regs = imx_VIDEO_4M_9X16_30fps,
- .width = 1536,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_VIDEO_2M_9X16_45fps",
- .regs = imx_VIDEO_2M_9X16_45fps,
- .width = 1096,
- .height = 1936,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- .fps = 45,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0800,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_VIDEO_1_3M_3X4_60fps",
- .regs = imx_VIDEO_1_3M_3X4_60fps,
- .width = 1024,
- .height = 1360,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0604,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_496x868_30fps",
- .regs = imx_VIDEO_496x868_30fps,
- .width = 496,
- .height = 868,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_1M_9X16_60fps",
- .regs = imx_VIDEO_1M_9X16_60fps,
- .width = 736,
- .height = 1296,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0604,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C10,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_VGA_3X4_120fps",
- .regs = imx_VIDEO_VGA_3X4_120fps,
- .width = 512,
- .height = 680,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 120,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0302,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_NHD_9X16_30fps",
- .regs = imx_VIDEO_NHD_9X16_30fps,
- .width = 376,
- .height = 656,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C0A,
- },
- {
- }
- },
- },
-};
-
-#endif /* __IMX227_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp.c b/drivers/staging/media/atomisp/i2c/imx/otp.c
deleted file mode 100644
index 462275038046..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-
-void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- return buf;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
deleted file mode 100644
index b11f90c5960c..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "common.h"
-
-/*
- * Read EEPROM data from brcc064 and store
- * it into a kmalloced buffer. On error return NULL.
- * @size: set to the size of the returned EEPROM data.
- */
-void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned int e2prom_i2c_addr = dev_addr >> 1;
- static const unsigned int max_read_size = 30;
- int addr;
- u32 s_addr = start_addr & E2PROM_ADDR_MASK;
- unsigned char *buffer;
-
- buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- for (addr = s_addr; addr < size; addr += max_read_size) {
- struct i2c_msg msg[2];
- unsigned int i2c_addr = e2prom_i2c_addr;
- u16 addr_buf;
- int r;
-
- msg[0].flags = 0;
- msg[0].addr = i2c_addr;
- addr_buf = cpu_to_be16(addr & 0xFFFF);
- msg[0].len = 2;
- msg[0].buf = (u8 *)&addr_buf;
-
- msg[1].addr = i2c_addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = min(max_read_size, size - addr);
- msg[1].buf = &buffer[addr];
-
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(&client->dev, "read failed at 0x%03x\n", addr);
- return NULL;
- }
- }
- return buffer;
-
-}
-
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
deleted file mode 100644
index 73d041f97811..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "common.h"
-
-/*
- * Read EEPROM data from the gerneral e2prom chip(eg.
- * CAT24C08, CAT24C128, le24l042cs, and store
- * it into a kmalloced buffer. On error return NULL.
- * @size: set to the size of the returned EEPROM data.
- */
-void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned int e2prom_i2c_addr = dev_addr >> 1;
- static const unsigned int max_read_size = 30;
- int addr;
- u32 s_addr = start_addr & E2PROM_ADDR_MASK;
- bool two_addr = (start_addr & E2PROM_2ADDR) >> 31;
- char *buffer;
-
- buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- for (addr = s_addr; addr < size; addr += max_read_size) {
- struct i2c_msg msg[2];
- unsigned int i2c_addr = e2prom_i2c_addr;
- u16 addr_buf;
- int r;
-
- msg[0].flags = 0;
- if (two_addr) {
- msg[0].addr = i2c_addr;
- addr_buf = cpu_to_be16(addr & 0xFFFF);
- msg[0].len = 2;
- msg[0].buf = (u8 *)&addr_buf;
- } else {
- i2c_addr |= (addr >> 8) & 0x7;
- msg[0].addr = i2c_addr;
- addr_buf = addr & 0xFF;
- msg[0].len = 1;
- msg[0].buf = (u8 *)&addr_buf;
- }
-
- msg[1].addr = i2c_addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = min(max_read_size, size - addr);
- msg[1].buf = &buffer[addr];
-
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(&client->dev, "read failed at 0x%03x\n", addr);
- return NULL;
- }
- }
- return buffer;
-}
-
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_imx.c b/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
deleted file mode 100644
index 1ca27c26ef75..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-#include "common.h"
-
-/* Defines for OTP Data Registers */
-#define IMX_OTP_START_ADDR 0x3B04
-#define IMX_OTP_PAGE_SIZE 64
-#define IMX_OTP_READY_REG 0x3B01
-#define IMX_OTP_PAGE_REG 0x3B02
-#define IMX_OTP_MODE_REG 0x3B00
-#define IMX_OTP_PAGE_MAX 20
-#define IMX_OTP_READY_REG_DONE 1
-#define IMX_OTP_READ_ONETIME 32
-#define IMX_OTP_MODE_READ 1
-#define IMX227_OTP_START_ADDR 0x0A04
-#define IMX227_OTP_ENABLE_REG 0x0A00
-#define IMX227_OTP_READY_REG 0x0A01
-#define IMX227_OTP_PAGE_REG 0x0A02
-#define IMX227_OTP_READY_REG_DONE 1
-#define IMX227_OTP_MODE_READ 1
-
-static int
-imx_read_otp_data(struct i2c_client *client, u16 len, u16 reg, void *val)
-{
- struct i2c_msg msg[2];
- u16 data[IMX_SHORT_MAX] = { 0 };
- int err;
-
- if (len > IMX_BYTE_MAX) {
- dev_err(&client->dev, "%s error, invalid data length\n",
- __func__);
- return -EINVAL;
- }
-
- memset(msg, 0 , sizeof(msg));
- memset(data, 0 , sizeof(data));
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = I2C_MSG_LENGTH;
- msg[0].buf = (u8 *)data;
- /* high byte goes first */
- data[0] = cpu_to_be16(reg);
-
- msg[1].addr = client->addr;
- msg[1].len = len;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = (u8 *)data;
-
- err = i2c_transfer(client->adapter, msg, 2);
- if (err != 2) {
- if (err >= 0)
- err = -EIO;
- goto error;
- }
-
- memcpy(val, data, len);
- return 0;
-
-error:
- dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
- return err;
-}
-
-static int imx_read_otp_reg_array(struct i2c_client *client, u16 size, u16 addr,
- u8 *buf)
-{
- u16 index;
- int ret;
-
- for (index = 0; index + IMX_OTP_READ_ONETIME <= size;
- index += IMX_OTP_READ_ONETIME) {
- ret = imx_read_otp_data(client, IMX_OTP_READ_ONETIME,
- addr + index, &buf[index]);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
- int ret;
- int i;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
-
- /*set page NO.*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX_OTP_PAGE_REG, i & 0xff);
- if (ret)
- goto fail;
-
- /*set read mode*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX_OTP_MODE_REG, IMX_OTP_MODE_READ);
- if (ret)
- goto fail;
-
- /* Reading the OTP data array */
- ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
- IMX_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
- if (ret)
- goto fail;
- }
-
- return buf;
-fail:
- /* Driver has failed to find valid data */
- dev_err(&client->dev, "sensor found no valid OTP data\n");
- return ERR_PTR(ret);
-}
-
-void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
- int ret;
- int i;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
-
- /*set page NO.*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX227_OTP_PAGE_REG, i & 0xff);
- if (ret)
- goto fail;
-
- /*set read mode*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX227_OTP_ENABLE_REG, IMX227_OTP_MODE_READ);
- if (ret)
- goto fail;
-
- /* Reading the OTP data array */
- ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
- IMX227_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
- if (ret)
- goto fail;
- }
-
- return buf;
-fail:
- /* Driver has failed to find valid data */
- dev_err(&client->dev, "sensor found no valid OTP data\n");
- return ERR_PTR(ret);
-}
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/vcm.c b/drivers/staging/media/atomisp/i2c/imx/vcm.c
deleted file mode 100644
index 2d2df04c800a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/vcm.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "../../include/linux/atomisp_platform.h"
-
-int vcm_power_up(struct v4l2_subdev *sd)
-{
- const struct camera_af_platform_data *vcm_platform_data;
-
- vcm_platform_data = camera_get_af_platform_data();
- if (NULL == vcm_platform_data)
- return -ENODEV;
- /* Enable power */
- return vcm_platform_data->power_ctrl(sd, 1);
-}
-
-int vcm_power_down(struct v4l2_subdev *sd)
-{
- const struct camera_af_platform_data *vcm_platform_data;
-
- vcm_platform_data = camera_get_af_platform_data();
- if (NULL == vcm_platform_data)
- return -ENODEV;
- return vcm_platform_data->power_ctrl(sd, 0);
-}
-
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h
index 5e7d79d2e01b..0af79d77a404 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.h
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -394,11 +390,6 @@ static struct mt9m114_res_struct mt9m114_res[] = {
};
#define N_RES (ARRAY_SIZE(mt9m114_res))
-static const struct i2c_device_id mt9m114_id[] = {
- {"mt9m114", 0},
- {}
-};
-
static struct misensor_reg const mt9m114_exitstandby[] = {
{MISENSOR_16BIT, 0x098E, 0xDC00},
/* exit-standby */
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index ab8907e6c9ef..bf4897347df7 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,10 +31,6 @@
#include "../include/linux/atomisp_platform.h"
-#define OV2680_NAME "ov2680"
-#define OV2680B_NAME "ov2680b"
-#define OV2680F_NAME "ov2680f"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 0x2
#define I2C_RETRY_COUNT 5
@@ -227,12 +219,6 @@ struct ov2680_format {
struct ov2680_write_buffer buffer;
};
- static const struct i2c_device_id ov2680_id[] = {
- {OV2680B_NAME, 0},
- {OV2680F_NAME, 0},
- {}
- };
-
static struct ov2680_reg const ov2680_global_setting[] = {
{OV2680_8BIT, 0x0103, 0x01},
{OV2680_8BIT, 0x3002, 0x00},
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
index 73ecb1679718..d8a973d71699 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.h
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,8 +31,6 @@
#include "../include/linux/atomisp_platform.h"
-#define OV2722_NAME "ov2722"
-
#define OV2722_POWER_UP_RETRY_NUM 5
/* Defines for register writes and register array processing */
@@ -257,11 +251,6 @@ struct ov2722_write_ctrl {
struct ov2722_write_buffer buffer;
};
-static const struct i2c_device_id ov2722_id[] = {
- {OV2722_NAME, 0},
- {}
-};
-
/*
* Register settings for various resolution
*/
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
index 9fb1bffbe9b3..3f527f2047a7 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
@@ -1,11 +1,11 @@
-config VIDEO_OV5693
+config VIDEO_ATOMISP_OV5693
tristate "Omnivision ov5693 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
- ov5693 5 Mpixel camera.
+ This is a Video4Linux2 sensor-level driver for the Micron
+ ov5693 5 Mpixel camera.
- ov5693 is video camera sensor.
-
- It currently only works with the atomisp driver.
+ ov5693 is video camera sensor.
+ It currently only works with the atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index facb70e6a93e..aa6be85c5a60 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += atomisp-ov5693.o
# HACK! While this driver is in bad shape, don't enable several warnings
# that would be otherwise enabled with W=1
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
index 2dd894989cd9..4de44569fe54 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 123642557aa8..3e7c3851280f 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -31,7 +27,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -391,8 +386,8 @@ static int ov5693_write_reg_array(struct i2c_client *client,
if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
next)) {
err = __ov5693_flush_reg_array(client, &ctrl);
- if (err)
- return err;
+ if (err)
+ return err;
}
err = __ov5693_buf_reg_array(client, &ctrl, next);
if (err) {
@@ -945,12 +940,8 @@ static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
{
- int ret;
-
value = min(value, AD5823_MAX_FOCUS_POS);
- ret = ad5823_t_focus_vcm(sd, value);
-
- return ret;
+ return ad5823_t_focus_vcm(sd, value);
}
static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
@@ -1302,10 +1293,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
/* This driver assumes "internal DVDD, PWDNB tied to DOVDD".
* In this set up only gpio0 (XSHUTDN) should be available
* but in some products (for example ECS) gpio1 (PWDNB) is
@@ -1332,19 +1319,12 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
{
- int ret;
struct ov5693_device *dev = to_ov5693_sensor(sd);
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
- ret = dev->platform_data->gpio0_ctrl(sd, flag);
-
- return ret;
+ return dev->platform_data->gpio0_ctrl(sd, flag);
}
static int __power_up(struct v4l2_subdev *sd)
@@ -1942,8 +1922,7 @@ static int ov5693_remove(struct i2c_client *client)
return 0;
}
-static int ov5693_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov5693_probe(struct i2c_client *client)
{
struct ov5693_device *dev;
int i2c;
@@ -1965,10 +1944,8 @@ static int ov5693_probe(struct i2c_client *client,
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -2030,8 +2007,6 @@ out_free:
return ret;
}
-MODULE_DEVICE_TABLE(i2c, ov5693_id);
-
static const struct acpi_device_id ov5693_acpi_match[] = {
{"INT33BE"},
{},
@@ -2040,27 +2015,13 @@ MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
static struct i2c_driver ov5693_driver = {
.driver = {
- .name = OV5693_NAME,
- .acpi_match_table = ACPI_PTR(ov5693_acpi_match),
+ .name = "ov5693",
+ .acpi_match_table = ov5693_acpi_match,
},
- .probe = ov5693_probe,
+ .probe_new = ov5693_probe,
.remove = ov5693_remove,
- .id_table = ov5693_id,
};
-
-static int init_ov5693(void)
-{
- return i2c_add_driver(&ov5693_driver);
-}
-
-static void exit_ov5693(void)
-{
-
- i2c_del_driver(&ov5693_driver);
-}
-
-module_init(init_ov5693);
-module_exit(exit_ov5693);
+module_i2c_driver(ov5693_driver);
MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
index 8c2e6794463b..2ea63807c56d 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,8 +31,6 @@
#include "../../include/linux/atomisp_platform.h"
-#define OV5693_NAME "ov5693"
-
#define OV5693_POWER_UP_RETRY_NUM 5
/* Defines for register writes and register array processing */
@@ -278,11 +272,6 @@ struct ov5693_write_ctrl {
struct ov5693_write_buffer buffer;
};
-static const struct i2c_device_id ov5693_id[] = {
- {OV5693_NAME, 0},
- {}
-};
-
static struct ov5693_reg const ov5693_global_setting[] = {
{OV5693_8BIT, 0x0103, 0x01},
{OV5693_8BIT, 0x3001, 0x0a},
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.c b/drivers/staging/media/atomisp/i2c/ov8858.c
index 43e1638fd674..ba147ac2e36f 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.c
+++ b/drivers/staging/media/atomisp/i2c/ov8858.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -480,8 +476,6 @@ static int ov8858_priv_int_data_init(struct v4l2_subdev *sd)
if (!dev->otp_data) {
dev->otp_data = devm_kzalloc(&client->dev, size, GFP_KERNEL);
if (!dev->otp_data) {
- dev_err(&client->dev, "%s: can't allocate memory",
- __func__);
r = -ENOMEM;
goto error3;
}
@@ -714,10 +708,6 @@ static int __power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (dev->platform_data->v1p2_ctrl) {
ret = dev->platform_data->v1p2_ctrl(sd, flag);
if (ret) {
@@ -769,10 +759,6 @@ static int __gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!client || !dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
if (dev->platform_data->gpio0_ctrl)
return dev->platform_data->gpio0_ctrl(sd, flag);
@@ -1575,15 +1561,6 @@ static int ov8858_s_config(struct v4l2_subdev *sd,
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "platform init error %d!\n", ret);
- return ret;
- }
- }
-
ret = __ov8858_s_power(sd, 1);
if (ret) {
dev_err(&client->dev, "power-up error %d!\n", ret);
@@ -1628,8 +1605,6 @@ fail_detect:
fail_csi_cfg:
__ov8858_s_power(sd, 0);
fail_update:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
mutex_unlock(&dev->input_lock);
dev_err(&client->dev, "sensor power-gating failed\n");
return ret;
@@ -1930,8 +1905,6 @@ static int ov8858_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov8858_device *dev = to_ov8858_sensor(sd);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
@@ -2082,8 +2055,7 @@ static const struct v4l2_ctrl_config ctrls[] = {
}
};
-static int ov8858_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov8858_probe(struct i2c_client *client)
{
struct ov8858_device *dev;
unsigned int i;
@@ -2094,15 +2066,11 @@ static int ov8858_probe(struct i2c_client *client,
/* allocate sensor device & init sub device */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "%s: out of memory\n", __func__);
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
- if (id)
- dev->i2c_id = id->driver_data;
dev->fmt_idx = 0;
dev->sensor_id = OV_ID_DEFAULT;
dev->vcm_driver = &ov8858_vcms[OV8858_ID_DEFAULT];
@@ -2182,40 +2150,21 @@ out_free:
return ret;
}
-static const struct i2c_device_id ov8858_id[] = {
- {OV8858_NAME, 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ov8858_id);
-
static const struct acpi_device_id ov8858_acpi_match[] = {
{"INT3477"},
{},
};
+MODULE_DEVICE_TABLE(acpi, ov8858_acpi_match);
static struct i2c_driver ov8858_driver = {
.driver = {
- .name = OV8858_NAME,
- .acpi_match_table = ACPI_PTR(ov8858_acpi_match),
+ .name = "ov8858",
+ .acpi_match_table = ov8858_acpi_match,
},
- .probe = ov8858_probe,
+ .probe_new = ov8858_probe,
.remove = ov8858_remove,
- .id_table = ov8858_id,
};
-
-static __init int ov8858_init_mod(void)
-{
- return i2c_add_driver(&ov8858_driver);
-}
-
-static __exit void ov8858_exit_mod(void)
-{
- i2c_del_driver(&ov8858_driver);
-}
-
-module_init(ov8858_init_mod);
-module_exit(ov8858_exit_mod);
+module_i2c_driver(ov8858_driver);
MODULE_DESCRIPTION("A low-level driver for Omnivision OV8858 sensors");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h
index 638d1a803a2b..6c89568bb44e 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -113,7 +109,6 @@
#define OV_SUBDEV_PREFIX "ov"
#define OV_ID_DEFAULT 0x0000
-#define OV8858_NAME "ov8858"
#define OV8858_CHIP_ID 0x8858
#define OV8858_LONG_EXPO 0x3500
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
index 7d74a8899fae..f81851306832 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -113,7 +109,6 @@
#define OV_SUBDEV_PREFIX "ov"
#define OV_ID_DEFAULT 0x0000
-#define OV8858_NAME "ov8858"
#define OV8858_CHIP_ID 0x8858
#define OV8858_LONG_EXPO 0x3500
diff --git a/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h b/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
deleted file mode 100644
index dc7104470f5c..000000000000
--- a/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Access to message bus through three registers
- * in CUNIT(0:0:0) PCI configuration space.
- * MSGBUS_CTRL_REG(0xD0):
- * 31:24 = message bus opcode
- * 23:16 = message bus port
- * 15:8 = message bus address, low 8 bits.
- * 7:4 = message bus byte enables
- * MSGBUS_CTRL_EXT_REG(0xD8):
- * 31:8 = message bus address, high 24 bits.
- * MSGBUS_DATA_REG(0xD4):
- * hold the data for write or read
- */
-#define PCI_ROOT_MSGBUS_CTRL_REG 0xD0
-#define PCI_ROOT_MSGBUS_DATA_REG 0xD4
-#define PCI_ROOT_MSGBUS_CTRL_EXT_REG 0xD8
-#define PCI_ROOT_MSGBUS_READ 0x10
-#define PCI_ROOT_MSGBUS_WRITE 0x11
-#define PCI_ROOT_MSGBUS_DWORD_ENABLE 0xf0
-
-/* In BYT platform for all internal PCI devices d3 delay
- * of 3 ms is sufficient. Default value of 10 ms is overkill.
- */
-#define INTERNAL_PCI_PM_D3_WAIT 3
-
-#define ISP_SUB_CLASS 0x80
-#define SUB_CLASS_MASK 0xFF00
-
-u32 intel_mid_msgbus_read32_raw(u32 cmd);
-u32 intel_mid_msgbus_read32(u8 port, u32 addr);
-void intel_mid_msgbus_write32_raw(u32 cmd, u32 data);
-void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data);
-u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext);
-void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data);
-u32 intel_mid_soc_stepping(void);
-int intel_mid_dw_i2c_acquire_ownership(void);
-int intel_mid_dw_i2c_release_ownership(void);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index d67dd658cff9..15fa5679bae7 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifdef CSS15
@@ -212,14 +208,14 @@ struct atomisp_dis_vector {
};
-/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
* arrays that contain the coeffients for each type.
*/
struct atomisp_dvs2_coef_types {
- short __user *odd_real; /**< real part of the odd coefficients*/
- short __user *odd_imag; /**< imaginary part of the odd coefficients*/
- short __user *even_real;/**< real part of the even coefficients*/
- short __user *even_imag;/**< imaginary part of the even coefficients*/
+ short __user *odd_real; /** real part of the odd coefficients*/
+ short __user *odd_imag; /** imaginary part of the odd coefficients*/
+ short __user *even_real;/** real part of the even coefficients*/
+ short __user *even_imag;/** imaginary part of the even coefficients*/
};
/*
@@ -227,10 +223,10 @@ struct atomisp_dvs2_coef_types {
* arrays that contain the statistics for each type.
*/
struct atomisp_dvs2_stat_types {
- int __user *odd_real; /**< real part of the odd statistics*/
- int __user *odd_imag; /**< imaginary part of the odd statistics*/
- int __user *even_real;/**< real part of the even statistics*/
- int __user *even_imag;/**< imaginary part of the even statistics*/
+ int __user *odd_real; /** real part of the odd statistics*/
+ int __user *odd_imag; /** imaginary part of the odd statistics*/
+ int __user *even_real;/** real part of the even statistics*/
+ int __user *even_imag;/** imaginary part of the even statistics*/
};
struct atomisp_dis_coefficients {
@@ -394,16 +390,16 @@ struct atomisp_metadata_config {
* Generic resolution structure.
*/
struct atomisp_resolution {
- uint32_t width; /**< Width */
- uint32_t height; /**< Height */
+ uint32_t width; /** Width */
+ uint32_t height; /** Height */
};
/*
* This specifies the coordinates (x,y)
*/
struct atomisp_zoom_point {
- int32_t x; /**< x coordinate */
- int32_t y; /**< y coordinate */
+ int32_t x; /** x coordinate */
+ int32_t y; /** y coordinate */
};
/*
@@ -415,9 +411,9 @@ struct atomisp_zoom_region {
};
struct atomisp_dz_config {
- uint32_t dx; /**< Horizontal zoom factor */
- uint32_t dy; /**< Vertical zoom factor */
- struct atomisp_zoom_region zoom_region; /**< region for zoom */
+ uint32_t dx; /** Horizontal zoom factor */
+ uint32_t dy; /** Vertical zoom factor */
+ struct atomisp_zoom_region zoom_region; /** region for zoom */
};
struct atomisp_parm {
@@ -762,7 +758,7 @@ enum atomisp_acc_arg_type {
ATOMISP_ACC_ARG_FRAME /* Frame argument */
};
-/** ISP memories, isp2400 */
+/* ISP memories, isp2400 */
enum atomisp_acc_memory {
ATOMISP_ACC_MEMORY_PMEM0 = 0,
ATOMISP_ACC_MEMORY_DMEM0,
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
index 5390b97ac6e7..7e3ca12dd4e9 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
@@ -17,9 +17,6 @@
#include "atomisp_platform.h"
-const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
-const struct atomisp_platform_data *atomisp_get_platform_data(void);
-const struct camera_af_platform_data *camera_get_af_platform_data(void);
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
struct camera_sensor_platform_data *plat_data,
enum intel_v4l2_subdev_type type);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index dbac2b777dad..e0f0c379e7ce 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef ATOMISP_PLATFORM_H_
@@ -205,20 +201,13 @@ struct camera_vcm_control {
};
struct camera_sensor_platform_data {
- int (*gpio_ctrl)(struct v4l2_subdev *subdev, int flag);
int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag);
- int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
int (*csi_cfg)(struct v4l2_subdev *subdev, int flag);
- bool (*low_fps)(void);
- int (*platform_init)(struct i2c_client *);
- int (*platform_deinit)(void);
- char *(*msr_file_name)(void);
- struct atomisp_camera_caps *(*get_camera_caps)(void);
- int (*gpio_intr_ctrl)(struct v4l2_subdev *subdev);
- /* New G-Min power and GPIO interface, replaces
- * power/gpio_ctrl with methods to control individual
- * lines as implemented on all known camera modules. */
+ /*
+ * New G-Min power and GPIO interface to control individual
+ * lines as implemented on all known camera modules.
+ */
int (*gpio0_ctrl)(struct v4l2_subdev *subdev, int on);
int (*gpio1_ctrl)(struct v4l2_subdev *subdev, int on);
int (*v1p8_ctrl)(struct v4l2_subdev *subdev, int on);
@@ -228,12 +217,6 @@ struct camera_sensor_platform_data {
char *module_id);
};
-struct camera_af_platform_data {
- int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
-};
-
-const struct camera_af_platform_data *camera_get_af_platform_data(void);
-
struct camera_mipi_info {
enum atomisp_camera_port port;
unsigned int num_lanes;
diff --git a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
index 589f4eae38ca..8988b37943b3 100644
--- a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
+++ b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __LIBMSRLISTHELPER_H__
diff --git a/drivers/staging/media/atomisp/include/media/lm3554.h b/drivers/staging/media/atomisp/include/media/lm3554.h
index 7d6a8c05dd52..9276ce44d907 100644
--- a/drivers/staging/media/atomisp/include/media/lm3554.h
+++ b/drivers/staging/media/atomisp/include/media/lm3554.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _LM3554_H_
@@ -24,7 +20,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>
-#define LM3554_NAME "lm3554"
#define LM3554_ID 3554
#define v4l2_queryctrl_entry_integer(_id, _name,\
diff --git a/drivers/staging/media/atomisp/include/media/lm3642.h b/drivers/staging/media/atomisp/include/media/lm3642.h
deleted file mode 100644
index 545d95763335..000000000000
--- a/drivers/staging/media/atomisp/include/media/lm3642.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * include/media/lm3642.h
- *
- * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- */
-
-#ifndef _LM3642_H_
-#define _LM3642_H_
-
-#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-
-#define LM3642_NAME "lm3642"
-#define LM3642_ID 3642
-
-#define v4l2_queryctrl_entry_integer(_id, _name,\
- _minimum, _maximum, _step, \
- _default_value, _flags) \
- {\
- .id = (_id), \
- .type = V4L2_CTRL_TYPE_INTEGER, \
- .name = _name, \
- .minimum = (_minimum), \
- .maximum = (_maximum), \
- .step = (_step), \
- .default_value = (_default_value),\
- .flags = (_flags),\
- }
-#define v4l2_queryctrl_entry_boolean(_id, _name,\
- _default_value, _flags) \
- {\
- .id = (_id), \
- .type = V4L2_CTRL_TYPE_BOOLEAN, \
- .name = _name, \
- .minimum = 0, \
- .maximum = 1, \
- .step = 1, \
- .default_value = (_default_value),\
- .flags = (_flags),\
- }
-
-#define s_ctrl_id_entry_integer(_id, _name, \
- _minimum, _maximum, _step, \
- _default_value, _flags, \
- _s_ctrl, _g_ctrl) \
- {\
- .qc = v4l2_queryctrl_entry_integer(_id, _name,\
- _minimum, _maximum, _step,\
- _default_value, _flags), \
- .s_ctrl = _s_ctrl, \
- .g_ctrl = _g_ctrl, \
- }
-
-#define s_ctrl_id_entry_boolean(_id, _name, \
- _default_value, _flags, \
- _s_ctrl, _g_ctrl) \
- {\
- .qc = v4l2_queryctrl_entry_boolean(_id, _name,\
- _default_value, _flags), \
- .s_ctrl = _s_ctrl, \
- .g_ctrl = _g_ctrl, \
- }
-
-
-/* Default Values */
-#define LM3642_DEFAULT_TIMEOUT 300U
-#define LM3642_DEFAULT_RAMP_TIME 0x10 /* 1.024ms */
-#define LM3642_DEFAULT_INDICATOR_CURRENT 0x01 /* 1.88A */
-#define LM3642_DEFAULT_FLASH_CURRENT 0x0f /* 1500mA */
-
-/* Value settings for Flash Time-out Duration*/
-#define LM3642_MIN_TIMEOUT 100U
-#define LM3642_MAX_TIMEOUT 800U
-#define LM3642_TIMEOUT_STEPSIZE 100U
-
-/* Flash modes */
-#define LM3642_MODE_SHUTDOWN 0
-#define LM3642_MODE_INDICATOR 1
-#define LM3642_MODE_TORCH 2
-#define LM3642_MODE_FLASH 3
-
-/* timer delay time */
-#define LM3642_TIMER_DELAY 5
-
-/* Percentage <-> value macros */
-#define LM3642_MIN_PERCENT 0U
-#define LM3642_MAX_PERCENT 100U
-#define LM3642_CLAMP_PERCENTAGE(val) \
- clamp(val, LM3642_MIN_PERCENT, LM3642_MAX_PERCENT)
-
-#define LM3642_VALUE_TO_PERCENT(v, step) \
- (((((unsigned long)((v)+1))*(step))+50)/100)
-#define LM3642_PERCENT_TO_VALUE(p, step) \
- (((((unsigned long)(p))*100)+((step)>>1))/(step)-1)
-
-/* Product specific limits
- * TODO: get these from platform data */
-#define LM3642_FLASH_MAX_LVL 0x0F /* 1500mA */
-#define LM3642_TORCH_MAX_LVL 0x07 /* 187mA */
-#define LM3642_INDICATOR_MAX_LVL 0x01 /* 1.88A */
-
-/* Flash brightness, input is percentage, output is [0..15] */
-#define LM3642_FLASH_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_FLASH_MAX_LVL+1)>>1)) \
- /((LM3642_FLASH_MAX_LVL+1)))
-#define LM3642_FLASH_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(15, LM3642_FLASH_STEP)
-
-/* Torch brightness, input is percentage, output is [0..7] */
-#define LM3642_TORCH_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_TORCH_MAX_LVL+1)>>1)) \
- /((LM3642_TORCH_MAX_LVL+1)))
-#define LM3642_TORCH_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(0, LM3642_TORCH_STEP)
-
-/* Indicator brightness, input is percentage, output is [0..1] */
-#define LM3642_INDICATOR_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_INDICATOR_MAX_LVL+1)>>1)) \
- /((LM3642_INDICATOR_MAX_LVL+1)))
-#define LM3642_INDICATOR_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(1, LM3642_INDICATOR_STEP)
-
-/*
- * lm3642_platform_data - Flash controller platform data
- */
-struct lm3642_platform_data {
- int gpio_torch;
- int gpio_strobe;
- int (*power_ctrl)(struct v4l2_subdev *subdev, int on);
-
- unsigned int torch_en;
- unsigned int flash_en;
- unsigned int tx_en;
- unsigned int ivfm_en;
-};
-
-#endif /* _LM3642_H_ */
-
diff --git a/drivers/staging/media/atomisp/pci/Kconfig b/drivers/staging/media/atomisp/pci/Kconfig
index a72421431c7a..41f116d52060 100644
--- a/drivers/staging/media/atomisp/pci/Kconfig
+++ b/drivers/staging/media/atomisp/pci/Kconfig
@@ -3,11 +3,12 @@
#
config VIDEO_ATOMISP
- tristate "Intel Atom Image Signal Processor Driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF_VMALLOC
- ---help---
- Say Y here if your platform supports Intel Atom SoC
- camera imaging subsystem.
- To compile this driver as a module, choose M here: the
- module will be called atomisp
+ tristate "Intel Atom Image Signal Processor Driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select IOSF_MBI
+ select VIDEOBUF_VMALLOC
+ ---help---
+ Say Y here if your platform supports Intel Atom SoC
+ camera imaging subsystem.
+ To compile this driver as a module, choose M here: the
+ module will be called atomisp
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
index 513a430ee01a..5d102a4f8aff 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
index 1eac329339b7..a6638edee360 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
index 5b58e7d9ca5b..56386154643b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
index f48bf451c1f5..debf0e3853ff 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/firmware.h>
@@ -27,7 +23,8 @@
#include <linux/kfifo.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-#include <asm/intel-mid.h>
+
+#include <asm/iosf_mbi.h>
#include <media/v4l2-event.h>
#include <media/videobuf-vmalloc.h>
@@ -143,36 +140,36 @@ static int write_target_freq_to_hw(struct atomisp_device *isp,
unsigned int ratio, timeout, guar_ratio;
u32 isp_sspm1 = 0;
int i;
+
if (!isp->hpll_freq) {
dev_err(isp->dev, "failed to get hpll_freq. no change to freq\n");
return -EINVAL;
}
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
if (isp_sspm1 & ISP_FREQ_VALID_MASK) {
dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n");
- intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET));
}
ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1;
guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1;
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET);
for (i = 0; i < ISP_DFS_TRY_TIMES; i++) {
- intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
isp_sspm1
| ratio << ISP_REQ_FREQ_OFFSET
| 1 << ISP_FREQ_VALID_OFFSET
| guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET);
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
-
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
timeout = 20;
while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) {
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n");
udelay(100);
timeout--;
@@ -187,10 +184,10 @@ static int write_target_freq_to_hw(struct atomisp_device *isp,
return -EINVAL;
}
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
timeout = 10;
while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) {
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n",
new_freq);
udelay(100);
@@ -1660,20 +1657,15 @@ void atomisp_css_flush(struct atomisp_device *isp)
dev_dbg(isp->dev, "atomisp css flush done\n");
}
-#ifndef ISP2401
-void atomisp_wdt(unsigned long isp_addr)
-#else
-void atomisp_wdt(unsigned long pipe_addr)
-#endif
+void atomisp_wdt(struct timer_list *t)
{
#ifndef ISP2401
- struct atomisp_device *isp = (struct atomisp_device *)isp_addr;
+ struct atomisp_sub_device *asd = from_timer(asd, t, wdt);
#else
- struct atomisp_video_pipe *pipe =
- (struct atomisp_video_pipe *)pipe_addr;
+ struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt);
struct atomisp_sub_device *asd = pipe->asd;
- struct atomisp_device *isp = asd->isp;
#endif
+ struct atomisp_device *isp = asd->isp;
#ifdef ISP2401
atomic_inc(&pipe->wdt_count);
@@ -5195,7 +5187,7 @@ int get_frame_info_nop(struct atomisp_sub_device *asd,
return 0;
}
-/**
+/*
* Resets CSS parameters that depend on input resolution.
*
* Update params like CSS RAW binning, 2ppc mode and pp_input
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
index 31ba4e613d13..bdc73862fb79 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -85,11 +81,7 @@ static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address)
void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_wdt_work(struct work_struct *work);
-#ifndef ISP2401
-void atomisp_wdt(unsigned long isp_addr);
-#else
-void atomisp_wdt(unsigned long pipe_addr);
-#endif
+void atomisp_wdt(struct timer_list *t);
void atomisp_setup_flash(struct atomisp_sub_device *asd);
irqreturn_t atomisp_isr(int irq, void *dev);
irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
index 69d1526da362..2558193045a6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
index fb8b8fab4e92..3ef850cd25bd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
index 5027fd20d966..b7f9da014641 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -4055,7 +4051,7 @@ int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
unsigned int *zoom)
{
- struct ia_css_dz_config dz_config; /**< Digital Zoom */
+ struct ia_css_dz_config dz_config; /** Digital Zoom */
struct ia_css_isp_config isp_config;
struct atomisp_device *isp = asd->isp;
@@ -4526,7 +4522,7 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
for (i = 0; i < isp->num_of_streams; i++)
atomisp_wdt_stop(&isp->asd[i], 0);
#ifndef ISP2401
- atomisp_wdt((unsigned long)isp);
+ atomisp_wdt(&isp->asd[0].wdt);
#else
queue_work(isp->wdt_work_queue, &isp->wdt_work);
#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
index b62ad9082018..b03711668eda 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
index 0592ac1f2832..44c21813a06e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifdef CONFIG_COMPAT
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
index 750478f614d6..95669eedaad1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_COMPAT_IOCTL32_H__
@@ -32,17 +28,17 @@ struct atomisp_histogram32 {
};
struct atomisp_dvs2_stat_types32 {
- compat_uptr_t odd_real; /**< real part of the odd statistics*/
- compat_uptr_t odd_imag; /**< imaginary part of the odd statistics*/
- compat_uptr_t even_real;/**< real part of the even statistics*/
- compat_uptr_t even_imag;/**< imaginary part of the even statistics*/
+ compat_uptr_t odd_real; /** real part of the odd statistics*/
+ compat_uptr_t odd_imag; /** imaginary part of the odd statistics*/
+ compat_uptr_t even_real;/** real part of the even statistics*/
+ compat_uptr_t even_imag;/** imaginary part of the even statistics*/
};
struct atomisp_dvs2_coef_types32 {
- compat_uptr_t odd_real; /**< real part of the odd coefficients*/
- compat_uptr_t odd_imag; /**< imaginary part of the odd coefficients*/
- compat_uptr_t even_real;/**< real part of the even coefficients*/
- compat_uptr_t even_imag;/**< imaginary part of the even coefficients*/
+ compat_uptr_t odd_real; /** real part of the odd coefficients*/
+ compat_uptr_t odd_imag; /** imaginary part of the odd coefficients*/
+ compat_uptr_t even_real;/** real part of the even coefficients*/
+ compat_uptr_t even_imag;/** imaginary part of the even coefficients*/
};
struct atomisp_dvs2_statistics32 {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
index 2c5036685447..fa03b78c3580 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
index faa9cf7e05c0..0191d28a55bc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_CSI2_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
index 204d941cdb6c..54e28605b5de 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_DFS_TABLES_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
index 1ae2358de8d4..7129b88456cb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -162,7 +158,7 @@ static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
return size;
}
-static struct driver_attribute iunit_drvfs_attrs[] = {
+static const struct driver_attribute iunit_drvfs_attrs[] = {
__ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store),
__ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store),
__ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store),
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
index 5cb717b0c1c2..b91bfef21639 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
index c766119bf798..377ec2a9fa6d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
index 1b86abd35c38..61fdeb5ee60a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index d8cfed358d55..dd7596d8763d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -1137,10 +1133,8 @@ static int remove_pad_from_frame(struct atomisp_device *isp,
ia_css_ptr store = load;
buffer = kmalloc(width*sizeof(load), GFP_KERNEL);
- if (!buffer) {
- dev_err(isp->dev, "out of memory.\n");
+ if (!buffer)
return -ENOMEM;
- }
load += ISP_LEFT_PAD;
for (i = 0; i < height; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
index 8471e391501a..2faab3429d43 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
index e9650cb75ba5..55ba185b43a0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _atomisp_helper_h_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
index 7542a72f1d0f..52a6f8002048 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_INTERNAL_H__
@@ -29,9 +25,6 @@
#include <linux/pm_qos.h>
#include <linux/idr.h>
-#include <asm/intel-mid.h>
-#include "../../include/asm/intel_mid_pcihelpers.h"
-
#include <media/media-device.h>
#include <media/v4l2-subdev.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
index 717647951fb6..339b5d31e1f1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
@@ -14,17 +14,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/delay.h>
#include <linux/pci.h>
-#include <asm/intel-mid.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -943,10 +938,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
while (count--) {
s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL);
- if (!s3a_buf) {
- dev_err(isp->dev, "s3a stat buf alloc failed\n");
+ if (!s3a_buf)
goto error;
- }
if (atomisp_css_allocate_stat_buffers(
asd, stream_id, s3a_buf, NULL, NULL)) {
@@ -965,7 +958,6 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
while (count--) {
dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
if (!dis_buf) {
- dev_err(isp->dev, "dis stat buf alloc failed\n");
kfree(s3a_buf);
goto error;
}
@@ -990,10 +982,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
while (count--) {
md_buf = kzalloc(sizeof(struct atomisp_metadata_buf),
GFP_KERNEL);
- if (!md_buf) {
- dev_err(isp->dev, "metadata buf alloc failed\n");
+ if (!md_buf)
goto error;
- }
if (atomisp_css_allocate_stat_buffers(
asd, stream_id, NULL, NULL, md_buf)) {
@@ -2943,13 +2933,15 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
#else
if (isp->motor)
#endif
- err = v4l2_subdev_call(
#ifndef ISP2401
+ err = v4l2_subdev_call(
isp->inputs[asd->input_curr].motor,
+ core, ioctl, cmd, arg);
#else
+ err = v4l2_subdev_call(
isp->motor,
-#endif
core, ioctl, cmd, arg);
+#endif
else
err = v4l2_subdev_call(
isp->inputs[asd->input_curr].camera,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
index fb5fadb5332b..0d2785b9ef99 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
index 744ab6eb42a0..70b53988553c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -25,7 +21,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/intel-mid.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mediabus.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
index ba5c2ab14253..c3eba675da06 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_SUBDEV_H__
@@ -227,7 +223,7 @@ struct atomisp_subdev_params {
bool dis_proj_data_valid;
- struct ia_css_dz_config dz_config; /**< Digital Zoom */
+ struct ia_css_dz_config dz_config; /** Digital Zoom */
struct ia_css_capture_config capture_config;
struct atomisp_css_isp_config config;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
index af09218d8b71..319ded6a96da 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_TABLES_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
index 48b96048cab4..b71cc7bcdbab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
index 64ab60f02e85..af354c4bfd3e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
index 5ce282d6c939..462b296554c7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#undef TRACE_SYSTEM
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
index 663aa916e3ca..3c260f8b52e2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -28,6 +24,8 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <asm/iosf_mbi.h>
+
#include "../../include/linux/atomisp_gmin_platform.h"
#include "atomisp_cmd.h"
@@ -46,7 +44,6 @@
#include "hrt/hive_isp_css_mm_hrt.h"
#include "device_access.h"
-#include <asm/intel-mid.h>
/* G-Min addition: pull this in from intel_mid_pm.h */
#define CSTATE_EXIT_LATENCY_C1 1
@@ -386,28 +383,23 @@ done:
*/
static void punit_ddr_dvfs_enable(bool enable)
{
- int reg = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSDVFS);
int door_bell = 1 << 8;
int max_wait = 30;
+ int reg;
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
if (enable) {
reg &= ~(MRFLD_BIT0 | MRFLD_BIT1);
} else {
reg |= (MRFLD_BIT1 | door_bell);
reg &= ~(MRFLD_BIT0);
}
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSDVFS, reg);
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSDVFS, reg);
-
- /*Check Req_ACK to see freq status, wait until door_bell is cleared*/
- if (reg & door_bell) {
- while (max_wait--) {
- if (0 == (intel_mid_msgbus_read32(PUNIT_PORT,
- MRFLD_ISPSSDVFS) & door_bell))
- break;
-
- usleep_range(100, 500);
- }
+ /* Check Req_ACK to see freq status, wait until door_bell is cleared */
+ while ((reg & door_bell) && max_wait--) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
+ usleep_range(100, 500);
}
if (max_wait == -1)
@@ -421,10 +413,10 @@ int atomisp_mrfld_power_down(struct atomisp_device *isp)
u32 reg_value;
/* writing 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
reg_value |= MRFLD_ISPSSPM0_IUNIT_POWER_OFF;
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value);
/*WA:Enable DVFS*/
if (IS_CHT)
@@ -437,8 +429,7 @@ int atomisp_mrfld_power_down(struct atomisp_device *isp)
*/
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT,
- MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
dev_dbg(isp->dev, "power-off in progress, ISPSSPM0: 0x%x\n",
reg_value);
/* wait until ISPSSPM0 bit[25:24] shows 0x3 */
@@ -477,14 +468,14 @@ int atomisp_mrfld_power_up(struct atomisp_device *isp)
msleep(10);
/* writing 0x0 to ISPSSPM0 bit[1:0] to power off the IUNIT */
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value);
/* FIXME: experienced value for delay */
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
dev_dbg(isp->dev, "power-on in progress, ISPSSPM0: 0x%x\n",
reg_value);
/* wait until ISPSSPM0 bit[25:24] shows 0x0 */
@@ -755,7 +746,6 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
&subdevs->v4l2_subdev.board_info;
struct i2c_adapter *adapter =
i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id);
- struct camera_sensor_platform_data *sensor_pdata;
int sensor_num, i;
if (adapter == NULL) {
@@ -807,13 +797,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
* pixel_format.
*/
isp->inputs[isp->input_cnt].frame_size.pixel_format = 0;
- sensor_pdata = (struct camera_sensor_platform_data *)
- board_info->platform_data;
- if (sensor_pdata->get_camera_caps)
- isp->inputs[isp->input_cnt].camera_caps =
- sensor_pdata->get_camera_caps();
- else
- isp->inputs[isp->input_cnt].camera_caps =
+ isp->inputs[isp->input_cnt].camera_caps =
atomisp_get_default_camera_caps();
sensor_num = isp->inputs[isp->input_cnt]
.camera_caps->sensor_num;
@@ -1020,7 +1004,7 @@ csi_and_subdev_probe_failed:
v4l2_device_unregister(&isp->v4l2_dev);
v4l2_device_failed:
media_device_unregister(&isp->media_dev);
- media_device_cleanup(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
return ret;
}
@@ -1155,17 +1139,12 @@ static int init_atomisp_wdts(struct atomisp_device *isp)
struct atomisp_sub_device *asd = &isp->asd[i];
asd = &isp->asd[i];
#ifndef ISP2401
- setup_timer(&asd->wdt, atomisp_wdt, (unsigned long)isp);
+ timer_setup(&asd->wdt, atomisp_wdt, 0);
#else
- setup_timer(&asd->video_out_capture.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_capture);
- setup_timer(&asd->video_out_preview.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_preview);
- setup_timer(&asd->video_out_vf.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_vf);
- setup_timer(&asd->video_out_video_capture.wdt,
- atomisp_wdt,
- (unsigned long)&asd->video_out_video_capture);
+ timer_setup(&asd->video_out_capture.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_preview.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_video_capture.wdt, atomisp_wdt, 0);
#endif
}
return 0;
@@ -1323,7 +1302,7 @@ static int atomisp_pci_probe(struct pci_dev *dev,
isp->dfs = &dfs_config_cht;
isp->pdev->d3cold_delay = 0;
- val = intel_mid_msgbus_read32(CCK_PORT, CCK_FUSE_REG_0);
+ iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val);
switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
case 0x00:
isp->hpll_freq = HPLL_FREQ_800MHZ;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
index 191b2e57a810..944a6cf40a2f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
index 766218ed3649..914aa7f98700 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
@@ -18,7 +18,6 @@
#include <sp.h>
#include <type_support.h>
#include <math_support.h>
-#include <storage_class.h>
#include <assert_support.h>
#include <platform_support.h>
#include "ia_css_circbuf_comm.h"
@@ -45,7 +44,7 @@ struct ia_css_circbuf_s {
* @param elems An array of elements.
* @param desc The descriptor set to the size using ia_css_circbuf_desc_init().
*/
-STORAGE_CLASS_EXTERN void ia_css_circbuf_create(
+extern void ia_css_circbuf_create(
ia_css_circbuf_t *cb,
ia_css_circbuf_elem_t *elems,
ia_css_circbuf_desc_t *desc);
@@ -55,7 +54,7 @@ STORAGE_CLASS_EXTERN void ia_css_circbuf_create(
*
* @param cb The pointer to the circular buffer.
*/
-STORAGE_CLASS_EXTERN void ia_css_circbuf_destroy(
+extern void ia_css_circbuf_destroy(
ia_css_circbuf_t *cb);
/**
@@ -68,7 +67,7 @@ STORAGE_CLASS_EXTERN void ia_css_circbuf_destroy(
*
* @return the pop-out value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_pop(
+extern uint32_t ia_css_circbuf_pop(
ia_css_circbuf_t *cb);
/**
@@ -82,7 +81,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_pop(
*
* @return the extracted value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_extract(
+extern uint32_t ia_css_circbuf_extract(
ia_css_circbuf_t *cb,
int offset);
@@ -97,7 +96,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_extract(
* @param elem The pointer to the element.
* @param val The value to be set.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_set_val(
+static inline void ia_css_circbuf_elem_set_val(
ia_css_circbuf_elem_t *elem,
uint32_t val)
{
@@ -111,7 +110,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_set_val(
*
* @param elem The pointer to the element.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_init(
+static inline void ia_css_circbuf_elem_init(
ia_css_circbuf_elem_t *elem)
{
OP___assert(elem != NULL);
@@ -124,7 +123,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_init(
* @param src The element as the copy source.
* @param dest The element as the copy destination.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_cpy(
+static inline void ia_css_circbuf_elem_cpy(
ia_css_circbuf_elem_t *src,
ia_css_circbuf_elem_t *dest)
{
@@ -143,7 +142,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_cpy(
*
* @return the position at offset.
*/
-STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_get_pos_at_offset(
+static inline uint8_t ia_css_circbuf_get_pos_at_offset(
ia_css_circbuf_t *cb,
uint32_t base,
int offset)
@@ -176,7 +175,7 @@ STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_get_pos_at_offset(
*
* @return the offset.
*/
-STORAGE_CLASS_INLINE int ia_css_circbuf_get_offset(
+static inline int ia_css_circbuf_get_offset(
ia_css_circbuf_t *cb,
uint32_t src_pos,
uint32_t dest_pos)
@@ -201,7 +200,7 @@ STORAGE_CLASS_INLINE int ia_css_circbuf_get_offset(
*
* TODO: Test this API.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_size(
+static inline uint32_t ia_css_circbuf_get_size(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -217,7 +216,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_size(
*
* @return the number of available elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_num_elems(
+static inline uint32_t ia_css_circbuf_get_num_elems(
ia_css_circbuf_t *cb)
{
int num;
@@ -239,7 +238,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_num_elems(
* - true when it is empty.
* - false when it is not empty.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_is_empty(
+static inline bool ia_css_circbuf_is_empty(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -257,7 +256,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_is_empty(
* - true when it is full.
* - false when it is not full.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
+static inline bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
OP___assert(cb->desc != NULL);
@@ -274,7 +273,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
* @param cb The pointer to the circular buffer.
* @param elem The new element.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_write(
+static inline void ia_css_circbuf_write(
ia_css_circbuf_t *cb,
ia_css_circbuf_elem_t elem)
{
@@ -298,7 +297,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_write(
* @param cb The pointer to the circular buffer.
* @param val The value to be pushed in.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_push(
+static inline void ia_css_circbuf_push(
ia_css_circbuf_t *cb,
uint32_t val)
{
@@ -321,7 +320,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_push(
*
* @return: The number of free elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_free_elems(
+static inline uint32_t ia_css_circbuf_get_free_elems(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -338,7 +337,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_free_elems(
*
* @return the elements value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek(
+extern uint32_t ia_css_circbuf_peek(
ia_css_circbuf_t *cb,
int offset);
@@ -350,7 +349,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek(
*
* @return the elements value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek_from_start(
+extern uint32_t ia_css_circbuf_peek_from_start(
ia_css_circbuf_t *cb,
int offset);
@@ -369,7 +368,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek_from_start(
* @return true on succesfully increasing the size
* false on failure
*/
-STORAGE_CLASS_EXTERN bool ia_css_circbuf_increase_size(
+extern bool ia_css_circbuf_increase_size(
ia_css_circbuf_t *cb,
unsigned int sz_delta,
ia_css_circbuf_elem_t *elems);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
index a8447d409c31..8dd7cd6cd3d8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
@@ -17,7 +17,6 @@
#include <type_support.h>
#include <math_support.h>
-#include <storage_class.h>
#include <platform_support.h>
#include <sp.h>
#include "ia_css_circbuf_comm.h"
@@ -35,7 +34,7 @@
* - true when it is empty.
* - false when it is not empty.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_empty(
+static inline bool ia_css_circbuf_desc_is_empty(
ia_css_circbuf_desc_t *cb_desc)
{
OP___assert(cb_desc != NULL);
@@ -52,7 +51,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_empty(
* - true when it is full.
* - false when it is not full.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_full(
+static inline bool ia_css_circbuf_desc_is_full(
ia_css_circbuf_desc_t *cb_desc)
{
OP___assert(cb_desc != NULL);
@@ -65,7 +64,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_full(
* @param cb_desc The pointer circular buffer descriptor
* @param size The size of the circular buffer
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_desc_init(
+static inline void ia_css_circbuf_desc_init(
ia_css_circbuf_desc_t *cb_desc,
int8_t size)
{
@@ -82,7 +81,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_desc_init(
*
* @return the position in the circular buffer descriptor.
*/
-STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_desc_get_pos_at_offset(
+static inline uint8_t ia_css_circbuf_desc_get_pos_at_offset(
ia_css_circbuf_desc_t *cb_desc,
uint32_t base,
int offset)
@@ -114,7 +113,7 @@ STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_desc_get_pos_at_offset(
*
* @return the offset.
*/
-STORAGE_CLASS_INLINE int ia_css_circbuf_desc_get_offset(
+static inline int ia_css_circbuf_desc_get_offset(
ia_css_circbuf_desc_t *cb_desc,
uint32_t src_pos,
uint32_t dest_pos)
@@ -135,7 +134,7 @@ STORAGE_CLASS_INLINE int ia_css_circbuf_desc_get_offset(
*
* @return The number of available elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_num_elems(
+static inline uint32_t ia_css_circbuf_desc_get_num_elems(
ia_css_circbuf_desc_t *cb_desc)
{
int num;
@@ -155,7 +154,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_num_elems(
*
* @return: The number of free elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_free_elems(
+static inline uint32_t ia_css_circbuf_desc_get_free_elems(
ia_css_circbuf_desc_t *cb_desc)
{
uint32_t num;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
index 19bae1610fb6..050d60f0894f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
@@ -21,7 +21,7 @@
* Forward declarations.
*
**********************************************************************/
-/**
+/*
* @brief Read the oldest element from the circular buffer.
* Read the oldest element WITHOUT checking whehter the
* circular buffer is empty or not. The oldest element is
@@ -34,7 +34,7 @@
static inline ia_css_circbuf_elem_t
ia_css_circbuf_read(ia_css_circbuf_t *cb);
-/**
+/*
* @brief Shift a chunk of elements in the circular buffer.
* A chunk of elements (i.e. the ones from the "start" position
* to the "chunk_src" position) are shifted in the circular buffer,
@@ -48,7 +48,7 @@ static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
uint32_t chunk_src,
uint32_t chunk_dest);
-/**
+/*
* @brief Get the "val" field in the element.
*
* @param elem The pointer to the element.
@@ -63,7 +63,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem);
* Non-inline functions.
*
**********************************************************************/
-/**
+/*
* @brief Create the circular buffer.
* Refer to "ia_css_circbuf.h" for details.
*/
@@ -88,7 +88,7 @@ ia_css_circbuf_create(ia_css_circbuf_t *cb,
cb->elems = elems;
}
-/**
+/*
* @brief Destroy the circular buffer.
* Refer to "ia_css_circbuf.h" for details.
*/
@@ -99,7 +99,7 @@ void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
cb->elems = NULL;
}
-/**
+/*
* @brief Pop a value out of the circular buffer.
* Refer to "ia_css_circbuf.h" for details.
*/
@@ -116,7 +116,7 @@ uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
return ret;
}
-/**
+/*
* @brief Extract a value out of the circular buffer.
* Refer to "ia_css_circbuf.h" for details.
*/
@@ -166,7 +166,7 @@ uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
return val;
}
-/**
+/*
* @brief Peek an element from the circular buffer.
* Refer to "ia_css_circbuf.h" for details.
*/
@@ -180,7 +180,7 @@ uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
return cb->elems[pos].val;
}
-/**
+/*
* @brief Get the value of an element from the circular buffer.
* Refer to "ia_css_circbuf.h" for details.
*/
@@ -194,7 +194,7 @@ uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
return cb->elems[pos].val;
}
-/** @brief increase size of a circular buffer.
+/* @brief increase size of a circular buffer.
* Use 'CAUTION' before using this function. This was added to
* support / fix issue with increasing size for tagger only
* Please refer to "ia_css_circbuf.h" for details.
@@ -252,7 +252,7 @@ bool ia_css_circbuf_increase_size(
* Inline functions.
*
****************************************************************/
-/**
+/*
* @brief Get the "val" field in the element.
* Refer to "Forward declarations" for details.
*/
@@ -262,7 +262,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem)
return elem->val;
}
-/**
+/*
* @brief Read the oldest element from the circular buffer.
* Refer to "Forward declarations" for details.
*/
@@ -282,7 +282,7 @@ ia_css_circbuf_read(ia_css_circbuf_t *cb)
return elem;
}
-/**
+/*
* @brief Shift a chunk of elements in the circular buffer.
* Refer to "Forward declarations" for details.
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
index 616789d9b3f6..a6d650a9a1f4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
@@ -19,7 +19,7 @@
#include <ia_css_frame_public.h> /* ia_css_frame_info */
#include <ia_css_binary.h> /* ia_css_binary_descr */
-/** @brief Get a binary descriptor for copy.
+/* @brief Get a binary descriptor for copy.
*
* @param[in] pipe
* @param[out] copy_desc
@@ -36,7 +36,7 @@ extern void ia_css_pipe_get_copy_binarydesc(
struct ia_css_frame_info *out_info,
struct ia_css_frame_info *vf_info);
-/** @brief Get a binary descriptor for vfpp.
+/* @brief Get a binary descriptor for vfpp.
*
* @param[in] pipe
* @param[out] vfpp_descr
@@ -51,7 +51,7 @@ extern void ia_css_pipe_get_vfpp_binarydesc(
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *out_info);
-/** @brief Get numerator and denominator of bayer downscaling factor.
+/* @brief Get numerator and denominator of bayer downscaling factor.
*
* @param[in] bds_factor: The bayer downscaling factor.
* (= The bds_factor member in the sh_css_bds_factor structure.)
@@ -67,7 +67,7 @@ extern enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
unsigned int *bds_factor_numerator,
unsigned int *bds_factor_denominator);
-/** @brief Get a binary descriptor for preview stage.
+/* @brief Get a binary descriptor for preview stage.
*
* @param[in] pipe
* @param[out] preview_descr
@@ -86,7 +86,7 @@ extern enum ia_css_err ia_css_pipe_get_preview_binarydesc(
struct ia_css_frame_info *out_info,
struct ia_css_frame_info *vf_info);
-/** @brief Get a binary descriptor for video stage.
+/* @brief Get a binary descriptor for video stage.
*
* @param[in/out] pipe
* @param[out] video_descr
@@ -105,7 +105,7 @@ extern enum ia_css_err ia_css_pipe_get_video_binarydesc(
struct ia_css_frame_info *vf_info,
int stream_config_left_padding);
-/** @brief Get a binary descriptor for yuv scaler stage.
+/* @brief Get a binary descriptor for yuv scaler stage.
*
* @param[in/out] pipe
* @param[out] yuv_scaler_descr
@@ -124,7 +124,7 @@ void ia_css_pipe_get_yuvscaler_binarydesc(
struct ia_css_frame_info *internal_out_info,
struct ia_css_frame_info *vf_info);
-/** @brief Get a binary descriptor for capture pp stage.
+/* @brief Get a binary descriptor for capture pp stage.
*
* @param[in/out] pipe
* @param[out] capture_pp_descr
@@ -140,7 +140,7 @@ extern void ia_css_pipe_get_capturepp_binarydesc(
struct ia_css_frame_info *out_info,
struct ia_css_frame_info *vf_info);
-/** @brief Get a binary descriptor for primary capture.
+/* @brief Get a binary descriptor for primary capture.
*
* @param[in] pipe
* @param[out] prim_descr
@@ -158,7 +158,7 @@ extern void ia_css_pipe_get_primary_binarydesc(
struct ia_css_frame_info *vf_info,
unsigned int stage_idx);
-/** @brief Get a binary descriptor for pre gdc stage.
+/* @brief Get a binary descriptor for pre gdc stage.
*
* @param[in] pipe
* @param[out] pre_gdc_descr
@@ -173,7 +173,7 @@ extern void ia_css_pipe_get_pre_gdc_binarydesc(
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *out_info);
-/** @brief Get a binary descriptor for gdc stage.
+/* @brief Get a binary descriptor for gdc stage.
*
* @param[in] pipe
* @param[out] gdc_descr
@@ -188,7 +188,7 @@ extern void ia_css_pipe_get_gdc_binarydesc(
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *out_info);
-/** @brief Get a binary descriptor for post gdc.
+/* @brief Get a binary descriptor for post gdc.
*
* @param[in] pipe
* @param[out] post_gdc_descr
@@ -205,7 +205,7 @@ extern void ia_css_pipe_get_post_gdc_binarydesc(
struct ia_css_frame_info *out_info,
struct ia_css_frame_info *vf_info);
-/** @brief Get a binary descriptor for de.
+/* @brief Get a binary descriptor for de.
*
* @param[in] pipe
* @param[out] pre_de_descr
@@ -220,7 +220,7 @@ extern void ia_css_pipe_get_pre_de_binarydesc(
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *out_info);
-/** @brief Get a binary descriptor for pre anr stage.
+/* @brief Get a binary descriptor for pre anr stage.
*
* @param[in] pipe
* @param[out] pre_anr_descr
@@ -235,7 +235,7 @@ extern void ia_css_pipe_get_pre_anr_binarydesc(
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *out_info);
-/** @brief Get a binary descriptor for ANR stage.
+/* @brief Get a binary descriptor for ANR stage.
*
* @param[in] pipe
* @param[out] anr_descr
@@ -250,7 +250,7 @@ extern void ia_css_pipe_get_anr_binarydesc(
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *out_info);
-/** @brief Get a binary descriptor for post anr stage.
+/* @brief Get a binary descriptor for post anr stage.
*
* @param[in] pipe
* @param[out] post_anr_descr
@@ -267,7 +267,7 @@ extern void ia_css_pipe_get_post_anr_binarydesc(
struct ia_css_frame_info *out_info,
struct ia_css_frame_info *vf_info);
-/** @brief Get a binary descriptor for ldc stage.
+/* @brief Get a binary descriptor for ldc stage.
*
* @param[in/out] pipe
* @param[out] capture_pp_descr
@@ -282,7 +282,7 @@ extern void ia_css_pipe_get_ldc_binarydesc(
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *out_info);
-/** @brief Calculates the required BDS factor
+/* @brief Calculates the required BDS factor
*
* @param[in] input_res
* @param[in] output_res
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
index ba8858759b30..155b6fb4722b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
@@ -18,7 +18,7 @@
#include <ia_css_types.h>
#include <ia_css_frame_public.h>
-/** @brief Get Input format bits per pixel based on stream configuration of this
+/* @brief Get Input format bits per pixel based on stream configuration of this
* pipe.
*
* @param[in] pipe
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
index 17d3b7de93ba..98a2a3e9b3e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
@@ -22,6 +22,7 @@
#include <assert_support.h>
/* HRT_GDC_N */
#include "gdc_device.h"
+#include <linux/kernel.h>
/* This module provides a binary descriptions to used to find a binary. Since,
* every stage is associated with a binary, it implicity helps stage
@@ -147,11 +148,9 @@ enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
unsigned int *bds_factor_denominator)
{
unsigned int i;
- unsigned int bds_list_size = sizeof(bds_factors_list) /
- sizeof(struct sh_css_bds_factor);
/* Loop over all bds factors until a match is found */
- for (i = 0; i < bds_list_size; i++) {
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
if (bds_factors_list[i].bds_factor == bds_factor) {
*bds_factor_numerator = bds_factors_list[i].numerator;
*bds_factor_denominator = bds_factors_list[i].denominator;
@@ -170,8 +169,6 @@ enum ia_css_err binarydesc_calculate_bds_factor(
unsigned int *bds_factor)
{
unsigned int i;
- unsigned int bds_list_size = sizeof(bds_factors_list) /
- sizeof(struct sh_css_bds_factor);
unsigned int in_w = input_res.width,
in_h = input_res.height,
out_w = output_res.width, out_h = output_res.height;
@@ -186,7 +183,7 @@ enum ia_css_err binarydesc_calculate_bds_factor(
assert(out_w != 0 && out_h != 0);
/* Loop over all bds factors until a match is found */
- for (i = 0; i < bds_list_size; i++) {
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
unsigned num = bds_factors_list[i].numerator;
unsigned den = bds_factors_list[i].denominator;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
index f8b2e458f876..a8c27676a38b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
@@ -22,7 +22,7 @@
#include <ia_css_stream_public.h>
#include <ia_css_stream_format.h>
-/** @brief convert "errno" error code to "ia_css_err" error code
+/* @brief convert "errno" error code to "ia_css_err" error code
*
* @param[in] "errno" error code
* @return "ia_css_err" error code
@@ -31,7 +31,7 @@
enum ia_css_err ia_css_convert_errno(
int in_err);
-/** @brief check vf frame info.
+/* @brief check vf frame info.
*
* @param[in] info
* @return IA_CSS_SUCCESS or error code upon error.
@@ -40,7 +40,7 @@ enum ia_css_err ia_css_convert_errno(
extern enum ia_css_err ia_css_util_check_vf_info(
const struct ia_css_frame_info * const info);
-/** @brief check input configuration.
+/* @brief check input configuration.
*
* @param[in] stream_config
* @param[in] must_be_raw
@@ -52,7 +52,7 @@ extern enum ia_css_err ia_css_util_check_input(
bool must_be_raw,
bool must_be_yuv);
-/** @brief check vf and out frame info.
+/* @brief check vf and out frame info.
*
* @param[in] out_info
* @param[in] vf_info
@@ -63,7 +63,7 @@ extern enum ia_css_err ia_css_util_check_vf_out_info(
const struct ia_css_frame_info * const out_info,
const struct ia_css_frame_info * const vf_info);
-/** @brief check width and height
+/* @brief check width and height
*
* @param[in] width
* @param[in] height
@@ -75,7 +75,7 @@ extern enum ia_css_err ia_css_util_check_res(
unsigned int height);
#ifdef ISP2401
-/** @brief compare resolutions (less or equal)
+/* @brief compare resolutions (less or equal)
*
* @param[in] a resolution
* @param[in] b resolution
@@ -108,7 +108,7 @@ extern bool ia_css_util_resolution_is_even(
const struct ia_css_resolution resolution);
#endif
-/** @brief check width and height
+/* @brief check width and height
*
* @param[in] stream_format
* @param[in] two_ppc
@@ -119,7 +119,7 @@ extern unsigned int ia_css_util_input_format_bpp(
enum ia_css_stream_format stream_format,
bool two_ppc);
-/** @brief check if input format it raw
+/* @brief check if input format it raw
*
* @param[in] stream_format
* @return true if the input format is raw or false otherwise
@@ -128,7 +128,7 @@ extern unsigned int ia_css_util_input_format_bpp(
extern bool ia_css_util_is_input_format_raw(
enum ia_css_stream_format stream_format);
-/** @brief check if input format it yuv
+/* @brief check if input format it yuv
*
* @param[in] stream_format
* @return true if the input format is yuv or false otherwise
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
index 08f486e20a65..54193789a809 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
@@ -111,7 +111,7 @@ unsigned int ia_css_util_input_format_bpp(
break;
}
-return rval;
+ return rval;
}
enum ia_css_err ia_css_util_check_vf_info(
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
index 5819bcff5e55..9c0cb4a63862 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
@@ -34,7 +34,7 @@
* @brief Get the csi rx fe state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_state(
+static inline void csi_rx_fe_ctrl_get_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state)
{
@@ -73,7 +73,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_state(
* @brief Get the state of the csi rx fe dlane process.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_dlane_state(
+static inline void csi_rx_fe_ctrl_get_dlane_state(
const csi_rx_frontend_ID_t ID,
const uint32_t lane,
csi_rx_fe_ctrl_lane_t *dlane_state)
@@ -89,7 +89,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_dlane_state(
* @brief dump the csi rx fe state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_dump_state(
+static inline void csi_rx_fe_ctrl_dump_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state)
{
@@ -118,7 +118,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_dump_state(
* @brief Get the csi rx be state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_get_state(
+static inline void csi_rx_be_ctrl_get_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state)
{
@@ -181,7 +181,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_get_state(
* @brief Dump the csi rx be state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_dump_state(
+static inline void csi_rx_be_ctrl_dump_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state)
{
@@ -225,7 +225,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_dump_state(
* @brief Load the register value.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_fe_ctrl_reg_load(
+static inline hrt_data csi_rx_fe_ctrl_reg_load(
const csi_rx_frontend_ID_t ID,
const hrt_address reg)
{
@@ -239,7 +239,7 @@ STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_fe_ctrl_reg_load(
* @brief Store a value to the register.
* Refer to "ibuf_ctrl_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_reg_store(
+static inline void csi_rx_fe_ctrl_reg_store(
const csi_rx_frontend_ID_t ID,
const hrt_address reg,
const hrt_data value)
@@ -253,7 +253,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_reg_store(
* @brief Load the register value.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_be_ctrl_reg_load(
+static inline hrt_data csi_rx_be_ctrl_reg_load(
const csi_rx_backend_ID_t ID,
const hrt_address reg)
{
@@ -267,7 +267,7 @@ STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_be_ctrl_reg_load(
* @brief Store a value to the register.
* Refer to "ibuf_ctrl_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_reg_store(
+static inline void csi_rx_be_ctrl_reg_store(
const csi_rx_backend_ID_t ID,
const hrt_address reg,
const hrt_data value)
@@ -277,6 +277,6 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_reg_store(
ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
}
-/** end of DLI */
+/* end of DLI */
#endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
index 470c92d287fe..4d07c2fe1469 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
@@ -192,7 +192,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state(
ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i, state->proc_state[i].isp_sync_state);
}
}
-/** end of NCI */
+/* end of NCI */
/*****************************************************
*
@@ -227,7 +227,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store(
ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
}
-/** end of DLI */
+/* end of DLI */
#endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
index 14d1d3b627a9..842ae340ae13 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
@@ -26,7 +26,7 @@
#include "isys_irq_private.h"
#endif
-/** Public interface */
+/* Public interface */
STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable(
const isys_irq_ID_t isys_irqc_id)
{
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
index c17ce131c9e1..e69f39893bd2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
@@ -59,7 +59,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
state->status, state->edge, state->mask, state->enable, state->level_no);
}
-/** end of NCI */
+/* end of NCI */
/* -------------------------------------------------------+
| Device level interface (DLI) |
@@ -101,7 +101,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
return value;
}
-/** end of DLI */
+/* end of DLI */
#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
index 1603a09b621a..f946105ddf43 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
@@ -122,7 +122,7 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state(
stream2mmio_print_sid_state(&(state->sid_state[i]));
}
}
-/** end of NCI */
+/* end of NCI */
/*****************************************************
*
@@ -163,6 +163,6 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store(
ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] +
reg * sizeof(hrt_data), value);
}
-/** end of DLI */
+/* end of DLI */
#endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
index 3f34b508f0bf..c5bf540eadf1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
@@ -160,5 +160,5 @@ STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
}
-/** end of DLI */
+/* end of DLI */
#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
index e7a734a9fc43..1be5c6956d65 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
@@ -46,7 +46,7 @@ struct isys2401_dma_port_cfg_s {
uint32_t cropping;
uint32_t width;
};
-/** end of DMA Port */
+/* end of DMA Port */
/************************************************
*
@@ -79,7 +79,7 @@ struct isys2401_dma_cfg_s {
isys2401_dma_extension extension;
uint32_t height;
};
-/** end of DMA Device */
+/* end of DMA Device */
/* isys2401_dma_channel limits per DMA ID */
extern const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID];
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
index 216813e42a0a..0bf2feb8bbfb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
@@ -86,6 +86,6 @@ struct pixelgen_prbs_cfg_s {
sync_generator_cfg_t sync_gen_cfg;
};
-/** end of Pixel-generator: TPG. ("pixelgen_global.h") */
+/* end of Pixel-generator: TPG. ("pixelgen_global.h") */
#endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
index 9f7ecac46273..d2e3a2deea2e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
@@ -331,7 +331,7 @@ typedef enum {
IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */
N_IBUF_CTRL_ID
} ibuf_ctrl_ID_t;
-/** end of Input-buffer Controller */
+/* end of Input-buffer Controller */
/*
* Stream2MMIO.
@@ -364,7 +364,7 @@ typedef enum {
STREAM2MMIO_SID7_ID,
N_STREAM2MMIO_SID_ID
} stream2mmio_sid_ID_t;
-/** end of Stream2MMIO */
+/* end of Stream2MMIO */
/**
* Input System 2401: CSI-MIPI recevier.
@@ -390,7 +390,7 @@ typedef enum {
CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
N_CSI_RX_DLANE_ID
} csi_rx_fe_dlane_ID_t;
-/** end of CSI-MIPI receiver */
+/* end of CSI-MIPI receiver */
typedef enum {
ISYS2401_DMA0_ID = 0,
@@ -406,7 +406,7 @@ typedef enum {
PIXELGEN2_ID,
N_PIXELGEN_ID
} pixelgen_ID_t;
-/** end of pixel-generator. ("system_global.h") */
+/* end of pixel-generator. ("system_global.h") */
typedef enum {
INPUT_SYSTEM_CSI_PORT0_ID = 0,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
index 1f6a55ff5db8..efcd6e1679e8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
@@ -31,7 +31,7 @@ more details.
#ifndef __CSS_API_VERSION_H
#define __CSS_API_VERSION_H
-/** @file
+/* @file
* CSS API version file. This file contains the version number of the CSS-API.
*
* This file is generated from a set of input files describing the CSS-API
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
index 87a25d4289ec..770db7dff5d3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
@@ -12,7 +12,7 @@
* more details.
*/
-#include <stddef.h> /* NULL */
+#include <linux/kernel.h>
#include "dma.h"
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
index 9d3a29696094..bcfb734c2ed3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
@@ -28,7 +28,7 @@ STORAGE_CLASS_EVENT_C void event_wait_for(const event_ID_t ID)
assert(ID < N_EVENT_ID);
assert(event_source_addr[ID] != ((hrt_address)-1));
(void)ia_css_device_load_uint32(event_source_addr[ID]);
-return;
+ return;
}
STORAGE_CLASS_EVENT_C void cnd_event_wait_for(const event_ID_t ID,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
index 1087944d637f..1bf292401adc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
@@ -38,12 +38,12 @@ STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = {
#include "fifo_monitor_private.h"
#endif /* __INLINE_FIFO_MONITOR__ */
-STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+static inline bool fifo_monitor_status_valid (
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id);
-STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+static inline bool fifo_monitor_status_accept(
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id);
@@ -546,7 +546,7 @@ void fifo_monitor_get_state(
return;
}
-STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+static inline bool fifo_monitor_status_valid (
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id)
@@ -556,7 +556,7 @@ STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1;
}
-STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+static inline bool fifo_monitor_status_accept(
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
index 618b2f7e9c75..d58cd7d1828d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
@@ -33,26 +33,26 @@ STORAGE_CLASS_FIFO_MONITOR_C void fifo_switch_set(
const fifo_switch_t switch_id,
const hrt_data sel)
{
-assert(ID == FIFO_MONITOR0_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-assert(switch_id < N_FIFO_SWITCH);
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
(void)ID;
gp_device_reg_store(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id], sel);
-return;
+ return;
}
STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_switch_get(
const fifo_monitor_ID_t ID,
const fifo_switch_t switch_id)
{
-assert(ID == FIFO_MONITOR0_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-assert(switch_id < N_FIFO_SWITCH);
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
(void)ID;
-return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+ return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
}
@@ -61,19 +61,19 @@ STORAGE_CLASS_FIFO_MONITOR_C void fifo_monitor_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_FIFO_MONITOR_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_monitor_reg_load(
const fifo_monitor_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_FIFO_MONITOR_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __FIFO_MONITOR_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
index 69fa616889b1..1966b147f8ab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
@@ -22,12 +22,12 @@
/*
* Local function declarations
*/
-STORAGE_CLASS_INLINE void gdc_reg_store(
+static inline void gdc_reg_store(
const gdc_ID_t ID,
const unsigned int reg,
const hrt_data value);
-STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+static inline hrt_data gdc_reg_load(
const gdc_ID_t ID,
const unsigned int reg);
@@ -62,7 +62,7 @@ void gdc_lut_store(
gdc_reg_store(ID, lut_offset++, word_0);
gdc_reg_store(ID, lut_offset++, word_1);
}
-return;
+ return;
}
/*
@@ -103,25 +103,25 @@ int gdc_get_unity(
{
assert(ID < N_GDC_ID);
(void)ID;
-return (int)(1UL << HRT_GDC_FRAC_BITS);
+ return (int)(1UL << HRT_GDC_FRAC_BITS);
}
/*
* Local function implementations
*/
-STORAGE_CLASS_INLINE void gdc_reg_store(
+static inline void gdc_reg_store(
const gdc_ID_t ID,
const unsigned int reg,
const hrt_data value)
{
ia_css_device_store_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
-STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+static inline hrt_data gdc_reg_load(
const gdc_ID_t ID,
const unsigned int reg)
{
-return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data));
+ return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data));
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
index 9a34ac052adf..da88aa3af664 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
@@ -104,5 +104,5 @@ void gp_device_get_state(
_REG_GP_SYNCGEN_FRAME_CNT_ADDR);
state->soft_reset = gp_device_reg_load(ID,
_REG_GP_SOFT_RESET_ADDR);
-return;
+ return;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
index bce1fdf79114..7c0362c29411 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
@@ -26,21 +26,21 @@ STORAGE_CLASS_GP_DEVICE_C void gp_device_reg_store(
const unsigned int reg_addr,
const hrt_data value)
{
-assert(ID < N_GP_DEVICE_ID);
-assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
ia_css_device_store_uint32(GP_DEVICE_BASE[ID] + reg_addr, value);
-return;
+ return;
}
STORAGE_CLASS_GP_DEVICE_C hrt_data gp_device_reg_load(
const gp_device_ID_t ID,
const hrt_address reg_addr)
{
-assert(ID < N_GP_DEVICE_ID);
-assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
-return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
}
#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
index 5a4eabf79ee2..bcfd443f5202 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
@@ -21,7 +21,7 @@
#endif /* __INLINE_GP_TIMER__ */
#include "system_local.h"
-/** FIXME: not sure if reg_load(), reg_store() should be API.
+/* FIXME: not sure if reg_load(), reg_store() should be API.
*/
static uint32_t
gp_timer_reg_load(uint32_t reg);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
index 6ace2184b522..b6ebf34eaa9d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
@@ -29,7 +29,7 @@ STORAGE_CLASS_GPIO_C void gpio_reg_store(
OP___assert(ID < N_GPIO_ID);
OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
@@ -38,7 +38,7 @@ STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
{
OP___assert(ID < N_GPIO_ID);
OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data));
+ return ia_css_device_load_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __GPIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
index 2b636e0e6482..32a780380e11 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
@@ -22,9 +22,9 @@
STORAGE_CLASS_HMEM_C size_t sizeof_hmem(
const hmem_ID_t ID)
{
-assert(ID < N_HMEM_ID);
+ assert(ID < N_HMEM_ID);
(void)ID;
-return HMEM_SIZE*sizeof(hmem_data_t);
+ return HMEM_SIZE*sizeof(hmem_data_t);
}
#endif /* __HMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
index d34933e44aa9..2f42a9c2771c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
@@ -26,21 +26,21 @@ STORAGE_CLASS_INPUT_FORMATTER_C void input_formatter_reg_store(
const hrt_address reg_addr,
const hrt_data value)
{
-assert(ID < N_INPUT_FORMATTER_ID);
-assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
ia_css_device_store_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr, value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_FORMATTER_C hrt_data input_formatter_reg_load(
const input_formatter_ID_t ID,
const unsigned int reg_addr)
{
-assert(ID < N_INPUT_FORMATTER_ID);
-assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
-return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
}
#endif /* __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
index f35e18987b67..bd6821e436b2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
@@ -81,27 +81,27 @@ static input_system_error_t input_system_multiplexer_cfg(
-STORAGE_CLASS_INLINE void capture_unit_get_state(
+static inline void capture_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
capture_unit_state_t *state);
-STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+static inline void acquisition_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
acquisition_unit_state_t *state);
-STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+static inline void ctrl_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
ctrl_unit_state_t *state);
-STORAGE_CLASS_INLINE void mipi_port_get_state(
+static inline void mipi_port_get_state(
const rx_ID_t ID,
const mipi_port_ID_t port_ID,
mipi_port_state_t *state);
-STORAGE_CLASS_INLINE void rx_channel_get_state(
+static inline void rx_channel_get_state(
const rx_ID_t ID,
const unsigned int ch_id,
rx_channel_state_t *state);
@@ -173,7 +173,7 @@ void input_system_get_state(
&(state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]));
}
-return;
+ return;
}
void receiver_get_state(
@@ -245,7 +245,7 @@ void receiver_get_state(
state->be_irq_clear = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX);
-return;
+ return;
}
bool is_mipi_format_yuv420(
@@ -258,7 +258,7 @@ bool is_mipi_format_yuv420(
(mipi_format == MIPI_FORMAT_YUV420_10_SHIFT));
/* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */
-return is_yuv420;
+ return is_yuv420;
}
void receiver_set_compression(
@@ -300,7 +300,7 @@ void receiver_set_compression(
reg = ((field_id < 6)?(val << (field_id * 5)):(val << ((field_id - 6) * 5)));
receiver_reg_store(ID, addr, reg);
-return;
+ return;
}
void receiver_port_enable(
@@ -319,7 +319,7 @@ void receiver_port_enable(
receiver_port_reg_store(ID, port_ID,
_HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg);
-return;
+ return;
}
bool is_receiver_port_enabled(
@@ -328,7 +328,7 @@ bool is_receiver_port_enabled(
{
hrt_data reg = receiver_port_reg_load(ID, port_ID,
_HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
-return ((reg & 0x01) != 0);
+ return ((reg & 0x01) != 0);
}
void receiver_irq_enable(
@@ -338,14 +338,14 @@ void receiver_irq_enable(
{
receiver_port_reg_store(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info);
-return;
+ return;
}
rx_irq_info_t receiver_get_irq_info(
const rx_ID_t ID,
const mipi_port_ID_t port_ID)
{
-return receiver_port_reg_load(ID,
+ return receiver_port_reg_load(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
}
@@ -356,10 +356,10 @@ void receiver_irq_clear(
{
receiver_port_reg_store(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void capture_unit_get_state(
+static inline void capture_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
capture_unit_state_t *state)
@@ -418,10 +418,10 @@ STORAGE_CLASS_INLINE void capture_unit_get_state(
sub_id,
CAPT_FSM_STATE_INFO_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+static inline void acquisition_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
acquisition_unit_state_t *state)
@@ -468,10 +468,10 @@ STORAGE_CLASS_INLINE void acquisition_unit_get_state(
sub_id,
ACQ_INT_CNTR_INFO_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+static inline void ctrl_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
ctrl_unit_state_t *state)
@@ -551,10 +551,10 @@ STORAGE_CLASS_INLINE void ctrl_unit_get_state(
sub_id,
ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void mipi_port_get_state(
+static inline void mipi_port_get_state(
const rx_ID_t ID,
const mipi_port_ID_t port_ID,
mipi_port_state_t *state)
@@ -587,10 +587,10 @@ STORAGE_CLASS_INLINE void mipi_port_get_state(
state->lane_rx_count[i] = (uint8_t)((state->rx_count)>>(i*8));
}
-return;
+ return;
}
-STORAGE_CLASS_INLINE void rx_channel_get_state(
+static inline void rx_channel_get_state(
const rx_ID_t ID,
const unsigned int ch_id,
rx_channel_state_t *state)
@@ -602,30 +602,30 @@ STORAGE_CLASS_INLINE void rx_channel_get_state(
assert(state != NULL);
switch (ch_id) {
- case 0:
- state->comp_scheme0 = receiver_reg_load(ID,
+ case 0:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
- break;
- case 1:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 1:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
- break;
- case 2:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 2:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
- break;
- case 3:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 3:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
- break;
+ break;
}
/* See Table 7.1.17,..., 7.1.24 */
@@ -640,7 +640,7 @@ STORAGE_CLASS_INLINE void rx_channel_get_state(
state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
}
-return;
+ return;
}
// MW: "2400" in the name is not good, but this is to avoid a naming conflict
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
index ed1b947b00f9..118185eb86e9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_INPUT_SYSTEM_C void input_system_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_reg_load(
const input_system_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
@@ -46,19 +46,19 @@ STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_RX_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(RX_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_reg_load(
const rx_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_RX_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
@@ -67,12 +67,12 @@ STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_RX_ID);
-assert(port_ID < N_MIPI_PORT_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
ia_css_device_store_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
@@ -80,11 +80,11 @@ STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
const mipi_port_ID_t port_ID,
const hrt_address reg)
{
-assert(ID < N_RX_ID);
-assert(port_ID < N_MIPI_PORT_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data));
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
@@ -93,12 +93,12 @@ STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(sub_ID < N_SUB_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
@@ -106,11 +106,11 @@ STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
const sub_system_ID_t sub_ID,
const hrt_address reg)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(sub_ID < N_SUB_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data));
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data));
}
#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
index 6b58bc13dc1b..51daf76c2aea 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
@@ -22,13 +22,13 @@
#include "platform_support.h" /* hrt_sleep() */
-STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+static inline void irq_wait_for_write_complete(
const irq_ID_t ID);
-STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+static inline bool any_irq_channel_enabled(
const irq_ID_t ID);
-STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+static inline irq_ID_t virq_get_irq_id(
const virq_id_t irq_ID,
unsigned int *channel_ID);
@@ -69,7 +69,7 @@ void irq_clear_all(
irq_reg_store(ID,
_HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask);
-return;
+ return;
}
/*
@@ -114,7 +114,7 @@ void irq_enable_channel(
irq_wait_for_write_complete(ID);
-return;
+ return;
}
void irq_enable_pulse(
@@ -129,7 +129,7 @@ void irq_enable_pulse(
/* output is given as edge, not pulse */
irq_reg_store(ID,
_HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out);
-return;
+ return;
}
void irq_disable_channel(
@@ -160,7 +160,7 @@ void irq_disable_channel(
irq_wait_for_write_complete(ID);
-return;
+ return;
}
enum hrt_isp_css_irq_status irq_get_channel_id(
@@ -195,7 +195,7 @@ enum hrt_isp_css_irq_status irq_get_channel_id(
if (irq_id != NULL)
*irq_id = (unsigned int)idx;
-return status;
+ return status;
}
static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = {
@@ -220,7 +220,7 @@ void irq_raise(
(unsigned int)addr, 1);
gp_device_reg_store(GP_DEVICE0_ID,
(unsigned int)addr, 0);
-return;
+ return;
}
void irq_controller_get_state(
@@ -240,7 +240,7 @@ void irq_controller_get_state(
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
state->irq_level_not_pulse = irq_reg_load(ID,
_HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
-return;
+ return;
}
bool any_virq_signal(void)
@@ -248,7 +248,7 @@ bool any_virq_signal(void)
unsigned int irq_status = irq_reg_load(IRQ0_ID,
_HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
-return (irq_status != 0);
+ return (irq_status != 0);
}
void cnd_virq_enable_channel(
@@ -279,7 +279,7 @@ void cnd_virq_enable_channel(
irq_disable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
}
}
-return;
+ return;
}
@@ -290,7 +290,7 @@ void virq_clear_all(void)
for (irq_id = (irq_ID_t)0; irq_id < N_IRQ_ID; irq_id++) {
irq_clear_all(irq_id);
}
-return;
+ return;
}
enum hrt_isp_css_irq_status virq_get_channel_signals(
@@ -320,7 +320,7 @@ enum hrt_isp_css_irq_status virq_get_channel_signals(
}
}
-return irq_status;
+ return irq_status;
}
void virq_clear_info(
@@ -333,7 +333,7 @@ void virq_clear_info(
for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
irq_info->irq_status_reg[ID] = 0;
}
-return;
+ return;
}
enum hrt_isp_css_irq_status virq_get_channel_id(
@@ -403,10 +403,10 @@ enum hrt_isp_css_irq_status virq_get_channel_id(
if (irq_id != NULL)
*irq_id = (virq_id_t)idx;
-return status;
+ return status;
}
-STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+static inline void irq_wait_for_write_complete(
const irq_ID_t ID)
{
assert(ID < N_IRQ_ID);
@@ -415,7 +415,7 @@ STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX*sizeof(hrt_data));
}
-STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+static inline bool any_irq_channel_enabled(
const irq_ID_t ID)
{
hrt_data en_reg;
@@ -425,10 +425,10 @@ STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
en_reg = irq_reg_load(ID,
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
-return (en_reg != 0);
+ return (en_reg != 0);
}
-STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+static inline irq_ID_t virq_get_irq_id(
const virq_id_t irq_ID,
unsigned int *channel_ID)
{
@@ -444,5 +444,5 @@ STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
*channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID];
-return ID;
+ return ID;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
index eb325e870e88..23a13ac696c2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_IRQ_C void irq_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_IRQ_ID);
-assert(IRQ_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_IRQ_C hrt_data irq_reg_load(
const irq_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_IRQ_ID);
-assert(IRQ_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __IRQ_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
index 47c21e486c25..531c932a48f5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
@@ -34,7 +34,7 @@ void cnd_isp_irq_enable(
isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG,
ISP_IRQ_READY_BIT);
}
-return;
+ return;
}
void isp_get_state(
@@ -94,7 +94,7 @@ void isp_get_state(
!isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG,
ISP_ICACHE_MT_SINK_BIT);
*/
-return;
+ return;
}
/* ISP functions to control the ISP state from the host, even in crun. */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
index b75d0f85d524..a28b67eb66ea 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
@@ -24,20 +24,20 @@ void mmu_set_page_table_base_index(
const hrt_data base_index)
{
mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index);
-return;
+ return;
}
hrt_data mmu_get_page_table_base_index(
const mmu_ID_t ID)
{
-return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
+ return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
}
void mmu_invalidate_cache(
const mmu_ID_t ID)
{
mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1);
-return;
+ return;
}
void mmu_invalidate_cache_all(void)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
index 392b6cc24e8f..7377666f6eb7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_MMU_H void mmu_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_MMU_ID);
-assert(MMU_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_MMU_H hrt_data mmu_reg_load(
const mmu_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_MMU_ID);
-assert(MMU_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __MMU_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
index e6283bf67ad3..5ea81c0e82d1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_SP_C void sp_ctrl_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_SP_ID);
-assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_SP_C hrt_data sp_ctrl_load(
const sp_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_SP_ID);
-assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
@@ -47,7 +47,7 @@ STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
const unsigned int bit)
{
hrt_data val = sp_ctrl_load(ID, reg);
-return (val & (1UL << bit)) != 0;
+ return (val & (1UL << bit)) != 0;
}
STORAGE_CLASS_SP_C void sp_ctrl_setbit(
@@ -57,7 +57,7 @@ STORAGE_CLASS_SP_C void sp_ctrl_setbit(
{
hrt_data data = sp_ctrl_load(ID, reg);
sp_ctrl_store(ID, reg, (data | (1UL << bit)));
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
@@ -67,7 +67,7 @@ STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
{
hrt_data data = sp_ctrl_load(ID, reg);
sp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store(
@@ -76,10 +76,10 @@ STORAGE_CLASS_SP_C void sp_dmem_store(
const void *data,
const size_t size)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
ia_css_device_store(SP_DMEM_BASE[ID] + addr, data, size);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_load(
@@ -88,10 +88,10 @@ STORAGE_CLASS_SP_C void sp_dmem_load(
void *data,
const size_t size)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
ia_css_device_load(SP_DMEM_BASE[ID] + addr, data, size);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
@@ -99,11 +99,11 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
hrt_address addr,
const uint8_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint8(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
@@ -111,11 +111,11 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
hrt_address addr,
const uint16_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint16(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
@@ -123,19 +123,19 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
hrt_address addr,
const uint32_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint32(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C uint8_t sp_dmem_load_uint8(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint8(SP_DMEM_BASE[SP0_ID] + addr);
}
@@ -144,8 +144,8 @@ STORAGE_CLASS_SP_C uint16_t sp_dmem_load_uint16(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint16(SP_DMEM_BASE[SP0_ID] + addr);
}
@@ -154,8 +154,8 @@ STORAGE_CLASS_SP_C uint32_t sp_dmem_load_uint32(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint32(SP_DMEM_BASE[SP0_ID] + addr);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
index 92fb15d04703..fd0d92e87c36 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
@@ -15,7 +15,6 @@
#ifndef __ASSERT_SUPPORT_H_INCLUDED__
#define __ASSERT_SUPPORT_H_INCLUDED__
-#include "storage_class.h"
/**
* The following macro can help to test the size of a struct at compile
@@ -92,7 +91,7 @@
* The implemenation for the pipe generation tool is in see support.isp.h */
#define OP___assert(cnd) assert(cnd)
-STORAGE_CLASS_INLINE void compile_time_assert (unsigned cond)
+static inline void compile_time_assert (unsigned cond)
{
/* Call undefined function if cond is false */
extern void _compile_time_assert (void);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
index d71e08f27a42..6928965cf513 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "bamem_local.h"
#ifndef __INLINE_BAMEM__
-#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_BAMEM_H extern
#define STORAGE_CLASS_BAMEM_C
#include "bamem_public.h"
#else /* __INLINE_BAMEM__ */
-#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_BAMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_BAMEM_H static inline
+#define STORAGE_CLASS_BAMEM_C static inline
#include "bamem_private.h"
#endif /* __INLINE_BAMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
index 0398f5802f05..917ee8cdb1d9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
@@ -30,18 +30,13 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "csi_rx_local.h"
#ifndef __INLINE_CSI_RX__
-#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_CSI_RX_C
#include "csi_rx_public.h"
#else /* __INLINE_CSI_RX__ */
-#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_CSI_RX_C STORAGE_CLASS_INLINE
#include "csi_rx_private.h"
#endif /* __INLINE_CSI_RX__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
index 7d8011735033..0aa22446e27e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "debug_local.h"
#ifndef __INLINE_DEBUG__
-#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DEBUG_H extern
#define STORAGE_CLASS_DEBUG_C
#include "debug_public.h"
#else /* __INLINE_DEBUG__ */
-#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_DEBUG_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DEBUG_H static inline
+#define STORAGE_CLASS_DEBUG_C static inline
#include "debug_private.h"
#endif /* __INLINE_DEBUG__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
index b266191f21ef..d9dee691e3f8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "dma_local.h"
#ifndef __INLINE_DMA__
-#define STORAGE_CLASS_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DMA_H extern
#define STORAGE_CLASS_DMA_C
#include "dma_public.h"
#else /* __INLINE_DMA__ */
-#define STORAGE_CLASS_DMA_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_DMA_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DMA_H static inline
+#define STORAGE_CLASS_DMA_C static inline
#include "dma_private.h"
#endif /* __INLINE_DMA__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
index 78827c554cc3..df579e902796 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "event_fifo_local.h"
#ifndef __INLINE_EVENT__
-#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_EVENT_H extern
#define STORAGE_CLASS_EVENT_C
#include "event_fifo_public.h"
#else /* __INLINE_EVENT__ */
-#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_EVENT_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_EVENT_H static inline
+#define STORAGE_CLASS_EVENT_C static inline
#include "event_fifo_private.h"
#endif /* __INLINE_EVENT__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
index 3bdd260bcaa5..f10c4fa2e32b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "fifo_monitor_local.h"
#ifndef __INLINE_FIFO_MONITOR__
-#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_FIFO_MONITOR_H extern
#define STORAGE_CLASS_FIFO_MONITOR_C
#include "fifo_monitor_public.h"
#else /* __INLINE_FIFO_MONITOR__ */
-#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_FIFO_MONITOR_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_FIFO_MONITOR_H static inline
+#define STORAGE_CLASS_FIFO_MONITOR_C static inline
#include "fifo_monitor_private.h"
#endif /* __INLINE_FIFO_MONITOR__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
index 016132ba0b7f..75c6854c8e7b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gdc_local.h"
#ifndef __INLINE_GDC__
-#define STORAGE_CLASS_GDC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GDC_H extern
#define STORAGE_CLASS_GDC_C
#include "gdc_public.h"
#else /* __INLINE_GDC__ */
-#define STORAGE_CLASS_GDC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GDC_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GDC_H static inline
+#define STORAGE_CLASS_GDC_C static inline
#include "gdc_private.h"
#endif /* __INLINE_GDC__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
index 766d2532d8f9..aba94e623043 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gp_device_local.h"
#ifndef __INLINE_GP_DEVICE__
-#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_DEVICE_H extern
#define STORAGE_CLASS_GP_DEVICE_C
#include "gp_device_public.h"
#else /* __INLINE_GP_DEVICE__ */
-#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GP_DEVICE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_DEVICE_H static inline
+#define STORAGE_CLASS_GP_DEVICE_C static inline
#include "gp_device_private.h"
#endif /* __INLINE_GP_DEVICE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
index ca70f5603bf8..d5d2df24e11a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h" /*GP_TIMER_BASE address */
#include "gp_timer_local.h" /*GP_TIMER register offsets */
#ifndef __INLINE_GP_TIMER__
-#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_TIMER_H extern
#define STORAGE_CLASS_GP_TIMER_C
#include "gp_timer_public.h" /* functions*/
#else /* __INLINE_GP_TIMER__ */
-#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GP_TIMER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_TIMER_H static inline
+#define STORAGE_CLASS_GP_TIMER_C static inline
#include "gp_timer_private.h" /* inline functions*/
#endif /* __INLINE_GP_TIMER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
index dec21bcb6f47..d37f7166aa4a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gpio_local.h"
#ifndef __INLINE_GPIO__
-#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GPIO_H extern
#define STORAGE_CLASS_GPIO_C
#include "gpio_public.h"
#else /* __INLINE_GPIO__ */
-#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GPIO_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GPIO_H static inline
+#define STORAGE_CLASS_GPIO_C static inline
#include "gpio_private.h"
#endif /* __INLINE_GPIO__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
index 671dd5b5fca6..a82fd3a21e98 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "hmem_local.h"
#ifndef __INLINE_HMEM__
-#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_HMEM_H extern
#define STORAGE_CLASS_HMEM_C
#include "hmem_public.h"
#else /* __INLINE_HMEM__ */
-#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_HMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_HMEM_H static inline
+#define STORAGE_CLASS_HMEM_C static inline
#include "hmem_private.h"
#endif /* __INLINE_HMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
index 396240954bed..426d022d3a26 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
@@ -28,7 +28,7 @@
* @param[in] id The global unique ID of the csi rx fe controller.
* @param[out] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_state(
+extern void csi_rx_fe_ctrl_get_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state);
/**
@@ -38,7 +38,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_state(
* @param[in] id The global unique ID of the csi rx fe controller.
* @param[in] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_dump_state(
+extern void csi_rx_fe_ctrl_dump_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state);
/**
@@ -49,7 +49,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_dump_state(
* @param[in] lane The lane ID.
* @param[out] state Point to the dlane state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_dlane_state(
+extern void csi_rx_fe_ctrl_get_dlane_state(
const csi_rx_frontend_ID_t ID,
const uint32_t lane,
csi_rx_fe_ctrl_lane_t *dlane_state);
@@ -60,7 +60,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_dlane_state(
* @param[in] id The global unique ID of the csi rx be controller.
* @param[out] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_get_state(
+extern void csi_rx_be_ctrl_get_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state);
/**
@@ -70,10 +70,10 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_get_state(
* @param[in] id The global unique ID of the csi rx be controller.
* @param[in] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_dump_state(
+extern void csi_rx_be_ctrl_dump_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
/*****************************************************
*
@@ -89,7 +89,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_dump_state(
*
* @return the value of the register.
*/
-STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_fe_ctrl_reg_load(
+extern hrt_data csi_rx_fe_ctrl_reg_load(
const csi_rx_frontend_ID_t ID,
const hrt_address reg);
/**
@@ -101,7 +101,7 @@ STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_fe_ctrl_reg_load(
* @param[in] value The value to be stored.
*
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_reg_store(
+extern void csi_rx_fe_ctrl_reg_store(
const csi_rx_frontend_ID_t ID,
const hrt_address reg,
const hrt_data value);
@@ -114,7 +114,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_reg_store(
*
* @return the value of the register.
*/
-STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_be_ctrl_reg_load(
+extern hrt_data csi_rx_be_ctrl_reg_load(
const csi_rx_backend_ID_t ID,
const hrt_address reg);
/**
@@ -126,10 +126,10 @@ STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_be_ctrl_reg_load(
* @param[in] value The value to be stored.
*
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_reg_store(
+extern void csi_rx_be_ctrl_reg_store(
const csi_rx_backend_ID_t ID,
const hrt_address reg,
const hrt_data value);
-/** end of DLI */
+/* end of DLI */
#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
index d27f87a719db..d09d1e320306 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
@@ -33,7 +33,7 @@
\return none, GDC[ID].lut[0...3][0...HRT_GDC_N-1] = data
*/
-STORAGE_CLASS_EXTERN void gdc_lut_store(
+extern void gdc_lut_store(
const gdc_ID_t ID,
const int data[4][HRT_GDC_N]);
@@ -43,7 +43,7 @@ STORAGE_CLASS_EXTERN void gdc_lut_store(
\param in_lut[in] The data matrix to be converted
\param out_lut[out] The data matrix as the output of conversion
*/
-STORAGE_CLASS_EXTERN void gdc_lut_convert_to_isp_format(
+extern void gdc_lut_convert_to_isp_format(
const int in_lut[4][HRT_GDC_N],
int out_lut[4][HRT_GDC_N]);
@@ -53,7 +53,7 @@ STORAGE_CLASS_EXTERN void gdc_lut_convert_to_isp_format(
\return unity
*/
-STORAGE_CLASS_EXTERN int gdc_get_unity(
+extern int gdc_get_unity(
const gdc_ID_t ID);
#endif /* __GDC_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
index 9b8e7c92442d..8538f86ab5e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
@@ -15,10 +15,10 @@
#ifndef __HMEM_PUBLIC_H_INCLUDED__
#define __HMEM_PUBLIC_H_INCLUDED__
-#include <stddef.h> /* size_t */
+#include <linux/types.h> /* size_t */
/*! Return the size of HMEM[ID]
-
+
\param ID[in] HMEM identifier
\Note: The size is the byte size of the area it occupies
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
index 1ac0e64e539c..98ee9947fb8e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
@@ -54,7 +54,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state(
STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state(
const ibuf_ctrl_ID_t ID,
ibuf_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
/*****************************************************
*
@@ -87,7 +87,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store(
const ibuf_ctrl_ID_t ID,
const hrt_address reg,
const hrt_data value);
-/** end of DLI */
+/* end of DLI */
#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
#endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
index 2251f372145b..0d978e5911c0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
@@ -27,14 +27,13 @@
* Prerequisites:
*
*/
-#include "storage_class.h"
#ifdef INLINE_ISP_OP1W
-#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H static inline
+#define STORAGE_CLASS_ISP_OP1W_DATA_H static inline_DATA
#else /* INLINE_ISP_OP1W */
-#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H extern
+#define STORAGE_CLASS_ISP_OP1W_DATA_H extern_DATA
#endif /* INLINE_ISP_OP1W */
/*
@@ -50,7 +49,7 @@
/* Arithmetic */
-/** @brief bitwise AND
+/* @brief bitwise AND
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -64,7 +63,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_and(
const tvector1w _a,
const tvector1w _b);
-/** @brief bitwise OR
+/* @brief bitwise OR
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -78,7 +77,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_or(
const tvector1w _a,
const tvector1w _b);
-/** @brief bitwise XOR
+/* @brief bitwise XOR
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -92,7 +91,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_xor(
const tvector1w _a,
const tvector1w _b);
-/** @brief bitwise inverse
+/* @brief bitwise inverse
*
* @param[in] _a first argument
*
@@ -106,7 +105,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_inv(
/* Additive */
-/** @brief addition
+/* @brief addition
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -121,7 +120,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_add(
const tvector1w _a,
const tvector1w _b);
-/** @brief subtraction
+/* @brief subtraction
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -136,7 +135,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_sub(
const tvector1w _a,
const tvector1w _b);
-/** @brief saturated addition
+/* @brief saturated addition
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -151,7 +150,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_addsat(
const tvector1w _a,
const tvector1w _b);
-/** @brief saturated subtraction
+/* @brief saturated subtraction
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -167,7 +166,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subsat(
const tvector1w _b);
#ifdef ISP2401
-/** @brief Unsigned saturated subtraction
+/* @brief Unsigned saturated subtraction
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -183,7 +182,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_subsat_u(
const tvector1w_unsigned _b);
#endif
-/** @brief subtraction with shift right and rounding
+/* @brief subtraction with shift right and rounding
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -203,7 +202,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subasr1(
const tvector1w _a,
const tvector1w _b);
-/** @brief Subtraction with shift right and rounding
+/* @brief Subtraction with shift right and rounding
*
* @param[in] _a first operand
* @param[in] _b second operand
@@ -218,7 +217,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalfrnd(
const tvector1w _a,
const tvector1w _b);
-/** @brief Subtraction with shift right and no rounding
+/* @brief Subtraction with shift right and no rounding
*
* @param[in] _a first operand
* @param[in] _b second operand
@@ -234,7 +233,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
const tvector1w _b);
-/** @brief saturated absolute value
+/* @brief saturated absolute value
*
* @param[in] _a input
*
@@ -248,7 +247,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_abs(
const tvector1w _a);
-/** @brief saturated absolute difference
+/* @brief saturated absolute difference
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -265,7 +264,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subabssat(
/* Multiplicative */
-/** @brief doubling multiply
+/* @brief doubling multiply
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -282,7 +281,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_muld(
const tvector1w _a,
const tvector1w _b);
-/** @brief integer multiply
+/* @brief integer multiply
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -299,7 +298,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mul(
const tvector1w _a,
const tvector1w _b);
-/** @brief fractional saturating multiply
+/* @brief fractional saturating multiply
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -317,7 +316,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qmul(
const tvector1w _a,
const tvector1w _b);
-/** @brief fractional saturating multiply with rounding
+/* @brief fractional saturating multiply with rounding
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -338,7 +337,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qrmul(
/* Comparative */
-/** @brief equal
+/* @brief equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -352,7 +351,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_eq(
const tvector1w _a,
const tvector1w _b);
-/** @brief not equal
+/* @brief not equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -366,7 +365,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ne(
const tvector1w _a,
const tvector1w _b);
-/** @brief less or equal
+/* @brief less or equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -380,7 +379,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_le(
const tvector1w _a,
const tvector1w _b);
-/** @brief less then
+/* @brief less then
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -394,7 +393,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_lt(
const tvector1w _a,
const tvector1w _b);
-/** @brief greater or equal
+/* @brief greater or equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -408,7 +407,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ge(
const tvector1w _a,
const tvector1w _b);
-/** @brief greater than
+/* @brief greater than
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -424,7 +423,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_gt(
/* Shift */
-/** @brief aritmetic shift right
+/* @brief aritmetic shift right
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -442,7 +441,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asr(
const tvector1w _a,
const tvector1w _b);
-/** @brief aritmetic shift right with rounding
+/* @brief aritmetic shift right with rounding
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -461,7 +460,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asrrnd(
const tvector1w _a,
const tvector1w _b);
-/** @brief saturating arithmetic shift left
+/* @brief saturating arithmetic shift left
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -481,7 +480,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asl(
const tvector1w _a,
const tvector1w _b);
-/** @brief saturating aritmetic shift left
+/* @brief saturating aritmetic shift left
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -494,7 +493,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_aslsat(
const tvector1w _a,
const tvector1w _b);
-/** @brief logical shift left
+/* @brief logical shift left
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -511,7 +510,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsl(
const tvector1w _a,
const tvector1w _b);
-/** @brief logical shift right
+/* @brief logical shift right
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -529,7 +528,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsr(
const tvector1w _b);
#ifdef ISP2401
-/** @brief bidirectional saturating arithmetic shift
+/* @brief bidirectional saturating arithmetic shift
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -547,7 +546,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift_sat(
const tvector1w _a,
const tvector1w _b);
-/** @brief bidirectional non-saturating arithmetic shift
+/* @brief bidirectional non-saturating arithmetic shift
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -566,7 +565,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift(
const tvector1w _b);
-/** @brief bidirectional logical shift
+/* @brief bidirectional logical shift
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -589,7 +588,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
#endif
/* Cast */
-/** @brief Cast from int to 1w
+/* @brief Cast from int to 1w
*
* @param[in] _a input
*
@@ -602,7 +601,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
const int _a);
-/** @brief Cast from 1w to int
+/* @brief Cast from 1w to int
*
* @param[in] _a input
*
@@ -615,7 +614,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
const tvector1w _a);
-/** @brief Cast from 1w to 2w
+/* @brief Cast from 1w to 2w
*
* @param[in] _a input
*
@@ -628,7 +627,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_cast_to_2w(
const tvector1w _a);
-/** @brief Cast from 2w to 1w
+/* @brief Cast from 2w to 1w
*
* @param[in] _a input
*
@@ -642,7 +641,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_cast_to_1w(
const tvector2w _a);
-/** @brief Cast from 2w to 1w with saturation
+/* @brief Cast from 2w to 1w with saturation
*
* @param[in] _a input
*
@@ -658,7 +657,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_sat_cast_to_1w(
/* clipping */
-/** @brief Clip asymmetrical
+/* @brief Clip asymmetrical
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -674,7 +673,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clip_asym(
const tvector1w _a,
const tvector1w _b);
-/** @brief Clip zero
+/* @brief Clip zero
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -692,7 +691,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clipz(
/* division */
-/** @brief Truncated division
+/* @brief Truncated division
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -709,7 +708,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_div(
const tvector1w _a,
const tvector1w _b);
-/** @brief Fractional saturating divide
+/* @brief Fractional saturating divide
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -727,7 +726,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qdiv(
const tvector1w _a,
const tvector1w _b);
-/** @brief Modulo
+/* @brief Modulo
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -742,7 +741,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mod(
const tvector1w _a,
const tvector1w _b);
-/** @brief Unsigned integer Square root
+/* @brief Unsigned integer Square root
*
* @param[in] _a input
*
@@ -755,7 +754,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_sqrt_u(
/* Miscellaneous */
-/** @brief Multiplexer
+/* @brief Multiplexer
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -771,7 +770,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mux(
const tvector1w _b,
const tflags _c);
-/** @brief Average without rounding
+/* @brief Average without rounding
*
* @param[in] _a first operand
* @param[in] _b second operand
@@ -787,7 +786,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avg(
const tvector1w _a,
const tvector1w _b);
-/** @brief Average with rounding
+/* @brief Average with rounding
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -803,7 +802,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avgrnd(
const tvector1w _a,
const tvector1w _b);
-/** @brief Minimum
+/* @brief Minimum
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -817,7 +816,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_min(
const tvector1w _a,
const tvector1w _b);
-/** @brief Maximum
+/* @brief Maximum
*
* @param[in] _a first argument
* @param[in] _b second argument
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
index 1cfe6d717283..7575d260b837 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
@@ -27,14 +27,13 @@
* Prerequisites:
*
*/
-#include "storage_class.h"
#ifdef INLINE_ISP_OP2W
-#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H static inline
+#define STORAGE_CLASS_ISP_OP2W_DATA_H static inline_DATA
#else /* INLINE_ISP_OP2W */
-#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H extern
+#define STORAGE_CLASS_ISP_OP2W_DATA_H extern_DATA
#endif /* INLINE_ISP_OP2W */
/*
@@ -49,7 +48,7 @@
/* Arithmetic */
-/** @brief bitwise AND
+/* @brief bitwise AND
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -63,7 +62,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_and(
const tvector2w _a,
const tvector2w _b);
-/** @brief bitwise OR
+/* @brief bitwise OR
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -77,7 +76,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_or(
const tvector2w _a,
const tvector2w _b);
-/** @brief bitwise XOR
+/* @brief bitwise XOR
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -91,7 +90,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_xor(
const tvector2w _a,
const tvector2w _b);
-/** @brief bitwise inverse
+/* @brief bitwise inverse
*
* @param[in] _a first argument
*
@@ -105,7 +104,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_inv(
/* Additive */
-/** @brief addition
+/* @brief addition
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -120,7 +119,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_add(
const tvector2w _a,
const tvector2w _b);
-/** @brief subtraction
+/* @brief subtraction
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -135,7 +134,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_sub(
const tvector2w _a,
const tvector2w _b);
-/** @brief saturated addition
+/* @brief saturated addition
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -150,7 +149,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_addsat(
const tvector2w _a,
const tvector2w _b);
-/** @brief saturated subtraction
+/* @brief saturated subtraction
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -165,7 +164,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subsat(
const tvector2w _a,
const tvector2w _b);
-/** @brief subtraction with shift right and rounding
+/* @brief subtraction with shift right and rounding
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -185,7 +184,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subasr1(
const tvector2w _a,
const tvector2w _b);
-/** @brief Subtraction with shift right and rounding
+/* @brief Subtraction with shift right and rounding
*
* @param[in] _a first operand
* @param[in] _b second operand
@@ -200,7 +199,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalfrnd(
const tvector2w _a,
const tvector2w _b);
-/** @brief Subtraction with shift right and no rounding
+/* @brief Subtraction with shift right and no rounding
*
* @param[in] _a first operand
* @param[in] _b second operand
@@ -215,7 +214,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
const tvector2w _a,
const tvector2w _b);
-/** @brief saturated absolute value
+/* @brief saturated absolute value
*
* @param[in] _a input
*
@@ -229,7 +228,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_abs(
const tvector2w _a);
-/** @brief saturated absolute difference
+/* @brief saturated absolute difference
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -246,7 +245,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subabssat(
/* Multiplicative */
-/** @brief integer multiply
+/* @brief integer multiply
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -263,7 +262,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mul(
const tvector2w _a,
const tvector2w _b);
-/** @brief fractional saturating multiply
+/* @brief fractional saturating multiply
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -280,7 +279,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qmul(
const tvector2w _a,
const tvector2w _b);
-/** @brief fractional saturating multiply with rounding
+/* @brief fractional saturating multiply with rounding
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -302,7 +301,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qrmul(
/* Comparative */
-/** @brief equal
+/* @brief equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -316,7 +315,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_eq(
const tvector2w _a,
const tvector2w _b);
-/** @brief not equal
+/* @brief not equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -330,7 +329,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ne(
const tvector2w _a,
const tvector2w _b);
-/** @brief less or equal
+/* @brief less or equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -344,7 +343,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_le(
const tvector2w _a,
const tvector2w _b);
-/** @brief less then
+/* @brief less then
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -358,7 +357,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_lt(
const tvector2w _a,
const tvector2w _b);
-/** @brief greater or equal
+/* @brief greater or equal
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -372,7 +371,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ge(
const tvector2w _a,
const tvector2w _b);
-/** @brief greater than
+/* @brief greater than
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -388,7 +387,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_gt(
/* Shift */
-/** @brief aritmetic shift right
+/* @brief aritmetic shift right
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -405,7 +404,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asr(
const tvector2w _a,
const tvector2w _b);
-/** @brief aritmetic shift right with rounding
+/* @brief aritmetic shift right with rounding
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -424,7 +423,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asrrnd(
const tvector2w _a,
const tvector2w _b);
-/** @brief saturating aritmetic shift left
+/* @brief saturating aritmetic shift left
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -444,7 +443,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asl(
const tvector2w _a,
const tvector2w _b);
-/** @brief saturating aritmetic shift left
+/* @brief saturating aritmetic shift left
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -457,7 +456,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_aslsat(
const tvector2w _a,
const tvector2w _b);
-/** @brief logical shift left
+/* @brief logical shift left
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -474,7 +473,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsl(
const tvector2w _a,
const tvector2w _b);
-/** @brief logical shift right
+/* @brief logical shift right
*
* @param[in] _a input
* @param[in] _b shift amount
@@ -493,7 +492,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsr(
/* clipping */
-/** @brief Clip asymmetrical
+/* @brief Clip asymmetrical
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -508,7 +507,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clip_asym(
const tvector2w _a,
const tvector2w _b);
-/** @brief Clip zero
+/* @brief Clip zero
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -525,7 +524,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clipz(
/* division */
-/** @brief Truncated division
+/* @brief Truncated division
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -542,7 +541,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_div(
const tvector2w _a,
const tvector2w _b);
-/** @brief Saturating truncated division
+/* @brief Saturating truncated division
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -560,7 +559,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w OP_2w_divh(
const tvector2w _a,
const tvector1w _b);
-/** @brief Modulo
+/* @brief Modulo
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -573,7 +572,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mod(
const tvector2w _a,
const tvector2w _b);
-/** @brief Unsigned Integer Square root
+/* @brief Unsigned Integer Square root
*
* @param[in] _a input
*
@@ -586,7 +585,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w_unsigned OP_2w_sqrt_u(
/* Miscellaneous */
-/** @brief Multiplexer
+/* @brief Multiplexer
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -602,7 +601,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mux(
const tvector2w _b,
const tflags _c);
-/** @brief Average without rounding
+/* @brief Average without rounding
*
* @param[in] _a first operand
* @param[in] _b second operand
@@ -618,7 +617,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avg(
const tvector2w _a,
const tvector2w _b);
-/** @brief Average with rounding
+/* @brief Average with rounding
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -634,7 +633,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avgrnd(
const tvector2w _a,
const tvector2w _b);
-/** @brief Minimum
+/* @brief Minimum
*
* @param[in] _a first argument
* @param[in] _b second argument
@@ -648,7 +647,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_min(
const tvector2w _a,
const tvector2w _b);
-/** @brief Maximum
+/* @brief Maximum
*
* @param[in] _a first argument
* @param[in] _b second argument
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
index 5624cfcfa015..6c53ca9df96c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
@@ -43,7 +43,7 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_sid_state(
const stream2mmio_ID_t ID,
const stream2mmio_sid_ID_t sid_id,
stream2mmio_sid_state_t *state);
-/** end of NCI */
+/* end of NCI */
/*****************************************************
*
@@ -96,6 +96,6 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_reg_store(
const stream2mmio_ID_t ID,
const hrt_address reg,
const hrt_data value);
-/** end of DLI */
+/* end of DLI */
#endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
index 4258fa872087..0a13eda73607 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
@@ -24,7 +24,7 @@
\return none, MMU[ID].page_table_base_index = base_index
*/
-STORAGE_CLASS_EXTERN void mmu_set_page_table_base_index(
+extern void mmu_set_page_table_base_index(
const mmu_ID_t ID,
const hrt_data base_index);
@@ -35,7 +35,7 @@ STORAGE_CLASS_EXTERN void mmu_set_page_table_base_index(
\return MMU[ID].page_table_base_index
*/
-STORAGE_CLASS_EXTERN hrt_data mmu_get_page_table_base_index(
+extern hrt_data mmu_get_page_table_base_index(
const mmu_ID_t ID);
/*! Invalidate the page table cache of MMU[ID]
@@ -44,7 +44,7 @@ STORAGE_CLASS_EXTERN hrt_data mmu_get_page_table_base_index(
\return none
*/
-STORAGE_CLASS_EXTERN void mmu_invalidate_cache(
+extern void mmu_invalidate_cache(
const mmu_ID_t ID);
@@ -52,7 +52,7 @@ STORAGE_CLASS_EXTERN void mmu_invalidate_cache(
\return none
*/
-STORAGE_CLASS_EXTERN void mmu_invalidate_cache_all(void);
+extern void mmu_invalidate_cache_all(void);
/*! Write to a control register of MMU[ID]
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
index c0f3f3ea32d7..f597e07d7c4f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
@@ -41,7 +41,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_get_state(
STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state(
const pixelgen_ID_t ID,
pixelgen_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
/*****************************************************
*
@@ -73,7 +73,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
const pixelgen_ID_t ID,
const hrt_address reg,
const hrt_data value);
-/** end of DLI */
+/* end of DLI */
#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
index 3e955fca2a94..c1638c06407d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
@@ -15,20 +15,19 @@
#ifndef _REF_VECTOR_FUNC_H_INCLUDED_
#define _REF_VECTOR_FUNC_H_INCLUDED_
-#include "storage_class.h"
#ifdef INLINE_VECTOR_FUNC
-#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H static inline
+#define STORAGE_CLASS_REF_VECTOR_DATA_H static inline_DATA
#else /* INLINE_VECTOR_FUNC */
-#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H extern
+#define STORAGE_CLASS_REF_VECTOR_DATA_H extern_DATA
#endif /* INLINE_VECTOR_FUNC */
#include "ref_vector_func_types.h"
-/** @brief Doubling multiply accumulate with saturation
+/* @brief Doubling multiply accumulate with saturation
*
* @param[in] acc accumulator
* @param[in] a multiply input
@@ -45,7 +44,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd_sat(
tvector1w a,
tvector1w b );
-/** @brief Doubling multiply accumulate
+/* @brief Doubling multiply accumulate
*
* @param[in] acc accumulator
* @param[in] a multiply input
@@ -62,7 +61,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd(
tvector1w a,
tvector1w b );
-/** @brief Re-aligning multiply
+/* @brief Re-aligning multiply
*
* @param[in] a multiply input
* @param[in] b multiply input
@@ -79,7 +78,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
tvector1w b,
tscalar1w shift );
-/** @brief Leading bit index
+/* @brief Leading bit index
*
* @param[in] a input
*
@@ -93,7 +92,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_lod(
tvector1w a);
-/** @brief Config Unit Input Processing
+/* @brief Config Unit Input Processing
*
* @param[in] a input
* @param[in] input_scale input scaling factor
@@ -112,7 +111,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_input_scaling_offset_clamping(
tscalar1w_5bit_signed input_scale,
tscalar1w_5bit_signed input_offset);
-/** @brief Config Unit Output Processing
+/* @brief Config Unit Output Processing
*
* @param[in] a output
* @param[in] output_scale output scaling factor
@@ -128,7 +127,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_output_scaling_clamping(
tvector1w a,
tscalar1w_5bit_signed output_scale);
-/** @brief Config Unit Piecewiselinear estimation
+/* @brief Config Unit Piecewiselinear estimation
*
* @param[in] a input
* @param[in] config_points config parameter structure
@@ -144,7 +143,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_piecewise_estimation(
tvector1w a,
ref_config_points config_points);
-/** @brief Fast Config Unit
+/* @brief Fast Config Unit
*
* @param[in] x input
* @param[in] init_vectors LUT data structure
@@ -162,7 +161,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_XCU(
xcu_ref_init_vectors init_vectors);
-/** @brief LXCU
+/* @brief LXCU
*
* @param[in] x input
* @param[in] init_vectors LUT data structure
@@ -181,7 +180,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_LXCU(
tvector1w x,
xcu_ref_init_vectors init_vectors);
-/** @brief Coring
+/* @brief Coring
*
* @param[in] coring_vec Amount of coring based on brightness level
* @param[in] filt_input Vector of input pixels on which Coring is applied
@@ -197,7 +196,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
tvector1w filt_input,
tscalar1w m_CnrCoring0 );
-/** @brief Normalised FIR with coefficients [3,4,1]
+/* @brief Normalised FIR with coefficients [3,4,1]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -210,7 +209,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
const s_1w_1x3_matrix m);
-/** @brief Normalised FIR with coefficients [1,4,3]
+/* @brief Normalised FIR with coefficients [1,4,3]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -223,7 +222,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
const s_1w_1x3_matrix m);
-/** @brief Normalised FIR with coefficients [1,2,1]
+/* @brief Normalised FIR with coefficients [1,2,1]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -235,7 +234,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
const s_1w_1x3_matrix m);
-/** @brief Normalised FIR with coefficients [13,16,3]
+/* @brief Normalised FIR with coefficients [13,16,3]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -247,7 +246,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
const s_1w_1x3_matrix m);
-/** @brief Normalised FIR with coefficients [9,16,7]
+/* @brief Normalised FIR with coefficients [9,16,7]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -259,7 +258,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
const s_1w_1x3_matrix m);
-/** @brief Normalised FIR with coefficients [5,16,11]
+/* @brief Normalised FIR with coefficients [5,16,11]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -271,7 +270,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
const s_1w_1x3_matrix m);
-/** @brief Normalised FIR with coefficients [1,16,15]
+/* @brief Normalised FIR with coefficients [1,16,15]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -283,7 +282,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
const s_1w_1x3_matrix m);
-/** @brief Normalised FIR with programable phase shift
+/* @brief Normalised FIR with programable phase shift
*
* @param[in] m 1x3 matrix with pixels
* @param[in] coeff phase shift
@@ -296,7 +295,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_calc_coeff (
const s_1w_1x3_matrix m, tscalar1w_3bit coeff);
-/** @brief 3 tap FIR with coefficients [1,1,1]
+/* @brief 3 tap FIR with coefficients [1,1,1]
*
* @param[in] m 1x3 matrix with pixels
*
@@ -309,7 +308,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_9dB_nrm (
const s_1w_1x3_matrix m);
#ifdef ISP2401
-/** @brief symmetric 3 tap FIR acts as LPF or BSF
+/* @brief symmetric 3 tap FIR acts as LPF or BSF
*
* @param[in] m 1x3 matrix with pixels
* @param[in] k filter coefficient shift
@@ -337,7 +336,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
tscalar_bool bsf_flag);
#endif
-/** @brief Normalised 2D FIR with coefficients [1;2;1] * [1,2,1]
+/* @brief Normalised 2D FIR with coefficients [1;2;1] * [1,2,1]
*
* @param[in] m 3x3 matrix with pixels
*
@@ -354,7 +353,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
const s_1w_3x3_matrix m);
-/** @brief Normalised 2D FIR with coefficients [1;1;1] * [1,1,1]
+/* @brief Normalised 2D FIR with coefficients [1;1;1] * [1,1,1]
*
* @param[in] m 3x3 matrix with pixels
*
@@ -372,7 +371,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
const s_1w_3x3_matrix m);
-/** @brief Normalised dual output 2D FIR with coefficients [1;2;1] * [1,2,1]
+/* @brief Normalised dual output 2D FIR with coefficients [1;2;1] * [1,2,1]
*
* @param[in] m 4x3 matrix with pixels
*
@@ -392,7 +391,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_6dB_out2x1_nrm (
const s_1w_4x3_matrix m);
-/** @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
+/* @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
*
* @param[in] m 4x3 matrix with pixels
*
@@ -412,7 +411,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
const s_1w_4x3_matrix m);
-/** @brief Normalised 2D FIR 5x5
+/* @brief Normalised 2D FIR 5x5
*
* @param[in] m 5x5 matrix with pixels
*
@@ -430,7 +429,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
const s_1w_5x5_matrix m);
-/** @brief Normalised FIR 1x5
+/* @brief Normalised FIR 1x5
*
* @param[in] m 1x5 matrix with pixels
*
@@ -448,7 +447,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
const s_1w_1x5_matrix m);
-/** @brief Normalised 2D FIR 5x5
+/* @brief Normalised 2D FIR 5x5
*
* @param[in] m 5x5 matrix with pixels
*
@@ -466,7 +465,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
const s_1w_5x5_matrix m);
-/** @brief Approximate averaging FIR 1x5
+/* @brief Approximate averaging FIR 1x5
*
* @param[in] m 1x5 matrix with pixels
*
@@ -480,7 +479,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
s_1w_1x5_matrix m);
-/** @brief Approximate averaging FIR 1x9
+/* @brief Approximate averaging FIR 1x9
*
* @param[in] m 1x9 matrix with pixels
*
@@ -494,7 +493,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
s_1w_1x9_matrix m);
-/** @brief Approximate averaging FIR 1x11
+/* @brief Approximate averaging FIR 1x11
*
* @param[in] m 1x11 matrix with pixels
*
@@ -508,7 +507,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x11m_box (
s_1w_1x11_matrix m);
-/** @brief Symmetric 7 tap filter with normalization
+/* @brief Symmetric 7 tap filter with normalization
*
* @param[in] in 1x7 matrix with pixels
* @param[in] coeff 1x4 matrix with coefficients
@@ -529,7 +528,7 @@ fir1x7m_sym_nrm(s_1w_1x7_matrix in,
s_1w_1x4_matrix coeff,
tvector1w out_shift);
-/** @brief Symmetric 7 tap filter with normalization at input side
+/* @brief Symmetric 7 tap filter with normalization at input side
*
* @param[in] in 1x7 matrix with pixels
* @param[in] coeff 1x4 matrix with coefficients
@@ -550,7 +549,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
fir1x7m_sym_innrm_approx(s_1w_1x7_matrix in,
s_1w_1x4_matrix coeff);
-/** @brief Symmetric 7 tap filter with normalization at output side
+/* @brief Symmetric 7 tap filter with normalization at output side
*
* @param[in] in 1x7 matrix with pixels
* @param[in] coeff 1x4 matrix with coefficients
@@ -572,7 +571,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
fir1x7m_sym_outnrm_approx(s_1w_1x7_matrix in,
s_1w_1x4_matrix coeff);
-/** @brief 4 tap filter with normalization
+/* @brief 4 tap filter with normalization
*
* @param[in] in 1x4 matrix with pixels
* @param[in] coeff 1x4 matrix with coefficients
@@ -589,7 +588,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
s_1w_1x4_matrix coeff,
tvector1w out_shift);
-/** @brief 4 tap filter with normalization for half pixel interpolation
+/* @brief 4 tap filter with normalization for half pixel interpolation
*
* @param[in] in 1x4 matrix with pixels
*
@@ -605,7 +604,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
fir1x4m_bicubic_bezier_half(s_1w_1x4_matrix in);
-/** @brief 4 tap filter with normalization for quarter pixel interpolation
+/* @brief 4 tap filter with normalization for quarter pixel interpolation
*
* @param[in] in 1x4 matrix with pixels
* @param[in] coeff 1x4 matrix with coefficients
@@ -627,7 +626,7 @@ fir1x4m_bicubic_bezier_quarter(s_1w_1x4_matrix in,
s_1w_1x4_matrix coeff);
-/** @brief Symmetric 3 tap filter with normalization
+/* @brief Symmetric 3 tap filter with normalization
*
* @param[in] in 1x3 matrix with pixels
* @param[in] coeff 1x2 matrix with coefficients
@@ -647,7 +646,7 @@ fir1x3m_sym_nrm(s_1w_1x3_matrix in,
s_1w_1x2_matrix coeff,
tvector1w out_shift);
-/** @brief Symmetric 3 tap filter with normalization
+/* @brief Symmetric 3 tap filter with normalization
*
* @param[in] in 1x3 matrix with pixels
* @param[in] coeff 1x2 matrix with coefficients
@@ -667,7 +666,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
s_1w_1x2_matrix coeff);
-/** @brief Mean of 1x3 matrix
+/* @brief Mean of 1x3 matrix
*
* @param[in] m 1x3 matrix with pixels
*
@@ -679,7 +678,7 @@ fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
s_1w_1x3_matrix m);
-/** @brief Mean of 3x3 matrix
+/* @brief Mean of 3x3 matrix
*
* @param[in] m 3x3 matrix with pixels
*
@@ -691,7 +690,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
s_1w_3x3_matrix m);
-/** @brief Mean of 1x4 matrix
+/* @brief Mean of 1x4 matrix
*
* @param[in] m 1x4 matrix with pixels
*
@@ -702,7 +701,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
s_1w_1x4_matrix m);
-/** @brief Mean of 4x4 matrix
+/* @brief Mean of 4x4 matrix
*
* @param[in] m 4x4 matrix with pixels
*
@@ -713,7 +712,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
s_1w_4x4_matrix m);
-/** @brief Mean of 2x3 matrix
+/* @brief Mean of 2x3 matrix
*
* @param[in] m 2x3 matrix with pixels
*
@@ -725,7 +724,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
s_1w_2x3_matrix m);
-/** @brief Mean of 1x5 matrix
+/* @brief Mean of 1x5 matrix
*
* @param[in] m 1x5 matrix with pixels
*
@@ -736,7 +735,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
*/
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
-/** @brief Mean of 1x6 matrix
+/* @brief Mean of 1x6 matrix
*
* @param[in] m 1x6 matrix with pixels
*
@@ -748,7 +747,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
s_1w_1x6_matrix m);
-/** @brief Mean of 5x5 matrix
+/* @brief Mean of 5x5 matrix
*
* @param[in] m 5x5 matrix with pixels
*
@@ -760,7 +759,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
s_1w_5x5_matrix m);
-/** @brief Mean of 6x6 matrix
+/* @brief Mean of 6x6 matrix
*
* @param[in] m 6x6 matrix with pixels
*
@@ -772,7 +771,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
s_1w_6x6_matrix m);
-/** @brief Minimum of 4x4 matrix
+/* @brief Minimum of 4x4 matrix
*
* @param[in] m 4x4 matrix with pixels
*
@@ -784,7 +783,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
s_1w_4x4_matrix m);
-/** @brief Maximum of 4x4 matrix
+/* @brief Maximum of 4x4 matrix
*
* @param[in] m 4x4 matrix with pixels
*
@@ -796,7 +795,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w max4x4m(
s_1w_4x4_matrix m);
-/** @brief SAD between two 3x3 matrices
+/* @brief SAD between two 3x3 matrices
*
* @param[in] a 3x3 matrix with pixels
*
@@ -814,7 +813,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m_precise(
s_1w_3x3_matrix a,
s_1w_3x3_matrix b);
-/** @brief SAD between two 3x3 matrices
+/* @brief SAD between two 3x3 matrices
*
* @param[in] a 3x3 matrix with pixels
*
@@ -834,7 +833,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m(
s_1w_3x3_matrix a,
s_1w_3x3_matrix b);
-/** @brief SAD between two 5x5 matrices
+/* @brief SAD between two 5x5 matrices
*
* @param[in] a 5x5 matrix with pixels
*
@@ -848,7 +847,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
s_1w_5x5_matrix a,
s_1w_5x5_matrix b);
-/** @brief Absolute gradient between two sets of 1x5 matrices
+/* @brief Absolute gradient between two sets of 1x5 matrices
*
* @param[in] m0 first set of 1x5 matrix with pixels
* @param[in] m1 second set of 1x5 matrix with pixels
@@ -861,7 +860,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
absgrad1x5m(s_1w_1x5_matrix m0, s_1w_1x5_matrix m1);
-/** @brief Bi-linear Interpolation optimized(approximate)
+/* @brief Bi-linear Interpolation optimized(approximate)
*
* @param[in] a input0
* @param[in] b input1
@@ -883,7 +882,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx_c(
tvector1w b,
tscalar1w_weight c);
-/** @brief Bi-linear Interpolation optimized(approximate)
+/* @brief Bi-linear Interpolation optimized(approximate)
*
* @param[in] a input0
* @param[in] b input1
@@ -905,7 +904,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx(
tvector1w b,
tvector1w_weight c);
-/** @brief Bi-linear Interpolation
+/* @brief Bi-linear Interpolation
*
* @param[in] a input0
* @param[in] b input1
@@ -926,7 +925,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol(
tvector1w b,
tscalar1w_weight c);
-/** @brief Generic Block Matching Algorithm
+/* @brief Generic Block Matching Algorithm
* @param[in] search_window pointer to input search window of 16x16 pixels
* @param[in] ref_block pointer to input reference block of 8x8 pixels, where N<=M
* @param[in] output pointer to output sads
@@ -955,9 +954,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H int generic_block_matching_algorithm(
tscalar1w_4bit_bma_shift shift);
#ifndef ISP2401
-/** @brief OP_1w_asp_bma_16_1_32way
+/* @brief OP_1w_asp_bma_16_1_32way
#else
-/** @brief OP_1w_asp_bma_16_1_32way_nomask
+/* @brief OP_1w_asp_bma_16_1_32way_nomask
#endif
*
* @param[in] search_area input search window of 16x16 pixels
@@ -985,9 +984,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_1 OP_1w_asp_bma_16_1_32way_nomask(
tscalar1w_4bit_bma_shift shift);
#ifndef ISP2401
-/** @brief OP_1w_asp_bma_16_2_32way
+/* @brief OP_1w_asp_bma_16_2_32way
#else
-/** @brief OP_1w_asp_bma_16_2_32way_nomask
+/* @brief OP_1w_asp_bma_16_2_32way_nomask
#endif
*
* @param[in] search_area input search window of 16x16 pixels
@@ -1012,9 +1011,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_2 OP_1w_asp_bma_16_2_32way_nomask(
ref_block_8x8 input_block,
tscalar1w_4bit_bma_shift shift);
#ifndef ISP2401
-/** @brief OP_1w_asp_bma_14_1_32way
+/* @brief OP_1w_asp_bma_14_1_32way
#else
-/** @brief OP_1w_asp_bma_14_1_32way_nomask
+/* @brief OP_1w_asp_bma_14_1_32way_nomask
#endif
*
* @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1042,9 +1041,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_1 OP_1w_asp_bma_14_1_32way_nomask(
tscalar1w_4bit_bma_shift shift);
#ifndef ISP2401
-/** @brief OP_1w_asp_bma_14_2_32way
+/* @brief OP_1w_asp_bma_14_2_32way
#else
-/** @brief OP_1w_asp_bma_14_2_32way_nomask
+/* @brief OP_1w_asp_bma_14_2_32way_nomask
#endif
*
* @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1070,7 +1069,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_2 OP_1w_asp_bma_14_2_32way_nomask(
tscalar1w_4bit_bma_shift shift);
#ifdef ISP2401
-/** @brief multiplex addition and passing
+/* @brief multiplex addition and passing
*
* @param[in] _a first pixel
* @param[in] _b second pixel
@@ -1088,7 +1087,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_cond_add(
#endif
#ifdef HAS_bfa_unit
-/** @brief OP_1w_single_bfa_7x7
+/* @brief OP_1w_single_bfa_7x7
*
* @param[in] weights - spatial and range weight lut
* @param[in] threshold - threshold plane, for range weight scaling
@@ -1116,7 +1115,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_single_bfa_7x7(
tvector1w central_pix,
s_1w_7x7_matrix src_plane);
-/** @brief OP_1w_joint_bfa_7x7
+/* @brief OP_1w_joint_bfa_7x7
*
* @param[in] weights - spatial and range weight lut
* @param[in] threshold0 - 1st threshold plane, for range weight scaling
@@ -1150,7 +1149,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_joint_bfa_7x7(
tvector1w central_pix1,
s_1w_7x7_matrix src1_plane);
-/** @brief bbb_bfa_gen_spatial_weight_lut
+/* @brief bbb_bfa_gen_spatial_weight_lut
*
* @param[in] in - 7x7 matrix of spatial weights
* @param[in] out - generated LUT
@@ -1164,7 +1163,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_spatial_weight_lut(
s_1w_7x7_matrix in,
tvector1w out[BFA_MAX_KWAY]);
-/** @brief bbb_bfa_gen_range_weight_lut
+/* @brief bbb_bfa_gen_range_weight_lut
*
* @param[in] in - input range weight,
* @param[in] out - generated LUT
@@ -1185,7 +1184,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
#endif
#ifdef ISP2401
-/** @brief OP_1w_imax32
+/* @brief OP_1w_imax32
*
* @param[in] src - structure that holds an array of 32 elements.
*
@@ -1196,7 +1195,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
STORAGE_CLASS_REF_VECTOR_FUNC_H int OP_1w_imax32(
imax32_ref_in_vector src);
-/** @brief OP_1w_imaxidx32
+/* @brief OP_1w_imaxidx32
*
* @param[in] src - structure that holds a vector of elements.
*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
index f5de0df7981e..c7d9095472b1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "ibuf_ctrl_local.h"
#ifndef __INLINE_IBUF_CTRL__
-#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IBUF_CTRL_H extern
#define STORAGE_CLASS_IBUF_CTRL_C
#include "ibuf_ctrl_public.h"
#else /* __INLINE_IBUF_CTRL__ */
-#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_IBUF_CTRL_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IBUF_CTRL_H static inline
+#define STORAGE_CLASS_IBUF_CTRL_C static inline
#include "ibuf_ctrl_private.h"
#endif /* __INLINE_IBUF_CTRL__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
index 041c8b660aa4..eeaaecdd57ba 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "input_formatter_local.h"
#ifndef __INLINE_INPUT_FORMATTER__
-#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_FORMATTER_H extern
#define STORAGE_CLASS_INPUT_FORMATTER_C
#include "input_formatter_public.h"
#else /* __INLINE_INPUT_FORMATTER__ */
-#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_INPUT_FORMATTER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_FORMATTER_H static inline
+#define STORAGE_CLASS_INPUT_FORMATTER_C static inline
#include "input_formatter_private.h"
#endif /* __INLINE_INPUT_FORMATTER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
index 182867367b48..3f02d9ec9588 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "input_system_local.h"
#ifndef __INLINE_INPUT_SYSTEM__
-#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_SYSTEM_H extern
#define STORAGE_CLASS_INPUT_SYSTEM_C
#include "input_system_public.h"
#else /* __INLINE_INPUT_SYSTEM__ */
-#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_INPUT_SYSTEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_SYSTEM_H static inline
+#define STORAGE_CLASS_INPUT_SYSTEM_C static inline
#include "input_system_private.h"
#endif /* __INLINE_INPUT_SYSTEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
index 1dc443892cc5..e1446388dee5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "irq_local.h"
#ifndef __INLINE_IRQ__
-#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IRQ_H extern
#define STORAGE_CLASS_IRQ_C
#include "irq_public.h"
#else /* __INLINE_IRQ__ */
-#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_IRQ_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IRQ_H static inline
+#define STORAGE_CLASS_IRQ_C static inline
#include "irq_private.h"
#endif /* __INLINE_IRQ__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
index 49190d0abc30..b916953e7f47 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isp_local.h"
#ifndef __INLINE_ISP__
-#define STORAGE_CLASS_ISP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISP_H extern
#define STORAGE_CLASS_ISP_C
#include "isp_public.h"
#else /* __INLINE_iSP__ */
-#define STORAGE_CLASS_ISP_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISP_H static inline
+#define STORAGE_CLASS_ISP_C static inline
#include "isp_private.h"
#endif /* __INLINE_ISP__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
index 9a608f07adcb..76aba114a5c1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isys_dma_local.h"
#ifndef __INLINE_ISYS2401_DMA__
-#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_DMA_H extern
#define STORAGE_CLASS_ISYS2401_DMA_C
#include "isys_dma_public.h"
#else /* __INLINE_ISYS2401_DMA__ */
-#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISYS2401_DMA_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_DMA_H static inline
+#define STORAGE_CLASS_ISYS2401_DMA_C static inline
#include "isys_dma_private.h"
#endif /* __INLINE_ISYS2401_DMA__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
index cf858bcc8e45..d3f64cfd0b7d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
@@ -16,21 +16,20 @@
#define __IA_CSS_ISYS_IRQ_H__
#include <type_support.h>
-#include <storage_class.h>
#include <system_local.h>
#if defined(USE_INPUT_SYSTEM_VERSION_2401)
#ifndef __INLINE_ISYS2401_IRQ__
-#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_IRQ_H extern
+#define STORAGE_CLASS_ISYS2401_IRQ_C extern
#include "isys_irq_public.h"
#else /* __INLINE_ISYS2401_IRQ__ */
-#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_IRQ_H static inline
+#define STORAGE_CLASS_ISYS2401_IRQ_C static inline
#include "isys_irq_private.h"
#endif /* __INLINE_ISYS2401_IRQ__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
index 3e8cfe555ad5..16fbf9d25eba 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isys_stream2mmio_local.h"
#ifndef __INLINE_STREAM2MMIO__
-#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM2MMIO_H extern
#define STORAGE_CLASS_STREAM2MMIO_C
#include "isys_stream2mmio_public.h"
#else /* __INLINE_STREAM2MMIO__ */
-#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_STREAM2MMIO_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM2MMIO_H static inline
+#define STORAGE_CLASS_STREAM2MMIO_C static inline
#include "isys_stream2mmio_private.h"
#endif /* __INLINE_STREAM2MMIO__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
index f74b405b0f39..6436dae0007e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
@@ -15,7 +15,6 @@
#ifndef __MATH_SUPPORT_H
#define __MATH_SUPPORT_H
-#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
#if defined(__KERNEL__)
#include <linux/kernel.h> /* Override the definition of max/min from linux kernel*/
#endif /*__KERNEL__*/
@@ -110,66 +109,66 @@ Leaving out the other math utility functions as they are newly added
#else /* !defined(INLINE_MATH_SUPPORT_UTILS) */
-STORAGE_CLASS_INLINE int max(int a, int b)
+static inline int max(int a, int b)
{
return MAX(a, b);
}
-STORAGE_CLASS_INLINE int min(int a, int b)
+static inline int min(int a, int b)
{
return MIN(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b)
+static inline unsigned int ceil_div(unsigned int a, unsigned int b)
{
return CEIL_DIV(a, b);
}
#endif /* !defined(INLINE_MATH_SUPPORT_UTILS) */
-STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b)
+static inline unsigned int umax(unsigned int a, unsigned int b)
{
return MAX(a, b);
}
-STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b)
+static inline unsigned int umin(unsigned int a, unsigned int b)
{
return MIN(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b)
+static inline unsigned int ceil_mul(unsigned int a, unsigned int b)
{
return CEIL_MUL(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b)
+static inline unsigned int ceil_mul2(unsigned int a, unsigned int b)
{
return CEIL_MUL2(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b)
+static inline unsigned int ceil_shift(unsigned int a, unsigned int b)
{
return CEIL_SHIFT(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
+static inline unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
{
return CEIL_SHIFT_MUL(a, b);
}
#ifdef ISP2401
-STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, unsigned int b)
+static inline unsigned int round_half_down_div(unsigned int a, unsigned int b)
{
return ROUND_HALF_DOWN_DIV(a, b);
}
-STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, unsigned int b)
+static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b)
{
return ROUND_HALF_DOWN_MUL(a, b);
}
#endif
-/** @brief Next Power of Two
+/* @brief Next Power of Two
*
* @param[in] unsigned number
*
@@ -187,7 +186,7 @@ STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, unsigned i
*
*/
-STORAGE_CLASS_INLINE unsigned int ceil_pow2(unsigned int a)
+static inline unsigned int ceil_pow2(unsigned int a)
{
if (a == 0) {
return 1;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
index 1b2017b029f2..519e850ec390 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "mmu_local.h"
#ifndef __INLINE_MMU__
-#define STORAGE_CLASS_MMU_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_MMU_H extern
#define STORAGE_CLASS_MMU_C
#include "mmu_public.h"
#else /* __INLINE_MMU__ */
-#define STORAGE_CLASS_MMU_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_MMU_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_MMU_H static inline
+#define STORAGE_CLASS_MMU_C static inline
#include "mmu_private.h"
#endif /* __INLINE_MMU__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
index 565983aafa4d..cd938375e02e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
@@ -15,14 +15,13 @@
#ifndef __MPMATH_H_INCLUDED__
#define __MPMATH_H_INCLUDED__
-#include "storage_class.h"
#ifdef INLINE_MPMATH
-#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_MPMATH_FUNC_H static inline
+#define STORAGE_CLASS_MPMATH_DATA_H static inline_DATA
#else /* INLINE_MPMATH */
-#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_MPMATH_FUNC_H extern
+#define STORAGE_CLASS_MPMATH_DATA_H extern_DATA
#endif /* INLINE_MPMATH */
#include <type_support.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
index 6e48ea9afc29..a607242c5f1a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "osys_local.h"
#ifndef __INLINE_OSYS__
-#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_OSYS_H extern
#define STORAGE_CLASS_OSYS_C
#include "osys_public.h"
#else /* __INLINE_OSYS__ */
-#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_OSYS_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_OSYS_H static inline
+#define STORAGE_CLASS_OSYS_C static inline
#include "osys_private.h"
#endif /* __INLINE_OSYS__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
index 67f7f3a14231..418d02382d76 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "pixelgen_local.h"
#ifndef __INLINE_PIXELGEN__
-#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_PIXELGEN_H extern
#define STORAGE_CLASS_PIXELGEN_C
#include "pixelgen_public.h"
#else /* __INLINE_PIXELGEN__ */
-#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_PIXELGEN_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_PIXELGEN_H static inline
+#define STORAGE_CLASS_PIXELGEN_C static inline
#include "pixelgen_private.h"
#endif /* __INLINE_PIXELGEN__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
index 02f9eee67ff3..39a125ba563d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
@@ -20,7 +20,6 @@
* Platform specific includes and functionality.
*/
-#include "storage_class.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
index cfbc222ea0c1..ca0fbbb57788 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
@@ -15,7 +15,6 @@
#ifndef __PRINT_SUPPORT_H_INCLUDED__
#define __PRINT_SUPPORT_H_INCLUDED__
-#include "storage_class.h"
#include <stdarg.h>
#if !defined(__KERNEL__)
@@ -24,7 +23,7 @@
extern int (*sh_css_printf) (const char *fmt, va_list args);
/* depends on host supplied print function in ia_css_init() */
-STORAGE_CLASS_INLINE void ia_css_print(const char *fmt, ...)
+static inline void ia_css_print(const char *fmt, ...)
{
va_list ap;
if (sh_css_printf) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
index a3d874b9516a..aa5fadf5aadb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
@@ -29,18 +29,17 @@
*
*/
-#include <storage_class.h>
#include "queue_local.h"
#ifndef __INLINE_QUEUE__
-#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_QUEUE_H extern
#define STORAGE_CLASS_QUEUE_C
/* #include "queue_public.h" */
#include "ia_css_queue.h"
#else /* __INLINE_QUEUE__ */
-#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_QUEUE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_QUEUE_H static inline
+#define STORAGE_CLASS_QUEUE_C static inline
#include "queue_private.h"
#endif /* __INLINE_QUEUE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
index 82c55acd0380..bd9f53e6b680 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "resource_local.h"
#ifndef __INLINE_RESOURCE__
-#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RESOURCE_H extern
#define STORAGE_CLASS_RESOURCE_C
#include "resource_public.h"
#else /* __INLINE_RESOURCE__ */
-#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_RESOURCE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RESOURCE_H static inline
+#define STORAGE_CLASS_RESOURCE_C static inline
#include "resource_private.h"
#endif /* __INLINE_RESOURCE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
index c34c2e75c51f..43cfb0cb4aa8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "socket_local.h"
#ifndef __INLINE_SOCKET__
-#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SOCKET_H extern
#define STORAGE_CLASS_SOCKET_C
#include "socket_public.h"
#else /* __INLINE_SOCKET__ */
-#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_SOCKET_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SOCKET_H static inline
+#define STORAGE_CLASS_SOCKET_C static inline
#include "socket_private.h"
#endif /* __INLINE_SOCKET__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
index 150fc2f6129b..8f57f2060791 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "sp_local.h"
#ifndef __INLINE_SP__
-#define STORAGE_CLASS_SP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SP_H extern
#define STORAGE_CLASS_SP_C
#include "sp_public.h"
#else /* __INLINE_SP__ */
-#define STORAGE_CLASS_SP_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_SP_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SP_H static inline
+#define STORAGE_CLASS_SP_C static inline
#include "sp_private.h"
#endif /* __INLINE_SP__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
deleted file mode 100644
index 3908e668dacd..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __STORAGE_CLASS_H_INCLUDED__
-#define __STORAGE_CLASS_H_INCLUDED__
-
-/**
-* @file
-* Platform specific includes and functionality.
-*/
-
-#define STORAGE_CLASS_EXTERN extern
-
-#if defined(_MSC_VER)
-#define STORAGE_CLASS_INLINE static __inline
-#else
-#define STORAGE_CLASS_INLINE static inline
-#endif
-
-#define STORAGE_CLASS_EXTERN_DATA extern const
-#define STORAGE_CLASS_INLINE_DATA static const
-
-#endif /* __STORAGE_CLASS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
index 8e41f60b5d39..53d535e4f2ae 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "stream_buffer_local.h"
#ifndef __INLINE_STREAM_BUFFER__
-#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM_BUFFER_H extern
#define STORAGE_CLASS_STREAM_BUFFER_C
#include "stream_buffer_public.h"
#else /* __INLINE_STREAM_BUFFER__ */
-#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_STREAM_BUFFER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM_BUFFER_H static inline
+#define STORAGE_CLASS_STREAM_BUFFER_C static inline
#include "stream_buffer_private.h"
#endif /* __INLINE_STREAM_BUFFER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
index c53241a7a281..f4d9674cdab6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
@@ -16,7 +16,6 @@
#define __STRING_SUPPORT_H_INCLUDED__
#include <platform_support.h>
#include <type_support.h>
-#include <storage_class.h>
#if !defined(_MSC_VER)
/*
@@ -24,7 +23,7 @@
*/
-/** @brief Copy from src_buf to dest_buf.
+/* @brief Copy from src_buf to dest_buf.
*
* @param[out] dest_buf. Destination buffer to copy to
* @param[in] dest_size. The size of the destination buffer in bytes
@@ -34,7 +33,7 @@
* @return EINVAL on Invalid arguments
* @return ERANGE on Destination size too small
*/
-STORAGE_CLASS_INLINE int memcpy_s(
+static inline int memcpy_s(
void* dest_buf,
size_t dest_size,
const void* src_buf,
@@ -54,7 +53,7 @@ STORAGE_CLASS_INLINE int memcpy_s(
return 0;
}
-/** @brief Get the length of the string, excluding the null terminator
+/* @brief Get the length of the string, excluding the null terminator
*
* @param[in] src_str. The source string
* @param[in] max_len. Look only for max_len bytes in the string
@@ -79,7 +78,7 @@ static size_t strnlen_s(
return ix;
}
-/** @brief Copy string from src_str to dest_str
+/* @brief Copy string from src_str to dest_str
*
* @param[out] dest_str. Destination buffer to copy to
* @param[in] dest_size. The size of the destination buffer in bytes
@@ -89,7 +88,7 @@ static size_t strnlen_s(
* @return Returns EINVAL on invalid arguments
* @return Returns ERANGE on destination size too small
*/
-STORAGE_CLASS_INLINE int strncpy_s(
+static inline int strncpy_s(
char* dest_str,
size_t dest_size,
const char* src_str,
@@ -121,7 +120,7 @@ STORAGE_CLASS_INLINE int strncpy_s(
return 0;
}
-/** @brief Copy string from src_str to dest_str
+/* @brief Copy string from src_str to dest_str
*
* @param[out] dest_str. Destination buffer to copy to
* @param[in] dest_size. The size of the destination buffer in bytes
@@ -130,7 +129,7 @@ STORAGE_CLASS_INLINE int strncpy_s(
* @return Returns EINVAL on invalid arguments
* @return Returns ERANGE on destination size too small
*/
-STORAGE_CLASS_INLINE int strcpy_s(
+static inline int strcpy_s(
char* dest_str,
size_t dest_size,
const char* src_str)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
index 7385fd11c95f..ace695643369 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
@@ -29,17 +29,16 @@
*
*/
-#include "storage_class.h"
#include "tag_local.h"
#ifndef __INLINE_TAG__
-#define STORAGE_CLASS_TAG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TAG_H extern
#define STORAGE_CLASS_TAG_C
#include "tag_public.h"
#else /* __INLINE_TAG__ */
-#define STORAGE_CLASS_TAG_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_TAG_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TAG_H static inline
+#define STORAGE_CLASS_TAG_C static inline
#include "tag_private.h"
#endif /* __INLINE_TAG__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
index ed13451c9261..f6bc1c47553f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "timed_ctrl_local.h"
#ifndef __INLINE_TIMED_CTRL__
-#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TIMED_CTRL_H extern
#define STORAGE_CLASS_TIMED_CTRL_C
#include "timed_ctrl_public.h"
#else /* __INLINE_TIMED_CTRL__ */
-#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_TIMED_CTRL_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TIMED_CTRL_H static inline
+#define STORAGE_CLASS_TIMED_CTRL_C static inline
#include "timed_ctrl_private.h"
#endif /* __INLINE_TIMED_CTRL__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
index b82fa3eba79f..bc77537fa73a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
@@ -30,27 +30,6 @@
#define IA_CSS_INT32_T_BITS 32
#define IA_CSS_UINT64_T_BITS 64
-#if defined(_MSC_VER)
-#include <stdint.h>
-/* For ATE compilation define the bool */
-#if defined(_ATE_)
-#define bool int
-#define true 1
-#define false 0
-#else
-#include <stdbool.h>
-#endif
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#if defined(_M_X64)
-#define HOST_ADDRESS(x) (unsigned long long)(x)
-#else
-#define HOST_ADDRESS(x) (unsigned long)(x)
-#endif
-
-#elif defined(__KERNEL__)
-
#define CHAR_BIT (8)
#include <linux/types.h>
@@ -58,25 +37,4 @@
#include <linux/errno.h>
#define HOST_ADDRESS(x) (unsigned long)(x)
-#elif defined(__GNUC__)
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS 1
-#endif
-#include <stdint.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#define HOST_ADDRESS(x) (unsigned long)(x)
-
-#else /* default is for the FIST environment */
-#include <stdint.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#define HOST_ADDRESS(x) (unsigned long)(x)
-
-#endif
-
#endif /* __TYPE_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
index acf932e1f563..82d447bf9704 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "vamem_local.h"
#ifndef __INLINE_VAMEM__
-#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VAMEM_H extern
#define STORAGE_CLASS_VAMEM_C
#include "vamem_public.h"
#else /* __INLINE_VAMEM__ */
-#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VAMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VAMEM_H static inline
+#define STORAGE_CLASS_VAMEM_C static inline
#include "vamem_private.h"
#endif /* __INLINE_VAMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
index 5d3be31759e4..5368b9062897 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
@@ -15,7 +15,6 @@
#ifndef __VECTOR_FUNC_H_INCLUDED__
#define __VECTOR_FUNC_H_INCLUDED__
-#include "storage_class.h"
/* TODO: Later filters will be moved to types directory,
* and we should only include matrix_MxN types */
@@ -27,12 +26,12 @@
#include "vector_func_local.h"
#ifndef __INLINE_VECTOR_FUNC__
-#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_FUNC_H extern
#define STORAGE_CLASS_VECTOR_FUNC_C
#include "vector_func_public.h"
#else /* __INLINE_VECTOR_FUNC__ */
-#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VECTOR_FUNC_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_FUNC_H static inline
+#define STORAGE_CLASS_VECTOR_FUNC_C static inline
#include "vector_func_private.h"
#endif /* __INLINE_VECTOR_FUNC__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
index 261f87378ce5..4923f2d5518b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
@@ -15,17 +15,16 @@
#ifndef __VECTOR_OPS_H_INCLUDED__
#define __VECTOR_OPS_H_INCLUDED__
-#include "storage_class.h"
#include "vector_ops_local.h"
#ifndef __INLINE_VECTOR_OPS__
-#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_OPS_H extern
#define STORAGE_CLASS_VECTOR_OPS_C
#include "vector_ops_public.h"
#else /* __INLINE_VECTOR_OPS__ */
-#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VECTOR_OPS_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_OPS_H static inline
+#define STORAGE_CLASS_VECTOR_OPS_C static inline
#include "vector_ops_private.h"
#endif /* __INLINE_VECTOR_OPS__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
index 79a36755bfd9..d3375729c441 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "vmem_local.h"
#ifndef __INLINE_VMEM__
-#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VMEM_H extern
#define STORAGE_CLASS_VMEM_C
#include "vmem_public.h"
#else /* __INLINE_VMEM__ */
-#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VMEM_H static inline
+#define STORAGE_CLASS_VMEM_C static inline
#include "vmem_private.h"
#endif /* __INLINE_VMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
index 9169e04f9b4b..13083fe55141 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "xmem_local.h"
#ifndef __INLINE_XMEM__
-#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_XMEM_H extern
#define STORAGE_CLASS_XMEM_C
#include "xmem_public.h"
#else /* __INLINE_XMEM__ */
-#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_XMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_XMEM_H static inline
+#define STORAGE_CLASS_XMEM_C static inline
#include "xmem_private.h"
#endif /* __INLINE_XMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
index 9aa8c168a803..2cf1d58941bf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
@@ -17,7 +17,7 @@
#include <assert_support.h>
#include "tag_local.h"
-/**
+/*
* @brief Creates the tag description from the given parameters.
* @param[in] num_captures
* @param[in] skip
@@ -39,7 +39,7 @@ sh_css_create_tag_descr(int num_captures,
tag_descr->exp_id = exp_id;
}
-/**
+/*
* @brief Encodes the members of tag description into a 32-bit value.
* @param[in] tag Pointer to the tag description
* @return (unsigned int) Encoded 32-bit tag-info
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
index 2458b3767c90..e44df6916d90 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
@@ -16,7 +16,7 @@
#ifndef _IA_CSS_H_
#define _IA_CSS_H_
-/** @file
+/* @file
* This file is the starting point of the CSS-API. It includes all CSS-API
* header files.
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
index a80a7dbaf712..080198796ad0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_3A_H
#define __IA_CSS_3A_H
-/** @file
+/* @file
* This file contains types used for 3A statistics
*/
@@ -31,7 +31,7 @@ enum ia_css_3a_tables {
IA_CSS_NUM_3A_TABLES
};
-/** Structure that holds 3A statistics in the ISP internal
+/* Structure that holds 3A statistics in the ISP internal
* format. Use ia_css_get_3a_statistics() to translate
* this to the format used on the host (3A library).
* */
@@ -48,13 +48,13 @@ struct ia_css_isp_3a_statistics {
struct {
ia_css_ptr rgby_tbl;
} data_hmem;
- uint32_t exp_id; /**< exposure id, to match statistics to a frame,
+ uint32_t exp_id; /** exposure id, to match statistics to a frame,
see ia_css_event_public.h for more detail. */
- uint32_t isp_config_id;/**< Unique ID to track which config was actually applied to a particular frame */
- ia_css_ptr data_ptr; /**< pointer to base of all data */
- uint32_t size; /**< total size of all data */
+ uint32_t isp_config_id;/** Unique ID to track which config was actually applied to a particular frame */
+ ia_css_ptr data_ptr; /** pointer to base of all data */
+ uint32_t size; /** total size of all data */
uint32_t dmem_size;
- uint32_t vmem_size; /**< both lo and hi have this size */
+ uint32_t vmem_size; /** both lo and hi have this size */
uint32_t hmem_size;
};
#define SIZE_OF_DMEM_STRUCT \
@@ -77,7 +77,7 @@ struct ia_css_isp_3a_statistics {
SIZE_OF_IA_CSS_PTR + \
4 * sizeof(uint32_t))
-/** Map with host-side pointers to ISP-format statistics.
+/* Map with host-side pointers to ISP-format statistics.
* These pointers can either be copies of ISP data or memory mapped
* ISP pointers.
* All of the data behind these pointers is allocated contiguously, the
@@ -85,17 +85,17 @@ struct ia_css_isp_3a_statistics {
* point into this one block of data.
*/
struct ia_css_isp_3a_statistics_map {
- void *data_ptr; /**< Pointer to start of memory */
+ void *data_ptr; /** Pointer to start of memory */
struct ia_css_3a_output *dmem_stats;
uint16_t *vmem_stats_hi;
uint16_t *vmem_stats_lo;
struct ia_css_bh_table *hmem_stats;
- uint32_t size; /**< total size in bytes of data_ptr */
- uint32_t data_allocated; /**< indicate whether data_ptr
+ uint32_t size; /** total size in bytes of data_ptr */
+ uint32_t data_allocated; /** indicate whether data_ptr
was allocated or not. */
};
-/** @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
+/* @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
* @param[out] host_stats Host buffer.
* @param[in] isp_stats ISP buffer.
* @return error value if temporary memory cannot be allocated
@@ -109,7 +109,7 @@ enum ia_css_err
ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
const struct ia_css_isp_3a_statistics *isp_stats);
-/** @brief Translate 3A statistics from ISP format to host format.
+/* @brief Translate 3A statistics from ISP format to host format.
* @param[out] host_stats host-format statistics
* @param[in] isp_stats ISP-format statistics
* @return None
@@ -125,35 +125,35 @@ ia_css_translate_3a_statistics(
/* Convenience functions for alloc/free of certain datatypes */
-/** @brief Allocate memory for the 3a statistics on the ISP
+/* @brief Allocate memory for the 3a statistics on the ISP
* @param[in] grid The grid.
* @return Pointer to the allocated 3a statistics buffer on the ISP
*/
struct ia_css_isp_3a_statistics *
ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
-/** @brief Free the 3a statistics memory on the isp
+/* @brief Free the 3a statistics memory on the isp
* @param[in] me Pointer to the 3a statistics buffer on the ISP.
* @return None
*/
void
ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me);
-/** @brief Allocate memory for the 3a statistics on the host
+/* @brief Allocate memory for the 3a statistics on the host
* @param[in] grid The grid.
* @return Pointer to the allocated 3a statistics buffer on the host
*/
struct ia_css_3a_statistics *
ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
-/** @brief Free the 3a statistics memory on the host
+/* @brief Free the 3a statistics memory on the host
* @param[in] me Pointer to the 3a statistics buffer on the host.
* @return None
*/
void
ia_css_3a_statistics_free(struct ia_css_3a_statistics *me);
-/** @brief Allocate a 3a statistics map structure
+/* @brief Allocate a 3a statistics map structure
* @param[in] isp_stats pointer to ISP 3a statistis struct
* @param[in] data_ptr host-side pointer to ISP 3a statistics.
* @return Pointer to the allocated 3a statistics map
@@ -174,7 +174,7 @@ ia_css_isp_3a_statistics_map_allocate(
const struct ia_css_isp_3a_statistics *isp_stats,
void *data_ptr);
-/** @brief Free the 3a statistics map
+/* @brief Free the 3a statistics map
* @param[in] me Pointer to the 3a statistics map
* @return None
*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
index a2a1873aca83..138bc3bb4627 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
@@ -15,7 +15,7 @@
#ifndef _IA_CSS_ACC_TYPES_H
#define _IA_CSS_ACC_TYPES_H
-/** @file
+/* @file
* This file contains types used for acceleration
*/
@@ -40,16 +40,16 @@
* in the kernel and HAL.
*/
-/** Type of acceleration.
+/* Type of acceleration.
*/
enum ia_css_acc_type {
- IA_CSS_ACC_NONE, /**< Normal binary */
- IA_CSS_ACC_OUTPUT, /**< Accelerator stage on output frame */
- IA_CSS_ACC_VIEWFINDER, /**< Accelerator stage on viewfinder frame */
- IA_CSS_ACC_STANDALONE, /**< Stand-alone acceleration */
+ IA_CSS_ACC_NONE, /** Normal binary */
+ IA_CSS_ACC_OUTPUT, /** Accelerator stage on output frame */
+ IA_CSS_ACC_VIEWFINDER, /** Accelerator stage on viewfinder frame */
+ IA_CSS_ACC_STANDALONE, /** Stand-alone acceleration */
};
-/** Cells types
+/* Cells types
*/
enum ia_css_cell_type {
IA_CSS_SP0 = 0,
@@ -58,45 +58,45 @@ enum ia_css_cell_type {
MAX_NUM_OF_CELLS
};
-/** Firmware types.
+/* Firmware types.
*/
enum ia_css_fw_type {
- ia_css_sp_firmware, /**< Firmware for the SP */
- ia_css_isp_firmware, /**< Firmware for the ISP */
- ia_css_bootloader_firmware, /**< Firmware for the BootLoader */
- ia_css_acc_firmware /**< Firmware for accelrations */
+ ia_css_sp_firmware, /** Firmware for the SP */
+ ia_css_isp_firmware, /** Firmware for the ISP */
+ ia_css_bootloader_firmware, /** Firmware for the BootLoader */
+ ia_css_acc_firmware /** Firmware for accelrations */
};
struct ia_css_blob_descr;
-/** Blob descriptor.
+/* Blob descriptor.
* This structure describes an SP or ISP blob.
* It describes the test, data and bss sections as well as position in a
* firmware file.
* For convenience, it contains dynamic data after loading.
*/
struct ia_css_blob_info {
- /**< Static blob data */
- uint32_t offset; /**< Blob offset in fw file */
- struct ia_css_isp_param_memory_offsets memory_offsets; /**< offset wrt hdr in bytes */
- uint32_t prog_name_offset; /**< offset wrt hdr in bytes */
- uint32_t size; /**< Size of blob */
- uint32_t padding_size; /**< total cummulative of bytes added due to section alignment */
- uint32_t icache_source; /**< Position of icache in blob */
- uint32_t icache_size; /**< Size of icache section */
- uint32_t icache_padding;/**< bytes added due to icache section alignment */
- uint32_t text_source; /**< Position of text in blob */
- uint32_t text_size; /**< Size of text section */
- uint32_t text_padding; /**< bytes added due to text section alignment */
- uint32_t data_source; /**< Position of data in blob */
- uint32_t data_target; /**< Start of data in SP dmem */
- uint32_t data_size; /**< Size of text section */
- uint32_t data_padding; /**< bytes added due to data section alignment */
- uint32_t bss_target; /**< Start position of bss in SP dmem */
- uint32_t bss_size; /**< Size of bss section */
- /**< Dynamic data filled by loader */
- CSS_ALIGN(const void *code, 8); /**< Code section absolute pointer within fw, code = icache + text */
- CSS_ALIGN(const void *data, 8); /**< Data section absolute pointer within fw, data = data + bss */
+ /** Static blob data */
+ uint32_t offset; /** Blob offset in fw file */
+ struct ia_css_isp_param_memory_offsets memory_offsets; /** offset wrt hdr in bytes */
+ uint32_t prog_name_offset; /** offset wrt hdr in bytes */
+ uint32_t size; /** Size of blob */
+ uint32_t padding_size; /** total cummulative of bytes added due to section alignment */
+ uint32_t icache_source; /** Position of icache in blob */
+ uint32_t icache_size; /** Size of icache section */
+ uint32_t icache_padding;/** bytes added due to icache section alignment */
+ uint32_t text_source; /** Position of text in blob */
+ uint32_t text_size; /** Size of text section */
+ uint32_t text_padding; /** bytes added due to text section alignment */
+ uint32_t data_source; /** Position of data in blob */
+ uint32_t data_target; /** Start of data in SP dmem */
+ uint32_t data_size; /** Size of text section */
+ uint32_t data_padding; /** bytes added due to data section alignment */
+ uint32_t bss_target; /** Start position of bss in SP dmem */
+ uint32_t bss_size; /** Size of bss section */
+ /** Dynamic data filled by loader */
+ CSS_ALIGN(const void *code, 8); /** Code section absolute pointer within fw, code = icache + text */
+ CSS_ALIGN(const void *data, 8); /** Data section absolute pointer within fw, data = data + bss */
};
struct ia_css_binary_input_info {
@@ -140,9 +140,9 @@ struct ia_css_binary_s3a_info {
uint32_t fixed_s3a_deci_log;
};
-/** DPC related binary info */
+/* DPC related binary info */
struct ia_css_binary_dpc_info {
- uint32_t bnr_lite; /**< bnr lite enable flag */
+ uint32_t bnr_lite; /** bnr lite enable flag */
};
struct ia_css_binary_iterator_info {
@@ -193,7 +193,7 @@ struct ia_css_binary_block_info {
uint32_t output_block_height;
};
-/** Structure describing an ISP binary.
+/* Structure describing an ISP binary.
* It describes the capabilities of a binary, like the maximum resolution,
* support features, dma channels, uds features, etc.
* This part is to be used by the SP.
@@ -210,7 +210,7 @@ struct ia_css_binary_info {
struct ia_css_binary_dvs_info dvs;
struct ia_css_binary_vf_dec_info vf_dec;
struct ia_css_binary_s3a_info s3a;
- struct ia_css_binary_dpc_info dpc_bnr; /**< DPC related binary info */
+ struct ia_css_binary_dpc_info dpc_bnr; /** DPC related binary info */
struct ia_css_binary_iterator_info iterator;
struct ia_css_binary_address_info addresses;
struct ia_css_binary_uds_info uds;
@@ -269,7 +269,7 @@ struct ia_css_binary_info {
} dma;
};
-/** Structure describing an ISP binary.
+/* Structure describing an ISP binary.
* It describes the capabilities of a binary, like the maximum resolution,
* support features, dma channels, uds features, etc.
*/
@@ -281,8 +281,8 @@ struct ia_css_binary_xinfo {
enum ia_css_acc_type type;
CSS_ALIGN(int32_t num_output_formats, 8);
enum ia_css_frame_format output_formats[IA_CSS_FRAME_FORMAT_NUM];
- CSS_ALIGN(int32_t num_vf_formats, 8); /**< number of supported vf formats */
- enum ia_css_frame_format vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /**< types of supported vf formats */
+ CSS_ALIGN(int32_t num_vf_formats, 8); /** number of supported vf formats */
+ enum ia_css_frame_format vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /** types of supported vf formats */
uint8_t num_output_pins;
ia_css_ptr xmem_addr;
CSS_ALIGN(const struct ia_css_blob_descr *blob, 8);
@@ -291,55 +291,55 @@ struct ia_css_binary_xinfo {
CSS_ALIGN(struct ia_css_binary_xinfo *next, 8);
};
-/** Structure describing the Bootloader (an ISP binary).
+/* Structure describing the Bootloader (an ISP binary).
* It contains several address, either in ddr, isp_dmem or
* the entry function in icache.
*/
struct ia_css_bl_info {
- uint32_t num_dma_cmds; /**< Number of cmds sent by CSS */
- uint32_t dma_cmd_list; /**< Dma command list sent by CSS */
- uint32_t sw_state; /**< Polled from css */
+ uint32_t num_dma_cmds; /** Number of cmds sent by CSS */
+ uint32_t dma_cmd_list; /** Dma command list sent by CSS */
+ uint32_t sw_state; /** Polled from css */
/* Entry functions */
- uint32_t bl_entry; /**< The SP entry function */
+ uint32_t bl_entry; /** The SP entry function */
};
-/** Structure describing the SP binary.
+/* Structure describing the SP binary.
* It contains several address, either in ddr, sp_dmem or
* the entry function in pmem.
*/
struct ia_css_sp_info {
- uint32_t init_dmem_data; /**< data sect config, stored to dmem */
- uint32_t per_frame_data; /**< Per frame data, stored to dmem */
- uint32_t group; /**< Per pipeline data, loaded by dma */
- uint32_t output; /**< SP output data, loaded by dmem */
- uint32_t host_sp_queue; /**< Host <-> SP queues */
- uint32_t host_sp_com;/**< Host <-> SP commands */
- uint32_t isp_started; /**< Polled from sensor thread, csim only */
- uint32_t sw_state; /**< Polled from css */
- uint32_t host_sp_queues_initialized; /**< Polled from the SP */
- uint32_t sleep_mode; /**< different mode to halt SP */
- uint32_t invalidate_tlb; /**< inform SP to invalidate mmu TLB */
+ uint32_t init_dmem_data; /** data sect config, stored to dmem */
+ uint32_t per_frame_data; /** Per frame data, stored to dmem */
+ uint32_t group; /** Per pipeline data, loaded by dma */
+ uint32_t output; /** SP output data, loaded by dmem */
+ uint32_t host_sp_queue; /** Host <-> SP queues */
+ uint32_t host_sp_com;/** Host <-> SP commands */
+ uint32_t isp_started; /** Polled from sensor thread, csim only */
+ uint32_t sw_state; /** Polled from css */
+ uint32_t host_sp_queues_initialized; /** Polled from the SP */
+ uint32_t sleep_mode; /** different mode to halt SP */
+ uint32_t invalidate_tlb; /** inform SP to invalidate mmu TLB */
#ifndef ISP2401
- uint32_t stop_copy_preview; /**< suspend copy and preview pipe when capture */
+ uint32_t stop_copy_preview; /** suspend copy and preview pipe when capture */
#endif
- uint32_t debug_buffer_ddr_address; /**< inform SP the address
+ uint32_t debug_buffer_ddr_address; /** inform SP the address
of DDR debug queue */
- uint32_t perf_counter_input_system_error; /**< input system perf
+ uint32_t perf_counter_input_system_error; /** input system perf
counter array */
#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
- uint32_t debug_wait; /**< thread/pipe post mortem debug */
- uint32_t debug_stage; /**< thread/pipe post mortem debug */
- uint32_t debug_stripe; /**< thread/pipe post mortem debug */
+ uint32_t debug_wait; /** thread/pipe post mortem debug */
+ uint32_t debug_stage; /** thread/pipe post mortem debug */
+ uint32_t debug_stripe; /** thread/pipe post mortem debug */
#endif
- uint32_t threads_stack; /**< sp thread's stack pointers */
- uint32_t threads_stack_size; /**< sp thread's stack sizes */
- uint32_t curr_binary_id; /**< current binary id */
- uint32_t raw_copy_line_count; /**< raw copy line counter */
- uint32_t ddr_parameter_address; /**< acc param ddrptr, sp dmem */
- uint32_t ddr_parameter_size; /**< acc param size, sp dmem */
+ uint32_t threads_stack; /** sp thread's stack pointers */
+ uint32_t threads_stack_size; /** sp thread's stack sizes */
+ uint32_t curr_binary_id; /** current binary id */
+ uint32_t raw_copy_line_count; /** raw copy line counter */
+ uint32_t ddr_parameter_address; /** acc param ddrptr, sp dmem */
+ uint32_t ddr_parameter_size; /** acc param size, sp dmem */
/* Entry functions */
- uint32_t sp_entry; /**< The SP entry function */
- uint32_t tagger_frames_addr; /**< Base address of tagger state */
+ uint32_t sp_entry; /** The SP entry function */
+ uint32_t tagger_frames_addr; /** Base address of tagger state */
};
/* The following #if is there because this header file is also included
@@ -348,37 +348,37 @@ struct ia_css_sp_info {
More permanent solution will be to refactor this include.
*/
#if !defined(__ISP)
-/** Accelerator firmware information.
+/* Accelerator firmware information.
*/
struct ia_css_acc_info {
- uint32_t per_frame_data; /**< Dummy for now */
+ uint32_t per_frame_data; /** Dummy for now */
};
-/** Firmware information.
+/* Firmware information.
*/
union ia_css_fw_union {
- struct ia_css_binary_xinfo isp; /**< ISP info */
- struct ia_css_sp_info sp; /**< SP info */
- struct ia_css_bl_info bl; /**< Bootloader info */
- struct ia_css_acc_info acc; /**< Accelerator info */
+ struct ia_css_binary_xinfo isp; /** ISP info */
+ struct ia_css_sp_info sp; /** SP info */
+ struct ia_css_bl_info bl; /** Bootloader info */
+ struct ia_css_acc_info acc; /** Accelerator info */
};
-/** Firmware information.
+/* Firmware information.
*/
struct ia_css_fw_info {
- size_t header_size; /**< size of fw header */
+ size_t header_size; /** size of fw header */
CSS_ALIGN(uint32_t type, 8);
- union ia_css_fw_union info; /**< Binary info */
- struct ia_css_blob_info blob; /**< Blob info */
+ union ia_css_fw_union info; /** Binary info */
+ struct ia_css_blob_info blob; /** Blob info */
/* Dynamic part */
struct ia_css_fw_info *next;
- CSS_ALIGN(uint32_t loaded, 8); /**< Firmware has been loaded */
- CSS_ALIGN(const uint8_t *isp_code, 8); /**< ISP pointer to code */
- /**< Firmware handle between user space and kernel */
+ CSS_ALIGN(uint32_t loaded, 8); /** Firmware has been loaded */
+ CSS_ALIGN(const uint8_t *isp_code, 8); /** ISP pointer to code */
+ /** Firmware handle between user space and kernel */
CSS_ALIGN(uint32_t handle, 8);
- /**< Sections to copy from/to ISP */
+ /** Sections to copy from/to ISP */
struct ia_css_isp_param_css_segments mem_initializers;
- /**< Initializer for local ISP memories */
+ /** Initializer for local ISP memories */
};
struct ia_css_blob_descr {
@@ -390,39 +390,39 @@ struct ia_css_blob_descr {
struct ia_css_acc_fw;
-/** Structure describing the SP binary of a stand-alone accelerator.
+/* Structure describing the SP binary of a stand-alone accelerator.
*/
struct ia_css_acc_sp {
- void (*init)(struct ia_css_acc_fw *); /**< init for crun */
- uint32_t sp_prog_name_offset; /**< program name offset wrt hdr in bytes */
- uint32_t sp_blob_offset; /**< blob offset wrt hdr in bytes */
- void *entry; /**< Address of sp entry point */
- uint32_t *css_abort; /**< SP dmem abort flag */
- void *isp_code; /**< SP dmem address holding xmem
+ void (*init)(struct ia_css_acc_fw *); /** init for crun */
+ uint32_t sp_prog_name_offset; /** program name offset wrt hdr in bytes */
+ uint32_t sp_blob_offset; /** blob offset wrt hdr in bytes */
+ void *entry; /** Address of sp entry point */
+ uint32_t *css_abort; /** SP dmem abort flag */
+ void *isp_code; /** SP dmem address holding xmem
address of isp code */
- struct ia_css_fw_info fw; /**< SP fw descriptor */
- const uint8_t *code; /**< ISP pointer of allocated SP code */
+ struct ia_css_fw_info fw; /** SP fw descriptor */
+ const uint8_t *code; /** ISP pointer of allocated SP code */
};
-/** Acceleration firmware descriptor.
+/* Acceleration firmware descriptor.
* This descriptor descibes either SP code (stand-alone), or
* ISP code (a separate pipeline stage).
*/
struct ia_css_acc_fw_hdr {
- enum ia_css_acc_type type; /**< Type of accelerator */
- uint32_t isp_prog_name_offset; /**< program name offset wrt
+ enum ia_css_acc_type type; /** Type of accelerator */
+ uint32_t isp_prog_name_offset; /** program name offset wrt
header in bytes */
- uint32_t isp_blob_offset; /**< blob offset wrt header
+ uint32_t isp_blob_offset; /** blob offset wrt header
in bytes */
- uint32_t isp_size; /**< Size of isp blob */
- const uint8_t *isp_code; /**< ISP pointer to code */
- struct ia_css_acc_sp sp; /**< Standalone sp code */
- /**< Firmware handle between user space and kernel */
+ uint32_t isp_size; /** Size of isp blob */
+ const uint8_t *isp_code; /** ISP pointer to code */
+ struct ia_css_acc_sp sp; /** Standalone sp code */
+ /** Firmware handle between user space and kernel */
uint32_t handle;
- struct ia_css_data parameters; /**< Current SP parameters */
+ struct ia_css_data parameters; /** Current SP parameters */
};
-/** Firmware structure.
+/* Firmware structure.
* This contains the header and actual blobs.
* For standalone, it contains SP and ISP blob.
* For a pipeline stage accelerator, it contains ISP code only.
@@ -430,7 +430,7 @@ struct ia_css_acc_fw_hdr {
* header and computed using the access macros below.
*/
struct ia_css_acc_fw {
- struct ia_css_acc_fw_hdr header; /**< firmware header */
+ struct ia_css_acc_fw_hdr header; /** firmware header */
/*
int8_t isp_progname[]; **< ISP program name
int8_t sp_progname[]; **< SP program name, stand-alone only
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
index b2ecf3618c15..a0058eac7d5a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_BUFFER_H
#define __IA_CSS_BUFFER_H
-/** @file
+/* @file
* This file contains datastructures and types for buffers used in CSS
*/
@@ -23,7 +23,7 @@
#include "ia_css_types.h"
#include "ia_css_timer.h"
-/** Enumeration of buffer types. Buffers can be queued and de-queued
+/* Enumeration of buffer types. Buffers can be queued and de-queued
* to hand them over between IA and ISP.
*/
enum ia_css_buffer_type {
@@ -48,28 +48,28 @@ enum ia_css_buffer_type {
/* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */
#if !defined(__ISP)
-/** Buffer structure. This is a container structure that enables content
+/* Buffer structure. This is a container structure that enables content
* independent buffer queues and access functions.
*/
struct ia_css_buffer {
- enum ia_css_buffer_type type; /**< Buffer type. */
+ enum ia_css_buffer_type type; /** Buffer type. */
unsigned int exp_id;
- /**< exposure id for this buffer; 0 = not available
+ /** exposure id for this buffer; 0 = not available
see ia_css_event_public.h for more detail. */
union {
- struct ia_css_isp_3a_statistics *stats_3a; /**< 3A statistics & optionally RGBY statistics. */
- struct ia_css_isp_dvs_statistics *stats_dvs; /**< DVS statistics. */
- struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /**< SKC DVS statistics. */
- struct ia_css_frame *frame; /**< Frame buffer. */
- struct ia_css_acc_param *custom_data; /**< Custom buffer. */
- struct ia_css_metadata *metadata; /**< Sensor metadata. */
- } data; /**< Buffer data pointer. */
- uint64_t driver_cookie; /**< cookie for the driver */
- struct ia_css_time_meas timing_data; /**< timing data (readings from the timer) */
- struct ia_css_clock_tick isys_eof_clock_tick; /**< ISYS's end of frame timer tick*/
+ struct ia_css_isp_3a_statistics *stats_3a; /** 3A statistics & optionally RGBY statistics. */
+ struct ia_css_isp_dvs_statistics *stats_dvs; /** DVS statistics. */
+ struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /** SKC DVS statistics. */
+ struct ia_css_frame *frame; /** Frame buffer. */
+ struct ia_css_acc_param *custom_data; /** Custom buffer. */
+ struct ia_css_metadata *metadata; /** Sensor metadata. */
+ } data; /** Buffer data pointer. */
+ uint64_t driver_cookie; /** cookie for the driver */
+ struct ia_css_time_meas timing_data; /** timing data (readings from the timer) */
+ struct ia_css_clock_tick isys_eof_clock_tick; /** ISYS's end of frame timer tick*/
};
-/** @brief Dequeue param buffers from sp2host_queue
+/* @brief Dequeue param buffers from sp2host_queue
*
* @return None
*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
index a15d3e368341..021a313fab85 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_CONTROL_H
#define __IA_CSS_CONTROL_H
-/** @file
+/* @file
* This file contains functionality for starting and controlling CSS
*/
@@ -24,7 +24,7 @@
#include <ia_css_firmware.h>
#include <ia_css_irq.h>
-/** @brief Initialize the CSS API.
+/* @brief Initialize the CSS API.
* @param[in] env Environment, provides functions to access the
* environment in which the CSS code runs. This is
* used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err ia_css_init(
uint32_t l1_base,
enum ia_css_irq_type irq_type);
-/** @brief Un-initialize the CSS API.
+/* @brief Un-initialize the CSS API.
* @return None
*
* This function deallocates all memory that has been allocated by the CSS API
@@ -66,7 +66,7 @@ enum ia_css_err ia_css_init(
void
ia_css_uninit(void);
-/** @brief Suspend CSS API for power down
+/* @brief Suspend CSS API for power down
* @return success or faulure code
*
* suspend shuts down the system by:
@@ -80,7 +80,7 @@ ia_css_uninit(void);
enum ia_css_err
ia_css_suspend(void);
-/** @brief Resume CSS API from power down
+/* @brief Resume CSS API from power down
* @return success or failure code
*
* After a power cycle, this function will bring the CSS API back into
@@ -91,7 +91,7 @@ ia_css_suspend(void);
enum ia_css_err
ia_css_resume(void);
-/** @brief Enable use of a separate queue for ISYS events.
+/* @brief Enable use of a separate queue for ISYS events.
*
* @param[in] enable: enable or disable use of separate ISYS event queues.
* @return error if called when SP is running.
@@ -105,7 +105,7 @@ ia_css_resume(void);
enum ia_css_err
ia_css_enable_isys_event_queue(bool enable);
-/** @brief Test whether the ISP has started.
+/* @brief Test whether the ISP has started.
*
* @return Boolean flag true if the ISP has started or false otherwise.
*
@@ -114,7 +114,7 @@ ia_css_enable_isys_event_queue(bool enable);
bool
ia_css_isp_has_started(void);
-/** @brief Test whether the SP has initialized.
+/* @brief Test whether the SP has initialized.
*
* @return Boolean flag true if the SP has initialized or false otherwise.
*
@@ -123,7 +123,7 @@ ia_css_isp_has_started(void);
bool
ia_css_sp_has_initialized(void);
-/** @brief Test whether the SP has terminated.
+/* @brief Test whether the SP has terminated.
*
* @return Boolean flag true if the SP has terminated or false otherwise.
*
@@ -132,7 +132,7 @@ ia_css_sp_has_initialized(void);
bool
ia_css_sp_has_terminated(void);
-/** @brief start SP hardware
+/* @brief start SP hardware
*
* @return IA_CSS_SUCCESS or error code upon error.
*
@@ -144,7 +144,7 @@ enum ia_css_err
ia_css_start_sp(void);
-/** @brief stop SP hardware
+/* @brief stop SP hardware
*
* @return IA_CSS_SUCCESS or error code upon error.
*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
index 59459f7a9876..84a960b7abbc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
@@ -15,7 +15,7 @@
#ifndef _IA_CSS_DEVICE_ACCESS_H
#define _IA_CSS_DEVICE_ACCESS_H
-/** @file
+/* @file
* File containing internal functions for the CSS-API to access the CSS device.
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
index 147bf81959d3..1f01534964e3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_DVS_H
#define __IA_CSS_DVS_H
-/** @file
+/* @file
* This file contains types for DVS statistics
*/
@@ -31,7 +31,7 @@ enum dvs_statistics_type {
};
-/** Structure that holds DVS statistics in the ISP internal
+/* Structure that holds DVS statistics in the ISP internal
* format. Use ia_css_get_dvs_statistics() to translate
* this to the format used on the host (DVS engine).
* */
@@ -40,12 +40,12 @@ struct ia_css_isp_dvs_statistics {
ia_css_ptr ver_proj;
uint32_t hor_size;
uint32_t ver_size;
- uint32_t exp_id; /**< see ia_css_event_public.h for more detail */
+ uint32_t exp_id; /** see ia_css_event_public.h for more detail */
ia_css_ptr data_ptr; /* base pointer containing all memory */
uint32_t size; /* size of allocated memory in data_ptr */
};
-/** Structure that holds SKC DVS statistics in the ISP internal
+/* Structure that holds SKC DVS statistics in the ISP internal
* format. Use ia_css_dvs_statistics_get() to translate this to
* the format used on the host.
* */
@@ -82,7 +82,7 @@ union ia_css_dvs_statistics_host {
struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host;
};
-/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
* @param[in] host_stats Host buffer
* @param[in] isp_stats ISP buffer
* @return error value if temporary memory cannot be allocated
@@ -100,7 +100,7 @@ enum ia_css_err
ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats,
const struct ia_css_isp_dvs_statistics *isp_stats);
-/** @brief Translate DVS statistics from ISP format to host format
+/* @brief Translate DVS statistics from ISP format to host format
* @param[in] host_stats Host buffer
* @param[in] isp_stats ISP buffer
* @return None
@@ -116,7 +116,7 @@ ia_css_translate_dvs_statistics(
struct ia_css_dvs_statistics *host_stats,
const struct ia_css_isp_dvs_statistics_map *isp_stats);
-/** @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
* @param[in] host_stats Host buffer
* @param[in] isp_stats ISP buffer
* @return error value if temporary memory cannot be allocated
@@ -134,7 +134,7 @@ enum ia_css_err
ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats,
const struct ia_css_isp_dvs_statistics *isp_stats);
-/** @brief Translate DVS2 statistics from ISP format to host format
+/* @brief Translate DVS2 statistics from ISP format to host format
* @param[in] host_stats Host buffer
* @param[in] isp_stats ISP buffer
* @return None
@@ -150,7 +150,7 @@ ia_css_translate_dvs2_statistics(
struct ia_css_dvs2_statistics *host_stats,
const struct ia_css_isp_dvs_statistics_map *isp_stats);
-/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
* @param[in] type - DVS statistics type
* @param[in] host_stats Host buffer
* @param[in] isp_stats ISP buffer
@@ -161,105 +161,105 @@ ia_css_dvs_statistics_get(enum dvs_statistics_type type,
union ia_css_dvs_statistics_host *host_stats,
const union ia_css_dvs_statistics_isp *isp_stats);
-/** @brief Allocate the DVS statistics memory on the ISP
+/* @brief Allocate the DVS statistics memory on the ISP
* @param[in] grid The grid.
* @return Pointer to the allocated DVS statistics buffer on the ISP
*/
struct ia_css_isp_dvs_statistics *
ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
-/** @brief Free the DVS statistics memory on the ISP
+/* @brief Free the DVS statistics memory on the ISP
* @param[in] me Pointer to the DVS statistics buffer on the ISP.
* @return None
*/
void
ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me);
-/** @brief Allocate the DVS 2.0 statistics memory
+/* @brief Allocate the DVS 2.0 statistics memory
* @param[in] grid The grid.
* @return Pointer to the allocated DVS statistics buffer on the ISP
*/
struct ia_css_isp_dvs_statistics *
ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
-/** @brief Free the DVS 2.0 statistics memory
+/* @brief Free the DVS 2.0 statistics memory
* @param[in] me Pointer to the DVS statistics buffer on the ISP.
* @return None
*/
void
ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me);
-/** @brief Allocate the DVS statistics memory on the host
+/* @brief Allocate the DVS statistics memory on the host
* @param[in] grid The grid.
* @return Pointer to the allocated DVS statistics buffer on the host
*/
struct ia_css_dvs_statistics *
ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
-/** @brief Free the DVS statistics memory on the host
+/* @brief Free the DVS statistics memory on the host
* @param[in] me Pointer to the DVS statistics buffer on the host.
* @return None
*/
void
ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me);
-/** @brief Allocate the DVS coefficients memory
+/* @brief Allocate the DVS coefficients memory
* @param[in] grid The grid.
* @return Pointer to the allocated DVS coefficients buffer
*/
struct ia_css_dvs_coefficients *
ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
-/** @brief Free the DVS coefficients memory
+/* @brief Free the DVS coefficients memory
* @param[in] me Pointer to the DVS coefficients buffer.
* @return None
*/
void
ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me);
-/** @brief Allocate the DVS 2.0 statistics memory on the host
+/* @brief Allocate the DVS 2.0 statistics memory on the host
* @param[in] grid The grid.
* @return Pointer to the allocated DVS 2.0 statistics buffer on the host
*/
struct ia_css_dvs2_statistics *
ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
-/** @brief Free the DVS 2.0 statistics memory
+/* @brief Free the DVS 2.0 statistics memory
* @param[in] me Pointer to the DVS 2.0 statistics buffer on the host.
* @return None
*/
void
ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me);
-/** @brief Allocate the DVS 2.0 coefficients memory
+/* @brief Allocate the DVS 2.0 coefficients memory
* @param[in] grid The grid.
* @return Pointer to the allocated DVS 2.0 coefficients buffer
*/
struct ia_css_dvs2_coefficients *
ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
-/** @brief Free the DVS 2.0 coefficients memory
+/* @brief Free the DVS 2.0 coefficients memory
* @param[in] me Pointer to the DVS 2.0 coefficients buffer.
* @return None
*/
void
ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me);
-/** @brief Allocate the DVS 2.0 6-axis config memory
+/* @brief Allocate the DVS 2.0 6-axis config memory
* @param[in] stream The stream.
* @return Pointer to the allocated DVS 6axis configuration buffer
*/
struct ia_css_dvs_6axis_config *
ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream);
-/** @brief Free the DVS 2.0 6-axis config memory
+/* @brief Free the DVS 2.0 6-axis config memory
* @param[in] dvs_6axis_config Pointer to the DVS 6axis configuration buffer
* @return None
*/
void
ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config);
-/** @brief Allocate a dvs statistics map structure
+/* @brief Allocate a dvs statistics map structure
* @param[in] isp_stats pointer to ISP dvs statistis struct
* @param[in] data_ptr host-side pointer to ISP dvs statistics.
* @return Pointer to the allocated dvs statistics map
@@ -280,7 +280,7 @@ ia_css_isp_dvs_statistics_map_allocate(
const struct ia_css_isp_dvs_statistics *isp_stats,
void *data_ptr);
-/** @brief Free the dvs statistics map
+/* @brief Free the dvs statistics map
* @param[in] me Pointer to the dvs statistics map
* @return None
*
@@ -291,7 +291,7 @@ ia_css_isp_dvs_statistics_map_allocate(
void
ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me);
-/** @brief Allocate memory for the SKC DVS statistics on the ISP
+/* @brief Allocate memory for the SKC DVS statistics on the ISP
* @return Pointer to the allocated ACC DVS statistics buffer on the ISP
*/
struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
index 1ae9daf0be76..8b0218ee658d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
@@ -20,12 +20,12 @@
#include "ia_css_types.h"
#include "ia_css_acc_types.h"
-/** @file
+/* @file
* This file contains prototypes for functions that need to be provided to the
* CSS-API host-code by the environment in which the CSS-API code runs.
*/
-/** Memory allocation attributes, for use in ia_css_css_mem_env. */
+/* Memory allocation attributes, for use in ia_css_css_mem_env. */
enum ia_css_mem_attr {
IA_CSS_MEM_ATTR_CACHED = 1 << 0,
IA_CSS_MEM_ATTR_ZEROED = 1 << 1,
@@ -33,62 +33,62 @@ enum ia_css_mem_attr {
IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3,
};
-/** Environment with function pointers for local IA memory allocation.
+/* Environment with function pointers for local IA memory allocation.
* This provides the CSS code with environment specific functionality
* for memory allocation of small local buffers such as local data structures.
* This is never expected to allocate more than one page of memory (4K bytes).
*/
struct ia_css_cpu_mem_env {
void (*flush)(struct ia_css_acc_fw *fw);
- /**< Flush function to flush the cache for given accelerator. */
+ /** Flush function to flush the cache for given accelerator. */
};
-/** Environment with function pointers to access the CSS hardware. This includes
+/* Environment with function pointers to access the CSS hardware. This includes
* registers and local memories.
*/
struct ia_css_hw_access_env {
void (*store_8)(hrt_address addr, uint8_t data);
- /**< Store an 8 bit value into an address in the CSS HW address space.
+ /** Store an 8 bit value into an address in the CSS HW address space.
The address must be an 8 bit aligned address. */
void (*store_16)(hrt_address addr, uint16_t data);
- /**< Store a 16 bit value into an address in the CSS HW address space.
+ /** Store a 16 bit value into an address in the CSS HW address space.
The address must be a 16 bit aligned address. */
void (*store_32)(hrt_address addr, uint32_t data);
- /**< Store a 32 bit value into an address in the CSS HW address space.
+ /** Store a 32 bit value into an address in the CSS HW address space.
The address must be a 32 bit aligned address. */
uint8_t (*load_8)(hrt_address addr);
- /**< Load an 8 bit value from an address in the CSS HW address
+ /** Load an 8 bit value from an address in the CSS HW address
space. The address must be an 8 bit aligned address. */
uint16_t (*load_16)(hrt_address addr);
- /**< Load a 16 bit value from an address in the CSS HW address
+ /** Load a 16 bit value from an address in the CSS HW address
space. The address must be a 16 bit aligned address. */
uint32_t (*load_32)(hrt_address addr);
- /**< Load a 32 bit value from an address in the CSS HW address
+ /** Load a 32 bit value from an address in the CSS HW address
space. The address must be a 32 bit aligned address. */
void (*store)(hrt_address addr, const void *data, uint32_t bytes);
- /**< Store a number of bytes into a byte-aligned address in the CSS HW address space. */
+ /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */
void (*load)(hrt_address addr, void *data, uint32_t bytes);
- /**< Load a number of bytes from a byte-aligned address in the CSS HW address space. */
+ /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
};
-/** Environment with function pointers to print error and debug messages.
+/* Environment with function pointers to print error and debug messages.
*/
struct ia_css_print_env {
int (*debug_print)(const char *fmt, va_list args);
- /**< Print a debug message. */
+ /** Print a debug message. */
int (*error_print)(const char *fmt, va_list args);
- /**< Print an error message.*/
+ /** Print an error message.*/
};
-/** Environment structure. This includes function pointers to access several
+/* Environment structure. This includes function pointers to access several
* features provided by the environment in which the CSS API is used.
* This is used to run the camera IP in multiple platforms such as Linux,
* Windows and several simulation environments.
*/
struct ia_css_env {
- struct ia_css_cpu_mem_env cpu_mem_env; /**< local flush. */
- struct ia_css_hw_access_env hw_access_env; /**< CSS HW access functions */
- struct ia_css_print_env print_env; /**< Message printing env. */
+ struct ia_css_cpu_mem_env cpu_mem_env; /** local flush. */
+ struct ia_css_hw_access_env hw_access_env; /** CSS HW access functions */
+ struct ia_css_print_env print_env; /** Message printing env. */
};
#endif /* __IA_CSS_ENV_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
index 572e4e55c69e..cf895815ea31 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
@@ -15,12 +15,12 @@
#ifndef __IA_CSS_ERR_H
#define __IA_CSS_ERR_H
-/** @file
+/* @file
* This file contains possible return values for most
* functions in the CSS-API.
*/
-/** Errors, these values are used as the return value for most
+/* Errors, these values are used as the return value for most
* functions in this API.
*/
enum ia_css_err {
@@ -41,22 +41,22 @@ enum ia_css_err {
IA_CSS_ERR_NOT_SUPPORTED
};
-/** FW warnings. This enum contains a value for each warning that
+/* FW warnings. This enum contains a value for each warning that
* the SP FW could indicate potential performance issue
*/
enum ia_css_fw_warning {
IA_CSS_FW_WARNING_NONE,
- IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the ISys queue.
+ IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the ISys queue.
This warning can be avoided by de-queing ISYS buffers more timely. */
- IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the PSys queue.
+ IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the PSys queue.
This warning can be avoided by de-queing PSYS buffers more timely. */
- IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /** < CSS system delayed because of insufficient available buffers.
+ IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /* < CSS system delayed because of insufficient available buffers.
This warning can be avoided by unlocking locked frame-buffers more timely. */
- IA_CSS_FW_WARNING_EXP_ID_LOCKED, /** < Exposure ID skipped because the frame associated to it was still locked.
+ IA_CSS_FW_WARNING_EXP_ID_LOCKED, /* < Exposure ID skipped because the frame associated to it was still locked.
This warning can be avoided by unlocking locked frame-buffers more timely. */
- IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /** < Exposure ID cannot be found on the circular buffer.
+ IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /* < Exposure ID cannot be found on the circular buffer.
This warning can be avoided by unlocking locked frame-buffers more timely. */
- IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /** < Frame and param pair mismatched in tagger.
+ IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /* < Frame and param pair mismatched in tagger.
This warning can be avoided by providing a param set for each frame. */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
index aaf349772abe..036a2f03d3bd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_EVENT_PUBLIC_H
#define __IA_CSS_EVENT_PUBLIC_H
-/** @file
+/* @file
* This file contains CSS-API events functionality
*/
@@ -24,7 +24,7 @@
#include <ia_css_types.h> /* ia_css_pipe */
#include <ia_css_timer.h> /* ia_css_timer */
-/** The event type, distinguishes the kind of events that
+/* The event type, distinguishes the kind of events that
* can are generated by the CSS system.
*
* !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
@@ -35,43 +35,43 @@
*/
enum ia_css_event_type {
IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = 1 << 0,
- /**< Output frame ready. */
+ /** Output frame ready. */
IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = 1 << 1,
- /**< Second output frame ready. */
+ /** Second output frame ready. */
IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = 1 << 2,
- /**< Viewfinder Output frame ready. */
+ /** Viewfinder Output frame ready. */
IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = 1 << 3,
- /**< Second viewfinder Output frame ready. */
+ /** Second viewfinder Output frame ready. */
IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = 1 << 4,
- /**< Indication that 3A statistics are available. */
+ /** Indication that 3A statistics are available. */
IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = 1 << 5,
- /**< Indication that DIS statistics are available. */
+ /** Indication that DIS statistics are available. */
IA_CSS_EVENT_TYPE_PIPELINE_DONE = 1 << 6,
- /**< Pipeline Done event, sent after last pipeline stage. */
+ /** Pipeline Done event, sent after last pipeline stage. */
IA_CSS_EVENT_TYPE_FRAME_TAGGED = 1 << 7,
- /**< Frame tagged. */
+ /** Frame tagged. */
IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = 1 << 8,
- /**< Input frame ready. */
+ /** Input frame ready. */
IA_CSS_EVENT_TYPE_METADATA_DONE = 1 << 9,
- /**< Metadata ready. */
+ /** Metadata ready. */
IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = 1 << 10,
- /**< Indication that LACE statistics are available. */
+ /** Indication that LACE statistics are available. */
IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = 1 << 11,
- /**< Extension stage complete. */
+ /** Extension stage complete. */
IA_CSS_EVENT_TYPE_TIMER = 1 << 12,
- /**< Timer event for measuring the SP side latencies. It contains the
+ /** Timer event for measuring the SP side latencies. It contains the
32-bit timer value from the SP */
IA_CSS_EVENT_TYPE_PORT_EOF = 1 << 13,
- /**< End Of Frame event, sent when in buffered sensor mode. */
+ /** End Of Frame event, sent when in buffered sensor mode. */
IA_CSS_EVENT_TYPE_FW_WARNING = 1 << 14,
- /**< Performance warning encounter by FW */
+ /** Performance warning encounter by FW */
IA_CSS_EVENT_TYPE_FW_ASSERT = 1 << 15,
- /**< Assertion hit by FW */
+ /** Assertion hit by FW */
};
#define IA_CSS_EVENT_TYPE_NONE 0
-/** IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
+/* IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
* The other events (such as PORT_EOF) cannot be enabled/disabled
* and are hence excluded from this macro.
*/
@@ -89,7 +89,7 @@ enum ia_css_event_type {
IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE | \
IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE)
-/** The event struct, container for the event type and its related values.
+/* The event struct, container for the event type and its related values.
* Depending on the event type, either pipe or port will be filled.
* Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle.
* For non pipeline related events (but i.e. stream specific, like EOF event), the port will be
@@ -97,14 +97,14 @@ enum ia_css_event_type {
*/
struct ia_css_event {
struct ia_css_pipe *pipe;
- /**< Pipe handle on which event happened, NULL for non pipe related
+ /** Pipe handle on which event happened, NULL for non pipe related
events. */
enum ia_css_event_type type;
- /**< Type of Event, always valid/filled. */
+ /** Type of Event, always valid/filled. */
uint8_t port;
- /**< Port number for EOF event (not valid for other events). */
+ /** Port number for EOF event (not valid for other events). */
uint8_t exp_id;
- /**< Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
+ /** Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
The exposure ID is unique only within a logical stream and it is
only generated on systems that have an input system (such as 2400
and 2401).
@@ -120,26 +120,26 @@ struct ia_css_event {
in the exposure IDs. Therefor applications should not use this
to detect frame drops. */
uint32_t fw_handle;
- /**< Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
+ /** Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
events). */
enum ia_css_fw_warning fw_warning;
- /**< Firmware warning code, only for WARNING events. */
+ /** Firmware warning code, only for WARNING events. */
uint8_t fw_assert_module_id;
- /**< Firmware module id, only for ASSERT events, should be logged by driver. */
+ /** Firmware module id, only for ASSERT events, should be logged by driver. */
uint16_t fw_assert_line_no;
- /**< Firmware line number, only for ASSERT events, should be logged by driver. */
+ /** Firmware line number, only for ASSERT events, should be logged by driver. */
clock_value_t timer_data;
- /**< For storing the full 32-bit of the timer value. Valid only for TIMER
+ /** For storing the full 32-bit of the timer value. Valid only for TIMER
event */
uint8_t timer_code;
- /**< For storing the code of the TIMER event. Valid only for
+ /** For storing the code of the TIMER event. Valid only for
TIMER event */
uint8_t timer_subcode;
- /**< For storing the subcode of the TIMER event. Valid only
+ /** For storing the subcode of the TIMER event. Valid only
for TIMER event */
};
-/** @brief Dequeue a PSYS event from the CSS system.
+/* @brief Dequeue a PSYS event from the CSS system.
*
* @param[out] event Pointer to the event struct which will be filled by
* this function if an event is available.
@@ -156,7 +156,7 @@ struct ia_css_event {
enum ia_css_err
ia_css_dequeue_psys_event(struct ia_css_event *event);
-/** @brief Dequeue an event from the CSS system.
+/* @brief Dequeue an event from the CSS system.
*
* @param[out] event Pointer to the event struct which will be filled by
* this function if an event is available.
@@ -171,7 +171,7 @@ ia_css_dequeue_psys_event(struct ia_css_event *event);
enum ia_css_err
ia_css_dequeue_event(struct ia_css_event *event);
-/** @brief Dequeue an ISYS event from the CSS system.
+/* @brief Dequeue an ISYS event from the CSS system.
*
* @param[out] event Pointer to the event struct which will be filled by
* this function if an event is available.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
index 06d375a09be2..d7d7f0a995e5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
@@ -15,21 +15,21 @@
#ifndef __IA_CSS_FIRMWARE_H
#define __IA_CSS_FIRMWARE_H
-/** @file
+/* @file
* This file contains firmware loading/unloading support functionality
*/
#include "ia_css_err.h"
#include "ia_css_env.h"
-/** CSS firmware package structure.
+/* CSS firmware package structure.
*/
struct ia_css_fw {
- void *data; /**< pointer to the firmware data */
- unsigned int bytes; /**< length in bytes of firmware data */
+ void *data; /** pointer to the firmware data */
+ unsigned int bytes; /** length in bytes of firmware data */
};
-/** @brief Loads the firmware
+/* @brief Loads the firmware
* @param[in] env Environment, provides functions to access the
* environment in which the CSS code runs. This is
* used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err
ia_css_load_firmware(const struct ia_css_env *env,
const struct ia_css_fw *fw);
-/** @brief Unloads the firmware
+/* @brief Unloads the firmware
* @return None
*
* This function unloads the firmware loaded by ia_css_load_firmware.
@@ -61,7 +61,7 @@ ia_css_load_firmware(const struct ia_css_env *env,
void
ia_css_unload_firmware(void);
-/** @brief Checks firmware version
+/* @brief Checks firmware version
* @param[in] fw Firmware package containing the firmware for all
* predefined ISP binaries.
* @return Returns true when the firmware version matches with the CSS
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
index da9c60144c6d..e5ffc579aef1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
@@ -15,7 +15,7 @@
#ifndef _IA_CSS_FRAC_H
#define _IA_CSS_FRAC_H
-/** @file
+/* @file
* This file contains typedefs used for fractional numbers
*/
@@ -25,13 +25,13 @@
* NOTE: the 16 bit fixed point types actually occupy 32 bits
* to save on extension operations in the ISP code.
*/
-/** Unsigned fixed point value, 0 integer bits, 16 fractional bits */
+/* Unsigned fixed point value, 0 integer bits, 16 fractional bits */
typedef uint32_t ia_css_u0_16;
-/** Unsigned fixed point value, 5 integer bits, 11 fractional bits */
+/* Unsigned fixed point value, 5 integer bits, 11 fractional bits */
typedef uint32_t ia_css_u5_11;
-/** Unsigned fixed point value, 8 integer bits, 8 fractional bits */
+/* Unsigned fixed point value, 8 integer bits, 8 fractional bits */
typedef uint32_t ia_css_u8_8;
-/** Signed fixed point value, 0 integer bits, 15 fractional bits */
+/* Signed fixed point value, 0 integer bits, 15 fractional bits */
typedef int32_t ia_css_s0_15;
#endif /* _IA_CSS_FRAC_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
index d534fbd91380..2f177edc36ac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_FRAME_FORMAT_H
#define __IA_CSS_FRAME_FORMAT_H
-/** @file
+/* @file
* This file contains information about formats supported in the ISP
*/
-/** Frame formats, some of these come from fourcc.org, others are
+/* Frame formats, some of these come from fourcc.org, others are
better explained by video4linux2. The NV11 seems to be described only
on MSDN pages, but even those seem to be gone now.
Frames can come in many forms, the main categories are RAW, RGB and YUV
@@ -48,45 +48,45 @@
- css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h
*/
enum ia_css_frame_format {
- IA_CSS_FRAME_FORMAT_NV11 = 0, /**< 12 bit YUV 411, Y, UV plane */
- IA_CSS_FRAME_FORMAT_NV12, /**< 12 bit YUV 420, Y, UV plane */
- IA_CSS_FRAME_FORMAT_NV12_16, /**< 16 bit YUV 420, Y, UV plane */
- IA_CSS_FRAME_FORMAT_NV12_TILEY, /**< 12 bit YUV 420, Intel proprietary tiled format, TileY */
- IA_CSS_FRAME_FORMAT_NV16, /**< 16 bit YUV 422, Y, UV plane */
- IA_CSS_FRAME_FORMAT_NV21, /**< 12 bit YUV 420, Y, VU plane */
- IA_CSS_FRAME_FORMAT_NV61, /**< 16 bit YUV 422, Y, VU plane */
- IA_CSS_FRAME_FORMAT_YV12, /**< 12 bit YUV 420, Y, V, U plane */
- IA_CSS_FRAME_FORMAT_YV16, /**< 16 bit YUV 422, Y, V, U plane */
- IA_CSS_FRAME_FORMAT_YUV420, /**< 12 bit YUV 420, Y, U, V plane */
- IA_CSS_FRAME_FORMAT_YUV420_16, /**< yuv420, 16 bits per subpixel */
- IA_CSS_FRAME_FORMAT_YUV422, /**< 16 bit YUV 422, Y, U, V plane */
- IA_CSS_FRAME_FORMAT_YUV422_16, /**< yuv422, 16 bits per subpixel */
- IA_CSS_FRAME_FORMAT_UYVY, /**< 16 bit YUV 422, UYVY interleaved */
- IA_CSS_FRAME_FORMAT_YUYV, /**< 16 bit YUV 422, YUYV interleaved */
- IA_CSS_FRAME_FORMAT_YUV444, /**< 24 bit YUV 444, Y, U, V plane */
- IA_CSS_FRAME_FORMAT_YUV_LINE, /**< Internal format, 2 y lines followed
+ IA_CSS_FRAME_FORMAT_NV11 = 0, /** 12 bit YUV 411, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12, /** 12 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_16, /** 16 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_TILEY, /** 12 bit YUV 420, Intel proprietary tiled format, TileY */
+ IA_CSS_FRAME_FORMAT_NV16, /** 16 bit YUV 422, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV21, /** 12 bit YUV 420, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_NV61, /** 16 bit YUV 422, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_YV12, /** 12 bit YUV 420, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YV16, /** 16 bit YUV 422, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YUV420, /** 12 bit YUV 420, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV420_16, /** yuv420, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_YUV422, /** 16 bit YUV 422, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV422_16, /** yuv422, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_UYVY, /** 16 bit YUV 422, UYVY interleaved */
+ IA_CSS_FRAME_FORMAT_YUYV, /** 16 bit YUV 422, YUYV interleaved */
+ IA_CSS_FRAME_FORMAT_YUV444, /** 24 bit YUV 444, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV_LINE, /** Internal format, 2 y lines followed
by a uvinterleaved line */
- IA_CSS_FRAME_FORMAT_RAW, /**< RAW, 1 plane */
- IA_CSS_FRAME_FORMAT_RGB565, /**< 16 bit RGB, 1 plane. Each 3 sub
+ IA_CSS_FRAME_FORMAT_RAW, /** RAW, 1 plane */
+ IA_CSS_FRAME_FORMAT_RGB565, /** 16 bit RGB, 1 plane. Each 3 sub
pixels are packed into one 16 bit
value, 5 bits for R, 6 bits for G
and 5 bits for B. */
- IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /**< 24 bit RGB, 3 planes */
- IA_CSS_FRAME_FORMAT_RGBA888, /**< 32 bit RGBA, 1 plane, A=Alpha
+ IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /** 24 bit RGB, 3 planes */
+ IA_CSS_FRAME_FORMAT_RGBA888, /** 32 bit RGBA, 1 plane, A=Alpha
(alpha is unused) */
- IA_CSS_FRAME_FORMAT_QPLANE6, /**< Internal, for advanced ISP */
- IA_CSS_FRAME_FORMAT_BINARY_8, /**< byte stream, used for jpeg. For
+ IA_CSS_FRAME_FORMAT_QPLANE6, /** Internal, for advanced ISP */
+ IA_CSS_FRAME_FORMAT_BINARY_8, /** byte stream, used for jpeg. For
frames of this type, we set the
height to 1 and the width to the
number of allocated bytes. */
- IA_CSS_FRAME_FORMAT_MIPI, /**< MIPI frame, 1 plane */
- IA_CSS_FRAME_FORMAT_RAW_PACKED, /**< RAW, 1 plane, packed */
- IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /**< 8 bit per Y/U/V.
+ IA_CSS_FRAME_FORMAT_MIPI, /** MIPI frame, 1 plane */
+ IA_CSS_FRAME_FORMAT_RAW_PACKED, /** RAW, 1 plane, packed */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /** 8 bit per Y/U/V.
Y odd line; UYVY
interleaved even line */
- IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /**< Legacy YUV420. UY odd
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /** Legacy YUV420. UY odd
line; VY even line */
- IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /**< 10 bit per Y/U/V. Y odd
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /** 10 bit per Y/U/V. Y odd
line; UYVY interleaved
even line */
};
@@ -95,7 +95,7 @@ enum ia_css_frame_format {
/* because of issues this would cause with the Clockwork code checking tool. */
#define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1)
-/** Number of valid output frame formats for ISP **/
+/* Number of valid output frame formats for ISP **/
#define IA_CSS_FRAME_OUT_FORMAT_NUM (IA_CSS_FRAME_FORMAT_RGBA888 + 1)
#endif /* __IA_CSS_FRAME_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
index 92f2389176b2..ba7a076c3afa 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_FRAME_PUBLIC_H
#define __IA_CSS_FRAME_PUBLIC_H
-/** @file
+/* @file
* This file contains structs to describe various frame-formats supported by the ISP.
*/
@@ -25,73 +25,73 @@
#include "ia_css_frame_format.h"
#include "ia_css_buffer.h"
-/** For RAW input, the bayer order needs to be specified separately. There
+/* For RAW input, the bayer order needs to be specified separately. There
* are 4 possible orders. The name is constructed by taking the first two
* colors on the first line and the first two colors from the second line.
*/
enum ia_css_bayer_order {
- IA_CSS_BAYER_ORDER_GRBG, /**< GRGRGRGRGR .. BGBGBGBGBG */
- IA_CSS_BAYER_ORDER_RGGB, /**< RGRGRGRGRG .. GBGBGBGBGB */
- IA_CSS_BAYER_ORDER_BGGR, /**< BGBGBGBGBG .. GRGRGRGRGR */
- IA_CSS_BAYER_ORDER_GBRG, /**< GBGBGBGBGB .. RGRGRGRGRG */
+ IA_CSS_BAYER_ORDER_GRBG, /** GRGRGRGRGR .. BGBGBGBGBG */
+ IA_CSS_BAYER_ORDER_RGGB, /** RGRGRGRGRG .. GBGBGBGBGB */
+ IA_CSS_BAYER_ORDER_BGGR, /** BGBGBGBGBG .. GRGRGRGRGR */
+ IA_CSS_BAYER_ORDER_GBRG, /** GBGBGBGBGB .. RGRGRGRGRG */
};
#define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1)
-/** Frame plane structure. This describes one plane in an image
+/* Frame plane structure. This describes one plane in an image
* frame buffer.
*/
struct ia_css_frame_plane {
- unsigned int height; /**< height of a plane in lines */
- unsigned int width; /**< width of a line, in DMA elements, note that
+ unsigned int height; /** height of a plane in lines */
+ unsigned int width; /** width of a line, in DMA elements, note that
for RGB565 the three subpixels are stored in
one element. For all other formats this is
the number of subpixels per line. */
- unsigned int stride; /**< stride of a line in bytes */
- unsigned int offset; /**< offset in bytes to start of frame data.
+ unsigned int stride; /** stride of a line in bytes */
+ unsigned int offset; /** offset in bytes to start of frame data.
offset is wrt data field in ia_css_frame */
};
-/** Binary "plane". This is used to story binary streams such as jpeg
+/* Binary "plane". This is used to story binary streams such as jpeg
* images. This is not actually a real plane.
*/
struct ia_css_frame_binary_plane {
- unsigned int size; /**< number of bytes in the stream */
- struct ia_css_frame_plane data; /**< plane */
+ unsigned int size; /** number of bytes in the stream */
+ struct ia_css_frame_plane data; /** plane */
};
-/** Container for planar YUV frames. This contains 3 planes.
+/* Container for planar YUV frames. This contains 3 planes.
*/
struct ia_css_frame_yuv_planes {
- struct ia_css_frame_plane y; /**< Y plane */
- struct ia_css_frame_plane u; /**< U plane */
- struct ia_css_frame_plane v; /**< V plane */
+ struct ia_css_frame_plane y; /** Y plane */
+ struct ia_css_frame_plane u; /** U plane */
+ struct ia_css_frame_plane v; /** V plane */
};
-/** Container for semi-planar YUV frames.
+/* Container for semi-planar YUV frames.
*/
struct ia_css_frame_nv_planes {
- struct ia_css_frame_plane y; /**< Y plane */
- struct ia_css_frame_plane uv; /**< UV plane */
+ struct ia_css_frame_plane y; /** Y plane */
+ struct ia_css_frame_plane uv; /** UV plane */
};
-/** Container for planar RGB frames. Each color has its own plane.
+/* Container for planar RGB frames. Each color has its own plane.
*/
struct ia_css_frame_rgb_planes {
- struct ia_css_frame_plane r; /**< Red plane */
- struct ia_css_frame_plane g; /**< Green plane */
- struct ia_css_frame_plane b; /**< Blue plane */
+ struct ia_css_frame_plane r; /** Red plane */
+ struct ia_css_frame_plane g; /** Green plane */
+ struct ia_css_frame_plane b; /** Blue plane */
};
-/** Container for 6-plane frames. These frames are used internally
+/* Container for 6-plane frames. These frames are used internally
* in the advanced ISP only.
*/
struct ia_css_frame_plane6_planes {
- struct ia_css_frame_plane r; /**< Red plane */
- struct ia_css_frame_plane r_at_b; /**< Red at blue plane */
- struct ia_css_frame_plane gr; /**< Red-green plane */
- struct ia_css_frame_plane gb; /**< Blue-green plane */
- struct ia_css_frame_plane b; /**< Blue plane */
- struct ia_css_frame_plane b_at_r; /**< Blue at red plane */
+ struct ia_css_frame_plane r; /** Red plane */
+ struct ia_css_frame_plane r_at_b; /** Red at blue plane */
+ struct ia_css_frame_plane gr; /** Red-green plane */
+ struct ia_css_frame_plane gb; /** Blue-green plane */
+ struct ia_css_frame_plane b; /** Blue plane */
+ struct ia_css_frame_plane b_at_r; /** Blue at red plane */
};
/* Crop info struct - stores the lines to be cropped in isp */
@@ -103,15 +103,15 @@ struct ia_css_crop_info {
unsigned int start_line;
};
-/** Frame info struct. This describes the contents of an image frame buffer.
+/* Frame info struct. This describes the contents of an image frame buffer.
*/
struct ia_css_frame_info {
- struct ia_css_resolution res; /**< Frame resolution (valid data) */
- unsigned int padded_width; /**< stride of line in memory (in pixels) */
- enum ia_css_frame_format format; /**< format of the frame data */
- unsigned int raw_bit_depth; /**< number of valid bits per pixel,
+ struct ia_css_resolution res; /** Frame resolution (valid data) */
+ unsigned int padded_width; /** stride of line in memory (in pixels) */
+ enum ia_css_frame_format format; /** format of the frame data */
+ unsigned int raw_bit_depth; /** number of valid bits per pixel,
only valid for RAW bayer frames */
- enum ia_css_bayer_order raw_bayer_order; /**< bayer order, only valid
+ enum ia_css_bayer_order raw_bayer_order; /** bayer order, only valid
for RAW bayer frames */
/* the params below are computed based on bayer_order
* we can remove the raw_bayer_order if it is redundant
@@ -136,9 +136,9 @@ struct ia_css_frame_info {
* Specifies the DVS loop delay in "frame periods"
*/
enum ia_css_frame_delay {
- IA_CSS_FRAME_DELAY_0, /**< Frame delay = 0 */
- IA_CSS_FRAME_DELAY_1, /**< Frame delay = 1 */
- IA_CSS_FRAME_DELAY_2 /**< Frame delay = 2 */
+ IA_CSS_FRAME_DELAY_0, /** Frame delay = 0 */
+ IA_CSS_FRAME_DELAY_1, /** Frame delay = 1 */
+ IA_CSS_FRAME_DELAY_2 /** Frame delay = 2 */
};
enum ia_css_frame_flash_state {
@@ -147,13 +147,13 @@ enum ia_css_frame_flash_state {
IA_CSS_FRAME_FLASH_STATE_FULL
};
-/** Frame structure. This structure describes an image buffer or frame.
+/* Frame structure. This structure describes an image buffer or frame.
* This is the main structure used for all input and output images.
*/
struct ia_css_frame {
- struct ia_css_frame_info info; /**< info struct describing the frame */
- ia_css_ptr data; /**< pointer to start of image data */
- unsigned int data_bytes; /**< size of image data in bytes */
+ struct ia_css_frame_info info; /** info struct describing the frame */
+ ia_css_ptr data; /** pointer to start of image data */
+ unsigned int data_bytes; /** size of image data in bytes */
/* LA: move this to ia_css_buffer */
/*
* -1 if data address is static during life time of pipeline
@@ -171,10 +171,10 @@ struct ia_css_frame {
enum ia_css_buffer_type buf_type;
enum ia_css_frame_flash_state flash_state;
unsigned int exp_id;
- /**< exposure id, see ia_css_event_public.h for more detail */
- uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */
- bool valid; /**< First video output frame is not valid */
- bool contiguous; /**< memory is allocated physically contiguously */
+ /** exposure id, see ia_css_event_public.h for more detail */
+ uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
+ bool valid; /** First video output frame is not valid */
+ bool contiguous; /** memory is allocated physically contiguously */
union {
unsigned int _initialisation_dummy;
struct ia_css_frame_plane raw;
@@ -185,7 +185,7 @@ struct ia_css_frame {
struct ia_css_frame_nv_planes nv;
struct ia_css_frame_plane6_planes plane6;
struct ia_css_frame_binary_plane binary;
- } planes; /**< frame planes, select the right one based on
+ } planes; /** frame planes, select the right one based on
info.format */
};
@@ -204,7 +204,7 @@ struct ia_css_frame {
{ 0 } /* planes */ \
}
-/** @brief Fill a frame with zeros
+/* @brief Fill a frame with zeros
*
* @param frame The frame.
* @return None
@@ -213,7 +213,7 @@ struct ia_css_frame {
*/
void ia_css_frame_zero(struct ia_css_frame *frame);
-/** @brief Allocate a CSS frame structure
+/* @brief Allocate a CSS frame structure
*
* @param frame The allocated frame.
* @param width The width (in pixels) of the frame.
@@ -234,7 +234,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
unsigned int stride,
unsigned int raw_bit_depth);
-/** @brief Allocate a CSS frame structure using a frame info structure.
+/* @brief Allocate a CSS frame structure using a frame info structure.
*
* @param frame The allocated frame.
* @param[in] info The frame info structure.
@@ -247,7 +247,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
enum ia_css_err
ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
const struct ia_css_frame_info *info);
-/** @brief Free a CSS frame structure.
+/* @brief Free a CSS frame structure.
*
* @param[in] frame Pointer to the frame.
* @return None
@@ -258,7 +258,7 @@ ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
void
ia_css_frame_free(struct ia_css_frame *frame);
-/** @brief Allocate a contiguous CSS frame structure
+/* @brief Allocate a contiguous CSS frame structure
*
* @param frame The allocated frame.
* @param width The width (in pixels) of the frame.
@@ -280,7 +280,7 @@ ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
unsigned int stride,
unsigned int raw_bit_depth);
-/** @brief Allocate a contiguous CSS frame from a frame info structure.
+/* @brief Allocate a contiguous CSS frame from a frame info structure.
*
* @param frame The allocated frame.
* @param[in] info The frame info structure.
@@ -296,7 +296,7 @@ enum ia_css_err
ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame,
const struct ia_css_frame_info *info);
-/** @brief Allocate a CSS frame structure using a frame info structure.
+/* @brief Allocate a CSS frame structure using a frame info structure.
*
* @param frame The allocated frame.
* @param[in] info The frame info structure.
@@ -309,7 +309,7 @@ enum ia_css_err
ia_css_frame_create_from_info(struct ia_css_frame **frame,
const struct ia_css_frame_info *info);
-/** @brief Set a mapped data buffer to a CSS frame
+/* @brief Set a mapped data buffer to a CSS frame
*
* @param[in] frame Valid CSS frame pointer
* @param[in] mapped_data Mapped data buffer to be assigned to the CSS frame
@@ -327,7 +327,7 @@ ia_css_frame_set_data(struct ia_css_frame *frame,
const ia_css_ptr mapped_data,
size_t data_size_bytes);
-/** @brief Map an existing frame data pointer to a CSS frame.
+/* @brief Map an existing frame data pointer to a CSS frame.
*
* @param frame Pointer to the frame to be initialized
* @param[in] info The frame info.
@@ -350,7 +350,7 @@ ia_css_frame_map(struct ia_css_frame **frame,
uint16_t attribute,
void *context);
-/** @brief Unmap a CSS frame structure.
+/* @brief Unmap a CSS frame structure.
*
* @param[in] frame Pointer to the CSS frame.
* @return None
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
index 8a17c3346caa..f415570a3da9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_INPUT_PORT_H
#define __IA_CSS_INPUT_PORT_H
-/** @file
+/* @file
* This file contains information about the possible input ports for CSS
*/
-/** Enumeration of the physical input ports on the CSS hardware.
+/* Enumeration of the physical input ports on the CSS hardware.
* There are 3 MIPI CSI-2 ports.
*/
enum ia_css_csi2_port {
@@ -28,39 +28,39 @@ enum ia_css_csi2_port {
IA_CSS_CSI2_PORT2 /* Implicitly map to MIPI_PORT2_ID */
};
-/** Backward compatible for CSS API 2.0 only
+/* Backward compatible for CSS API 2.0 only
* TO BE REMOVED when all drivers move to CSS API 2.1
*/
#define IA_CSS_CSI2_PORT_4LANE IA_CSS_CSI2_PORT0
#define IA_CSS_CSI2_PORT_1LANE IA_CSS_CSI2_PORT1
#define IA_CSS_CSI2_PORT_2LANE IA_CSS_CSI2_PORT2
-/** The CSI2 interface supports 2 types of compression or can
+/* The CSI2 interface supports 2 types of compression or can
* be run without compression.
*/
enum ia_css_csi2_compression_type {
- IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /**< No compression */
- IA_CSS_CSI2_COMPRESSION_TYPE_1, /**< Compression scheme 1 */
- IA_CSS_CSI2_COMPRESSION_TYPE_2 /**< Compression scheme 2 */
+ IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /** No compression */
+ IA_CSS_CSI2_COMPRESSION_TYPE_1, /** Compression scheme 1 */
+ IA_CSS_CSI2_COMPRESSION_TYPE_2 /** Compression scheme 2 */
};
struct ia_css_csi2_compression {
enum ia_css_csi2_compression_type type;
- /**< Compression used */
+ /** Compression used */
unsigned int compressed_bits_per_pixel;
- /**< Compressed bits per pixel (only when compression is enabled) */
+ /** Compressed bits per pixel (only when compression is enabled) */
unsigned int uncompressed_bits_per_pixel;
- /**< Uncompressed bits per pixel (only when compression is enabled) */
+ /** Uncompressed bits per pixel (only when compression is enabled) */
};
-/** Input port structure.
+/* Input port structure.
*/
struct ia_css_input_port {
- enum ia_css_csi2_port port; /**< Physical CSI-2 port */
- unsigned int num_lanes; /**< Number of lanes used (4-lane port only) */
- unsigned int timeout; /**< Timeout value */
- unsigned int rxcount; /**< Register value, should include all lanes */
- struct ia_css_csi2_compression compression; /**< Compression used */
+ enum ia_css_csi2_port port; /** Physical CSI-2 port */
+ unsigned int num_lanes; /** Number of lanes used (4-lane port only) */
+ unsigned int timeout; /** Timeout value */
+ unsigned int rxcount; /** Register value, should include all lanes */
+ struct ia_css_csi2_compression compression; /** Compression used */
};
#endif /* __IA_CSS_INPUT_PORT_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
index 416ca4d28732..10ef61178bb2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_IRQ_H
#define __IA_CSS_IRQ_H
-/** @file
+/* @file
* This file contains information for Interrupts/IRQs from CSS
*/
@@ -23,14 +23,14 @@
#include "ia_css_pipe_public.h"
#include "ia_css_input_port.h"
-/** Interrupt types, these enumerate all supported interrupt types.
+/* Interrupt types, these enumerate all supported interrupt types.
*/
enum ia_css_irq_type {
- IA_CSS_IRQ_TYPE_EDGE, /**< Edge (level) sensitive interrupt */
- IA_CSS_IRQ_TYPE_PULSE /**< Pulse-shaped interrupt */
+ IA_CSS_IRQ_TYPE_EDGE, /** Edge (level) sensitive interrupt */
+ IA_CSS_IRQ_TYPE_PULSE /** Pulse-shaped interrupt */
};
-/** Interrupt request type.
+/* Interrupt request type.
* When the CSS hardware generates an interrupt, a function in this API
* needs to be called to retrieve information about the interrupt.
* This interrupt type is part of this information and indicates what
@@ -46,55 +46,55 @@ enum ia_css_irq_type {
*/
enum ia_css_irq_info {
IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = 1 << 0,
- /**< the css receiver has encountered an error */
+ /** the css receiver has encountered an error */
IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = 1 << 1,
- /**< the FIFO in the csi receiver has overflown */
+ /** the FIFO in the csi receiver has overflown */
IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = 1 << 2,
- /**< the css receiver received the start of frame */
+ /** the css receiver received the start of frame */
IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = 1 << 3,
- /**< the css receiver received the end of frame */
+ /** the css receiver received the end of frame */
IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = 1 << 4,
- /**< the css receiver received the start of line */
+ /** the css receiver received the start of line */
IA_CSS_IRQ_INFO_PSYS_EVENTS_READY = 1 << 5,
- /**< One or more events are available in the PSYS event queue */
+ /** One or more events are available in the PSYS event queue */
IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
- /**< deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
+ /** deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
* same functionality.} */
IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = 1 << 6,
- /**< the css receiver received the end of line */
+ /** the css receiver received the end of line */
IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7,
- /**< the css receiver received a change in side band signals */
+ /** the css receiver received a change in side band signals */
IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = 1 << 8,
- /**< generic short packets (0) */
+ /** generic short packets (0) */
IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = 1 << 9,
- /**< generic short packets (1) */
+ /** generic short packets (1) */
IA_CSS_IRQ_INFO_IF_PRIM_ERROR = 1 << 10,
- /**< the primary input formatter (A) has encountered an error */
+ /** the primary input formatter (A) has encountered an error */
IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = 1 << 11,
- /**< the primary input formatter (B) has encountered an error */
+ /** the primary input formatter (B) has encountered an error */
IA_CSS_IRQ_INFO_IF_SEC_ERROR = 1 << 12,
- /**< the secondary input formatter has encountered an error */
+ /** the secondary input formatter has encountered an error */
IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = 1 << 13,
- /**< the stream-to-memory device has encountered an error */
+ /** the stream-to-memory device has encountered an error */
IA_CSS_IRQ_INFO_SW_0 = 1 << 14,
- /**< software interrupt 0 */
+ /** software interrupt 0 */
IA_CSS_IRQ_INFO_SW_1 = 1 << 15,
- /**< software interrupt 1 */
+ /** software interrupt 1 */
IA_CSS_IRQ_INFO_SW_2 = 1 << 16,
- /**< software interrupt 2 */
+ /** software interrupt 2 */
IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = 1 << 17,
- /**< ISP binary statistics are ready */
+ /** ISP binary statistics are ready */
IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = 1 << 18,
- /**< the input system in in error */
+ /** the input system in in error */
IA_CSS_IRQ_INFO_IF_ERROR = 1 << 19,
- /**< the input formatter in in error */
+ /** the input formatter in in error */
IA_CSS_IRQ_INFO_DMA_ERROR = 1 << 20,
- /**< the dma in in error */
+ /** the dma in in error */
IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = 1 << 21,
- /**< end-of-frame events are ready in the isys_event queue */
+ /** end-of-frame events are ready in the isys_event queue */
};
-/** CSS receiver error types. Whenever the CSS receiver has encountered
+/* CSS receiver error types. Whenever the CSS receiver has encountered
* an error, this enumeration is used to indicate which errors have occurred.
*
* Note that multiple error flags can be enabled at once and that this is in
@@ -105,39 +105,39 @@ enum ia_css_irq_info {
* different receiver types, or possibly none in case of tests systems.
*/
enum ia_css_rx_irq_info {
- IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /**< buffer overrun */
- IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /**< entering sleep mode */
- IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /**< exited sleep mode */
- IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /**< ECC corrected */
+ IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /** buffer overrun */
+ IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /** entering sleep mode */
+ IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /** exited sleep mode */
+ IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /** ECC corrected */
IA_CSS_RX_IRQ_INFO_ERR_SOT = 1U << 4,
- /**< Start of transmission */
- IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /**< SOT sync (??) */
- IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /**< Control (??) */
- IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /**< Double ECC */
- IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /**< CRC error */
- IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /**< Unknown ID */
- IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/**< Frame sync error */
- IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/**< Frame data error */
- IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/**< Timeout occurred */
- IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/**< Unknown escape seq. */
- IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/**< Line Sync error */
+ /** Start of transmission */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /** SOT sync (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /** Control (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /** Double ECC */
+ IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /** CRC error */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /** Unknown ID */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/** Frame sync error */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/** Frame data error */
+ IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/** Timeout occurred */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/** Unknown escape seq. */
+ IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/** Line Sync error */
IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = 1U << 15,
};
-/** Interrupt info structure. This structure contains information about an
+/* Interrupt info structure. This structure contains information about an
* interrupt. This needs to be used after an interrupt is received on the IA
* to perform the correct action.
*/
struct ia_css_irq {
- enum ia_css_irq_info type; /**< Interrupt type. */
- unsigned int sw_irq_0_val; /**< In case of SW interrupt 0, value. */
- unsigned int sw_irq_1_val; /**< In case of SW interrupt 1, value. */
- unsigned int sw_irq_2_val; /**< In case of SW interrupt 2, value. */
+ enum ia_css_irq_info type; /** Interrupt type. */
+ unsigned int sw_irq_0_val; /** In case of SW interrupt 0, value. */
+ unsigned int sw_irq_1_val; /** In case of SW interrupt 1, value. */
+ unsigned int sw_irq_2_val; /** In case of SW interrupt 2, value. */
struct ia_css_pipe *pipe;
- /**< The image pipe that generated the interrupt. */
+ /** The image pipe that generated the interrupt. */
};
-/** @brief Obtain interrupt information.
+/* @brief Obtain interrupt information.
*
* @param[out] info Pointer to the interrupt info. The interrupt
* information wil be written to this info.
@@ -154,7 +154,7 @@ struct ia_css_irq {
enum ia_css_err
ia_css_irq_translate(unsigned int *info);
-/** @brief Get CSI receiver error info.
+/* @brief Get CSI receiver error info.
*
* @param[out] irq_bits Pointer to the interrupt bits. The interrupt
* bits will be written this info.
@@ -172,7 +172,7 @@ ia_css_irq_translate(unsigned int *info);
void
ia_css_rx_get_irq_info(unsigned int *irq_bits);
-/** @brief Get CSI receiver error info.
+/* @brief Get CSI receiver error info.
*
* @param[in] port Input port identifier.
* @param[out] irq_bits Pointer to the interrupt bits. The interrupt
@@ -188,7 +188,7 @@ ia_css_rx_get_irq_info(unsigned int *irq_bits);
void
ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
-/** @brief Clear CSI receiver error info.
+/* @brief Clear CSI receiver error info.
*
* @param[in] irq_bits The bits that should be cleared from the CSI receiver
* interrupt bits register.
@@ -205,7 +205,7 @@ ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
void
ia_css_rx_clear_irq_info(unsigned int irq_bits);
-/** @brief Clear CSI receiver error info.
+/* @brief Clear CSI receiver error info.
*
* @param[in] port Input port identifier.
* @param[in] irq_bits The bits that should be cleared from the CSI receiver
@@ -220,7 +220,7 @@ ia_css_rx_clear_irq_info(unsigned int irq_bits);
void
ia_css_rx_port_clear_irq_info(enum ia_css_csi2_port port, unsigned int irq_bits);
-/** @brief Enable or disable specific interrupts.
+/* @brief Enable or disable specific interrupts.
*
* @param[in] type The interrupt type that will be enabled/disabled.
* @param[in] enable enable or disable.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
index c40c5a19bfe1..8b674c98224c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_METADATA_H
#define __IA_CSS_METADATA_H
-/** @file
+/* @file
* This file contains structure for processing sensor metadata.
*/
@@ -23,32 +23,32 @@
#include "ia_css_types.h"
#include "ia_css_stream_format.h"
-/** Metadata configuration. This data structure contains necessary info
+/* Metadata configuration. This data structure contains necessary info
* to process sensor metadata.
*/
struct ia_css_metadata_config {
- enum ia_css_stream_format data_type; /**< Data type of CSI-2 embedded
+ enum ia_css_stream_format data_type; /** Data type of CSI-2 embedded
data. The default value is IA_CSS_STREAM_FORMAT_EMBEDDED. For
certain sensors, user can choose non-default data type for embedded
data. */
- struct ia_css_resolution resolution; /**< Resolution */
+ struct ia_css_resolution resolution; /** Resolution */
};
struct ia_css_metadata_info {
- struct ia_css_resolution resolution; /**< Resolution */
- uint32_t stride; /**< Stride in bytes */
- uint32_t size; /**< Total size in bytes */
+ struct ia_css_resolution resolution; /** Resolution */
+ uint32_t stride; /** Stride in bytes */
+ uint32_t size; /** Total size in bytes */
};
struct ia_css_metadata {
- struct ia_css_metadata_info info; /**< Layout info */
- ia_css_ptr address; /**< CSS virtual address */
+ struct ia_css_metadata_info info; /** Layout info */
+ ia_css_ptr address; /** CSS virtual address */
uint32_t exp_id;
- /**< Exposure ID, see ia_css_event_public.h for more detail */
+ /** Exposure ID, see ia_css_event_public.h for more detail */
};
#define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata)
-/** @brief Allocate a metadata buffer.
+/* @brief Allocate a metadata buffer.
* @param[in] metadata_info Metadata info struct, contains details on metadata buffers.
* @return Pointer of metadata buffer or NULL (if error)
*
@@ -58,7 +58,7 @@ struct ia_css_metadata {
struct ia_css_metadata *
ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info);
-/** @brief Free a metadata buffer.
+/* @brief Free a metadata buffer.
*
* @param[in] metadata Pointer of metadata buffer.
* @return None
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
index fd2c01b60b28..f9c9cd76be97 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_MIPI_H
#define __IA_CSS_MIPI_H
-/** @file
+/* @file
* This file contains MIPI support functionality
*/
@@ -24,10 +24,10 @@
#include "ia_css_stream_format.h"
#include "ia_css_input_port.h"
-/** Backward compatible for CSS API 2.0 only
+/* Backward compatible for CSS API 2.0 only
* TO BE REMOVED when all drivers move to CSS API 2.1.
*/
-/** @brief Specify a CSS MIPI frame buffer.
+/* @brief Specify a CSS MIPI frame buffer.
*
* @param[in] size_mem_words The frame size in memory words (32B).
* @param[in] contiguous Allocate memory physically contiguously or not.
@@ -42,7 +42,7 @@ ia_css_mipi_frame_specify(const unsigned int size_mem_words,
const bool contiguous);
#if !defined(HAS_NO_INPUT_SYSTEM)
-/** @brief Register size of a CSS MIPI frame for check during capturing.
+/* @brief Register size of a CSS MIPI frame for check during capturing.
*
* @param[in] port CSI-2 port this check is registered.
* @param[in] size_mem_words The frame size in memory words (32B).
@@ -59,7 +59,7 @@ ia_css_mipi_frame_enable_check_on_size(const enum ia_css_csi2_port port,
const unsigned int size_mem_words);
#endif
-/** @brief Calculate the size of a mipi frame.
+/* @brief Calculate the size of a mipi frame.
*
* @param[in] width The width (in pixels) of the frame.
* @param[in] height The height (in lines) of the frame.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
index 48f8855d61f6..13c21056bfbf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_MMU_H
#define __IA_CSS_MMU_H
-/** @file
+/* @file
* This file contains one support function for invalidating the CSS MMU cache
*/
-/** @brief Invalidate the MMU internal cache.
+/* @brief Invalidate the MMU internal cache.
* @return None
*
* This function triggers an invalidation of the translate-look-aside
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
index 969840da52b2..de409638d009 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
@@ -15,13 +15,13 @@
#ifndef __IA_CSS_MORPH_H
#define __IA_CSS_MORPH_H
-/** @file
+/* @file
* This file contains supporting for morphing table
*/
#include <ia_css_types.h>
-/** @brief Morphing table
+/* @brief Morphing table
* @param[in] width Width of the morphing table.
* @param[in] height Height of the morphing table.
* @return Pointer to the morphing table
@@ -29,7 +29,7 @@
struct ia_css_morph_table *
ia_css_morph_table_allocate(unsigned int width, unsigned int height);
-/** @brief Free the morph table
+/* @brief Free the morph table
* @param[in] me Pointer to the morph table.
* @return None
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
index 733e0ef3afe8..df0aad9a6ab9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_PIPE_PUBLIC_H
#define __IA_CSS_PIPE_PUBLIC_H
-/** @file
+/* @file
* This file contains the public interface for CSS pipes.
*/
@@ -34,7 +34,7 @@ enum {
IA_CSS_PIPE_MAX_OUTPUT_STAGE,
};
-/** Enumeration of pipe modes. This mode can be used to create
+/* Enumeration of pipe modes. This mode can be used to create
* an image pipe for this mode. These pipes can be combined
* to configure and run streams on the ISP.
*
@@ -42,12 +42,12 @@ enum {
* create a continuous capture stream.
*/
enum ia_css_pipe_mode {
- IA_CSS_PIPE_MODE_PREVIEW, /**< Preview pipe */
- IA_CSS_PIPE_MODE_VIDEO, /**< Video pipe */
- IA_CSS_PIPE_MODE_CAPTURE, /**< Still capture pipe */
- IA_CSS_PIPE_MODE_ACC, /**< Accelerated pipe */
- IA_CSS_PIPE_MODE_COPY, /**< Copy pipe, only used for embedded/image data copying */
- IA_CSS_PIPE_MODE_YUVPP, /**< YUV post processing pipe, used for all use cases with YUV input,
+ IA_CSS_PIPE_MODE_PREVIEW, /** Preview pipe */
+ IA_CSS_PIPE_MODE_VIDEO, /** Video pipe */
+ IA_CSS_PIPE_MODE_CAPTURE, /** Still capture pipe */
+ IA_CSS_PIPE_MODE_ACC, /** Accelerated pipe */
+ IA_CSS_PIPE_MODE_COPY, /** Copy pipe, only used for embedded/image data copying */
+ IA_CSS_PIPE_MODE_YUVPP, /** YUV post processing pipe, used for all use cases with YUV input,
for SoC sensor and external ISP */
};
/* Temporary define */
@@ -58,10 +58,10 @@ enum ia_css_pipe_mode {
* the order should match with definition in sh_css_defs.h
*/
enum ia_css_pipe_version {
- IA_CSS_PIPE_VERSION_1 = 1, /**< ISP1.0 pipe */
- IA_CSS_PIPE_VERSION_2_2 = 2, /**< ISP2.2 pipe */
- IA_CSS_PIPE_VERSION_2_6_1 = 3, /**< ISP2.6.1 pipe */
- IA_CSS_PIPE_VERSION_2_7 = 4 /**< ISP2.7 pipe */
+ IA_CSS_PIPE_VERSION_1 = 1, /** ISP1.0 pipe */
+ IA_CSS_PIPE_VERSION_2_2 = 2, /** ISP2.2 pipe */
+ IA_CSS_PIPE_VERSION_2_6_1 = 3, /** ISP2.6.1 pipe */
+ IA_CSS_PIPE_VERSION_2_7 = 4 /** ISP2.7 pipe */
};
/**
@@ -71,79 +71,79 @@ enum ia_css_pipe_version {
*/
struct ia_css_pipe_config {
enum ia_css_pipe_mode mode;
- /**< mode, indicates which mode the pipe should use. */
+ /** mode, indicates which mode the pipe should use. */
enum ia_css_pipe_version isp_pipe_version;
- /**< pipe version, indicates which imaging pipeline the pipe should use. */
+ /** pipe version, indicates which imaging pipeline the pipe should use. */
struct ia_css_resolution input_effective_res;
- /**< input effective resolution */
+ /** input effective resolution */
struct ia_css_resolution bayer_ds_out_res;
- /**< bayer down scaling */
+ /** bayer down scaling */
struct ia_css_resolution capt_pp_in_res;
#ifndef ISP2401
- /**< bayer down scaling */
+ /** bayer down scaling */
#else
- /**< capture post processing input resolution */
+ /** capture post processing input resolution */
#endif
struct ia_css_resolution vf_pp_in_res;
#ifndef ISP2401
- /**< bayer down scaling */
+ /** bayer down scaling */
#else
- /**< view finder post processing input resolution */
+ /** view finder post processing input resolution */
struct ia_css_resolution output_system_in_res;
- /**< For IPU3 only: use output_system_in_res to specify what input resolution
+ /** For IPU3 only: use output_system_in_res to specify what input resolution
will OSYS receive, this resolution is equal to the output resolution of GDC
if not determined CSS will set output_system_in_res with main osys output pin resolution
All other IPUs may ignore this property */
#endif
struct ia_css_resolution dvs_crop_out_res;
- /**< dvs crop, video only, not in use yet. Use dvs_envelope below. */
+ /** dvs crop, video only, not in use yet. Use dvs_envelope below. */
struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
- /**< output of YUV scaling */
+ /** output of YUV scaling */
struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
- /**< output of VF YUV scaling */
+ /** output of VF YUV scaling */
struct ia_css_fw_info *acc_extension;
- /**< Pipeline extension accelerator */
+ /** Pipeline extension accelerator */
struct ia_css_fw_info **acc_stages;
- /**< Standalone accelerator stages */
+ /** Standalone accelerator stages */
uint32_t num_acc_stages;
- /**< Number of standalone accelerator stages */
+ /** Number of standalone accelerator stages */
struct ia_css_capture_config default_capture_config;
- /**< Default capture config for initial capture pipe configuration. */
- struct ia_css_resolution dvs_envelope; /**< temporary */
+ /** Default capture config for initial capture pipe configuration. */
+ struct ia_css_resolution dvs_envelope; /** temporary */
enum ia_css_frame_delay dvs_frame_delay;
- /**< indicates the DVS loop delay in frame periods */
+ /** indicates the DVS loop delay in frame periods */
int acc_num_execs;
- /**< For acceleration pipes only: determine how many times the pipe
+ /** For acceleration pipes only: determine how many times the pipe
should be run. Setting this to -1 means it will run until
stopped. */
bool enable_dz;
- /**< Disabling digital zoom for a pipeline, if this is set to false,
+ /** Disabling digital zoom for a pipeline, if this is set to false,
then setting a zoom factor will have no effect.
In some use cases this provides better performance. */
bool enable_dpc;
- /**< Disabling "Defect Pixel Correction" for a pipeline, if this is set
+ /** Disabling "Defect Pixel Correction" for a pipeline, if this is set
to false. In some use cases this provides better performance. */
bool enable_vfpp_bci;
- /**< Enabling BCI mode will cause yuv_scale binary to be picked up
+ /** Enabling BCI mode will cause yuv_scale binary to be picked up
instead of vf_pp. This only applies to viewfinder post
processing stages. */
#ifdef ISP2401
bool enable_luma_only;
- /**< Enabling of monochrome mode for a pipeline. If enabled only luma processing
+ /** Enabling of monochrome mode for a pipeline. If enabled only luma processing
will be done. */
bool enable_tnr;
- /**< Enabling of TNR (temporal noise reduction). This is only applicable to video
+ /** Enabling of TNR (temporal noise reduction). This is only applicable to video
pipes. Non video-pipes should always set this parameter to false. */
#endif
struct ia_css_isp_config *p_isp_config;
- /**< Pointer to ISP configuration */
+ /** Pointer to ISP configuration */
struct ia_css_resolution gdc_in_buffer_res;
- /**< GDC in buffer resolution. */
+ /** GDC in buffer resolution. */
struct ia_css_point gdc_in_buffer_offset;
- /**< GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
+ /** GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
#ifdef ISP2401
struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl;
- /**< Origin of internal frame positioned on shading table at shading correction in ISP.
+ /** Origin of internal frame positioned on shading table at shading correction in ISP.
NOTE: Shading table is larger than or equal to internal frame.
Shading table has shading gains and internal frame has bayer data.
The origin of internal frame is used in shading correction in ISP
@@ -228,20 +228,20 @@ struct ia_css_pipe_config {
#endif
-/** Pipe info, this struct describes properties of a pipe after it's stream has
+/* Pipe info, this struct describes properties of a pipe after it's stream has
* been created.
* ~~~** DO NOT ADD NEW FIELD **~~~ This structure will be deprecated.
* - On the Behalf of CSS-API Committee.
*/
struct ia_css_pipe_info {
struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
- /**< Info about output resolution. This contains the stride which
+ /** Info about output resolution. This contains the stride which
should be used for memory allocation. */
struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
- /**< Info about viewfinder output resolution (optional). This contains
+ /** Info about viewfinder output resolution (optional). This contains
the stride that should be used for memory allocation. */
struct ia_css_frame_info raw_output_info;
- /**< Raw output resolution. This indicates the resolution of the
+ /** Raw output resolution. This indicates the resolution of the
RAW bayer output for pipes that support this. Currently, only the
still capture pipes support this feature. When this resolution is
smaller than the input resolution, cropping will be performed by
@@ -252,17 +252,17 @@ struct ia_css_pipe_info {
the input resolution - 8x8. */
#ifdef ISP2401
struct ia_css_resolution output_system_in_res_info;
- /**< For IPU3 only. Info about output system in resolution which is considered
+ /** For IPU3 only. Info about output system in resolution which is considered
as gdc out resolution. */
#endif
struct ia_css_shading_info shading_info;
- /**< After an image pipe is created, this field will contain the info
+ /** After an image pipe is created, this field will contain the info
for the shading correction. */
struct ia_css_grid_info grid_info;
- /**< After an image pipe is created, this field will contain the grid
+ /** After an image pipe is created, this field will contain the grid
info for 3A and DVS. */
int num_invalid_frames;
- /**< The very first frames in a started stream do not contain valid data.
+ /** The very first frames in a started stream do not contain valid data.
In this field, the CSS-firmware communicates to the host-driver how
many initial frames will contain invalid data; this allows the
host-driver to discard those initial invalid frames and start it's
@@ -299,7 +299,7 @@ struct ia_css_pipe_info {
#endif
-/** @brief Load default pipe configuration
+/* @brief Load default pipe configuration
* @param[out] pipe_config The pipe configuration.
* @return None
*
@@ -334,7 +334,7 @@ struct ia_css_pipe_info {
*/
void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config);
-/** @brief Create a pipe
+/* @brief Create a pipe
* @param[in] config The pipe configuration.
* @param[out] pipe The pipe.
* @return IA_CSS_SUCCESS or the error code.
@@ -346,7 +346,7 @@ enum ia_css_err
ia_css_pipe_create(const struct ia_css_pipe_config *config,
struct ia_css_pipe **pipe);
-/** @brief Destroy a pipe
+/* @brief Destroy a pipe
* @param[in] pipe The pipe.
* @return IA_CSS_SUCCESS or the error code.
*
@@ -355,7 +355,7 @@ ia_css_pipe_create(const struct ia_css_pipe_config *config,
enum ia_css_err
ia_css_pipe_destroy(struct ia_css_pipe *pipe);
-/** @brief Provides information about a pipe
+/* @brief Provides information about a pipe
* @param[in] pipe The pipe.
* @param[out] pipe_info The pipe information.
* @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS.
@@ -366,7 +366,7 @@ enum ia_css_err
ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
struct ia_css_pipe_info *pipe_info);
-/** @brief Configure a pipe with filter coefficients.
+/* @brief Configure a pipe with filter coefficients.
* @param[in] pipe The pipe.
* @param[in] config The pointer to ISP configuration.
* @return IA_CSS_SUCCESS or error code upon error.
@@ -378,7 +378,7 @@ enum ia_css_err
ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
struct ia_css_isp_config *config);
-/** @brief Controls when the Event generator raises an IRQ to the Host.
+/* @brief Controls when the Event generator raises an IRQ to the Host.
*
* @param[in] pipe The pipe.
* @param[in] or_mask Binary or of enum ia_css_event_irq_mask_type. Each pipe
@@ -455,7 +455,7 @@ ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
unsigned int or_mask,
unsigned int and_mask);
-/** @brief Reads the current event IRQ mask from the CSS.
+/* @brief Reads the current event IRQ mask from the CSS.
*
* @param[in] pipe The pipe.
* @param[out] or_mask Current or_mask. The bits in this mask are a binary or
@@ -476,7 +476,7 @@ ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
unsigned int *or_mask,
unsigned int *and_mask);
-/** @brief Queue a buffer for an image pipe.
+/* @brief Queue a buffer for an image pipe.
*
* @param[in] pipe The pipe that will own the buffer.
* @param[in] buffer Pointer to the buffer.
@@ -498,7 +498,7 @@ enum ia_css_err
ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
const struct ia_css_buffer *buffer);
-/** @brief Dequeue a buffer from an image pipe.
+/* @brief Dequeue a buffer from an image pipe.
*
* @param[in] pipe The pipeline that the buffer queue belongs to.
* @param[in,out] buffer The buffer is used to lookup the type which determines
@@ -519,7 +519,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
struct ia_css_buffer *buffer);
-/** @brief Set the state (Enable or Disable) of the Extension stage in the
+/* @brief Set the state (Enable or Disable) of the Extension stage in the
* given pipe.
* @param[in] pipe Pipe handle.
* @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
@@ -546,7 +546,7 @@ ia_css_pipe_set_qos_ext_state (struct ia_css_pipe *pipe,
uint32_t fw_handle,
bool enable);
-/** @brief Get the state (Enable or Disable) of the Extension stage in the
+/* @brief Get the state (Enable or Disable) of the Extension stage in the
* given pipe.
* @param[in] pipe Pipe handle.
* @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
@@ -573,7 +573,7 @@ ia_css_pipe_get_qos_ext_state (struct ia_css_pipe *pipe,
bool * enable);
#ifdef ISP2401
-/** @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
+/* @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
* @param[in] pipe Pipe handle.
* @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle).
* @param[in] css_seg Parameter memory descriptors for CSS segments.
@@ -595,7 +595,7 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_hand
struct ia_css_isp_param_isp_segments *isp_seg);
#endif
-/** @brief Get selected configuration settings
+/* @brief Get selected configuration settings
* @param[in] pipe The pipe.
* @param[out] config Configuration settings.
* @return None
@@ -604,7 +604,7 @@ void
ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
struct ia_css_isp_config *config);
-/** @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit
+/* @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit
* address space. So the LUT can be freed by caller.
* @param[in] pipe Pipe handle.
* @param[in] lut Look up tabel
@@ -623,7 +623,7 @@ ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
enum ia_css_err
ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
const void *lut);
-/** @brief Checking of DVS statistics ability
+/* @brief Checking of DVS statistics ability
* @param[in] pipe_info The pipe info.
* @return true - has DVS statistics ability
* false - otherwise
@@ -631,7 +631,7 @@ ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info);
#ifdef ISP2401
-/** @brief Override the frameformat set on the output pins.
+/* @brief Override the frameformat set on the output pins.
* @param[in] pipe Pipe handle.
* @param[in] output_pin Pin index to set the format on
* 0 - main output pin
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
index 9b0eeb08ca04..6f24656b6cb4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_PRBS_H
#define __IA_CSS_PRBS_H
-/** @file
+/* @file
* This file contains support for Pseudo Random Bit Sequence (PRBS) inputs
*/
-/** Enumerate the PRBS IDs.
+/* Enumerate the PRBS IDs.
*/
enum ia_css_prbs_id {
IA_CSS_PRBS_ID0,
@@ -44,10 +44,10 @@ enum ia_css_prbs_id {
*/
struct ia_css_prbs_config {
enum ia_css_prbs_id id;
- unsigned int h_blank; /**< horizontal blank */
- unsigned int v_blank; /**< vertical blank */
- int seed; /**< random seed for the 1st 2-pixel-components/clock */
- int seed1; /**< random seed for the 2nd 2-pixel-components/clock */
+ unsigned int h_blank; /** horizontal blank */
+ unsigned int v_blank; /** vertical blank */
+ int seed; /** random seed for the 1st 2-pixel-components/clock */
+ int seed1; /** random seed for the 2nd 2-pixel-components/clock */
};
#endif /* __IA_CSS_PRBS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
index 19af4021b24c..9a167306611c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_PROPERTIES_H
#define __IA_CSS_PROPERTIES_H
-/** @file
+/* @file
* This file contains support for retrieving properties of some hardware the CSS system
*/
@@ -24,12 +24,12 @@
struct ia_css_properties {
int gdc_coord_one;
- bool l1_base_is_index; /**< Indicate whether the L1 page base
+ bool l1_base_is_index; /** Indicate whether the L1 page base
is a page index or a byte address. */
enum ia_css_vamem_type vamem_type;
};
-/** @brief Get hardware properties
+/* @brief Get hardware properties
* @param[in,out] properties The hardware properties
* @return None
*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
index cb0f249e98c8..588f53d32b72 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
@@ -15,13 +15,13 @@
#ifndef __IA_CSS_SHADING_H
#define __IA_CSS_SHADING_H
-/** @file
+/* @file
* This file contains support for setting the shading table for CSS
*/
#include <ia_css_types.h>
-/** @brief Shading table
+/* @brief Shading table
* @param[in] width Width of the shading table.
* @param[in] height Height of the shading table.
* @return Pointer to the shading table
@@ -30,7 +30,7 @@ struct ia_css_shading_table *
ia_css_shading_table_alloc(unsigned int width,
unsigned int height);
-/** @brief Free shading table
+/* @brief Free shading table
* @param[in] table Pointer to the shading table.
* @return None
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
index 453fe4db0133..fb6e8c2ca8bf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
@@ -48,7 +48,7 @@ struct ia_css_stream {
bool started;
};
-/** @brief Get a binary in the stream, which binary has the shading correction.
+/* @brief Get a binary in the stream, which binary has the shading correction.
*
* @param[in] stream: The stream.
* @return The binary which has the shading correction.
@@ -76,7 +76,7 @@ sh_css_invalidate_params(struct ia_css_stream *stream);
const struct ia_css_fpn_table *
ia_css_get_fpn_table(struct ia_css_stream *stream);
-/** @brief Get a pointer to the shading table.
+/* @brief Get a pointer to the shading table.
*
* @param[in] stream: The stream.
* @return The pointer to the shading table.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
index ae608a9c9051..f7e9020a86e1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
@@ -15,74 +15,74 @@
#ifndef __IA_CSS_STREAM_FORMAT_H
#define __IA_CSS_STREAM_FORMAT_H
-/** @file
+/* @file
* This file contains formats usable for ISP streaming input
*/
#include <type_support.h> /* bool */
-/** The ISP streaming input interface supports the following formats.
+/* The ISP streaming input interface supports the following formats.
* These match the corresponding MIPI formats.
*/
enum ia_css_stream_format {
- IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY, /**< 8 bits per subpixel */
- IA_CSS_STREAM_FORMAT_YUV420_8, /**< 8 bits per subpixel */
- IA_CSS_STREAM_FORMAT_YUV420_10, /**< 10 bits per subpixel */
- IA_CSS_STREAM_FORMAT_YUV420_16, /**< 16 bits per subpixel */
- IA_CSS_STREAM_FORMAT_YUV422_8, /**< UYVY..UYVY, 8 bits per subpixel */
- IA_CSS_STREAM_FORMAT_YUV422_10, /**< UYVY..UYVY, 10 bits per subpixel */
- IA_CSS_STREAM_FORMAT_YUV422_16, /**< UYVY..UYVY, 16 bits per subpixel */
- IA_CSS_STREAM_FORMAT_RGB_444, /**< BGR..BGR, 4 bits per subpixel */
- IA_CSS_STREAM_FORMAT_RGB_555, /**< BGR..BGR, 5 bits per subpixel */
- IA_CSS_STREAM_FORMAT_RGB_565, /**< BGR..BGR, 5 bits B and R, 6 bits G */
- IA_CSS_STREAM_FORMAT_RGB_666, /**< BGR..BGR, 6 bits per subpixel */
- IA_CSS_STREAM_FORMAT_RGB_888, /**< BGR..BGR, 8 bits per subpixel */
- IA_CSS_STREAM_FORMAT_RAW_6, /**< RAW data, 6 bits per pixel */
- IA_CSS_STREAM_FORMAT_RAW_7, /**< RAW data, 7 bits per pixel */
- IA_CSS_STREAM_FORMAT_RAW_8, /**< RAW data, 8 bits per pixel */
- IA_CSS_STREAM_FORMAT_RAW_10, /**< RAW data, 10 bits per pixel */
- IA_CSS_STREAM_FORMAT_RAW_12, /**< RAW data, 12 bits per pixel */
- IA_CSS_STREAM_FORMAT_RAW_14, /**< RAW data, 14 bits per pixel */
- IA_CSS_STREAM_FORMAT_RAW_16, /**< RAW data, 16 bits per pixel, which is
+ IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY, /** 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV420_8, /** 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV420_10, /** 10 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV420_16, /** 16 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV422_8, /** UYVY..UYVY, 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV422_10, /** UYVY..UYVY, 10 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV422_16, /** UYVY..UYVY, 16 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_444, /** BGR..BGR, 4 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_555, /** BGR..BGR, 5 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_565, /** BGR..BGR, 5 bits B and R, 6 bits G */
+ IA_CSS_STREAM_FORMAT_RGB_666, /** BGR..BGR, 6 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_888, /** BGR..BGR, 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RAW_6, /** RAW data, 6 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_7, /** RAW data, 7 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_8, /** RAW data, 8 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_10, /** RAW data, 10 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_12, /** RAW data, 12 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_14, /** RAW data, 14 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_16, /** RAW data, 16 bits per pixel, which is
not specified in CSI-MIPI standard*/
- IA_CSS_STREAM_FORMAT_BINARY_8, /**< Binary byte stream, which is target at
+ IA_CSS_STREAM_FORMAT_BINARY_8, /** Binary byte stream, which is target at
JPEG. */
- /** CSI2-MIPI specific format: Generic short packet data. It is used to
+ /* CSI2-MIPI specific format: Generic short packet data. It is used to
* keep the timing information for the opening/closing of shutters,
* triggering of flashes and etc.
*/
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT1, /**< Generic Short Packet Code 1 */
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT2, /**< Generic Short Packet Code 2 */
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT3, /**< Generic Short Packet Code 3 */
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT4, /**< Generic Short Packet Code 4 */
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT5, /**< Generic Short Packet Code 5 */
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT6, /**< Generic Short Packet Code 6 */
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT7, /**< Generic Short Packet Code 7 */
- IA_CSS_STREAM_FORMAT_GENERIC_SHORT8, /**< Generic Short Packet Code 8 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT1, /** Generic Short Packet Code 1 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT2, /** Generic Short Packet Code 2 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT3, /** Generic Short Packet Code 3 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT4, /** Generic Short Packet Code 4 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT5, /** Generic Short Packet Code 5 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT6, /** Generic Short Packet Code 6 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT7, /** Generic Short Packet Code 7 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT8, /** Generic Short Packet Code 8 */
- /** CSI2-MIPI specific format: YUV data.
+ /* CSI2-MIPI specific format: YUV data.
*/
- IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT, /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */
- IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+ IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT, /** YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+ IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /** YUV420 8-bit (Chroma Shifted Pixel Sampling) */
- /** CSI2-MIPI specific format: Generic long packet data
+ /* CSI2-MIPI specific format: Generic long packet data
*/
- IA_CSS_STREAM_FORMAT_EMBEDDED, /**< Embedded 8-bit non Image Data */
+ IA_CSS_STREAM_FORMAT_EMBEDDED, /** Embedded 8-bit non Image Data */
- /** CSI2-MIPI specific format: User defined byte-based data. For example,
+ /* CSI2-MIPI specific format: User defined byte-based data. For example,
* the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
* the User Defined Data Type 4 and the MPEG data as the
* User Defined Data Type 7.
*/
- IA_CSS_STREAM_FORMAT_USER_DEF1, /**< User defined 8-bit data type 1 */
- IA_CSS_STREAM_FORMAT_USER_DEF2, /**< User defined 8-bit data type 2 */
- IA_CSS_STREAM_FORMAT_USER_DEF3, /**< User defined 8-bit data type 3 */
- IA_CSS_STREAM_FORMAT_USER_DEF4, /**< User defined 8-bit data type 4 */
- IA_CSS_STREAM_FORMAT_USER_DEF5, /**< User defined 8-bit data type 5 */
- IA_CSS_STREAM_FORMAT_USER_DEF6, /**< User defined 8-bit data type 6 */
- IA_CSS_STREAM_FORMAT_USER_DEF7, /**< User defined 8-bit data type 7 */
- IA_CSS_STREAM_FORMAT_USER_DEF8, /**< User defined 8-bit data type 8 */
+ IA_CSS_STREAM_FORMAT_USER_DEF1, /** User defined 8-bit data type 1 */
+ IA_CSS_STREAM_FORMAT_USER_DEF2, /** User defined 8-bit data type 2 */
+ IA_CSS_STREAM_FORMAT_USER_DEF3, /** User defined 8-bit data type 3 */
+ IA_CSS_STREAM_FORMAT_USER_DEF4, /** User defined 8-bit data type 4 */
+ IA_CSS_STREAM_FORMAT_USER_DEF5, /** User defined 8-bit data type 5 */
+ IA_CSS_STREAM_FORMAT_USER_DEF6, /** User defined 8-bit data type 6 */
+ IA_CSS_STREAM_FORMAT_USER_DEF7, /** User defined 8-bit data type 7 */
+ IA_CSS_STREAM_FORMAT_USER_DEF8, /** User defined 8-bit data type 8 */
};
#define IA_CSS_STREAM_FORMAT_NUM IA_CSS_STREAM_FORMAT_USER_DEF8
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
index 2c8d9de10a59..ca3203357ff5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_STREAM_PUBLIC_H
#define __IA_CSS_STREAM_PUBLIC_H
-/** @file
+/* @file
* This file contains support for configuring and controlling streams
*/
@@ -27,26 +27,26 @@
#include "ia_css_prbs.h"
#include "ia_css_input_port.h"
-/** Input modes, these enumerate all supported input modes.
+/* Input modes, these enumerate all supported input modes.
* Note that not all ISP modes support all input modes.
*/
enum ia_css_input_mode {
- IA_CSS_INPUT_MODE_SENSOR, /**< data from sensor */
- IA_CSS_INPUT_MODE_FIFO, /**< data from input-fifo */
- IA_CSS_INPUT_MODE_TPG, /**< data from test-pattern generator */
- IA_CSS_INPUT_MODE_PRBS, /**< data from pseudo-random bit stream */
- IA_CSS_INPUT_MODE_MEMORY, /**< data from a frame in memory */
- IA_CSS_INPUT_MODE_BUFFERED_SENSOR /**< data is sent through mipi buffer */
+ IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */
+ IA_CSS_INPUT_MODE_FIFO, /** data from input-fifo */
+ IA_CSS_INPUT_MODE_TPG, /** data from test-pattern generator */
+ IA_CSS_INPUT_MODE_PRBS, /** data from pseudo-random bit stream */
+ IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */
+ IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
};
-/** Structure of the MIPI buffer configuration
+/* Structure of the MIPI buffer configuration
*/
struct ia_css_mipi_buffer_config {
- unsigned int size_mem_words; /**< The frame size in the system memory
+ unsigned int size_mem_words; /** The frame size in the system memory
words (32B) */
- bool contiguous; /**< Allocated memory physically
+ bool contiguous; /** Allocated memory physically
contiguously or not. \deprecated{Will be false always.}*/
- unsigned int nof_mipi_buffers; /**< The number of MIPI buffers required for this
+ unsigned int nof_mipi_buffers; /** The number of MIPI buffers required for this
stream */
};
@@ -57,44 +57,44 @@ enum {
IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH
};
-/** This is input data configuration for one MIPI data type. We can have
+/* This is input data configuration for one MIPI data type. We can have
* multiple of this in one virtual channel.
*/
struct ia_css_stream_isys_stream_config {
- struct ia_css_resolution input_res; /**< Resolution of input data */
- enum ia_css_stream_format format; /**< Format of input stream. This data
+ struct ia_css_resolution input_res; /** Resolution of input data */
+ enum ia_css_stream_format format; /** Format of input stream. This data
format will be mapped to MIPI data
type internally. */
- int linked_isys_stream_id; /**< default value is -1, other value means
+ int linked_isys_stream_id; /** default value is -1, other value means
current isys_stream shares the same buffer with
indicated isys_stream*/
- bool valid; /**< indicate whether other fields have valid value */
+ bool valid; /** indicate whether other fields have valid value */
};
struct ia_css_stream_input_config {
- struct ia_css_resolution input_res; /**< Resolution of input data */
- struct ia_css_resolution effective_res; /**< Resolution of input data.
+ struct ia_css_resolution input_res; /** Resolution of input data */
+ struct ia_css_resolution effective_res; /** Resolution of input data.
Used for CSS 2400/1 System and deprecated for other
systems (replaced by input_effective_res in
ia_css_pipe_config) */
- enum ia_css_stream_format format; /**< Format of input stream. This data
+ enum ia_css_stream_format format; /** Format of input stream. This data
format will be mapped to MIPI data
type internally. */
- enum ia_css_bayer_order bayer_order; /**< Bayer order for RAW streams */
+ enum ia_css_bayer_order bayer_order; /** Bayer order for RAW streams */
};
-/** Input stream description. This describes how input will flow into the
+/* Input stream description. This describes how input will flow into the
* CSS. This is used to program the CSS hardware.
*/
struct ia_css_stream_config {
- enum ia_css_input_mode mode; /**< Input mode */
+ enum ia_css_input_mode mode; /** Input mode */
union {
- struct ia_css_input_port port; /**< Port, for sensor only. */
- struct ia_css_tpg_config tpg; /**< TPG configuration */
- struct ia_css_prbs_config prbs; /**< PRBS configuration */
- } source; /**< Source of input data */
- unsigned int channel_id; /**< Channel on which input data
+ struct ia_css_input_port port; /** Port, for sensor only. */
+ struct ia_css_tpg_config tpg; /** TPG configuration */
+ struct ia_css_prbs_config prbs; /** PRBS configuration */
+ } source; /** Source of input data */
+ unsigned int channel_id; /** Channel on which input data
will arrive. Use this field
to specify virtual channel id.
Valid values are: 0, 1, 2, 3 */
@@ -110,29 +110,29 @@ struct ia_css_stream_config {
* and will be deprecated. In the future,all platforms will use the N*N method
*/
#endif
- unsigned int sensor_binning_factor; /**< Binning factor used by sensor
+ unsigned int sensor_binning_factor; /** Binning factor used by sensor
to produce image data. This is
used for shading correction. */
- unsigned int pixels_per_clock; /**< Number of pixels per clock, which can be
+ unsigned int pixels_per_clock; /** Number of pixels per clock, which can be
1, 2 or 4. */
- bool online; /**< offline will activate RAW copy on SP, use this for
+ bool online; /** offline will activate RAW copy on SP, use this for
continuous capture. */
/* ISYS2401 usage: ISP receives data directly from sensor, no copy. */
- unsigned init_num_cont_raw_buf; /**< initial number of raw buffers to
+ unsigned init_num_cont_raw_buf; /** initial number of raw buffers to
allocate */
- unsigned target_num_cont_raw_buf; /**< total number of raw buffers to
+ unsigned target_num_cont_raw_buf; /** total number of raw buffers to
allocate */
- bool pack_raw_pixels; /**< Pack pixels in the raw buffers */
- bool continuous; /**< Use SP copy feature to continuously capture frames
+ bool pack_raw_pixels; /** Pack pixels in the raw buffers */
+ bool continuous; /** Use SP copy feature to continuously capture frames
to system memory and run pipes in offline mode */
- bool disable_cont_viewfinder; /**< disable continous viewfinder for ZSL use case */
- int32_t flash_gpio_pin; /**< pin on which the flash is connected, -1 for no flash */
- int left_padding; /**< The number of input-formatter left-paddings, -1 for default from binary.*/
- struct ia_css_mipi_buffer_config mipi_buffer_config; /**< mipi buffer configuration */
- struct ia_css_metadata_config metadata_config; /**< Metadata configuration. */
- bool ia_css_enable_raw_buffer_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+ bool disable_cont_viewfinder; /** disable continous viewfinder for ZSL use case */
+ int32_t flash_gpio_pin; /** pin on which the flash is connected, -1 for no flash */
+ int left_padding; /** The number of input-formatter left-paddings, -1 for default from binary.*/
+ struct ia_css_mipi_buffer_config mipi_buffer_config; /** mipi buffer configuration */
+ struct ia_css_metadata_config metadata_config; /** Metadata configuration. */
+ bool ia_css_enable_raw_buffer_locking; /** Enable Raw Buffer Locking for HALv3 Support */
bool lock_all;
- /**< Lock all RAW buffers (true) or lock only buffers processed by
+ /** Lock all RAW buffers (true) or lock only buffers processed by
video or preview pipe (false).
This setting needs to be enabled to allow raw buffer locking
without continuous viewfinder. */
@@ -140,15 +140,15 @@ struct ia_css_stream_config {
struct ia_css_stream;
-/** Stream info, this struct describes properties of a stream after it has been
+/* Stream info, this struct describes properties of a stream after it has been
* created.
*/
struct ia_css_stream_info {
struct ia_css_metadata_info metadata_info;
- /**< Info about the metadata layout, this contains the stride. */
+ /** Info about the metadata layout, this contains the stride. */
};
-/** @brief Load default stream configuration
+/* @brief Load default stream configuration
* @param[in,out] stream_config The stream configuration.
* @return None
*
@@ -165,7 +165,7 @@ void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config);
* create the internal structures and fill in the configuration data and pipes
*/
- /** @brief Creates a stream
+ /* @brief Creates a stream
* @param[in] stream_config The stream configuration.
* @param[in] num_pipes The number of pipes to incorporate in the stream.
* @param[in] pipes The pipes.
@@ -180,7 +180,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
struct ia_css_pipe *pipes[],
struct ia_css_stream **stream);
-/** @brief Destroys a stream
+/* @brief Destroys a stream
* @param[in] stream The stream.
* @return IA_CSS_SUCCESS or the error code.
*
@@ -189,7 +189,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
enum ia_css_err
ia_css_stream_destroy(struct ia_css_stream *stream);
-/** @brief Provides information about a stream
+/* @brief Provides information about a stream
* @param[in] stream The stream.
* @param[out] stream_info The information about the stream.
* @return IA_CSS_SUCCESS or the error code.
@@ -200,7 +200,7 @@ enum ia_css_err
ia_css_stream_get_info(const struct ia_css_stream *stream,
struct ia_css_stream_info *stream_info);
-/** @brief load (rebuild) a stream that was unloaded.
+/* @brief load (rebuild) a stream that was unloaded.
* @param[in] stream The stream
* @return IA_CSS_SUCCESS or the error code
*
@@ -210,7 +210,7 @@ ia_css_stream_get_info(const struct ia_css_stream *stream,
enum ia_css_err
ia_css_stream_load(struct ia_css_stream *stream);
-/** @brief Starts the stream.
+/* @brief Starts the stream.
* @param[in] stream The stream.
* @return IA_CSS_SUCCESS or the error code.
*
@@ -223,7 +223,7 @@ ia_css_stream_load(struct ia_css_stream *stream);
enum ia_css_err
ia_css_stream_start(struct ia_css_stream *stream);
-/** @brief Stop the stream.
+/* @brief Stop the stream.
* @param[in] stream The stream.
* @return IA_CSS_SUCCESS or the error code.
*
@@ -233,7 +233,7 @@ ia_css_stream_start(struct ia_css_stream *stream);
enum ia_css_err
ia_css_stream_stop(struct ia_css_stream *stream);
-/** @brief Check if a stream has stopped
+/* @brief Check if a stream has stopped
* @param[in] stream The stream.
* @return boolean flag
*
@@ -242,7 +242,7 @@ ia_css_stream_stop(struct ia_css_stream *stream);
bool
ia_css_stream_has_stopped(struct ia_css_stream *stream);
-/** @brief destroy a stream according to the stream seed previosly saved in the seed array.
+/* @brief destroy a stream according to the stream seed previosly saved in the seed array.
* @param[in] stream The stream.
* @return IA_CSS_SUCCESS (no other errors are generated now)
*
@@ -251,7 +251,7 @@ ia_css_stream_has_stopped(struct ia_css_stream *stream);
enum ia_css_err
ia_css_stream_unload(struct ia_css_stream *stream);
-/** @brief Returns stream format
+/* @brief Returns stream format
* @param[in] stream The stream.
* @return format of the string
*
@@ -260,7 +260,7 @@ ia_css_stream_unload(struct ia_css_stream *stream);
enum ia_css_stream_format
ia_css_stream_get_format(const struct ia_css_stream *stream);
-/** @brief Check if the stream is configured for 2 pixels per clock
+/* @brief Check if the stream is configured for 2 pixels per clock
* @param[in] stream The stream.
* @return boolean flag
*
@@ -270,7 +270,7 @@ ia_css_stream_get_format(const struct ia_css_stream *stream);
bool
ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
-/** @brief Sets the output frame stride (at the last pipe)
+/* @brief Sets the output frame stride (at the last pipe)
* @param[in] stream The stream
* @param[in] output_padded_width - the output buffer stride.
* @return ia_css_err
@@ -280,7 +280,7 @@ ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
enum ia_css_err
ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width);
-/** @brief Return max number of continuous RAW frames.
+/* @brief Return max number of continuous RAW frames.
* @param[in] stream The stream.
* @param[out] buffer_depth The maximum number of continuous RAW frames.
* @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -291,7 +291,7 @@ ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int
enum ia_css_err
ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
-/** @brief Set nr of continuous RAW frames to use.
+/* @brief Set nr of continuous RAW frames to use.
*
* @param[in] stream The stream.
* @param[in] buffer_depth Number of frames to set.
@@ -302,7 +302,7 @@ ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_dep
enum ia_css_err
ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth);
-/** @brief Get number of continuous RAW frames to use.
+/* @brief Get number of continuous RAW frames to use.
* @param[in] stream The stream.
* @param[out] buffer_depth The number of frames to use
* @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -315,7 +315,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
/* ===== CAPTURE ===== */
-/** @brief Configure the continuous capture
+/* @brief Configure the continuous capture
*
* @param[in] stream The stream.
* @param[in] num_captures The number of RAW frames to be processed to
@@ -347,7 +347,7 @@ ia_css_stream_capture(struct ia_css_stream *stream,
unsigned int skip,
int offset);
-/** @brief Specify which raw frame to tag based on exp_id found in frame info
+/* @brief Specify which raw frame to tag based on exp_id found in frame info
*
* @param[in] stream The stream.
* @param[in] exp_id The exposure id of the raw frame to tag.
@@ -363,7 +363,7 @@ ia_css_stream_capture_frame(struct ia_css_stream *stream,
/* ===== VIDEO ===== */
-/** @brief Send streaming data into the css input FIFO
+/* @brief Send streaming data into the css input FIFO
*
* @param[in] stream The stream.
* @param[in] data Pointer to the pixels to be send.
@@ -395,7 +395,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
unsigned int width,
unsigned int height);
-/** @brief Start an input frame on the CSS input FIFO.
+/* @brief Start an input frame on the CSS input FIFO.
*
* @param[in] stream The stream.
* @return None
@@ -411,7 +411,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
void
ia_css_stream_start_input_frame(const struct ia_css_stream *stream);
-/** @brief Send a line of input data into the CSS input FIFO.
+/* @brief Send a line of input data into the CSS input FIFO.
*
* @param[in] stream The stream.
* @param[in] data Array of the first line of image data.
@@ -435,7 +435,7 @@ ia_css_stream_send_input_line(const struct ia_css_stream *stream,
const unsigned short *data2,
unsigned int width2);
-/** @brief Send a line of input embedded data into the CSS input FIFO.
+/* @brief Send a line of input embedded data into the CSS input FIFO.
*
* @param[in] stream Pointer of the stream.
* @param[in] format Format of the embedded data.
@@ -457,7 +457,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
const unsigned short *data,
unsigned int width);
-/** @brief End an input frame on the CSS input FIFO.
+/* @brief End an input frame on the CSS input FIFO.
*
* @param[in] stream The stream.
* @return None
@@ -467,7 +467,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
void
ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
-/** @brief send a request flash command to SP
+/* @brief send a request flash command to SP
*
* @param[in] stream The stream.
* @return None
@@ -481,7 +481,7 @@ ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
void
ia_css_stream_request_flash(struct ia_css_stream *stream);
-/** @brief Configure a stream with filter coefficients.
+/* @brief Configure a stream with filter coefficients.
* @deprecated {Replaced by
* ia_css_pipe_set_isp_config_on_pipe()}
*
@@ -503,7 +503,7 @@ ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream,
const struct ia_css_isp_config *config,
struct ia_css_pipe *pipe);
-/** @brief Configure a stream with filter coefficients.
+/* @brief Configure a stream with filter coefficients.
* @deprecated {Replaced by
* ia_css_pipe_set_isp_config()}
* @param[in] stream The stream.
@@ -523,7 +523,7 @@ ia_css_stream_set_isp_config(
struct ia_css_stream *stream,
const struct ia_css_isp_config *config);
-/** @brief Get selected configuration settings
+/* @brief Get selected configuration settings
* @param[in] stream The stream.
* @param[out] config Configuration settings.
* @return None
@@ -532,7 +532,7 @@ void
ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
struct ia_css_isp_config *config);
-/** @brief allocate continuous raw frames for continuous capture
+/* @brief allocate continuous raw frames for continuous capture
* @param[in] stream The stream.
* @return IA_CSS_SUCCESS or error code.
*
@@ -544,7 +544,7 @@ ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
enum ia_css_err
ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
-/** @brief allocate continuous raw frames for continuous capture
+/* @brief allocate continuous raw frames for continuous capture
* @param[in] stream The stream.
* @return IA_CSS_SUCCESS or error code.
*
@@ -555,7 +555,7 @@ ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
enum ia_css_err
ia_css_update_continuous_frames(struct ia_css_stream *stream);
-/** @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support)
+/* @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support)
* @param[in] stream The stream.
* @param[in] exp_id exposure id that uniquely identifies the locked Raw Frame Buffer
* @return ia_css_err IA_CSS_SUCCESS or error code
@@ -567,7 +567,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream);
enum ia_css_err
ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id);
-/** @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe
+/* @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe
* @param[in] stream The stream.
* @param[in] enable - true, disable - false
* @return None
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
index 575bb28b4bec..b256d7c88716 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
@@ -31,47 +31,47 @@ more details.
#ifndef __IA_CSS_TIMER_H
#define __IA_CSS_TIMER_H
-/** @file
+/* @file
* Timer interface definitions
*/
#include <type_support.h> /* for uint32_t */
#include "ia_css_err.h"
-/** @brief timer reading definition */
+/* @brief timer reading definition */
typedef uint32_t clock_value_t;
-/** @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/
+/* @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/
struct ia_css_clock_tick {
- clock_value_t ticks; /**< measured time in ticks.*/
+ clock_value_t ticks; /** measured time in ticks.*/
};
-/** @brief TIMER event codes */
+/* @brief TIMER event codes */
enum ia_css_tm_event {
IA_CSS_TM_EVENT_AFTER_INIT,
- /**< Timer Event after Initialization */
+ /** Timer Event after Initialization */
IA_CSS_TM_EVENT_MAIN_END,
- /**< Timer Event after end of Main */
+ /** Timer Event after end of Main */
IA_CSS_TM_EVENT_THREAD_START,
- /**< Timer Event after thread start */
+ /** Timer Event after thread start */
IA_CSS_TM_EVENT_FRAME_PROC_START,
- /**< Timer Event after Frame Process Start */
+ /** Timer Event after Frame Process Start */
IA_CSS_TM_EVENT_FRAME_PROC_END
- /**< Timer Event after Frame Process End */
+ /** Timer Event after Frame Process End */
};
-/** @brief code measurement common struct */
+/* @brief code measurement common struct */
struct ia_css_time_meas {
- clock_value_t start_timer_value; /**< measured time in ticks */
- clock_value_t end_timer_value; /**< measured time in ticks */
+ clock_value_t start_timer_value; /** measured time in ticks */
+ clock_value_t end_timer_value; /** measured time in ticks */
};
/**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */
#define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t)
-/** @brief checks to ensure correct alignment for ia_css_time_meas. */
+/* @brief checks to ensure correct alignment for ia_css_time_meas. */
#define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \
+ sizeof(clock_value_t))
-/** @brief API to fetch timer count directly
+/* @brief API to fetch timer count directly
*
* @param curr_ts [out] measured count value
* @return IA_CSS_SUCCESS if success
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
index 9238a3317a46..81498bd7485b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_TPG_H
#define __IA_CSS_TPG_H
-/** @file
+/* @file
* This file contains support for the test pattern generator (TPG)
*/
-/** Enumerate the TPG IDs.
+/* Enumerate the TPG IDs.
*/
enum ia_css_tpg_id {
IA_CSS_TPG_ID0,
@@ -35,7 +35,7 @@ enum ia_css_tpg_id {
*/
#define N_CSS_TPG_IDS (IA_CSS_TPG_ID2+1)
-/** Enumerate the TPG modes.
+/* Enumerate the TPG modes.
*/
enum ia_css_tpg_mode {
IA_CSS_TPG_MODE_RAMP,
@@ -44,7 +44,7 @@ enum ia_css_tpg_mode {
IA_CSS_TPG_MODE_MONO
};
-/** @brief Configure the test pattern generator.
+/* @brief Configure the test pattern generator.
*
* Configure the Test Pattern Generator, the way these values are used to
* generate the pattern can be seen in the HRT extension for the test pattern
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
index 5fec3d5c89d8..725b90072cfe 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
@@ -16,7 +16,7 @@
#ifndef _IA_CSS_TYPES_H
#define _IA_CSS_TYPES_H
-/** @file
+/* @file
* This file contains types used for the ia_css parameters.
* These types are in a separate file because they are expected
* to be used in software layers that do not access the CSS API
@@ -58,7 +58,7 @@
#include "isp/kernels/output/output_1.0/ia_css_output_types.h"
#define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
-/**< Should be removed after Driver adaptation will be done */
+/** Should be removed after Driver adaptation will be done */
#define IA_CSS_VERSION_MAJOR 2
#define IA_CSS_VERSION_MINOR 0
@@ -69,8 +69,8 @@
/* Min and max exposure IDs. These macros are here to allow
* the drivers to get this information. Changing these macros
* constitutes a CSS API change. */
-#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /**< Minimum exposure ID */
-#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /**< Maximum exposure ID */
+#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /** Minimum exposure ID */
+#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /** Maximum exposure ID */
/* opaque types */
struct ia_css_isp_parameters;
@@ -79,72 +79,72 @@ struct ia_css_memory_offsets;
struct ia_css_config_memory_offsets;
struct ia_css_state_memory_offsets;
-/** Virtual address within the CSS address space. */
+/* Virtual address within the CSS address space. */
typedef uint32_t ia_css_ptr;
-/** Generic resolution structure.
+/* Generic resolution structure.
*/
struct ia_css_resolution {
- uint32_t width; /**< Width */
- uint32_t height; /**< Height */
+ uint32_t width; /** Width */
+ uint32_t height; /** Height */
};
-/** Generic coordinate structure.
+/* Generic coordinate structure.
*/
struct ia_css_coordinate {
- int32_t x; /**< Value of a coordinate on the horizontal axis */
- int32_t y; /**< Value of a coordinate on the vertical axis */
+ int32_t x; /** Value of a coordinate on the horizontal axis */
+ int32_t y; /** Value of a coordinate on the vertical axis */
};
-/** Vector with signed values. This is used to indicate motion for
+/* Vector with signed values. This is used to indicate motion for
* Digital Image Stabilization.
*/
struct ia_css_vector {
- int32_t x; /**< horizontal motion (in pixels) */
- int32_t y; /**< vertical motion (in pixels) */
+ int32_t x; /** horizontal motion (in pixels) */
+ int32_t y; /** vertical motion (in pixels) */
};
/* Short hands */
#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
-/** CSS data descriptor */
+/* CSS data descriptor */
struct ia_css_data {
- ia_css_ptr address; /**< CSS virtual address */
- uint32_t size; /**< Disabled if 0 */
+ ia_css_ptr address; /** CSS virtual address */
+ uint32_t size; /** Disabled if 0 */
};
-/** Host data descriptor */
+/* Host data descriptor */
struct ia_css_host_data {
- char *address; /**< Host address */
- uint32_t size; /**< Disabled if 0 */
+ char *address; /** Host address */
+ uint32_t size; /** Disabled if 0 */
};
-/** ISP data descriptor */
+/* ISP data descriptor */
struct ia_css_isp_data {
- uint32_t address; /**< ISP address */
- uint32_t size; /**< Disabled if 0 */
+ uint32_t address; /** ISP address */
+ uint32_t size; /** Disabled if 0 */
};
-/** Shading Correction types. */
+/* Shading Correction types. */
enum ia_css_shading_correction_type {
#ifndef ISP2401
- IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
+ IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
#else
- IA_CSS_SHADING_CORRECTION_NONE, /**< Shading Correction is not processed in the pipe. */
- IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
+ IA_CSS_SHADING_CORRECTION_NONE, /** Shading Correction is not processed in the pipe. */
+ IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
#endif
- /**< More shading correction types can be added in the future. */
+ /** More shading correction types can be added in the future. */
};
-/** Shading Correction information. */
+/* Shading Correction information. */
struct ia_css_shading_info {
- enum ia_css_shading_correction_type type; /**< Shading Correction type. */
+ enum ia_css_shading_correction_type type; /** Shading Correction type. */
- union { /** Shading Correction information of each Shading Correction types. */
+ union { /* Shading Correction information of each Shading Correction types. */
- /** Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
+ /* Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
*
* This structure contains the information necessary to generate
* the shading table required in the isp.
@@ -288,20 +288,20 @@ struct ia_css_shading_info {
*/
struct {
#ifndef ISP2401
- uint32_t enable; /**< Shading correction enabled.
+ uint32_t enable; /** Shading correction enabled.
0:disabled, 1:enabled */
- uint32_t num_hor_grids; /**< Number of data points per line
+ uint32_t num_hor_grids; /** Number of data points per line
per color on shading table. */
- uint32_t num_ver_grids; /**< Number of lines of data points
+ uint32_t num_ver_grids; /** Number of lines of data points
per color on shading table. */
- uint32_t bqs_per_grid_cell; /**< Grid cell size
+ uint32_t bqs_per_grid_cell; /** Grid cell size
in BQ(Bayer Quad) unit.
(1BQ means {Gr,R,B,Gb}(2x2 pixels).)
Valid values are 8,16,32,64. */
#else
- uint32_t num_hor_grids; /**< Number of data points per line per color on shading table. */
- uint32_t num_ver_grids; /**< Number of lines of data points per color on shading table. */
- uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ unit.
+ uint32_t num_hor_grids; /** Number of data points per line per color on shading table. */
+ uint32_t num_ver_grids; /** Number of lines of data points per color on shading table. */
+ uint32_t bqs_per_grid_cell; /** Grid cell size in BQ unit.
NOTE: bqs = size in BQ(Bayer Quad) unit.
1BQ means {Gr,R,B,Gb} (2x2 pixels).
Horizontal 1 bqs corresponds to horizontal 2 pixels.
@@ -310,13 +310,13 @@ struct ia_css_shading_info {
uint32_t bayer_scale_hor_ratio_in;
uint32_t bayer_scale_hor_ratio_out;
#ifndef ISP2401
- /**< Horizontal ratio of bayer scaling
+ /** Horizontal ratio of bayer scaling
between input width and output width, for the scaling
which should be done before shading correction.
output_width = input_width * bayer_scale_hor_ratio_out
/ bayer_scale_hor_ratio_in */
#else
- /**< Horizontal ratio of bayer scaling between input width and output width,
+ /** Horizontal ratio of bayer scaling between input width and output width,
for the scaling which should be done before shading correction.
output_width = input_width * bayer_scale_hor_ratio_out
/ bayer_scale_hor_ratio_in + 0.5 */
@@ -324,30 +324,30 @@ struct ia_css_shading_info {
uint32_t bayer_scale_ver_ratio_in;
uint32_t bayer_scale_ver_ratio_out;
#ifndef ISP2401
- /**< Vertical ratio of bayer scaling
+ /** Vertical ratio of bayer scaling
between input height and output height, for the scaling
which should be done before shading correction.
output_height = input_height * bayer_scale_ver_ratio_out
/ bayer_scale_ver_ratio_in */
uint32_t sc_bayer_origin_x_bqs_on_shading_table;
- /**< X coordinate (in bqs) of bayer origin on shading table.
+ /** X coordinate (in bqs) of bayer origin on shading table.
This indicates the left-most pixel of bayer
(not include margin) inputted to the shading correction.
This corresponds to the left-most pixel of bayer
inputted to isp from sensor. */
uint32_t sc_bayer_origin_y_bqs_on_shading_table;
- /**< Y coordinate (in bqs) of bayer origin on shading table.
+ /** Y coordinate (in bqs) of bayer origin on shading table.
This indicates the top pixel of bayer
(not include margin) inputted to the shading correction.
This corresponds to the top pixel of bayer
inputted to isp from sensor. */
#else
- /**< Vertical ratio of bayer scaling between input height and output height,
+ /** Vertical ratio of bayer scaling between input height and output height,
for the scaling which should be done before shading correction.
output_height = input_height * bayer_scale_ver_ratio_out
/ bayer_scale_ver_ratio_in + 0.5 */
struct ia_css_resolution isp_input_sensor_data_res_bqs;
- /**< Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
+ /** Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
NOTE: This is NOT the size of the physical sensor size.
CSS requests the driver that ISP inputs sensor data
by the size of isp_input_sensor_data_res_bqs.
@@ -357,22 +357,22 @@ struct ia_css_shading_info {
ISP assumes the area of isp_input_sensor_data_res_bqs
is centered on the physical sensor. */
struct ia_css_resolution sensor_data_res_bqs;
- /**< Sensor data size (in bqs) at shading correction.
+ /** Sensor data size (in bqs) at shading correction.
This is the size AFTER bayer scaling. */
struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
- /**< Origin of sensor data area positioned on shading table at shading correction.
+ /** Origin of sensor data area positioned on shading table at shading correction.
The coordinate x,y should be positive values. */
#endif
} type_1;
- /**< More structures can be added here when more shading correction types will be added
+ /** More structures can be added here when more shading correction types will be added
in the future. */
} info;
};
#ifndef ISP2401
-/** Default Shading Correction information of Shading Correction Type 1. */
+/* Default Shading Correction information of Shading Correction Type 1. */
#define DEFAULT_SHADING_INFO_TYPE_1 \
{ \
IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \
@@ -394,7 +394,7 @@ struct ia_css_shading_info {
#else
-/** Default Shading Correction information of Shading Correction Type 1. */
+/* Default Shading Correction information of Shading Correction Type 1. */
#define DEFAULT_SHADING_INFO_TYPE_1 \
{ \
IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \
@@ -416,27 +416,27 @@ struct ia_css_shading_info {
#endif
-/** Default Shading Correction information. */
+/* Default Shading Correction information. */
#define DEFAULT_SHADING_INFO DEFAULT_SHADING_INFO_TYPE_1
-/** structure that describes the 3A and DIS grids */
+/* structure that describes the 3A and DIS grids */
struct ia_css_grid_info {
- /** \name ISP input size
+ /* \name ISP input size
* that is visible for user
* @{
*/
uint32_t isp_in_width;
uint32_t isp_in_height;
- /** @}*/
+ /* @}*/
- struct ia_css_3a_grid_info s3a_grid; /**< 3A grid info */
+ struct ia_css_3a_grid_info s3a_grid; /** 3A grid info */
union ia_css_dvs_grid_u dvs_grid;
- /**< All types of DVS statistics grid info union */
+ /** All types of DVS statistics grid info union */
enum ia_css_vamem_type vamem_type;
};
-/** defaults for ia_css_grid_info structs */
+/* defaults for ia_css_grid_info structs */
#define DEFAULT_GRID_INFO \
{ \
0, /* isp_in_width */ \
@@ -446,25 +446,25 @@ struct ia_css_grid_info {
IA_CSS_VAMEM_TYPE_1 /* vamem_type */ \
}
-/** Morphing table, used for geometric distortion and chromatic abberration
+/* Morphing table, used for geometric distortion and chromatic abberration
* correction (GDCAC, also called GDC).
* This table describes the imperfections introduced by the lens, the
* advanced ISP can correct for these imperfections using this table.
*/
struct ia_css_morph_table {
- uint32_t enable; /**< To disable GDC, set this field to false. The
+ uint32_t enable; /** To disable GDC, set this field to false. The
coordinates fields can be set to NULL in this case. */
- uint32_t height; /**< Table height */
- uint32_t width; /**< Table width */
+ uint32_t height; /** Table height */
+ uint32_t width; /** Table width */
uint16_t *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
- /**< X coordinates that describe the sensor imperfection */
+ /** X coordinates that describe the sensor imperfection */
uint16_t *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
- /**< Y coordinates that describe the sensor imperfection */
+ /** Y coordinates that describe the sensor imperfection */
};
struct ia_css_dvs_6axis_config {
unsigned int exp_id;
- /**< Exposure ID, see ia_css_event_public.h for more detail */
+ /** Exposure ID, see ia_css_event_public.h for more detail */
uint32_t width_y;
uint32_t height_y;
uint32_t width_uv;
@@ -479,16 +479,16 @@ struct ia_css_dvs_6axis_config {
* This specifies the coordinates (x,y)
*/
struct ia_css_point {
- int32_t x; /**< x coordinate */
- int32_t y; /**< y coordinate */
+ int32_t x; /** x coordinate */
+ int32_t y; /** y coordinate */
};
/**
* This specifies the region
*/
struct ia_css_region {
- struct ia_css_point origin; /**< Starting point coordinates for the region */
- struct ia_css_resolution resolution; /**< Region resolution */
+ struct ia_css_point origin; /** Starting point coordinates for the region */
+ struct ia_css_resolution resolution; /** Region resolution */
};
/**
@@ -509,30 +509,30 @@ struct ia_css_region {
* y + height <= effective input height
*/
struct ia_css_dz_config {
- uint32_t dx; /**< Horizontal zoom factor */
- uint32_t dy; /**< Vertical zoom factor */
- struct ia_css_region zoom_region; /**< region for zoom */
+ uint32_t dx; /** Horizontal zoom factor */
+ uint32_t dy; /** Vertical zoom factor */
+ struct ia_css_region zoom_region; /** region for zoom */
};
-/** The still capture mode, this can be RAW (simply copy sensor input to DDR),
+/* The still capture mode, this can be RAW (simply copy sensor input to DDR),
* Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR).
*/
enum ia_css_capture_mode {
- IA_CSS_CAPTURE_MODE_RAW, /**< no processing, copy data only */
- IA_CSS_CAPTURE_MODE_BAYER, /**< bayer processing, up to demosaic */
- IA_CSS_CAPTURE_MODE_PRIMARY, /**< primary ISP */
- IA_CSS_CAPTURE_MODE_ADVANCED, /**< advanced ISP (GDC) */
- IA_CSS_CAPTURE_MODE_LOW_LIGHT /**< low light ISP (ANR) */
+ IA_CSS_CAPTURE_MODE_RAW, /** no processing, copy data only */
+ IA_CSS_CAPTURE_MODE_BAYER, /** bayer processing, up to demosaic */
+ IA_CSS_CAPTURE_MODE_PRIMARY, /** primary ISP */
+ IA_CSS_CAPTURE_MODE_ADVANCED, /** advanced ISP (GDC) */
+ IA_CSS_CAPTURE_MODE_LOW_LIGHT /** low light ISP (ANR) */
};
struct ia_css_capture_config {
- enum ia_css_capture_mode mode; /**< Still capture mode */
- uint32_t enable_xnr; /**< Enable/disable XNR */
+ enum ia_css_capture_mode mode; /** Still capture mode */
+ uint32_t enable_xnr; /** Enable/disable XNR */
uint32_t enable_raw_output;
- bool enable_capture_pp_bli; /**< Enable capture_pp_bli mode */
+ bool enable_capture_pp_bli; /** Enable capture_pp_bli mode */
};
-/** default settings for ia_css_capture_config structs */
+/* default settings for ia_css_capture_config structs */
#define DEFAULT_CAPTURE_CONFIG \
{ \
IA_CSS_CAPTURE_MODE_PRIMARY, /* mode (capture) */ \
@@ -542,7 +542,7 @@ struct ia_css_capture_config {
}
-/** ISP filter configuration. This is a collection of configurations
+/* ISP filter configuration. This is a collection of configurations
* for each of the ISP filters (modules).
*
* NOTE! The contents of all pointers is copied when get or set with the
@@ -557,98 +557,98 @@ struct ia_css_capture_config {
* ["ISP block", 2only] : ISP block is used only for ISP2.
*/
struct ia_css_isp_config {
- struct ia_css_wb_config *wb_config; /**< White Balance
+ struct ia_css_wb_config *wb_config; /** White Balance
[WB1, 1&2] */
- struct ia_css_cc_config *cc_config; /**< Color Correction
+ struct ia_css_cc_config *cc_config; /** Color Correction
[CSC1, 1only] */
- struct ia_css_tnr_config *tnr_config; /**< Temporal Noise Reduction
+ struct ia_css_tnr_config *tnr_config; /** Temporal Noise Reduction
[TNR1, 1&2] */
- struct ia_css_ecd_config *ecd_config; /**< Eigen Color Demosaicing
+ struct ia_css_ecd_config *ecd_config; /** Eigen Color Demosaicing
[DE2, 2only] */
- struct ia_css_ynr_config *ynr_config; /**< Y(Luma) Noise Reduction
+ struct ia_css_ynr_config *ynr_config; /** Y(Luma) Noise Reduction
[YNR2&YEE2, 2only] */
- struct ia_css_fc_config *fc_config; /**< Fringe Control
+ struct ia_css_fc_config *fc_config; /** Fringe Control
[FC2, 2only] */
- struct ia_css_formats_config *formats_config; /**< Formats Control for main output
+ struct ia_css_formats_config *formats_config; /** Formats Control for main output
[FORMATS, 1&2] */
- struct ia_css_cnr_config *cnr_config; /**< Chroma Noise Reduction
+ struct ia_css_cnr_config *cnr_config; /** Chroma Noise Reduction
[CNR2, 2only] */
- struct ia_css_macc_config *macc_config; /**< MACC
+ struct ia_css_macc_config *macc_config; /** MACC
[MACC2, 2only] */
- struct ia_css_ctc_config *ctc_config; /**< Chroma Tone Control
+ struct ia_css_ctc_config *ctc_config; /** Chroma Tone Control
[CTC2, 2only] */
- struct ia_css_aa_config *aa_config; /**< YUV Anti-Aliasing
+ struct ia_css_aa_config *aa_config; /** YUV Anti-Aliasing
[AA2, 2only]
(not used currently) */
- struct ia_css_aa_config *baa_config; /**< Bayer Anti-Aliasing
+ struct ia_css_aa_config *baa_config; /** Bayer Anti-Aliasing
[BAA2, 1&2] */
- struct ia_css_ce_config *ce_config; /**< Chroma Enhancement
+ struct ia_css_ce_config *ce_config; /** Chroma Enhancement
[CE1, 1only] */
struct ia_css_dvs_6axis_config *dvs_6axis_config;
- struct ia_css_ob_config *ob_config; /**< Objective Black
+ struct ia_css_ob_config *ob_config; /** Objective Black
[OB1, 1&2] */
- struct ia_css_dp_config *dp_config; /**< Defect Pixel Correction
+ struct ia_css_dp_config *dp_config; /** Defect Pixel Correction
[DPC1/DPC2, 1&2] */
- struct ia_css_nr_config *nr_config; /**< Noise Reduction
+ struct ia_css_nr_config *nr_config; /** Noise Reduction
[BNR1&YNR1&CNR1, 1&2]*/
- struct ia_css_ee_config *ee_config; /**< Edge Enhancement
+ struct ia_css_ee_config *ee_config; /** Edge Enhancement
[YEE1, 1&2] */
- struct ia_css_de_config *de_config; /**< Demosaic
+ struct ia_css_de_config *de_config; /** Demosaic
[DE1, 1only] */
- struct ia_css_gc_config *gc_config; /**< Gamma Correction (for YUV)
+ struct ia_css_gc_config *gc_config; /** Gamma Correction (for YUV)
[GC1, 1only] */
- struct ia_css_anr_config *anr_config; /**< Advanced Noise Reduction */
- struct ia_css_3a_config *s3a_config; /**< 3A Statistics config */
- struct ia_css_xnr_config *xnr_config; /**< eXtra Noise Reduction */
- struct ia_css_dz_config *dz_config; /**< Digital Zoom */
- struct ia_css_cc_config *yuv2rgb_cc_config; /**< Color Correction
+ struct ia_css_anr_config *anr_config; /** Advanced Noise Reduction */
+ struct ia_css_3a_config *s3a_config; /** 3A Statistics config */
+ struct ia_css_xnr_config *xnr_config; /** eXtra Noise Reduction */
+ struct ia_css_dz_config *dz_config; /** Digital Zoom */
+ struct ia_css_cc_config *yuv2rgb_cc_config; /** Color Correction
[CCM2, 2only] */
- struct ia_css_cc_config *rgb2yuv_cc_config; /**< Color Correction
+ struct ia_css_cc_config *rgb2yuv_cc_config; /** Color Correction
[CSC2, 2only] */
- struct ia_css_macc_table *macc_table; /**< MACC
+ struct ia_css_macc_table *macc_table; /** MACC
[MACC1/MACC2, 1&2]*/
- struct ia_css_gamma_table *gamma_table; /**< Gamma Correction (for YUV)
+ struct ia_css_gamma_table *gamma_table; /** Gamma Correction (for YUV)
[GC1, 1only] */
- struct ia_css_ctc_table *ctc_table; /**< Chroma Tone Control
+ struct ia_css_ctc_table *ctc_table; /** Chroma Tone Control
[CTC1, 1only] */
- /** \deprecated */
- struct ia_css_xnr_table *xnr_table; /**< eXtra Noise Reduction
+ /* \deprecated */
+ struct ia_css_xnr_table *xnr_table; /** eXtra Noise Reduction
[XNR1, 1&2] */
- struct ia_css_rgb_gamma_table *r_gamma_table;/**< sRGB Gamma Correction
+ struct ia_css_rgb_gamma_table *r_gamma_table;/** sRGB Gamma Correction
[GC2, 2only] */
- struct ia_css_rgb_gamma_table *g_gamma_table;/**< sRGB Gamma Correction
+ struct ia_css_rgb_gamma_table *g_gamma_table;/** sRGB Gamma Correction
[GC2, 2only] */
- struct ia_css_rgb_gamma_table *b_gamma_table;/**< sRGB Gamma Correction
+ struct ia_css_rgb_gamma_table *b_gamma_table;/** sRGB Gamma Correction
[GC2, 2only] */
- struct ia_css_vector *motion_vector; /**< For 2-axis DVS */
+ struct ia_css_vector *motion_vector; /** For 2-axis DVS */
struct ia_css_shading_table *shading_table;
struct ia_css_morph_table *morph_table;
- struct ia_css_dvs_coefficients *dvs_coefs; /**< DVS 1.0 coefficients */
- struct ia_css_dvs2_coefficients *dvs2_coefs; /**< DVS 2.0 coefficients */
+ struct ia_css_dvs_coefficients *dvs_coefs; /** DVS 1.0 coefficients */
+ struct ia_css_dvs2_coefficients *dvs2_coefs; /** DVS 2.0 coefficients */
struct ia_css_capture_config *capture_config;
struct ia_css_anr_thres *anr_thres;
- /** @deprecated{Old shading settings, see bugzilla bz675 for details} */
+ /* @deprecated{Old shading settings, see bugzilla bz675 for details} */
struct ia_css_shading_settings *shading_settings;
- struct ia_css_xnr3_config *xnr3_config; /**< eXtreme Noise Reduction v3 */
- /** comment from Lasse: Be aware how this feature will affect coordinate
+ struct ia_css_xnr3_config *xnr3_config; /** eXtreme Noise Reduction v3 */
+ /* comment from Lasse: Be aware how this feature will affect coordinate
* normalization in different parts of the system. (e.g. face detection,
* touch focus, 3A statistics and windows of interest, shading correction,
* DVS, GDC) from IQ tool level and application level down-to ISP FW level.
* the risk for regression is not in the individual blocks, but how they
* integrate together. */
- struct ia_css_output_config *output_config; /**< Main Output Mirroring, flipping */
+ struct ia_css_output_config *output_config; /** Main Output Mirroring, flipping */
#ifdef ISP2401
- struct ia_css_tnr3_kernel_config *tnr3_config; /**< TNR3 config */
+ struct ia_css_tnr3_kernel_config *tnr3_config; /** TNR3 config */
#endif
- struct ia_css_scaler_config *scaler_config; /**< Skylake: scaler config (optional) */
- struct ia_css_formats_config *formats_config_display;/**< Formats control for viewfinder/display output (optional)
+ struct ia_css_scaler_config *scaler_config; /** Skylake: scaler config (optional) */
+ struct ia_css_formats_config *formats_config_display;/** Formats control for viewfinder/display output (optional)
[OSYS, n/a] */
- struct ia_css_output_config *output_config_display; /**< Viewfinder/display output mirroring, flipping (optional) */
+ struct ia_css_output_config *output_config_display; /** Viewfinder/display output mirroring, flipping (optional) */
- struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */
- uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */
+ struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */
+ uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
};
#endif /* _IA_CSS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
index 48c59896e847..1e88901e0b82 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
@@ -15,16 +15,16 @@
#ifndef __IA_CSS_VERSION_H
#define __IA_CSS_VERSION_H
-/** @file
+/* @file
* This file contains functions to retrieve CSS-API version information
*/
#include <ia_css_err.h>
-/** a common size for the version arrays */
+/* a common size for the version arrays */
#define MAX_VERSION_SIZE 500
-/** @brief Retrieves the current CSS version
+/* @brief Retrieves the current CSS version
* @param[out] version A pointer to a buffer where to put the generated
* version string. NULL is ignored.
* @param[in] max_size Size of the version buffer. If version string
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
index 834eedbbeeff..0b95bf9b9aaf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
@@ -15,12 +15,12 @@
#ifndef __IA_CSS_AA2_TYPES_H
#define __IA_CSS_AA2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Anti-Aliasing parameters.
*/
-/** Anti-Aliasing configuration.
+/* Anti-Aliasing configuration.
*
* This structure is used both for YUV AA and Bayer AA.
*
@@ -39,7 +39,7 @@
* ISP2: BAA2 is used.
*/
struct ia_css_aa_config {
- uint16_t strength; /**< Strength of the filter.
+ uint16_t strength; /** Strength of the filter.
u0.13, [0,8191],
default/ineffective 0 */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
index e205574098f2..dc317a857369 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_ANR_TYPES_H
#define __IA_CSS_ANR_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Advanced Noise Reduction kernel v1
*/
@@ -23,11 +23,11 @@
#define ANR_BPP 10
#define ANR_ELEMENT_BITS ((CEIL_DIV(ANR_BPP, 8))*8)
-/** Advanced Noise Reduction configuration.
+/* Advanced Noise Reduction configuration.
* This is also known as Low-Light.
*/
struct ia_css_anr_config {
- int32_t threshold; /**< Threshold */
+ int32_t threshold; /** Threshold */
int32_t thresholds[4*4*4];
int32_t factors[3];
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
index 3832ada433ec..9b611315392c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_ANR2_TYPES_H
#define __IA_CSS_ANR2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Advanced Noise Reduction kernel v2
*/
@@ -23,7 +23,7 @@
#define ANR_PARAM_SIZE 13
-/** Advanced Noise Reduction (ANR) thresholds */
+/* Advanced Noise Reduction (ANR) thresholds */
struct ia_css_anr_thres {
int16_t data[13*64];
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
index 4a289853367a..312141793fd2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
@@ -18,7 +18,7 @@
#include "vmem.h"
#include "ia_css_anr2_types.h"
-/** Advanced Noise Reduction (ANR) thresholds */
+/* Advanced Noise Reduction (ANR) thresholds */
struct ia_css_isp_anr2_params {
VMEM_ARRAY(data, ANR_PARAM_SIZE*ISP_VEC_NELEMS);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
index 75ca7606b95c..a0d355454aa3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
@@ -27,7 +27,7 @@
#define BAYER_QUAD_HEIGHT 2
#define NOF_BAYER_VECTORS 4
-/** bayer load/store */
+/* bayer load/store */
struct sh_css_isp_bayer_ls_isp_config {
uint32_t base_address[NUM_BAYER_LS];
uint32_t width[NUM_BAYER_LS];
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
index 9ae27a9e0baa..ec1688e7352d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_BH_TYPES_H
#define __IA_CSS_BH_TYPES_H
-/** Number of elements in the BH table.
+/* Number of elements in the BH table.
* Should be consistent with hmem.h
*/
#define IA_CSS_HMEM_BH_TABLE_SIZE ISP_HIST_DEPTH
@@ -27,7 +27,7 @@
#define BH_COLOR_Y (3)
#define BH_COLOR_NUM (4)
-/** BH table */
+/* BH table */
struct ia_css_bh_table {
uint32_t hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
index 219fb835cb26..87e0f19c856b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
@@ -15,13 +15,13 @@
#ifndef __IA_CSS_BNLM_TYPES_H
#define __IA_CSS_BNLM_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Bayer Non-Linear Mean parameters.
*/
#include "type_support.h" /* int32_t */
-/** Bayer Non-Linear Mean configuration
+/* Bayer Non-Linear Mean configuration
*
* \brief BNLM public parameters.
* \details Struct with all parameters for the BNLM kernel that can be set
@@ -30,16 +30,16 @@
* ISP2.6.1: BNLM is used.
*/
struct ia_css_bnlm_config {
- bool rad_enable; /**< Enable a radial dependency in a weight calculation */
- int32_t rad_x_origin; /**< Initial x coordinate for a radius calculation */
- int32_t rad_y_origin; /**< Initial x coordinate for a radius calculation */
+ bool rad_enable; /** Enable a radial dependency in a weight calculation */
+ int32_t rad_x_origin; /** Initial x coordinate for a radius calculation */
+ int32_t rad_y_origin; /** Initial x coordinate for a radius calculation */
/* a threshold for average of weights if this < Th, do not denoise pixel */
int32_t avg_min_th;
/* minimum weight for denoising if max < th, do not denoise pixel */
int32_t max_min_th;
/**@{*/
- /** Coefficient for approximation, in the form of (1 + x / N)^N,
+ /* Coefficient for approximation, in the form of (1 + x / N)^N,
* that fits the first-order exp() to default exp_lut in BNLM sheet
* */
int32_t exp_coeff_a;
@@ -48,55 +48,55 @@ struct ia_css_bnlm_config {
uint32_t exp_exponent;
/**@}*/
- int32_t nl_th[3]; /**< Detail thresholds */
+ int32_t nl_th[3]; /** Detail thresholds */
- /** Index for n-th maximum candidate weight for each detail group */
+ /* Index for n-th maximum candidate weight for each detail group */
int32_t match_quality_max_idx[4];
/**@{*/
- /** A lookup table for 1/sqrt(1+mu) approximation */
+ /* A lookup table for 1/sqrt(1+mu) approximation */
int32_t mu_root_lut_thr[15];
int32_t mu_root_lut_val[16];
/**@}*/
/**@{*/
- /** A lookup table for SAD normalization */
+ /* A lookup table for SAD normalization */
int32_t sad_norm_lut_thr[15];
int32_t sad_norm_lut_val[16];
/**@}*/
/**@{*/
- /** A lookup table that models a weight's dependency on textures */
+ /* A lookup table that models a weight's dependency on textures */
int32_t sig_detail_lut_thr[15];
int32_t sig_detail_lut_val[16];
/**@}*/
/**@{*/
- /** A lookup table that models a weight's dependency on a pixel's radial distance */
+ /* A lookup table that models a weight's dependency on a pixel's radial distance */
int32_t sig_rad_lut_thr[15];
int32_t sig_rad_lut_val[16];
/**@}*/
/**@{*/
- /** A lookup table to control denoise power depending on a pixel's radial distance */
+ /* A lookup table to control denoise power depending on a pixel's radial distance */
int32_t rad_pow_lut_thr[15];
int32_t rad_pow_lut_val[16];
/**@}*/
/**@{*/
- /** Non linear transfer functions to calculate the blending coefficient depending on detail group */
- /** detail group 0 */
+ /* Non linear transfer functions to calculate the blending coefficient depending on detail group */
+ /* detail group 0 */
/**@{*/
int32_t nl_0_lut_thr[15];
int32_t nl_0_lut_val[16];
/**@}*/
/**@{*/
- /** detail group 1 */
+ /* detail group 1 */
int32_t nl_1_lut_thr[15];
int32_t nl_1_lut_val[16];
/**@}*/
/**@{*/
- /** detail group 2 */
+ /* detail group 2 */
int32_t nl_2_lut_thr[15];
int32_t nl_2_lut_val[16];
/**@}*/
/**@{*/
- /** detail group 3 */
+ /* detail group 3 */
int32_t nl_3_lut_thr[15];
int32_t nl_3_lut_val[16];
/**@}*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
index be80f705d8a1..551bd0ed3bac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
@@ -15,13 +15,13 @@
#ifndef __IA_CSS_BNR2_2_TYPES_H
#define __IA_CSS_BNR2_2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Bayer Noise Reduction parameters.
*/
#include "type_support.h" /* int32_t */
-/** Bayer Noise Reduction 2.2 configuration
+/* Bayer Noise Reduction 2.2 configuration
*
* \brief BNR2_2 public parameters.
* \details Struct with all parameters for the BNR2.2 kernel that can be set
@@ -31,41 +31,41 @@
*/
struct ia_css_bnr2_2_config {
/**@{*/
- /** Directional variance gain for R/G/B components in dark region */
+ /* Directional variance gain for R/G/B components in dark region */
int32_t d_var_gain_r;
int32_t d_var_gain_g;
int32_t d_var_gain_b;
/**@}*/
/**@{*/
- /** Slope of Directional variance gain between dark and bright region */
+ /* Slope of Directional variance gain between dark and bright region */
int32_t d_var_gain_slope_r;
int32_t d_var_gain_slope_g;
int32_t d_var_gain_slope_b;
/**@}*/
/**@{*/
- /** Non-Directional variance gain for R/G/B components in dark region */
+ /* Non-Directional variance gain for R/G/B components in dark region */
int32_t n_var_gain_r;
int32_t n_var_gain_g;
int32_t n_var_gain_b;
/**@}*/
/**@{*/
- /** Slope of Non-Directional variance gain between dark and bright region */
+ /* Slope of Non-Directional variance gain between dark and bright region */
int32_t n_var_gain_slope_r;
int32_t n_var_gain_slope_g;
int32_t n_var_gain_slope_b;
/**@}*/
- int32_t dir_thres; /**< Threshold for directional filtering */
- int32_t dir_thres_w; /**< Threshold width for directional filtering */
- int32_t var_offset_coef; /**< Variance offset coefficient */
- int32_t dir_gain; /**< Gain for directional coefficient */
- int32_t detail_gain; /**< Gain for low contrast texture control */
- int32_t detail_gain_divisor; /**< Gain divisor for low contrast texture control */
- int32_t detail_level_offset; /**< Bias value for low contrast texture control */
- int32_t d_var_th_min; /**< Minimum clipping value for directional variance*/
- int32_t d_var_th_max; /**< Maximum clipping value for diretional variance*/
- int32_t n_var_th_min; /**< Minimum clipping value for non-directional variance*/
- int32_t n_var_th_max; /**< Maximum clipping value for non-directional variance*/
+ int32_t dir_thres; /** Threshold for directional filtering */
+ int32_t dir_thres_w; /** Threshold width for directional filtering */
+ int32_t var_offset_coef; /** Variance offset coefficient */
+ int32_t dir_gain; /** Gain for directional coefficient */
+ int32_t detail_gain; /** Gain for low contrast texture control */
+ int32_t detail_gain_divisor; /** Gain divisor for low contrast texture control */
+ int32_t detail_level_offset; /** Bias value for low contrast texture control */
+ int32_t d_var_th_min; /** Minimum clipping value for directional variance*/
+ int32_t d_var_th_max; /** Maximum clipping value for diretional variance*/
+ int32_t n_var_th_min; /** Minimum clipping value for non-directional variance*/
+ int32_t n_var_th_max; /** Maximum clipping value for non-directional variance*/
};
#endif /* __IA_CSS_BNR2_2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
index 6df6c2be9a70..3ebc069d8ada 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_CNR2_TYPES_H
#define __IA_CSS_CNR2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Chroma Noise Reduction (CNR) parameters
*/
-/** Chroma Noise Reduction configuration.
+/* Chroma Noise Reduction configuration.
*
* Small sensitivity of edge means strong smoothness and NR performance.
* If you see blurred color on vertical edges,
@@ -33,21 +33,21 @@
* ISP2: CNR2 is used for Still.
*/
struct ia_css_cnr_config {
- uint16_t coring_u; /**< Coring level of U.
+ uint16_t coring_u; /** Coring level of U.
u0.13, [0,8191], default/ineffective 0 */
- uint16_t coring_v; /**< Coring level of V.
+ uint16_t coring_v; /** Coring level of V.
u0.13, [0,8191], default/ineffective 0 */
- uint16_t sense_gain_vy; /**< Sensitivity of horizontal edge of Y.
+ uint16_t sense_gain_vy; /** Sensitivity of horizontal edge of Y.
u13.0, [0,8191], default 100, ineffective 8191 */
- uint16_t sense_gain_vu; /**< Sensitivity of horizontal edge of U.
+ uint16_t sense_gain_vu; /** Sensitivity of horizontal edge of U.
u13.0, [0,8191], default 100, ineffective 8191 */
- uint16_t sense_gain_vv; /**< Sensitivity of horizontal edge of V.
+ uint16_t sense_gain_vv; /** Sensitivity of horizontal edge of V.
u13.0, [0,8191], default 100, ineffective 8191 */
- uint16_t sense_gain_hy; /**< Sensitivity of vertical edge of Y.
+ uint16_t sense_gain_hy; /** Sensitivity of vertical edge of Y.
u13.0, [0,8191], default 50, ineffective 8191 */
- uint16_t sense_gain_hu; /**< Sensitivity of vertical edge of U.
+ uint16_t sense_gain_hu; /** Sensitivity of vertical edge of U.
u13.0, [0,8191], default 50, ineffective 8191 */
- uint16_t sense_gain_hv; /**< Sensitivity of vertical edge of V.
+ uint16_t sense_gain_hv; /** Sensitivity of vertical edge of V.
u13.0, [0,8191], default 50, ineffective 8191 */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
index 3f11442500f0..47a38fd65950 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
@@ -23,10 +23,10 @@
*
*/
struct ia_css_conversion_config {
- uint32_t en; /**< en parameter */
- uint32_t dummy0; /**< dummy0 dummy parameter 0 */
- uint32_t dummy1; /**< dummy1 dummy parameter 1 */
- uint32_t dummy2; /**< dummy2 dummy parameter 2 */
+ uint32_t en; /** en parameter */
+ uint32_t dummy0; /** dummy0 dummy parameter 0 */
+ uint32_t dummy1; /** dummy1 dummy parameter 1 */
+ uint32_t dummy2; /** dummy2 dummy parameter 2 */
};
#endif /* __IA_CSS_CONVERSION_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
index 8bfc8dad37a8..0f1812cdd92a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
@@ -19,7 +19,7 @@
#include "dma.h"
#include "sh_css_internal.h" /* sh_css_crop_pos */
-/** Crop frame */
+/* Crop frame */
struct sh_css_isp_crop_isp_config {
uint32_t width_a_over_b;
struct dma_port_config port_b;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
index 8091ad4d4602..b5d454225f89 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_CROP_TYPES_H
#define __IA_CSS_CROP_TYPES_H
-/** Crop frame
+/* Crop frame
*
* ISP block: crop frame
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
index 54ced072467f..10404380c637 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_CSC_TYPES_H
#define __IA_CSS_CSC_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Color Space Conversion parameters.
*/
-/** Color Correction configuration.
+/* Color Correction configuration.
*
* This structure is used for 3 cases.
* ("YCgCo" is the output format of Demosaic.)
@@ -68,9 +68,9 @@
* 4096 -3430 -666
*/
struct ia_css_cc_config {
- uint32_t fraction_bits;/**< Fractional bits of matrix.
+ uint32_t fraction_bits;/** Fractional bits of matrix.
u8.0, [0,13] */
- int32_t matrix[3 * 3]; /**< Conversion matrix.
+ int32_t matrix[3 * 3]; /** Conversion matrix.
s[13-fraction_bits].[fraction_bits],
[-8192,8191] */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
index c66e823618f6..ad7040c9d7cb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
@@ -22,11 +22,11 @@
/*VMEM Luma params*/
struct ia_css_isp_ctc2_vmem_params {
- /**< Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/
+ /** Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/
VMEM_ARRAY(y_x, ISP_VEC_NELEMS);
- /** kneepoints by Y(Luma) 0.0, y_x1, y_x2, y _x3, y_x4*/
+ /* kneepoints by Y(Luma) 0.0, y_x1, y_x2, y _x3, y_x4*/
VMEM_ARRAY(y_y, ISP_VEC_NELEMS);
- /** Slopes of lines interconnecting
+ /* Slopes of lines interconnecting
* 0.0 -> y_x1 -> y_x2 -> y _x3 -> y_x4 -> 1.0*/
VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS);
};
@@ -34,15 +34,15 @@ struct ia_css_isp_ctc2_vmem_params {
/*DMEM Chroma params*/
struct ia_css_isp_ctc2_dmem_params {
- /** Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
+ /* Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
int32_t uv_y0;
int32_t uv_y1;
- /** Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
+ /* Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
int32_t uv_x0;
int32_t uv_x1;
- /** Slope of line interconnecting uv_x0 -> uv_x1*/
+ /* Slope of line interconnecting uv_x0 -> uv_x1*/
int32_t uv_dydx;
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
index 7b75f01e2ad2..1222cf33e851 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_CTC2_TYPES_H
#define __IA_CSS_CTC2_TYPES_H
-/** Chroma Tone Control configuration.
+/* Chroma Tone Control configuration.
*
* ISP block: CTC2 (CTC by polygonal approximation)
* (ISP1: CTC1 (CTC by look-up table) is used.)
@@ -24,7 +24,7 @@
*/
struct ia_css_ctc2_config {
- /**< Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
+ /** Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
* --default/ineffective value: 4096(0.5f)
*/
int32_t y_y0;
@@ -33,19 +33,19 @@ struct ia_css_ctc2_config {
int32_t y_y3;
int32_t y_y4;
int32_t y_y5;
- /** 1st-4th kneepoints by Y(Luma) --default/ineffective value:n/a
+ /* 1st-4th kneepoints by Y(Luma) --default/ineffective value:n/a
* requirement: 0.0 < y_x1 < y_x2 <y _x3 < y_x4 < 1.0
*/
int32_t y_x1;
int32_t y_x2;
int32_t y_x3;
int32_t y_x4;
- /** Gains by UV(Chroma) under threholds uv_x0 and uv_x1
+ /* Gains by UV(Chroma) under threholds uv_x0 and uv_x1
* --default/ineffective value: 4096(0.5f)
*/
int32_t uv_y0;
int32_t uv_y1;
- /** Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
+ /* Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
* --default/ineffective value: n/a
*/
int32_t uv_x0;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
index 1da215bb966d..4ac47ce10566 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_CTC_TYPES_H
#define __IA_CSS_CTC_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Chroma Tone Control parameters.
*/
-/** Fractional bits for CTC gain (used only for ISP1).
+/* Fractional bits for CTC gain (used only for ISP1).
*
* IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits
* of gain(=8), but also the bits(=5) to convert chroma
@@ -32,14 +32,14 @@
*/
#define IA_CSS_CTC_COEF_SHIFT 13
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 10
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2)
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2 8
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1)
enum ia_css_vamem_type {
@@ -47,44 +47,44 @@ enum ia_css_vamem_type {
IA_CSS_VAMEM_TYPE_2
};
-/** Chroma Tone Control configuration.
+/* Chroma Tone Control configuration.
*
* ISP block: CTC2 (CTC by polygonal line approximation)
* (ISP1: CTC1 (CTC by look-up table) is used.)
* ISP2: CTC2 is used.
*/
struct ia_css_ctc_config {
- uint16_t y0; /**< 1st kneepoint gain.
+ uint16_t y0; /** 1st kneepoint gain.
u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
default/ineffective 4096(0.5) */
- uint16_t y1; /**< 2nd kneepoint gain.
+ uint16_t y1; /** 2nd kneepoint gain.
u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
default/ineffective 4096(0.5) */
- uint16_t y2; /**< 3rd kneepoint gain.
+ uint16_t y2; /** 3rd kneepoint gain.
u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
default/ineffective 4096(0.5) */
- uint16_t y3; /**< 4th kneepoint gain.
+ uint16_t y3; /** 4th kneepoint gain.
u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
default/ineffective 4096(0.5) */
- uint16_t y4; /**< 5th kneepoint gain.
+ uint16_t y4; /** 5th kneepoint gain.
u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
default/ineffective 4096(0.5) */
- uint16_t y5; /**< 6th kneepoint gain.
+ uint16_t y5; /** 6th kneepoint gain.
u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
default/ineffective 4096(0.5) */
- uint16_t ce_gain_exp; /**< Common exponent of y-axis gain.
+ uint16_t ce_gain_exp; /** Common exponent of y-axis gain.
u8.0, [0,13],
default/ineffective 1 */
- uint16_t x1; /**< 2nd kneepoint luma.
+ uint16_t x1; /** 2nd kneepoint luma.
u0.13, [0,8191], constraints: 0<x1<x2,
default/ineffective 1024 */
- uint16_t x2; /**< 3rd kneepoint luma.
+ uint16_t x2; /** 3rd kneepoint luma.
u0.13, [0,8191], constraints: x1<x2<x3,
default/ineffective 2048 */
- uint16_t x3; /**< 4th kneepoint luma.
+ uint16_t x3; /** 4th kneepoint luma.
u0.13, [0,8191], constraints: x2<x3<x4,
default/ineffective 6144 */
- uint16_t x4; /**< 5tn kneepoint luma.
+ uint16_t x4; /** 5tn kneepoint luma.
u0.13, [0,8191], constraints: x3<x4<8191,
default/ineffective 7168 */
};
@@ -94,7 +94,7 @@ union ia_css_ctc_data {
uint16_t vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE];
};
-/** CTC table, used for Chroma Tone Control.
+/* CTC table, used for Chroma Tone Control.
*
* ISP block: CTC1 (CTC by look-up table)
* ISP1: CTC1 is used.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
index 525c838d5a99..803be68abc54 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
@@ -15,25 +15,25 @@
#ifndef __IA_CSS_DE_TYPES_H
#define __IA_CSS_DE_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Demosaic (bayer-to-YCgCo) parameters.
*/
-/** Demosaic (bayer-to-YCgCo) configuration.
+/* Demosaic (bayer-to-YCgCo) configuration.
*
* ISP block: DE1
* ISP1: DE1 is used.
* (ISP2: DE2 is used.)
*/
struct ia_css_de_config {
- ia_css_u0_16 pixelnoise; /**< Pixel noise used in moire elimination.
+ ia_css_u0_16 pixelnoise; /** Pixel noise used in moire elimination.
u0.16, [0,65535],
default 0, ineffective 0 */
- ia_css_u0_16 c1_coring_threshold; /**< Coring threshold for C1.
+ ia_css_u0_16 c1_coring_threshold; /** Coring threshold for C1.
This is the same as nr_config.threshold_cb.
u0.16, [0,65535],
default 128(0.001953125), ineffective 0 */
- ia_css_u0_16 c2_coring_threshold; /**< Coring threshold for C2.
+ ia_css_u0_16 c2_coring_threshold; /** Coring threshold for C2.
This is the same as nr_config.threshold_cr.
u0.16, [0,65535],
default 128(0.001953125), ineffective 0 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
index eac1b2779857..50bdde419bb1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
@@ -15,24 +15,24 @@
#ifndef __IA_CSS_DE2_TYPES_H
#define __IA_CSS_DE2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Demosaicing parameters.
*/
-/** Eigen Color Demosaicing configuration.
+/* Eigen Color Demosaicing configuration.
*
* ISP block: DE2
* (ISP1: DE1 is used.)
* ISP2: DE2 is used.
*/
struct ia_css_ecd_config {
- uint16_t zip_strength; /**< Strength of zipper reduction.
+ uint16_t zip_strength; /** Strength of zipper reduction.
u0.13, [0,8191],
default 5489(0.67), ineffective 0 */
- uint16_t fc_strength; /**< Strength of false color reduction.
+ uint16_t fc_strength; /** Strength of false color reduction.
u0.13, [0,8191],
default 8191(almost 1.0), ineffective 0 */
- uint16_t fc_debias; /**< Prevent color change
+ uint16_t fc_debias; /** Prevent color change
on noise or Gr/Gb imbalance.
u0.13, [0,8191],
default 0, ineffective 0 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
index b5d7b6b175b6..1bf6dcef7dc7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
@@ -15,12 +15,12 @@
#ifndef __IA_CSS_DP_TYPES_H
#define __IA_CSS_DP_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Defect Pixel Correction (DPC) parameters.
*/
-/** Defect Pixel Correction configuration.
+/* Defect Pixel Correction configuration.
*
* ISP block: DPC1 (DPC after WB)
* DPC2 (DPC before WB)
@@ -28,14 +28,14 @@
* ISP2: DPC2 is used.
*/
struct ia_css_dp_config {
- ia_css_u0_16 threshold; /**< The threshold of defect pixel correction,
+ ia_css_u0_16 threshold; /** The threshold of defect pixel correction,
representing the permissible difference of
intensity between one pixel and its
surrounding pixels. Smaller values result
in more frequent pixel corrections.
u0.16, [0,65535],
default 8192, ineffective 65535 */
- ia_css_u8_8 gain; /**< The sensitivity of mis-correction. ISP will
+ ia_css_u8_8 gain; /** The sensitivity of mis-correction. ISP will
miss a lot of defects if the value is set
too large.
u8.8, [0,65535],
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
index b2c974196ce8..6727682d287f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
@@ -15,14 +15,14 @@
#ifndef __IA_CSS_DPC2_TYPES_H
#define __IA_CSS_DPC2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters.
*/
#include "type_support.h"
/**@{*/
-/** Floating point constants for different metrics. */
+/* Floating point constants for different metrics. */
#define METRIC1_ONE_FP (1<<12)
#define METRIC2_ONE_FP (1<<5)
#define METRIC3_ONE_FP (1<<12)
@@ -30,7 +30,7 @@
/**@}*/
/**@{*/
-/** Defect Pixel Correction 2 configuration.
+/* Defect Pixel Correction 2 configuration.
*
* \brief DPC2 public parameters.
* \details Struct with all parameters for the Defect Pixel Correction 2
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
index 4d0abfe4d0fd..66a7e58659c0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
@@ -30,7 +30,7 @@
#ifdef ISP2401
#endif
-/** dvserence frame */
+/* dvserence frame */
struct sh_css_isp_dvs_isp_config {
uint32_t num_horizontal_blocks;
uint32_t num_vertical_blocks;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
index 216c54a21ea5..30772d217fb2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_DVS_TYPES_H
#define __IA_CSS_DVS_TYPES_H
-/** DVS frame
+/* DVS frame
*
* ISP block: dvs frame
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
index 07651f0ac558..32e91824a5e5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_EED1_8_TYPES_H
#define __IA_CSS_EED1_8_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Edge Enhanced Demosaic parameters.
*/
@@ -36,51 +36,51 @@
*/
#define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS 9
-/** Edge Enhanced Demosaic configuration
+/* Edge Enhanced Demosaic configuration
*
* ISP2.6.1: EED1_8 is used.
*/
struct ia_css_eed1_8_config {
- int32_t rbzp_strength; /**< Strength of zipper reduction. */
-
- int32_t fcstrength; /**< Strength of false color reduction. */
- int32_t fcthres_0; /**< Threshold to prevent chroma coring due to noise or green disparity in dark region. */
- int32_t fcthres_1; /**< Threshold to prevent chroma coring due to noise or green disparity in bright region. */
- int32_t fc_sat_coef; /**< How much color saturation to maintain in high color saturation region. */
- int32_t fc_coring_prm; /**< Chroma coring coefficient for tint color suppression. */
-
- int32_t aerel_thres0; /**< Threshold for Non-Directional Reliability at dark region. */
- int32_t aerel_gain0; /**< Gain for Non-Directional Reliability at dark region. */
- int32_t aerel_thres1; /**< Threshold for Non-Directional Reliability at bright region. */
- int32_t aerel_gain1; /**< Gain for Non-Directional Reliability at bright region. */
-
- int32_t derel_thres0; /**< Threshold for Directional Reliability at dark region. */
- int32_t derel_gain0; /**< Gain for Directional Reliability at dark region. */
- int32_t derel_thres1; /**< Threshold for Directional Reliability at bright region. */
- int32_t derel_gain1; /**< Gain for Directional Reliability at bright region. */
-
- int32_t coring_pos0; /**< Positive Edge Coring Threshold in dark region. */
- int32_t coring_pos1; /**< Positive Edge Coring Threshold in bright region. */
- int32_t coring_neg0; /**< Negative Edge Coring Threshold in dark region. */
- int32_t coring_neg1; /**< Negative Edge Coring Threshold in bright region. */
-
- int32_t gain_exp; /**< Common Exponent of Gain. */
- int32_t gain_pos0; /**< Gain for Positive Edge in dark region. */
- int32_t gain_pos1; /**< Gain for Positive Edge in bright region. */
- int32_t gain_neg0; /**< Gain for Negative Edge in dark region. */
- int32_t gain_neg1; /**< Gain for Negative Edge in bright region. */
-
- int32_t pos_margin0; /**< Margin for Positive Edge in dark region. */
- int32_t pos_margin1; /**< Margin for Positive Edge in bright region. */
- int32_t neg_margin0; /**< Margin for Negative Edge in dark region. */
- int32_t neg_margin1; /**< Margin for Negative Edge in bright region. */
-
- int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /**< Segment data for directional edge weight: X. */
- int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /**< Segment data for directional edge weight: Y. */
- int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /**< Segment data for directional edge weight: Slope. */
- int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /**< Segment data for directional edge weight: Exponent. */
- int32_t dedgew_max; /**< Max Weight for Directional Edge. */
+ int32_t rbzp_strength; /** Strength of zipper reduction. */
+
+ int32_t fcstrength; /** Strength of false color reduction. */
+ int32_t fcthres_0; /** Threshold to prevent chroma coring due to noise or green disparity in dark region. */
+ int32_t fcthres_1; /** Threshold to prevent chroma coring due to noise or green disparity in bright region. */
+ int32_t fc_sat_coef; /** How much color saturation to maintain in high color saturation region. */
+ int32_t fc_coring_prm; /** Chroma coring coefficient for tint color suppression. */
+
+ int32_t aerel_thres0; /** Threshold for Non-Directional Reliability at dark region. */
+ int32_t aerel_gain0; /** Gain for Non-Directional Reliability at dark region. */
+ int32_t aerel_thres1; /** Threshold for Non-Directional Reliability at bright region. */
+ int32_t aerel_gain1; /** Gain for Non-Directional Reliability at bright region. */
+
+ int32_t derel_thres0; /** Threshold for Directional Reliability at dark region. */
+ int32_t derel_gain0; /** Gain for Directional Reliability at dark region. */
+ int32_t derel_thres1; /** Threshold for Directional Reliability at bright region. */
+ int32_t derel_gain1; /** Gain for Directional Reliability at bright region. */
+
+ int32_t coring_pos0; /** Positive Edge Coring Threshold in dark region. */
+ int32_t coring_pos1; /** Positive Edge Coring Threshold in bright region. */
+ int32_t coring_neg0; /** Negative Edge Coring Threshold in dark region. */
+ int32_t coring_neg1; /** Negative Edge Coring Threshold in bright region. */
+
+ int32_t gain_exp; /** Common Exponent of Gain. */
+ int32_t gain_pos0; /** Gain for Positive Edge in dark region. */
+ int32_t gain_pos1; /** Gain for Positive Edge in bright region. */
+ int32_t gain_neg0; /** Gain for Negative Edge in dark region. */
+ int32_t gain_neg1; /** Gain for Negative Edge in bright region. */
+
+ int32_t pos_margin0; /** Margin for Positive Edge in dark region. */
+ int32_t pos_margin1; /** Margin for Positive Edge in bright region. */
+ int32_t neg_margin0; /** Margin for Negative Edge in dark region. */
+ int32_t neg_margin1; /** Margin for Negative Edge in bright region. */
+
+ int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: X. */
+ int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: Y. */
+ int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /** Segment data for directional edge weight: Slope. */
+ int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /** Segment data for directional edge weight: Exponent. */
+ int32_t dedgew_max; /** Max Weight for Directional Edge. */
};
#endif /* __IA_CSS_EED1_8_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
index df1565a5914c..49479572b40d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
@@ -15,20 +15,20 @@
#ifndef __IA_CSS_FORMATS_TYPES_H
#define __IA_CSS_FORMATS_TYPES_H
-/** @file
+/* @file
* CSS-API header file for output format parameters.
*/
#include "type_support.h"
-/** Formats configuration.
+/* Formats configuration.
*
* ISP block: FORMATS
* ISP1: FORMATS is used.
* ISP2: FORMATS is used.
*/
struct ia_css_formats_config {
- uint32_t video_full_range_flag; /**< selects the range of YUV output.
+ uint32_t video_full_range_flag; /** selects the range of YUV output.
u8.0, [0,1],
default 1, ineffective n/a\n
1 - full range, luma 0-255, chroma 0-255\n
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
index 5a2f0c06a80d..ef287fa3c428 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_FPN_TYPES_H
#define __IA_CSS_FPN_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Fixed Pattern Noise parameters.
*/
-/** Fixed Pattern Noise table.
+/* Fixed Pattern Noise table.
*
* This contains the fixed patterns noise values
* obtained from a black frame capture.
@@ -33,15 +33,15 @@
*/
struct ia_css_fpn_table {
- int16_t *data; /**< Table content (fixed patterns noise).
+ int16_t *data; /** Table content (fixed patterns noise).
u0.[13-shift], [0,63] */
- uint32_t width; /**< Table width (in pixels).
+ uint32_t width; /** Table width (in pixels).
This is the input frame width. */
- uint32_t height; /**< Table height (in pixels).
+ uint32_t height; /** Table height (in pixels).
This is the input frame height. */
- uint32_t shift; /**< Common exponent of table content.
+ uint32_t shift; /** Common exponent of table content.
u8.0, [0,13] */
- uint32_t enabled; /**< Fpn is enabled.
+ uint32_t enabled; /** Fpn is enabled.
bool */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
index dd9f0eda3353..594807fe2925 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
@@ -15,36 +15,36 @@
#ifndef __IA_CSS_GC_TYPES_H
#define __IA_CSS_GC_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Gamma Correction parameters.
*/
#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: Needed for ia_css_vamem_type */
-/** Fractional bits for GAMMA gain */
+/* Fractional bits for GAMMA gain */
#define IA_CSS_GAMMA_GAIN_K_SHIFT 13
-/** Number of elements in the gamma table. */
+/* Number of elements in the gamma table. */
#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 10
#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2)
-/** Number of elements in the gamma table. */
+/* Number of elements in the gamma table. */
#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2 8
#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1)
-/** Gamma table, used for Y(Luma) Gamma Correction.
+/* Gamma table, used for Y(Luma) Gamma Correction.
*
* ISP block: GC1 (YUV Gamma Correction)
* ISP1: GC1 is used.
* (ISP2: GC2(sRGB Gamma Correction) is used.)
*/
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
IA_CSS_VAMEM_TYPE_2(ISP2400) */
union ia_css_gc_data {
uint16_t vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE];
- /**< Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
+ /** Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
uint16_t vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE];
- /**< Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
+ /** Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
};
struct ia_css_gamma_table {
@@ -52,22 +52,22 @@ struct ia_css_gamma_table {
union ia_css_gc_data data;
};
-/** Gamma Correction configuration (used only for YUV Gamma Correction).
+/* Gamma Correction configuration (used only for YUV Gamma Correction).
*
* ISP block: GC1 (YUV Gamma Correction)
* ISP1: GC1 is used.
* (ISP2: GC2 (sRGB Gamma Correction) is used.)
*/
struct ia_css_gc_config {
- uint16_t gain_k1; /**< Gain to adjust U after YUV Gamma Correction.
+ uint16_t gain_k1; /** Gain to adjust U after YUV Gamma Correction.
u0.16, [0,65535],
default/ineffective 19000(0.29) */
- uint16_t gain_k2; /**< Gain to adjust V after YUV Gamma Correction.
+ uint16_t gain_k2; /** Gain to adjust V after YUV Gamma Correction.
u0.16, [0,65535],
default/ineffective 19000(0.29) */
};
-/** Chroma Enhancement configuration.
+/* Chroma Enhancement configuration.
*
* This parameter specifies range of chroma output level.
* The standard range is [0,255] or [16,240].
@@ -77,20 +77,20 @@ struct ia_css_gc_config {
* (ISP2: CE1 is not used.)
*/
struct ia_css_ce_config {
- uint8_t uv_level_min; /**< Minimum of chroma output level.
+ uint8_t uv_level_min; /** Minimum of chroma output level.
u0.8, [0,255], default/ineffective 0 */
- uint8_t uv_level_max; /**< Maximum of chroma output level.
+ uint8_t uv_level_max; /** Maximum of chroma output level.
u0.8, [0,255], default/ineffective 255 */
};
-/** Multi-Axes Color Correction (MACC) configuration.
+/* Multi-Axes Color Correction (MACC) configuration.
*
* ISP block: MACC2 (MACC by matrix and exponent(ia_css_macc_config))
* (ISP1: MACC1 (MACC by only matrix) is used.)
* ISP2: MACC2 is used.
*/
struct ia_css_macc_config {
- uint8_t exp; /**< Common exponent of ia_css_macc_table.
+ uint8_t exp; /** Common exponent of ia_css_macc_table.
u8.0, [0,13], default 1, ineffective 1 */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
index e439583bdfb6..fab7467d30a5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
@@ -17,33 +17,33 @@
#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: needed for ia_css_vamem_type */
-/** @file
+/* @file
* CSS-API header file for Gamma Correction parameters.
*/
-/** sRGB Gamma table, used for sRGB Gamma Correction.
+/* sRGB Gamma table, used for sRGB Gamma Correction.
*
* ISP block: GC2 (sRGB Gamma Correction)
* (ISP1: GC1(YUV Gamma Correction) is used.)
* ISP2: GC2 is used.
*/
-/** Number of elements in the sRGB gamma table. */
+/* Number of elements in the sRGB gamma table. */
#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8
#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2)
-/** Number of elements in the sRGB gamma table. */
+/* Number of elements in the sRGB gamma table. */
#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2 8
#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1)
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
IA_CSS_VAMEM_TYPE_2(ISP2400) */
union ia_css_rgb_gamma_data {
uint16_t vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE];
- /**< RGB Gamma table on vamem type1. This table is not used,
+ /** RGB Gamma table on vamem type1. This table is not used,
because sRGB Gamma Correction is not implemented for ISP2300. */
uint16_t vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE];
- /**< RGB Gamma table on vamem type2. u0.12, [0,4095] */
+ /** RGB Gamma table on vamem type2. u0.12, [0,4095] */
};
struct ia_css_rgb_gamma_table {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
index c3345b32e3e6..26464421b077 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
@@ -24,14 +24,14 @@
* \detail Currently HDR paramters are used only for testing purposes
*/
struct ia_css_hdr_irradiance_params {
- int test_irr; /**< Test parameter */
- int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Histogram matching shift parameter */
- int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Histogram matching multiplication parameter */
- int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Weight map soft threshold low bound parameter */
- int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Weight map soft threshold high bound parameter */
- int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Soft threshold linear function coefficien */
- int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Soft threshold precision shift parameter */
- int weight_bpp; /**< Weight map bits per pixel */
+ int test_irr; /** Test parameter */
+ int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Histogram matching shift parameter */
+ int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Histogram matching multiplication parameter */
+ int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Weight map soft threshold low bound parameter */
+ int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Weight map soft threshold high bound parameter */
+ int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Soft threshold linear function coefficien */
+ int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Soft threshold precision shift parameter */
+ int weight_bpp; /** Weight map bits per pixel */
};
/**
@@ -39,7 +39,7 @@ struct ia_css_hdr_irradiance_params {
* \detail Currently HDR paramters are used only for testing purposes
*/
struct ia_css_hdr_deghost_params {
- int test_deg; /**< Test parameter */
+ int test_deg; /** Test parameter */
};
/**
@@ -47,7 +47,7 @@ struct ia_css_hdr_deghost_params {
* \detail Currently HDR paramters are used only for testing purposes
*/
struct ia_css_hdr_exclusion_params {
- int test_excl; /**< Test parameter */
+ int test_excl; /** Test parameter */
};
/**
@@ -56,9 +56,9 @@ struct ia_css_hdr_exclusion_params {
* the CSS API. Currenly, only test paramters are defined.
*/
struct ia_css_hdr_config {
- struct ia_css_hdr_irradiance_params irradiance; /**< HDR irradiance paramaters */
- struct ia_css_hdr_deghost_params deghost; /**< HDR deghosting parameters */
- struct ia_css_hdr_exclusion_params exclusion; /**< HDR exclusion parameters */
+ struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance paramaters */
+ struct ia_css_hdr_deghost_params deghost; /** HDR deghosting parameters */
+ struct ia_css_hdr_exclusion_params exclusion; /** HDR exclusion parameters */
};
#endif /* __IA_CSS_HDR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
index 78e159c04851..f80480cf9de2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -1,5 +1,5 @@
#ifdef ISP2401
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
index f7e1a632c47e..eb9e9439cc21 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -1,5 +1,5 @@
#ifdef ISP2401
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
index 3d510bf5886a..9cd31c2c0253 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
@@ -15,22 +15,22 @@
#ifndef __IA_CSS_MACC1_5_TYPES_H
#define __IA_CSS_MACC1_5_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Multi-Axis Color Conversion algorithm parameters.
*/
-/** Multi-Axis Color Conversion configuration
+/* Multi-Axis Color Conversion configuration
*
* ISP2.6.1: MACC1_5 is used.
*/
-/** Number of axes in the MACC table. */
+/* Number of axes in the MACC table. */
#define IA_CSS_MACC_NUM_AXES 16
-/** Number of coefficients per MACC axes. */
+/* Number of coefficients per MACC axes. */
#define IA_CSS_MACC_NUM_COEFS 4
-/** Multi-Axes Color Correction (MACC) table.
+/* Multi-Axes Color Correction (MACC) table.
*
* ISP block: MACC (MACC by only matrix)
* MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
@@ -55,19 +55,19 @@
*/
struct ia_css_macc1_5_table {
int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
- /**< 16 of 2x2 matix
+ /** 16 of 2x2 matix
MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
default/ineffective: (s1.12)
16 of "identity 2x2 matix" {4096,0,0,4096} */
};
-/** Multi-Axes Color Correction (MACC) configuration.
+/* Multi-Axes Color Correction (MACC) configuration.
*
* ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
* ISP2: MACC1_5 is used.
*/
struct ia_css_macc1_5_config {
- uint8_t exp; /**< Common exponent of ia_css_macc_table.
+ uint8_t exp; /** Common exponent of ia_css_macc_table.
u8.0, [0,13], default 1, ineffective 1 */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
index a25581c6f3ac..2c9e5a8ceb98 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
@@ -15,17 +15,17 @@
#ifndef __IA_CSS_MACC_TYPES_H
#define __IA_CSS_MACC_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Multi-Axis Color Correction (MACC) parameters.
*/
-/** Number of axes in the MACC table. */
+/* Number of axes in the MACC table. */
#define IA_CSS_MACC_NUM_AXES 16
-/** Number of coefficients per MACC axes. */
+/* Number of coefficients per MACC axes. */
#define IA_CSS_MACC_NUM_COEFS 4
-/** The number of planes in the morphing table. */
+/* The number of planes in the morphing table. */
-/** Multi-Axis Color Correction (MACC) table.
+/* Multi-Axis Color Correction (MACC) table.
*
* ISP block: MACC1 (MACC by only matrix)
* MACC2 (MACC by matrix and exponent(ia_css_macc_config))
@@ -51,7 +51,7 @@
struct ia_css_macc_table {
int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
- /**< 16 of 2x2 matix
+ /** 16 of 2x2 matix
MACC1: s2.13, [-65536,65535]
default/ineffective:
16 of "identity 2x2 matix" {8192,0,0,8192}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
index eeaadfeb5a1e..d981394c1c11 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_OB2_TYPES_H
#define __IA_CSS_OB2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Optical Black algorithm parameters.
*/
-/** Optical Black configuration
+/* Optical Black configuration
*
* ISP2.6.1: OB2 is used.
*/
@@ -27,16 +27,16 @@
#include "ia_css_frac.h"
struct ia_css_ob2_config {
- ia_css_u0_16 level_gr; /**< Black level for GR pixels.
+ ia_css_u0_16 level_gr; /** Black level for GR pixels.
u0.16, [0,65535],
default/ineffective 0 */
- ia_css_u0_16 level_r; /**< Black level for R pixels.
+ ia_css_u0_16 level_r; /** Black level for R pixels.
u0.16, [0,65535],
default/ineffective 0 */
- ia_css_u0_16 level_b; /**< Black level for B pixels.
+ ia_css_u0_16 level_b; /** Black level for B pixels.
u0.16, [0,65535],
default/ineffective 0 */
- ia_css_u0_16 level_gb; /**< Black level for GB pixels.
+ ia_css_u0_16 level_gb; /** Black level for GB pixels.
u0.16, [0,65535],
default/ineffective 0 */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
index 88459b6c003d..a9717b8f44ac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
@@ -15,51 +15,51 @@
#ifndef __IA_CSS_OB_TYPES_H
#define __IA_CSS_OB_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Optical Black level parameters.
*/
#include "ia_css_frac.h"
-/** Optical black mode.
+/* Optical black mode.
*/
enum ia_css_ob_mode {
- IA_CSS_OB_MODE_NONE, /**< OB has no effect. */
- IA_CSS_OB_MODE_FIXED, /**< Fixed OB */
- IA_CSS_OB_MODE_RASTER /**< Raster OB */
+ IA_CSS_OB_MODE_NONE, /** OB has no effect. */
+ IA_CSS_OB_MODE_FIXED, /** Fixed OB */
+ IA_CSS_OB_MODE_RASTER /** Raster OB */
};
-/** Optical Black level configuration.
+/* Optical Black level configuration.
*
* ISP block: OB1
* ISP1: OB1 is used.
* ISP2: OB1 is used.
*/
struct ia_css_ob_config {
- enum ia_css_ob_mode mode; /**< Mode (None / Fixed / Raster).
+ enum ia_css_ob_mode mode; /** Mode (None / Fixed / Raster).
enum, [0,2],
default 1, ineffective 0 */
- ia_css_u0_16 level_gr; /**< Black level for GR pixels
+ ia_css_u0_16 level_gr; /** Black level for GR pixels
(used for Fixed Mode only).
u0.16, [0,65535],
default/ineffective 0 */
- ia_css_u0_16 level_r; /**< Black level for R pixels
+ ia_css_u0_16 level_r; /** Black level for R pixels
(used for Fixed Mode only).
u0.16, [0,65535],
default/ineffective 0 */
- ia_css_u0_16 level_b; /**< Black level for B pixels
+ ia_css_u0_16 level_b; /** Black level for B pixels
(used for Fixed Mode only).
u0.16, [0,65535],
default/ineffective 0 */
- ia_css_u0_16 level_gb; /**< Black level for GB pixels
+ ia_css_u0_16 level_gb; /** Black level for GB pixels
(used for Fixed Mode only).
u0.16, [0,65535],
default/ineffective 0 */
- uint16_t start_position; /**< Start position of OB area
+ uint16_t start_position; /** Start position of OB area
(used for Raster Mode only).
u16.0, [0,63],
default/ineffective 0 */
- uint16_t end_position; /**< End position of OB area
+ uint16_t end_position; /** End position of OB area
(used for Raster Mode only).
u16.0, [0,63],
default/ineffective 0 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
index 26ec27e085c1..eb7defa41145 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
@@ -19,7 +19,7 @@
#include "dma.h"
#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
-/** output frame */
+/* output frame */
struct sh_css_isp_output_isp_config {
uint32_t width_a_over_b;
uint32_t height;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
index 4335ac28b31d..9c7342fb8145 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_OUTPUT_TYPES_H
#define __IA_CSS_OUTPUT_TYPES_H
-/** @file
+/* @file
* CSS-API header file for parameters of output frames.
*/
-/** Output frame
+/* Output frame
*
* ISP block: output frame
*/
@@ -40,8 +40,8 @@ struct ia_css_output1_configuration {
};
struct ia_css_output_config {
- uint8_t enable_hflip; /**< enable horizontal output mirroring */
- uint8_t enable_vflip; /**< enable vertical output mirroring */
+ uint8_t enable_hflip; /** enable horizontal output mirroring */
+ uint8_t enable_vflip; /** enable vertical output mirroring */
};
#endif /* __IA_CSS_OUTPUT_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
index 955fd472a241..62d371841619 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
@@ -18,7 +18,7 @@
#include <ia_css_frame_public.h>
#include "sh_css_internal.h"
-/** qplane frame
+/* qplane frame
*
* ISP block: qplane frame
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
index 54f8c299d227..5c0b8febd79a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
@@ -18,7 +18,7 @@
#include <ia_css_frame_public.h>
#include "sh_css_internal.h"
-/** Raw frame
+/* Raw frame
*
* ISP block: Raw frame
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
index 1f1b72a417d1..026443b999a6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
@@ -19,7 +19,7 @@
#include "sh_css_defs.h"
#include "dma.h"
-/** Reference frame */
+/* Reference frame */
struct ia_css_ref_configuration {
const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
uint32_t dvs_frame_delay;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
index ce0eaeeee9c6..4750fba268b9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_REF_TYPES_H
#define __IA_CSS_REF_TYPES_H
-/** Reference frame
+/* Reference frame
*
* ISP block: reference frame
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
index 8ef6c54ee813..aa733674f42b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
@@ -321,7 +321,7 @@ ia_css_s3a_dmem_decode(
}
/* MW: this is an ISP function */
-STORAGE_CLASS_INLINE int
+static inline int
merge_hi_lo_14(unsigned short hi, unsigned short lo)
{
int val = (int) ((((unsigned int) hi << 14) & 0xfffc000) |
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
index f57ed1ec5981..8d674d2c6427 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_S3A_TYPES_H
#define __IA_CSS_S3A_TYPES_H
-/** @file
+/* @file
* CSS-API header file for 3A statistics parameters.
*/
@@ -25,11 +25,11 @@
#include "../../../../components/stats_3a/src/stats_3a_public.h"
#endif
-/** 3A configuration. This configures the 3A statistics collection
+/* 3A configuration. This configures the 3A statistics collection
* module.
*/
-/** 3A statistics grid
+/* 3A statistics grid
*
* ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE))
* S3A2 (3A Support for 3A ver.2 (Histogram is used for AE))
@@ -39,23 +39,23 @@
struct ia_css_3a_grid_info {
#if defined(SYSTEM_css_skycam_c0_system)
- uint32_t ae_enable; /**< ae enabled in binary,
+ uint32_t ae_enable; /** ae enabled in binary,
0:disabled, 1:enabled */
- struct ae_public_config_grid_config ae_grd_info; /**< see description in ae_public.h*/
+ struct ae_public_config_grid_config ae_grd_info; /** see description in ae_public.h*/
- uint32_t awb_enable; /**< awb enabled in binary,
+ uint32_t awb_enable; /** awb enabled in binary,
0:disabled, 1:enabled */
- struct awb_public_config_grid_config awb_grd_info; /**< see description in awb_public.h*/
+ struct awb_public_config_grid_config awb_grd_info; /** see description in awb_public.h*/
- uint32_t af_enable; /**< af enabled in binary,
+ uint32_t af_enable; /** af enabled in binary,
0:disabled, 1:enabled */
- struct af_public_grid_config af_grd_info; /**< see description in af_public.h*/
+ struct af_public_grid_config af_grd_info; /** see description in af_public.h*/
- uint32_t awb_fr_enable; /**< awb_fr enabled in binary,
+ uint32_t awb_fr_enable; /** awb_fr enabled in binary,
0:disabled, 1:enabled */
- struct awb_fr_public_grid_config awb_fr_grd_info;/**< see description in awb_fr_public.h*/
+ struct awb_fr_public_grid_config awb_fr_grd_info;/** see description in awb_fr_public.h*/
- uint32_t elem_bit_depth; /**< TODO:Taken from BYT - need input from AIQ
+ uint32_t elem_bit_depth; /** TODO:Taken from BYT - need input from AIQ
if needed for SKC
Bit depth of element used
to calculate 3A statistics.
@@ -63,34 +63,34 @@ struct ia_css_3a_grid_info {
bayer bit depth in DSP. */
#else
- uint32_t enable; /**< 3A statistics enabled.
+ uint32_t enable; /** 3A statistics enabled.
0:disabled, 1:enabled */
- uint32_t use_dmem; /**< DMEM or VMEM determines layout.
+ uint32_t use_dmem; /** DMEM or VMEM determines layout.
0:3A statistics are stored to VMEM,
1:3A statistics are stored to DMEM */
- uint32_t has_histogram; /**< Statistics include histogram.
+ uint32_t has_histogram; /** Statistics include histogram.
0:no histogram, 1:has histogram */
- uint32_t width; /**< Width of 3A grid table.
+ uint32_t width; /** Width of 3A grid table.
(= Horizontal number of grid cells
in table, which cells have effective
statistics.) */
- uint32_t height; /**< Height of 3A grid table.
+ uint32_t height; /** Height of 3A grid table.
(= Vertical number of grid cells
in table, which cells have effective
statistics.) */
- uint32_t aligned_width; /**< Horizontal stride (for alloc).
+ uint32_t aligned_width; /** Horizontal stride (for alloc).
(= Horizontal number of grid cells
in table, which means
the allocated width.) */
- uint32_t aligned_height; /**< Vertical stride (for alloc).
+ uint32_t aligned_height; /** Vertical stride (for alloc).
(= Vertical number of grid cells
in table, which means
the allocated height.) */
- uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+ uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
(1BQ means {Gr,R,B,Gb}(2x2 pixels).)
Valid values are 8,16,32,64. */
- uint32_t deci_factor_log2; /**< log2 of bqs_per_grid_cell. */
- uint32_t elem_bit_depth; /**< Bit depth of element used
+ uint32_t deci_factor_log2; /** log2 of bqs_per_grid_cell. */
+ uint32_t elem_bit_depth; /** Bit depth of element used
to calculate 3A statistics.
This is 13, which is the normalized
bayer bit depth in DSP. */
@@ -148,7 +148,7 @@ struct ia_css_3a_grid_info {
* However, that will require driver/ 3A lib modifications.
*/
-/** 3A configuration. This configures the 3A statistics collection
+/* 3A configuration. This configures the 3A statistics collection
* module.
*
* ae_y_*: Coefficients to calculate luminance from bayer.
@@ -167,38 +167,38 @@ struct ia_css_3a_grid_info {
* ISP2: S3A2 and SDVS2 are used.
*/
struct ia_css_3a_config {
- ia_css_u0_16 ae_y_coef_r; /**< Weight of R for Y.
+ ia_css_u0_16 ae_y_coef_r; /** Weight of R for Y.
u0.16, [0,65535],
default/ineffective 25559 */
- ia_css_u0_16 ae_y_coef_g; /**< Weight of G for Y.
+ ia_css_u0_16 ae_y_coef_g; /** Weight of G for Y.
u0.16, [0,65535],
default/ineffective 32768 */
- ia_css_u0_16 ae_y_coef_b; /**< Weight of B for Y.
+ ia_css_u0_16 ae_y_coef_b; /** Weight of B for Y.
u0.16, [0,65535],
default/ineffective 7209 */
- ia_css_u0_16 awb_lg_high_raw; /**< AWB level gate high for raw.
+ ia_css_u0_16 awb_lg_high_raw; /** AWB level gate high for raw.
u0.16, [0,65535],
default 65472(=1023*64),
ineffective 65535 */
- ia_css_u0_16 awb_lg_low; /**< AWB level gate low.
+ ia_css_u0_16 awb_lg_low; /** AWB level gate low.
u0.16, [0,65535],
default 64(=1*64),
ineffective 0 */
- ia_css_u0_16 awb_lg_high; /**< AWB level gate high.
+ ia_css_u0_16 awb_lg_high; /** AWB level gate high.
u0.16, [0,65535],
default 65535,
ineffective 65535 */
- ia_css_s0_15 af_fir1_coef[7]; /**< AF FIR coefficients of fir1.
+ ia_css_s0_15 af_fir1_coef[7]; /** AF FIR coefficients of fir1.
s0.15, [-32768,32767],
default/ineffective
-6689,-12207,-32768,32767,12207,6689,0 */
- ia_css_s0_15 af_fir2_coef[7]; /**< AF FIR coefficients of fir2.
+ ia_css_s0_15 af_fir2_coef[7]; /** AF FIR coefficients of fir2.
s0.15, [-32768,32767],
default/ineffective
2053,0,-18437,32767,-18437,2053,0 */
};
-/** 3A statistics. This structure describes the data stored
+/* 3A statistics. This structure describes the data stored
* in each 3A grid point.
*
* ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE)
@@ -209,43 +209,43 @@ struct ia_css_3a_config {
* ISP2: S3A2 is used.
*/
struct ia_css_3a_output {
- int32_t ae_y; /**< Sum of Y in a statistics window, for AE.
+ int32_t ae_y; /** Sum of Y in a statistics window, for AE.
(u19.13) */
- int32_t awb_cnt; /**< Number of effective pixels
+ int32_t awb_cnt; /** Number of effective pixels
in a statistics window.
Pixels passed by the AWB level gate check are
judged as "effective". (u32) */
- int32_t awb_gr; /**< Sum of Gr in a statistics window, for AWB.
+ int32_t awb_gr; /** Sum of Gr in a statistics window, for AWB.
All Gr pixels (not only for effective pixels)
are summed. (u19.13) */
- int32_t awb_r; /**< Sum of R in a statistics window, for AWB.
+ int32_t awb_r; /** Sum of R in a statistics window, for AWB.
All R pixels (not only for effective pixels)
are summed. (u19.13) */
- int32_t awb_b; /**< Sum of B in a statistics window, for AWB.
+ int32_t awb_b; /** Sum of B in a statistics window, for AWB.
All B pixels (not only for effective pixels)
are summed. (u19.13) */
- int32_t awb_gb; /**< Sum of Gb in a statistics window, for AWB.
+ int32_t awb_gb; /** Sum of Gb in a statistics window, for AWB.
All Gb pixels (not only for effective pixels)
are summed. (u19.13) */
- int32_t af_hpf1; /**< Sum of |Y| following high pass filter af_fir1
+ int32_t af_hpf1; /** Sum of |Y| following high pass filter af_fir1
within a statistics window, for AF. (u19.13) */
- int32_t af_hpf2; /**< Sum of |Y| following high pass filter af_fir2
+ int32_t af_hpf2; /** Sum of |Y| following high pass filter af_fir2
within a statistics window, for AF. (u19.13) */
};
-/** 3A Statistics. This structure describes the statistics that are generated
+/* 3A Statistics. This structure describes the statistics that are generated
* using the provided configuration (ia_css_3a_config).
*/
struct ia_css_3a_statistics {
- struct ia_css_3a_grid_info grid; /**< grid info contains the dimensions of the 3A grid */
- struct ia_css_3a_output *data; /**< the pointer to 3a_output[grid.width * grid.height]
+ struct ia_css_3a_grid_info grid; /** grid info contains the dimensions of the 3A grid */
+ struct ia_css_3a_output *data; /** the pointer to 3a_output[grid.width * grid.height]
containing the 3A statistics */
- struct ia_css_3a_rgby_output *rgby_data;/**< the pointer to 3a_rgby_output[256]
+ struct ia_css_3a_rgby_output *rgby_data;/** the pointer to 3a_rgby_output[256]
containing the histogram */
};
-/** Histogram (Statistics for AE).
+/* Histogram (Statistics for AE).
*
* 4 histograms(r,g,b,y),
* 256 bins for each histogram, unsigned 24bit value for each bin.
@@ -256,10 +256,10 @@ struct ia_css_3a_statistics {
* ISP2: HIST2 is used.
*/
struct ia_css_3a_rgby_output {
- uint32_t r; /**< Number of R of one bin of the histogram R. (u24) */
- uint32_t g; /**< Number of G of one bin of the histogram G. (u24) */
- uint32_t b; /**< Number of B of one bin of the histogram B. (u24) */
- uint32_t y; /**< Number of Y of one bin of the histogram Y. (u24) */
+ uint32_t r; /** Number of R of one bin of the histogram R. (u24) */
+ uint32_t g; /** Number of G of one bin of the histogram G. (u24) */
+ uint32_t b; /** Number of B of one bin of the histogram B. (u24) */
+ uint32_t y; /** Number of Y of one bin of the histogram Y. (u24) */
};
#endif /* __IA_CSS_S3A_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
index 8b2b56b0310b..9aa019539f47 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
@@ -22,7 +22,7 @@
#define NUM_S3A_LS 1
-/** s3a statistics store */
+/* s3a statistics store */
#ifdef ISP2401
struct ia_css_s3a_stat_ls_configuration {
uint32_t s3a_grid_size_log2;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
index 44e3c43a5d4a..b35ac3e4009b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
@@ -32,7 +32,7 @@ ia_css_sc_dump(
unsigned level);
#ifdef ISP2401
-/** @brief Configure the shading correction.
+/* @brief Configure the shading correction.
* @param[out] to Parameters used in the shading correction kernel in the isp.
* @param[in] from Parameters passed from the host.
* @param[in] size Size of the sh_css_isp_sc_isp_config structure.
@@ -45,7 +45,7 @@ ia_css_sc_config(
const struct ia_css_sc_configuration *from,
unsigned size);
-/** @brief Configure the shading correction.
+/* @brief Configure the shading correction.
* @param[in] binary The binary, which has the shading correction.
* @param[in] internal_frame_origin_x_bqs_on_sctbl
* X coordinate (in bqs) of the origin of the internal frame on the shading table.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
index 5a833bc48af1..30ce499ac8cf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
@@ -15,25 +15,25 @@
#ifndef __IA_CSS_SC_TYPES_H
#define __IA_CSS_SC_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Lens Shading Correction (SC) parameters.
*/
-/** Number of color planes in the shading table. */
+/* Number of color planes in the shading table. */
#define IA_CSS_SC_NUM_COLORS 4
-/** The 4 colors that a shading table consists of.
+/* The 4 colors that a shading table consists of.
* For each color we store a grid of values.
*/
enum ia_css_sc_color {
- IA_CSS_SC_COLOR_GR, /**< Green on a green-red line */
- IA_CSS_SC_COLOR_R, /**< Red */
- IA_CSS_SC_COLOR_B, /**< Blue */
- IA_CSS_SC_COLOR_GB /**< Green on a green-blue line */
+ IA_CSS_SC_COLOR_GR, /** Green on a green-red line */
+ IA_CSS_SC_COLOR_R, /** Red */
+ IA_CSS_SC_COLOR_B, /** Blue */
+ IA_CSS_SC_COLOR_GB /** Green on a green-blue line */
};
-/** Lens Shading Correction table.
+/* Lens Shading Correction table.
*
* This describes the color shading artefacts
* introduced by lens imperfections. To correct artefacts,
@@ -64,39 +64,39 @@ enum ia_css_sc_color {
* ISP2: SC1 is used.
*/
struct ia_css_shading_table {
- uint32_t enable; /**< Set to false for no shading correction.
+ uint32_t enable; /** Set to false for no shading correction.
The data field can be NULL when enable == true */
/* ------ deprecated(bz675) : from ------ */
- uint32_t sensor_width; /**< Native sensor width in pixels. */
- uint32_t sensor_height; /**< Native sensor height in lines.
+ uint32_t sensor_width; /** Native sensor width in pixels. */
+ uint32_t sensor_height; /** Native sensor height in lines.
When shading_settings.enable_shading_table_conversion is set
as 0, sensor_width and sensor_height are NOT used.
These are used only in the legacy shading table conversion
in the css, when shading_settings.
enable_shading_table_conversion is set as 1. */
/* ------ deprecated(bz675) : to ------ */
- uint32_t width; /**< Number of data points per line per color.
+ uint32_t width; /** Number of data points per line per color.
u8.0, [0,81] */
- uint32_t height; /**< Number of lines of data points per color.
+ uint32_t height; /** Number of lines of data points per color.
u8.0, [0,61] */
- uint32_t fraction_bits; /**< Bits of fractional part in the data
+ uint32_t fraction_bits; /** Bits of fractional part in the data
points.
u8.0, [0,13] */
uint16_t *data[IA_CSS_SC_NUM_COLORS];
- /**< Table data, one array for each color.
+ /** Table data, one array for each color.
Use ia_css_sc_color to index this array.
u[13-fraction_bits].[fraction_bits], [0,8191] */
};
/* ------ deprecated(bz675) : from ------ */
-/** Shading Correction settings.
+/* Shading Correction settings.
*
* NOTE:
* This structure should be removed when the shading table conversion is
* removed from the css.
*/
struct ia_css_shading_settings {
- uint32_t enable_shading_table_conversion; /**< Set to 0,
+ uint32_t enable_shading_table_conversion; /** Set to 0,
if the conversion of the shading table should be disabled
in the css. (default 1)
0: The shading table is directly sent to the isp.
@@ -119,14 +119,14 @@ struct ia_css_shading_settings {
#ifdef ISP2401
-/** Shading Correction configuration.
+/* Shading Correction configuration.
*
* NOTE: The shading table size is larger than or equal to the internal frame size.
*/
struct ia_css_sc_configuration {
- uint32_t internal_frame_origin_x_bqs_on_sctbl; /**< Origin X (in bqs) of internal frame on shading table. */
- uint32_t internal_frame_origin_y_bqs_on_sctbl; /**< Origin Y (in bqs) of internal frame on shading table. */
- /**< NOTE: bqs = size in BQ(Bayer Quad) unit.
+ uint32_t internal_frame_origin_x_bqs_on_sctbl; /** Origin X (in bqs) of internal frame on shading table. */
+ uint32_t internal_frame_origin_y_bqs_on_sctbl; /** Origin Y (in bqs) of internal frame on shading table. */
+ /** NOTE: bqs = size in BQ(Bayer Quad) unit.
1BQ means {Gr,R,B,Gb}(2x2 pixels).
Horizontal 1 bqs corresponds to horizontal 2 pixels.
Vertical 1 bqs corresponds to vertical 2 pixels. */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
index 295dc60b778c..031983c357e4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
@@ -15,21 +15,21 @@
#ifndef __IA_CSS_SDIS_COMMON_TYPES_H
#define __IA_CSS_SDIS_COMMON_TYPES_H
-/** @file
+/* @file
* CSS-API header file for DVS statistics parameters.
*/
#include <type_support.h>
-/** DVS statistics grid dimensions in number of cells.
+/* DVS statistics grid dimensions in number of cells.
*/
struct ia_css_dvs_grid_dim {
- uint32_t width; /**< Width of DVS grid table in cells */
- uint32_t height; /**< Height of DVS grid table in cells */
+ uint32_t width; /** Width of DVS grid table in cells */
+ uint32_t height; /** Height of DVS grid table in cells */
};
-/** DVS statistics dimensions in number of cells for
+/* DVS statistics dimensions in number of cells for
* grid, coeffieicient and projection.
*/
@@ -55,7 +55,7 @@ struct ia_css_sdis_info {
0, /* dis_deci_factor_log2 */ \
}
-/** DVS statistics grid
+/* DVS statistics grid
*
* ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes))
* SDVS2 (DVS Support for DVS ver.2 (6-axes))
@@ -63,23 +63,23 @@ struct ia_css_sdis_info {
* ISP2: SDVS2 is used.
*/
struct ia_css_dvs_grid_res {
- uint32_t width; /**< Width of DVS grid table.
+ uint32_t width; /** Width of DVS grid table.
(= Horizontal number of grid cells
in table, which cells have effective
statistics.)
For DVS1, this is equal to
the number of vertical statistics. */
- uint32_t aligned_width; /**< Stride of each grid line.
+ uint32_t aligned_width; /** Stride of each grid line.
(= Horizontal number of grid cells
in table, which means
the allocated width.) */
- uint32_t height; /**< Height of DVS grid table.
+ uint32_t height; /** Height of DVS grid table.
(= Vertical number of grid cells
in table, which cells have effective
statistics.)
For DVS1, This is equal to
the number of horizontal statistics. */
- uint32_t aligned_height;/**< Stride of each grid column.
+ uint32_t aligned_height;/** Stride of each grid column.
(= Vertical number of grid cells
in table, which means
the allocated height.) */
@@ -89,125 +89,125 @@ struct ia_css_dvs_grid_res {
* However, that implies driver I/F changes
*/
struct ia_css_dvs_grid_info {
- uint32_t enable; /**< DVS statistics enabled.
+ uint32_t enable; /** DVS statistics enabled.
0:disabled, 1:enabled */
- uint32_t width; /**< Width of DVS grid table.
+ uint32_t width; /** Width of DVS grid table.
(= Horizontal number of grid cells
in table, which cells have effective
statistics.)
For DVS1, this is equal to
the number of vertical statistics. */
- uint32_t aligned_width; /**< Stride of each grid line.
+ uint32_t aligned_width; /** Stride of each grid line.
(= Horizontal number of grid cells
in table, which means
the allocated width.) */
- uint32_t height; /**< Height of DVS grid table.
+ uint32_t height; /** Height of DVS grid table.
(= Vertical number of grid cells
in table, which cells have effective
statistics.)
For DVS1, This is equal to
the number of horizontal statistics. */
- uint32_t aligned_height;/**< Stride of each grid column.
+ uint32_t aligned_height;/** Stride of each grid column.
(= Vertical number of grid cells
in table, which means
the allocated height.) */
- uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+ uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
(1BQ means {Gr,R,B,Gb}(2x2 pixels).)
For DVS1, valid value is 64.
For DVS2, valid value is only 64,
currently. */
- uint32_t num_hor_coefs; /**< Number of horizontal coefficients. */
- uint32_t num_ver_coefs; /**< Number of vertical coefficients. */
+ uint32_t num_hor_coefs; /** Number of horizontal coefficients. */
+ uint32_t num_ver_coefs; /** Number of vertical coefficients. */
};
-/** Number of DVS statistics levels
+/* Number of DVS statistics levels
*/
#define IA_CSS_DVS_STAT_NUM_OF_LEVELS 3
-/** DVS statistics generated by accelerator global configuration
+/* DVS statistics generated by accelerator global configuration
*/
struct dvs_stat_public_dvs_global_cfg {
unsigned char kappa;
- /**< DVS statistics global configuration - kappa */
+ /** DVS statistics global configuration - kappa */
unsigned char match_shift;
- /**< DVS statistics global configuration - match_shift */
+ /** DVS statistics global configuration - match_shift */
unsigned char ybin_mode;
- /**< DVS statistics global configuration - y binning mode */
+ /** DVS statistics global configuration - y binning mode */
};
-/** DVS statistics generated by accelerator level grid
+/* DVS statistics generated by accelerator level grid
* configuration
*/
struct dvs_stat_public_dvs_level_grid_cfg {
unsigned char grid_width;
- /**< DVS statistics grid width */
+ /** DVS statistics grid width */
unsigned char grid_height;
- /**< DVS statistics grid height */
+ /** DVS statistics grid height */
unsigned char block_width;
- /**< DVS statistics block width */
+ /** DVS statistics block width */
unsigned char block_height;
- /**< DVS statistics block height */
+ /** DVS statistics block height */
};
-/** DVS statistics generated by accelerator level grid start
+/* DVS statistics generated by accelerator level grid start
* configuration
*/
struct dvs_stat_public_dvs_level_grid_start {
unsigned short x_start;
- /**< DVS statistics level x start */
+ /** DVS statistics level x start */
unsigned short y_start;
- /**< DVS statistics level y start */
+ /** DVS statistics level y start */
unsigned char enable;
- /**< DVS statistics level enable */
+ /** DVS statistics level enable */
};
-/** DVS statistics generated by accelerator level grid end
+/* DVS statistics generated by accelerator level grid end
* configuration
*/
struct dvs_stat_public_dvs_level_grid_end {
unsigned short x_end;
- /**< DVS statistics level x end */
+ /** DVS statistics level x end */
unsigned short y_end;
- /**< DVS statistics level y end */
+ /** DVS statistics level y end */
};
-/** DVS statistics generated by accelerator Feature Extraction
+/* DVS statistics generated by accelerator Feature Extraction
* Region Of Interest (FE-ROI) configuration
*/
struct dvs_stat_public_dvs_level_fe_roi_cfg {
unsigned char x_start;
- /**< DVS statistics fe-roi level x start */
+ /** DVS statistics fe-roi level x start */
unsigned char y_start;
- /**< DVS statistics fe-roi level y start */
+ /** DVS statistics fe-roi level y start */
unsigned char x_end;
- /**< DVS statistics fe-roi level x end */
+ /** DVS statistics fe-roi level x end */
unsigned char y_end;
- /**< DVS statistics fe-roi level y end */
+ /** DVS statistics fe-roi level y end */
};
-/** DVS statistics generated by accelerator public configuration
+/* DVS statistics generated by accelerator public configuration
*/
struct dvs_stat_public_dvs_grd_cfg {
struct dvs_stat_public_dvs_level_grid_cfg grd_cfg;
- /**< DVS statistics level grid configuration */
+ /** DVS statistics level grid configuration */
struct dvs_stat_public_dvs_level_grid_start grd_start;
- /**< DVS statistics level grid start configuration */
+ /** DVS statistics level grid start configuration */
struct dvs_stat_public_dvs_level_grid_end grd_end;
- /**< DVS statistics level grid end configuration */
+ /** DVS statistics level grid end configuration */
};
-/** DVS statistics grid generated by accelerator
+/* DVS statistics grid generated by accelerator
*/
struct ia_css_dvs_stat_grid_info {
struct dvs_stat_public_dvs_global_cfg dvs_gbl_cfg;
- /**< DVS statistics global configuration (kappa, match, binning) */
+ /** DVS statistics global configuration (kappa, match, binning) */
struct dvs_stat_public_dvs_grd_cfg grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
- /**< DVS statistics grid configuration (blocks and grids) */
+ /** DVS statistics grid configuration (blocks and grids) */
struct dvs_stat_public_dvs_level_fe_roi_cfg fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
- /**< DVS statistics FE ROI (region of interest) configuration */
+ /** DVS statistics FE ROI (region of interest) configuration */
};
-/** DVS statistics generated by accelerator default grid info
+/* DVS statistics generated by accelerator default grid info
*/
#define DEFAULT_DVS_GRID_INFO { \
{ \
@@ -219,14 +219,14 @@ struct ia_css_dvs_stat_grid_info {
}
-/** Union that holds all types of DVS statistics grid info in
+/* Union that holds all types of DVS statistics grid info in
* CSS format
* */
union ia_css_dvs_grid_u {
struct ia_css_dvs_stat_grid_info dvs_stat_grid_info;
- /**< DVS statistics produced by accelerator grid info */
+ /** DVS statistics produced by accelerator grid info */
struct ia_css_dvs_grid_info dvs_grid_info;
- /**< DVS (DVS1/DVS2) grid info */
+ /** DVS (DVS1/DVS2) grid info */
};
#endif /* __IA_CSS_SDIS_COMMON_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
index d408b58a027d..d2ee57008fb6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
@@ -15,38 +15,38 @@
#ifndef __IA_CSS_SDIS_TYPES_H
#define __IA_CSS_SDIS_TYPES_H
-/** @file
+/* @file
* CSS-API header file for DVS statistics parameters.
*/
-/** Number of DVS coefficient types */
+/* Number of DVS coefficient types */
#define IA_CSS_DVS_NUM_COEF_TYPES 6
#ifndef PIPE_GENERATION
#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
#endif
-/** DVS 1.0 Coefficients.
+/* DVS 1.0 Coefficients.
* This structure describes the coefficients that are needed for the dvs statistics.
*/
struct ia_css_dvs_coefficients {
- struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
- int16_t *hor_coefs; /**< the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
+ int16_t *hor_coefs; /** the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
containing the horizontal coefficients */
- int16_t *ver_coefs; /**< the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ int16_t *ver_coefs; /** the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
containing the vertical coefficients */
};
-/** DVS 1.0 Statistics.
+/* DVS 1.0 Statistics.
* This structure describes the statistics that are generated using the provided coefficients.
*/
struct ia_css_dvs_statistics {
- struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
- int32_t *hor_proj; /**< the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
+ struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
+ int32_t *hor_proj; /** the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
containing the horizontal projections */
- int32_t *ver_proj; /**< the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
+ int32_t *ver_proj; /** the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
containing the vertical projections */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
index 7db7dd10fe00..2a0bc4031746 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
@@ -15,55 +15,55 @@
#ifndef __IA_CSS_SDIS2_TYPES_H
#define __IA_CSS_SDIS2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for DVS statistics parameters.
*/
-/** Number of DVS coefficient types */
+/* Number of DVS coefficient types */
#define IA_CSS_DVS2_NUM_COEF_TYPES 4
#ifndef PIPE_GENERATION
#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
#endif
-/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
* arrays that contain the coeffients for each type.
*/
struct ia_css_dvs2_coef_types {
- int16_t *odd_real; /**< real part of the odd coefficients*/
- int16_t *odd_imag; /**< imaginary part of the odd coefficients*/
- int16_t *even_real;/**< real part of the even coefficients*/
- int16_t *even_imag;/**< imaginary part of the even coefficients*/
+ int16_t *odd_real; /** real part of the odd coefficients*/
+ int16_t *odd_imag; /** imaginary part of the odd coefficients*/
+ int16_t *even_real;/** real part of the even coefficients*/
+ int16_t *even_imag;/** imaginary part of the even coefficients*/
};
-/** DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
+/* DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
* e.g. hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real
* coefficients.
*/
struct ia_css_dvs2_coefficients {
- struct ia_css_dvs_grid_info grid; /**< grid info contains the dimensions of the dvs grid */
- struct ia_css_dvs2_coef_types hor_coefs; /**< struct with pointers that contain the horizontal coefficients */
- struct ia_css_dvs2_coef_types ver_coefs; /**< struct with pointers that contain the vertical coefficients */
+ struct ia_css_dvs_grid_info grid; /** grid info contains the dimensions of the dvs grid */
+ struct ia_css_dvs2_coef_types hor_coefs; /** struct with pointers that contain the horizontal coefficients */
+ struct ia_css_dvs2_coef_types ver_coefs; /** struct with pointers that contain the vertical coefficients */
};
-/** DVS 2.0 Statistic types. This structure contains 4 pointers to
+/* DVS 2.0 Statistic types. This structure contains 4 pointers to
* arrays that contain the statistics for each type.
*/
struct ia_css_dvs2_stat_types {
- int32_t *odd_real; /**< real part of the odd statistics*/
- int32_t *odd_imag; /**< imaginary part of the odd statistics*/
- int32_t *even_real;/**< real part of the even statistics*/
- int32_t *even_imag;/**< imaginary part of the even statistics*/
+ int32_t *odd_real; /** real part of the odd statistics*/
+ int32_t *odd_imag; /** imaginary part of the odd statistics*/
+ int32_t *even_real;/** real part of the even statistics*/
+ int32_t *even_imag;/** imaginary part of the even statistics*/
};
-/** DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
+/* DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
* e.g. hor_prod.odd_real is the pointer to int16_t[grid.aligned_height][grid.aligned_width] containing
* the horizontal odd real statistics. Valid statistics data area is int16_t[0..grid.height-1][0..grid.width-1]
*/
struct ia_css_dvs2_statistics {
- struct ia_css_dvs_grid_info grid; /**< grid info contains the dimensions of the dvs grid */
- struct ia_css_dvs2_stat_types hor_prod; /**< struct with pointers that contain the horizontal statistics */
- struct ia_css_dvs2_stat_types ver_prod; /**< struct with pointers that contain the vertical statistics */
+ struct ia_css_dvs_grid_info grid; /** grid info contains the dimensions of the dvs grid */
+ struct ia_css_dvs2_stat_types hor_prod; /** struct with pointers that contain the horizontal statistics */
+ struct ia_css_dvs2_stat_types ver_prod; /** struct with pointers that contain the vertical statistics */
};
#endif /* __IA_CSS_SDIS2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
index cc47a50e5ad5..91ea8dd4651d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
@@ -15,13 +15,13 @@
#ifndef __IA_CSS_TDF_TYPES_H
#define __IA_CSS_TDF_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Transform Domain Filter parameters.
*/
#include "type_support.h"
-/** Transform Domain Filter configuration
+/* Transform Domain Filter configuration
*
* \brief TDF public parameters.
* \details Struct with all parameters for the TDF kernel that can be set
@@ -30,23 +30,23 @@
* ISP2.6.1: TDF is used.
*/
struct ia_css_tdf_config {
- int32_t thres_flat_table[64]; /**< Final optimized strength table of NR for flat region. */
- int32_t thres_detail_table[64]; /**< Final optimized strength table of NR for detail region. */
- int32_t epsilon_0; /**< Coefficient to control variance for dark area (for flat region). */
- int32_t epsilon_1; /**< Coefficient to control variance for bright area (for flat region). */
- int32_t eps_scale_text; /**< Epsilon scaling coefficient for texture region. */
- int32_t eps_scale_edge; /**< Epsilon scaling coefficient for edge region. */
- int32_t sepa_flat; /**< Threshold to judge flat (edge < m_Flat_thre). */
- int32_t sepa_edge; /**< Threshold to judge edge (edge > m_Edge_thre). */
- int32_t blend_flat; /**< Blending ratio at flat region. */
- int32_t blend_text; /**< Blending ratio at texture region. */
- int32_t blend_edge; /**< Blending ratio at edge region. */
- int32_t shading_gain; /**< Gain of Shading control. */
- int32_t shading_base_gain; /**< Base Gain of Shading control. */
- int32_t local_y_gain; /**< Gain of local luminance control. */
- int32_t local_y_base_gain; /**< Base gain of local luminance control. */
- int32_t rad_x_origin; /**< Initial x coord. for radius computation. */
- int32_t rad_y_origin; /**< Initial y coord. for radius computation. */
+ int32_t thres_flat_table[64]; /** Final optimized strength table of NR for flat region. */
+ int32_t thres_detail_table[64]; /** Final optimized strength table of NR for detail region. */
+ int32_t epsilon_0; /** Coefficient to control variance for dark area (for flat region). */
+ int32_t epsilon_1; /** Coefficient to control variance for bright area (for flat region). */
+ int32_t eps_scale_text; /** Epsilon scaling coefficient for texture region. */
+ int32_t eps_scale_edge; /** Epsilon scaling coefficient for edge region. */
+ int32_t sepa_flat; /** Threshold to judge flat (edge < m_Flat_thre). */
+ int32_t sepa_edge; /** Threshold to judge edge (edge > m_Edge_thre). */
+ int32_t blend_flat; /** Blending ratio at flat region. */
+ int32_t blend_text; /** Blending ratio at texture region. */
+ int32_t blend_edge; /** Blending ratio at edge region. */
+ int32_t shading_gain; /** Gain of Shading control. */
+ int32_t shading_base_gain; /** Base Gain of Shading control. */
+ int32_t local_y_gain; /** Gain of local luminance control. */
+ int32_t local_y_base_gain; /** Base gain of local luminance control. */
+ int32_t rad_x_origin; /** Initial x coord. for radius computation. */
+ int32_t rad_y_origin; /** Initial y coord. for radius computation. */
};
#endif /* __IA_CSS_TDF_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
index 135563f52174..223423f8c40b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
@@ -16,7 +16,7 @@ more details.
#ifndef _IA_CSS_TNR3_TYPES_H
#define _IA_CSS_TNR3_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel
*/
@@ -27,7 +27,7 @@ more details.
*/
#define TNR3_NUM_SEGMENTS 3
-/** Temporal Noise Reduction v3 (TNR3) configuration.
+/* Temporal Noise Reduction v3 (TNR3) configuration.
* The parameter to this kernel is fourfold
* 1. Three piecewise linear graphs (one for each plane) with three segments
* each. Each line graph has Luma values on the x axis and sigma values for
@@ -44,17 +44,17 @@ more details.
* 4. Selection of the reference frame buffer to be used for noise reduction.
*/
struct ia_css_tnr3_kernel_config {
- unsigned int maxfb_y; /**< Maximum Feedback Gain for Y */
- unsigned int maxfb_u; /**< Maximum Feedback Gain for U */
- unsigned int maxfb_v; /**< Maximum Feedback Gain for V */
- unsigned int round_adj_y; /**< Rounding Adjust for Y */
- unsigned int round_adj_u; /**< Rounding Adjust for U */
- unsigned int round_adj_v; /**< Rounding Adjust for V */
- unsigned int knee_y[TNR3_NUM_SEGMENTS - 1]; /**< Knee points */
- unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for Y at points Y0, Y1, Y2, Y3 */
- unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for U at points U0, U1, U2, U3 */
- unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for V at points V0, V1, V2, V3 */
- unsigned int ref_buf_select; /**< Selection of the reference buffer */
+ unsigned int maxfb_y; /** Maximum Feedback Gain for Y */
+ unsigned int maxfb_u; /** Maximum Feedback Gain for U */
+ unsigned int maxfb_v; /** Maximum Feedback Gain for V */
+ unsigned int round_adj_y; /** Rounding Adjust for Y */
+ unsigned int round_adj_u; /** Rounding Adjust for U */
+ unsigned int round_adj_v; /** Rounding Adjust for V */
+ unsigned int knee_y[TNR3_NUM_SEGMENTS - 1]; /** Knee points */
+ unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for Y at points Y0, Y1, Y2, Y3 */
+ unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for U at points U0, U1, U2, U3 */
+ unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for V at points V0, V1, V2, V3 */
+ unsigned int ref_buf_select; /** Selection of the reference buffer */
};
#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
index 4fd35e6ccd70..9bbc9ab2e6c0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_TNR_TYPES_H
#define __IA_CSS_TNR_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Temporal Noise Reduction (TNR) parameters.
*/
-/** Temporal Noise Reduction (TNR) configuration.
+/* Temporal Noise Reduction (TNR) configuration.
*
* When difference between current frame and previous frame is less than or
* equal to threshold, TNR works and current frame is mixed
@@ -36,18 +36,18 @@
struct ia_css_tnr_config {
- ia_css_u0_16 gain; /**< Interpolation ratio of current frame
+ ia_css_u0_16 gain; /** Interpolation ratio of current frame
and previous frame.
gain=0.0 -> previous frame is outputted.
gain=1.0 -> current frame is outputted.
u0.16, [0,65535],
default 32768(0.5), ineffective 65535(almost 1.0) */
- ia_css_u0_16 threshold_y; /**< Threshold to enable interpolation of Y.
+ ia_css_u0_16 threshold_y; /** Threshold to enable interpolation of Y.
If difference between current frame and
previous frame is greater than threshold_y,
TNR for Y is disabled.
u0.16, [0,65535], default/ineffective 0 */
- ia_css_u0_16 threshold_uv; /**< Threshold to enable interpolation of
+ ia_css_u0_16 threshold_uv; /** Threshold to enable interpolation of
U/V.
If difference between current frame and
previous frame is greater than threshold_uv,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
index df5d37c8c946..9df4e12f6c2c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
@@ -23,9 +23,9 @@
#define VFDEC_BITS_PER_PIXEL GAMMA_OUTPUT_BITS
-/** Viewfinder decimation */
+/* Viewfinder decimation */
struct sh_css_isp_vf_isp_config {
- uint32_t vf_downscale_bits; /**< Log VF downscale value */
+ uint32_t vf_downscale_bits; /** Log VF downscale value */
uint32_t enable;
struct ia_css_frame_sp_info info;
struct {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
index d8cfdfbc8c0b..e3efafa279ff 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_VF_TYPES_H
#define __IA_CSS_VF_TYPES_H
-/** Viewfinder decimation
+/* Viewfinder decimation
*
* ISP block: vfeven_horizontal_downscale
*/
@@ -24,7 +24,7 @@
#include <type_support.h>
struct ia_css_vf_configuration {
- uint32_t vf_downscale_bits; /**< Log VF downscale value */
+ uint32_t vf_downscale_bits; /** Log VF downscale value */
const struct ia_css_frame_info *info;
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
index 6bcfa274be88..bf98734d057e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
@@ -15,31 +15,31 @@
#ifndef __IA_CSS_WB_TYPES_H
#define __IA_CSS_WB_TYPES_H
-/** @file
+/* @file
* CSS-API header file for White Balance parameters.
*/
-/** White Balance configuration (Gain Adjust).
+/* White Balance configuration (Gain Adjust).
*
* ISP block: WB1
* ISP1: WB1 is used.
* ISP2: WB1 is used.
*/
struct ia_css_wb_config {
- uint32_t integer_bits; /**< Common exponent of gains.
+ uint32_t integer_bits; /** Common exponent of gains.
u8.0, [0,3],
default 1, ineffective 1 */
- uint32_t gr; /**< Significand of Gr gain.
+ uint32_t gr; /** Significand of Gr gain.
u[integer_bits].[16-integer_bits], [0,65535],
default/ineffective 32768(u1.15, 1.0) */
- uint32_t r; /**< Significand of R gain.
+ uint32_t r; /** Significand of R gain.
u[integer_bits].[16-integer_bits], [0,65535],
default/ineffective 32768(u1.15, 1.0) */
- uint32_t b; /**< Significand of B gain.
+ uint32_t b; /** Significand of B gain.
u[integer_bits].[16-integer_bits], [0,65535],
default/ineffective 32768(u1.15, 1.0) */
- uint32_t gb; /**< Significand of Gb gain.
+ uint32_t gb; /** Significand of Gb gain.
u[integer_bits].[16-integer_bits], [0,65535],
default/ineffective 32768(u1.15, 1.0) */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
index 3018100f6f76..abcb531f51cc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
@@ -21,7 +21,7 @@
#include "ia_css_xnr.host.h"
const struct ia_css_xnr_config default_xnr_config = {
- /** default threshold 6400 translates to 25 on ISP. */
+ /* default threshold 6400 translates to 25 on ISP. */
6400
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
index 806c9f8f0e2e..a5caebbe2f84 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
@@ -41,7 +41,7 @@ struct sh_css_isp_xnr_vamem_params {
};
struct sh_css_isp_xnr_params {
- /** XNR threshold.
+ /* XNR threshold.
* type:u0.16 but actual valid range is:[0,255]
* valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits)
* default: 25 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
index 89e8b0f17e8c..d2b634211a3f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_XNR_TYPES_H
#define __IA_CSS_XNR_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Extra Noise Reduction (XNR) parameters.
*/
-/** XNR table.
+/* XNR table.
*
* NOTE: The driver does not need to set this table,
* because the default values are set inside the css.
@@ -36,23 +36,23 @@
*
*/
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 6
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2)
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2 6
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2)
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
IA_CSS_VAMEM_TYPE_2(ISP2400) */
union ia_css_xnr_data {
uint16_t vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE];
- /**< Coefficients table on vamem type1. u0.12, [0,4095] */
+ /** Coefficients table on vamem type1. u0.12, [0,4095] */
uint16_t vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
- /**< Coefficients table on vamem type2. u0.12, [0,4095] */
+ /** Coefficients table on vamem type2. u0.12, [0,4095] */
};
struct ia_css_xnr_table {
@@ -61,7 +61,7 @@ struct ia_css_xnr_table {
};
struct ia_css_xnr_config {
- /** XNR threshold.
+ /* XNR threshold.
* type:u0.16 valid range:[0,65535]
* default: 6400 */
uint16_t threshold;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
index 8f14d1080651..669200caf72e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
@@ -15,7 +15,7 @@
#ifndef __IA_CSS_XNR3_TYPES_H
#define __IA_CSS_XNR3_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Extra Noise Reduction (XNR) parameters.
*/
@@ -47,12 +47,12 @@
* IA_CSS_XNR3_SIGMA_SCALE.
*/
struct ia_css_xnr3_sigma_params {
- int y0; /**< Sigma for Y range similarity in dark area */
- int y1; /**< Sigma for Y range similarity in bright area */
- int u0; /**< Sigma for U range similarity in dark area */
- int u1; /**< Sigma for U range similarity in bright area */
- int v0; /**< Sigma for V range similarity in dark area */
- int v1; /**< Sigma for V range similarity in bright area */
+ int y0; /** Sigma for Y range similarity in dark area */
+ int y1; /** Sigma for Y range similarity in bright area */
+ int u0; /** Sigma for U range similarity in dark area */
+ int u1; /** Sigma for U range similarity in bright area */
+ int v0; /** Sigma for V range similarity in dark area */
+ int v1; /** Sigma for V range similarity in bright area */
};
/**
@@ -64,10 +64,10 @@ struct ia_css_xnr3_sigma_params {
* with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0.
*/
struct ia_css_xnr3_coring_params {
- int u0; /**< Coring threshold of U channel in dark area */
- int u1; /**< Coring threshold of U channel in bright area */
- int v0; /**< Coring threshold of V channel in dark area */
- int v1; /**< Coring threshold of V channel in bright area */
+ int u0; /** Coring threshold of U channel in dark area */
+ int u1; /** Coring threshold of U channel in bright area */
+ int v0; /** Coring threshold of V channel in dark area */
+ int v1; /** Coring threshold of V channel in bright area */
};
/**
@@ -81,7 +81,7 @@ struct ia_css_xnr3_coring_params {
* value of 0.0 bypasses the entire xnr3 filter.
*/
struct ia_css_xnr3_blending_params {
- int strength; /**< Blending strength */
+ int strength; /** Blending strength */
};
/**
@@ -90,9 +90,9 @@ struct ia_css_xnr3_blending_params {
* from the CSS API.
*/
struct ia_css_xnr3_config {
- struct ia_css_xnr3_sigma_params sigma; /**< XNR3 sigma parameters */
- struct ia_css_xnr3_coring_params coring; /**< XNR3 coring parameters */
- struct ia_css_xnr3_blending_params blending; /**< XNR3 blending parameters */
+ struct ia_css_xnr3_sigma_params sigma; /** XNR3 sigma parameters */
+ struct ia_css_xnr3_coring_params coring; /** XNR3 coring parameters */
+ struct ia_css_xnr3_blending_params blending; /** XNR3 blending parameters */
};
#endif /* __IA_CSS_XNR3_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
index 3f46655bee57..3f8589a5a43a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_YNR_TYPES_H
#define __IA_CSS_YNR_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction (YNR,CNR).
*/
-/** Configuration used by Bayer Noise Reduction (BNR) and
+/* Configuration used by Bayer Noise Reduction (BNR) and
* YCC Noise Reduction (YNR,CNR).
*
* ISP block: BNR1, YNR1, CNR1
@@ -28,28 +28,28 @@
* BNR1,YNR2,CNR2 are used for Still.
*/
struct ia_css_nr_config {
- ia_css_u0_16 bnr_gain; /**< Strength of noise reduction (BNR).
+ ia_css_u0_16 bnr_gain; /** Strength of noise reduction (BNR).
u0.16, [0,65535],
default 14336(0.21875), ineffective 0 */
- ia_css_u0_16 ynr_gain; /**< Strength of noise reduction (YNR).
+ ia_css_u0_16 ynr_gain; /** Strength of noise reduction (YNR).
u0.16, [0,65535],
default 14336(0.21875), ineffective 0 */
- ia_css_u0_16 direction; /**< Sensitivity of edge (BNR).
+ ia_css_u0_16 direction; /** Sensitivity of edge (BNR).
u0.16, [0,65535],
default 512(0.0078125), ineffective 0 */
- ia_css_u0_16 threshold_cb; /**< Coring threshold for Cb (CNR).
+ ia_css_u0_16 threshold_cb; /** Coring threshold for Cb (CNR).
This is the same as
de_config.c1_coring_threshold.
u0.16, [0,65535],
default 0(0), ineffective 0 */
- ia_css_u0_16 threshold_cr; /**< Coring threshold for Cr (CNR).
+ ia_css_u0_16 threshold_cr; /** Coring threshold for Cr (CNR).
This is the same as
de_config.c2_coring_threshold.
u0.16, [0,65535],
default 0(0), ineffective 0 */
};
-/** Edge Enhancement (sharpen) configuration.
+/* Edge Enhancement (sharpen) configuration.
*
* ISP block: YEE1
* ISP1: YEE1 is used.
@@ -57,24 +57,24 @@ struct ia_css_nr_config {
* (YEE2 is used for Still.)
*/
struct ia_css_ee_config {
- ia_css_u5_11 gain; /**< The strength of sharpness.
+ ia_css_u5_11 gain; /** The strength of sharpness.
u5.11, [0,65535],
default 8192(4.0), ineffective 0 */
- ia_css_u8_8 threshold; /**< The threshold that divides noises from
+ ia_css_u8_8 threshold; /** The threshold that divides noises from
edge.
u8.8, [0,65535],
default 256(1.0), ineffective 65535 */
- ia_css_u5_11 detail_gain; /**< The strength of sharpness in pell-mell
+ ia_css_u5_11 detail_gain; /** The strength of sharpness in pell-mell
area.
u5.11, [0,65535],
default 2048(1.0), ineffective 0 */
};
-/** YNR and YEE (sharpen) configuration.
+/* YNR and YEE (sharpen) configuration.
*/
struct ia_css_yee_config {
- struct ia_css_nr_config nr; /**< The NR configuration. */
- struct ia_css_ee_config ee; /**< The EE configuration. */
+ struct ia_css_nr_config nr; /** The NR configuration. */
+ struct ia_css_ee_config ee; /** The EE configuration. */
};
#endif /* __IA_CSS_YNR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
index e0a0b10ac5fa..83161a24207d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
@@ -15,11 +15,11 @@
#ifndef __IA_CSS_YNR2_TYPES_H
#define __IA_CSS_YNR2_TYPES_H
-/** @file
+/* @file
* CSS-API header file for Y(Luma) Noise Reduction.
*/
-/** Y(Luma) Noise Reduction configuration.
+/* Y(Luma) Noise Reduction configuration.
*
* ISP block: YNR2 & YEE2
* (ISP1: YNR1 and YEE1 are used.)
@@ -27,21 +27,21 @@
* ISP2: YNR2 and YEE2 are used for Still.
*/
struct ia_css_ynr_config {
- uint16_t edge_sense_gain_0; /**< Sensitivity of edge in dark area.
+ uint16_t edge_sense_gain_0; /** Sensitivity of edge in dark area.
u13.0, [0,8191],
default 1000, ineffective 0 */
- uint16_t edge_sense_gain_1; /**< Sensitivity of edge in bright area.
+ uint16_t edge_sense_gain_1; /** Sensitivity of edge in bright area.
u13.0, [0,8191],
default 1000, ineffective 0 */
- uint16_t corner_sense_gain_0; /**< Sensitivity of corner in dark area.
+ uint16_t corner_sense_gain_0; /** Sensitivity of corner in dark area.
u13.0, [0,8191],
default 1000, ineffective 0 */
- uint16_t corner_sense_gain_1; /**< Sensitivity of corner in bright area.
+ uint16_t corner_sense_gain_1; /** Sensitivity of corner in bright area.
u13.0, [0,8191],
default 1000, ineffective 0 */
};
-/** Fringe Control configuration.
+/* Fringe Control configuration.
*
* ISP block: FC2 (FC2 is used with YNR2/YEE2.)
* (ISP1: FC2 is not used.)
@@ -49,43 +49,43 @@ struct ia_css_ynr_config {
* ISP2: FC2 is used for Still.
*/
struct ia_css_fc_config {
- uint8_t gain_exp; /**< Common exponent of gains.
+ uint8_t gain_exp; /** Common exponent of gains.
u8.0, [0,13],
default 1, ineffective 0 */
- uint16_t coring_pos_0; /**< Coring threshold for positive edge in dark area.
+ uint16_t coring_pos_0; /** Coring threshold for positive edge in dark area.
u0.13, [0,8191],
default 0(0), ineffective 0 */
- uint16_t coring_pos_1; /**< Coring threshold for positive edge in bright area.
+ uint16_t coring_pos_1; /** Coring threshold for positive edge in bright area.
u0.13, [0,8191],
default 0(0), ineffective 0 */
- uint16_t coring_neg_0; /**< Coring threshold for negative edge in dark area.
+ uint16_t coring_neg_0; /** Coring threshold for negative edge in dark area.
u0.13, [0,8191],
default 0(0), ineffective 0 */
- uint16_t coring_neg_1; /**< Coring threshold for negative edge in bright area.
+ uint16_t coring_neg_1; /** Coring threshold for negative edge in bright area.
u0.13, [0,8191],
default 0(0), ineffective 0 */
- uint16_t gain_pos_0; /**< Gain for positive edge in dark area.
+ uint16_t gain_pos_0; /** Gain for positive edge in dark area.
u0.13, [0,8191],
default 4096(0.5), ineffective 0 */
- uint16_t gain_pos_1; /**< Gain for positive edge in bright area.
+ uint16_t gain_pos_1; /** Gain for positive edge in bright area.
u0.13, [0,8191],
default 4096(0.5), ineffective 0 */
- uint16_t gain_neg_0; /**< Gain for negative edge in dark area.
+ uint16_t gain_neg_0; /** Gain for negative edge in dark area.
u0.13, [0,8191],
default 4096(0.5), ineffective 0 */
- uint16_t gain_neg_1; /**< Gain for negative edge in bright area.
+ uint16_t gain_neg_1; /** Gain for negative edge in bright area.
u0.13, [0,8191],
default 4096(0.5), ineffective 0 */
- uint16_t crop_pos_0; /**< Limit for positive edge in dark area.
+ uint16_t crop_pos_0; /** Limit for positive edge in dark area.
u0.13, [0,8191],
default/ineffective 8191(almost 1.0) */
- uint16_t crop_pos_1; /**< Limit for positive edge in bright area.
+ uint16_t crop_pos_1; /** Limit for positive edge in bright area.
u0.13, [0,8191],
default/ineffective 8191(almost 1.0) */
- int16_t crop_neg_0; /**< Limit for negative edge in dark area.
+ int16_t crop_neg_0; /** Limit for negative edge in dark area.
s0.13, [-8192,0],
default/ineffective -8192(-1.0) */
- int16_t crop_neg_1; /**< Limit for negative edge in bright area.
+ int16_t crop_neg_1; /** Limit for negative edge in bright area.
s0.13, [-8192,0],
default/ineffective -8192(-1.0) */
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
index 63a8703c9c44..c9ff0cb2493a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
@@ -24,7 +24,7 @@
*/
#define NUM_YUV_LS 2
-/** YUV load/store */
+/* YUV load/store */
struct sh_css_isp_yuv_ls_isp_config {
unsigned base_address[NUM_YUV_LS];
unsigned width[NUM_YUV_LS];
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
index e814f1bf19f7..6512a1ceb9d3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
@@ -1,4 +1,4 @@
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
index c65194619a34..5a58abe2b233 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
@@ -269,7 +269,7 @@ enum ia_css_err
ia_css_binary_find(struct ia_css_binary_descr *descr,
struct ia_css_binary *binary);
-/** @brief Get the shading information of the specified shading correction type.
+/* @brief Get the shading information of the specified shading correction type.
*
* @param[in] binary: The isp binary which has the shading correction.
* @param[in] type: The shading correction type.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
index 9f8a125f0d74..295e07049393 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
@@ -972,7 +972,7 @@ ia_css_binary_uninit(void)
return IA_CSS_SUCCESS;
}
-/** @brief Compute decimation factor for 3A statistics and shading correction.
+/* @brief Compute decimation factor for 3A statistics and shading correction.
*
* @param[in] width Frame width in pixels.
* @param[in] height Frame height in pixels.
@@ -1697,11 +1697,11 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
}
#endif
if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */
- req_vf_info && /* and we need vf output. */
+ req_vf_info && /* and we need vf output. */
/* check if the required vf format
is supported. */
- !binary_supports_output_format(xcandidate, req_vf_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ !binary_supports_output_format(xcandidate, req_vf_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
__LINE__, xcandidate->num_output_pins, 1,
req_vf_info,
@@ -1711,8 +1711,8 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
/* Check if vf_veceven supports the requested vf format */
if (xcandidate->num_output_pins == 1 &&
- req_vf_info && candidate->enable.vf_veceven &&
- !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
+ req_vf_info && candidate->enable.vf_veceven &&
+ !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
__LINE__, xcandidate->num_output_pins, 1,
@@ -1723,7 +1723,7 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
/* Check if vf_veceven supports the requested vf width */
if (xcandidate->num_output_pins == 1 &&
- req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
+ req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
if (req_vf_info->res.width > candidate->output.max_width) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d < %d)\n",
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
index 5d40afd482f5..e50d9f2e2609 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
@@ -152,7 +152,7 @@ void ia_css_queue_map(
unmap_buffer_type_to_queue_id(thread_id, buf_type);
}
-/**
+/*
* @brief Query the internal queue ID.
*/
bool ia_css_query_internal_queue_id(
@@ -280,7 +280,7 @@ static ia_css_queue_t *bufq_get_qhandle(
/* Local function to initialize a buffer queue. This reduces
* the chances of copy-paste errors or typos.
*/
-STORAGE_CLASS_INLINE void
+static inline void
init_bufq(unsigned int desc_offset,
unsigned int elems_offset,
ia_css_queue_t *handle)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
index 91c105cc6204..4b28b2a0863a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
@@ -54,21 +54,21 @@ extern unsigned int ia_css_debug_trace_level;
* Values can be combined to dump a combination of sets.
*/
enum ia_css_debug_enable_param_dump {
- IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /**< FPN table */
- IA_CSS_DEBUG_DUMP_OB = 1 << 1, /**< OB table */
- IA_CSS_DEBUG_DUMP_SC = 1 << 2, /**< Shading table */
- IA_CSS_DEBUG_DUMP_WB = 1 << 3, /**< White balance */
- IA_CSS_DEBUG_DUMP_DP = 1 << 4, /**< Defect Pixel */
- IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /**< Bayer Noise Reductions */
- IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /**< 3A Statistics */
- IA_CSS_DEBUG_DUMP_DE = 1 << 7, /**< De Mosaicing */
- IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /**< Luma Noise Reduction */
- IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /**< Color Space Conversion */
- IA_CSS_DEBUG_DUMP_GC = 1 << 10, /**< Gamma Correction */
- IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /**< Temporal Noise Reduction */
- IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /**< Advanced Noise Reduction */
- IA_CSS_DEBUG_DUMP_CE = 1 << 13, /**< Chroma Enhancement */
- IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /**< Dump all device parameters */
+ IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /** FPN table */
+ IA_CSS_DEBUG_DUMP_OB = 1 << 1, /** OB table */
+ IA_CSS_DEBUG_DUMP_SC = 1 << 2, /** Shading table */
+ IA_CSS_DEBUG_DUMP_WB = 1 << 3, /** White balance */
+ IA_CSS_DEBUG_DUMP_DP = 1 << 4, /** Defect Pixel */
+ IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /** Bayer Noise Reductions */
+ IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /** 3A Statistics */
+ IA_CSS_DEBUG_DUMP_DE = 1 << 7, /** De Mosaicing */
+ IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /** Luma Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /** Color Space Conversion */
+ IA_CSS_DEBUG_DUMP_GC = 1 << 10, /** Gamma Correction */
+ IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /** Temporal Noise Reduction */
+ IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /** Advanced Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CE = 1 << 13, /** Chroma Enhancement */
+ IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /** Dump all device parameters */
};
#define IA_CSS_ERROR(fmt, ...) \
@@ -130,7 +130,7 @@ enum ia_css_debug_enable_param_dump {
* @param[in] fmt printf like format string
* @param[in] args arguments for the format string
*/
-STORAGE_CLASS_INLINE void
+static inline void
ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args)
{
if (ia_css_debug_trace_level >= level)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
index 0fa7cb2423d8..dd1127a21494 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
@@ -1617,7 +1617,7 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
#elif SP_DEBUG == SP_DEBUG_TRACE
-/**
+/*
* This is just an example how TRACE_FILE_ID (see ia_css_debug.sp.h) will
* me mapped on the file name string.
*
@@ -2267,7 +2267,7 @@ void ia_css_debug_dump_debug_info(const char *context)
return;
}
-/** this function is for debug use, it can make SP go to sleep
+/* this function is for debug use, it can make SP go to sleep
state after each frame, then user can dump the stable SP dmem.
this function can be called after ia_css_start_sp()
and before sh_css_init_buffer_queues()
@@ -2526,7 +2526,7 @@ void ia_css_debug_dump_ddr_debug_queue(void)
}
*/
-/**
+/*
* @brief Initialize the debug mode.
* Refer to "ia_css_debug.h" for more details.
*/
@@ -2537,7 +2537,7 @@ bool ia_css_debug_mode_init(void)
return rc;
}
-/**
+/*
* @brief Disable the DMA channel.
* Refer to "ia_css_debug.h" for more details.
*/
@@ -2552,7 +2552,7 @@ ia_css_debug_mode_disable_dma_channel(int dma_id,
return rc;
}
-/**
+/*
* @brief Enable the DMA channel.
* Refer to "ia_css_debug.h" for more details.
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
index 2698c3e1adb0..239c06730bf4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
@@ -52,7 +52,7 @@ more details.
#include "ia_css_queue.h" /* host_sp_enqueue_XXX */
#include "ia_css_event.h" /* ia_css_event_encode */
-/**
+/*
* @brief Encode the information into the software-event.
* Refer to "sw_event_public.h" for details.
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
index 56d6858890ec..913a4bf7a34f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
@@ -37,7 +37,7 @@ int ia_css_eventq_recv(
return error;
}
-/**
+/*
* @brief The Host sends the event to the SP.
* Refer to "sh_css_sp.h" for details.
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
index c7e07b79f4e5..89ad8080ceb1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
@@ -41,7 +41,7 @@ more details.
/*********************************************************************
**** Frame INFO APIs
**********************************************************************/
-/** @brief Sets the given width and alignment to the frame info
+/* @brief Sets the given width and alignment to the frame info
*
* @param
* @param[in] info The info to which parameters would set
@@ -53,7 +53,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
unsigned int width,
unsigned int min_padded_width);
-/** @brief Sets the given format to the frame info
+/* @brief Sets the given format to the frame info
*
* @param
* @param[in] info The info to which parameters would set
@@ -63,7 +63,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
enum ia_css_frame_format format);
-/** @brief Sets the frame info with the given parameters
+/* @brief Sets the frame info with the given parameters
*
* @param
* @param[in] info The info to which parameters would set
@@ -79,7 +79,7 @@ void ia_css_frame_info_init(struct ia_css_frame_info *info,
enum ia_css_frame_format format,
unsigned int aligned);
-/** @brief Checks whether 2 frame infos has the same resolution
+/* @brief Checks whether 2 frame infos has the same resolution
*
* @param
* @param[in] frame_a The first frame to be compared
@@ -90,7 +90,7 @@ bool ia_css_frame_info_is_same_resolution(
const struct ia_css_frame_info *info_a,
const struct ia_css_frame_info *info_b);
-/** @brief Check the frame info is valid
+/* @brief Check the frame info is valid
*
* @param
* @param[in] info The frame attributes to be initialized
@@ -102,7 +102,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
**** Frame APIs
**********************************************************************/
-/** @brief Initialize the plane depending on the frame type
+/* @brief Initialize the plane depending on the frame type
*
* @param
* @param[in] frame The frame attributes to be initialized
@@ -110,7 +110,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
*/
enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
-/** @brief Free an array of frames
+/* @brief Free an array of frames
*
* @param
* @param[in] num_frames The number of frames to be freed in the array
@@ -120,7 +120,7 @@ enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
void ia_css_frame_free_multiple(unsigned int num_frames,
struct ia_css_frame **frames_array);
-/** @brief Allocate a CSS frame structure of given size in bytes..
+/* @brief Allocate a CSS frame structure of given size in bytes..
*
* @param frame The allocated frame.
* @param[in] size_bytes The frame size in bytes.
@@ -135,7 +135,7 @@ enum ia_css_err ia_css_frame_allocate_with_buffer_size(
const unsigned int size_bytes,
const bool contiguous);
-/** @brief Check whether 2 frames are same type
+/* @brief Check whether 2 frames are same type
*
* @param
* @param[in] frame_a The first frame to be compared
@@ -146,7 +146,7 @@ bool ia_css_frame_is_same_type(
const struct ia_css_frame *frame_a,
const struct ia_css_frame *frame_b);
-/** @brief Configure a dma port from frame info
+/* @brief Configure a dma port from frame info
*
* @param
* @param[in] config The DAM port configuration
@@ -158,7 +158,7 @@ void ia_css_dma_configure_from_info(
const struct ia_css_frame_info *info);
#ifdef ISP2401
-/** @brief Finds the cropping resolution
+/* @brief Finds the cropping resolution
* This function finds the maximum cropping resolution in an input image keeping
* the aspect ratio for the given output resolution.Calculates the coordinates
* for cropping from the center and returns the starting pixel location of the
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
index f1a943cf04c0..5faa89ad8a23 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
index a7c6bba7e094..adefa57820a4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
@@ -29,6 +29,7 @@ more details.
#endif
#include "system_global.h"
+#include <linux/kernel.h>
#ifdef USE_INPUT_SYSTEM_VERSION_2
@@ -487,7 +488,7 @@ static void ifmtr_set_if_blocking_mode(
{
int i;
bool block[] = { false, false, false, false };
- assert(N_INPUT_FORMATTER_ID <= (sizeof(block) / sizeof(block[0])));
+ assert(N_INPUT_FORMATTER_ID <= (ARRAY_SIZE(block)));
#if !defined(IS_ISP_2400_SYSTEM)
#error "ifmtr_set_if_blocking_mode: ISP_SYSTEM must be one of {IS_ISP_2400_SYSTEM}"
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
index cf02970d4f59..8dc74927e9a2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
@@ -105,7 +105,7 @@ static struct inputfifo_instance
/* Streaming to MIPI */
static unsigned inputfifo_wrap_marker(
-/* STORAGE_CLASS_INLINE unsigned inputfifo_wrap_marker( */
+/* static inline unsigned inputfifo_wrap_marker( */
unsigned marker)
{
return marker |
@@ -113,7 +113,7 @@ static unsigned inputfifo_wrap_marker(
(inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
}
-STORAGE_CLASS_INLINE void
+static inline void
_sh_css_fifo_snd(unsigned token)
{
while (!can_event_send_token(STR2MIPI_EVENT_ID))
@@ -123,7 +123,7 @@ _sh_css_fifo_snd(unsigned token)
}
static void inputfifo_send_data_a(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data_a( */
+/* static inline void inputfifo_send_data_a( */
unsigned int data)
{
unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
@@ -135,7 +135,7 @@ unsigned int data)
static void inputfifo_send_data_b(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data_b( */
+/* static inline void inputfifo_send_data_b( */
unsigned int data)
{
unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
@@ -147,7 +147,7 @@ static void inputfifo_send_data_b(
static void inputfifo_send_data(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data( */
+/* static inline void inputfifo_send_data( */
unsigned int a,
unsigned int b)
{
@@ -162,7 +162,7 @@ static void inputfifo_send_data(
static void inputfifo_send_sol(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_sol(void) */
+/* static inline void inputfifo_send_sol(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_SOL_BIT);
@@ -174,7 +174,7 @@ static void inputfifo_send_sol(void)
static void inputfifo_send_eol(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_eol(void) */
+/* static inline void inputfifo_send_eol(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_EOL_BIT);
@@ -185,7 +185,7 @@ static void inputfifo_send_eol(void)
static void inputfifo_send_sof(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_sof(void) */
+/* static inline void inputfifo_send_sof(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_SOF_BIT);
@@ -197,7 +197,7 @@ static void inputfifo_send_sof(void)
static void inputfifo_send_eof(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_eof(void) */
+/* static inline void inputfifo_send_eof(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_EOF_BIT);
@@ -209,7 +209,7 @@ static void inputfifo_send_eof(void)
#ifdef __ON__
static void inputfifo_send_ch_id(
-/* STORAGE_CLASS_INLINE void inputfifo_send_ch_id( */
+/* static inline void inputfifo_send_ch_id( */
unsigned int ch_id)
{
hrt_data token;
@@ -223,7 +223,7 @@ static void inputfifo_send_ch_id(
}
static void inputfifo_send_fmt_type(
-/* STORAGE_CLASS_INLINE void inputfifo_send_fmt_type( */
+/* static inline void inputfifo_send_fmt_type( */
unsigned int fmt_type)
{
hrt_data token;
@@ -240,7 +240,7 @@ static void inputfifo_send_fmt_type(
static void inputfifo_send_ch_id_and_fmt_type(
-/* STORAGE_CLASS_INLINE
+/* static inline
void inputfifo_send_ch_id_and_fmt_type( */
unsigned int ch_id,
unsigned int fmt_type)
@@ -259,7 +259,7 @@ void inputfifo_send_ch_id_and_fmt_type( */
static void inputfifo_send_empty_token(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_empty_token(void) */
+/* static inline void inputfifo_send_empty_token(void) */
{
hrt_data token = inputfifo_wrap_marker(0);
_sh_css_fifo_snd(token);
@@ -269,7 +269,7 @@ static void inputfifo_send_empty_token(void)
static void inputfifo_start_frame(
-/* STORAGE_CLASS_INLINE void inputfifo_start_frame( */
+/* static inline void inputfifo_start_frame( */
unsigned int ch_id,
unsigned int fmt_type)
{
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
index 8e651b80345a..2283dd1c1c9b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
@@ -53,7 +53,7 @@ enum ia_css_param_class {
};
#define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1)
-/** ISP parameter descriptor */
+/* ISP parameter descriptor */
struct ia_css_isp_parameter {
uint32_t offset; /* Offset in isp_<mem>)parameters, etc. */
uint32_t size; /* Disabled if 0 */
@@ -77,10 +77,10 @@ struct ia_css_isp_param_isp_segments {
/* Memory offsets in binary info */
struct ia_css_isp_param_memory_offsets {
- uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES]; /**< offset wrt hdr in bytes */
+ uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES]; /** offset wrt hdr in bytes */
};
-/** Offsets for ISP kernel parameters per isp memory.
+/* Offsets for ISP kernel parameters per isp memory.
* Only relevant for standard ISP binaries, not ACC or SP.
*/
union ia_css_all_memory_offsets {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
index 832d9e16edeb..f793ce125f02 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
index 02bf908d94e6..4cf2defe9ef0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
@@ -44,7 +44,7 @@ more details.
* Virtual Input System. (Input System 2401)
*/
typedef input_system_cfg_t ia_css_isys_descr_t;
-/** end of Virtual Input System */
+/* end of Virtual Input System */
#endif
#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
@@ -112,7 +112,7 @@ unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
-/** @brief Translate format and compression to format type.
+/* @brief Translate format and compression to format type.
*
* @param[in] input_format The input format.
* @param[in] compression The compression scheme.
@@ -195,7 +195,7 @@ extern void ia_css_isys_stream2mmio_sid_rmgr_release(
stream2mmio_ID_t stream2mmio,
stream2mmio_sid_ID_t *sid);
-/** end of Virtual Input System */
+/* end of Virtual Input System */
#endif
#endif /* __IA_CSS_ISYS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
index d1d4f79c00f1..3b04dc51335a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
index faef97672eac..d8c3b75d7fac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
index 5032627342d9..4def4a542b7d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
index 239ef310bdeb..4122084fd237 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
index a93c7f44ff12..222b294c0ab0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
index 46a157f64343..70f6cb5e5918 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
index 0f1e8a2f6b10..90922a7acefd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
@@ -166,7 +166,7 @@ static int32_t calculate_stride(
bool raw_packed,
int32_t align_in_bytes);
-/** end of Forwarded Declaration */
+/* end of Forwarded Declaration */
/**************************************************
*
@@ -292,7 +292,7 @@ ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
return rc;
}
-/** end of Public Methods */
+/* end of Public Methods */
/**************************************************
*
@@ -894,5 +894,5 @@ static csi_mipi_packet_type_t get_csi_mipi_packet_type(
return packet_type;
}
-/** end of Private Methods */
+/* end of Private Methods */
#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
index 90646f5f8885..e64936e2d46e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
@@ -103,7 +103,7 @@ struct ia_css_pipeline_stage_desc {
struct ia_css_frame *vf_frame;
};
-/** @brief initialize the pipeline module
+/* @brief initialize the pipeline module
*
* @return None
*
@@ -112,7 +112,7 @@ struct ia_css_pipeline_stage_desc {
*/
void ia_css_pipeline_init(void);
-/** @brief initialize the pipeline structure with default values
+/* @brief initialize the pipeline structure with default values
*
* @param[out] pipeline structure to be initialized with defaults
* @param[in] pipe_id
@@ -129,7 +129,7 @@ enum ia_css_err ia_css_pipeline_create(
unsigned int pipe_num,
unsigned int dvs_frame_delay);
-/** @brief destroy a pipeline
+/* @brief destroy a pipeline
*
* @param[in] pipeline
* @return None
@@ -138,7 +138,7 @@ enum ia_css_err ia_css_pipeline_create(
void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
-/** @brief Starts a pipeline
+/* @brief Starts a pipeline
*
* @param[in] pipe_id
* @param[in] pipeline
@@ -148,7 +148,7 @@ void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
struct ia_css_pipeline *pipeline);
-/** @brief Request to stop a pipeline
+/* @brief Request to stop a pipeline
*
* @param[in] pipeline
* @return IA_CSS_SUCCESS or error code upon error.
@@ -156,7 +156,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
*/
enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
-/** @brief Check whether pipeline has stopped
+/* @brief Check whether pipeline has stopped
*
* @param[in] pipeline
* @return true if the pipeline has stopped
@@ -164,7 +164,7 @@ enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
*/
bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
-/** @brief clean all the stages pipeline and make it as new
+/* @brief clean all the stages pipeline and make it as new
*
* @param[in] pipeline
* @return None
@@ -172,7 +172,7 @@ bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
*/
void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline);
-/** @brief Add a stage to pipeline.
+/* @brief Add a stage to pipeline.
*
* @param pipeline Pointer to the pipeline to be added to.
* @param[in] stage_desc The description of the stage
@@ -188,7 +188,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
struct ia_css_pipeline_stage_desc *stage_desc,
struct ia_css_pipeline_stage **stage);
-/** @brief Finalize the stages in a pipeline
+/* @brief Finalize the stages in a pipeline
*
* @param pipeline Pointer to the pipeline to be added to.
* @return None
@@ -198,7 +198,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
bool continuous);
-/** @brief gets a stage from the pipeline
+/* @brief gets a stage from the pipeline
*
* @param[in] pipeline
* @return IA_CSS_SUCCESS or error code upon error.
@@ -208,7 +208,7 @@ enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
int mode,
struct ia_css_pipeline_stage **stage);
-/** @brief Gets a pipeline stage corresponding Firmware handle from the pipeline
+/* @brief Gets a pipeline stage corresponding Firmware handle from the pipeline
*
* @param[in] pipeline
* @param[in] fw_handle
@@ -221,7 +221,7 @@ enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeli
uint32_t fw_handle,
struct ia_css_pipeline_stage **stage);
-/** @brief Gets the Firmware handle correponding the stage num from the pipeline
+/* @brief Gets the Firmware handle correponding the stage num from the pipeline
*
* @param[in] pipeline
* @param[in] stage_num
@@ -234,7 +234,7 @@ enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeli
uint32_t stage_num,
uint32_t *fw_handle);
-/** @brief gets the output stage from the pipeline
+/* @brief gets the output stage from the pipeline
*
* @param[in] pipeline
* @return IA_CSS_SUCCESS or error code upon error.
@@ -245,7 +245,7 @@ enum ia_css_err ia_css_pipeline_get_output_stage(
int mode,
struct ia_css_pipeline_stage **stage);
-/** @brief Checks whether the pipeline uses params
+/* @brief Checks whether the pipeline uses params
*
* @param[in] pipeline
* @return true if the pipeline uses params
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
index 95542fc82217..8f93d29d1c51 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
@@ -114,7 +114,7 @@ void ia_css_pipeline_map(unsigned int pipe_num, bool map)
IA_CSS_LEAVE_PRIVATE("void");
}
-/** @brief destroy a pipeline
+/* @brief destroy a pipeline
*
* @param[in] pipeline
* @return None
@@ -187,7 +187,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
"ia_css_pipeline_start() leave: return_void\n");
}
-/**
+/*
* @brief Query the SP thread ID.
* Refer to "sh_css_internal.h" for details.
*/
@@ -285,7 +285,7 @@ void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
IA_CSS_LEAVE_PRIVATE("void");
}
-/** @brief Add a stage to pipeline.
+/* @brief Add a stage to pipeline.
*
* @param pipeline Pointer to the pipeline to be added to.
* @param[in] stage_desc The description of the stage
@@ -603,7 +603,7 @@ static enum ia_css_err pipeline_stage_create(
/* Verify input parameters*/
if (!(stage_desc->in_frame) && !(stage_desc->firmware)
&& (stage_desc->binary) && !(stage_desc->binary->online)) {
- err = IA_CSS_ERR_INTERNAL_ERROR;
+ err = IA_CSS_ERR_INTERNAL_ERROR;
goto ERR;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
index e50a0f813753..aaf2e247cafb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
@@ -51,7 +51,7 @@ typedef struct ia_css_queue ia_css_queue_t;
/*****************************************************************************
* Queue Public APIs
*****************************************************************************/
-/** @brief Initialize a local queue instance.
+/* @brief Initialize a local queue instance.
*
* @param[out] qhandle. Handle to queue instance for use with API
* @param[in] desc. Descriptor with queue properties filled-in
@@ -63,7 +63,7 @@ extern int ia_css_queue_local_init(
ia_css_queue_t *qhandle,
ia_css_queue_local_t *desc);
-/** @brief Initialize a remote queue instance
+/* @brief Initialize a remote queue instance
*
* @param[out] qhandle. Handle to queue instance for use with API
* @param[in] desc. Descriptor with queue properties filled-in
@@ -74,7 +74,7 @@ extern int ia_css_queue_remote_init(
ia_css_queue_t *qhandle,
ia_css_queue_remote_t *desc);
-/** @brief Uninitialize a queue instance
+/* @brief Uninitialize a queue instance
*
* @param[in] qhandle. Handle to queue instance
* @return 0 - Successful uninit.
@@ -83,7 +83,7 @@ extern int ia_css_queue_remote_init(
extern int ia_css_queue_uninit(
ia_css_queue_t *qhandle);
-/** @brief Enqueue an item in the queue instance
+/* @brief Enqueue an item in the queue instance
*
* @param[in] qhandle. Handle to queue instance
* @param[in] item. Object to be enqueued.
@@ -96,7 +96,7 @@ extern int ia_css_queue_enqueue(
ia_css_queue_t *qhandle,
uint32_t item);
-/** @brief Dequeue an item from the queue instance
+/* @brief Dequeue an item from the queue instance
*
* @param[in] qhandle. Handle to queue instance
* @param[out] item. Object to be dequeued into this item.
@@ -110,7 +110,7 @@ extern int ia_css_queue_dequeue(
ia_css_queue_t *qhandle,
uint32_t *item);
-/** @brief Check if the queue is empty
+/* @brief Check if the queue is empty
*
* @param[in] qhandle. Handle to queue instance
* @param[in] is_empty True if empty, False if not.
@@ -123,7 +123,7 @@ extern int ia_css_queue_is_empty(
ia_css_queue_t *qhandle,
bool *is_empty);
-/** @brief Check if the queue is full
+/* @brief Check if the queue is full
*
* @param[in] qhandle. Handle to queue instance
* @param[in] is_full True if Full, False if not.
@@ -136,7 +136,7 @@ extern int ia_css_queue_is_full(
ia_css_queue_t *qhandle,
bool *is_full);
-/** @brief Get used space in the queue
+/* @brief Get used space in the queue
*
* @param[in] qhandle. Handle to queue instance
* @param[in] size Number of available elements in the queue
@@ -148,7 +148,7 @@ extern int ia_css_queue_get_used_space(
ia_css_queue_t *qhandle,
uint32_t *size);
-/** @brief Get free space in the queue
+/* @brief Get free space in the queue
*
* @param[in] qhandle. Handle to queue instance
* @param[in] size Number of free elements in the queue
@@ -160,7 +160,7 @@ extern int ia_css_queue_get_free_space(
ia_css_queue_t *qhandle,
uint32_t *size);
-/** @brief Peek at an element in the queue
+/* @brief Peek at an element in the queue
*
* @param[in] qhandle. Handle to queue instance
* @param[in] offset Offset of element to peek,
@@ -175,7 +175,7 @@ extern int ia_css_queue_peek(
uint32_t offset,
uint32_t *element);
-/** @brief Get the usable size for the queue
+/* @brief Get the usable size for the queue
*
* @param[in] qhandle. Handle to queue instance
* @param[out] size Size value to be returned here.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
index 946d4f2d2108..7bb2b494836e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
index a0bb9f663ce6..9f78e709b3d0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
@@ -31,15 +31,14 @@ more details.
#ifndef _IA_CSS_RMGR_H
#define _IA_CSS_RMGR_H
-#include "storage_class.h"
#include <ia_css_err.h>
#ifndef __INLINE_RMGR__
-#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RMGR_H extern
#define STORAGE_CLASS_RMGR_C
#else /* __INLINE_RMGR__ */
-#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_RMGR_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RMGR_H static inline
+#define STORAGE_CLASS_RMGR_C static inline
#endif /* __INLINE_RMGR__ */
/**
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
index efa9c140484f..370ff3816dbe 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
@@ -44,7 +44,7 @@ enum ia_css_err ia_css_rmgr_init(void)
return err;
}
-/**
+/*
* @brief Uninitialize resource pool (host)
*/
void ia_css_rmgr_uninit(void)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
index fa92d8da8f1c..54239ac9d7c9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
@@ -20,13 +20,13 @@
#include <memory_access.h> /* mmmgr_malloc, mhmm_free */
#include <ia_css_debug.h>
-/**
+/*
* @brief VBUF resource handles
*/
#define NUM_HANDLES 1000
struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES];
-/**
+/*
* @brief VBUF resource pool - refpool
*/
struct ia_css_rmgr_vbuf_pool refpool = {
@@ -37,7 +37,7 @@ struct ia_css_rmgr_vbuf_pool refpool = {
NULL, /* handles */
};
-/**
+/*
* @brief VBUF resource pool - writepool
*/
struct ia_css_rmgr_vbuf_pool writepool = {
@@ -48,7 +48,7 @@ struct ia_css_rmgr_vbuf_pool writepool = {
NULL, /* handles */
};
-/**
+/*
* @brief VBUF resource pool - hmmbufferpool
*/
struct ia_css_rmgr_vbuf_pool hmmbufferpool = {
@@ -63,7 +63,7 @@ struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool;
struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool;
struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool;
-/**
+/*
* @brief Initialize the reference count (host, vbuf)
*/
static void rmgr_refcount_init_vbuf(void)
@@ -72,7 +72,7 @@ static void rmgr_refcount_init_vbuf(void)
memset(&handle_table, 0, sizeof(handle_table));
}
-/**
+/*
* @brief Retain the reference count for a handle (host, vbuf)
*
* @param handle The pointer to the handle
@@ -109,7 +109,7 @@ void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
(*handle)->count++;
}
-/**
+/*
* @brief Release the reference count for a handle (host, vbuf)
*
* @param handle The pointer to the handle
@@ -131,7 +131,7 @@ void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
}
}
-/**
+/*
* @brief Initialize the resource pool (host, vbuf)
*
* @param pool The pointer to the pool
@@ -163,7 +163,7 @@ enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
return err;
}
-/**
+/*
* @brief Uninitialize the resource pool (host, vbuf)
*
* @param pool The pointer to the pool
@@ -174,7 +174,7 @@ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_rmgr_uninit_vbuf()\n");
if (pool == NULL) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "ia_css_rmgr_uninit_vbuf(): NULL argument\n");
- return;
+ return;
}
if (pool->handles != NULL) {
/* free the hmm buffers */
@@ -197,7 +197,7 @@ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
}
}
-/**
+/*
* @brief Push a handle to the pool
*
* @param pool The pointer to the pool
@@ -224,7 +224,7 @@ void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
assert(succes);
}
-/**
+/*
* @brief Pop a handle from the pool
*
* @param pool The pointer to the pool
@@ -254,7 +254,7 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
}
}
-/**
+/*
* @brief Acquire a handle from the pool (host, vbuf)
*
* @param pool The pointer to the pool
@@ -302,7 +302,7 @@ void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
ia_css_rmgr_refcount_retain_vbuf(handle);
}
-/**
+/*
* @brief Release a handle to the pool (host, vbuf)
*
* @param pool The pointer to the pool
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
index 27e9eb1e2102..bc4b1723369e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
@@ -37,17 +37,17 @@ more details.
typedef struct {
- uint32_t ddr_data_offset; /**< posistion of data in DDR */
- uint32_t dmem_data_addr; /**< data segment address in dmem */
- uint32_t dmem_bss_addr; /**< bss segment address in dmem */
- uint32_t data_size; /**< data segment size */
- uint32_t bss_size; /**< bss segment size */
- uint32_t spctrl_config_dmem_addr; /** <location of dmem_cfg in SP dmem */
- uint32_t spctrl_state_dmem_addr; /** < location of state in SP dmem */
- unsigned int sp_entry; /** < entry function ptr on SP */
- const void *code; /**< location of firmware */
+ uint32_t ddr_data_offset; /** posistion of data in DDR */
+ uint32_t dmem_data_addr; /** data segment address in dmem */
+ uint32_t dmem_bss_addr; /** bss segment address in dmem */
+ uint32_t data_size; /** data segment size */
+ uint32_t bss_size; /** bss segment size */
+ uint32_t spctrl_config_dmem_addr; /* <location of dmem_cfg in SP dmem */
+ uint32_t spctrl_state_dmem_addr; /* < location of state in SP dmem */
+ unsigned int sp_entry; /* < entry function ptr on SP */
+ const void *code; /** location of firmware */
uint32_t code_size;
- char *program_name; /**< not used on hardware, only for simulation */
+ char *program_name; /** not used on hardware, only for simulation */
} ia_css_spctrl_cfg;
/* Get the code addr in DDR of SP */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
index 3af2891efca7..2620d7514f79 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
@@ -41,16 +41,16 @@ typedef enum {
IA_CSS_SP_SW_RUNNING
} ia_css_spctrl_sp_sw_state;
-/** Structure to encapsulate required arguments for
+/* Structure to encapsulate required arguments for
* initialization of SP DMEM using the SP itself
*/
struct ia_css_sp_init_dmem_cfg {
- ia_css_ptr ddr_data_addr; /**< data segment address in ddr */
- uint32_t dmem_data_addr; /**< data segment address in dmem */
- uint32_t dmem_bss_addr; /**< bss segment address in dmem */
- uint32_t data_size; /**< data segment size */
- uint32_t bss_size; /**< bss segment size */
- sp_ID_t sp_id; /** <sp Id */
+ ia_css_ptr ddr_data_addr; /** data segment address in ddr */
+ uint32_t dmem_data_addr; /** data segment address in dmem */
+ uint32_t dmem_bss_addr; /** bss segment address in dmem */
+ uint32_t data_size; /** data segment size */
+ uint32_t bss_size; /** bss segment size */
+ sp_ID_t sp_id; /* <sp Id */
};
#define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
index d9178e80dab2..844e4d536cec 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
@@ -37,17 +37,17 @@ more details.
#include "ia_css_spctrl.h"
#include "ia_css_debug.h"
-typedef struct {
+struct spctrl_context_info {
struct ia_css_sp_init_dmem_cfg dmem_config;
- uint32_t spctrl_config_dmem_addr; /** location of dmem_cfg in SP dmem */
+ uint32_t spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
uint32_t spctrl_state_dmem_addr;
unsigned int sp_entry; /* entry function ptr on SP */
hrt_vaddress code_addr; /* sp firmware location in host mem-DDR*/
uint32_t code_size;
char *program_name; /* used in case of PLATFORM_SIM */
-} spctrl_context_info;
+};
-static spctrl_context_info spctrl_cofig_info[N_SP_ID];
+static struct spctrl_context_info spctrl_cofig_info[N_SP_ID];
static bool spctrl_loaded[N_SP_ID] = {0};
/* Load firmware */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
index 49c69e60ca5c..b7dd18492a91 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
@@ -13,7 +13,7 @@
* more details.
*/
#else
-/**
+/*
Support for Intel Camera Imaging ISP subsystem.
Copyright (c) 2010 - 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
index e882b5596813..322bb3de6098 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
@@ -176,7 +176,7 @@ static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
static bool fw_explicitly_loaded = false;
-/**
+/*
* Local prototypes
*/
@@ -187,7 +187,7 @@ static enum ia_css_err
sh_css_pipe_start(struct ia_css_stream *stream);
#ifdef ISP2401
-/**
+/*
* @brief Stop all "ia_css_pipe" instances in the target
* "ia_css_stream" instance.
*
@@ -207,7 +207,7 @@ sh_css_pipe_start(struct ia_css_stream *stream);
static enum ia_css_err
sh_css_pipes_stop(struct ia_css_stream *stream);
-/**
+/*
* @brief Check if all "ia_css_pipe" instances in the target
* "ia_css_stream" instance have stopped.
*
@@ -451,8 +451,6 @@ static enum ia_css_frame_format yuv422_copy_formats[] = {
IA_CSS_FRAME_FORMAT_YUYV
};
-#define array_length(array) (sizeof(array)/sizeof(array[0]))
-
/* Verify whether the selected output format is can be produced
* by the copy binary given the stream format.
* */
@@ -468,7 +466,7 @@ verify_copy_out_frame_format(struct ia_css_pipe *pipe)
switch (pipe->stream->config.input_config.format) {
case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
case IA_CSS_STREAM_FORMAT_YUV420_8:
- for (i=0; i<array_length(yuv420_copy_formats) && !found; i++)
+ for (i=0; i<ARRAY_SIZE(yuv420_copy_formats) && !found; i++)
found = (out_fmt == yuv420_copy_formats[i]);
break;
case IA_CSS_STREAM_FORMAT_YUV420_10:
@@ -476,7 +474,7 @@ verify_copy_out_frame_format(struct ia_css_pipe *pipe)
found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
break;
case IA_CSS_STREAM_FORMAT_YUV422_8:
- for (i=0; i<array_length(yuv422_copy_formats) && !found; i++)
+ for (i=0; i<ARRAY_SIZE(yuv422_copy_formats) && !found; i++)
found = (out_fmt == yuv422_copy_formats[i]);
break;
case IA_CSS_STREAM_FORMAT_YUV422_10:
@@ -1651,7 +1649,7 @@ ia_css_init(const struct ia_css_env *env,
void (*flush_func)(struct ia_css_acc_fw *fw);
hrt_data select, enable;
- /**
+ /*
* The C99 standard does not specify the exact object representation of structs;
* the representation is compiler dependent.
*
@@ -3781,6 +3779,7 @@ static enum ia_css_err
create_host_acc_pipeline(struct ia_css_pipe *pipe)
{
enum ia_css_err err = IA_CSS_SUCCESS;
+ const struct ia_css_fw_info *fw;
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
@@ -3794,14 +3793,12 @@ create_host_acc_pipeline(struct ia_css_pipe *pipe)
if (pipe->config.acc_extension)
pipe->pipeline.pipe_qos_config = 0;
-{
- const struct ia_css_fw_info *fw = pipe->vf_stage;
+ fw = pipe->vf_stage;
for (i = 0; fw; fw = fw->next){
err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
if (err != IA_CSS_SUCCESS)
goto ERR;
}
-}
for (i=0; i<pipe->config.num_acc_stages; i++) {
struct ia_css_fw_info *fw = pipe->config.acc_stages[i];
@@ -4333,12 +4330,13 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
}
}
} else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) {
- return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) {
+
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
queue_id,
(uint32_t)h_vbuf->vptr);
#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
@@ -4619,23 +4617,23 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
* 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
*/
static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
- IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /**< Output frame ready. */
- IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /**< Second output frame ready. */
- IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /**< Viewfinder Output frame ready. */
- IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /**< Second viewfinder Output frame ready. */
- IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /**< Indication that 3A statistics are available. */
- IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /**< Indication that DIS statistics are available. */
- IA_CSS_EVENT_TYPE_PIPELINE_DONE, /**< Pipeline Done event, sent after last pipeline stage. */
- IA_CSS_EVENT_TYPE_FRAME_TAGGED, /**< Frame tagged. */
- IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /**< Input frame ready. */
- IA_CSS_EVENT_TYPE_METADATA_DONE, /**< Metadata ready. */
- IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /**< Indication that LACE statistics are available. */
- IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /**< Extension stage executed. */
- IA_CSS_EVENT_TYPE_TIMER, /**< Timing measurement data. */
- IA_CSS_EVENT_TYPE_PORT_EOF, /**< End Of Frame event, sent when in buffered sensor mode. */
- IA_CSS_EVENT_TYPE_FW_WARNING, /**< Performance warning encountered by FW */
- IA_CSS_EVENT_TYPE_FW_ASSERT, /**< Assertion hit by FW */
- 0, /** error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /** Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /** Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /** Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /** Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /** Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /** Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE, /** Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED, /** Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /** Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE, /** Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /** Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /** Extension stage executed. */
+ IA_CSS_EVENT_TYPE_TIMER, /** Timing measurement data. */
+ IA_CSS_EVENT_TYPE_PORT_EOF, /** End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING, /** Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT, /** Assertion hit by FW */
+ 0, /* error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
};
enum ia_css_err
@@ -5030,7 +5028,7 @@ sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
bool
sh_css_continuous_is_enabled(uint8_t pipe_num)
#else
-/**
+/*
* @brief Stop all "ia_css_pipe" instances in the target
* "ia_css_stream" instance.
*
@@ -5109,7 +5107,7 @@ ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
return IA_CSS_SUCCESS;
}
#else
- /**
+ /*
* Stop all "ia_css_pipe" instances in this target
* "ia_css_stream" instance.
*/
@@ -5148,7 +5146,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
}
}
- /**
+ /*
* In the CSS firmware use scenario "Continuous Preview"
* as well as "Continuous Video", the "ia_css_pipe" instance
* "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5185,7 +5183,7 @@ ERR:
return err;
}
-/**
+/*
* @brief Check if all "ia_css_pipe" instances in the target
* "ia_css_stream" instance have stopped.
*
@@ -5220,7 +5218,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
main_pipe_id = main_pipe->mode;
IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
- /**
+ /*
* Check if every "ia_css_pipe" instance in this target
* "ia_css_stream" instance has stopped.
*/
@@ -5231,7 +5229,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
rval);
}
- /**
+ /*
* In the CSS firmware use scenario "Continuous Preview"
* as well as "Continuous Video", the "ia_css_pipe" instance
* "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5476,7 +5474,7 @@ ERR:
}
#ifdef ISP2401
-/**
+/*
* @brief Check if a format is supported by the pipe.
*
*/
@@ -5607,13 +5605,13 @@ static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
return err;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage
* sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
return err;
}
@@ -5797,13 +5795,15 @@ static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
#endif
/* Make tnr reference buffers output block height align */
- tnr_info.res.height =
#ifndef ISP2401
+ tnr_info.res.height =
CEIL_MUL(tnr_info.res.height,
+ mycs->video_binary.info->sp.block.output_block_height);
#else
+ tnr_info.res.height =
CEIL_MUL(tnr_height,
+ mycs->video_binary.info->sp.block.output_block_height);
#endif
- mycs->video_binary.info->sp.block.output_block_height);
} else {
tnr_info = mycs->video_binary.internal_frame_info;
}
@@ -6027,7 +6027,7 @@ sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
err = ia_css_util_check_res(width, height);
if (err != IA_CSS_SUCCESS) {
- IA_CSS_LEAVE_ERR_PRIVATE(err);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
if (pipe->vf_output_info[idx].res.width != width ||
@@ -6258,14 +6258,14 @@ static enum ia_css_err load_primary_binaries(
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -6982,27 +6982,27 @@ static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output(
}
descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->in_info == NULL) {
+ if (!descr->in_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->internal_out_info == NULL) {
+ if (!descr->internal_out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->out_info == NULL) {
+ if (!descr->out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->vf_info == NULL) {
+ if (!descr->vf_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
- if (descr->is_output_stage == NULL) {
+ if (!descr->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7118,22 +7118,22 @@ static enum ia_css_err ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pi
descr->num_stage = num_stages;
descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->in_info == NULL) {
+ if (!descr->in_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->internal_out_info == NULL) {
+ if (!descr->internal_out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->out_info == NULL) {
+ if (!descr->out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->vf_info == NULL) {
+ if (!descr->vf_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7276,13 +7276,13 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7383,7 +7383,7 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
}
mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp * sizeof(struct ia_css_binary),
GFP_KERNEL);
- if (mycs->vf_pp_binary == NULL) {
+ if (!mycs->vf_pp_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -8626,7 +8626,7 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
return err;
}
-/**
+/*
* @brief Tag a specific frame in continuous capture.
* Refer to "sh_css_internal.h" for details.
*/
@@ -8666,7 +8666,7 @@ enum ia_css_err ia_css_stream_capture_frame(struct ia_css_stream *stream,
return err;
}
-/**
+/*
* @brief Configure the continuous capture.
* Refer to "sh_css_internal.h" for details.
*/
@@ -8689,9 +8689,9 @@ enum ia_css_err ia_css_stream_capture(
/* Check if the tag descriptor is valid */
if (num_captures < SH_CSS_MINIMUM_TAG_ID) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_stream_capture() leave: return_err=%d\n",
- IA_CSS_ERR_INVALID_ARGUMENTS);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ IA_CSS_ERR_INVALID_ARGUMENTS);
return IA_CSS_ERR_INVALID_ARGUMENTS;
}
@@ -8822,7 +8822,7 @@ sh_css_init_host_sp_control_vars(void)
"sh_css_init_host_sp_control_vars() leave: return_void\n");
}
-/**
+/*
* create the internal structures and fill in the configuration data
*/
void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
@@ -9445,7 +9445,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* allocate the stream instance */
curr_stream = kmalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
- if (curr_stream == NULL) {
+ if (!curr_stream) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -9457,7 +9457,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* allocate pipes */
curr_stream->num_pipes = num_pipes;
curr_stream->pipes = kzalloc(num_pipes * sizeof(struct ia_css_pipe *), GFP_KERNEL);
- if (curr_stream->pipes == NULL) {
+ if (!curr_stream->pipes) {
curr_stream->num_pipes = 0;
kfree(curr_stream);
curr_stream = NULL;
@@ -9780,23 +9780,22 @@ ERR:
if (err == IA_CSS_SUCCESS)
{
/* working mode: enter into the seed list */
- if (my_css_save.mode == sh_css_mode_working)
- for(i = 0; i < MAX_ACTIVE_STREAMS; i++)
- if (my_css_save.stream_seeds[i].stream == NULL)
- {
- IA_CSS_LOG("entered stream into loc=%d", i);
- my_css_save.stream_seeds[i].orig_stream = stream;
- my_css_save.stream_seeds[i].stream = curr_stream;
- my_css_save.stream_seeds[i].num_pipes = num_pipes;
- my_css_save.stream_seeds[i].stream_config = *stream_config;
- for(j = 0; j < num_pipes; j++)
- {
- my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
- my_css_save.stream_seeds[i].pipes[j] = pipes[j];
- my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
- }
- break;
+ if (my_css_save.mode == sh_css_mode_working) {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (!my_css_save.stream_seeds[i].stream) {
+ IA_CSS_LOG("entered stream into loc=%d", i);
+ my_css_save.stream_seeds[i].orig_stream = stream;
+ my_css_save.stream_seeds[i].stream = curr_stream;
+ my_css_save.stream_seeds[i].num_pipes = num_pipes;
+ my_css_save.stream_seeds[i].stream_config = *stream_config;
+ for (j = 0; j < num_pipes; j++) {
+ my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
+ my_css_save.stream_seeds[i].pipes[j] = pipes[j];
+ my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
+ }
+ break;
}
+ }
#else
if (err == IA_CSS_SUCCESS) {
err = ia_css_save_stream(curr_stream);
@@ -9970,32 +9969,32 @@ ia_css_stream_load(struct ia_css_stream *stream)
enum ia_css_err err;
assert(stream != NULL);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter, \n");
- for(i=0;i<MAX_ACTIVE_STREAMS;i++)
- if (my_css_save.stream_seeds[i].stream == stream)
- {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (my_css_save.stream_seeds[i].stream == stream) {
int j;
- for(j=0;j<my_css_save.stream_seeds[i].num_pipes;j++)
- if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS)
- {
- if (j)
- {
+ for ( j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) {
+ if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS) {
+ if (j) {
int k;
for(k=0;k<j;k++)
ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
}
return err;
}
- err = ia_css_stream_create(&(my_css_save.stream_seeds[i].stream_config), my_css_save.stream_seeds[i].num_pipes,
- my_css_save.stream_seeds[i].pipes, &(my_css_save.stream_seeds[i].stream));
- if (err != IA_CSS_SUCCESS)
- {
+ }
+ err = ia_css_stream_create(&(my_css_save.stream_seeds[i].stream_config),
+ my_css_save.stream_seeds[i].num_pipes,
+ my_css_save.stream_seeds[i].pipes,
+ &(my_css_save.stream_seeds[i].stream));
+ if (err != IA_CSS_SUCCESS) {
ia_css_stream_destroy(stream);
- for(j=0;j<my_css_save.stream_seeds[i].num_pipes;j++)
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
return err;
}
break;
}
+ }
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit, \n");
return IA_CSS_SUCCESS;
#else
@@ -10436,7 +10435,7 @@ ia_css_start_sp(void)
return err;
}
-/**
+/*
* Time to wait SP for termincate. Only condition when this can happen
* is a fatal hw failure, but we must be able to detect this and emit
* a proper error trace.
@@ -10524,7 +10523,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream)
ia_css_debug_dtrace(
IA_CSS_DEBUG_TRACE,
"sh_css_update_continuous_frames() leave: invalid stream, return_void\n");
- return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
}
pipe = stream->continuous_pipe;
@@ -10714,7 +10713,7 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
return ret;
}
-/** @brief Set the state (Enable or Disable) of the Extension stage in the
+/* @brief Set the state (Enable or Disable) of the Extension stage in the
* given pipe.
*/
enum ia_css_err
@@ -10759,7 +10758,7 @@ ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool
return err;
}
-/** @brief Get the state (Enable or Disable) of the Extension stage in the
+/* @brief Get the state (Enable or Disable) of the Extension stage in the
* given pipe.
*/
enum ia_css_err
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
index 53a7891111f9..8158ea40d069 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
@@ -147,7 +147,7 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia
char *parambuf = kmalloc(paramstruct_size + configstruct_size + statestruct_size,
GFP_KERNEL);
- if (parambuf == NULL)
+ if (!parambuf)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL;
@@ -229,14 +229,15 @@ sh_css_load_firmware(const char *fw_data,
sh_css_blob_info = kmalloc(
(sh_css_num_binaries - NUM_OF_SPS) *
sizeof(*sh_css_blob_info), GFP_KERNEL);
- if (sh_css_blob_info == NULL)
+ if (!sh_css_blob_info)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
} else {
sh_css_blob_info = NULL;
}
- fw_minibuffer = kzalloc(sh_css_num_binaries * sizeof(struct fw_param), GFP_KERNEL);
- if (fw_minibuffer == NULL)
+ fw_minibuffer = kcalloc(sh_css_num_binaries, sizeof(struct fw_param),
+ GFP_KERNEL);
+ if (!fw_minibuffer)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
for (i = 0; i < sh_css_num_binaries; i++) {
@@ -295,10 +296,8 @@ void sh_css_unload_firmware(void)
}
memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw));
- if (sh_css_blob_info) {
- kfree(sh_css_blob_info);
- sh_css_blob_info = NULL;
- }
+ kfree(sh_css_blob_info);
+ sh_css_blob_info = NULL;
sh_css_num_binaries = 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
index 0bfebced63af..716d808d56db 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
@@ -80,5 +80,5 @@ enum ia_css_err sh_css_hrt_sp_wait(void)
hrt_sleep();
}
-return IA_CSS_SUCCESS;
+ return IA_CSS_SUCCESS;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
index 5b2b78f96dc5..161122e1bcbc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
@@ -188,7 +188,7 @@ enum host2sp_commands {
N_host2sp_cmd
};
-/** Enumeration used to indicate the events that are produced by
+/* Enumeration used to indicate the events that are produced by
* the SP and consumed by the Host.
*
* !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
@@ -274,10 +274,10 @@ struct sh_css_ddr_address_map_compound {
};
struct ia_css_isp_parameter_set_info {
- struct sh_css_ddr_address_map mem_map;/**< pointers to Parameters in ISP format IMPT:
+ struct sh_css_ddr_address_map mem_map;/** pointers to Parameters in ISP format IMPT:
This should be first member of this struct */
- uint32_t isp_parameters_id;/**< Unique ID to track which config was actually applied to a particular frame */
- ia_css_ptr output_frame_ptr;/**< Output frame to which this config has to be applied (optional) */
+ uint32_t isp_parameters_id;/** Unique ID to track which config was actually applied to a particular frame */
+ ia_css_ptr output_frame_ptr;/** Output frame to which this config has to be applied (optional) */
};
/* this struct contains all arguments that can be passed to
@@ -398,9 +398,9 @@ struct sh_css_sp_input_formatter_set {
/* SP configuration information */
struct sh_css_sp_config {
uint8_t no_isp_sync; /* Signal host immediately after start */
- uint8_t enable_raw_pool_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+ uint8_t enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */
uint8_t lock_all;
- /**< If raw buffer locking is enabled, this flag indicates whether raw
+ /** If raw buffer locking is enabled, this flag indicates whether raw
frames are locked when their EOF event is successfully sent to the
host (true) or when they are passed to the preview/video pipe
(false). */
@@ -458,13 +458,13 @@ struct sh_css_sp_pipeline_io {
/*struct sh_css_sp_pipeline_terminal output;*/
};
-/** This struct tracks how many streams are registered per CSI port.
+/* This struct tracks how many streams are registered per CSI port.
* This is used to track which streams have already been configured.
* Only when all streams are configured, the CSI RX is started for that port.
*/
struct sh_css_sp_pipeline_io_status {
- uint32_t active[N_INPUT_SYSTEM_CSI_PORT]; /**< registered streams */
- uint32_t running[N_INPUT_SYSTEM_CSI_PORT]; /**< configured streams */
+ uint32_t active[N_INPUT_SYSTEM_CSI_PORT]; /** registered streams */
+ uint32_t running[N_INPUT_SYSTEM_CSI_PORT]; /** configured streams */
};
#endif
@@ -500,7 +500,7 @@ enum sh_css_port_type {
#define SH_CSS_METADATA_OFFLINE_MODE 0x04
#define SH_CSS_METADATA_WAIT_INPUT 0x08
-/** @brief Free an array of metadata buffers.
+/* @brief Free an array of metadata buffers.
*
* @param[in] num_bufs Number of metadata buffers to be freed.
* @param[in] bufs Pointer of array of metadata buffers.
@@ -764,7 +764,7 @@ struct sh_css_hmm_buffer {
hrt_vaddress frame_data;
uint32_t flashed;
uint32_t exp_id;
- uint32_t isp_parameters_id; /**< Unique ID to track which config was
+ uint32_t isp_parameters_id; /** Unique ID to track which config was
actually applied to a particular frame */
#if CONFIG_ON_FRAME_ENQUEUE()
struct sh_css_config_on_frame_enqueue config_on_frame_enqueue;
@@ -961,7 +961,7 @@ struct host_sp_queues {
extern int (*sh_css_printf)(const char *fmt, va_list args);
-STORAGE_CLASS_INLINE void
+static inline void
sh_css_print(const char *fmt, ...)
{
va_list ap;
@@ -973,7 +973,7 @@ sh_css_print(const char *fmt, ...)
}
}
-STORAGE_CLASS_INLINE void
+static inline void
sh_css_vprint(const char *fmt, va_list args)
{
if (sh_css_printf)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
index e12789236bb9..4bcc35d219f8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
@@ -22,7 +22,7 @@
#include <ia_css_pipe_public.h>
#include <ia_css_stream_public.h>
-/** The pipe id type, distinguishes the kind of pipes that
+/* The pipe id type, distinguishes the kind of pipes that
* can be run in parallel.
*/
enum ia_css_pipe_id {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
index 36aaa3019a15..883474e90c81 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
@@ -321,7 +321,7 @@ calculate_mipi_buff_size(
height = stream_cfg->input_config.input_res.height;
format = stream_cfg->input_config.format;
pack_raw_pixels = stream_cfg->pack_raw_pixels;
- /** end of NOTE */
+ /* end of NOTE */
/**
#ifndef ISP2401
@@ -341,7 +341,7 @@ calculate_mipi_buff_size(
* in the non-continuous use scenario.
*/
width_padded = width + (2 * ISP_VEC_NELEMS);
- /** end of NOTE */
+ /* end of NOTE */
IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
width_padded, height, format);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
index eaf60e7b2dac..e6ebd1b08f0d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
@@ -365,10 +365,8 @@ ia_css_shading_table_alloc(
IA_CSS_ENTER("");
me = kmalloc(sizeof(*me), GFP_KERNEL);
- if (me == NULL) {
- IA_CSS_ERROR("out of memory");
+ if (!me)
return me;
- }
me->width = width;
me->height = height;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
index 48224370b8bf..fbb36112fe3c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
@@ -4266,33 +4266,33 @@ sh_css_params_write_to_ddr_internal(
size_t *virt_size_tetra_y[
IA_CSS_MORPH_TABLE_NUM_PLANES];
- virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
- virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
- virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
- virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
- virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
- virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
-
- virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
- virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
- virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
- virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
- virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
- virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
-
- virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
- virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
- virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
- virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
- virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
- virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
-
- virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
- virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
- virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
- virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
- virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
- virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
+ virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
+ virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
+ virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
+ virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
+ virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
+ virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
+
+ virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
+ virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
+ virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
+ virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
+ virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
+ virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
+
+ virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
+ virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
+ virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
+ virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
+ virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
+ virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
+
+ virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
+ virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
+ virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
+ virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
+ virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
+ virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
buff_realloced = false;
for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
index a7ffe6d8331b..270ec2b60a3e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
@@ -144,8 +144,8 @@ struct ia_css_isp_parameters {
struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM];
struct sh_css_ddr_address_map ddr_ptrs;
struct sh_css_ddr_address_map_size ddr_ptrs_size;
- struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */
- uint32_t isp_parameters_id; /**< Unique ID to track which config was actually applied to a particular frame */
+ struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */
+ uint32_t isp_parameters_id; /** Unique ID to track which config was actually applied to a particular frame */
};
void
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
index e6a345979ff1..6fc00fc402b1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
@@ -261,7 +261,7 @@ sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
assert(out_frame != NULL);
{
- /**
+ /*
* Clear sh_css_sp_stage for easy debugging.
* program_input_circuit must be saved as it is set outside
* this function.
@@ -335,7 +335,7 @@ sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
assert(out_frame != NULL);
{
- /**
+ /*
* Clear sh_css_sp_stage for easy debugging.
* program_input_circuit must be saved as it is set outside
* this function.
@@ -909,7 +909,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
xinfo = binary->info;
info = &xinfo->sp;
{
- /**
+ /*
* Clear sh_css_sp_stage for easy debugging.
* program_input_circuit must be saved as it is set outside
* this function.
@@ -980,7 +980,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0;
sh_css_isp_stage.mem_initializers = *isp_mem_if;
- /**
+ /*
* Even when a stage does not need uds and does not params,
* ia_css_uds_sp_scale_params() seems to be called (needs
* further investigation). This function can not deal with
@@ -1429,7 +1429,7 @@ sh_css_init_host2sp_frame_data(void)
}
-/**
+/*
* @brief Update the offline frame information in host_sp_communication.
* Refer to "sh_css_sp.h" for more details.
*/
@@ -1461,7 +1461,7 @@ sh_css_update_host2sp_offline_frame(
}
#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
-/**
+/*
* @brief Update the mipi frame information in host_sp_communication.
* Refer to "sh_css_sp.h" for more details.
*/
@@ -1488,7 +1488,7 @@ sh_css_update_host2sp_mipi_frame(
frame ? frame->data : 0);
}
-/**
+/*
* @brief Update the mipi metadata information in host_sp_communication.
* Refer to "sh_css_sp.h" for more details.
*/
@@ -1735,7 +1735,7 @@ ia_css_isp_has_started(void)
}
-/**
+/*
* @brief Initialize the DMA software-mask in the debug mode.
* Refer to "sh_css_sp.h" for more details.
*/
@@ -1761,7 +1761,7 @@ sh_css_sp_init_dma_sw_reg(int dma_id)
return true;
}
-/**
+/*
* @brief Set the DMA software-mask in the debug mode.
* Refer to "sh_css_sp.h" for more details.
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
index e49e478ab354..0b8e3d872069 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
@@ -61,7 +61,7 @@ struct sh_css {
#endif
hrt_vaddress sp_bin_addr;
hrt_data page_table_base_index;
- unsigned int size_mem_words; /** \deprecated{Use ia_css_mipi_buffer_config instead.}*/
+ unsigned int size_mem_words; /* \deprecated{Use ia_css_mipi_buffer_config instead.}*/
enum ia_css_irq_type irq_type;
unsigned int pipe_counter;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
index b8aae4ba5a78..a1c81c12718c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
index 5232327f5d9c..79bd540d7882 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -58,7 +54,7 @@ static unsigned int nr_to_order_bottom(unsigned int nr)
return fls(nr) - 1;
}
-struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
+static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
{
struct hmm_buffer_object *bo;
@@ -99,7 +95,7 @@ static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
return 0;
}
-struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
+static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
struct rb_node *node, unsigned int pgnr)
{
struct hmm_buffer_object *this, *ret_bo, *temp_bo;
@@ -150,7 +146,7 @@ remove_bo_and_return:
return temp_bo;
}
-struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
+static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
ia_css_ptr start)
{
struct rb_node *n = root->rb_node;
@@ -175,8 +171,8 @@ struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
return NULL;
}
-struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
- unsigned int start)
+static struct hmm_buffer_object *__bo_search_by_addr_in_range(
+ struct rb_root *root, unsigned int start)
{
struct rb_node *n = root->rb_node;
struct hmm_buffer_object *bo;
@@ -258,7 +254,7 @@ static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
rb_insert_color(&bo->node, root);
}
-struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
+static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
struct hmm_buffer_object *bo,
unsigned int pgnr)
{
@@ -331,7 +327,7 @@ static void __bo_take_off_handling(struct hmm_buffer_object *bo)
}
}
-struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
+static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
struct hmm_buffer_object *next_bo)
{
struct hmm_bo_device *bdev;
@@ -727,10 +723,8 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
GFP_KERNEL);
- if (unlikely(!bo->page_obj)) {
- dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
+ if (unlikely(!bo->page_obj))
return -ENOMEM;
- }
i = 0;
alloc_pgnr = 0;
@@ -991,15 +985,12 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
struct page **pages;
pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
- if (unlikely(!pages)) {
- dev_err(atomisp_dev, "out of memory for pages...\n");
+ if (unlikely(!pages))
return -ENOMEM;
- }
bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
GFP_KERNEL);
if (unlikely(!bo->page_obj)) {
- dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
kfree(pages);
return -ENOMEM;
}
@@ -1029,10 +1020,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
} else {
/*Handle frame buffer allocated in user space*/
mutex_unlock(&bo->mutex);
- down_read(&current->mm->mmap_sem);
- page_nr = get_user_pages((unsigned long)userptr,
- (int)(bo->pgnr), 1, pages, NULL);
- up_read(&current->mm->mmap_sem);
+ page_nr = get_user_pages_fast((unsigned long)userptr,
+ (int)(bo->pgnr), 1, pages);
mutex_lock(&bo->mutex);
bo->mem_type = HMM_BO_MEM_TYPE_USER;
}
@@ -1168,13 +1157,9 @@ status_err2:
int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
- int ret;
-
check_bo_null_return(bo, 0);
- ret = bo->status & HMM_BO_PAGE_ALLOCED;
-
- return ret;
+ return bo->status & HMM_BO_PAGE_ALLOCED;
}
/*
@@ -1366,7 +1351,6 @@ void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
if (unlikely(!pages)) {
mutex_unlock(&bo->mutex);
- dev_err(atomisp_dev, "out of memory for pages...\n");
return NULL;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
index 19e0e9ee37de..f59fd9908257 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -116,8 +112,6 @@ static void free_pages_to_dynamic_pool(void *pool,
hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
GFP_KERNEL);
if (!hmm_page) {
- dev_err(atomisp_dev, "out of memory for hmm_page.\n");
-
/* free page directly */
ret = set_pages_wb(page_obj->page, 1);
if (ret)
@@ -151,10 +145,8 @@ static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
GFP_KERNEL);
- if (unlikely(!dypool_info)) {
- dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ if (unlikely(!dypool_info))
return -ENOMEM;
- }
dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
sizeof(struct hmm_page), 0,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
index bf6586805f7f..f300e7547997 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -92,15 +88,12 @@ static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
GFP_KERNEL);
- if (unlikely(!pool_info)) {
- dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ if (unlikely(!pool_info))
return -ENOMEM;
- }
pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
GFP_KERNEL);
if (unlikely(!pool_info->pages)) {
- dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
kfree(pool_info);
return -ENOMEM;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
index 0722a68a49e7..0df96e661983 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -89,10 +85,8 @@ static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr,
struct hmm_vm_node *node;
node = kmem_cache_alloc(vm->cache, GFP_KERNEL);
- if (!node) {
- dev_err(atomisp_dev, "out of memory.\n");
+ if (!node)
return NULL;
- }
INIT_LIST_HEAD(&node->list);
node->pgnr = pgnr;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
index 46a5d29e2d3a..fb38fc540b81 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _hive_isp_css_custom_host_hrt_h_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
index 2e78976bb2ac..a94958bde718 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
index 1328944a7afd..15c2dfb6794e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
index 6b9fb1b2caaf..1e135c7c6d9b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
index dffd6e9cf693..bd44ebbc427c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
index a9446adb4c70..9e51a657ece4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
index f1593aa38ce1..00885203fb14 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
index 1ba360433d88..bf24e44462bc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __HMM_POOL_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
index 07d40662de32..52098161082d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
index 6b4eefc929e2..560014add005 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
index 06041e94cbb2..031c0398bf65 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef SH_MMU_H_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
index b9bad9f06235..662e98f41da2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
index 706bd43e8b1b..e36c2a33b41a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
index 97546bd124cd..c59bcc982966 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include "type_support.h"
diff --git a/drivers/staging/media/atomisp/platform/Makefile b/drivers/staging/media/atomisp/platform/Makefile
index df157630bda9..0e3b7e1c81c6 100644
--- a/drivers/staging/media/atomisp/platform/Makefile
+++ b/drivers/staging/media/atomisp/platform/Makefile
@@ -2,5 +2,4 @@
# Makefile for camera drivers.
#
-obj-$(CONFIG_INTEL_ATOMISP) += clock/
obj-$(CONFIG_INTEL_ATOMISP) += intel-mid/
diff --git a/drivers/staging/media/atomisp/platform/clock/Makefile b/drivers/staging/media/atomisp/platform/clock/Makefile
deleted file mode 100644
index 82fbe8b6968a..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for clock devices.
-#
-
-obj-$(CONFIG_INTEL_ATOMISP) += vlv2_plat_clock.o
-obj-$(CONFIG_INTEL_ATOMISP) += platform_vlv2_plat_clk.o
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
deleted file mode 100644
index 0aae9b0283bb..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * platform_vlv2_plat_clk.c - VLV2 platform clock driver
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/printk.h>
-
-static int __init vlv2_plat_clk_init(void)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple("vlv2_plat_clk", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- pr_err("platform_vlv2_plat_clk:register failed: %ld\n",
- PTR_ERR(pdev));
- return PTR_ERR(pdev);
- }
-
- return 0;
-}
-
-device_initcall(vlv2_plat_clk_init);
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
deleted file mode 100644
index b730ab0e8223..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * platform_vlv2_plat_clk.h: platform clock driver library header file
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-#ifndef _PLATFORM_VLV2_PLAT_CLK_H_
-#define _PLATFORM_VLV2_PLAT_CLK_H_
-
-#include <linux/sfi.h>
-#include <asm/intel-mid.h>
-
-extern void __init *vlv2_plat_clk_device_platform_data(
- void *info) __attribute__((weak));
-#endif
diff --git a/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c b/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
deleted file mode 100644
index f96789a31819..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * vlv2_plat_clock.c - VLV2 platform clock driver
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include "../../include/linux/vlv2_plat_clock.h"
-
-/* NOTE: Most of below constants could come from platform data.
- * To be fixed when appropriate ACPI support comes.
- */
-#define VLV2_PMC_CLK_BASE_ADDRESS 0xfed03060
-#define PLT_CLK_CTL_OFFSET(x) (0x04 * (x))
-
-#define CLK_CONFG_BIT_POS 0
-#define CLK_CONFG_BIT_LEN 2
-#define CLK_CONFG_D3_GATED 0
-#define CLK_CONFG_FORCE_ON 1
-#define CLK_CONFG_FORCE_OFF 2
-
-#define CLK_FREQ_TYPE_BIT_POS 2
-#define CLK_FREQ_TYPE_BIT_LEN 1
-#define CLK_FREQ_TYPE_XTAL 0 /* 25 MHz */
-#define CLK_FREQ_TYPE_PLL 1 /* 19.2 MHz */
-
-#define MAX_CLK_COUNT 5
-
-/* Helper macros to manipulate bitfields */
-#define REG_MASK(n) (((1 << (n##_BIT_LEN)) - 1) << (n##_BIT_POS))
-#define REG_SET_FIELD(r, n, v) (((r) & ~REG_MASK(n)) | \
- (((v) << (n##_BIT_POS)) & REG_MASK(n)))
-#define REG_GET_FIELD(r, n) (((r) & REG_MASK(n)) >> n##_BIT_POS)
-/*
- * vlv2 platform has 6 platform clocks, controlled by 4 byte registers
- * Total size required for mapping is 6*4 = 24 bytes
- */
-#define PMC_MAP_SIZE 24
-
-static DEFINE_MUTEX(clk_mutex);
-static void __iomem *pmc_base;
-
-/*
- * vlv2_plat_set_clock_freq - Set clock frequency to a specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- * @freq_type: Clock frequency (0-25 MHz(XTAL), 1-19.2 MHz(PLL) )
- */
-int vlv2_plat_set_clock_freq(int clk_num, int freq_type)
-{
- void __iomem *addr;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (freq_type != CLK_FREQ_TYPE_XTAL &&
- freq_type != CLK_FREQ_TYPE_PLL) {
- pr_err("wrong clock type\n");
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
-
- mutex_lock(&clk_mutex);
- writel(REG_SET_FIELD(readl(addr), CLK_FREQ_TYPE, freq_type), addr);
- mutex_unlock(&clk_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_set_clock_freq);
-
-/*
- * vlv2_plat_get_clock_freq - Get the status of specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- *
- * Returns 0 for 25 MHz(XTAL) and 1 for 19.2 MHz(PLL)
- */
-int vlv2_plat_get_clock_freq(int clk_num)
-{
- u32 ret;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- mutex_lock(&clk_mutex);
- ret = REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
- CLK_FREQ_TYPE);
- mutex_unlock(&clk_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_freq);
-
-/*
- * vlv2_plat_configure_clock - Configure the specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- * @conf: Clock gating:
- * 0 - Clock gated on D3 state
- * 1 - Force on
- * 2,3 - Force off
- */
-int vlv2_plat_configure_clock(int clk_num, u32 conf)
-{
- void __iomem *addr;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (conf != CLK_CONFG_D3_GATED &&
- conf != CLK_CONFG_FORCE_ON &&
- conf != CLK_CONFG_FORCE_OFF) {
- pr_err("Invalid clock configuration requested\n");
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
-
- mutex_lock(&clk_mutex);
- writel(REG_SET_FIELD(readl(addr), CLK_CONFG, conf), addr);
- mutex_unlock(&clk_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_configure_clock);
-
-/*
- * vlv2_plat_get_clock_status - Get the status of specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- *
- * Returns 1 - On, 0 - Off
- */
-int vlv2_plat_get_clock_status(int clk_num)
-{
- int ret;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- mutex_lock(&clk_mutex);
- ret = (int)REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
- CLK_CONFG);
- mutex_unlock(&clk_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_status);
-
-static int vlv2_plat_clk_probe(struct platform_device *pdev)
-{
- int i = 0;
-
- pmc_base = ioremap_nocache(VLV2_PMC_CLK_BASE_ADDRESS, PMC_MAP_SIZE);
- if (!pmc_base) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
- return -ENOMEM;
- }
-
- /* Initialize all clocks as disabled */
- for (i = 0; i < MAX_CLK_COUNT; i++)
- vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
-
- dev_info(&pdev->dev, "vlv2_plat_clk initialized\n");
- return 0;
-}
-
-static const struct platform_device_id vlv2_plat_clk_id[] = {
- {"vlv2_plat_clk", 0},
- {}
-};
-
-static int vlv2_resume(struct device *device)
-{
- int i;
-
- /* Initialize all clocks as disabled */
- for (i = 0; i < MAX_CLK_COUNT; i++)
- vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
-
- return 0;
-}
-
-static int vlv2_suspend(struct device *device)
-{
- return 0;
-}
-
-static const struct dev_pm_ops vlv2_pm_ops = {
- .suspend = vlv2_suspend,
- .resume = vlv2_resume,
-};
-
-static struct platform_driver vlv2_plat_clk_driver = {
- .probe = vlv2_plat_clk_probe,
- .id_table = vlv2_plat_clk_id,
- .driver = {
- .name = "vlv2_plat_clk",
- .pm = &vlv2_pm_ops,
- },
-};
-
-static int __init vlv2_plat_clk_init(void)
-{
- return platform_driver_register(&vlv2_plat_clk_driver);
-}
-arch_initcall(vlv2_plat_clk_init);
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/Makefile b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
index 4621261c35db..c53db1364e21 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/Makefile
+++ b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
@@ -1,5 +1,4 @@
#
# Makefile for intel-mid devices.
#
-obj-$(CONFIG_INTEL_ATOMISP) += intel_mid_pcihelpers.o
obj-$(CONFIG_INTEL_ATOMISP) += atomisp_gmin_platform.o
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
index edaae93af8f9..bf9f34b7ad72 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
@@ -4,10 +4,10 @@
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <media/v4l2-subdev.h>
#include <linux/mfd/intel_soc_pmic.h>
-#include "../../include/linux/vlv2_plat_clock.h"
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio.h>
@@ -17,11 +17,7 @@
#define MAX_SUBDEVS 8
-/* Should be defined in vlv2_plat_clock API, isn't: */
-#define VLV2_CLK_PLL_19P2MHZ 1
-#define VLV2_CLK_XTAL_19P2MHZ 0
-#define VLV2_CLK_ON 1
-#define VLV2_CLK_OFF 2
+#define VLV2_CLK_PLL_19P2MHZ 1 /* XTAL on CHT */
#define ELDO1_SEL_REG 0x19
#define ELDO1_1P8V 0x16
#define ELDO1_CTRL_SHIFT 0x00
@@ -33,6 +29,8 @@ struct gmin_subdev {
struct v4l2_subdev *subdev;
int clock_num;
int clock_src;
+ bool clock_on;
+ struct clk *pmc_clk;
struct gpio_desc *gpio0;
struct gpio_desc *gpio1;
struct regulator *v1p8_reg;
@@ -108,49 +106,6 @@ const struct atomisp_platform_data *atomisp_get_platform_data(void)
}
EXPORT_SYMBOL_GPL(atomisp_get_platform_data);
-static int af_power_ctrl(struct v4l2_subdev *subdev, int flag)
-{
- struct gmin_subdev *gs = find_gmin_subdev(subdev);
-
- if (gs && gs->v2p8_vcm_on == flag)
- return 0;
- gs->v2p8_vcm_on = flag;
-
- /*
- * The power here is used for dw9817,
- * regulator is from rear sensor
- */
- if (gs->v2p8_vcm_reg) {
- if (flag)
- return regulator_enable(gs->v2p8_vcm_reg);
- else
- return regulator_disable(gs->v2p8_vcm_reg);
- }
- return 0;
-}
-
-/*
- * Used in a handful of modules. Focus motor control, I think. Note
- * that there is no configurability in the API, so this needs to be
- * fixed where it is used.
- *
- * struct camera_af_platform_data {
- * int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
- * };
- *
- * Note that the implementation in MCG platform_camera.c is stubbed
- * out anyway (i.e. returns zero from the callback) on BYT. So
- * neither needed on gmin platforms or supported upstream.
- */
-const struct camera_af_platform_data *camera_get_af_platform_data(void)
-{
- static struct camera_af_platform_data afpd = {
- .power_ctrl = af_power_ctrl,
- };
- return &afpd;
-}
-EXPORT_SYMBOL_GPL(camera_get_af_platform_data);
-
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
struct camera_sensor_platform_data *plat_data,
enum intel_v4l2_subdev_type type)
@@ -334,15 +289,8 @@ static const struct {
#define CFG_VAR_NAME_MAX 64
-static int gmin_platform_init(struct i2c_client *client)
-{
- return 0;
-}
-
-static int gmin_platform_deinit(void)
-{
- return 0;
-}
+#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
+static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
{
@@ -377,21 +325,42 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
- if (!IS_ERR(gmin_subdevs[i].gpio0)) {
- ret = gpiod_direction_output(gmin_subdevs[i].gpio0, 0);
- if (ret)
- dev_err(dev, "gpio0 set output failed: %d\n", ret);
- } else {
- gmin_subdevs[i].gpio0 = NULL;
+ /* get PMC clock with clock framework */
+ snprintf(gmin_pmc_clk_name,
+ sizeof(gmin_pmc_clk_name),
+ "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num);
+
+ gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
+ if (IS_ERR(gmin_subdevs[i].pmc_clk)) {
+ ret = PTR_ERR(gmin_subdevs[i].pmc_clk);
+
+ dev_err(dev,
+ "Failed to get clk from %s : %d\n",
+ gmin_pmc_clk_name,
+ ret);
+
+ return NULL;
}
- if (!IS_ERR(gmin_subdevs[i].gpio1)) {
- ret = gpiod_direction_output(gmin_subdevs[i].gpio1, 0);
- if (ret)
- dev_err(dev, "gpio1 set output failed: %d\n", ret);
- } else {
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+ * clock framework restrictions that do not allow
+ * to disable a clock that has not been enabled,
+ * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk);
+ if (!ret)
+ clk_disable_unprepare(gmin_subdevs[i].pmc_clk);
+
+ if (IS_ERR(gmin_subdevs[i].gpio0))
+ gmin_subdevs[i].gpio0 = NULL;
+
+ if (IS_ERR(gmin_subdevs[i].gpio1))
gmin_subdevs[i].gpio1 = NULL;
- }
if (pmic_id == PMIC_REGULATOR) {
gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX");
@@ -539,13 +508,27 @@ static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+
+ if (gs->clock_on == !!on)
+ return 0;
+
+ if (on) {
+ ret = clk_set_rate(gs->pmc_clk, gs->clock_src);
+
+ if (ret)
+ dev_err(&client->dev, "unable to set PMC rate %d\n",
+ gs->clock_src);
+
+ ret = clk_prepare_enable(gs->pmc_clk);
+ if (ret == 0)
+ gs->clock_on = true;
+ } else {
+ clk_disable_unprepare(gs->pmc_clk);
+ gs->clock_on = false;
+ }
- if (on)
- ret = vlv2_plat_set_clock_freq(gs->clock_num, gs->clock_src);
- if (ret)
- return ret;
- return vlv2_plat_configure_clock(gs->clock_num,
- on ? VLV2_CLK_ON : VLV2_CLK_OFF);
+ return ret;
}
static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
@@ -592,8 +575,6 @@ static struct camera_sensor_platform_data gmin_plat = {
.v2p8_ctrl = gmin_v2p8_ctrl,
.v1p2_ctrl = gmin_v1p2_ctrl,
.flisclk_ctrl = gmin_flisclk_ctrl,
- .platform_init = gmin_platform_init,
- .platform_deinit = gmin_platform_deinit,
.csi_cfg = gmin_csi_cfg,
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
@@ -739,10 +720,8 @@ int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
if (flag) {
csi = kzalloc(sizeof(*csi), GFP_KERNEL);
- if (!csi) {
- dev_err(&client->dev, "out of memory\n");
+ if (!csi)
return -ENOMEM;
- }
csi->port = port;
csi->num_lanes = lanes;
csi->input_format = format;
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c b/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
deleted file mode 100644
index 4631b1d39bb4..000000000000
--- a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
+++ /dev/null
@@ -1,298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/pm_qos.h>
-#include <linux/delay.h>
-
-/* G-Min addition: "platform_is()" lives in intel_mid_pm.h in the MCG
- * tree, but it's just platform ID info and we don't want to pull in
- * the whole SFI-based PM architecture.
- */
-#define INTEL_ATOM_MRST 0x26
-#define INTEL_ATOM_MFLD 0x27
-#define INTEL_ATOM_CLV 0x35
-#define INTEL_ATOM_MRFLD 0x4a
-#define INTEL_ATOM_BYT 0x37
-#define INTEL_ATOM_MOORFLD 0x5a
-#define INTEL_ATOM_CHT 0x4c
-/* synchronization for sharing the I2C controller */
-#define PUNIT_PORT 0x04
-#define PUNIT_DOORBELL_OPCODE (0xE0)
-#define PUNIT_DOORBELL_REG (0x0)
-#ifndef CSTATE_EXIT_LATENCY
-#define CSTATE_EXIT_LATENCY_C1 1
-#endif
-static inline int platform_is(u8 model)
-{
- return (boot_cpu_data.x86_model == model);
-}
-
-#include "../../include/asm/intel_mid_pcihelpers.h"
-
-/* Unified message bus read/write operation */
-static DEFINE_SPINLOCK(msgbus_lock);
-
-static struct pci_dev *pci_root;
-static struct pm_qos_request pm_qos;
-
-#define DW_I2C_NEED_QOS (platform_is(INTEL_ATOM_BYT))
-
-static int intel_mid_msgbus_init(void)
-{
- pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
- if (!pci_root) {
- pr_err("%s: Error: msgbus PCI handle NULL\n", __func__);
- return -ENODEV;
- }
-
- if (DW_I2C_NEED_QOS) {
- pm_qos_add_request(&pm_qos,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
- }
- return 0;
-}
-fs_initcall(intel_mid_msgbus_init);
-
-u32 intel_mid_msgbus_read32_raw(u32 cmd)
-{
- unsigned long irq_flags;
- u32 data;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32_raw);
-
-/*
- * GU: this function is only used by the VISA and 'VXD' drivers.
- */
-u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext)
-{
- unsigned long irq_flags;
- u32 data;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32_raw_ext);
-
-void intel_mid_msgbus_write32_raw(u32 cmd, u32 data)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32_raw);
-
-/*
- * GU: this function is only used by the VISA and 'VXD' drivers.
- */
-void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32_raw_ext);
-
-u32 intel_mid_msgbus_read32(u8 port, u32 addr)
-{
- unsigned long irq_flags;
- u32 data;
- u32 cmd;
- u32 cmdext;
-
- cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
- ((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = addr & 0xffffff00;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
-
- if (cmdext) {
- /* This resets to 0 automatically, no need to write 0 */
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
- cmdext);
- }
-
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32);
-
-void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
-{
- unsigned long irq_flags;
- u32 cmd;
- u32 cmdext;
-
- cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
- ((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = addr & 0xffffff00;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
-
- if (cmdext) {
- /* This resets to 0 automatically, no need to write 0 */
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
- cmdext);
- }
-
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32);
-
-/* called only from where is later then fs_initcall */
-u32 intel_mid_soc_stepping(void)
-{
- return pci_root->revision;
-}
-EXPORT_SYMBOL(intel_mid_soc_stepping);
-
-static bool is_south_complex_device(struct pci_dev *dev)
-{
- unsigned int base_class = dev->class >> 16;
- unsigned int sub_class = (dev->class & SUB_CLASS_MASK) >> 8;
-
- /* other than camera, pci bridges and display,
- * everything else are south complex devices.
- */
- if (((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
- (sub_class == ISP_SUB_CLASS)) ||
- (base_class == PCI_BASE_CLASS_BRIDGE) ||
- ((base_class == PCI_BASE_CLASS_DISPLAY) && !sub_class))
- return false;
- else
- return true;
-}
-
-/* In BYT platform, d3_delay for internal south complex devices,
- * they are not subject to 10 ms d3 to d0 delay required by pci spec.
- */
-static void pci_d3_delay_fixup(struct pci_dev *dev)
-{
- if (platform_is(INTEL_ATOM_BYT) ||
- platform_is(INTEL_ATOM_CHT)) {
- /* All internal devices are in bus 0. */
- if (dev->bus->number == 0 && is_south_complex_device(dev)) {
- dev->d3_delay = INTERNAL_PCI_PM_D3_WAIT;
- dev->d3cold_delay = INTERNAL_PCI_PM_D3_WAIT;
- }
- }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3_delay_fixup);
-
-#define PUNIT_SEMAPHORE (platform_is(INTEL_ATOM_BYT) ? 0x7 : 0x10E)
-#define GET_SEM() (intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE) & 0x1)
-
-static void reset_semaphore(void)
-{
- u32 data;
-
- data = intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE);
- smp_mb();
- data = data & 0xfffffffc;
- intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, data);
- smp_mb();
-
-}
-
-int intel_mid_dw_i2c_acquire_ownership(void)
-{
- u32 ret = 0;
- u32 data = 0; /* data sent to PUNIT */
- u32 cmd;
- u32 cmdext;
- int timeout = 1000;
-
- if (DW_I2C_NEED_QOS)
- pm_qos_update_request(&pm_qos, CSTATE_EXIT_LATENCY_C1 - 1);
-
- /*
- * We need disable irq. Otherwise, the main thread
- * might be preempted and the other thread jumps to
- * disable irq for a long time. Another case is
- * some irq handlers might trigger power voltage change
- */
- BUG_ON(irqs_disabled());
- local_irq_disable();
-
- /* host driver writes 0x2 to side band register 0x7 */
- intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, 0x2);
- smp_mb();
-
- /* host driver sends 0xE0 opcode to PUNIT and writes 0 register */
- cmd = (PUNIT_DOORBELL_OPCODE << 24) | (PUNIT_PORT << 16) |
- ((PUNIT_DOORBELL_REG & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = PUNIT_DOORBELL_REG & 0xffffff00;
-
- if (cmdext)
- intel_mid_msgbus_write32_raw_ext(cmd, cmdext, data);
- else
- intel_mid_msgbus_write32_raw(cmd, data);
-
- /* host driver waits for bit 0 to be set in side band 0x7 */
- while (GET_SEM() != 0x1) {
- udelay(100);
- timeout--;
- if (timeout <= 0) {
- pr_err("Timeout: semaphore timed out, reset sem\n");
- ret = -ETIMEDOUT;
- reset_semaphore();
- /*Delay 1ms in case race with punit*/
- udelay(1000);
- if (GET_SEM() != 0) {
- /*Reset again as kernel might race with punit*/
- reset_semaphore();
- }
- pr_err("PUNIT SEM: %d\n",
- intel_mid_msgbus_read32(PUNIT_PORT,
- PUNIT_SEMAPHORE));
- local_irq_enable();
-
- if (DW_I2C_NEED_QOS) {
- pm_qos_update_request(&pm_qos,
- PM_QOS_DEFAULT_VALUE);
- }
-
- return ret;
- }
- }
- smp_mb();
-
- return ret;
-}
-EXPORT_SYMBOL(intel_mid_dw_i2c_acquire_ownership);
-
-int intel_mid_dw_i2c_release_ownership(void)
-{
- reset_semaphore();
- local_irq_enable();
-
- if (DW_I2C_NEED_QOS)
- pm_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
-
- return 0;
-}
-EXPORT_SYMBOL(intel_mid_dw_i2c_release_ownership);
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index c2bb5ef2acb4..9e41987f9884 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -320,9 +320,10 @@ static int prp_link_validate(struct v4l2_subdev *sd,
* the ->PRPENC link cannot be enabled if the source
* is the VDIC
*/
- if (priv->sink_sd_prpenc)
+ if (priv->sink_sd_prpenc) {
ret = -EINVAL;
- goto out;
+ goto out;
+ }
} else {
/* the source is a CSI */
if (!csi) {
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 0790b3d9e255..143038c6c403 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -293,9 +293,9 @@ static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void prp_eof_timeout(unsigned long data)
+static void prp_eof_timeout(struct timer_list *t)
{
- struct prp_priv *priv = (struct prp_priv *)data;
+ struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
struct imx_ic_priv *ic_priv = priv->ic_priv;
@@ -1292,8 +1292,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
priv->ic_priv = ic_priv;
spin_lock_init(&priv->irqlock);
- setup_timer(&priv->eof_timeout_timer, prp_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
PRPENCVF_SRC_PAD);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 6d856118c223..bb1d6dafca83 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -254,9 +254,9 @@ static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void csi_idmac_eof_timeout(unsigned long data)
+static void csi_idmac_eof_timeout(struct timer_list *t)
{
- struct csi_priv *priv = (struct csi_priv *)data;
+ struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
v4l2_err(&priv->sd, "EOF timeout\n");
@@ -1739,8 +1739,7 @@ static int imx_csi_probe(struct platform_device *pdev)
priv->csi_id = pdata->csi;
priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
- setup_timer(&priv->eof_timeout_timer, csi_idmac_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
spin_lock_init(&priv->irqlock);
v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index b55e5ebba8b4..47c4c954fed5 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -440,6 +440,11 @@ unlock:
return media_device_register(&imxmd->md);
}
+static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
+
/*
* adds controls to a video device from an entity subdevice.
* Continues upstream from the entity's sink pads.
@@ -608,8 +613,7 @@ static int imx_media_probe(struct platform_device *pdev)
/* prepare the async subdev notifier and register it */
imxmd->subdev_notifier.subdevs = imxmd->async_ptrs;
- imxmd->subdev_notifier.bound = imx_media_subdev_bound;
- imxmd->subdev_notifier.complete = imx_media_probe_complete;
+ imxmd->subdev_notifier.ops = &imx_media_subdev_ops;
ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
&imxmd->subdev_notifier);
if (ret) {
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 71af13bd0ebd..6bd0717bf76e 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -99,13 +99,14 @@ struct IR {
struct kref ref;
struct list_head list;
- /* FIXME spinlock access to l.features */
- struct lirc_driver l;
+ /* FIXME spinlock access to l->features */
+ struct lirc_dev *l;
struct lirc_buffer rbuf;
struct mutex ir_lock;
atomic_t open_count;
+ struct device *dev;
struct i2c_adapter *adapter;
spinlock_t rx_ref_lock; /* struct IR_rx kref get()/put() */
@@ -183,10 +184,8 @@ static void release_ir_device(struct kref *ref)
* ir->open_count == 0 - happens on final close()
* ir_lock, tx_ref_lock, rx_ref_lock, all released
*/
- if (ir->l.minor >= 0) {
- lirc_unregister_driver(ir->l.minor);
- ir->l.minor = -1;
- }
+ if (ir->l)
+ lirc_unregister_device(ir->l);
if (kfifo_initialized(&ir->rbuf.fifo))
lirc_buffer_free(&ir->rbuf);
@@ -243,7 +242,7 @@ static void release_ir_rx(struct kref *ref)
* and releasing the ir reference can cause a sleep. That work is
* performed by put_ir_rx()
*/
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_REC_LIRCCODE;
/* Don't put_ir_device(rx->ir) here; lock can't be freed yet */
ir->rx = NULL;
/* Don't do the kfree(rx) here; we still need to kill the poll thread */
@@ -288,7 +287,7 @@ static void release_ir_tx(struct kref *ref)
struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
struct IR *ir = tx->ir;
- ir->l.features &= ~LIRC_CAN_SEND_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_SEND_LIRCCODE;
/* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
ir->tx = NULL;
kfree(tx);
@@ -317,12 +316,12 @@ static int add_to_buf(struct IR *ir)
int ret;
int failures = 0;
unsigned char sendbuf[1] = { 0 };
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
struct IR_rx *rx;
struct IR_tx *tx;
if (lirc_buffer_full(rbuf)) {
- dev_dbg(ir->l.dev, "buffer overflow\n");
+ dev_dbg(ir->dev, "buffer overflow\n");
return -EOVERFLOW;
}
@@ -368,17 +367,17 @@ static int add_to_buf(struct IR *ir)
*/
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
- dev_err(ir->l.dev, "i2c_master_send failed with %d\n",
+ dev_err(ir->dev, "i2c_master_send failed with %d\n",
ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"unable to read from the IR chip after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"polling the IR receiver chip failed, trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -405,14 +404,14 @@ static int add_to_buf(struct IR *ir)
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"i2c_master_recv failed with %d -- keeping last read buffer\n",
ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
- dev_dbg(ir->l.dev,
+ dev_dbg(ir->dev,
"key (0x%02x/0x%02x)\n",
rx->b[0], rx->b[1]);
}
@@ -463,9 +462,9 @@ static int add_to_buf(struct IR *ir)
static int lirc_thread(void *arg)
{
struct IR *ir = arg;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
- dev_dbg(ir->l.dev, "poll thread started\n");
+ dev_dbg(ir->dev, "poll thread started\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -493,7 +492,7 @@ static int lirc_thread(void *arg)
wake_up_interruptible(&rbuf->wait_poll);
}
- dev_dbg(ir->l.dev, "poll thread ended\n");
+ dev_dbg(ir->dev, "poll thread ended\n");
return 0;
}
@@ -646,10 +645,10 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
buf[0] = (unsigned char)(i + 1);
for (j = 0; j < tosend; ++j)
buf[1 + j] = data_block[i + j];
- dev_dbg(tx->ir->l.dev, "%*ph", 5, buf);
+ dev_dbg(tx->ir->dev, "%*ph", 5, buf);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -674,7 +673,7 @@ static int send_boot_data(struct IR_tx *tx)
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -691,22 +690,22 @@ static int send_boot_data(struct IR_tx *tx)
}
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return 0;
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
- dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX init response: %02x\n",
buf[0]);
return 0;
}
- dev_notice(tx->ir->l.dev,
+ dev_notice(tx->ir->dev,
"Zilog/Hauppauge IR blaster firmware version %d.%d.%d loaded\n",
buf[1], buf[2], buf[3]);
@@ -751,15 +750,15 @@ static int fw_load(struct IR_tx *tx)
}
/* Request codeset data file */
- ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
+ ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->dev);
if (ret != 0) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"firmware haup-ir-blaster.bin not available (%d)\n",
ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
- dev_dbg(tx->ir->l.dev, "firmware of size %zu loaded\n", fw_entry->size);
+ dev_dbg(tx->ir->dev, "firmware of size %zu loaded\n", fw_entry->size);
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
@@ -787,7 +786,7 @@ static int fw_load(struct IR_tx *tx)
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"unsupported code set file version (%u, expected 1) -- please upgrade to a newer driver\n",
version);
fw_unload_locked();
@@ -804,7 +803,7 @@ static int fw_load(struct IR_tx *tx)
&tx_data->num_code_sets))
goto corrupt;
- dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
+ dev_dbg(tx->ir->dev, "%u IR blaster codesets loaded\n",
tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
@@ -869,7 +868,7 @@ static int fw_load(struct IR_tx *tx)
goto out;
corrupt:
- dev_err(tx->ir->l.dev, "firmware is corrupt\n");
+ dev_err(tx->ir->dev, "firmware is corrupt\n");
fw_unload_locked();
ret = -EFAULT;
@@ -882,16 +881,16 @@ out:
static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
loff_t *ppos)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
int ret = 0, written = 0, retries = 0;
unsigned int m;
DECLARE_WAITQUEUE(wait, current);
- dev_dbg(ir->l.dev, "read called\n");
+ dev_dbg(ir->dev, "read called\n");
if (n % rbuf->chunk_size) {
- dev_dbg(ir->l.dev, "read result = -EINVAL\n");
+ dev_dbg(ir->dev, "read result = -EINVAL\n");
return -EINVAL;
}
@@ -935,7 +934,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned char buf[MAX_XFER_SIZE];
if (rbuf->chunk_size > sizeof(buf)) {
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"chunk_size is too big (%d)!\n",
rbuf->chunk_size);
ret = -EINVAL;
@@ -950,7 +949,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
retries++;
}
if (retries >= 5) {
- dev_err(ir->l.dev, "Buffer read failed!\n");
+ dev_err(ir->dev, "Buffer read failed!\n");
ret = -EIO;
}
}
@@ -960,7 +959,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
- dev_dbg(ir->l.dev, "read result = %d (%s)\n", ret,
+ dev_dbg(ir->dev, "read result = %d (%s)\n", ret,
ret ? "Error" : "OK");
return ret ? ret : written;
@@ -977,7 +976,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"failed to get data for code %u, key %u -- check lircd.conf entries\n",
code, key);
return ret;
@@ -995,7 +994,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x40;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1008,18 +1007,18 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
}
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
if (buf[0] != 0xA0) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #1: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX response #1: %02x\n",
buf[0]);
return -EFAULT;
}
@@ -1029,7 +1028,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x80;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1039,7 +1038,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
* going to skip this whole mess and say we're done on the HD PVR
*/
if (!tx->post_tx_ready_poll) {
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1055,12 +1054,12 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
- dev_dbg(tx->ir->l.dev,
+ dev_dbg(tx->ir->dev,
"NAK expected: i2c_master_send failed with %d (try %d)\n",
ret, i + 1);
}
if (ret != 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"IR TX chip never got ready: last i2c_master_send failed with %d\n",
ret);
return ret < 0 ? ret : -EFAULT;
@@ -1069,17 +1068,17 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
/* Seems to be an 'ok' response */
i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return -EFAULT;
}
if (buf[0] != 0x80) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX response #2: %02x\n",
buf[0]);
return -EFAULT;
}
/* Oh good, it worked */
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1092,7 +1091,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
static ssize_t write(struct file *filep, const char __user *buf, size_t n,
loff_t *ppos)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_tx *tx;
size_t i;
int failures = 0;
@@ -1165,11 +1164,11 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"sending to the IR transmitter chip failed, trying reset\n");
if (failures >= 3) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"unable to send to the IR chip after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
@@ -1200,12 +1199,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
/* copied from lirc_dev */
static unsigned int poll(struct file *filep, poll_table *wait)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
unsigned int ret;
- dev_dbg(ir->l.dev, "%s called\n", __func__);
+ dev_dbg(ir->dev, "%s called\n", __func__);
rx = get_ir_rx(ir);
if (!rx) {
@@ -1213,7 +1212,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
- dev_dbg(ir->l.dev, "%s result = POLLERR\n", __func__);
+ dev_dbg(ir->dev, "%s result = POLLERR\n", __func__);
return POLLERR;
}
@@ -1226,19 +1225,19 @@ static unsigned int poll(struct file *filep, poll_table *wait)
/* Indicate what ops could happen immediately without blocking */
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN | POLLRDNORM);
- dev_dbg(ir->l.dev, "%s result = %s\n", __func__,
+ dev_dbg(ir->dev, "%s result = %s\n", __func__,
ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
unsigned long __user *uptr = (unsigned long __user *)arg;
int result;
unsigned long mode, features;
- features = ir->l.features;
+ features = ir->l->features;
switch (cmd) {
case LIRC_GET_LENGTH:
@@ -1283,46 +1282,18 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return result;
}
-static struct IR *get_ir_device_by_minor(unsigned int minor)
-{
- struct IR *ir;
- struct IR *ret = NULL;
-
- mutex_lock(&ir_devices_lock);
-
- if (!list_empty(&ir_devices_list)) {
- list_for_each_entry(ir, &ir_devices_list, list) {
- if (ir->l.minor == minor) {
- ret = get_ir_device(ir, true);
- break;
- }
- }
- }
-
- mutex_unlock(&ir_devices_lock);
- return ret;
-}
-
/*
- * Open the IR device. Get hold of our IR structure and
- * stash it in private_data for the file
+ * Open the IR device.
*/
static int open(struct inode *node, struct file *filep)
{
struct IR *ir;
- unsigned int minor = MINOR(node->i_rdev);
- /* find our IR struct */
- ir = get_ir_device_by_minor(minor);
-
- if (!ir)
- return -ENODEV;
+ lirc_init_pdata(node, filep);
+ ir = lirc_get_pdata(filep);
atomic_inc(&ir->open_count);
- /* stash our IR struct */
- filep->private_data = ir;
-
nonseekable_open(node, filep);
return 0;
}
@@ -1330,14 +1301,7 @@ static int open(struct inode *node, struct file *filep)
/* Close the IR device */
static int close(struct inode *node, struct file *filep)
{
- /* find our IR struct */
- struct IR *ir = filep->private_data;
-
- if (!ir) {
- pr_err("ir: %s: no private_data attached to the file!\n",
- __func__);
- return -ENODEV;
- }
+ struct IR *ir = lirc_get_pdata(filep);
atomic_dec(&ir->open_count);
@@ -1383,16 +1347,6 @@ static const struct file_operations lirc_fops = {
.release = close
};
-static struct lirc_driver lirc_template = {
- .name = "lirc_zilog",
- .minor = -1,
- .code_length = 13,
- .buffer_size = BUFLEN / 2,
- .chunk_size = 2,
- .fops = &lirc_fops,
- .owner = THIS_MODULE,
-};
-
static int ir_remove(struct i2c_client *client)
{
if (strncmp("ir_tx_z8", client->name, 8) == 0) {
@@ -1476,27 +1430,42 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
list_add_tail(&ir->list, &ir_devices_list);
ir->adapter = adap;
+ ir->dev = &adap->dev;
mutex_init(&ir->ir_lock);
atomic_set(&ir->open_count, 0);
spin_lock_init(&ir->tx_ref_lock);
spin_lock_init(&ir->rx_ref_lock);
/* set lirc_dev stuff */
- memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+ ir->l = lirc_allocate_device();
+ if (!ir->l) {
+ ret = -ENOMEM;
+ goto out_put_ir;
+ }
+
+ snprintf(ir->l->name, sizeof(ir->l->name), "lirc_zilog");
+ ir->l->code_length = 13;
+ ir->l->fops = &lirc_fops;
+ ir->l->owner = THIS_MODULE;
+ ir->l->dev.parent = &adap->dev;
+
/*
* FIXME this is a pointer reference to us, but no refcount.
*
* This OK for now, since lirc_dev currently won't touch this
* buffer as we provide our own lirc_fops.
*
- * Currently our own lirc_fops rely on this ir->l.rbuf pointer
+ * Currently our own lirc_fops rely on this ir->l->buf pointer
*/
- ir->l.rbuf = &ir->rbuf;
- ir->l.dev = &adap->dev;
- ret = lirc_buffer_init(ir->l.rbuf,
- ir->l.chunk_size, ir->l.buffer_size);
- if (ret)
+ ir->l->buf = &ir->rbuf;
+ /* This will be returned by lirc_get_pdata() */
+ ir->l->data = ir;
+ ret = lirc_buffer_init(ir->l->buf, 2, BUFLEN / 2);
+ if (ret) {
+ lirc_free_device(ir->l);
+ ir->l = NULL;
goto out_put_ir;
+ }
}
if (tx_probe) {
@@ -1512,7 +1481,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
kref_init(&tx->ref);
ir->tx = tx;
- ir->l.features |= LIRC_CAN_SEND_LIRCCODE;
+ ir->l->features |= LIRC_CAN_SEND_LIRCCODE;
mutex_init(&tx->client_lock);
tx->c = client;
tx->need_boot = 1;
@@ -1538,7 +1507,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Rx client is also ready or not needed */
if (!rx && !tx_only) {
- dev_info(tx->ir->l.dev,
+ dev_info(tx->ir->dev,
"probe of IR Tx on %s (i2c-%d) done. Waiting on IR Rx.\n",
adap->name, adap->nr);
goto out_ok;
@@ -1556,7 +1525,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
kref_init(&rx->ref);
ir->rx = rx;
- ir->l.features |= LIRC_CAN_REC_LIRCCODE;
+ ir->l->features |= LIRC_CAN_REC_LIRCCODE;
mutex_init(&rx->client_lock);
rx->c = client;
rx->hdpvr_data_fmt =
@@ -1578,7 +1547,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"%s: could not start IR Rx polling thread\n",
__func__);
/* Failed kthread, so put back the ir ref */
@@ -1586,7 +1555,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Failure exit, so put back rx ref from i2c_client */
i2c_set_clientdata(client, NULL);
put_ir_rx(rx, true);
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_REC_LIRCCODE;
goto out_put_tx;
}
@@ -1599,17 +1568,19 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
/* register with lirc */
- ir->l.minor = lirc_register_driver(&ir->l);
- if (ir->l.minor < 0) {
- dev_err(tx->ir->l.dev,
- "%s: lirc_register_driver() failed: %i\n",
- __func__, ir->l.minor);
- ret = -EBADRQC;
+ ret = lirc_register_device(ir->l);
+ if (ret < 0) {
+ dev_err(tx->ir->dev,
+ "%s: lirc_register_device() failed: %i\n",
+ __func__, ret);
+ lirc_free_device(ir->l);
+ ir->l = NULL;
goto out_put_xx;
}
- dev_info(ir->l.dev,
+
+ dev_info(ir->dev,
"IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
- adap->name, adap->nr, ir->l.minor);
+ adap->name, adap->nr, ir->l->minor);
out_ok:
if (rx)
@@ -1617,7 +1588,7 @@ out_ok:
if (tx)
put_ir_tx(tx, true);
put_ir_device(ir, true);
- dev_info(ir->l.dev,
+ dev_info(ir->dev,
"probe of IR %s on %s (i2c-%d) done\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 85775da293fb..667dacac81f0 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -744,9 +744,9 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
* The handler runs in interrupt context. That's why we need to defer the
* tasks to a work queue.
*/
-static void link_stat_timer_handler(unsigned long data)
+static void link_stat_timer_handler(struct timer_list *t)
{
- struct most_dev *mdev = (struct most_dev *)data;
+ struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
@@ -1138,8 +1138,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
num_endpoints = usb_iface_desc->desc.bNumEndpoints;
mutex_init(&mdev->io_mutex);
INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
- setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
- (unsigned long)mdev);
+ timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
mdev->usb_device = usb_dev;
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 068aece25d37..cded30f145aa 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -394,7 +394,7 @@ struct octeon_hcd {
result = -1; \
break; \
} else \
- cvmx_wait(100); \
+ __delay(100); \
} \
} while (0); \
result; })
@@ -774,7 +774,7 @@ retry:
usbn_clk_ctl.s.hclk_rst = 1;
cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
/* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
- cvmx_wait(64);
+ __delay(64);
/*
* 3. Program the power-on reset field in the USBN clock-control
* register:
@@ -795,7 +795,7 @@ retry:
cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
usbn_usbp_ctl_status.u64);
/* 6. Wait 10 cycles */
- cvmx_wait(10);
+ __delay(10);
/*
* 7. Clear ATE_RESET field in the USBN clock-control register:
* USBN_USBP_CTL_STATUS[ATE_RESET] = 0
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index d946838450d4..2a205c6173dc 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -762,32 +762,15 @@ abort:
static long
pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- int err = 0;
int retval = 0;
struct pi433_instance *instance;
struct pi433_device *device;
- u32 tmp;
+ void __user *argp = (void __user *)arg;
/* Check type and command number */
if (_IOC_TYPE(cmd) != PI433_IOC_MAGIC)
return -ENOTTY;
- /* Check access direction once here; don't repeat below.
- * IOC_DIR is from the user perspective, while access_ok is
- * from the kernel perspective; so they look reversed.
- */
- if (_IOC_DIR(cmd) & _IOC_READ)
- err = !access_ok(VERIFY_WRITE,
- (void __user *)arg,
- _IOC_SIZE(cmd));
-
- if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
- err = !access_ok(VERIFY_READ,
- (void __user *)arg,
- _IOC_SIZE(cmd));
- if (err)
- return -EFAULT;
-
/* TODO? guard against device removal before, or while,
* we issue this ioctl. --> device_get()
*/
@@ -799,78 +782,33 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case PI433_IOC_RD_TX_CFG:
- tmp = _IOC_SIZE(cmd);
- if ((tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0)) {
- retval = -EINVAL;
- break;
- }
-
- if (__copy_to_user((void __user *)arg,
- &instance->tx_cfg,
- tmp))
- {
- retval = -EFAULT;
- break;
- }
-
+ if (copy_to_user(argp, &instance->tx_cfg,
+ sizeof(struct pi433_tx_cfg)))
+ return -EFAULT;
break;
case PI433_IOC_WR_TX_CFG:
- tmp = _IOC_SIZE(cmd);
- if ((tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0)) {
- retval = -EINVAL;
- break;
- }
-
- if (__copy_from_user(&instance->tx_cfg,
- (void __user *)arg,
- tmp))
- {
- retval = -EFAULT;
- break;
- }
-
+ if (copy_from_user(&instance->tx_cfg, argp,
+ sizeof(struct pi433_tx_cfg)))
+ return -EFAULT;
break;
-
case PI433_IOC_RD_RX_CFG:
- tmp = _IOC_SIZE(cmd);
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) {
- retval = -EINVAL;
- break;
- }
-
- if (__copy_to_user((void __user *)arg,
- &device->rx_cfg,
- tmp))
- {
- retval = -EFAULT;
- break;
- }
-
+ if (copy_to_user(argp, &device->rx_cfg,
+ sizeof(struct pi433_rx_cfg)))
+ return -EFAULT;
break;
case PI433_IOC_WR_RX_CFG:
- tmp = _IOC_SIZE(cmd);
mutex_lock(&device->rx_lock);
/* during pendig read request, change of config not allowed */
if (device->rx_active) {
- retval = -EAGAIN;
- mutex_unlock(&device->rx_lock);
- break;
- }
-
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) {
- retval = -EINVAL;
mutex_unlock(&device->rx_lock);
- break;
+ return -EAGAIN;
}
- if (__copy_from_user(&device->rx_cfg,
- (void __user *)arg,
- tmp))
- {
- retval = -EFAULT;
+ if (copy_from_user(&device->rx_cfg, argp,
+ sizeof(struct pi433_rx_cfg))) {
mutex_unlock(&device->rx_lock);
- break;
+ return -EFAULT;
}
mutex_unlock(&device->rx_lock);
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index e69a2153c999..12c9df9cddde 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
currentValue = READ_REG(REG_DATAMODUL);
- switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define
+ switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
default: return undefined;
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index c0664dc80bf2..446310775e90 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
len = pcur_bss->Ssid.SsidLength;
-
- wrqu->essid.length = len;
-
memcpy(extra, pcur_bss->Ssid.Ssid, len);
-
- wrqu->essid.flags = 1;
} else {
- ret = -1;
- goto exit;
+ len = 0;
+ *extra = 0;
}
-
-exit:
-
+ wrqu->essid.length = len;
+ wrqu->essid.flags = 1;
return ret;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 4e7908322d77..f56fdc7a4b61 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -391,10 +391,10 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
}
-static void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(struct timer_list *t)
{
struct ieee80211_device *ieee =
- (struct ieee80211_device *) _ieee;
+ from_timer(ieee, t, beacon_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock, flags);
@@ -1251,9 +1251,11 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(struct timer_list *t)
{
- ieee80211_associate_abort((struct ieee80211_device *) dev);
+ struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
+
+ ieee80211_associate_abort(dev);
}
@@ -2718,11 +2720,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->enable_rx_imm_BA = true;
ieee->tx_pending.txb = NULL;
- setup_timer(&ieee->associate_timer, ieee80211_associate_abort_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->associate_timer, ieee80211_associate_abort_cb, 0);
- setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 576c15d25a0f..986a55bb9877 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -138,17 +138,16 @@ _recv_indicatepkt_drop:
precvpriv->rx_drop++;
}
-static void _r8712_reordering_ctrl_timeout_handler (unsigned long data)
+static void _r8712_reordering_ctrl_timeout_handler (struct timer_list *t)
{
struct recv_reorder_ctrl *preorder_ctrl =
- (struct recv_reorder_ctrl *)data;
+ from_timer(preorder_ctrl, t, reordering_ctrl_timer);
r8712_reordering_ctrl_timeout_handler(preorder_ctrl);
}
void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- setup_timer(&preorder_ctrl->reordering_ctrl_timer,
- _r8712_reordering_ctrl_timeout_handler,
- (unsigned long)preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ _r8712_reordering_ctrl_timeout_handler, 0);
}
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index da1d4a641dcd..455fba721135 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -74,7 +74,7 @@ enum _LED_STATE_871x {
* Prototype of protected function.
*===========================================================================
*/
-static void BlinkTimerCallback(unsigned long data);
+static void BlinkTimerCallback(struct timer_list *t);
static void BlinkWorkItemCallback(struct work_struct *work);
/*===========================================================================
@@ -99,8 +99,7 @@ static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
pLed->bLedBlinkInProgress = false;
pLed->BlinkTimes = 0;
pLed->BlinkingLedState = LED_UNKNOWN;
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
- (unsigned long)pLed);
+ timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
@@ -825,9 +824,9 @@ static void SwLedBlink6(struct LED_871x *pLed)
* Callback function of LED BlinkTimer,
* it just schedules to corresponding BlinkWorkItem.
*/
-static void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
/* This fixed the crash problem on Fedora 12 when trying to do the
* insmod;ifconfig up;rmmod commands.
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 16497202473f..aae868509e13 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1164,7 +1164,7 @@ static void spkup_write(const u16 *in_buf, int count)
static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);
static void read_all_doc(struct vc_data *vc);
-static void cursor_done(u_long data);
+static void cursor_done(struct timer_list *unused);
static DEFINE_TIMER(cursor_timer, cursor_done);
static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
@@ -1682,7 +1682,7 @@ static int speak_highlight(struct vc_data *vc)
return 0;
}
-static void cursor_done(u_long data)
+static void cursor_done(struct timer_list *unused)
{
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 6ddd3fc3f08d..aac29c816d09 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -153,7 +153,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
}
EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart);
-static void thread_wake_up(u_long data)
+static void thread_wake_up(struct timer_list *unused)
{
wake_up_interruptible_all(&speakup_event);
}
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index b604d0cccef1..6cb6eb0673c6 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -493,9 +493,9 @@ static const struct file_operations bus_info_debugfs_fops = {
.release = single_release,
};
-static void dev_periodic_work(unsigned long __opaque)
+static void dev_periodic_work(struct timer_list *t)
{
- struct visor_device *dev = (struct visor_device *)__opaque;
+ struct visor_device *dev = from_timer(dev, t, timer);
struct visor_driver *drv = to_visor_driver(dev->device.driver);
drv->channel_interrupt(dev);
@@ -667,7 +667,7 @@ int create_visor_device(struct visor_device *dev)
dev->device.release = visorbus_release_device;
/* keep a reference just for us (now 2) */
get_device(&dev->device);
- setup_timer(&dev->timer, dev_periodic_work, (unsigned long)dev);
+ timer_setup(&dev->timer, dev_periodic_work, 0);
/*
* bus_id must be a unique name with respect to this bus TYPE (NOT bus
* instance). That's why we need to include the bus number within the
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 735d7e5fa86b..6d8239163ba5 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1766,9 +1766,10 @@ static int visornic_poll(struct napi_struct *napi, int budget)
* Main function of the vnic_incoming thread. Periodically check the response
* queue and drain it if needed.
*/
-static void poll_for_irq(unsigned long v)
+static void poll_for_irq(struct timer_list *t)
{
- struct visornic_devdata *devdata = (struct visornic_devdata *)v;
+ struct visornic_devdata *devdata = from_timer(devdata, t,
+ irq_poll_timer);
if (!visorchannel_signalempty(
devdata->dev->visorchannel,
@@ -1899,8 +1900,7 @@ static int visornic_probe(struct visor_device *dev)
/* Let's start our threads to get responses */
netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
- setup_timer(&devdata->irq_poll_timer, poll_for_irq,
- (unsigned long)devdata);
+ timer_setup(&devdata->irq_poll_timer, poll_for_irq, 0);
/* Note: This time has to start running before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 6f08dc966719..b265fe924556 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -377,7 +377,7 @@ static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index eea2d78b0ec6..315b49c1de3b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -90,8 +90,7 @@ static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);
static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task);
+create_pagelist(char __user *buf, size_t count, unsigned short type);
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
@@ -255,8 +254,7 @@ vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
pagelistinfo = create_pagelist((char __user *)offset, size,
(dir == VCHIQ_BULK_RECEIVE)
? PAGELIST_READ
- : PAGELIST_WRITE,
- current);
+ : PAGELIST_WRITE);
if (!pagelistinfo)
return VCHIQ_ERROR;
@@ -395,8 +393,7 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
*/
static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task)
+create_pagelist(char __user *buf, size_t count, unsigned short type)
{
PAGELIST_T *pagelist;
struct vchiq_pagelist_info *pagelistinfo;
@@ -476,14 +473,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
}
/* do not try and release vmalloc pages */
} else {
- down_read(&task->mm->mmap_sem);
- actual_pages = get_user_pages(
- (unsigned long)buf & PAGE_MASK,
+ actual_pages = get_user_pages_fast(
+ (unsigned long)buf & PAGE_MASK,
num_pages,
- (type == PAGELIST_READ) ? FOLL_WRITE : 0,
- pages,
- NULL /*vmas */);
- up_read(&task->mm->mmap_sem);
+ type == PAGELIST_READ,
+ pages);
if (actual_pages != num_pages) {
vchiq_log_info(vchiq_arm_log_level,
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 8a275996d4e6..028da1dc1b81 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -267,7 +267,7 @@ static void update_scan_time(void)
last_scanned_shadow[i].time_scan = jiffies;
}
-static void remove_network_from_shadow(unsigned long unused)
+static void remove_network_from_shadow(struct timer_list *unused)
{
unsigned long now = jiffies;
int i, j;
@@ -292,7 +292,7 @@ static void remove_network_from_shadow(unsigned long unused)
}
}
-static void clear_duringIP(unsigned long arg)
+static void clear_duringIP(struct timer_list *unused)
{
wilc_optaining_ip = false;
}
@@ -2278,8 +2278,8 @@ int wilc_init_host_int(struct net_device *net)
priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
- setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
- setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
+ timer_setup(&hAgingTimer, remove_network_from_shadow, 0);
+ timer_setup(&wilc_during_ip_timer, clear_duringIP, 0);
}
op_ifcs++;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index 90388698c222..417b9e66b0cd 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
CSK_LOGIN_PDU_DONE,
CSK_LOGIN_DONE,
CSK_DDP_ENABLE,
+ CSK_ABORT_RPL_WAIT,
};
struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index d4fa41be80f9..92eb57e2adaf 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ __kfree_skb(skb);
+
+ if (csk->com.state != CSK_STATE_ESTABLISHED)
+ goto no_abort;
+
+ set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+ csk->com.state = CSK_STATE_ABORTING;
+
+ cxgbit_send_abort_req(csk);
+
+ return;
+
+no_abort:
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+ cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+ __skb_queue_tail(&csk->backlogq, skb);
+ } else {
+ __cxgbit_abort_conn(csk, skb);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+ csk->tid, 600, __func__);
+}
+
void cxgbit_free_conn(struct iscsi_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_ABORTING:
csk->com.state = CSK_STATE_DEAD;
+ if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+ cxgbit_wake_up(&csk->com.wr_wait, __func__,
+ rpl->status);
cxgbit_put_csk(csk);
break;
default:
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 5fdb57cac968..768cce0ccb80 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ /* Abort the TCP conn if DDP is not complete to
+ * avoid any possibility of DDP after freeing
+ * the cmd.
+ */
+ if (unlikely(cmd->write_data_done !=
+ cmd->se_cmd.data_length))
+ cxgbit_abort_conn(csk);
+
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 4fd775ace541..f3f8856bfb68 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
+ /* fall through */
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9e67c7678c86..9eb10d34682c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -502,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
EXPORT_SYMBOL(iscsit_aborted_task);
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, u8 *, u8 *);
+ u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
static int
@@ -523,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -550,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
data_buf, data_buf_len,
- padding,
- (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
+ padding, &cmd->pad_bytes,
+ &cmd->data_crc);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -597,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -836,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
unsigned char *buf)
{
struct iscsi_conn *conn;
+ const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -866,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
* Perform the kref_put now if se_cmd has already been setup by
* scsit_setup_scsi_cmd()
*/
- if (cmd->se_cmd.se_tfo != NULL) {
+ if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(&cmd->se_cmd);
}
@@ -1410,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
return data_crc;
}
-static void iscsit_do_crypto_hash_buf(
- struct ahash_request *hash,
- const void *buf,
- u32 payload_length,
- u32 padding,
- u8 *pad_bytes,
- u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+ const void *buf, u32 payload_length, u32 padding,
+ const void *pad_bytes, void *data_crc)
{
struct scatterlist sg[2];
@@ -1462,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
iscsit_mod_dataout_timer(cmd);
if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
- pr_err("DataOut Offset: %u, Length %u greater than"
- " iSCSI Command EDTL %u, protocol error.\n",
- hdr->offset, payload_length, cmd->se_cmd.data_length);
+ pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+ be32_to_cpu(hdr->offset), payload_length,
+ cmd->se_cmd.data_length);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
@@ -1878,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- ping_data, payload_length,
- padding, cmd->pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+ payload_length, padding,
+ cmd->pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
@@ -1962,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
- bool sess_ref = false;
u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
@@ -1995,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
- if (!cmd->tmr_req)
+ if (!cmd->tmr_req) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
+ }
+
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+ target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
- target_get_sess_cmd(&cmd->se_cmd, true);
- sess_ref = true;
tcm_function = iscsit_convert_tmf(function);
if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
@@ -2101,12 +2096,14 @@ attach:
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
+ }
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
@@ -2126,12 +2123,8 @@ attach:
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
- if (sess_ref) {
- pr_debug("Handle TMR, using sess_ref=true check\n");
- target_put_sess_cmd(&cmd->se_cmd);
- }
-
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2287,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, payload_length,
- padding, (u8 *)&pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+ payload_length, padding,
+ &pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
@@ -3978,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
return;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- buffer, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&checksum);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
+ ISCSI_HDR_LEN, 0, NULL,
+ &checksum);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0dd4c45f7575..0ebc4818e132 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
- return NULL;
+ goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
kfree(tpg);
return NULL;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 76184094a0cf..5efa42b939a1 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -34,7 +34,7 @@
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
-#define OFFLOAD_BUF_SIZE 32768
+#define OFFLOAD_BUF_SIZE 32768U
/*
* Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
- length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+ length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
memset(&iov, 0, sizeof(struct kvec));
while (offset < buf_len) {
- size = ((offset + length) > buf_len) ?
- (buf_len - offset) : length;
+ size = min(buf_len - offset, length);
iov.iov_len = size;
iov.iov_base = buf;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index caab1045742d..29a37b242d30 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
char *key, *value;
struct iscsi_param *param;
- if (iscsi_extract_key_value(start, &key, &value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_extract_key_value(start, &key, &value) < 0)
+ goto free_buffer;
pr_debug("Got key: %s=%s\n", key, value);
@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(
param = iscsi_check_key(key, phase, sender, param_list);
if (!param) {
- if (iscsi_add_notunderstood_response(key,
- value, param_list) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_add_notunderstood_response(key, value,
+ param_list) < 0)
+ goto free_buffer;
+
start += strlen(key) + strlen(value) + 2;
continue;
}
- if (iscsi_check_value(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_value(param, value) < 0)
+ goto free_buffer;
start += strlen(key) + strlen(value) + 2;
if (IS_PSTATE_PROPOSER(param)) {
- if (iscsi_check_proposer_state(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_proposer_state(param, value) < 0)
+ goto free_buffer;
+
SET_PSTATE_RESPONSE_GOT(param);
} else {
- if (iscsi_check_acceptor_state(param, value, conn) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_acceptor_state(param, value, conn) < 0)
+ goto free_buffer;
+
SET_PSTATE_ACCEPTOR(param);
}
}
kfree(tmpbuf);
return 0;
+
+free_buffer:
+ kfree(tmpbuf);
+ return -1;
}
int iscsi_encode_text_output(
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index e446a09c886b..f65e5e584212 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -25,8 +25,6 @@
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
-#define OFFLOAD_BUF_SIZE 32768
-
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 594d07a1e995..4b34f71547c6 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
- goto out;
+ goto free_pl_out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
- goto out;
+ goto free_pl_out;
tpg->tpg_attrib.authentication = 0;
@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
+free_pl_out:
+ iscsi_release_param_list(tpg->param_list);
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
if (!tpg)
return;
+ iscsi_release_param_list(tpg->param_list);
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 54f20f184dd6..4435bf374d2d 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -695,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
struct iscsi_session *sess;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->conn)
sess = cmd->conn->sess;
else
@@ -717,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
struct iscsi_conn *conn = cmd->conn;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
iscsit_free_r2ts_from_list(cmd);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 928127642574..e46ca968009c 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
{
unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
- char path[ALUA_METADATA_PATH_LEN];
+ char *path;
int len, rc;
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
return -ENOMEM;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
- snprintf(path, ALUA_METADATA_PATH_LEN,
- "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
- config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
- rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = -ENOMEM;
+ path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+ &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+ if (path) {
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+ }
kfree(md_buf);
return rc;
}
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
struct se_portal_group *se_tpg = lun->lun_tpg;
unsigned char *md_buf;
- char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ char *path;
int len, rc;
mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
goto out_unlock;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
- memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
- len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
- if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
- snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
- db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
- lun->unpacked_lun);
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+ lun->unpacked_lun);
+ } else {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ lun->unpacked_lun);
+ }
+ if (!path) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+out_free:
kfree(md_buf);
-
out_unlock:
mutex_unlock(&lun->lun_tg_pt_md_mutex);
return rc;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 1902cb5c3b52..fc9637cce825 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -72,15 +72,6 @@
*/
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN 512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN 256
/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bd87cc26c6e5..72b1cd1bf9d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
- {Opt_mapped_lun, "mapped_lun=%lld"},
+ {Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
- {Opt_target_lun, "target_lun=%lld"},
+ {Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
}
break;
case Opt_sa_res_key:
- ret = kstrtoull(args->from, 0, &tmp_ll);
+ ret = match_u64(args, &tmp_ll);
if (ret < 0) {
pr_err("kstrtoull() failed for sa_res_key=\n");
goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- mapped_lun = (u64)arg;
+ mapped_lun = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
goto out;
break;
case Opt_target_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- target_lun = (u64)arg;
+ target_lun = (u64)tmp_ll;
break;
default:
break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e9e917cc6441..e1416b007aa4 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
NULL,
};
-extern struct configfs_item_operations target_core_dev_item_ops;
-
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c629817a8854..9b2c0c773022 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
struct inode *inode = file->f_mapping->host;
int ret;
+ if (!nolb) {
+ return 0;
+ }
+
if (cmd->se_dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_unmap(cmd, lba, nolb);
if (ret)
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 18e3eb16e756..9384d19a7326 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -89,6 +89,7 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data);
/* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
void target_setup_backend_cits(struct target_backend *);
/* target_core_fabric_configfs.c */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index dd2cd8048582..b024613f9217 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!pr_reg->isid_present_at_reg)
+ if (!pr_reg->isid_present_at_reg) {
buf[0] = '\0';
+ return;
+ }
snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
int flags = O_RDWR | O_CREAT | O_TRUNC;
- char path[512];
+ char *path;
u32 pr_aptpl_buf_len;
int ret;
loff_t pos = 0;
- memset(path, 0, 512);
-
- if (strlen(&wwn->unit_serial[0]) >= 512) {
- pr_err("WWN value for struct se_device does not fit"
- " into path buffer\n");
- return -EMSGSIZE;
- }
+ path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+ &wwn->unit_serial[0]);
+ if (!path)
+ return -ENOMEM;
- snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
+ kfree(path);
return PTR_ERR(file);
}
@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
fput(file);
+ kfree(path);
return (ret < 0) ? -EIO : 0;
}
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
} else {
/*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
*/
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ after_iport_check:
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ after_iport_check:
core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
- transport_kunmap_data_sg(cmd);
-
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
put_unaligned_be32(desc_len, &buf[off]);
+ off += 4;
/*
* Size of full desctipor header minus TransportID
* containing $FABRIC_MOD specific) initiator device/port
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e22847bd79b9..9c7bc1ca341a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
spin_unlock(&se_cmd->t_state_lock);
return false;
}
+ if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+ if (se_cmd->scsi_status) {
+ pr_debug("Attempted to abort io tag: %llu early failure"
+ " status: 0x%02x\n", se_cmd->tag,
+ se_cmd->scsi_status);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ }
if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del_init(&tmr->tmr_list);
+ if (tmr)
+ list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
cmd = tmr_p->task_cmd;
if (!cmd) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 836d552b0385..58caacd54a3b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
-static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
if (remove && ack_kref)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
return ret;
}
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
{
int ret = 0, post_ret = 0;
- if (transport_check_aborted_status(cmd, 1))
- return;
-
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
+
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
* callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (transport_check_aborted_status(cmd, 1))
+ return;
+
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
- sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
+ cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
}
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto queue_full;
- goto check_stop;
+
+ goto queue_status;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ check_stop:
transport_cmd_check_stop_to_fabric(cmd);
return;
+queue_status:
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (!ret)
+ goto check_stop;
queue_full:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
}
cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
+ cmd->transport_state |= CMD_T_SENT;
+
__target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
+ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
restart:
target_restart_delayed_cmds(dev);
}
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ queue_rsp:
goto queue_full;
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
cmd->t_bidi_data_nents = 0;
}
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd: command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd);
-}
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret;
+ bool stop;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (stop) {
+ pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+ complete_all(&cmd->t_transport_stop_comp);
+ return;
+ }
ret = cmd->se_tfo->write_pending(cmd);
if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
} else {
if (wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
transport_lun_remove_cmd(cmd);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
}
/*
* If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
ret = -ESHUTDOWN;
goto out;
}
+ se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
.key = NOT_READY,
.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
},
+ [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+ /*
+ * From spc4r22 section5.7.7,5.7.8
+ * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+ * or a REGISTER AND IGNORE EXISTING KEY service action or
+ * REGISTER AND MOVE service actionis attempted,
+ * but there are insufficient device server resources to complete the
+ * operation, then the command shall be terminated with CHECK CONDITION
+ * status, with the sense key set to ILLEGAL REQUEST,and the additonal
+ * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+ */
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x55,
+ .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+ },
};
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9469695f5871..a415d87f22d2 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -150,6 +150,8 @@ struct tcmu_dev {
wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
+
+ int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int cmd_id;
tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- if (udev->cmd_time_out)
- tcmu_cmd->deadline = jiffies +
- msecs_to_jiffies(udev->cmd_time_out);
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
return NULL;
}
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&udev->commands_lock);
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
- USHRT_MAX, GFP_NOWAIT);
- spin_unlock_irq(&udev->commands_lock);
- idr_preload_end();
-
- if (cmd_id < 0) {
- tcmu_free_cmd(tcmu_cmd);
- return NULL;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
return tcmu_cmd;
}
@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned long tmo = udev->cmd_time_out;
+ int cmd_id;
+
+ if (tcmu_cmd->cmd_id)
+ return 0;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ return cmd_id;
+ }
+ tcmu_cmd->cmd_id = cmd_id;
+
+ if (!tmo)
+ return 0;
+
+ tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+ mod_timer(&udev->timeout, tcmu_cmd->deadline);
+ return 0;
+}
+
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/* Handle allocating space from the data area */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
}
entry->req.iov_bidi_cnt = iov_cnt;
+ ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ if (ret) {
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ mutex_unlock(&udev->cmdr_lock);
+ return TCM_OUT_OF_RESOURCES;
+ }
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
/*
* Recalaulate the command's base size and size according
* to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
- struct se_device *se_dev = se_cmd->se_dev;
- struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
sense_reason_t ret;
@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
ret = tcmu_queue_cmd_ring(tcmu_cmd);
if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
- spin_lock_irq(&udev->commands_lock);
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
- spin_unlock_irq(&udev->commands_lock);
tcmu_free_cmd(tcmu_cmd);
}
@@ -1044,9 +1055,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
return 0;
}
-static void tcmu_device_timedout(unsigned long data)
+static void tcmu_device_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = (struct tcmu_dev *)data;
+ struct tcmu_dev *udev = from_timer(udev, t, timeout);
unsigned long flags;
spin_lock_irqsave(&udev->commands_lock, flags);
@@ -1106,12 +1117,13 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
idr_init(&udev->commands);
spin_lock_init(&udev->commands_lock);
- setup_timer(&udev->timeout, tcmu_device_timedout,
- (unsigned long)udev);
+ timer_setup(&udev->timeout, tcmu_device_timedout, 0);
init_waitqueue_head(&udev->nl_cmd_wq);
spin_lock_init(&udev->nl_cmd_lock);
+ INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
return &udev->se_dev;
}
@@ -1280,10 +1292,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
kfree(udev);
}
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+ int i;
+ struct page *page;
+
+ /* Try to release all block pages */
+ mutex_lock(&udev->cmdr_lock);
+ for (i = 0; i <= udev->dbi_max; i++) {
+ page = radix_tree_delete(&udev->data_blocks, i);
+ if (page) {
+ __free_page(page);
+ atomic_dec(&global_db_count);
+ }
+ }
+ mutex_unlock(&udev->cmdr_lock);
+}
+
static void tcmu_dev_kref_release(struct kref *kref)
{
struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
struct se_device *dev = &udev->se_dev;
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ int i;
+
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+
+ /* Upper layer should drain all requests before calling this */
+ spin_lock_irq(&udev->commands_lock);
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
+ idr_destroy(&udev->commands);
+ spin_unlock_irq(&udev->commands_lock);
+ WARN_ON(!all_expired);
+
+ tcmu_blocks_release(udev);
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
@@ -1306,6 +1362,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
if (!tcmu_kern_cmd_reply_supported)
return;
+
+ if (udev->nl_reply_supported <= 0)
+ return;
+
relock:
spin_lock(&udev->nl_cmd_lock);
@@ -1332,6 +1392,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
if (!tcmu_kern_cmd_reply_supported)
return 0;
+ if (udev->nl_reply_supported <= 0)
+ return 0;
+
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
@@ -1476,8 +1539,6 @@ static int tcmu_configure_device(struct se_device *dev)
WARN_ON(udev->data_size % PAGE_SIZE);
WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
@@ -1506,6 +1567,12 @@ static int tcmu_configure_device(struct se_device *dev)
dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
+ /* If user didn't explicitly disable netlink reply support, use
+ * module scope setting.
+ */
+ if (udev->nl_reply_supported >= 0)
+ udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
/*
* Get a ref incase userspace does a close on the uio device before
* LIO has initiated tcmu_free_device.
@@ -1527,6 +1594,7 @@ err_netlink:
uio_unregister_device(&udev->uio_info);
err_register:
vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
err_vzalloc:
kfree(info->name);
info->name = NULL;
@@ -1534,37 +1602,11 @@ err_vzalloc:
return ret;
}
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
- kmem_cache_free(tcmu_cmd_cache, cmd);
- return 0;
- }
- return -EINVAL;
-}
-
static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
return udev->uio_info.uio_dev ? true : false;
}
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
- int i;
- struct page *page;
-
- /* Try to release all block pages */
- mutex_lock(&udev->cmdr_lock);
- for (i = 0; i <= udev->dbi_max; i++) {
- page = radix_tree_delete(&udev->data_blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
- }
- mutex_unlock(&udev->cmdr_lock);
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1618,6 @@ static void tcmu_free_device(struct se_device *dev)
static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- struct tcmu_cmd *cmd;
- bool all_expired = true;
- int i;
del_timer_sync(&udev->timeout);
@@ -1586,20 +1625,6 @@ static void tcmu_destroy_device(struct se_device *dev)
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
- vfree(udev->mb_addr);
-
- /* Upper layer should drain all requests before calling this */
- spin_lock_irq(&udev->commands_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
- if (tcmu_check_and_free_pending_cmd(cmd) != 0)
- all_expired = false;
- }
- idr_destroy(&udev->commands);
- spin_unlock_irq(&udev->commands_lock);
- WARN_ON(!all_expired);
-
- tcmu_blocks_release(udev);
-
tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
uio_unregister_device(&udev->uio_info);
@@ -1610,7 +1635,7 @@ static void tcmu_destroy_device(struct se_device *dev)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_err,
+ Opt_nl_reply_supported, Opt_err,
};
static match_table_t tokens = {
@@ -1618,6 +1643,7 @@ static match_table_t tokens = {
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_err, NULL}
};
@@ -1692,6 +1718,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
ret = tcmu_set_dev_attrib(&args[0],
&(dev->dev_attrib.hw_max_sectors));
break;
+ case Opt_nl_reply_supported:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
+ kfree(arg_p);
+ if (ret < 0)
+ pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ break;
default:
break;
}
@@ -1734,8 +1771,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
- struct tcmu_dev *udev = container_of(da->da_dev,
- struct tcmu_dev, se_dev);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
@@ -1842,6 +1878,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR(tcmu_, dev_size);
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s8 val;
+ int ret;
+
+ ret = kstrtos8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ udev->nl_reply_supported = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
char *page)
{
@@ -1884,6 +1948,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
+ &tcmu_attr_nl_reply_supported,
NULL,
};
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 7952357df9c8..edb6e4e9ef3a 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -590,7 +590,6 @@ static int __init optee_driver_init(void)
return -ENODEV;
np = of_find_matching_node(fw_np, optee_match);
- of_node_put(fw_np);
if (!np)
return -ENODEV;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 07002df4f83a..315ae2926e20 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -206,6 +206,7 @@ config HISI_THERMAL
config IMX_THERMAL
tristate "Temperature sensor driver for Freescale i.MX SoCs"
depends on (ARCH_MXC && CPU_THERMAL) || COMPILE_TEST
+ depends on NVMEM || !NVMEM
depends on MFD_SYSCON
depends on OF
help
@@ -408,7 +409,7 @@ config MTK_THERMAL
controller present in Mediatek SoCs
menu "Broadcom thermal drivers"
-depends on ARCH_BCM || COMPILE_TEST
+depends on ARCH_BCM || ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
source "drivers/thermal/broadcom/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 195cd08fbc30..610344eb3e03 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -55,7 +55,7 @@ obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
obj-$(CONFIG_QCOM_TSENS) += qcom/
-obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/
+obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index ae75328945f7..706d74798cbe 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -58,7 +58,7 @@ struct armada_thermal_data {
/* Test for a valid sensor value (optional) */
bool (*is_valid)(struct armada_thermal_priv *);
- /* Formula coeficients: temp = (b + m * reg) / div */
+ /* Formula coeficients: temp = (b - m * reg) / div */
unsigned long coef_b;
unsigned long coef_m;
unsigned long coef_div;
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig
index 42c098e86f84..c106a15bf7f9 100644
--- a/drivers/thermal/broadcom/Kconfig
+++ b/drivers/thermal/broadcom/Kconfig
@@ -6,6 +6,13 @@ config BCM2835_THERMAL
help
Support for thermal sensors on Broadcom bcm2835 SoCs.
+config BRCMSTB_THERMAL
+ tristate "Broadcom STB AVS TMON thermal driver"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ Enable this driver if you have a Broadcom STB SoC and would like
+ thermal framework support.
+
config BCM_NS_THERMAL
tristate "Northstar thermal driver"
depends on ARCH_BCM_IPROC || COMPILE_TEST
diff --git a/drivers/thermal/broadcom/Makefile b/drivers/thermal/broadcom/Makefile
index c6f62e4fd0ee..fae10ecafaef 100644
--- a/drivers/thermal/broadcom/Makefile
+++ b/drivers/thermal/broadcom/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_BCM2835_THERMAL) += bcm2835_thermal.o
+obj-$(CONFIG_BRCMSTB_THERMAL) += brcmstb_thermal.o
obj-$(CONFIG_BCM_NS_THERMAL) += ns-thermal.o
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
new file mode 100644
index 000000000000..1919f91fa756
--- /dev/null
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -0,0 +1,387 @@
+/*
+ * Broadcom STB AVS TMON thermal sensor driver
+ *
+ * Copyright (c) 2015-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRV_NAME "brcmstb_thermal"
+
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/thermal.h>
+
+#define AVS_TMON_STATUS 0x00
+ #define AVS_TMON_STATUS_valid_msk BIT(11)
+ #define AVS_TMON_STATUS_data_msk GENMASK(10, 1)
+ #define AVS_TMON_STATUS_data_shift 1
+
+#define AVS_TMON_EN_OVERTEMP_RESET 0x04
+ #define AVS_TMON_EN_OVERTEMP_RESET_msk BIT(0)
+
+#define AVS_TMON_RESET_THRESH 0x08
+ #define AVS_TMON_RESET_THRESH_msk GENMASK(10, 1)
+ #define AVS_TMON_RESET_THRESH_shift 1
+
+#define AVS_TMON_INT_IDLE_TIME 0x10
+
+#define AVS_TMON_EN_TEMP_INT_SRCS 0x14
+ #define AVS_TMON_EN_TEMP_INT_SRCS_high BIT(1)
+ #define AVS_TMON_EN_TEMP_INT_SRCS_low BIT(0)
+
+#define AVS_TMON_INT_THRESH 0x18
+ #define AVS_TMON_INT_THRESH_high_msk GENMASK(26, 17)
+ #define AVS_TMON_INT_THRESH_high_shift 17
+ #define AVS_TMON_INT_THRESH_low_msk GENMASK(10, 1)
+ #define AVS_TMON_INT_THRESH_low_shift 1
+
+#define AVS_TMON_TEMP_INT_CODE 0x1c
+#define AVS_TMON_TP_TEST_ENABLE 0x20
+
+/* Default coefficients */
+#define AVS_TMON_TEMP_SLOPE -487
+#define AVS_TMON_TEMP_OFFSET 410040
+
+/* HW related temperature constants */
+#define AVS_TMON_TEMP_MAX 0x3ff
+#define AVS_TMON_TEMP_MIN -88161
+#define AVS_TMON_TEMP_MASK AVS_TMON_TEMP_MAX
+
+enum avs_tmon_trip_type {
+ TMON_TRIP_TYPE_LOW = 0,
+ TMON_TRIP_TYPE_HIGH,
+ TMON_TRIP_TYPE_RESET,
+ TMON_TRIP_TYPE_MAX,
+};
+
+struct avs_tmon_trip {
+ /* HW bit to enable the trip */
+ u32 enable_offs;
+ u32 enable_mask;
+
+ /* HW field to read the trip temperature */
+ u32 reg_offs;
+ u32 reg_msk;
+ int reg_shift;
+};
+
+static struct avs_tmon_trip avs_tmon_trips[] = {
+ /* Trips when temperature is below threshold */
+ [TMON_TRIP_TYPE_LOW] = {
+ .enable_offs = AVS_TMON_EN_TEMP_INT_SRCS,
+ .enable_mask = AVS_TMON_EN_TEMP_INT_SRCS_low,
+ .reg_offs = AVS_TMON_INT_THRESH,
+ .reg_msk = AVS_TMON_INT_THRESH_low_msk,
+ .reg_shift = AVS_TMON_INT_THRESH_low_shift,
+ },
+ /* Trips when temperature is above threshold */
+ [TMON_TRIP_TYPE_HIGH] = {
+ .enable_offs = AVS_TMON_EN_TEMP_INT_SRCS,
+ .enable_mask = AVS_TMON_EN_TEMP_INT_SRCS_high,
+ .reg_offs = AVS_TMON_INT_THRESH,
+ .reg_msk = AVS_TMON_INT_THRESH_high_msk,
+ .reg_shift = AVS_TMON_INT_THRESH_high_shift,
+ },
+ /* Automatically resets chip when above threshold */
+ [TMON_TRIP_TYPE_RESET] = {
+ .enable_offs = AVS_TMON_EN_OVERTEMP_RESET,
+ .enable_mask = AVS_TMON_EN_OVERTEMP_RESET_msk,
+ .reg_offs = AVS_TMON_RESET_THRESH,
+ .reg_msk = AVS_TMON_RESET_THRESH_msk,
+ .reg_shift = AVS_TMON_RESET_THRESH_shift,
+ },
+};
+
+struct brcmstb_thermal_priv {
+ void __iomem *tmon_base;
+ struct device *dev;
+ struct thermal_zone_device *thermal;
+};
+
+static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
+ int *offset)
+{
+ *slope = thermal_zone_get_slope(tz);
+ *offset = thermal_zone_get_offset(tz);
+}
+
+/* Convert a HW code to a temperature reading (millidegree celsius) */
+static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+ u32 code)
+{
+ const int val = code & AVS_TMON_TEMP_MASK;
+ int slope, offset;
+
+ avs_tmon_get_coeffs(tz, &slope, &offset);
+
+ return slope * val + offset;
+}
+
+/*
+ * Convert a temperature value (millidegree celsius) to a HW code
+ *
+ * @temp: temperature to convert
+ * @low: if true, round toward the low side
+ */
+static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
+ int temp, bool low)
+{
+ int slope, offset;
+
+ if (temp < AVS_TMON_TEMP_MIN)
+ return AVS_TMON_TEMP_MAX; /* Maximum code value */
+
+ avs_tmon_get_coeffs(tz, &slope, &offset);
+
+ if (temp >= offset)
+ return 0; /* Minimum code value */
+
+ if (low)
+ return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
+ else
+ return (u32)((offset - temp) / abs(slope));
+}
+
+static int brcmstb_get_temp(void *data, int *temp)
+{
+ struct brcmstb_thermal_priv *priv = data;
+ u32 val;
+ long t;
+
+ val = __raw_readl(priv->tmon_base + AVS_TMON_STATUS);
+
+ if (!(val & AVS_TMON_STATUS_valid_msk)) {
+ dev_err(priv->dev, "reading not valid\n");
+ return -EIO;
+ }
+
+ val = (val & AVS_TMON_STATUS_data_msk) >> AVS_TMON_STATUS_data_shift;
+
+ t = avs_tmon_code_to_temp(priv->thermal, val);
+ if (t < 0)
+ *temp = 0;
+ else
+ *temp = t;
+
+ return 0;
+}
+
+static void avs_tmon_trip_enable(struct brcmstb_thermal_priv *priv,
+ enum avs_tmon_trip_type type, int en)
+{
+ struct avs_tmon_trip *trip = &avs_tmon_trips[type];
+ u32 val = __raw_readl(priv->tmon_base + trip->enable_offs);
+
+ dev_dbg(priv->dev, "%sable trip, type %d\n", en ? "en" : "dis", type);
+
+ if (en)
+ val |= trip->enable_mask;
+ else
+ val &= ~trip->enable_mask;
+
+ __raw_writel(val, priv->tmon_base + trip->enable_offs);
+}
+
+static int avs_tmon_get_trip_temp(struct brcmstb_thermal_priv *priv,
+ enum avs_tmon_trip_type type)
+{
+ struct avs_tmon_trip *trip = &avs_tmon_trips[type];
+ u32 val = __raw_readl(priv->tmon_base + trip->reg_offs);
+
+ val &= trip->reg_msk;
+ val >>= trip->reg_shift;
+
+ return avs_tmon_code_to_temp(priv->thermal, val);
+}
+
+static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
+ enum avs_tmon_trip_type type,
+ int temp)
+{
+ struct avs_tmon_trip *trip = &avs_tmon_trips[type];
+ u32 val, orig;
+
+ dev_dbg(priv->dev, "set temp %d to %d\n", type, temp);
+
+ /* round toward low temp for the low interrupt */
+ val = avs_tmon_temp_to_code(priv->thermal, temp,
+ type == TMON_TRIP_TYPE_LOW);
+
+ val <<= trip->reg_shift;
+ val &= trip->reg_msk;
+
+ orig = __raw_readl(priv->tmon_base + trip->reg_offs);
+ orig &= ~trip->reg_msk;
+ orig |= val;
+ __raw_writel(orig, priv->tmon_base + trip->reg_offs);
+}
+
+static int avs_tmon_get_intr_temp(struct brcmstb_thermal_priv *priv)
+{
+ u32 val;
+
+ val = __raw_readl(priv->tmon_base + AVS_TMON_TEMP_INT_CODE);
+ return avs_tmon_code_to_temp(priv->thermal, val);
+}
+
+static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
+{
+ struct brcmstb_thermal_priv *priv = data;
+ int low, high, intr;
+
+ low = avs_tmon_get_trip_temp(priv, TMON_TRIP_TYPE_LOW);
+ high = avs_tmon_get_trip_temp(priv, TMON_TRIP_TYPE_HIGH);
+ intr = avs_tmon_get_intr_temp(priv);
+
+ dev_dbg(priv->dev, "low/intr/high: %d/%d/%d\n",
+ low, intr, high);
+
+ /* Disable high-temp until next threshold shift */
+ if (intr >= high)
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 0);
+ /* Disable low-temp until next threshold shift */
+ if (intr <= low)
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 0);
+
+ /*
+ * Notify using the interrupt temperature, in case the temperature
+ * changes before it can next be read out
+ */
+ thermal_zone_device_update(priv->thermal, intr);
+
+ return IRQ_HANDLED;
+}
+
+static int brcmstb_set_trips(void *data, int low, int high)
+{
+ struct brcmstb_thermal_priv *priv = data;
+
+ dev_dbg(priv->dev, "set trips %d <--> %d\n", low, high);
+
+ /*
+ * Disable low-temp if "low" is too small. As per thermal framework
+ * API, we use -INT_MAX rather than INT_MIN.
+ */
+ if (low <= -INT_MAX) {
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 0);
+ } else {
+ avs_tmon_set_trip_temp(priv, TMON_TRIP_TYPE_LOW, low);
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 1);
+ }
+
+ /* Disable high-temp if "high" is too big. */
+ if (high == INT_MAX) {
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 0);
+ } else {
+ avs_tmon_set_trip_temp(priv, TMON_TRIP_TYPE_HIGH, high);
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 1);
+ }
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops of_ops = {
+ .get_temp = brcmstb_get_temp,
+ .set_trips = brcmstb_set_trips,
+};
+
+static const struct of_device_id brcmstb_thermal_id_table[] = {
+ { .compatible = "brcm,avs-tmon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
+
+static int brcmstb_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal;
+ struct brcmstb_thermal_priv *priv;
+ struct resource *res;
+ int irq, ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->tmon_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->tmon_base))
+ return PTR_ERR(priv->tmon_base);
+
+ priv->dev = &pdev->dev;
+ platform_set_drvdata(pdev, priv);
+
+ thermal = thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &of_ops);
+ if (IS_ERR(thermal)) {
+ ret = PTR_ERR(thermal);
+ dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
+ return ret;
+ }
+
+ priv->thermal = thermal;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "could not get IRQ\n");
+ ret = irq;
+ goto err;
+ }
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ brcmstb_tmon_irq_thread, IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n");
+
+ return 0;
+
+err:
+ thermal_zone_of_sensor_unregister(&pdev->dev, thermal);
+ return ret;
+}
+
+static int brcmstb_thermal_exit(struct platform_device *pdev)
+{
+ struct brcmstb_thermal_priv *priv = platform_get_drvdata(pdev);
+ struct thermal_zone_device *thermal = priv->thermal;
+
+ if (thermal)
+ thermal_zone_of_sensor_unregister(&pdev->dev, priv->thermal);
+
+ return 0;
+}
+
+static struct platform_driver brcmstb_thermal_driver = {
+ .probe = brcmstb_thermal_probe,
+ .remove = brcmstb_thermal_exit,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = brcmstb_thermal_id_table,
+ },
+};
+module_platform_driver(brcmstb_thermal_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("Broadcom STB AVS TMON thermal driver");
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 908a8014cf76..dc63aba092e4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -696,7 +696,7 @@ __cpufreq_cooling_register(struct device_node *np,
bool first;
if (IS_ERR_OR_NULL(policy)) {
- pr_err("%s: cpufreq policy isn't valid: %p", __func__, policy);
+ pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index bd3572c41585..2d855a96cdd9 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -23,222 +23,450 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/of_device.h>
#include "thermal_core.h"
-#define TEMP0_TH (0x4)
-#define TEMP0_RST_TH (0x8)
-#define TEMP0_CFG (0xC)
-#define TEMP0_EN (0x10)
-#define TEMP0_INT_EN (0x14)
-#define TEMP0_INT_CLR (0x18)
-#define TEMP0_RST_MSK (0x1C)
-#define TEMP0_VALUE (0x28)
-
-#define HISI_TEMP_BASE (-60)
-#define HISI_TEMP_RESET (100000)
-
-#define HISI_MAX_SENSORS 4
+#define HI6220_TEMP0_LAG (0x0)
+#define HI6220_TEMP0_TH (0x4)
+#define HI6220_TEMP0_RST_TH (0x8)
+#define HI6220_TEMP0_CFG (0xC)
+#define HI6220_TEMP0_CFG_SS_MSK (0xF000)
+#define HI6220_TEMP0_CFG_HDAK_MSK (0x30)
+#define HI6220_TEMP0_EN (0x10)
+#define HI6220_TEMP0_INT_EN (0x14)
+#define HI6220_TEMP0_INT_CLR (0x18)
+#define HI6220_TEMP0_RST_MSK (0x1C)
+#define HI6220_TEMP0_VALUE (0x28)
+
+#define HI3660_OFFSET(chan) ((chan) * 0x40)
+#define HI3660_TEMP(chan) (HI3660_OFFSET(chan) + 0x1C)
+#define HI3660_TH(chan) (HI3660_OFFSET(chan) + 0x20)
+#define HI3660_LAG(chan) (HI3660_OFFSET(chan) + 0x28)
+#define HI3660_INT_EN(chan) (HI3660_OFFSET(chan) + 0x2C)
+#define HI3660_INT_CLR(chan) (HI3660_OFFSET(chan) + 0x30)
+
+#define HI6220_TEMP_BASE (-60000)
+#define HI6220_TEMP_RESET (100000)
+#define HI6220_TEMP_STEP (785)
+#define HI6220_TEMP_LAG (3500)
+
+#define HI3660_TEMP_BASE (-63780)
+#define HI3660_TEMP_STEP (205)
+#define HI3660_TEMP_LAG (4000)
+
+#define HI6220_DEFAULT_SENSOR 2
+#define HI3660_DEFAULT_SENSOR 1
struct hisi_thermal_sensor {
- struct hisi_thermal_data *thermal;
struct thermal_zone_device *tzd;
-
- long sensor_temp;
uint32_t id;
uint32_t thres_temp;
};
struct hisi_thermal_data {
- struct mutex thermal_lock; /* protects register data */
+ int (*get_temp)(struct hisi_thermal_data *data);
+ int (*enable_sensor)(struct hisi_thermal_data *data);
+ int (*disable_sensor)(struct hisi_thermal_data *data);
+ int (*irq_handler)(struct hisi_thermal_data *data);
struct platform_device *pdev;
struct clk *clk;
- struct hisi_thermal_sensor sensors[HISI_MAX_SENSORS];
-
- int irq, irq_bind_sensor;
- bool irq_enabled;
-
+ struct hisi_thermal_sensor sensor;
void __iomem *regs;
+ int irq;
};
-/* in millicelsius */
-static inline int _step_to_temp(int step)
+/*
+ * The temperature computation on the tsensor is as follow:
+ * Unit: millidegree Celsius
+ * Step: 200/255 (0.7843)
+ * Temperature base: -60°C
+ *
+ * The register is programmed in temperature steps, every step is 785
+ * millidegree and begins at -60 000 m°C
+ *
+ * The temperature from the steps:
+ *
+ * Temp = TempBase + (steps x 785)
+ *
+ * and the steps from the temperature:
+ *
+ * steps = (Temp - TempBase) / 785
+ *
+ */
+static inline int hi6220_thermal_step_to_temp(int step)
{
- /*
- * Every step equals (1 * 200) / 255 celsius, and finally
- * need convert to millicelsius.
- */
- return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
+ return HI6220_TEMP_BASE + (step * HI6220_TEMP_STEP);
}
-static inline long _temp_to_step(long temp)
+static inline int hi6220_thermal_temp_to_step(int temp)
{
- return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
+ return DIV_ROUND_UP(temp - HI6220_TEMP_BASE, HI6220_TEMP_STEP);
}
-static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
- struct hisi_thermal_sensor *sensor)
+/*
+ * for Hi3660,
+ * Step: 189/922 (0.205)
+ * Temperature base: -63.780°C
+ *
+ * The register is programmed in temperature steps, every step is 205
+ * millidegree and begins at -63 780 m°C
+ */
+static inline int hi3660_thermal_step_to_temp(int step)
{
- long val;
+ return HI3660_TEMP_BASE + step * HI3660_TEMP_STEP;
+}
- mutex_lock(&data->thermal_lock);
+static inline int hi3660_thermal_temp_to_step(int temp)
+{
+ return DIV_ROUND_UP(temp - HI3660_TEMP_BASE, HI3660_TEMP_STEP);
+}
- /* disable interrupt */
- writel(0x0, data->regs + TEMP0_INT_EN);
- writel(0x1, data->regs + TEMP0_INT_CLR);
+/*
+ * The lag register contains 5 bits encoding the temperature in steps.
+ *
+ * Each time the temperature crosses the threshold boundary, an
+ * interrupt is raised. It could be when the temperature is going
+ * above the threshold or below. However, if the temperature is
+ * fluctuating around this value due to the load, we can receive
+ * several interrupts which may not desired.
+ *
+ * We can setup a temperature representing the delta between the
+ * threshold and the current temperature when the temperature is
+ * decreasing.
+ *
+ * For instance: the lag register is 5°C, the threshold is 65°C, when
+ * the temperature reaches 65°C an interrupt is raised and when the
+ * temperature decrease to 65°C - 5°C another interrupt is raised.
+ *
+ * A very short lag can lead to an interrupt storm, a long lag
+ * increase the latency to react to the temperature changes. In our
+ * case, that is not really a problem as we are polling the
+ * temperature.
+ *
+ * [0:4] : lag register
+ *
+ * The temperature is coded in steps, cf. HI6220_TEMP_STEP.
+ *
+ * Min : 0x00 : 0.0 °C
+ * Max : 0x1F : 24.3 °C
+ *
+ * The 'value' parameter is in milliCelsius.
+ */
+static inline void hi6220_thermal_set_lag(void __iomem *addr, int value)
+{
+ writel(DIV_ROUND_UP(value, HI6220_TEMP_STEP) & 0x1F,
+ addr + HI6220_TEMP0_LAG);
+}
- /* disable module firstly */
- writel(0x0, data->regs + TEMP0_EN);
+static inline void hi6220_thermal_alarm_clear(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_INT_CLR);
+}
- /* select sensor id */
- writel((sensor->id << 12), data->regs + TEMP0_CFG);
+static inline void hi6220_thermal_alarm_enable(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_INT_EN);
+}
- /* enable module */
- writel(0x1, data->regs + TEMP0_EN);
+static inline void hi6220_thermal_alarm_set(void __iomem *addr, int temp)
+{
+ writel(hi6220_thermal_temp_to_step(temp) | 0x0FFFFFF00,
+ addr + HI6220_TEMP0_TH);
+}
- usleep_range(3000, 5000);
+static inline void hi6220_thermal_reset_set(void __iomem *addr, int temp)
+{
+ writel(hi6220_thermal_temp_to_step(temp), addr + HI6220_TEMP0_RST_TH);
+}
- val = readl(data->regs + TEMP0_VALUE);
- val = _step_to_temp(val);
+static inline void hi6220_thermal_reset_enable(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_RST_MSK);
+}
- mutex_unlock(&data->thermal_lock);
+static inline void hi6220_thermal_enable(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_EN);
+}
- return val;
+static inline int hi6220_thermal_get_temperature(void __iomem *addr)
+{
+ return hi6220_thermal_step_to_temp(readl(addr + HI6220_TEMP0_VALUE));
}
-static void hisi_thermal_enable_bind_irq_sensor
- (struct hisi_thermal_data *data)
+/*
+ * [0:6] lag register
+ *
+ * The temperature is coded in steps, cf. HI3660_TEMP_STEP.
+ *
+ * Min : 0x00 : 0.0 °C
+ * Max : 0x7F : 26.0 °C
+ *
+ */
+static inline void hi3660_thermal_set_lag(void __iomem *addr,
+ int id, int value)
{
- struct hisi_thermal_sensor *sensor;
+ writel(DIV_ROUND_UP(value, HI3660_TEMP_STEP) & 0x7F,
+ addr + HI3660_LAG(id));
+}
- mutex_lock(&data->thermal_lock);
+static inline void hi3660_thermal_alarm_clear(void __iomem *addr,
+ int id, int value)
+{
+ writel(value, addr + HI3660_INT_CLR(id));
+}
- sensor = &data->sensors[data->irq_bind_sensor];
+static inline void hi3660_thermal_alarm_enable(void __iomem *addr,
+ int id, int value)
+{
+ writel(value, addr + HI3660_INT_EN(id));
+}
- /* setting the hdak time */
- writel(0x0, data->regs + TEMP0_CFG);
+static inline void hi3660_thermal_alarm_set(void __iomem *addr,
+ int id, int value)
+{
+ writel(value, addr + HI3660_TH(id));
+}
+
+static inline int hi3660_thermal_get_temperature(void __iomem *addr, int id)
+{
+ return hi3660_thermal_step_to_temp(readl(addr + HI3660_TEMP(id)));
+}
+
+/*
+ * Temperature configuration register - Sensor selection
+ *
+ * Bits [19:12]
+ *
+ * 0x0: local sensor (default)
+ * 0x1: remote sensor 1 (ACPU cluster 1)
+ * 0x2: remote sensor 2 (ACPU cluster 0)
+ * 0x3: remote sensor 3 (G3D)
+ */
+static inline void hi6220_thermal_sensor_select(void __iomem *addr, int sensor)
+{
+ writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_SS_MSK) |
+ (sensor << 12), addr + HI6220_TEMP0_CFG);
+}
+
+/*
+ * Temperature configuration register - Hdak conversion polling interval
+ *
+ * Bits [5:4]
+ *
+ * 0x0 : 0.768 ms
+ * 0x1 : 6.144 ms
+ * 0x2 : 49.152 ms
+ * 0x3 : 393.216 ms
+ */
+static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value)
+{
+ writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_HDAK_MSK) |
+ (value << 4), addr + HI6220_TEMP0_CFG);
+}
+
+static int hi6220_thermal_irq_handler(struct hisi_thermal_data *data)
+{
+ hi6220_thermal_alarm_clear(data->regs, 1);
+ return 0;
+}
+
+static int hi3660_thermal_irq_handler(struct hisi_thermal_data *data)
+{
+ hi3660_thermal_alarm_clear(data->regs, data->sensor.id, 1);
+ return 0;
+}
+
+static int hi6220_thermal_get_temp(struct hisi_thermal_data *data)
+{
+ return hi6220_thermal_get_temperature(data->regs);
+}
+
+static int hi3660_thermal_get_temp(struct hisi_thermal_data *data)
+{
+ return hi3660_thermal_get_temperature(data->regs, data->sensor.id);
+}
+
+static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
+{
+ /* disable sensor module */
+ hi6220_thermal_enable(data->regs, 0);
+ hi6220_thermal_alarm_enable(data->regs, 0);
+ hi6220_thermal_reset_enable(data->regs, 0);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int hi3660_thermal_disable_sensor(struct hisi_thermal_data *data)
+{
+ /* disable sensor module */
+ hi3660_thermal_alarm_enable(data->regs, data->sensor.id, 0);
+ return 0;
+}
+
+static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
+{
+ struct hisi_thermal_sensor *sensor = &data->sensor;
+ int ret;
+
+ /* enable clock for tsensor */
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
/* disable module firstly */
- writel(0x0, data->regs + TEMP0_RST_MSK);
- writel(0x0, data->regs + TEMP0_EN);
+ hi6220_thermal_reset_enable(data->regs, 0);
+ hi6220_thermal_enable(data->regs, 0);
/* select sensor id */
- writel((sensor->id << 12), data->regs + TEMP0_CFG);
+ hi6220_thermal_sensor_select(data->regs, sensor->id);
+
+ /* setting the hdak time */
+ hi6220_thermal_hdak_set(data->regs, 0);
+
+ /* setting lag value between current temp and the threshold */
+ hi6220_thermal_set_lag(data->regs, HI6220_TEMP_LAG);
/* enable for interrupt */
- writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
- data->regs + TEMP0_TH);
+ hi6220_thermal_alarm_set(data->regs, sensor->thres_temp);
- writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH);
+ hi6220_thermal_reset_set(data->regs, HI6220_TEMP_RESET);
/* enable module */
- writel(0x1, data->regs + TEMP0_RST_MSK);
- writel(0x1, data->regs + TEMP0_EN);
-
- writel(0x0, data->regs + TEMP0_INT_CLR);
- writel(0x1, data->regs + TEMP0_INT_EN);
+ hi6220_thermal_reset_enable(data->regs, 1);
+ hi6220_thermal_enable(data->regs, 1);
- usleep_range(3000, 5000);
+ hi6220_thermal_alarm_clear(data->regs, 0);
+ hi6220_thermal_alarm_enable(data->regs, 1);
- mutex_unlock(&data->thermal_lock);
+ return 0;
}
-static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_enable_sensor(struct hisi_thermal_data *data)
{
- mutex_lock(&data->thermal_lock);
+ unsigned int value;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
- /* disable sensor module */
- writel(0x0, data->regs + TEMP0_INT_EN);
- writel(0x0, data->regs + TEMP0_RST_MSK);
- writel(0x0, data->regs + TEMP0_EN);
+ /* disable interrupt */
+ hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
- mutex_unlock(&data->thermal_lock);
-}
+ /* setting lag value between current temp and the threshold */
+ hi3660_thermal_set_lag(data->regs, sensor->id, HI3660_TEMP_LAG);
-static int hisi_thermal_get_temp(void *_sensor, int *temp)
-{
- struct hisi_thermal_sensor *sensor = _sensor;
- struct hisi_thermal_data *data = sensor->thermal;
+ /* set interrupt threshold */
+ value = hi3660_thermal_temp_to_step(sensor->thres_temp);
+ hi3660_thermal_alarm_set(data->regs, sensor->id, value);
- int sensor_id = -1, i;
- long max_temp = 0;
+ /* enable interrupt */
+ hi3660_thermal_alarm_clear(data->regs, sensor->id, 1);
+ hi3660_thermal_alarm_enable(data->regs, sensor->id, 1);
- *temp = hisi_thermal_get_sensor_temp(data, sensor);
+ return 0;
+}
- sensor->sensor_temp = *temp;
+static int hi6220_thermal_probe(struct hisi_thermal_data *data)
+{
+ struct platform_device *pdev = data->pdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
- for (i = 0; i < HISI_MAX_SENSORS; i++) {
- if (!data->sensors[i].tzd)
- continue;
+ data->get_temp = hi6220_thermal_get_temp;
+ data->enable_sensor = hi6220_thermal_enable_sensor;
+ data->disable_sensor = hi6220_thermal_disable_sensor;
+ data->irq_handler = hi6220_thermal_irq_handler;
- if (data->sensors[i].sensor_temp >= max_temp) {
- max_temp = data->sensors[i].sensor_temp;
- sensor_id = i;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
}
- /* If no sensor has been enabled, then skip to enable irq */
- if (sensor_id == -1)
- return 0;
-
- mutex_lock(&data->thermal_lock);
- data->irq_bind_sensor = sensor_id;
- mutex_unlock(&data->thermal_lock);
-
- dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%d, thres=%d\n",
- sensor->id, data->irq_enabled, *temp, sensor->thres_temp);
- /*
- * Bind irq to sensor for two cases:
- * Reenable alarm IRQ if temperature below threshold;
- * if irq has been enabled, always set it;
- */
- if (data->irq_enabled) {
- hisi_thermal_enable_bind_irq_sensor(data);
- return 0;
+ data->clk = devm_clk_get(dev, "thermal_clk");
+ if (IS_ERR(data->clk)) {
+ ret = PTR_ERR(data->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get thermal clk: %d\n", ret);
+ return ret;
}
- if (max_temp < sensor->thres_temp) {
- data->irq_enabled = true;
- hisi_thermal_enable_bind_irq_sensor(data);
- enable_irq(data->irq);
- }
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->sensor.id = HI6220_DEFAULT_SENSOR;
return 0;
}
-static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
- .get_temp = hisi_thermal_get_temp,
-};
+static int hi3660_thermal_probe(struct hisi_thermal_data *data)
+{
+ struct platform_device *pdev = data->pdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ data->get_temp = hi3660_thermal_get_temp;
+ data->enable_sensor = hi3660_thermal_enable_sensor;
+ data->disable_sensor = hi3660_thermal_disable_sensor;
+ data->irq_handler = hi3660_thermal_irq_handler;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
+ }
-static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev)
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->sensor.id = HI3660_DEFAULT_SENSOR;
+
+ return 0;
+}
+
+static int hisi_thermal_get_temp(void *__data, int *temp)
{
- struct hisi_thermal_data *data = dev;
+ struct hisi_thermal_data *data = __data;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
- disable_irq_nosync(irq);
- data->irq_enabled = false;
+ *temp = data->get_temp(data);
- return IRQ_WAKE_THREAD;
+ dev_dbg(&data->pdev->dev, "id=%d, temp=%d, thres=%d\n",
+ sensor->id, *temp, sensor->thres_temp);
+
+ return 0;
}
+static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
+ .get_temp = hisi_thermal_get_temp,
+};
+
static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
{
struct hisi_thermal_data *data = dev;
- struct hisi_thermal_sensor *sensor;
- int i;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
+ int temp = 0;
- mutex_lock(&data->thermal_lock);
- sensor = &data->sensors[data->irq_bind_sensor];
+ data->irq_handler(data);
- dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n",
- sensor->thres_temp / 1000);
- mutex_unlock(&data->thermal_lock);
+ hisi_thermal_get_temp(data, &temp);
- for (i = 0; i < HISI_MAX_SENSORS; i++) {
- if (!data->sensors[i].tzd)
- continue;
+ if (temp >= sensor->thres_temp) {
+ dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n",
+ temp, sensor->thres_temp);
- thermal_zone_device_update(data->sensors[i].tzd,
+ thermal_zone_device_update(data->sensor.tzd,
THERMAL_EVENT_UNSPECIFIED);
+
+ } else {
+ dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n",
+ temp, sensor->thres_temp);
}
return IRQ_HANDLED;
@@ -246,17 +474,14 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
static int hisi_thermal_register_sensor(struct platform_device *pdev,
struct hisi_thermal_data *data,
- struct hisi_thermal_sensor *sensor,
- int index)
+ struct hisi_thermal_sensor *sensor)
{
int ret, i;
const struct thermal_trip *trip;
- sensor->id = index;
- sensor->thermal = data;
-
sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, sensor, &hisi_of_thermal_ops);
+ sensor->id, data,
+ &hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
sensor->tzd = NULL;
@@ -278,7 +503,14 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
}
static const struct of_device_id of_hisi_thermal_match[] = {
- { .compatible = "hisilicon,tsensor" },
+ {
+ .compatible = "hisilicon,tsensor",
+ .data = hi6220_thermal_probe
+ },
+ {
+ .compatible = "hisilicon,hi3660-tsensor",
+ .data = hi3660_thermal_probe
+ },
{ /* end */ }
};
MODULE_DEVICE_TABLE(of, of_hisi_thermal_match);
@@ -295,88 +527,63 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
static int hisi_thermal_probe(struct platform_device *pdev)
{
struct hisi_thermal_data *data;
- struct resource *res;
- int i;
+ int const (*platform_probe)(struct hisi_thermal_data *);
+ struct device *dev = &pdev->dev;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- mutex_init(&data->thermal_lock);
data->pdev = pdev;
+ platform_set_drvdata(pdev, data);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(&pdev->dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
+ platform_probe = of_device_get_match_data(dev);
+ if (!platform_probe) {
+ dev_err(dev, "failed to get probe func\n");
+ return -EINVAL;
}
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
-
- ret = devm_request_threaded_irq(&pdev->dev, data->irq,
- hisi_thermal_alarm_irq,
- hisi_thermal_alarm_irq_thread,
- 0, "hisi_thermal", data);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+ ret = platform_probe(data);
+ if (ret)
return ret;
- }
- platform_set_drvdata(pdev, data);
-
- data->clk = devm_clk_get(&pdev->dev, "thermal_clk");
- if (IS_ERR(data->clk)) {
- ret = PTR_ERR(data->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get thermal clk: %d\n", ret);
+ ret = hisi_thermal_register_sensor(pdev, data,
+ &data->sensor);
+ if (ret) {
+ dev_err(dev, "failed to register thermal sensor: %d\n", ret);
return ret;
}
- /* enable clock for thermal */
- ret = clk_prepare_enable(data->clk);
+ ret = data->enable_sensor(data);
if (ret) {
- dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ dev_err(dev, "Failed to setup the sensor: %d\n", ret);
return ret;
}
- hisi_thermal_enable_bind_irq_sensor(data);
- irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
- &data->irq_enabled);
-
- for (i = 0; i < HISI_MAX_SENSORS; ++i) {
- ret = hisi_thermal_register_sensor(pdev, data,
- &data->sensors[i], i);
- if (ret)
- dev_err(&pdev->dev,
- "failed to register thermal sensor: %d\n", ret);
- else
- hisi_thermal_toggle_sensor(&data->sensors[i], true);
+ if (data->irq) {
+ ret = devm_request_threaded_irq(dev, data->irq, NULL,
+ hisi_thermal_alarm_irq_thread,
+ IRQF_ONESHOT, "hisi_thermal", data);
+ if (ret < 0) {
+ dev_err(dev, "failed to request alarm irq: %d\n", ret);
+ return ret;
+ }
}
+ hisi_thermal_toggle_sensor(&data->sensor, true);
+
return 0;
}
static int hisi_thermal_remove(struct platform_device *pdev)
{
struct hisi_thermal_data *data = platform_get_drvdata(pdev);
- int i;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
- for (i = 0; i < HISI_MAX_SENSORS; i++) {
- struct hisi_thermal_sensor *sensor = &data->sensors[i];
+ hisi_thermal_toggle_sensor(sensor, false);
- if (!sensor->tzd)
- continue;
-
- hisi_thermal_toggle_sensor(sensor, false);
- }
-
- hisi_thermal_disable_sensor(data);
- clk_disable_unprepare(data->clk);
+ data->disable_sensor(data);
return 0;
}
@@ -386,10 +593,7 @@ static int hisi_thermal_suspend(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
- hisi_thermal_disable_sensor(data);
- data->irq_enabled = false;
-
- clk_disable_unprepare(data->clk);
+ data->disable_sensor(data);
return 0;
}
@@ -397,16 +601,8 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
- int ret;
- ret = clk_prepare_enable(data->clk);
- if (ret)
- return ret;
-
- data->irq_enabled = true;
- hisi_thermal_enable_bind_irq_sensor(data);
-
- return 0;
+ return data->enable_sensor(data);
}
#endif
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 4798b4b1fd77..e7d4ffc3de7f 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/types.h>
+#include <linux/nvmem-consumer.h>
#define REG_SET 0x4
#define REG_CLR 0x8
@@ -94,7 +95,7 @@ struct imx_thermal_data {
struct thermal_cooling_device *cdev;
enum thermal_device_mode mode;
struct regmap *tempmon;
- u32 c1, c2; /* See formula in imx_get_sensor_data() */
+ u32 c1, c2; /* See formula in imx_init_calib() */
int temp_passive;
int temp_critical;
int temp_max;
@@ -177,7 +178,7 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
- /* See imx_get_sensor_data() for formula derivation */
+ /* See imx_init_calib() for formula derivation */
*temp = data->c2 - n_meas * data->c1;
/* Update alarm value to next higher trip point for TEMPMON_IMX6Q */
@@ -346,29 +347,12 @@ static struct thermal_zone_device_ops imx_tz_ops = {
.set_trip_temp = imx_set_trip_temp,
};
-static int imx_get_sensor_data(struct platform_device *pdev)
+static int imx_init_calib(struct platform_device *pdev, u32 val)
{
struct imx_thermal_data *data = platform_get_drvdata(pdev);
- struct regmap *map;
int t1, n1;
- int ret;
- u32 val;
u64 temp64;
- map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "fsl,tempmon-data");
- if (IS_ERR(map)) {
- ret = PTR_ERR(map);
- dev_err(&pdev->dev, "failed to get sensor regmap: %d\n", ret);
- return ret;
- }
-
- ret = regmap_read(map, OCOTP_ANA1, &val);
- if (ret) {
- dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
- return ret;
- }
-
if (val == 0 || val == ~0) {
dev_err(&pdev->dev, "invalid sensor calibration data\n");
return -EINVAL;
@@ -405,12 +389,12 @@ static int imx_get_sensor_data(struct platform_device *pdev)
data->c1 = temp64;
data->c2 = n1 * data->c1 + 1000 * t1;
- /* use OTP for thermal grade */
- ret = regmap_read(map, OCOTP_MEM0, &val);
- if (ret) {
- dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret);
- return ret;
- }
+ return 0;
+}
+
+static void imx_init_temp_grade(struct platform_device *pdev, u32 val)
+{
+ struct imx_thermal_data *data = platform_get_drvdata(pdev);
/* The maximum die temp is specified by the Temperature Grade */
switch ((val >> 6) & 0x3) {
@@ -438,6 +422,55 @@ static int imx_get_sensor_data(struct platform_device *pdev)
*/
data->temp_critical = data->temp_max - (1000 * 5);
data->temp_passive = data->temp_max - (1000 * 10);
+}
+
+static int imx_init_from_tempmon_data(struct platform_device *pdev)
+{
+ struct regmap *map;
+ int ret;
+ u32 val;
+
+ map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "fsl,tempmon-data");
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ dev_err(&pdev->dev, "failed to get sensor regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(map, OCOTP_ANA1, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
+ return ret;
+ }
+ ret = imx_init_calib(pdev, val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, OCOTP_MEM0, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
+ return ret;
+ }
+ imx_init_temp_grade(pdev, val);
+
+ return 0;
+}
+
+static int imx_init_from_nvmem_cells(struct platform_device *pdev)
+{
+ int ret;
+ u32 val;
+
+ ret = nvmem_cell_read_u32(&pdev->dev, "calib", &val);
+ if (ret)
+ return ret;
+ imx_init_calib(pdev, val);
+
+ ret = nvmem_cell_read_u32(&pdev->dev, "temp_grade", &val);
+ if (ret)
+ return ret;
+ imx_init_temp_grade(pdev, val);
return 0;
}
@@ -514,10 +547,21 @@ static int imx_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- ret = imx_get_sensor_data(pdev);
- if (ret) {
- dev_err(&pdev->dev, "failed to get sensor data\n");
- return ret;
+ if (of_find_property(pdev->dev.of_node, "nvmem-cells", NULL)) {
+ ret = imx_init_from_nvmem_cells(pdev);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init from nvmem: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = imx_init_from_tempmon_data(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init from from fsl,tempmon-data\n");
+ return ret;
+ }
}
/* Make sure sensor is in known good state for measurements */
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index f02341f7134d..80bbf9ce2fb6 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -30,6 +30,10 @@
/* Skylake thermal reporting device */
#define PCI_DEVICE_ID_PROC_SKL_THERMAL 0x1903
+/* CannonLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_CNL_THERMAL 0x5a03
+#define PCI_DEVICE_ID_PROC_CFL_THERMAL 0x3E83
+
/* Braswell thermal reporting device */
#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
@@ -461,6 +465,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTX_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTP_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CNL_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CFL_THERMAL)},
{ 0, },
};
diff --git a/drivers/thermal/intel_bxt_pmic_thermal.c b/drivers/thermal/intel_bxt_pmic_thermal.c
index ef6b32242ccb..94cfd0064c43 100644
--- a/drivers/thermal/intel_bxt_pmic_thermal.c
+++ b/drivers/thermal/intel_bxt_pmic_thermal.c
@@ -166,7 +166,7 @@ static irqreturn_t pmic_thermal_irq_handler(int irq, void *data)
struct pmic_thermal_data *td;
struct intel_soc_pmic *pmic;
struct regmap *regmap;
- u8 reg_val, mask, irq_stat, trip;
+ u8 reg_val, mask, irq_stat;
u16 reg, evt_stat_reg;
int i, j, ret;
@@ -201,7 +201,6 @@ static irqreturn_t pmic_thermal_irq_handler(int irq, void *data)
if (regmap_read(regmap, evt_stat_reg, &ret))
return IRQ_HANDLED;
- trip = td->maps[i].trip_config[j].trip_num;
tzd = thermal_zone_get_zone_by_name(td->maps[i].handle);
if (!IS_ERR(tzd))
thermal_zone_device_update(tzd,
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index c60b1cfcc64e..8a7f69b4b022 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -30,6 +30,8 @@
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */
+#define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */
+#define PCH_THERMAL_DID_CNL_H 0xA379 /* CNL-H PCH */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -278,6 +280,7 @@ enum board_ids {
board_hsw,
board_wpt,
board_skl,
+ board_cnl,
};
static const struct board_info {
@@ -296,6 +299,10 @@ static const struct board_info {
.name = "pch_skylake",
.ops = &pch_dev_ops_wpt,
},
+ [board_cnl] = {
+ .name = "pch_cannonlake",
+ .ops = &pch_dev_ops_wpt,
+ },
};
static int intel_pch_thermal_probe(struct pci_dev *pdev,
@@ -398,6 +405,10 @@ static const struct pci_device_id intel_pch_thermal_id[] = {
.driver_data = board_skl, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H),
.driver_data = board_skl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL),
+ .driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H),
+ .driver_data = board_cnl, },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index d718cd179ddb..4540e892b61d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -675,13 +675,13 @@ static int __init powerclamp_probe(void)
{
if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("CPU does not support MWAIT");
+ pr_err("CPU does not support MWAIT\n");
return -ENODEV;
}
/* The goal for idle time alignment is to achieve package cstate. */
if (!has_pkg_state_counter()) {
- pr_info("No package C-state available");
+ pr_info("No package C-state available\n");
return -ENODEV;
}
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index f50241962ad2..95f987d5aa71 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -125,7 +125,7 @@ static int qpnp_tm_get_temp(void *data, int *temp)
if (!temp)
return -EINVAL;
- if (IS_ERR(chip->adc)) {
+ if (!chip->adc) {
ret = qpnp_tm_update_temp_no_adc(chip);
if (ret < 0)
return ret;
@@ -224,67 +224,53 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return irq;
/* ADC based measurements are optional */
- chip->adc = iio_channel_get(&pdev->dev, "thermal");
- if (PTR_ERR(chip->adc) == -EPROBE_DEFER)
- return PTR_ERR(chip->adc);
+ chip->adc = devm_iio_channel_get(&pdev->dev, "thermal");
+ if (IS_ERR(chip->adc)) {
+ ret = PTR_ERR(chip->adc);
+ chip->adc = NULL;
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ }
chip->base = res;
ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type);
if (ret < 0) {
dev_err(&pdev->dev, "could not read type\n");
- goto fail;
+ return ret;
}
ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype);
if (ret < 0) {
dev_err(&pdev->dev, "could not read subtype\n");
- goto fail;
+ return ret;
}
if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
type, subtype);
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
ret = qpnp_tm_init(chip);
if (ret < 0) {
dev_err(&pdev->dev, "init failed\n");
- goto fail;
+ return ret;
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr,
IRQF_ONESHOT, node->name, chip);
if (ret < 0)
- goto fail;
+ return ret;
chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
&qpnp_tm_sensor_ops);
if (IS_ERR(chip->tz_dev)) {
dev_err(&pdev->dev, "failed to register sensor\n");
- ret = PTR_ERR(chip->tz_dev);
- goto fail;
+ return PTR_ERR(chip->tz_dev);
}
return 0;
-
-fail:
- if (!IS_ERR(chip->adc))
- iio_channel_release(chip->adc);
-
- return ret;
-}
-
-static int qpnp_tm_remove(struct platform_device *pdev)
-{
- struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
-
- if (!IS_ERR(chip->adc))
- iio_channel_release(chip->adc);
-
- return 0;
}
static const struct of_device_id qpnp_tm_match_table[] = {
@@ -299,7 +285,6 @@ static struct platform_driver qpnp_tm_driver = {
.of_match_table = qpnp_tm_match_table,
},
.probe = qpnp_tm_probe,
- .remove = qpnp_tm_remove,
};
module_platform_driver(qpnp_tm_driver);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 203aca44a2bb..561a0a332208 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include <linux/thermal.h>
#include "thermal_core.h"
@@ -90,10 +91,6 @@ struct rcar_gen3_thermal_priv {
struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
unsigned int num_tscs;
spinlock_t lock; /* Protect interrupts on and off */
- const struct rcar_gen3_thermal_data *data;
-};
-
-struct rcar_gen3_thermal_data {
void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
};
@@ -278,7 +275,12 @@ static irqreturn_t rcar_gen3_thermal_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void r8a7795_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
+static const struct soc_device_attribute r8a7795es1[] = {
+ { .soc_id = "r8a7795", .revision = "ES1.*" },
+ { /* sentinel */ }
+};
+
+static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
{
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_THBGR);
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, 0x0);
@@ -303,7 +305,7 @@ static void r8a7795_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
}
-static void r8a7796_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
+static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
{
u32 reg_val;
@@ -324,17 +326,9 @@ static void r8a7796_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
}
-static const struct rcar_gen3_thermal_data r8a7795_data = {
- .thermal_init = r8a7795_thermal_init,
-};
-
-static const struct rcar_gen3_thermal_data r8a7796_data = {
- .thermal_init = r8a7796_thermal_init,
-};
-
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
- { .compatible = "renesas,r8a7795-thermal", .data = &r8a7795_data},
- { .compatible = "renesas,r8a7796-thermal", .data = &r8a7796_data},
+ { .compatible = "renesas,r8a7795-thermal", },
+ { .compatible = "renesas,r8a7796-thermal", },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
@@ -371,7 +365,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->data = of_device_get_match_data(dev);
+ priv->thermal_init = rcar_gen3_thermal_init;
+ if (soc_device_match(r8a7795es1))
+ priv->thermal_init = rcar_gen3_thermal_init_r8a7795es1;
spin_lock_init(&priv->lock);
@@ -423,7 +419,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
priv->tscs[i] = tsc;
- priv->data->thermal_init(tsc);
+ priv->thermal_init(tsc);
rcar_gen3_thermal_calc_coefs(&tsc->coef, ptat, thcode[i]);
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
@@ -476,7 +472,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- priv->data->thermal_init(tsc);
+ priv->thermal_init(tsc);
rcar_gen3_thermal_set_trips(tsc, tsc->low, tsc->high);
}
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 206035139110..f36375d5a16c 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -242,6 +242,45 @@ struct tsadc_table {
int temp;
};
+static const struct tsadc_table rv1108_table[] = {
+ {0, -40000},
+ {374, -40000},
+ {382, -35000},
+ {389, -30000},
+ {397, -25000},
+ {405, -20000},
+ {413, -15000},
+ {421, -10000},
+ {429, -5000},
+ {436, 0},
+ {444, 5000},
+ {452, 10000},
+ {460, 15000},
+ {468, 20000},
+ {476, 25000},
+ {483, 30000},
+ {491, 35000},
+ {499, 40000},
+ {507, 45000},
+ {515, 50000},
+ {523, 55000},
+ {531, 60000},
+ {539, 65000},
+ {547, 70000},
+ {555, 75000},
+ {562, 80000},
+ {570, 85000},
+ {578, 90000},
+ {586, 95000},
+ {594, 100000},
+ {602, 105000},
+ {610, 110000},
+ {618, 115000},
+ {626, 120000},
+ {634, 125000},
+ {TSADCV2_DATA_MASK, 125000},
+};
+
static const struct tsadc_table rk3228_code_table[] = {
{0, -40000},
{588, -40000},
@@ -779,6 +818,30 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
writel_relaxed(val, regs + TSADCV2_INT_EN);
}
+static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_num = 1, /* one channel for tsadc */
+
+ .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv2_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rv1108_table,
+ .length = ARRAY_SIZE(rv1108_table),
+ .data_mask = TSADCV2_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
.chn_num = 1, /* one channel for tsadc */
@@ -928,6 +991,10 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
static const struct of_device_id of_rockchip_thermal_match[] = {
{
+ .compatible = "rockchip,rv1108-tsadc",
+ .data = (void *)&rv1108_tsadc_data,
+ },
+ {
.compatible = "rockchip,rk3228-tsadc",
.data = (void *)&rk3228_tsadc_data,
},
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index be95826631b7..ee047ca43084 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -31,8 +31,7 @@
* If the temperature is higher than a trip point,
* a. if the trend is THERMAL_TREND_RAISING, use higher cooling
* state for this trip point
- * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
- * state for this trip point
+ * b. if the trend is THERMAL_TREND_DROPPING, do nothing
* c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
* for this trip point
* d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
@@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (!throttle)
next_target = THERMAL_NO_TARGET;
} else {
- next_target = cur_state - 1;
- if (next_target > instance->upper)
- next_target = instance->upper;
+ if (!throttle) {
+ next_target = cur_state - 1;
+ if (next_target > instance->upper)
+ next_target = instance->upper;
+ }
}
break;
case THERMAL_TREND_DROP_FULL:
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index cec586ec7e4b..f8740f7852e3 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -10,4 +10,11 @@ config TEGRA_SOCTHERM
zones to manage temperatures. This option is also required for the
emergency thermal reset (thermtrip) feature to function.
+config TEGRA_BPMP_THERMAL
+ tristate "Tegra BPMP thermal sensing"
+ depends on TEGRA_BPMP || COMPILE_TEST
+ help
+ Enable this option for support for sensing system temperature of NVIDIA
+ Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+
endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
index 8a3f221f17c1..0f2b66edf0d2 100644
--- a/drivers/thermal/tegra/Makefile
+++ b/drivers/thermal/tegra/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
+obj-$(CONFIG_TEGRA_BPMP_THERMAL) += tegra-bpmp-thermal.o
tegra-soctherm-y := soctherm.o soctherm-fuse.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 7d2db23d71a3..075db1de5e53 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -483,7 +483,7 @@ static int throttrip_program(struct device *dev,
unsigned int throt;
u32 r, reg_off;
- if (!dev || !sg || !stc || !stc->init)
+ if (!sg || !stc || !stc->init)
return -EINVAL;
temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
new file mode 100644
index 000000000000..b0980dbca3b3
--- /dev/null
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ * Aapo Vienamo <avienamo@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/workqueue.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+struct tegra_bpmp_thermal_zone {
+ struct tegra_bpmp_thermal *tegra;
+ struct thermal_zone_device *tzd;
+ struct work_struct tz_device_update_work;
+ unsigned int idx;
+};
+
+struct tegra_bpmp_thermal {
+ struct device *dev;
+ struct tegra_bpmp *bpmp;
+ unsigned int num_zones;
+ struct tegra_bpmp_thermal_zone **zones;
+};
+
+static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
+{
+ struct tegra_bpmp_thermal_zone *zone = data;
+ struct mrq_thermal_host_to_bpmp_request req;
+ union mrq_thermal_bpmp_to_host_response reply;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_GET_TEMP;
+ req.get_temp.zone = zone->idx;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &reply;
+ msg.rx.size = sizeof(reply);
+
+ err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+ if (err)
+ return err;
+
+ *out_temp = reply.get_temp.temp;
+
+ return 0;
+}
+
+static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
+{
+ struct tegra_bpmp_thermal_zone *zone = data;
+ struct mrq_thermal_host_to_bpmp_request req;
+ struct tegra_bpmp_message msg;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_SET_TRIP;
+ req.set_trip.zone = zone->idx;
+ req.set_trip.enabled = true;
+ req.set_trip.low = low;
+ req.set_trip.high = high;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+
+ return tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+}
+
+static void tz_device_update_work_fn(struct work_struct *work)
+{
+ struct tegra_bpmp_thermal_zone *zone;
+
+ zone = container_of(work, struct tegra_bpmp_thermal_zone,
+ tz_device_update_work);
+
+ thermal_zone_device_update(zone->tzd, THERMAL_TRIP_VIOLATED);
+}
+
+static void bpmp_mrq_thermal(unsigned int mrq, struct tegra_bpmp_channel *ch,
+ void *data)
+{
+ struct mrq_thermal_bpmp_to_host_request *req;
+ struct tegra_bpmp_thermal *tegra = data;
+ int i;
+
+ req = (struct mrq_thermal_bpmp_to_host_request *)ch->ib->data;
+
+ if (req->type != CMD_THERMAL_HOST_TRIP_REACHED) {
+ dev_err(tegra->dev, "%s: invalid request type: %d\n",
+ __func__, req->type);
+ tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0);
+ return;
+ }
+
+ for (i = 0; i < tegra->num_zones; ++i) {
+ if (tegra->zones[i]->idx != req->host_trip_reached.zone)
+ continue;
+
+ schedule_work(&tegra->zones[i]->tz_device_update_work);
+ tegra_bpmp_mrq_return(ch, 0, NULL, 0);
+ return;
+ }
+
+ dev_err(tegra->dev, "%s: invalid thermal zone: %d\n", __func__,
+ req->host_trip_reached.zone);
+ tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0);
+}
+
+static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
+ int *num_zones)
+{
+ struct mrq_thermal_host_to_bpmp_request req;
+ union mrq_thermal_bpmp_to_host_response reply;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_GET_NUM_ZONES;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &reply;
+ msg.rx.size = sizeof(reply);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+
+ *num_zones = reply.get_num_zones.num;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops tegra_bpmp_of_thermal_ops = {
+ .get_temp = tegra_bpmp_thermal_get_temp,
+ .set_trips = tegra_bpmp_thermal_set_trips,
+};
+
+static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp *bpmp = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_bpmp_thermal *tegra;
+ struct thermal_zone_device *tzd;
+ unsigned int i, max_num_zones;
+ int err;
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ tegra->dev = &pdev->dev;
+ tegra->bpmp = bpmp;
+
+ err = tegra_bpmp_thermal_get_num_zones(bpmp, &max_num_zones);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get the number of zones: %d\n",
+ err);
+ return err;
+ }
+
+ tegra->zones = devm_kcalloc(&pdev->dev, max_num_zones,
+ sizeof(*tegra->zones), GFP_KERNEL);
+ if (!tegra->zones)
+ return -ENOMEM;
+
+ for (i = 0; i < max_num_zones; ++i) {
+ struct tegra_bpmp_thermal_zone *zone;
+ int temp;
+
+ zone = devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
+ if (!zone)
+ return -ENOMEM;
+
+ zone->idx = i;
+ zone->tegra = tegra;
+
+ err = tegra_bpmp_thermal_get_temp(zone, &temp);
+ if (err < 0) {
+ devm_kfree(&pdev->dev, zone);
+ continue;
+ }
+
+ tzd = devm_thermal_zone_of_sensor_register(
+ &pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops);
+ if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ devm_kfree(&pdev->dev, zone);
+ continue;
+ }
+
+ zone->tzd = tzd;
+ INIT_WORK(&zone->tz_device_update_work,
+ tz_device_update_work_fn);
+
+ tegra->zones[tegra->num_zones++] = zone;
+ }
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_THERMAL, bpmp_mrq_thermal,
+ tegra);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mrq handler: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, tegra);
+
+ return 0;
+}
+
+static int tegra_bpmp_thermal_remove(struct platform_device *pdev)
+{
+ struct tegra_bpmp_thermal *tegra = platform_get_drvdata(pdev);
+
+ tegra_bpmp_free_mrq(tegra->bpmp, MRQ_THERMAL, tegra);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_bpmp_thermal_of_match[] = {
+ { .compatible = "nvidia,tegra186-bpmp-thermal" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_bpmp_thermal_of_match);
+
+static struct platform_driver tegra_bpmp_thermal_driver = {
+ .probe = tegra_bpmp_thermal_probe,
+ .remove = tegra_bpmp_thermal_remove,
+ .driver = {
+ .name = "tegra-bpmp-thermal",
+ .of_match_table = tegra_bpmp_thermal_of_match,
+ },
+};
+module_platform_driver(tegra_bpmp_thermal_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra BPMP thermal sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index 73f55d6a1721..46d3005335c7 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -126,38 +126,23 @@ static int gadc_thermal_probe(struct platform_device *pdev)
gti->dev = &pdev->dev;
platform_set_drvdata(pdev, gti);
- gti->channel = iio_channel_get(&pdev->dev, "sensor-channel");
+ gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel");
if (IS_ERR(gti->channel)) {
ret = PTR_ERR(gti->channel);
dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
return ret;
}
- gti->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0,
- gti, &gadc_thermal_ops);
+ gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti,
+ &gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
ret = PTR_ERR(gti->tz_dev);
dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n",
ret);
- goto sensor_fail;
+ return ret;
}
return 0;
-
-sensor_fail:
- iio_channel_release(gti->channel);
-
- return ret;
-}
-
-static int gadc_thermal_remove(struct platform_device *pdev)
-{
- struct gadc_thermal_info *gti = platform_get_drvdata(pdev);
-
- thermal_zone_of_sensor_unregister(&pdev->dev, gti->tz_dev);
- iio_channel_release(gti->channel);
-
- return 0;
}
static const struct of_device_id of_adc_thermal_match[] = {
@@ -172,7 +157,6 @@ static struct platform_driver gadc_thermal_driver = {
.of_match_table = of_adc_thermal_match,
},
.probe = gadc_thermal_probe,
- .remove = gadc_thermal_remove,
};
module_platform_driver(gadc_thermal_driver);
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index c211a8e4a210..b4f981daeaf2 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -278,7 +278,8 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
if (data) {
cpufreq_cooling_unregister(data->cool_dev);
- cpufreq_cpu_put(data->policy);
+ if (data->policy)
+ cpufreq_cpu_put(data->policy);
}
return 0;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index d674e06767a5..1424581fd9af 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -225,6 +225,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
tb_port_info(up_port,
"PCIe tunnel activation failed, aborting\n");
tb_pci_free(tunnel);
+ continue;
}
list_add(&tunnel->list, &tcm->tunnel_list);
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 5d442469c95e..cf0bde3bb927 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -279,7 +279,7 @@ static unsigned detect_isa_irq(void __iomem *);
#endif /* CONFIG_ISA */
#ifndef CONFIG_CYZ_INTR
-static void cyz_poll(unsigned long);
+static void cyz_poll(struct timer_list *);
/* The Cyclades-Z polling cycle is defined by this variable */
static long cyz_polling_cycle = CZ_DEF_POLL;
@@ -1214,7 +1214,7 @@ static void cyz_rx_restart(struct timer_list *t)
#else /* CONFIG_CYZ_INTR */
-static void cyz_poll(unsigned long arg)
+static void cyz_poll(struct timer_list *unused)
{
struct cyclades_card *cinfo;
struct cyclades_port *info;
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index a6b8240af6cd..b0baa4ce10f9 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -33,7 +33,7 @@ static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
unsigned int address,
const unsigned char *data, int len,
int is_last);
-static void ipwireless_setup_timer(unsigned long data);
+static void ipwireless_setup_timer(struct timer_list *t);
static void handle_received_CTRL_packet(struct ipw_hardware *hw,
unsigned int channel_idx, const unsigned char *data, int len);
@@ -1635,8 +1635,7 @@ struct ipw_hardware *ipwireless_hardware_create(void)
spin_lock_init(&hw->lock);
tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
INIT_WORK(&hw->work_rx, ipw_receive_data_work);
- setup_timer(&hw->setup_timer, ipwireless_setup_timer,
- (unsigned long) hw);
+ timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
return hw;
}
@@ -1670,12 +1669,12 @@ void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
hw->init_loops = 0;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": waiting for card to start up...\n");
- ipwireless_setup_timer((unsigned long) hw);
+ ipwireless_setup_timer(&hw->setup_timer);
}
-static void ipwireless_setup_timer(unsigned long data)
+static void ipwireless_setup_timer(struct timer_list *t)
{
- struct ipw_hardware *hw = (struct ipw_hardware *) data;
+ struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
hw->init_loops++;
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index ee7958ab269f..015686ff4825 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -170,7 +170,7 @@ static struct pci_driver isicom_driver = {
static int prev_card = 3; /* start servicing isi_card[0] */
static struct tty_driver *isicom_normal;
-static void isicom_tx(unsigned long _data);
+static void isicom_tx(struct timer_list *unused);
static void isicom_start(struct tty_struct *tty);
static DEFINE_TIMER(tx, isicom_tx);
@@ -394,7 +394,7 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
* will do the rest of the work for us.
*/
-static void isicom_tx(unsigned long _data)
+static void isicom_tx(struct timer_list *unused)
{
unsigned long flags, base;
unsigned int retries;
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 65a70f3c7cde..68cbc03aab4b 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -198,7 +198,7 @@ static void moxa_hangup(struct tty_struct *);
static int moxa_tiocmget(struct tty_struct *tty);
static int moxa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static void moxa_poll(unsigned long);
+static void moxa_poll(struct timer_list *);
static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
static void moxa_shutdown(struct tty_port *);
static int moxa_carrier_raised(struct tty_port *);
@@ -1429,7 +1429,7 @@ put:
return 0;
}
-static void moxa_poll(unsigned long ignored)
+static void moxa_poll(struct timer_list *unused)
{
struct moxa_board_conf *brd;
u16 __iomem *ip;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 3a39eb685c69..5131bdc9e765 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1310,9 +1310,9 @@ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
* gsm->pending_cmd will be NULL and we just let the timer expire.
*/
-static void gsm_control_retransmit(unsigned long data)
+static void gsm_control_retransmit(struct timer_list *t)
{
- struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
struct gsm_control *ctrl;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
@@ -1453,9 +1453,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* end will get a DM response)
*/
-static void gsm_dlci_t1(unsigned long data)
+static void gsm_dlci_t1(struct timer_list *t)
{
- struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_dlci *dlci = from_timer(dlci, t, t1);
struct gsm_mux *gsm = dlci->gsm;
switch (dlci->state) {
@@ -1634,7 +1634,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
}
skb_queue_head_init(&dlci->skb_list);
- setup_timer(&dlci->t1, gsm_dlci_t1, (unsigned long)dlci);
+ timer_setup(&dlci->t1, gsm_dlci_t1, 0);
tty_port_init(&dlci->port);
dlci->port.ops = &gsm_port_ops;
dlci->gsm = gsm;
@@ -2088,7 +2088,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
struct gsm_dlci *dlci;
int i = 0;
- setup_timer(&gsm->t2_timer, gsm_control_retransmit, (unsigned long)gsm);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 9f246d4db3ca..30bb0900cd2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -115,7 +115,7 @@ static void retry_transmit(struct r3964_info *pInfo);
static void transmit_block(struct r3964_info *pInfo);
static void receive_char(struct r3964_info *pInfo, const unsigned char c);
static void receive_error(struct r3964_info *pInfo, const char flag);
-static void on_timeout(unsigned long priv);
+static void on_timeout(struct timer_list *t);
static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
unsigned char __user * buf);
@@ -688,9 +688,9 @@ static void receive_error(struct r3964_info *pInfo, const char flag)
}
}
-static void on_timeout(unsigned long priv)
+static void on_timeout(struct timer_list *t)
{
- struct r3964_info *pInfo = (void *)priv;
+ struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
switch (pInfo->state) {
case R3964_TX_REQUEST:
@@ -993,7 +993,7 @@ static int r3964_open(struct tty_struct *tty)
tty->disc_data = pInfo;
tty->receive_room = 65536;
- setup_timer(&pInfo->tmr, on_timeout, (unsigned long)pInfo);
+ timer_setup(&pInfo->tmr, on_timeout, 0);
return 0;
}
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index f7dc9b1ea806..bdd17d2aaafd 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -86,7 +86,7 @@
/****** RocketPort Local Variables ******/
-static void rp_do_poll(unsigned long dummy);
+static void rp_do_poll(struct timer_list *unused);
static struct tty_driver *rocket_driver;
@@ -525,7 +525,7 @@ static void rp_handle_port(struct r_port *info)
/*
* The top level polling routine. Repeats every 1/100 HZ (10ms).
*/
-static void rp_do_poll(unsigned long dummy)
+static void rp_do_poll(struct timer_list *unused)
{
CONTROLLER_t *ctlp;
int ctrl, aiop, ch, line;
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index ce7ad0acee7a..247788a16f0b 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -27,23 +27,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ int ret;
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
return 0;
- return serdev_controller_receive_buf(ctrl, cp, count);
+ ret = serdev_controller_receive_buf(ctrl, cp, count);
+
+ dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
+ "receive_buf returns %d (count = %zu)\n",
+ ret, count);
+ if (ret < 0)
+ return 0;
+ else if (ret > count)
+ return count;
+
+ return ret;
}
static void ttyport_write_wakeup(struct tty_port *port)
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(port);
+ if (!tty)
+ return;
- if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) &&
+ if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
test_bit(SERPORT_ACTIVE, &serport->flags))
serdev_controller_write_wakeup(ctrl);
- wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT);
+ wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+
+ tty_kref_put(tty);
}
static const struct tty_port_client_operations client_ops = {
@@ -136,8 +154,10 @@ static void ttyport_close(struct serdev_controller *ctrl)
clear_bit(SERPORT_ACTIVE, &serport->flags);
+ tty_lock(tty);
if (tty->ops->close)
tty->ops->close(tty, NULL);
+ tty_unlock(tty);
tty_release_struct(tty, serport->tty_idx);
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index d64afdd93872..9342fc2ee7df 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -325,7 +325,7 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
if (up->bugs & UART_BUG_THRE) {
pr_debug("ttyS%d - using backup timer\n", serial_index(port));
- up->timer.function = (TIMER_FUNC_TYPE)serial8250_backup_timeout;
+ up->timer.function = serial8250_backup_timeout;
mod_timer(&up->timer, jiffies +
uart_poll_timeout(port) + HZ / 5);
}
@@ -348,7 +348,7 @@ static void univ8250_release_irq(struct uart_8250_port *up)
struct uart_port *port = &up->port;
del_timer_sync(&up->timer);
- up->timer.function = (TIMER_FUNC_TYPE)serial8250_timeout;
+ up->timer.function = serial8250_timeout;
if (port->irq)
serial_unlink_irq_chain(up);
}
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 362c25ff188a..ae6a256524d8 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -122,12 +122,14 @@ static void __init init_port(struct earlycon_device *device)
serial8250_early_out(port, UART_FCR, 0); /* no fifo */
serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */
- divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
- c = serial8250_early_in(port, UART_LCR);
- serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
- serial8250_early_out(port, UART_DLL, divisor & 0xff);
- serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
- serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+ if (port->uartclk && device->baud) {
+ divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
+ c = serial8250_early_in(port, UART_LCR);
+ serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
+ serial8250_early_out(port, UART_DLL, divisor & 0xff);
+ serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
+ serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+ }
}
int __init early_serial8250_setup(struct earlycon_device *device,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index b7e0e3416641..54adf8d56350 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5135,6 +5135,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
+ /* Amazon PCI serial device */
+ { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 1421804975e0..c9458a033e3c 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -2059,7 +2059,7 @@ static void flush_timeout_function(unsigned long data)
static struct timer_list flush_timer;
static void
-timed_flush_handler(unsigned long ptr)
+timed_flush_handler(struct timer_list *unused)
{
struct e100_serial *info;
int i;
@@ -4137,7 +4137,7 @@ static int __init rs_init(void)
/* Setup the timed flush handler system */
#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
- setup_timer(&flush_timer, timed_flush_handler, 0);
+ timer_setup(&flush_timer, timed_flush_handler, 0);
mod_timer(&flush_timer, jiffies + 5);
#endif
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c84e6f0db54e..1c4d3f387138 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -966,9 +966,9 @@ static void lpuart_dma_rx_complete(void *arg)
lpuart_copy_rx_to_tty(sport);
}
-static void lpuart_timer_func(unsigned long data)
+static void lpuart_timer_func(struct timer_list *t)
{
- struct lpuart_port *sport = (struct lpuart_port *)data;
+ struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
lpuart_copy_rx_to_tty(sport);
}
@@ -1263,8 +1263,7 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
static void rx_dma_timer_init(struct lpuart_port *sport)
{
- setup_timer(&sport->lpuart_timer, lpuart_timer_func,
- (unsigned long)sport);
+ timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
add_timer(&sport->lpuart_timer);
}
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 473f4f81d690..ffefd218761e 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -263,9 +263,9 @@ static void mrdy_assert(struct ifx_spi_device *ifx_dev)
* The SPI has timed out: hang up the tty. Users will then see a hangup
* and error events.
*/
-static void ifx_spi_timeout(unsigned long arg)
+static void ifx_spi_timeout(struct timer_list *t)
{
- struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+ struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
tty_port_tty_hangup(&ifx_dev->tty_port, false);
@@ -1016,8 +1016,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
spin_lock_init(&ifx_dev->write_lock);
spin_lock_init(&ifx_dev->power_lock);
ifx_dev->power_status = 0;
- setup_timer(&ifx_dev->spi_timer, ifx_spi_timeout,
- (unsigned long)ifx_dev);
+ timer_setup(&ifx_dev->spi_timer, ifx_spi_timeout, 0);
ifx_dev->modem = pl_data->modem_type;
ifx_dev->use_dma = pl_data->use_dma;
ifx_dev->max_hz = pl_data->max_hz;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a67a606c38eb..e4b3d9123a03 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -906,9 +906,9 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void imx_timeout(unsigned long data)
+static void imx_timeout(struct timer_list *t)
{
- struct imx_port *sport = (struct imx_port *)data;
+ struct imx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -2082,7 +2082,7 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.rs485_config = imx_rs485_config;
sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
sport->port.flags = UPF_BOOT_AUTOCONF;
- setup_timer(&sport->timer, imx_timeout, (unsigned long)sport);
+ timer_setup(&sport->timer, imx_timeout, 0);
sport->gpios = mctrl_gpio_init(&sport->port, 0);
if (IS_ERR(sport->gpios))
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index ed2b03058627..4029272891f9 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -188,9 +188,9 @@ bool kgdb_nmi_poll_knock(void)
* The tasklet is cheap, it does not cause wakeups when reschedules itself,
* instead it waits for the next tick.
*/
-static void kgdb_nmi_tty_receiver(unsigned long data)
+static void kgdb_nmi_tty_receiver(struct timer_list *t)
{
- struct kgdb_nmi_tty_priv *priv = (void *)data;
+ struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
char ch;
priv->timer.expires = jiffies + (HZ/100);
@@ -241,7 +241,7 @@ static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
return -ENOMEM;
INIT_KFIFO(priv->fifo);
- setup_timer(&priv->timer, kgdb_nmi_tty_receiver, (unsigned long)priv);
+ timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
tty_port_init(&priv->port);
priv->port.ops = &kgdb_nmi_tty_port_ops;
tty->driver_data = priv;
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 27d6049eb6a9..371569a0fd00 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -178,9 +178,9 @@ static void max3100_dowork(struct max3100_port *s)
queue_work(s->workqueue, &s->work);
}
-static void max3100_timeout(unsigned long data)
+static void max3100_timeout(struct timer_list *t)
{
- struct max3100_port *s = (struct max3100_port *)data;
+ struct max3100_port *s = from_timer(s, t, timer);
if (s->port.state) {
max3100_dowork(s);
@@ -780,8 +780,7 @@ static int max3100_probe(struct spi_device *spi)
max3100s[i]->poll_time = 1;
max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
max3100s[i]->minor = i;
- setup_timer(&max3100s[i]->timer, max3100_timeout,
- (unsigned long)max3100s[i]);
+ timer_setup(&max3100s[i]->timer, max3100_timeout, 0);
dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
max3100s[i]->port.irq = max3100s[i]->irq;
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 3b74369c262f..00ce31e8d19a 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -371,7 +371,7 @@ static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
*
* This function periodically polls the Serial MUX to check for new data.
*/
-static void mux_poll(unsigned long unused)
+static void mux_poll(struct timer_list *unused)
{
int i;
@@ -572,7 +572,7 @@ static int __init mux_init(void)
if(port_cnt > 0) {
/* Start the Mux timer */
- setup_timer(&mux_timer, mux_poll, 0UL);
+ timer_setup(&mux_timer, mux_poll, 0);
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index f8812389b8a8..223a9499104e 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -103,9 +103,9 @@ static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void pnx8xxx_timeout(unsigned long data)
+static void pnx8xxx_timeout(struct timer_list *t)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
+ struct pnx8xxx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -662,8 +662,7 @@ static void __init pnx8xxx_init_ports(void)
first = 0;
for (i = 0; i < NR_PORTS; i++) {
- setup_timer(&pnx8xxx_ports[i].timer, pnx8xxx_timeout,
- (unsigned long)&pnx8xxx_ports[i]);
+ timer_setup(&pnx8xxx_ports[i].timer, pnx8xxx_timeout, 0);
pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
}
}
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 4e3f169b30cf..a399772be3fc 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -110,9 +110,9 @@ static void sa1100_mctrl_check(struct sa1100_port *sport)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void sa1100_timeout(unsigned long data)
+static void sa1100_timeout(struct timer_list *t)
{
- struct sa1100_port *sport = (struct sa1100_port *)data;
+ struct sa1100_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -627,8 +627,7 @@ static void __init sa1100_init_ports(void)
sa1100_ports[i].port.fifosize = 8;
sa1100_ports[i].port.line = i;
sa1100_ports[i].port.iotype = UPIO_MEM;
- setup_timer(&sa1100_ports[i].timer, sa1100_timeout,
- (unsigned long)&sa1100_ports[i]);
+ timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0);
}
/*
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 31fcc7072a90..d9f399c4e90c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1058,9 +1058,9 @@ static int scif_rtrg_enabled(struct uart_port *port)
(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
}
-static void rx_fifo_timer_fn(unsigned long arg)
+static void rx_fifo_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_fifo_timer);
struct uart_port *port = &s->port;
dev_dbg(port->dev, "Rx timed out\n");
@@ -1138,8 +1138,7 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
sci->rx_fifo_timeout = r;
scif_set_rtrg(port, 1);
if (r > 0)
- setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)sci);
+ timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
}
return count;
@@ -1392,9 +1391,9 @@ static void work_fn_tx(struct work_struct *work)
dma_async_issue_pending(chan);
}
-static void rx_timer_fn(unsigned long arg)
+static void rx_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_timer);
struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port;
struct dma_tx_state state;
@@ -1572,7 +1571,7 @@ static void sci_request_dma(struct uart_port *port)
dma += s->buf_len_rx;
}
- setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+ timer_setup(&s->rx_timer, rx_timer_fn, 0);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
@@ -2238,8 +2237,7 @@ static void sci_reset(struct uart_port *port)
if (s->rx_trigger > 1) {
if (s->rx_fifo_timeout) {
scif_set_rtrg(port, 1);
- setup_timer(&s->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)s);
+ timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
} else {
if (port->type == PORT_SCIFA ||
port->type == PORT_SCIFB)
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index ed78542c4c37..42b9aded4eb1 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -612,9 +612,9 @@ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
* Obviously not used in interrupt mode
*
*/
-static void sn_sal_timer_poll(unsigned long data)
+static void sn_sal_timer_poll(struct timer_list *t)
{
- struct sn_cons_port *port = (struct sn_cons_port *)data;
+ struct sn_cons_port *port = from_timer(port, t, sc_timer);
unsigned long flags;
if (!port)
@@ -668,7 +668,7 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
* timer to poll for input and push data from the console
* buffer.
*/
- setup_timer(&port->sc_timer, sn_sal_timer_poll, (unsigned long)port);
+ timer_setup(&port->sc_timer, sn_sal_timer_poll, 0);
if (IS_RUNNING_ON_SIMULATOR())
port->sc_interrupt_timeout = 6;
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index f2c34d656144..3c4ad71f261d 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -700,7 +700,7 @@ static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_loopback_frame( struct mgsl_struct *info );
-static void mgsl_tx_timeout(unsigned long context);
+static void mgsl_tx_timeout(struct timer_list *t);
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
@@ -1768,7 +1768,7 @@ static int startup(struct mgsl_struct * info)
memset(&info->icount, 0, sizeof(info->icount));
- setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = mgsl_claim_resources(info);
@@ -7517,9 +7517,9 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
* Arguments: context pointer to device instance data
* Return Value: None
*/
-static void mgsl_tx_timeout(unsigned long context)
+static void mgsl_tx_timeout(struct timer_list *t)
{
- struct mgsl_struct *info = (struct mgsl_struct*)context;
+ struct mgsl_struct *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 06a03731bba7..255c49687877 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -493,8 +493,8 @@ static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
static int alloc_tmp_rbuf(struct slgt_info *info);
static void free_tmp_rbuf(struct slgt_info *info);
-static void tx_timeout(unsigned long context);
-static void rx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void rx_timeout(struct timer_list *t);
/*
* ioctl handlers
@@ -3597,8 +3597,8 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
info->adapter_num = adapter_num;
info->port_num = port_num;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->rx_timer, rx_timeout, 0);
/* Copy configuration info to device instance data */
info->pdev = pdev;
@@ -5112,9 +5112,9 @@ static int adapter_test(struct slgt_info *info)
/*
* transmit timeout handler
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, tx_timer);
unsigned long flags;
DBGINFO(("%s tx_timeout\n", info->device_name));
@@ -5136,9 +5136,9 @@ static void tx_timeout(unsigned long context)
/*
* receive buffer polling timer
*/
-static void rx_timeout(unsigned long context)
+static void rx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, rx_timer);
unsigned long flags;
DBGINFO(("%s rx_timeout\n", info->device_name));
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index d45f234e1914..75f11ce1f0a1 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -615,8 +615,8 @@ static void free_tmp_rx_buf(SLMP_INFO *info);
static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count);
static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit);
-static void tx_timeout(unsigned long context);
-static void status_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void status_timeout(struct timer_list *t);
static unsigned char read_reg(SLMP_INFO *info, unsigned char addr);
static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val);
@@ -3782,9 +3782,8 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
info->bus_type = MGSL_BUS_TYPE_PCI;
info->irq_flags = IRQF_SHARED;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->status_timer, status_timeout,
- (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->status_timer, status_timeout, 0);
/* Store the PCI9050 misc control register value because a flaw
* in the PCI9050 prevents LCR registers from being read if
@@ -5468,9 +5467,9 @@ static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
/* called when HDLC frame times out
* update stats and do tx completion processing
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5495,10 +5494,10 @@ static void tx_timeout(unsigned long context)
/* called to periodically check the DSR/RI modem signal input status
*/
-static void status_timeout(unsigned long context)
+static void status_timeout(struct timer_list *t)
{
u16 status = 0;
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, status_timer);
unsigned long flags;
unsigned char delta;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c8d90d7e7e37..5d412df8e943 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -244,7 +244,7 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
return 0;
}
-static void kd_nosound(unsigned long ignored)
+static void kd_nosound(struct timer_list *unused)
{
static unsigned int zero;
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 7a4c8022c023..af4da9507180 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -156,42 +156,34 @@ static int store_utf8(u16 c, char *p)
int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
- int sel_mode, new_sel_start, new_sel_end, spc;
+ int new_sel_start, new_sel_end, spc;
+ struct tiocl_selection v;
char *bp, *obp;
int i, ps, pe, multiplier;
u16 c;
int mode;
poke_blanked_console();
+ if (copy_from_user(&v, sel, sizeof(*sel)))
+ return -EFAULT;
- { unsigned short xs, ys, xe, ye;
+ v.xs = limit(v.xs - 1, vc->vc_cols - 1);
+ v.ys = limit(v.ys - 1, vc->vc_rows - 1);
+ v.xe = limit(v.xe - 1, vc->vc_cols - 1);
+ v.ye = limit(v.ye - 1, vc->vc_rows - 1);
+ ps = v.ys * vc->vc_size_row + (v.xs << 1);
+ pe = v.ye * vc->vc_size_row + (v.xe << 1);
- if (!access_ok(VERIFY_READ, sel, sizeof(*sel)))
- return -EFAULT;
- __get_user(xs, &sel->xs);
- __get_user(ys, &sel->ys);
- __get_user(xe, &sel->xe);
- __get_user(ye, &sel->ye);
- __get_user(sel_mode, &sel->sel_mode);
- xs--; ys--; xe--; ye--;
- xs = limit(xs, vc->vc_cols - 1);
- ys = limit(ys, vc->vc_rows - 1);
- xe = limit(xe, vc->vc_cols - 1);
- ye = limit(ye, vc->vc_rows - 1);
- ps = ys * vc->vc_size_row + (xs << 1);
- pe = ye * vc->vc_size_row + (xe << 1);
-
- if (sel_mode == TIOCL_SELCLEAR) {
- /* useful for screendump without selection highlights */
- clear_selection();
- return 0;
- }
-
- if (mouse_reporting() && (sel_mode & TIOCL_SELMOUSEREPORT)) {
- mouse_report(tty, sel_mode & TIOCL_SELBUTTONMASK, xs, ys);
- return 0;
- }
- }
+ if (v.sel_mode == TIOCL_SELCLEAR) {
+ /* useful for screendump without selection highlights */
+ clear_selection();
+ return 0;
+ }
+
+ if (mouse_reporting() && (v.sel_mode & TIOCL_SELMOUSEREPORT)) {
+ mouse_report(tty, v.sel_mode & TIOCL_SELBUTTONMASK, v.xs, v.ys);
+ return 0;
+ }
if (ps > pe) /* make sel_start <= sel_end */
{
@@ -210,7 +202,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
else
use_unicode = 0;
- switch (sel_mode)
+ switch (v.sel_mode)
{
case TIOCL_SELCHAR: /* character-by-character selection */
new_sel_start = ps;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index bce4c71cb338..88b902c525d7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -158,7 +158,7 @@ static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
-static void blank_screen_t(unsigned long dummy);
+static void blank_screen_t(struct timer_list *unused);
static void set_palette(struct vc_data *vc);
#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
@@ -3929,7 +3929,7 @@ void unblank_screen(void)
* (console operations can still happen at irq time, but only from printk which
* has the console mutex. Not perfect yet, but better than no locking
*/
-static void blank_screen_t(unsigned long dummy)
+static void blank_screen_t(struct timer_list *unused)
{
blank_timer_expired = 1;
schedule_work(&console_work);
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 2d2b420598b2..d61be307256a 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -843,58 +843,44 @@ int vt_ioctl(struct tty_struct *tty,
case VT_RESIZEX:
{
- struct vt_consize __user *vtconsize = up;
- ushort ll,cc,vlin,clin,vcol,ccol;
+ struct vt_consize v;
if (!perm)
return -EPERM;
- if (!access_ok(VERIFY_READ, vtconsize,
- sizeof(struct vt_consize))) {
- ret = -EFAULT;
- break;
- }
+ if (copy_from_user(&v, up, sizeof(struct vt_consize)))
+ return -EFAULT;
/* FIXME: Should check the copies properly */
- __get_user(ll, &vtconsize->v_rows);
- __get_user(cc, &vtconsize->v_cols);
- __get_user(vlin, &vtconsize->v_vlin);
- __get_user(clin, &vtconsize->v_clin);
- __get_user(vcol, &vtconsize->v_vcol);
- __get_user(ccol, &vtconsize->v_ccol);
- vlin = vlin ? vlin : vc->vc_scan_lines;
- if (clin) {
- if (ll) {
- if (ll != vlin/clin) {
- /* Parameters don't add up */
- ret = -EINVAL;
- break;
- }
- } else
- ll = vlin/clin;
+ if (!v.v_vlin)
+ v.v_vlin = vc->vc_scan_lines;
+ if (v.v_clin) {
+ int rows = v.v_vlin/v.v_clin;
+ if (v.v_rows != rows) {
+ if (v.v_rows) /* Parameters don't add up */
+ return -EINVAL;
+ v.v_rows = rows;
+ }
}
- if (vcol && ccol) {
- if (cc) {
- if (cc != vcol/ccol) {
- ret = -EINVAL;
- break;
- }
- } else
- cc = vcol/ccol;
+ if (v.v_vcol && v.v_ccol) {
+ int cols = v.v_vcol/v.v_ccol;
+ if (v.v_cols != cols) {
+ if (v.v_cols)
+ return -EINVAL;
+ v.v_cols = cols;
+ }
}
- if (clin > 32) {
- ret = -EINVAL;
- break;
- }
-
+ if (v.v_clin > 32)
+ return -EINVAL;
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons[i].d)
continue;
console_lock();
- if (vlin)
- vc_cons[i].d->vc_scan_lines = vlin;
- if (clin)
- vc_cons[i].d->vc_font.height = clin;
+ if (v.v_vlin)
+ vc_cons[i].d->vc_scan_lines = v.v_vlin;
+ if (v.v_clin)
+ vc_cons[i].d->vc_font.height = v.v_clin;
vc_cons[i].d->vc_resize_user = 1;
- vc_resize(vc_cons[i].d, cc, ll);
+ vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
console_unlock();
}
break;
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 6470d259b7d8..8af797252af2 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -547,21 +547,30 @@ static void cxacru_blocking_completion(struct urb *urb)
complete(urb->context);
}
-static void cxacru_timeout_kill(unsigned long data)
+struct cxacru_timer {
+ struct timer_list timer;
+ struct urb *urb;
+};
+
+static void cxacru_timeout_kill(struct timer_list *t)
{
- usb_unlink_urb((struct urb *) data);
+ struct cxacru_timer *timer = from_timer(timer, t, timer);
+
+ usb_unlink_urb(timer->urb);
}
static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
int *actual_length)
{
- struct timer_list timer;
+ struct cxacru_timer timer = {
+ .urb = urb,
+ };
- setup_timer(&timer, cxacru_timeout_kill, (unsigned long)urb);
- timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
- add_timer(&timer);
+ timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0);
+ mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
wait_for_completion(done);
- del_timer_sync(&timer);
+ del_timer_sync(&timer.timer);
+ destroy_timer_on_stack(&timer.timer);
if (actual_length)
*actual_length = urb->actual_length;
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 5a5e8c0aaa39..973548b5c15c 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -557,9 +557,10 @@ static void speedtch_check_status(struct work_struct *work)
}
}
-static void speedtch_status_poll(unsigned long data)
+static void speedtch_status_poll(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ status_check_timer);
schedule_work(&instance->status_check_work);
@@ -570,9 +571,10 @@ static void speedtch_status_poll(unsigned long data)
atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
}
-static void speedtch_resubmit_int(unsigned long data)
+static void speedtch_resubmit_int(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ resubmit_timer);
struct urb *int_urb = instance->int_urb;
int ret;
@@ -860,13 +862,11 @@ static int speedtch_bind(struct usbatm_data *usbatm,
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
- setup_timer(&instance->status_check_timer, speedtch_status_poll,
- (unsigned long)instance);
+ timer_setup(&instance->status_check_timer, speedtch_status_poll, 0);
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;
- setup_timer(&instance->resubmit_timer, speedtch_resubmit_int,
- (unsigned long)instance);
+ timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0);
instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 044264aa1f96..dbea28495e1d 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -989,18 +989,18 @@ static int usbatm_heavy_init(struct usbatm_data *instance)
return 0;
}
-static void usbatm_tasklet_schedule(unsigned long data)
+static void usbatm_tasklet_schedule(struct timer_list *t)
{
- tasklet_schedule((struct tasklet_struct *) data);
+ struct usbatm_channel *channel = from_timer(channel, t, delay);
+
+ tasklet_schedule(&channel->tasklet);
}
static void usbatm_init_channel(struct usbatm_channel *channel)
{
spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->list);
- channel->delay.function = usbatm_tasklet_schedule;
- channel->delay.data = (unsigned long) &channel->tasklet;
- init_timer(&channel->delay);
+ timer_setup(&channel->delay, usbatm_tasklet_schedule, 0);
}
int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 8b351444cc40..9a2ab6751a23 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -180,9 +180,9 @@ static int ulpi_of_register(struct ulpi *ulpi)
/* Find a ulpi bus underneath the parent or the grandparent */
parent = ulpi->dev.parent;
if (parent->of_node)
- np = of_find_node_by_name(parent->of_node, "ulpi");
+ np = of_get_child_by_name(parent->of_node, "ulpi");
else if (parent->parent && parent->parent->of_node)
- np = of_find_node_by_name(parent->parent->of_node, "ulpi");
+ np = of_get_child_by_name(parent->parent->of_node, "ulpi");
if (!np)
return 0;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index da8acd980fc6..78e92d29f8d9 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
unsigned iad_num = 0;
memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+ nintf = nintf_orig = config->desc.bNumInterfaces;
+ config->desc.bNumInterfaces = 0; // Adjusted later
+
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
config->desc.bLength < USB_DT_CONFIG_SIZE ||
config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
buffer += config->desc.bLength;
size -= config->desc.bLength;
- nintf = nintf_orig = config->desc.bNumInterfaces;
if (nintf > USB_MAXINTERFACES) {
dev_warn(ddev, "config %d has too many interfaces: %d, "
"using maximum allowed: %d\n",
@@ -905,14 +907,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
}
}
+static const __u8 bos_desc_len[256] = {
+ [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
+ [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
+ [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
+ [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
+ [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
+ [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
+};
+
/* Get BOS descriptor set */
int usb_get_bos_descriptor(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
+ struct usb_ssp_cap_descriptor *ssp_cap;
unsigned char *buffer;
- int length, total_len, num, i;
+ int length, total_len, num, i, ssac;
+ __u8 cap_type;
int ret;
bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -965,7 +978,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
dev->bos->desc->bNumDeviceCaps = i;
break;
}
+ cap_type = cap->bDevCapabilityType;
length = cap->bLength;
+ if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
+ dev->bos->desc->bNumDeviceCaps = i;
+ break;
+ }
+
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -973,7 +992,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
continue;
}
- switch (cap->bDevCapabilityType) {
+ switch (cap_type) {
case USB_CAP_TYPE_WIRELESS_USB:
/* Wireless USB cap descriptor is handled by wusb */
break;
@@ -986,8 +1005,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
(struct usb_ss_cap_descriptor *)buffer;
break;
case USB_SSP_CAP_TYPE:
- dev->bos->ssp_cap =
- (struct usb_ssp_cap_descriptor *)buffer;
+ ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+ ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+ USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+ if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+ dev->bos->ssp_cap = ssp_cap;
break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 705c573d0257..a3fad4ec9870 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1442,14 +1442,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
-
- if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
- USBDEVFS_URB_SHORT_NOT_OK |
+ unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
USBDEVFS_URB_ZERO_PACKET |
- USBDEVFS_URB_NO_INTERRUPT))
- return -EINVAL;
+ USBDEVFS_URB_NO_INTERRUPT;
+ /* USBDEVFS_URB_ISO_ASAP is a special case */
+ if (uurb->type == USBDEVFS_URB_TYPE_ISO)
+ mask |= USBDEVFS_URB_ISO_ASAP;
+
+ if (uurb->flags & ~mask)
+ return -EINVAL;
+
if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
return -EINVAL;
if (uurb->buffer_length > 0 && !uurb->buffer)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 19b5c4afeef2..fc32391a34d5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -788,9 +788,11 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
/* timer callback */
-static void rh_timer_func (unsigned long _hcd)
+static void rh_timer_func (struct timer_list *t)
{
- usb_hcd_poll_rh_status((struct usb_hcd *) _hcd);
+ struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
+
+ usb_hcd_poll_rh_status(_hcd);
}
/*-------------------------------------------------------------------------*/
@@ -2545,7 +2547,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
hcd->self.bus_name = bus_name;
hcd->self.uses_dma = (sysdev->dma_mask != NULL);
- setup_timer(&hcd->rh_timer, rh_timer_func, (unsigned long)hcd);
+ timer_setup(&hcd->rh_timer, rh_timer_func, 0);
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 7ccdd3d4db84..cf7bbcb9a63c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4948,6 +4948,15 @@ loop:
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
+
+ /* When halfway through our retry count, power-cycle the port */
+ if (i == (SET_CONFIG_TRIES / 2) - 1) {
+ dev_info(&port_dev->dev, "attempt power cycle\n");
+ usb_hub_set_port_power(hdev, hub, port1, false);
+ msleep(2 * hub_power_on_good_delay(hub));
+ usb_hub_set_port_power(hdev, hub, port1, true);
+ msleep(hub_power_on_good_delay(hub));
+ }
}
if (hub->hdev->parent ||
!hcd->driver->port_handed_over ||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f1dbab6f798f..a10b346b9777 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -146,6 +146,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+ { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index f66c94130cac..31749c79045f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -537,6 +537,7 @@ struct dwc2_core_params {
* 2 - Internal DMA
* @power_optimized Are power optimizations enabled?
* @num_dev_ep Number of device endpoints available
+ * @num_dev_in_eps Number of device IN endpoints available
* @num_dev_perio_in_ep Number of device periodic IN endpoints
* available
* @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
* 2 - 8 or 16 bits
* @snpsid: Value from SNPSID register
* @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
+ * @g_tx_fifo_size[] Power-on values of TxFIFO sizes
*/
struct dwc2_hw_params {
unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
unsigned fs_phy_type:2;
unsigned i2c_enable:1;
unsigned num_dev_ep:4;
+ unsigned num_dev_in_eps : 4;
unsigned num_dev_perio_in_ep:4;
unsigned total_fifo_size:16;
unsigned power_optimized:1;
unsigned utmi_phy_data_width:2;
u32 snpsid;
u32 dev_ep_dirs;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
/* Size of control and EP0 buffers */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 88529d092503..e4c3ce0de5de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
if (hsotg->hw_params.en_multiple_tx_fifo)
/* In dedicated FIFO mode we need count of IN EPs */
- return (dwc2_readl(hsotg->regs + GHWCFG4) &
- GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
+ return hsotg->hw_params.num_dev_in_eps;
else
/* In shared FIFO mode we need count of Periodic IN EPs */
return hsotg->hw_params.num_dev_perio_in_ep;
}
/**
- * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
- */
-static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
-{
- int val = 0;
- int i;
- u32 ep_dirs;
-
- /*
- * Don't need additional space for ep info control registers in
- * slave mode.
- */
- if (!using_dma(hsotg)) {
- dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
- return 0;
- }
-
- /*
- * Buffer DMA mode - 1 location per endpoit
- * Descriptor DMA mode - 4 locations per endpoint
- */
- ep_dirs = hsotg->hw_params.dev_ep_dirs;
-
- for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
- val += ep_dirs & 3 ? 1 : 2;
- ep_dirs >>= 2;
- }
-
- if (using_desc_dma(hsotg))
- val = val * 4;
-
- return val;
-}
-
-/**
* dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
* device mode TX FIFOs
*/
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
- int ep_info_size;
int addr;
int tx_addr_max;
u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
hsotg->params.g_np_tx_fifo_size);
/* Get Endpoint Info Control block size in DWORDs. */
- ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
- tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
+ tx_addr_max = hsotg->hw_params.total_fifo_size;
addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
if (tx_addr_max <= addr)
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 69eb40cd1b47..7b6eb0ad513b 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3314,9 +3314,9 @@ host:
}
}
-static void dwc2_wakeup_detected(unsigned long data)
+static void dwc2_wakeup_detected(struct timer_list *t)
{
- struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
+ struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
u32 hprt0;
dev_dbg(hsotg->dev, "%s()\n", __func__);
@@ -5155,8 +5155,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
}
INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
- setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
- (unsigned long)hsotg);
+ timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
/* Initialize the non-periodic schedule */
INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index f472de238ac2..fcd1676c7f0b 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1275,9 +1275,9 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*
* @work: Pointer to a qh unreserve_work.
*/
-static void dwc2_unreserve_timer_fn(unsigned long data)
+static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
- struct dwc2_qh *qh = (struct dwc2_qh *)data;
+ struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
@@ -1467,8 +1467,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
- setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
- (unsigned long)qh);
+ timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index ef73af6e03a9..03fd20f0b496 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
}
for (fifo = 1; fifo <= fifo_count; fifo++) {
- dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
- FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+ dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
if (hsotg->params.g_tx_fifo_size[fifo] < min ||
hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
struct dwc2_hw_params *hw = &hsotg->hw_params;
bool forced;
u32 gnptxfsiz;
+ int fifo, fifo_count;
if (hsotg->dr_mode == USB_DR_MODE_HOST)
return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+ for (fifo = 1; fifo <= fifo_count; fifo++) {
+ hw->g_tx_fifo_size[fifo] =
+ (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
+ FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+ }
+
if (forced)
dwc2_clear_force_mode(hsotg);
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
- /*
- * Host specific hardware parameters. Reading these parameters
- * requires the controller to be in host mode. The mode will
- * be forced, if necessary, to read these values.
- */
- dwc2_get_host_hwparams(hsotg);
- dwc2_get_dev_hwparams(hsotg);
-
/* hwcfg1 */
hw->dev_ep_dirs = hwcfg1;
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
+ GHWCFG4_NUM_IN_EPS_SHIFT;
hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
GRXFSIZ_DEPTH_SHIFT;
+ /*
+ * Host specific hardware parameters. Reading these parameters
+ * requires the controller to be in host mode. The mode will
+ * be forced, if necessary, to read these values.
+ */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index c4a4d7bd2766..7ae0eefc7cc7 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
clk = of_clk_get(np, i);
if (IS_ERR(clk)) {
- while (--i >= 0)
+ while (--i >= 0) {
+ clk_disable_unprepare(simple->clks[i]);
clk_put(simple->clks[i]);
+ }
return PTR_ERR(clk);
}
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
.driver = {
.name = "dwc3-of-simple",
.of_match_table = of_dwc3_simple_match,
+ .pm = &dwc3_of_simple_dev_pm_ops,
},
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 981fd986cf82..639dd1b163a0 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
- u32 timeout = 500;
+ u32 timeout = 1000;
u32 reg;
int cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
*/
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
- unsigned int mult = ep->mult - 1;
+ unsigned int mult = 2;
unsigned int maxp = usb_endpoint_maxp(ep->desc);
if (length <= (2 * maxp))
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index eec14e6ed20b..77c7ecca816a 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -146,7 +146,6 @@ int config_ep_by_speed(struct usb_gadget *g,
struct usb_function *f,
struct usb_ep *_ep)
{
- struct usb_composite_dev *cdev = get_gadget_data(g);
struct usb_endpoint_descriptor *chosen_desc = NULL;
struct usb_descriptor_header **speed_desc = NULL;
@@ -226,8 +225,12 @@ ep_found:
_ep->maxburst = comp_desc->bMaxBurst + 1;
break;
default:
- if (comp_desc->bMaxBurst != 0)
+ if (comp_desc->bMaxBurst != 0) {
+ struct usb_composite_dev *cdev;
+
+ cdev = get_gadget_data(g);
ERROR(cdev, "ep0 bMaxBurst must be 0\n");
+ }
_ep->maxburst = 1;
break;
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 97ea059a7aa4..b6cf5ab5a0a1 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1012,7 +1012,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
else
ret = ep->status;
goto error_mutex;
- } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+ } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
ret = -ENOMEM;
} else {
req->buf = data;
@@ -2282,9 +2282,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
int i;
if (len < sizeof(*d) ||
- d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- !d->Reserved1)
+ d->bFirstInterfaceNumber >= ffs->interfaces_count)
return -EINVAL;
+ if (d->Reserved1 != 1) {
+ /*
+ * According to the spec, Reserved1 must be set to 1
+ * but older kernels incorrectly rejected non-zero
+ * values. We fix it here to avoid returning EINVAL
+ * in response to values we used to accept.
+ */
+ pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
+ d->Reserved1 = 1;
+ }
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
return -EINVAL;
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index a12fb459dbd9..784bf86dad4f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -479,7 +479,7 @@ endif
# or video class gadget drivers), or specific hardware, here.
config USB_G_WEBCAM
tristate "USB Webcam Gadget"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
select USB_LIBCOMPOSITE
select VIDEOBUF2_VMALLOC
select USB_F_UVC
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index bfe278294e88..ad743a8493be 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1550,9 +1550,9 @@ static void at91_vbus_timer_work(struct work_struct *work)
mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
}
-static void at91_vbus_timer(unsigned long data)
+static void at91_vbus_timer(struct timer_list *t)
{
- struct at91_udc *udc = (struct at91_udc *)data;
+ struct at91_udc *udc = from_timer(udc, t, vbus_timer);
/*
* If we are polling vbus it is likely that the gpio is on an
@@ -1918,8 +1918,7 @@ static int at91udc_probe(struct platform_device *pdev)
if (udc->board.vbus_polled) {
INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
- setup_timer(&udc->vbus_timer, at91_vbus_timer,
- (unsigned long)udc);
+ timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
mod_timer(&udc->vbus_timer,
jiffies + VBUS_POLL_TIMEOUT);
} else {
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index d39f070acbd7..01b44e159623 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -642,7 +642,6 @@ static const struct of_device_id bdc_of_match[] = {
static struct platform_driver bdc_driver = {
.driver = {
.name = BRCM_BDC_NAME,
- .owner = THIS_MODULE,
.pm = &bdc_pm_ops,
.of_match_table = bdc_of_match,
},
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 61422d624ad0..93eff7dec2f5 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1069,8 +1069,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
enum usb_device_speed speed)
{
- if (udc->gadget->ops->udc_set_speed)
- udc->gadget->ops->udc_set_speed(udc->gadget, speed);
+ if (udc->gadget->ops->udc_set_speed) {
+ enum usb_device_speed s;
+
+ s = min(speed, udc->gadget->max_speed);
+ udc->gadget->ops->udc_set_speed(udc->gadget, s);
+ }
}
/**
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 4f1b1809472c..d0128f92ec5a 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1771,9 +1771,9 @@ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
/* drive both sides of the transfers; looks like irq handlers to
* both drivers except the callbacks aren't in_irq().
*/
-static void dummy_timer(unsigned long _dum_hcd)
+static void dummy_timer(struct timer_list *t)
{
- struct dummy_hcd *dum_hcd = (struct dummy_hcd *) _dum_hcd;
+ struct dummy_hcd *dum_hcd = from_timer(dum_hcd, t, timer);
struct dummy *dum = dum_hcd->dum;
struct urbp *urbp, *tmp;
unsigned long flags;
@@ -2445,7 +2445,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2474,7 +2474,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index f19e6282a688..a8288df6aadf 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1259,9 +1259,9 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
return IRQ_HANDLED;
}
-static void m66592_timer(unsigned long _m66592)
+static void m66592_timer(struct timer_list *t)
{
- struct m66592 *m66592 = (struct m66592 *)_m66592;
+ struct m66592 *m66592 = from_timer(m66592, t, timer);
unsigned long flags;
u16 tmp;
@@ -1589,7 +1589,7 @@ static int m66592_probe(struct platform_device *pdev)
m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.name = udc_name;
- setup_timer(&m66592->timer, m66592_timer, (unsigned long)m66592);
+ timer_setup(&m66592->timer, m66592_timer, 0);
m66592->reg = reg;
ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index fc7f810baef7..dc35a54bad90 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1854,9 +1854,9 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
-static void pio_out_timer(unsigned long _ep)
+static void pio_out_timer(struct timer_list *t)
{
- struct omap_ep *ep = (void *) _ep;
+ struct omap_ep *ep = from_timer(ep, t, timer);
unsigned long flags;
u16 stat_flg;
@@ -2542,9 +2542,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
}
if (dbuf && addr)
epn_rxtx |= UDC_EPN_RX_DB;
- init_timer(&ep->timer);
- ep->timer.function = pio_out_timer;
- ep->timer.data = (unsigned long) ep;
+ timer_setup(&ep->timer, pio_out_timer, 0);
}
if (addr)
epn_rxtx |= UDC_EPN_RX_VALID;
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 8f135d9fa245..0e3f5faa000e 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1624,9 +1624,9 @@ static inline void clear_ep_state (struct pxa25x_udc *dev)
nuke(&dev->ep[i], -ECONNABORTED);
}
-static void udc_watchdog(unsigned long _dev)
+static void udc_watchdog(struct timer_list *t)
{
- struct pxa25x_udc *dev = (void *)_dev;
+ struct pxa25x_udc *dev = from_timer(dev, t, timer);
local_irq_disable();
if (dev->ep0state == EP0_STALL
@@ -2413,7 +2413,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
gpio_direction_output(dev->mach->gpio_pullup, 0);
}
- setup_timer(&dev->timer, udc_watchdog, (unsigned long)dev);
+ timer_setup(&dev->timer, udc_watchdog, 0);
the_controller = dev;
platform_set_drvdata(pdev, dev);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 143122ed3c66..a3ecce62662b 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1514,9 +1514,9 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
return IRQ_HANDLED;
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
unsigned long flags;
u16 tmp;
@@ -1874,7 +1874,7 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.name = udc_name;
- setup_timer(&r8a66597->timer, r8a66597_timer, (unsigned long)r8a66597);
+ timer_setup(&r8a66597->timer, r8a66597_timer, 0);
r8a66597->reg = reg;
if (r8a66597->pdata->on_chip) {
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index bc37f40baacf..6e87af248367 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -252,7 +252,7 @@
#define USB3_EP0_SS_MAX_PACKET_SIZE 512
#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
#define USB3_EP0_BUF_SIZE 8
-#define USB3_MAX_NUM_PIPES 30
+#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */
#define USB3_WAIT_US 3
#define USB3_DMA_NUM_SETTING_AREA 4
/*
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 19f00424f53e..3ed75aaa09d9 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -827,7 +827,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
default: /* unknown */
break;
}
- temp = (cap >> 8) & 0xff;
+ offset = (cap >> 8) & 0xff;
}
}
#endif
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 10887e09e9bc..ee9676349333 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -80,7 +80,7 @@ static const char hcd_name [] = "ohci_hcd";
static void ohci_dump(struct ohci_hcd *ohci);
static void ohci_stop(struct usb_hcd *hcd);
-static void io_watchdog_func(unsigned long _ohci);
+static void io_watchdog_func(struct timer_list *t);
#include "ohci-hub.c"
#include "ohci-dbg.c"
@@ -500,8 +500,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (ohci->hcca)
return 0;
- setup_timer(&ohci->io_watchdog, io_watchdog_func,
- (unsigned long) ohci);
+ timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -723,9 +722,9 @@ static int ohci_start(struct usb_hcd *hcd)
* the unlink list. As a result, URBs could never be dequeued and
* endpoints could never be released.
*/
-static void io_watchdog_func(unsigned long _ohci)
+static void io_watchdog_func(struct timer_list *t)
{
- struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
+ struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
bool takeback_all_pending = false;
u32 status;
u32 head;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 0bf7759aae78..c5e6e8d0b5ef 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2539,9 +2539,9 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd)
return ret;
}
-static void oxu_watchdog(unsigned long param)
+static void oxu_watchdog(struct timer_list *t)
{
- struct oxu_hcd *oxu = (struct oxu_hcd *) param;
+ struct oxu_hcd *oxu = from_timer(oxu, t, watchdog);
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
@@ -2577,7 +2577,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
spin_lock_init(&oxu->lock);
- setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
+ timer_setup(&oxu->watchdog, oxu_watchdog, 0);
/*
* hw default: 1K periodic list heads, one per frame.
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f3d9ba420a97..984892dd72f5 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1798,9 +1798,9 @@ static void r8a66597_td_timer(struct timer_list *t)
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
unsigned long flags;
int port;
@@ -2472,8 +2472,7 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->max_root_hub = 2;
spin_lock_init(&r8a66597->lock);
- setup_timer(&r8a66597->rh_timer, r8a66597_timer,
- (unsigned long)r8a66597);
+ timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
r8a66597->reg = reg;
/* make sure no interrupts are pending */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 601fb00603cc..fa88a903fa2e 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1119,9 +1119,9 @@ sl811h_hub_descriptor (
}
static void
-sl811h_timer(unsigned long _sl811)
+sl811h_timer(struct timer_list *t)
{
- struct sl811 *sl811 = (void *) _sl811;
+ struct sl811 *sl811 = from_timer(sl811, t, timer);
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
@@ -1692,7 +1692,7 @@ sl811h_probe(struct platform_device *dev)
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
sl811->board = dev_get_platdata(&dev->dev);
- setup_timer(&sl811->timer, sl811h_timer, (unsigned long)sl811);
+ timer_setup(&sl811->timer, sl811h_timer, 0);
sl811->addr_reg = addr_reg;
sl811->data_reg = data_reg;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index babeefd84ffd..f5c90217777a 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -585,8 +585,7 @@ static int uhci_start(struct usb_hcd *hcd)
hcd->self.sg_tablesize = ~0;
spin_lock_init(&uhci->lock);
- setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
- (unsigned long) uhci);
+ timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 49d4edc03cc2..d40438238938 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -90,9 +90,9 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
}
}
-static void uhci_fsbr_timeout(unsigned long _uhci)
+static void uhci_fsbr_timeout(struct timer_list *t)
{
- struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
+ struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
unsigned long flags;
spin_lock_irqsave(&uhci->lock, flags);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index e1fba4688509..3a29b32a3bd0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -934,6 +934,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
if (!vdev)
return;
+ if (vdev->real_port == 0 ||
+ vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+ xhci_dbg(xhci, "Bad vdev->real_port.\n");
+ goto out;
+ }
+
tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
@@ -947,6 +953,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
}
}
}
+out:
/* we are now at a leaf device */
xhci_debugfs_remove_slot(xhci, slot_id);
xhci_free_virt_device(xhci, slot_id);
@@ -964,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
return 0;
}
- xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
- if (!xhci->devs[slot_id])
+ dev = kzalloc(sizeof(*dev), flags);
+ if (!dev)
return 0;
- dev = xhci->devs[slot_id];
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1008,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
trace_xhci_alloc_virt_device(dev);
+ xhci->devs[slot_id] = dev;
+
return 1;
fail:
- xhci_free_virt_device(xhci, slot_id);
+
+ if (dev->in_ctx)
+ xhci_free_container_ctx(xhci, dev->in_ctx);
+ if (dev->out_ctx)
+ xhci_free_container_ctx(xhci, dev->out_ctx);
+ kfree(dev);
+
return 0;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c239c688076c..c5cbc685c691 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2477,12 +2477,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
if (list_empty(&ep_ring->td_list)) {
/*
- * A stopped endpoint may generate an extra completion
- * event if the device was suspended. Don't print
- * warnings.
+ * Don't print wanings if it's due to a stopped endpoint
+ * generating an extra completion event if the device
+ * was suspended. Or, a event for the last TRB of a
+ * short TD we already got a short event for.
+ * The short TD is already removed from the TD list.
*/
+
if (!(trb_comp_code == COMP_STOPPED ||
- trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+ trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+ ep_ring->last_td_was_short)) {
xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
@@ -3108,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
{
u32 maxp, total_packet_count;
- /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
+ /* MTK xHCI 0.96 contains some features from 1.0 */
if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
return ((td_total_len - transferred) >> 10);
@@ -3117,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
trb_buff_len == td_total_len)
return 0;
- /* for MTK xHCI, TD size doesn't include this TRB */
- if (xhci->quirks & XHCI_MTK_HOST)
+ /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
+ if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
trb_buff_len = 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 327ba8b8a98b..2424d3020ca3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -395,14 +395,14 @@ static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
#endif
-static void compliance_mode_recovery(unsigned long arg)
+static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
u32 temp;
int i;
- xhci = (struct xhci_hcd *)arg;
+ xhci = from_timer(xhci, t, comp_mode_recovery_timer);
for (i = 0; i < xhci->num_usb3_ports; i++) {
temp = readl(xhci->usb3_ports[i]);
@@ -443,8 +443,8 @@ static void compliance_mode_recovery(unsigned long arg)
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
xhci->port_status_u0 = 0;
- setup_timer(&xhci->comp_mode_recovery_timer,
- compliance_mode_recovery, (unsigned long)xhci);
+ timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
+ 0);
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 0397606a211b..6c036de63272 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&musb->dev_timer);
- } else {
+ } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+ /*
+ * When babble condition happens, drvvbus interrupt
+ * is also generated. Ignore this drvvbus interrupt
+ * and let babble interrupt handler recovers the
+ * controller; otherwise, the host-mode flag is lost
+ * due to the MUSB_DEV_MODE() call below and babble
+ * recovery logic will not be called.
+ */
musb->is_active = 0;
MUSB_DEV_MODE(musb);
otg->default_a = 0;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a859c2d33c29..fdceb46d9fc6 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -555,9 +555,9 @@ static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
val, reg, NULL, 0, MOS_WDR_TIMEOUT);
}
-static void mos7840_led_off(unsigned long arg)
+static void mos7840_led_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
/* Turn off LED */
mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
@@ -565,9 +565,9 @@ static void mos7840_led_off(unsigned long arg)
jiffies + msecs_to_jiffies(LED_OFF_MS));
}
-static void mos7840_led_flag_off(unsigned long arg)
+static void mos7840_led_flag_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}
@@ -2289,12 +2289,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
goto error;
}
- setup_timer(&mos7840_port->led_timer1, mos7840_led_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0);
mos7840_port->led_timer1.expires =
jiffies + msecs_to_jiffies(LED_ON_MS);
- setup_timer(&mos7840_port->led_timer2, mos7840_led_flag_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off,
+ 0);
mos7840_port->led_timer2.expires =
jiffies + msecs_to_jiffies(LED_OFF_MS);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index aaa7d901a06d..3b3513874cfd 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
+#define QUECTEL_PRODUCT_BG96 0x0296
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1182,6 +1183,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index ab5a2ac4993a..aaf4813e4971 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -31,12 +31,14 @@ static const struct usb_device_id id_table[] = {
};
static const struct usb_device_id dbc_id_table[] = {
+ { USB_DEVICE(0x1d6b, 0x0010) },
{ USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(0x0525, 0x127a) },
+ { USB_DEVICE(0x1d6b, 0x0010) },
{ USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 48e2e32c97e8..31b024441938 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -751,9 +751,9 @@ static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
}
-static void rts51x_suspend_timer_fn(unsigned long data)
+static void rts51x_suspend_timer_fn(struct timer_list *t)
{
- struct rts51x_chip *chip = (struct rts51x_chip *)data;
+ struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
struct us_data *us = chip->us;
switch (rts51x_get_stat(chip)) {
@@ -917,8 +917,7 @@ static int realtek_cr_autosuspend_setup(struct us_data *us)
us->proto_handler = rts51x_invoke_transport;
chip->timer_expires = 0;
- setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
- (unsigned long)chip);
+ timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0);
fw5895_init(us);
/* enable autosuspend function of the usb device */
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 1fcd758a961f..3734a25e09e5 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
}
}
+ /* All Seagate disk enclosures have broken ATA pass-through support */
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+ flags |= US_FL_NO_ATA_1X;
+
usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS) {
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2968046e7c05..f72d045ee9ef 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ "JMicron",
+ "JMS567",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA),
+
/*
* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
* JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d520374a824e..e6127fb21c12 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ "JMicron",
+ "JMS567",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 465d7da849c3..bcb2744c5977 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -1,13 +1,53 @@
-menu "USB Power Delivery and Type-C drivers"
+menuconfig TYPEC
+ tristate "USB Type-C Support"
+ help
+ USB Type-C Specification defines a cable and connector for USB where
+ only one type of plug is supported on both ends, i.e. there will not
+ be Type-A plug on one end of the cable and Type-B plug on the other.
+ Determination of the host-to-device relationship happens through a
+ specific Configuration Channel (CC) which goes through the USB Type-C
+ cable. The Configuration Channel may also be used to detect optional
+ Accessory Modes - Analog Audio and Debug - and if USB Power Delivery
+ is supported, the Alternate Modes, where the connector is used for
+ something else then USB communication.
+
+ USB Power Delivery Specification defines a protocol that can be used
+ to negotiate the voltage and current levels with the connected
+ partners. USB Power Delivery allows higher voltages then the normal
+ 5V, up to 20V, and current up to 5A over the cable. The USB Power
+ Delivery protocol is also used to negotiate the optional Alternate
+ Modes when they are supported. USB Power Delivery does not depend on
+ USB Type-C connector, however it is mostly used together with USB
+ Type-C connectors.
+
+ USB Type-C and USB Power Delivery Specifications define a set of state
+ machines that need to be implemented in either software or firmware.
+ Simple USB Type-C PHYs, for example USB Type-C Port Controller
+ Interface Specification compliant "Port Controllers" need the state
+ machines to be handled in the OS, but stand-alone USB Type-C and Power
+ Delivery controllers handle the state machines inside their firmware.
+ The USB Type-C and Power Delivery controllers usually function
+ autonomously, and do not necessarily require drivers.
+
+ Enable this configurations option if you have USB Type-C connectors on
+ your system and 1) you know your USB Type-C hardware requires OS
+ control (a driver) to function, or 2) if you need to be able to read
+ the status of the USB Type-C ports in your system, or 3) if you need
+ to be able to swap the power role (decide are you supplying or
+ consuming power over the cable) or data role (host or device) when
+ both roles are supported.
+
+ For more information, see the kernel documentation for USB Type-C
+ Connector Class API (Documentation/driver-api/usb/typec.rst)
+ <https://www.kernel.org/doc/html/latest/driver-api/usb/typec.html>
+ and ABI (Documentation/ABI/testing/sysfs-class-typec).
-config TYPEC
- tristate
+if TYPEC
config TYPEC_TCPM
tristate "USB Type-C Port Controller Manager"
depends on USB
- select TYPEC
help
The Type-C Port Controller Manager provides a USB PD and USB Type-C
state machine for use with Type-C Port Controllers.
@@ -22,7 +62,6 @@ config TYPEC_WCOVE
depends on INTEL_SOC_PMIC
depends on INTEL_PMC_IPC
depends on BXT_WC_PMIC_OPREGION
- select TYPEC
help
This driver adds support for USB Type-C detection on Intel Broxton
platforms that have Intel Whiskey Cove PMIC. The driver can detect the
@@ -31,14 +70,13 @@ config TYPEC_WCOVE
To compile this driver as module, choose M here: the module will be
called typec_wcove
-endif
+endif # TYPEC_TCPM
source "drivers/usb/typec/ucsi/Kconfig"
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
- select TYPEC
help
Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
Delivery controller.
@@ -46,4 +84,4 @@ config TYPEC_TPS6598X
If you choose to build this driver as a dynamically linked module, the
module will be called tps6598x.ko.
-endmenu
+endif # TYPEC
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index d0c31cee4720..e36d6c73c4a4 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -1,7 +1,6 @@
config TYPEC_UCSI
tristate "USB Type-C Connector System Software Interface driver"
depends on !CPU_BIG_ENDIAN
- select TYPEC
help
USB Type-C Connector System Software Interface (UCSI) is a
specification for an interface that allows the operating system to
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 536e037f541f..493ac2928391 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -322,23 +322,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
return priv;
}
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
struct usb_device *udev = sdev->udev;
struct usb_host_endpoint *ep;
struct usb_endpoint_descriptor *epd = NULL;
+ int epnum = pdu->base.ep;
+ int dir = pdu->base.direction;
+
+ if (epnum < 0 || epnum > 15)
+ goto err_ret;
if (dir == USBIP_DIR_IN)
ep = udev->ep_in[epnum & 0x7f];
else
ep = udev->ep_out[epnum & 0x7f];
- if (!ep) {
- dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
- epnum);
- BUG();
- }
+ if (!ep)
+ goto err_ret;
epd = &ep->desc;
+
+ /* validate transfer_buffer_length */
+ if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
+ dev_err(&sdev->udev->dev,
+ "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
+ pdu->u.cmd_submit.transfer_buffer_length);
+ return -1;
+ }
+
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +372,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
}
if (usb_endpoint_xfer_isoc(epd)) {
+ /* validate packet size and number of packets */
+ unsigned int maxp, packets, bytes;
+
+ maxp = usb_endpoint_maxp(epd);
+ maxp *= usb_endpoint_maxp_mult(epd);
+ bytes = pdu->u.cmd_submit.transfer_buffer_length;
+ packets = DIV_ROUND_UP(bytes, maxp);
+
+ if (pdu->u.cmd_submit.number_of_packets < 0 ||
+ pdu->u.cmd_submit.number_of_packets > packets) {
+ dev_err(&sdev->udev->dev,
+ "CMD_SUBMIT: isoc invalid num packets %d\n",
+ pdu->u.cmd_submit.number_of_packets);
+ return -1;
+ }
if (dir == USBIP_DIR_OUT)
return usb_sndisocpipe(udev, epnum);
else
return usb_rcvisocpipe(udev, epnum);
}
+err_ret:
/* NOT REACHED */
- dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
- return 0;
+ dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+ return -1;
}
static void masking_bogus_flags(struct urb *urb)
@@ -433,7 +460,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
struct usb_device *udev = sdev->udev;
- int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+ int pipe = get_pipe(sdev, pdu);
+
+ if (pipe == -1)
+ return;
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
@@ -452,7 +482,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
/* allocate urb transfer buffer, if needed */
- if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
+ if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
+ pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
priv->urb->transfer_buffer =
kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b18bce96c212..53172b1f6257 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
+ if (urb->actual_length > 0 && !urb->transfer_buffer) {
+ dev_err(&sdev->udev->dev,
+ "urb: actual_length %d transfer_buffer null\n",
+ urb->actual_length);
+ return -1;
+ }
+
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
iovnum = 2 + urb->number_of_packets;
else
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index e5de35c8c505..473fb8a87289 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -256,6 +256,7 @@ struct usbip_device {
/* lock for status */
spinlock_t lock;
+ int sockfd;
struct socket *tcp_socket;
struct task_struct *tcp_rx;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 713e94170963..6b3278c4b72a 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1098,7 +1098,6 @@ static int hcd_name_to_id(const char *name)
static int vhci_setup(struct usb_hcd *hcd)
{
struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
- hcd->self.sg_tablesize = ~0;
if (usb_hcd_is_primary_hcd(hcd)) {
vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
vhci->vhci_hcd_hs->vhci = vhci;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e78f7472cac4..091f76b7196d 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -17,15 +17,20 @@
/*
* output example:
- * hub port sta spd dev socket local_busid
- * hs 0000 004 000 00000000 c5a7bb80 1-2.3
+ * hub port sta spd dev sockfd local_busid
+ * hs 0000 004 000 00000000 3 1-2.3
* ................................................
- * ss 0008 004 000 00000000 d8cee980 2-3.4
+ * ss 0008 004 000 00000000 4 2-3.4
* ................................................
*
- * IP address can be retrieved from a socket pointer address by looking
- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
- * port number and its peer IP address.
+ * Output includes socket fd instead of socket pointer address to avoid
+ * leaking kernel memory address in:
+ * /sys/devices/platform/vhci_hcd.0/status and in debug output.
+ * The socket pointer address is not used at the moment and it was made
+ * visible as a convenient way to find IP address from socket pointer
+ * address by looking up /proc/net/{tcp,tcp6}. As this opens a security
+ * hole, the change is made to use sockfd instead.
+ *
*/
static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
{
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
if (vdev->ud.status == VDEV_ST_USED) {
*out += sprintf(*out, "%03u %08x ",
vdev->speed, vdev->devid);
- *out += sprintf(*out, "%16p %s",
- vdev->ud.tcp_socket,
+ *out += sprintf(*out, "%u %s",
+ vdev->ud.sockfd,
dev_name(&vdev->udev->dev));
} else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
char *s = out;
/*
- * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2.
+ * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
+ * thus the * 2.
*/
out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
vdev->devid = devid;
vdev->speed = speed;
+ vdev->ud.sockfd = sockfd;
vdev->ud.tcp_socket = socket;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index 38d0504a1bbc..625f706b8160 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -603,9 +603,9 @@ static void uwb_cnflt_update_work(struct work_struct *work)
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_cnflt_timer(unsigned long arg)
+static void uwb_cnflt_timer(struct timer_list *t)
{
- struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
+ struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}
@@ -642,7 +642,7 @@ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_i
}
INIT_LIST_HEAD(&cnflt->rc_node);
- setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);
+ timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
cnflt->rc = rc;
INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
index 36b5cb62c15d..fbdca728bd9f 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/uwb/neh.c
@@ -115,7 +115,7 @@ struct uwb_rc_neh {
struct list_head list_node;
};
-static void uwb_rc_neh_timer(unsigned long arg);
+static void uwb_rc_neh_timer(struct timer_list *t);
static void uwb_rc_neh_release(struct kref *kref)
{
@@ -223,7 +223,7 @@ struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
kref_init(&neh->kref);
INIT_LIST_HEAD(&neh->list_node);
- setup_timer(&neh->timer, uwb_rc_neh_timer, (unsigned long)neh);
+ timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
neh->rc = rc;
neh->evt_type = expected_type;
@@ -565,9 +565,9 @@ void uwb_rc_neh_error(struct uwb_rc *rc, int error)
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
-static void uwb_rc_neh_timer(unsigned long arg)
+static void uwb_rc_neh_timer(struct timer_list *t)
{
- struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+ struct uwb_rc_neh *neh = from_timer(neh, t, timer);
struct uwb_rc *rc = neh->rc;
unsigned long flags;
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index f5e27247a38f..fe25a8cc6fa1 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -23,7 +23,7 @@
#include "uwb-internal.h"
-static void uwb_rsv_timer(unsigned long arg);
+static void uwb_rsv_timer(struct timer_list *t);
static const char *rsv_states[] = {
[UWB_RSV_STATE_NONE] = "none ",
@@ -198,9 +198,9 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
dev_dbg(dev, "put stream %d\n", rsv->stream);
}
-void uwb_rsv_backoff_win_timer(unsigned long arg)
+void uwb_rsv_backoff_win_timer(struct timer_list *t)
{
- struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
+ struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
struct device *dev = &rc->uwb_dev.dev;
@@ -470,7 +470,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
INIT_LIST_HEAD(&rsv->rc_node);
INIT_LIST_HEAD(&rsv->pal_node);
kref_init(&rsv->kref);
- setup_timer(&rsv->timer, uwb_rsv_timer, (unsigned long)rsv);
+ timer_setup(&rsv->timer, uwb_rsv_timer, 0);
rsv->rc = rc;
INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
@@ -939,9 +939,9 @@ static void uwb_rsv_alien_bp_work(struct work_struct *work)
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_rsv_timer(unsigned long arg)
+static void uwb_rsv_timer(struct timer_list *t)
{
- struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+ struct uwb_rsv *rsv = from_timer(rsv, t, timer);
queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}
@@ -987,8 +987,7 @@ void uwb_rsv_init(struct uwb_rc *rc)
rc->bow.can_reserve_extra_mases = true;
rc->bow.total_expired = 0;
rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
- setup_timer(&rc->bow.timer, uwb_rsv_backoff_win_timer,
- (unsigned long)&rc->bow);
+ timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);
bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
index 353c0555a1f5..91326ce093a7 100644
--- a/drivers/uwb/uwb-internal.h
+++ b/drivers/uwb/uwb-internal.h
@@ -329,7 +329,7 @@ void uwb_rsv_put(struct uwb_rsv *rsv);
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
-void uwb_rsv_backoff_win_timer(unsigned long arg);
+void uwb_rsv_backoff_win_timer(struct timer_list *t);
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
int uwb_rsv_status(struct uwb_rsv *rsv);
int uwb_rsv_companion_status(struct uwb_rsv *rsv);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8d626d7c2e7e..c7bdeb655646 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -778,16 +778,6 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
goto out;
- if (nvq->rx_array)
- msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
- /* On overrun, truncate and discard */
- if (unlikely(headcount > UIO_MAXIOV)) {
- iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
- err = sock->ops->recvmsg(sock, &msg,
- 1, MSG_DONTWAIT | MSG_TRUNC);
- pr_debug("Discarded rx packet: len %zd\n", sock_len);
- continue;
- }
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -800,6 +790,16 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
+ if (nvq->rx_array)
+ msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+ err = sock->ops->recvmsg(sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
/* We don't need to be notified again. */
iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
fixup = msg.msg_iter;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 35e929f132e8..71517b3c5558 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -210,12 +210,6 @@ static struct workqueue_struct *vhost_scsi_workqueue;
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);
-static int iov_num_pages(void __user *iov_base, size_t iov_len)
-{
- return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
- ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
-}
-
static void vhost_scsi_done_inflight(struct kref *kref)
{
struct vhost_scsi_inflight *inflight;
@@ -519,7 +513,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vs_completion_work);
DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
struct virtio_scsi_cmd_resp v_rsp;
- struct vhost_scsi_cmd *cmd;
+ struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
struct se_cmd *se_cmd;
struct iov_iter iov_iter;
@@ -527,7 +521,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
llnode = llist_del_all(&vs->vs_completion_list);
- llist_for_each_entry(cmd, llnode, tvc_completion_list) {
+ llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
@@ -618,48 +612,31 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
*/
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
- void __user *ptr,
- size_t len,
+ struct iov_iter *iter,
struct scatterlist *sgl,
bool write)
{
- unsigned int npages = 0, offset, nbytes;
- unsigned int pages_nr = iov_num_pages(ptr, len);
- struct scatterlist *sg = sgl;
struct page **pages = cmd->tvc_upages;
- int ret, i;
-
- if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
- pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
- " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
- pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
- return -ENOBUFS;
- }
+ struct scatterlist *sg = sgl;
+ ssize_t bytes;
+ size_t offset;
+ unsigned int npages = 0;
- ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+ bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+ VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
- if (ret < 0)
- goto out;
- /* Less pages pinned than wanted */
- if (ret != pages_nr) {
- for (i = 0; i < ret; i++)
- put_page(pages[i]);
- ret = -EFAULT;
- goto out;
- }
+ if (bytes <= 0)
+ return bytes < 0 ? bytes : -EFAULT;
- while (len > 0) {
- offset = (uintptr_t)ptr & ~PAGE_MASK;
- nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
- sg_set_page(sg, pages[npages], nbytes, offset);
- ptr += nbytes;
- len -= nbytes;
- sg++;
- npages++;
- }
+ iov_iter_advance(iter, bytes);
-out:
- return ret;
+ while (bytes) {
+ unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
+ sg_set_page(sg++, pages[npages++], n, offset);
+ bytes -= n;
+ offset = 0;
+ }
+ return npages;
}
static int
@@ -687,24 +664,20 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
struct iov_iter *iter,
struct scatterlist *sg, int sg_count)
{
- size_t off = iter->iov_offset;
- int i, ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- void __user *base = iter->iov[i].iov_base + off;
- size_t len = iter->iov[i].iov_len - off;
+ struct scatterlist *p = sg;
+ int ret;
- ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
+ while (iov_iter_count(iter)) {
+ ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
if (ret < 0) {
- for (i = 0; i < sg_count; i++) {
- struct page *page = sg_page(&sg[i]);
+ while (p < sg) {
+ struct page *page = sg_page(p++);
if (page)
put_page(page);
}
return ret;
}
sg += ret;
- off = 0;
}
return 0;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d6dbb28245e6..33ac2b186b85 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1175,7 +1175,7 @@ static int iotlb_access_ok(struct vhost_virtqueue *vq,
{
const struct vhost_umem_node *node;
struct vhost_umem *umem = vq->iotlb;
- u64 s = 0, size, orig_addr = addr;
+ u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
if (vhost_vq_meta_fetch(vq, addr, len, type))
return true;
@@ -1183,7 +1183,7 @@ static int iotlb_access_ok(struct vhost_virtqueue *vq,
while (len > s) {
node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
addr,
- addr + len - 1);
+ last);
if (node == NULL || node->start > addr) {
vhost_iotlb_miss(vq, addr, access);
return false;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index c9de9c41aa97..5a5e981bd8e4 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -518,6 +518,8 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
goto out;
}
+ vsock->guest_cid = 0; /* no CID assigned yet */
+
atomic_set(&vsock->queued_replies, 0);
vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index a9e9cef20ed6..2b6c6aaf4e14 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -251,7 +251,7 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
struct spi_transfer xfer_regindex, xfer_regvalue;
unsigned char tbuf[CMD_BUFSIZE];
unsigned char rbuf[CMD_BUFSIZE];
- int ret, len = 0;
+ int ret;
memset(&xfer_regindex, 0, sizeof(struct spi_transfer));
memset(&xfer_regvalue, 0, sizeof(struct spi_transfer));
@@ -273,7 +273,6 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
ret = spi_sync(spi, &msg);
spi_message_init(&msg);
- len = 0;
tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_REG,
START_RW_WRITE));
tbuf[1] = set_tx_byte((value & 0xFF00) >> 8);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 9bd17682655a..1c2289ddd555 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
{
unsigned int lth = pb->lth_brightness;
- int duty_cycle;
+ u64 duty_cycle;
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
- return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
+ duty_cycle *= pb->period - lth;
+ do_div(duty_cycle, pb->scale);
+
+ return duty_cycle + lth;
}
static int pwm_backlight_update_status(struct backlight_device *bl)
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index fd524ad860a5..380917c86276 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -239,8 +239,7 @@ tps65217_bl_parse_dt(struct platform_device *pdev)
}
if (!of_property_read_u32(node, "default-brightness", &val)) {
- if (val < 0 ||
- val > 100) {
+ if (val > 100) {
dev_err(&pdev->dev,
"invalid 'default-brightness' value in the device tree\n");
err = ERR_PTR(-EINVAL);
@@ -275,17 +274,9 @@ static int tps65217_bl_probe(struct platform_device *pdev)
struct tps65217_bl_pdata *pdata;
struct backlight_properties bl_props;
- if (tps->dev->of_node) {
- pdata = tps65217_bl_parse_dt(pdev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data provided\n");
- return -EINVAL;
- }
- }
+ pdata = tps65217_bl_parse_dt(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl),
GFP_KERNEL);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5e58f5ec0a28..2f615b7f1c9f 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -905,16 +905,6 @@ config FB_LEO
This is the frame buffer device driver for the SBUS-based Sun ZX
(leo) frame buffer cards.
-config FB_IGA
- bool "IGA 168x display support"
- depends on (FB = y) && SPARC32
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the framebuffer device for the INTERGRAPHICS 1680 and
- successor frame buffer cards.
-
config FB_XVR500
bool "Sun XVR-500 3DLABS Wildcat support"
depends on (FB = y) && PCI && SPARC64
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 8895536a20d6..115961e0721b 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -65,7 +65,6 @@ obj-$(CONFIG_FB_HGA) += hgafb.o
obj-$(CONFIG_FB_XVR500) += sunxvr500.o
obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
-obj-$(CONFIG_FB_IGA) += igafb.o
obj-$(CONFIG_FB_APOLLO) += dnfb.o
obj-$(CONFIG_FB_Q40) += q40fb.o
obj-$(CONFIG_FB_TGA) += tgafb.o
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 3ec72f19114b..a9a8272f7a6e 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2272,10 +2272,10 @@ static void aty_bl_exit(struct backlight_device *bd)
static void aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
{
- const int ragepro_tbl[] = {
+ static const int ragepro_tbl[] = {
44, 50, 55, 66, 75, 80, 100
};
- const int ragexl_tbl[] = {
+ static const int ragexl_tbl[] = {
50, 66, 75, 83, 90, 95, 100, 105,
110, 115, 120, 125, 133, 143, 166
};
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 1e2ec360f8c1..4d77daeecf99 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1454,9 +1454,9 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
/*
* Timer function for delayed LVDS panel power up/down
*/
-static void radeon_lvds_timer_func(unsigned long data)
+static void radeon_lvds_timer_func(struct timer_list *t)
{
- struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
+ struct radeonfb_info *rinfo = from_timer(rinfo, t, lvds_timer);
radeon_engine_idle();
@@ -1534,7 +1534,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs *regs,
unsigned long freq)
{
- const struct {
+ static const struct {
int divider;
int bitvalue;
} *post_div,
@@ -2291,9 +2291,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
rinfo->pdev = pdev;
spin_lock_init(&rinfo->reg_lock);
- init_timer(&rinfo->lvds_timer);
- rinfo->lvds_timer.function = radeon_lvds_timer_func;
- rinfo->lvds_timer.data = (unsigned long)rinfo;
+ timer_setup(&rinfo->lvds_timer, radeon_lvds_timer_func, 0);
c1 = ent->device >> 8;
c2 = ent->device & 0xff;
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index f7c253dd5899..7137c12cbcee 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -1208,9 +1208,11 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (mc & 0x4)
break;
+ /* fall through */
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET;
+ /* fall through */
case 0:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET;
@@ -1219,6 +1221,7 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (!(mc & 0x4))
break;
+ /* fall through */
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET;
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 5f04b4096c42..87d5a62bf6ca 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1518,7 +1518,7 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
{
struct fb_info *fbi = fbdev->fb_info;
- int bpp;
+ int bpp, ret;
fbi->fbops = &au1200fb_fb_ops;
@@ -1546,15 +1546,14 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
}
fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
- if (!fbi->pseudo_palette) {
+ if (!fbi->pseudo_palette)
return -ENOMEM;
- }
- if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
+ ret = fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0);
+ if (ret < 0) {
print_err("Fail to allocate colormap (%d entries)",
- AU1200_LCD_NBR_PALETTE_ENTRIES);
- kfree(fbi->pseudo_palette);
- return -EFAULT;
+ AU1200_LCD_NBR_PALETTE_ENTRIES);
+ return ret;
}
strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
@@ -1668,10 +1667,6 @@ static int au1200fb_drv_probe(struct platform_device *dev)
printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
- /* shut gcc up */
- ret = 0;
- fbdev = NULL;
-
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
@@ -1681,8 +1676,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
&dev->dev);
- if (!fbi)
+ if (!fbi) {
+ ret = -ENOMEM;
goto failed;
+ }
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
@@ -1701,7 +1698,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto failed;
}
/*
@@ -1718,7 +1716,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Init FB data */
- if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
+ ret = au1200fb_init_fbinfo(fbdev);
+ if (ret < 0)
goto failed;
/* Register new framebuffer */
@@ -1758,21 +1757,26 @@ static int au1200fb_drv_probe(struct platform_device *dev)
return 0;
failed:
- /* NOTE: This only does the current plane/window that failed; others are still active */
- if (fbi) {
+ for (plane = 0; plane < device_count; ++plane) {
+ fbi = _au1200fb_infos[plane];
+ if (!fbi)
+ break;
+
+ /* Clean up all probe data */
+ unregister_framebuffer(fbi);
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
+
+ framebuffer_release(fbi);
+ _au1200fb_infos[plane] = NULL;
}
- if (plane == 0)
- free_irq(AU1200_LCD_INT, (void*)dev);
return ret;
}
static int au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_platdata *pd = platform_get_drvdata(dev);
- struct au1200fb_device *fbdev;
struct fb_info *fbi;
int plane;
@@ -1781,7 +1785,6 @@ static int au1200fb_drv_remove(struct platform_device *dev)
for (plane = 0; plane < device_count; ++plane) {
fbi = _au1200fb_infos[plane];
- fbdev = fbi->par;
/* Clean up all probe data */
unregister_framebuffer(fbi);
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index d992aa5eb3f0..b3be06dd2908 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1477,10 +1477,12 @@ static void init_vgachip(struct fb_info *info)
mdelay(100);
/* mode */
vga_wgfx(cinfo->regbase, CL_GR31, 0x00);
- case BT_GD5480: /* fall through */
+ /* fall through */
+ case BT_GD5480:
/* from Klaus' NetBSD driver: */
vga_wgfx(cinfo->regbase, CL_GR2F, 0x00);
- case BT_ALPINE: /* fall through */
+ /* fall through */
+ case BT_ALPINE:
/* put blitter into 542x compat */
vga_wgfx(cinfo->regbase, CL_GR33, 0x00);
break;
diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
index 6026c60fc100..261522fabdac 100644
--- a/drivers/video/fbdev/controlfb.h
+++ b/drivers/video/fbdev/controlfb.h
@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
{{ 1, 2}}, /* 1152x870, 75Hz */
{{ 0, 1}}, /* 1280x960, 75Hz */
{{ 0, 1}}, /* 1280x1024, 75Hz */
+ {{ 1, 2}}, /* 1152x768, 60Hz */
+ {{ 0, 1}}, /* 1600x1024, 60Hz */
};
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 04612f938bab..929ca472c524 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -395,10 +395,10 @@ static void fb_flashcursor(struct work_struct *work)
console_unlock();
}
-static void cursor_timer_handler(unsigned long dev_addr)
+static void cursor_timer_handler(struct timer_list *t)
{
- struct fb_info *info = (struct fb_info *) dev_addr;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
+ struct fb_info *info = ops->info;
queue_work(system_power_efficient_wq, &info->queue);
mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
@@ -414,8 +414,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
if (!info->queue.func)
INIT_WORK(&info->queue, fb_flashcursor);
- setup_timer(&ops->cursor_timer, cursor_timer_handler,
- (unsigned long) info);
+ timer_setup(&ops->cursor_timer, cursor_timer_handler, 0);
mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
}
@@ -714,6 +713,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
if (!err) {
ops->cur_blink_jiffies = HZ / 5;
+ ops->info = info;
info->fbcon_par = ops;
if (vc)
@@ -962,6 +962,7 @@ static const char *fbcon_startup(void)
ops->graphics = 1;
ops->cur_rotate = -1;
ops->cur_blink_jiffies = HZ / 5;
+ ops->info = info;
info->fbcon_par = ops;
if (initial_rotation != -1)
p->con_rotate = initial_rotation;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 18f3ac144237..9f7744fbc962 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -69,6 +69,7 @@ struct fbcon_ops {
struct timer_list cursor_timer; /* Cursor timer */
struct fb_cursor cursor_state;
struct display *p;
+ struct fb_info *info;
int currcon; /* Current VC. */
int cur_blink_jiffies;
int cursor_flash;
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 7b1492d34e98..5505fa00c634 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -115,7 +115,7 @@ static struct fb_ops dn_fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-struct fb_var_screeninfo dnfb_var = {
+static const struct fb_var_screeninfo dnfb_var = {
.xres = 1280,
.yres = 1024,
.xres_virtual = 2048,
@@ -242,16 +242,13 @@ static int dnfb_probe(struct platform_device *dev)
info->screen_base = (u_char *) info->fix.smem_start;
err = fb_alloc_cmap(&info->cmap, 2, 0);
- if (err < 0) {
- framebuffer_release(info);
- return err;
- }
+ if (err < 0)
+ goto release_framebuffer;
err = register_framebuffer(info);
if (err < 0) {
fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- return err;
+ goto release_framebuffer;
}
platform_set_drvdata(dev, info);
@@ -265,6 +262,10 @@ static int dnfb_probe(struct platform_device *dev)
printk("apollo frame buffer alive and kicking !\n");
return err;
+
+release_framebuffer:
+ framebuffer_release(info);
+ return err;
}
static struct platform_driver dnfb_driver = {
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 7f6c9e6cfc6c..3b70044773b6 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -304,12 +304,18 @@ static int goldfish_fb_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_fb_of_match[] = {
+ { .compatible = "google,goldfish-fb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
static struct platform_driver goldfish_fb_driver = {
.probe = goldfish_fb_probe,
.remove = goldfish_fb_remove,
.driver = {
- .name = "goldfish_fb"
+ .name = "goldfish_fb",
+ .of_match_table = goldfish_fb_of_match,
}
};
diff --git a/drivers/video/fbdev/igafb.c b/drivers/video/fbdev/igafb.c
deleted file mode 100644
index 486f18897414..000000000000
--- a/drivers/video/fbdev/igafb.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * linux/drivers/video/igafb.c -- Frame buffer device for IGA 1682
- *
- * Copyright (C) 1998 Vladimir Roganov and Gleb Raiko
- *
- * This driver is partly based on the Frame buffer device for ATI Mach64
- * and partially on VESA-related code.
- *
- * Copyright (C) 1997-1998 Geert Uytterhoeven
- * Copyright (C) 1998 Bernd Harries
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-/******************************************************************************
-
- TODO:
- Despite of IGA Card has advanced graphic acceleration,
- initial version is almost dummy and does not support it.
- Support for video modes and acceleration must be added
- together with accelerated X-Windows driver implementation.
-
- Most important thing at this moment is that we have working
- JavaEngine1 console & X with new console interface.
-
-******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/nvram.h>
-
-#include <asm/io.h>
-
-#ifdef CONFIG_SPARC
-#include <asm/prom.h>
-#include <asm/pcic.h>
-#endif
-
-#include <video/iga.h>
-
-struct pci_mmap_map {
- unsigned long voff;
- unsigned long poff;
- unsigned long size;
- unsigned long prot_flag;
- unsigned long prot_mask;
-};
-
-struct iga_par {
- struct pci_mmap_map *mmap_map;
- unsigned long frame_buffer_phys;
- unsigned long io_base;
-};
-
-struct fb_info fb_info;
-
-struct fb_fix_screeninfo igafb_fix __initdata = {
- .id = "IGA 1682",
- .type = FB_TYPE_PACKED_PIXELS,
- .mmio_len = 1000
-};
-
-struct fb_var_screeninfo default_var = {
- /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 39722,
- .left_margin = 48,
- .right_margin = 16,
- .upper_margin = 33,
- .lower_margin = 10,
- .hsync_len = 96,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED
-};
-
-#ifdef CONFIG_SPARC
-struct fb_var_screeninfo default_var_1024x768 __initdata = {
- /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
- .xres = 1024,
- .yres = 768,
- .xres_virtual = 1024,
- .yres_virtual = 768,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 12699,
- .left_margin = 176,
- .right_margin = 16,
- .upper_margin = 28,
- .lower_margin = 1,
- .hsync_len = 96,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1152x900 __initdata = {
- /* 1152x900, 76 Hz, Non-Interlaced (110.0 MHz dotclock) */
- .xres = 1152,
- .yres = 900,
- .xres_virtual = 1152,
- .yres_virtual = 900,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 9091,
- .left_margin = 234,
- .right_margin = 24,
- .upper_margin = 34,
- .lower_margin = 3,
- .hsync_len = 100,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1280x1024 __initdata = {
- /* 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock) */
- .xres = 1280,
- .yres = 1024,
- .xres_virtual = 1280,
- .yres_virtual = 1024,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = 0,
- .pixclock = 7408,
- .left_margin = 248,
- .right_margin = 16,
- .upper_margin = 38,
- .lower_margin = 1,
- .hsync_len = 144,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-/*
- * Memory-mapped I/O functions for Sparc PCI
- *
- * On sparc we happen to access I/O with memory mapped functions too.
- */
-#define pci_inb(par, reg) readb(par->io_base+(reg))
-#define pci_outb(par, val, reg) writeb(val, par->io_base+(reg))
-
-static inline unsigned int iga_inb(struct iga_par *par, unsigned int reg,
- unsigned int idx)
-{
- pci_outb(par, idx, reg);
- return pci_inb(par, reg + 1);
-}
-
-static inline void iga_outb(struct iga_par *par, unsigned char val,
- unsigned int reg, unsigned int idx )
-{
- pci_outb(par, idx, reg);
- pci_outb(par, val, reg+1);
-}
-
-#endif /* CONFIG_SPARC */
-
-/*
- * Very important functionality for the JavaEngine1 computer:
- * make screen border black (usign special IGA registers)
- */
-static void iga_blank_border(struct iga_par *par)
-{
- int i;
-#if 0
- /*
- * PROM does this for us, so keep this code as a reminder
- * about required read from 0x3DA and writing of 0x20 in the end.
- */
- (void) pci_inb(par, 0x3DA); /* required for every access */
- pci_outb(par, IGA_IDX_VGA_OVERSCAN, IGA_ATTR_CTL);
- (void) pci_inb(par, IGA_ATTR_CTL+1);
- pci_outb(par, 0x38, IGA_ATTR_CTL);
- pci_outb(par, 0x20, IGA_ATTR_CTL); /* re-enable visual */
-#endif
- /*
- * This does not work as it was designed because the overscan
- * color is looked up in the palette. Therefore, under X11
- * overscan changes color.
- */
- for (i=0; i < 3; i++)
- iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i);
-}
-
-#ifdef CONFIG_SPARC
-static int igafb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- struct iga_par *par = (struct iga_par *)info->par;
- unsigned int size, page, map_size = 0;
- unsigned long map_offset = 0;
- int i;
-
- if (!par->mmap_map)
- return -ENXIO;
-
- size = vma->vm_end - vma->vm_start;
-
- /* Each page, see which map applies */
- for (page = 0; page < size; ) {
- map_size = 0;
- for (i = 0; par->mmap_map[i].size; i++) {
- unsigned long start = par->mmap_map[i].voff;
- unsigned long end = start + par->mmap_map[i].size;
- unsigned long offset = (vma->vm_pgoff << PAGE_SHIFT) + page;
-
- if (start > offset)
- continue;
- if (offset >= end)
- continue;
-
- map_size = par->mmap_map[i].size - (offset - start);
- map_offset = par->mmap_map[i].poff + (offset - start);
- break;
- }
- if (!map_size) {
- page += PAGE_SIZE;
- continue;
- }
- if (page + map_size > size)
- map_size = size - page;
-
- pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
- pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
-
- if (remap_pfn_range(vma, vma->vm_start + page,
- map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
- return -EAGAIN;
-
- page += map_size;
- }
-
- if (!map_size)
- return -EINVAL;
-
- vma->vm_flags |= VM_IO;
- return 0;
-}
-#endif /* CONFIG_SPARC */
-
-static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- /*
- * Set a single color register. The values supplied are
- * already rounded down to the hardware's capabilities
- * (according to the entries in the `var' structure). Return
- * != 0 for invalid regno.
- */
- struct iga_par *par = (struct iga_par *)info->par;
-
- if (regno >= info->cmap.len)
- return 1;
-
- pci_outb(par, regno, DAC_W_INDEX);
- pci_outb(par, red, DAC_DATA);
- pci_outb(par, green, DAC_DATA);
- pci_outb(par, blue, DAC_DATA);
-
- if (regno < 16) {
- switch (info->var.bits_per_pixel) {
- case 16:
- ((u16*)(info->pseudo_palette))[regno] =
- (regno << 10) | (regno << 5) | regno;
- break;
- case 24:
- ((u32*)(info->pseudo_palette))[regno] =
- (regno << 16) | (regno << 8) | regno;
- break;
- case 32:
- { int i;
- i = (regno << 8) | regno;
- ((u32*)(info->pseudo_palette))[regno] = (i << 16) | i;
- }
- break;
- }
- }
- return 0;
-}
-
-/*
- * Framebuffer option structure
- */
-static struct fb_ops igafb_ops = {
- .owner = THIS_MODULE,
- .fb_setcolreg = igafb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-#ifdef CONFIG_SPARC
- .fb_mmap = igafb_mmap,
-#endif
-};
-
-static int __init iga_init(struct fb_info *info, struct iga_par *par)
-{
- char vramsz = iga_inb(par, IGA_EXT_CNTRL, IGA_IDX_EXT_BUS_CNTL)
- & MEM_SIZE_ALIAS;
- int video_cmap_len;
-
- switch (vramsz) {
- case MEM_SIZE_1M:
- info->fix.smem_len = 0x100000;
- break;
- case MEM_SIZE_2M:
- info->fix.smem_len = 0x200000;
- break;
- case MEM_SIZE_4M:
- case MEM_SIZE_RESERVED:
- info->fix.smem_len = 0x400000;
- break;
- }
-
- if (info->var.bits_per_pixel > 8)
- video_cmap_len = 16;
- else
- video_cmap_len = 256;
-
- info->fbops = &igafb_ops;
- info->flags = FBINFO_DEFAULT;
-
- fb_alloc_cmap(&info->cmap, video_cmap_len, 0);
-
- if (register_framebuffer(info) < 0)
- return 0;
-
- fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n",
- info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20);
-
- iga_blank_border(par);
- return 1;
-}
-
-static int __init igafb_init(void)
-{
- struct fb_info *info;
- struct pci_dev *pdev;
- struct iga_par *par;
- unsigned long addr;
- int size, iga2000 = 0;
-
- if (fb_get_options("igafb", NULL))
- return -ENODEV;
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTERG,
- PCI_DEVICE_ID_INTERG_1682, 0);
- if (pdev == NULL) {
- /*
- * XXX We tried to use cyber2000fb.c for IGS 2000.
- * But it does not initialize the chip in JavaStation-E, alas.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_INTERG, 0x2000, 0);
- if(pdev == NULL) {
- return -ENXIO;
- }
- iga2000 = 1;
- }
- /* We leak a reference here but as it cannot be unloaded this is
- fine. If you write unload code remember to free it in unload */
-
- size = sizeof(struct iga_par) + sizeof(u32)*16;
-
- info = framebuffer_alloc(size, &pdev->dev);
- if (!info) {
- printk("igafb_init: can't alloc fb_info\n");
- pci_dev_put(pdev);
- return -ENOMEM;
- }
-
- par = info->par;
-
- if ((addr = pdev->resource[0].start) == 0) {
- printk("igafb_init: no memory start\n");
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) {
- printk("igafb_init: can't remap %lx[2M]\n", addr);
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK;
-
-#ifdef CONFIG_SPARC
- /*
- * The following is sparc specific and this is why:
- *
- * IGS2000 has its I/O memory mapped and we want
- * to generate memory cycles on PCI, e.g. do ioremap(),
- * then readb/writeb() as in Documentation/io-mapping.txt.
- *
- * IGS1682 is more traditional, it responds to PCI I/O
- * cycles, so we want to access it with inb()/outb().
- *
- * On sparc, PCIC converts CPU memory access within
- * phys window 0x3000xxxx into PCI I/O cycles. Therefore
- * we may use readb/writeb to access them with IGS1682.
- *
- * We do not take io_base_phys from resource[n].start
- * on IGS1682 because that chip is BROKEN. It does not
- * have a base register for I/O. We just "know" what its
- * I/O addresses are.
- */
- if (iga2000) {
- igafb_fix.mmio_start = par->frame_buffer_phys | 0x00800000;
- } else {
- igafb_fix.mmio_start = 0x30000000; /* XXX */
- }
- if ((par->io_base = (int) ioremap(igafb_fix.mmio_start, igafb_fix.smem_len)) == 0) {
- printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start);
- iounmap((void *)info->screen_base);
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- /*
- * Figure mmap addresses from PCI config space.
- * We need two regions: for video memory and for I/O ports.
- * Later one can add region for video coprocessor registers.
- * However, mmap routine loops until size != 0, so we put
- * one additional region with size == 0.
- */
-
- par->mmap_map = kzalloc(4 * sizeof(*par->mmap_map), GFP_ATOMIC);
- if (!par->mmap_map) {
- printk("igafb_init: can't alloc mmap_map\n");
- iounmap((void *)par->io_base);
- iounmap(info->screen_base);
- kfree(info);
- pci_dev_put(pdev);
- return -ENOMEM;
- }
-
- /*
- * Set default vmode and cmode from PROM properties.
- */
- {
- struct device_node *dp = pci_device_to_OF_node(pdev);
- int node = dp->node;
- int width = prom_getintdefault(node, "width", 1024);
- int height = prom_getintdefault(node, "height", 768);
- int depth = prom_getintdefault(node, "depth", 8);
- switch (width) {
- case 1024:
- if (height == 768)
- default_var = default_var_1024x768;
- break;
- case 1152:
- if (height == 900)
- default_var = default_var_1152x900;
- break;
- case 1280:
- if (height == 1024)
- default_var = default_var_1280x1024;
- break;
- default:
- break;
- }
-
- switch (depth) {
- case 8:
- default_var.bits_per_pixel = 8;
- break;
- case 16:
- default_var.bits_per_pixel = 16;
- break;
- case 24:
- default_var.bits_per_pixel = 24;
- break;
- case 32:
- default_var.bits_per_pixel = 32;
- break;
- default:
- break;
- }
- }
-
-#endif
- igafb_fix.smem_start = (unsigned long) info->screen_base;
- igafb_fix.line_length = default_var.xres*(default_var.bits_per_pixel/8);
- igafb_fix.visual = default_var.bits_per_pixel <= 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-
- info->var = default_var;
- info->fix = igafb_fix;
- info->pseudo_palette = (void *)(par + 1);
-
- if (!iga_init(info, par)) {
- iounmap((void *)par->io_base);
- iounmap(info->screen_base);
- kfree(par->mmap_map);
- kfree(info);
- return -ENODEV;
- }
-
-#ifdef CONFIG_SPARC
- /*
- * Add /dev/fb mmap values.
- */
-
- /* First region is for video memory */
- par->mmap_map[0].voff = 0x0;
- par->mmap_map[0].poff = par->frame_buffer_phys & PAGE_MASK;
- par->mmap_map[0].size = info->fix.smem_len & PAGE_MASK;
- par->mmap_map[0].prot_mask = SRMMU_CACHE;
- par->mmap_map[0].prot_flag = SRMMU_WRITE;
-
- /* Second region is for I/O ports */
- par->mmap_map[1].voff = par->frame_buffer_phys & PAGE_MASK;
- par->mmap_map[1].poff = info->fix.smem_start & PAGE_MASK;
- par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */
- par->mmap_map[1].prot_mask = SRMMU_CACHE;
- par->mmap_map[1].prot_flag = SRMMU_WRITE;
-#endif /* CONFIG_SPARC */
-
- return 0;
-}
-
-static int __init igafb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- }
- return 0;
-}
-
-module_init(igafb_init);
-MODULE_LICENSE("GPL");
-static struct pci_device_id igafb_pci_tbl[] = {
- { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, igafb_pci_tbl);
diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c
index d31ed4e2c46f..83fec573cceb 100644
--- a/drivers/video/fbdev/intelfb/intelfbhw.c
+++ b/drivers/video/fbdev/intelfb/intelfbhw.c
@@ -937,15 +937,11 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
{
u32 m1, m2, n, p1, p2, n1, testm;
u32 f_vco, p, p_best = 0, m, f_out = 0;
- u32 err_max, err_target, err_best = 10000000;
- u32 n_best = 0, m_best = 0, f_best, f_err;
+ u32 err_best = 10000000;
+ u32 n_best = 0, m_best = 0, f_err;
u32 p_min, p_max, p_inc, div_max;
struct pll_min_max *pll = &plls[index];
- /* Accept 0.5% difference, but aim for 0.1% */
- err_max = 5 * clock / 1000;
- err_target = clock / 1000;
-
DBG_MSG("Clock is %d\n", clock);
div_max = pll->max_vco / clock;
@@ -992,7 +988,6 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
m_best = testm;
n_best = n;
p_best = p;
- f_best = f_out;
err_best = f_err;
}
}
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index b9b284d79631..838869c6490c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2056,7 +2056,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
minfo = kzalloc(sizeof(*minfo), GFP_KERNEL);
if (!minfo)
- return -1;
+ return -ENOMEM;
minfo->pcidev = pdev;
minfo->dead = 0;
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index 7846f0e8bbbb..79b1dc7f042b 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -150,7 +150,7 @@
#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
-#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negtive edge sampling */
+#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
enum mxsfb_devtype {
MXSFB_V3,
@@ -788,7 +788,16 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
- if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+
+ /*
+ * The PIXDATA flags of the display_flags enum are controller
+ * centric, e.g. NEGEDGE means drive data on negative edge.
+ * However, the drivers flag is display centric: Sample the
+ * data on negative (falling) edge. Therefore, check for the
+ * POSEDGE flag:
+ * drive on positive edge => sample on negative edge
+ */
+ if (vm.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
put_display_node:
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index a4ee65b8f918..6199d4806193 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -474,7 +474,7 @@ static void auto_update_complete(void *data)
jiffies + HWA742_AUTO_UPDATE_TIME);
}
-static void hwa742_update_window_auto(unsigned long arg)
+static void hwa742_update_window_auto(struct timer_list *unused)
{
LIST_HEAD(req_list);
struct hwa742_request *last;
@@ -1002,9 +1002,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
hwa742.auto_update_window.height = fbdev->panel->y_res;
hwa742.auto_update_window.format = 0;
- init_timer(&hwa742.auto_update_timer);
- hwa742.auto_update_timer.function = hwa742_update_window_auto;
- hwa742.auto_update_timer.data = 0;
+ timer_setup(&hwa742.auto_update_timer, hwa742_update_window_auto, 0);
hwa742.prev_color_mode = -1;
hwa742.prev_flags = 0;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 30d49f3800b3..8e1d60d48dbb 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -3988,7 +3988,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
@@ -5298,9 +5298,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 1d7c012f09db..e08e5664e330 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1477,7 +1477,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
static int omapfb_parse_vram_param(const char *param, int max_entries,
unsigned long *sizes, unsigned long *paddrs)
{
- int fbnum;
+ unsigned int fbnum;
unsigned long size;
unsigned long paddr = 0;
char *p, *start;
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 867c5218968f..a582d3ae7ac1 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,9 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
- FOLL_WRITE);
-
+ ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
if (ret < nr_pages) {
nr_pages = ret;
ret = -EINVAL;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 933619da1a94..55fbb432c053 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -512,28 +512,26 @@ pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;
+static struct pxa3xx_gcu_priv *debug_timer_priv;
-static void pxa3xx_gcu_debug_timedout(unsigned long ptr)
+static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
- struct pxa3xx_gcu_priv *priv = (struct pxa3xx_gcu_priv *) ptr;
+ struct pxa3xx_gcu_priv *priv = debug_timer_priv;
QERROR("Timer DUMP");
- /* init the timer structure */
- init_timer(&pxa3xx_gcu_debug_timer);
- pxa3xx_gcu_debug_timer.function = pxa3xx_gcu_debug_timedout;
- pxa3xx_gcu_debug_timer.data = ptr;
- pxa3xx_gcu_debug_timer.expires = jiffies + 5*HZ; /* one second */
-
- add_timer(&pxa3xx_gcu_debug_timer);
+ mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}
-static void pxa3xx_gcu_init_debug_timer(void)
+static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
- pxa3xx_gcu_debug_timedout((unsigned long) &pxa3xx_gcu_debug_timer);
+ /* init the timer structure */
+ debug_timer_priv = priv;
+ timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
+ pxa3xx_gcu_debug_timedout(NULL);
}
#else
-static inline void pxa3xx_gcu_init_debug_timer(void) {}
+static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif
static int
@@ -670,7 +668,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->resource_mem = r;
pxa3xx_gcu_reset(priv);
- pxa3xx_gcu_init_debug_timer();
+ pxa3xx_gcu_init_debug_timer(priv);
dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
(void *) r->start, (void *) priv->shared_phys,
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index fc2aaa5aca23..15ae50063296 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -323,13 +323,11 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* according to the RGB bitfield information.
*/
if (regno < 16) {
- u32 *pal = fbi->fb.pseudo_palette;
-
val = chan_to_field(red, &fbi->fb.var.red);
val |= chan_to_field(green, &fbi->fb.var.green);
val |= chan_to_field(blue, &fbi->fb.var.blue);
- pal[regno] = val;
+ fbi->pseudo_palette[regno] = val;
ret = 0;
}
break;
@@ -1132,12 +1130,10 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
struct sa1100fb_info *fbi;
unsigned i;
- fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16,
- GFP_KERNEL);
+ fbi = devm_kzalloc(dev, sizeof(struct sa1100fb_info), GFP_KERNEL);
if (!fbi)
return NULL;
- memset(fbi, 0, sizeof(struct sa1100fb_info));
fbi->dev = dev;
strcpy(fbi->fb.fix.id, SA1100_NAME);
@@ -1159,7 +1155,7 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
fbi->fb.fbops = &sa1100fb_ops;
fbi->fb.flags = FBINFO_DEFAULT;
fbi->fb.monspecs = monspecs;
- fbi->fb.pseudo_palette = (fbi + 1);
+ fbi->fb.pseudo_palette = fbi->pseudo_palette;
fbi->rgb[RGB_4] = &rgb_4;
fbi->rgb[RGB_8] = &rgb_8;
@@ -1218,48 +1214,42 @@ static int sa1100fb_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (irq < 0 || !res)
+ if (irq < 0)
return -EINVAL;
- if (!request_mem_region(res->start, resource_size(res), "LCD"))
- return -EBUSY;
-
fbi = sa1100fb_init_fbinfo(&pdev->dev);
- ret = -ENOMEM;
if (!fbi)
- goto failed;
-
- fbi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fbi->clk)) {
- ret = PTR_ERR(fbi->clk);
- fbi->clk = NULL;
- goto failed;
- }
+ return -ENOMEM;
- fbi->base = ioremap(res->start, resource_size(res));
- if (!fbi->base)
- goto failed;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fbi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbi->base))
+ return PTR_ERR(fbi->base);
- /* Initialize video memory */
- ret = sa1100fb_map_video_memory(fbi);
- if (ret)
- goto failed;
+ fbi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fbi->clk))
+ return PTR_ERR(fbi->clk);
- ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
+ ret = devm_request_irq(&pdev->dev, irq, sa1100fb_handle_irq, 0,
+ "LCD", fbi);
if (ret) {
dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
- goto failed;
+ return ret;
}
if (machine_is_shannon()) {
- ret = gpio_request_one(SHANNON_GPIO_DISP_EN,
+ ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN,
GPIOF_OUT_INIT_LOW, "display enable");
if (ret)
- goto err_free_irq;
+ return ret;
}
+ /* Initialize video memory */
+ ret = sa1100fb_map_video_memory(fbi);
+ if (ret)
+ return ret;
+
/*
* This makes sure that our colour bitfield
* descriptors are correctly initialised.
@@ -1269,8 +1259,11 @@ static int sa1100fb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fbi);
ret = register_framebuffer(&fbi->fb);
- if (ret < 0)
- goto err_reg_fb;
+ if (ret < 0) {
+ dma_free_wc(fbi->dev, fbi->map_size, fbi->map_cpu,
+ fbi->map_dma);
+ return ret;
+ }
#ifdef CONFIG_CPU_FREQ
fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
@@ -1281,20 +1274,6 @@ static int sa1100fb_probe(struct platform_device *pdev)
/* This driver cannot be unloaded at the moment */
return 0;
-
- err_reg_fb:
- if (machine_is_shannon())
- gpio_free(SHANNON_GPIO_DISP_EN);
- err_free_irq:
- free_irq(irq, fbi);
- failed:
- if (fbi)
- iounmap(fbi->base);
- if (fbi->clk)
- clk_put(fbi->clk);
- kfree(fbi);
- release_mem_region(res->start, resource_size(res));
- return ret;
}
static struct platform_driver sa1100fb_driver = {
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index 0139d13377a5..7a1a9ca33cec 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -69,6 +69,8 @@ struct sa1100fb_info {
const struct sa1100fb_mach_info *inf;
struct clk *clk;
+
+ u32 pseudo_palette[16];
};
#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 1ec9c3e0e1d8..02ee752d5000 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -6486,7 +6486,7 @@ SiS_SetTVSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
if(!(SiS_Pr->SiS_TVMode & TVSetPAL)) {
if(SiS_Pr->SiS_TVMode & TVSetNTSC1024) {
- const unsigned char specialtv[] = {
+ static const unsigned char specialtv[] = {
0xa7,0x07,0xf2,0x6e,0x17,0x8b,0x73,0x53,
0x13,0x40,0x34,0xf4,0x63,0xbb,0xcc,0x7a,
0x58,0xe4,0x73,0xda,0x13
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index e92303823a4b..ecdd054d8951 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1702,6 +1702,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_INFO: /* For communication with X driver */
ivideo->sisfb_infoblock.sisfb_id = SISFB_ID;
ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR;
@@ -1755,6 +1756,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_VBRSTATUS:
if(sisfb_CheckVBRetrace(ivideo))
return put_user((u32)1, argp);
@@ -1765,6 +1767,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_AUTOMAXIMIZE:
if(ivideo->sisfb_max)
return put_user((u32)1, argp);
@@ -1775,6 +1778,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_SET_AUTOMAXIMIZE:
if(get_user(gpu32, argp))
return -EFAULT;
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 076dd2711630..6f0a19501c6a 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1008,6 +1008,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
case FB_BLANK_POWERDOWN:
ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0);
+ /* fall through */
case FB_BLANK_NORMAL:
ctrl |= SM501_DC_CRT_CONTROL_BLANK;
@@ -1889,6 +1890,9 @@ static void sm501_free_init_fb(struct sm501fb_info *info,
{
struct fb_info *fbi = info->fb[head];
+ if (!fbi)
+ return;
+
fb_dealloc_cmap(&fbi->cmap);
}
@@ -2076,8 +2080,10 @@ static int sm501fb_remove(struct platform_device *pdev)
sm501_free_init_fb(info, HEAD_CRT);
sm501_free_init_fb(info, HEAD_PANEL);
- unregister_framebuffer(fbinfo_crt);
- unregister_framebuffer(fbinfo_pnl);
+ if (fbinfo_crt)
+ unregister_framebuffer(fbinfo_crt);
+ if (fbinfo_pnl)
+ unregister_framebuffer(fbinfo_pnl);
sm501fb_stop(info);
kfree(info);
@@ -2094,8 +2100,12 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
- struct sm501fb_par *par = fbi->par;
+ struct sm501fb_par *par;
+
+ if (!fbi)
+ return 0;
+ par = fbi->par;
if (par->screen.size == 0)
return 0;
@@ -2141,8 +2151,12 @@ static void sm501fb_resume_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
- struct sm501fb_par *par = fbi->par;
+ struct sm501fb_par *par;
+
+ if (!fbi)
+ return;
+ par = fbi->par;
if (par->screen.size == 0)
return;
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index ef08a104fb42..d44f14242016 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
for (i = 0; i < len; i++) {
ret = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0), (0x02),
- (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
- HZ);
- if (ret < 1) {
- pr_err("Read EDID byte %d failed err %x\n", i, ret);
+ usb_rcvctrlpipe(dev->udev, 0), 0x02,
+ (0x80 | (0x02 << 5)), i << 8, 0xA1,
+ rbuf, 2, USB_CTRL_GET_TIMEOUT);
+ if (ret < 2) {
+ pr_err("Read EDID byte %d failed: %d\n", i, ret);
i--;
break;
}
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d993df5586c0..d70ad6d38879 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -243,8 +243,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
/* Get the physical addresses of the source buffer */
- num_pinned = get_user_pages_unlocked(param.local_vaddr - lb_offset,
- num_pages, pages, (param.source == -1) ? 0 : FOLL_WRITE);
+ num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
+ num_pages, param.source != -1, pages);
if (num_pinned != num_pages) {
/* get_user_pages() failed */
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 48230a5e12f2..bf7ff3934d7f 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev)
/* device_register() causes the bus infrastructure to look for a
* matching driver. */
err = device_register(&dev->dev);
+ if (err)
+ ida_simple_remove(&virtio_index_ida, dev->index);
out:
if (err)
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index f0b3a0b9d42f..a1fb52cb3f0a 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -143,16 +143,17 @@ static void set_page_pfns(struct virtio_balloon *vb,
static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
- struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
unsigned num_allocated_pages;
+ unsigned num_pfns;
+ struct page *page;
+ LIST_HEAD(pages);
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
- mutex_lock(&vb->balloon_lock);
- for (vb->num_pfns = 0; vb->num_pfns < num;
- vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_page_enqueue(vb_dev_info);
+ for (num_pfns = 0; num_pfns < num;
+ num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ struct page *page = balloon_page_alloc();
if (!page) {
dev_info_ratelimited(&vb->vdev->dev,
@@ -162,11 +163,23 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
msleep(200);
break;
}
+
+ balloon_page_push(&pages, page);
+ }
+
+ mutex_lock(&vb->balloon_lock);
+
+ vb->num_pfns = 0;
+
+ while ((page = balloon_page_pop(&pages))) {
+ balloon_page_enqueue(&vb->vb_dev_info, page);
+
set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
if (!virtio_has_feature(vb->vdev,
VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
adjust_managed_page_count(page, -1);
+ vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
}
num_allocated_pages = vb->num_pfns;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 74dc7170fd35..c92131edfaba 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -493,7 +493,16 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
};
-static void virtio_mmio_release_dev_empty(struct device *_d) {}
+static void virtio_mmio_release_dev(struct device *_d)
+{
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct virtio_mmio_device *vm_dev =
+ container_of(vdev, struct virtio_mmio_device, vdev);
+ struct platform_device *pdev = vm_dev->pdev;
+
+ devm_kfree(&pdev->dev, vm_dev);
+}
/* Platform device */
@@ -514,10 +523,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
if (!vm_dev)
- return -ENOMEM;
+ return -ENOMEM;
vm_dev->vdev.dev.parent = &pdev->dev;
- vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty;
+ vm_dev->vdev.dev.release = virtio_mmio_release_dev;
vm_dev->vdev.config = &virtio_mmio_config_ops;
vm_dev->pdev = pdev;
INIT_LIST_HEAD(&vm_dev->virtqueues);
@@ -573,13 +582,16 @@ static int virtio_mmio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vm_dev);
- return register_virtio_device(&vm_dev->vdev);
+ rc = register_virtio_device(&vm_dev->vdev);
+ if (rc)
+ put_device(&vm_dev->vdev.dev);
+
+ return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
-
unregister_virtio_device(&vm_dev->vdev);
return 0;
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 7cc51223db1c..5dd284008630 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -511,7 +511,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
ca91cx42_bridge = image->parent;
/* Find pci_dev container of dev */
- if (ca91cx42_bridge->parent == NULL) {
+ if (!ca91cx42_bridge->parent) {
dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
return -EINVAL;
}
@@ -529,14 +529,12 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
- if (image->bus_resource.name == NULL) {
+ if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(ca91cx42_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
+ if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
@@ -562,7 +560,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
- if (image->kern_base == NULL) {
+ if (!image->kern_base) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
@@ -574,7 +572,7 @@ err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
@@ -588,7 +586,7 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
@@ -1036,10 +1034,8 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
dev = list->parent->parent->parent;
/* XXX descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(dev, "Failed to allocate memory for dma resource "
- "structure\n");
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
@@ -1052,7 +1048,7 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
goto err_align;
}
- memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(entry->descriptor));
if (dest->type == VME_DMA_VME) {
entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
@@ -1323,7 +1319,7 @@ static int ca91cx42_lm_set(struct vme_lm_resource *lm,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor callback attached, "
"can't reset\n");
@@ -1432,7 +1428,7 @@ static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Existing callback attached\n");
return -EBUSY;
@@ -1567,7 +1563,7 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
+ if (!bridge->crcsr_kernel) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
@@ -1618,21 +1614,15 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* We want to support more than one of each bridge so we need to
* dynamically allocate the bridge structure
*/
- ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
-
- if (ca91cx42_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
+ if (!ca91cx42_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(ca91cx42_bridge);
- ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
-
- if (ca91cx42_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
+ if (!ca91cx42_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -1688,11 +1678,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add master windows to list */
for (i = 0; i < CA91C142_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -1706,7 +1693,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&ca91cx42_bridge->master_resources);
@@ -1714,11 +1701,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -1741,11 +1725,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < CA91C142_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
+ dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
+ if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
@@ -1762,10 +1743,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 30b3acc93833..7d83691047f4 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -409,7 +409,7 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
/* Each location monitor covers 8 bytes */
if (((lm_base + (8 * i)) <= addr) &&
((lm_base + (8 * i) + 8) > addr)) {
- if (bridge->lm_callback[i] != NULL)
+ if (bridge->lm_callback[i])
bridge->lm_callback[i](
bridge->lm_data[i]);
}
@@ -866,7 +866,7 @@ static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
pr_err("Location monitor callback attached, can't reset\n");
return -EBUSY;
@@ -940,7 +940,7 @@ static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
pr_err("Existing callback attached\n");
return -EBUSY;
@@ -978,7 +978,7 @@ static int fake_lm_detach(struct vme_lm_resource *lm, int monitor)
/* If all location monitors disabled, disable global Location Monitor */
tmp = 0;
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL)
+ if (bridge->lm_callback[i])
tmp = 1;
}
@@ -1003,7 +1003,7 @@ static void *fake_alloc_consistent(struct device *parent, size_t size,
{
void *alloc = kmalloc(size, GFP_KERNEL);
- if (alloc != NULL)
+ if (alloc)
*dma = fake_ptr_to_pci(alloc);
return alloc;
@@ -1039,7 +1039,7 @@ static int fake_crcsr_init(struct vme_bridge *fake_bridge)
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL);
bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel);
- if (bridge->crcsr_kernel == NULL)
+ if (!bridge->crcsr_kernel)
return -ENOMEM;
vstat = fake_slot_get(fake_bridge);
@@ -1075,14 +1075,14 @@ static int __init fake_init(void)
/* If we want to support more than one bridge at some point, we need to
* dynamically allocate this so we get one per device.
*/
- fake_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
- if (fake_bridge == NULL) {
+ fake_bridge = kzalloc(sizeof(*fake_bridge), GFP_KERNEL);
+ if (!fake_bridge) {
retval = -ENOMEM;
goto err_struct;
}
- fake_device = kzalloc(sizeof(struct fake_driver), GFP_KERNEL);
- if (fake_device == NULL) {
+ fake_device = kzalloc(sizeof(*fake_device), GFP_KERNEL);
+ if (!fake_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -1104,9 +1104,8 @@ static int __init fake_init(void)
/* Add master windows to list */
INIT_LIST_HEAD(&fake_bridge->master_resources);
for (i = 0; i < FAKE_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -1131,9 +1130,8 @@ static int __init fake_init(void)
/* Add slave windows to list */
INIT_LIST_HEAD(&fake_bridge->slave_resources);
for (i = 0; i < FAKE_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -1154,9 +1152,8 @@ static int __init fake_init(void)
/* Add location monitor to list */
INIT_LIST_HEAD(&fake_bridge->lm_resources);
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- pr_err("Failed to allocate memory for location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index fc1b634b969a..647d231d4422 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -741,18 +741,16 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/* Exit here if size is zero */
if (size == 0)
return 0;
- if (image->bus_resource.name == NULL) {
+ if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(tsi148_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
+ if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
@@ -778,7 +776,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
- if (image->kern_base == NULL) {
+ if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
@@ -790,7 +788,7 @@ err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
@@ -804,7 +802,7 @@ static void tsi148_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/*
@@ -1641,10 +1639,8 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
tsi148_bridge = list->parent->parent;
/* Descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
- "dma resource structure\n");
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
@@ -1661,7 +1657,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Given we are going to fill out the structure, we probably don't
* need to zero it, but better safe than sorry for now.
*/
- memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(entry->descriptor));
/* Fill out source part */
switch (src->type) {
@@ -1756,8 +1752,9 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
list_add_tail(&entry->list, &list->entries);
entry->dma_handle = dma_map_single(tsi148_bridge->parent,
- &entry->descriptor,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+ &entry->descriptor,
+ sizeof(entry->descriptor),
+ DMA_TO_DEVICE);
if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
dev_err(tsi148_bridge->parent, "DMA mapping error\n");
retval = -EINVAL;
@@ -1946,7 +1943,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Location monitor "
"callback attached, can't reset\n");
@@ -2071,7 +2068,7 @@ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Existing callback attached\n");
return -EBUSY;
@@ -2208,7 +2205,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
+ if (!bridge->crcsr_kernel) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
return -ENOMEM;
@@ -2294,19 +2291,15 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
- tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
- if (tsi148_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
+ if (!tsi148_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(tsi148_bridge);
- tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
- if (tsi148_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
+ if (!tsi148_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -2371,10 +2364,9 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
master_num--;
tsi148_device->flush_image =
- kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
- if (tsi148_device->flush_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "flush resource structure\n");
+ kmalloc(sizeof(*tsi148_device->flush_image),
+ GFP_KERNEL);
+ if (!tsi148_device->flush_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -2383,17 +2375,14 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
tsi148_device->flush_image->locked = 1;
tsi148_device->flush_image->number = master_num;
memset(&tsi148_device->flush_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(tsi148_device->flush_image->bus_resource));
tsi148_device->flush_image->kern_base = NULL;
}
/* Add master windows to list */
for (i = 0; i < master_num; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -2410,7 +2399,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_PROG | VME_DATA;
master_image->width_attr = VME_D16 | VME_D32;
memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&tsi148_bridge->master_resources);
@@ -2418,11 +2407,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -2442,11 +2428,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < TSI148_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
+ dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
+ if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
@@ -2465,10 +2448,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 6a3ead42aba8..81246221a13b 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -92,23 +92,23 @@ void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
{
struct vme_bridge *bridge;
- if (resource == NULL) {
+ if (!resource) {
printk(KERN_ERR "No resource\n");
return NULL;
}
bridge = find_bridge(resource);
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find bridge\n");
return NULL;
}
- if (bridge->parent == NULL) {
+ if (!bridge->parent) {
printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
return NULL;
}
- if (bridge->alloc_consistent == NULL) {
+ if (!bridge->alloc_consistent) {
printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
bridge->name);
return NULL;
@@ -132,23 +132,23 @@ void vme_free_consistent(struct vme_resource *resource, size_t size,
{
struct vme_bridge *bridge;
- if (resource == NULL) {
+ if (!resource) {
printk(KERN_ERR "No resource\n");
return;
}
bridge = find_bridge(resource);
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find bridge\n");
return;
}
- if (bridge->parent == NULL) {
+ if (!bridge->parent) {
printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
return;
}
- if (bridge->free_consistent == NULL) {
+ if (!bridge->free_consistent) {
printk(KERN_ERR "free_consistent not supported by bridge %s\n",
bridge->name);
return;
@@ -208,29 +208,27 @@ int vme_check_window(u32 aspace, unsigned long long vme_base,
{
int retval = 0;
+ if (vme_base + size < size)
+ return -EINVAL;
+
switch (aspace) {
case VME_A16:
- if (((vme_base + size) > VME_A16_MAX) ||
- (vme_base > VME_A16_MAX))
+ if (vme_base + size > VME_A16_MAX)
retval = -EFAULT;
break;
case VME_A24:
- if (((vme_base + size) > VME_A24_MAX) ||
- (vme_base > VME_A24_MAX))
+ if (vme_base + size > VME_A24_MAX)
retval = -EFAULT;
break;
case VME_A32:
- if (((vme_base + size) > VME_A32_MAX) ||
- (vme_base > VME_A32_MAX))
+ if (vme_base + size > VME_A32_MAX)
retval = -EFAULT;
break;
case VME_A64:
- if ((size != 0) && (vme_base > U64_MAX + 1 - size))
- retval = -EFAULT;
+ /* The VME_A64_MAX limit is actually U64_MAX + 1 */
break;
case VME_CRCSR:
- if (((vme_base + size) > VME_CRCSR_MAX) ||
- (vme_base > VME_CRCSR_MAX))
+ if (vme_base + size > VME_CRCSR_MAX)
retval = -EFAULT;
break;
case VME_USER1:
@@ -303,7 +301,7 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -313,7 +311,7 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
slave_image = list_entry(slave_pos,
struct vme_slave_resource, list);
- if (slave_image == NULL) {
+ if (!slave_image) {
printk(KERN_ERR "Registered NULL Slave resource\n");
continue;
}
@@ -333,14 +331,13 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
}
/* No free image */
- if (allocated_image == NULL)
+ if (!allocated_image)
goto err_image;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_SLAVE;
resource->entry = &allocated_image->list;
@@ -389,7 +386,7 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
image = list_entry(resource->entry, struct vme_slave_resource, list);
- if (bridge->slave_set == NULL) {
+ if (!bridge->slave_set) {
printk(KERN_ERR "Function not supported\n");
return -ENOSYS;
}
@@ -438,7 +435,7 @@ int vme_slave_get(struct vme_resource *resource, int *enabled,
image = list_entry(resource->entry, struct vme_slave_resource, list);
- if (bridge->slave_get == NULL) {
+ if (!bridge->slave_get) {
printk(KERN_ERR "vme_slave_get not supported\n");
return -EINVAL;
}
@@ -465,7 +462,7 @@ void vme_slave_free(struct vme_resource *resource)
slave_image = list_entry(resource->entry, struct vme_slave_resource,
list);
- if (slave_image == NULL) {
+ if (!slave_image) {
printk(KERN_ERR "Can't find slave resource\n");
return;
}
@@ -505,7 +502,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -515,7 +512,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
master_image = list_entry(master_pos,
struct vme_master_resource, list);
- if (master_image == NULL) {
+ if (!master_image) {
printk(KERN_WARNING "Registered NULL master resource\n");
continue;
}
@@ -536,16 +533,15 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
}
/* Check to see if we found a resource */
- if (allocated_image == NULL) {
+ if (!allocated_image) {
printk(KERN_ERR "Can't find a suitable resource\n");
goto err_image;
}
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_MASTER;
resource->entry = &allocated_image->list;
@@ -594,7 +590,7 @@ int vme_master_set(struct vme_resource *resource, int enabled,
image = list_entry(resource->entry, struct vme_master_resource, list);
- if (bridge->master_set == NULL) {
+ if (!bridge->master_set) {
printk(KERN_WARNING "vme_master_set not supported\n");
return -EINVAL;
}
@@ -644,7 +640,7 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
image = list_entry(resource->entry, struct vme_master_resource, list);
- if (bridge->master_get == NULL) {
+ if (!bridge->master_get) {
printk(KERN_WARNING "%s not supported\n", __func__);
return -EINVAL;
}
@@ -676,7 +672,7 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
struct vme_master_resource *image;
size_t length;
- if (bridge->master_read == NULL) {
+ if (!bridge->master_read) {
printk(KERN_WARNING "Reading from resource not supported\n");
return -EINVAL;
}
@@ -725,7 +721,7 @@ ssize_t vme_master_write(struct vme_resource *resource, void *buf,
struct vme_master_resource *image;
size_t length;
- if (bridge->master_write == NULL) {
+ if (!bridge->master_write) {
printk(KERN_WARNING "Writing to resource not supported\n");
return -EINVAL;
}
@@ -776,7 +772,7 @@ unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
- if (bridge->master_rmw == NULL) {
+ if (!bridge->master_rmw) {
printk(KERN_WARNING "Writing to resource not supported\n");
return -EINVAL;
}
@@ -846,7 +842,7 @@ void vme_master_free(struct vme_resource *resource)
master_image = list_entry(resource->entry, struct vme_master_resource,
list);
- if (master_image == NULL) {
+ if (!master_image) {
printk(KERN_ERR "Can't find master resource\n");
return;
}
@@ -886,7 +882,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
printk(KERN_ERR "No VME resource Attribute tests done\n");
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -895,8 +891,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
list_for_each(dma_pos, &bridge->dma_resources) {
dma_ctrlr = list_entry(dma_pos,
struct vme_dma_resource, list);
-
- if (dma_ctrlr == NULL) {
+ if (!dma_ctrlr) {
printk(KERN_ERR "Registered NULL DMA resource\n");
continue;
}
@@ -915,14 +910,13 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
}
/* Check to see if we found a resource */
- if (allocated_ctrlr == NULL)
+ if (!allocated_ctrlr)
goto err_ctrlr;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_DMA;
resource->entry = &allocated_ctrlr->list;
@@ -951,7 +945,6 @@ EXPORT_SYMBOL(vme_dma_request);
*/
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
- struct vme_dma_resource *ctrlr;
struct vme_dma_list *dma_list;
if (resource->type != VME_DMA) {
@@ -959,15 +952,14 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
return NULL;
}
- ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
-
- dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
- if (dma_list == NULL) {
- printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
+ dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+ if (!dma_list)
return NULL;
- }
+
INIT_LIST_HEAD(&dma_list->entries);
- dma_list->parent = ctrlr;
+ dma_list->parent = list_entry(resource->entry,
+ struct vme_dma_resource,
+ list);
mutex_init(&dma_list->mtx);
return dma_list;
@@ -990,17 +982,13 @@ struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
struct vme_dma_attr *attributes;
struct vme_dma_pattern *pattern_attr;
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
- if (pattern_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
+ pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
+ if (!pattern_attr)
goto err_pat;
- }
attributes->type = VME_DMA_PATTERN;
attributes->private = (void *)pattern_attr;
@@ -1034,19 +1022,13 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
/* XXX Run some sanity checks here */
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
- if (pci_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
+ pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
+ if (!pci_attr)
goto err_pci;
- }
-
-
attributes->type = VME_DMA_PCI;
attributes->private = (void *)pci_attr;
@@ -1081,18 +1063,13 @@ struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
- attributes = kmalloc(
- sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
- if (vme_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
+ vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
+ if (!vme_attr)
goto err_vme;
- }
attributes->type = VME_DMA_VME;
attributes->private = (void *)vme_attr;
@@ -1148,7 +1125,7 @@ int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_add == NULL) {
+ if (!bridge->dma_list_add) {
printk(KERN_WARNING "Link List DMA generation not supported\n");
return -EINVAL;
}
@@ -1181,7 +1158,7 @@ int vme_dma_list_exec(struct vme_dma_list *list)
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_exec == NULL) {
+ if (!bridge->dma_list_exec) {
printk(KERN_ERR "Link List DMA execution not supported\n");
return -EINVAL;
}
@@ -1210,14 +1187,14 @@ int vme_dma_list_free(struct vme_dma_list *list)
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_empty == NULL) {
+ if (!bridge->dma_list_empty) {
printk(KERN_WARNING "Emptying of Link Lists not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&list->mtx)) {
printk(KERN_ERR "Link List in use\n");
- return -EINVAL;
+ return -EBUSY;
}
/*
@@ -1342,8 +1319,7 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
call = bridge->irq[level - 1].callback[statid].func;
priv_data = bridge->irq[level - 1].callback[statid].priv_data;
-
- if (call != NULL)
+ if (call)
call(level, statid, priv_data);
else
printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
@@ -1374,7 +1350,7 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
@@ -1384,7 +1360,7 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
return -EINVAL;
}
- if (bridge->irq_set == NULL) {
+ if (!bridge->irq_set) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return -EINVAL;
}
@@ -1423,7 +1399,7 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return;
}
@@ -1433,7 +1409,7 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
return;
}
- if (bridge->irq_set == NULL) {
+ if (!bridge->irq_set) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return;
}
@@ -1470,7 +1446,7 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
@@ -1480,7 +1456,7 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
return -EINVAL;
}
- if (bridge->irq_generate == NULL) {
+ if (!bridge->irq_generate) {
printk(KERN_WARNING "Interrupt generation not supported\n");
return -EINVAL;
}
@@ -1508,7 +1484,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -1517,8 +1493,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
list_for_each(lm_pos, &bridge->lm_resources) {
lm = list_entry(lm_pos,
struct vme_lm_resource, list);
-
- if (lm == NULL) {
+ if (!lm) {
printk(KERN_ERR "Registered NULL Location Monitor resource\n");
continue;
}
@@ -1535,14 +1510,13 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
}
/* Check to see if we found a resource */
- if (allocated_lm == NULL)
+ if (!allocated_lm)
goto err_lm;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_LM;
resource->entry = &allocated_lm->list;
@@ -1612,7 +1586,7 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_set == NULL) {
+ if (!bridge->lm_set) {
printk(KERN_ERR "vme_lm_set not supported\n");
return -EINVAL;
}
@@ -1648,7 +1622,7 @@ int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_get == NULL) {
+ if (!bridge->lm_get) {
printk(KERN_ERR "vme_lm_get not supported\n");
return -EINVAL;
}
@@ -1685,7 +1659,7 @@ int vme_lm_attach(struct vme_resource *resource, int monitor,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_attach == NULL) {
+ if (!bridge->lm_attach) {
printk(KERN_ERR "vme_lm_attach not supported\n");
return -EINVAL;
}
@@ -1718,7 +1692,7 @@ int vme_lm_detach(struct vme_resource *resource, int monitor)
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_detach == NULL) {
+ if (!bridge->lm_detach) {
printk(KERN_ERR "vme_lm_detach not supported\n");
return -EINVAL;
}
@@ -1780,12 +1754,12 @@ int vme_slot_num(struct vme_dev *vdev)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
- if (bridge->slot_get == NULL) {
+ if (!bridge->slot_get) {
printk(KERN_WARNING "vme_slot_num not supported\n");
return -EINVAL;
}
@@ -1808,7 +1782,7 @@ int vme_bus_num(struct vme_dev *vdev)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
pr_err("Can't find VME bus\n");
return -EINVAL;
}
@@ -1888,7 +1862,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
struct vme_dev *tmp;
for (i = 0; i < ndevs; i++) {
- vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
err = -ENOMEM;
goto err_devalloc;
@@ -2020,30 +1994,26 @@ static int vme_bus_match(struct device *dev, struct device_driver *drv)
static int vme_bus_probe(struct device *dev)
{
- int retval = -ENODEV;
struct vme_driver *driver;
struct vme_dev *vdev = dev_to_vme_dev(dev);
driver = dev->platform_data;
+ if (driver->probe)
+ return driver->probe(vdev);
- if (driver->probe != NULL)
- retval = driver->probe(vdev);
-
- return retval;
+ return -ENODEV;
}
static int vme_bus_remove(struct device *dev)
{
- int retval = -ENODEV;
struct vme_driver *driver;
struct vme_dev *vdev = dev_to_vme_dev(dev);
driver = dev->platform_data;
+ if (driver->remove)
+ return driver->remove(vdev);
- if (driver->remove != NULL)
- retval = driver->remove(vdev);
-
- return retval;
+ return -ENODEV;
}
struct bus_type vme_bus_type = {
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 3c945f9f5f0f..7931231d8e80 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -148,4 +148,19 @@ config W1_SLAVE_DS28E04
If you are unsure, say N.
+config W1_SLAVE_DS28E17
+ tristate "1-wire-to-I2C master bridge (DS28E17)"
+ select CRC16
+ depends on I2C
+ help
+ Say Y here if you want to use the DS28E17 1-wire-to-I2C master bridge.
+ For each DS28E17 detected, a new I2C adapter is created within the
+ kernel. I2C devices on that bus can be configured to be used by the
+ kernel and userspace tools as on any other "native" I2C bus.
+
+ This driver is also available as a module. If so, the module
+ will be called w1_ds28e17.
+
+ If you are unsure, say N.
+
endmenu
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 79c611ce5f18..d5f4f4d5b9e5 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
+obj-$(CONFIG_W1_SLAVE_DS28E17) += w1_ds28e17.o
diff --git a/drivers/w1/slaves/w1_ds28e17.c b/drivers/w1/slaves/w1_ds28e17.c
new file mode 100644
index 000000000000..e78b63ea4daf
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds28e17.c
@@ -0,0 +1,771 @@
+/*
+ * w1_ds28e17.c - w1 family 19 (DS28E17) driver
+ *
+ * Copyright (c) 2016 Jan Kandziora <jjj@gmx.de>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#define CRC16_INIT 0
+
+#include <linux/w1.h>
+
+#define W1_FAMILY_DS28E17 0x19
+
+/* Module setup. */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jan Kandziora <jjj@gmx.de>");
+MODULE_DESCRIPTION("w1 family 19 driver for DS28E17, 1-wire to I2C master bridge");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E17));
+
+
+/* Default I2C speed to be set when a DS28E17 is detected. */
+static int i2c_speed = 100;
+module_param_named(speed, i2c_speed, int, (S_IRUSR | S_IWUSR));
+MODULE_PARM_DESC(speed, "Default I2C speed to be set when a DS28E17 is detected");
+
+/* Default I2C stretch value to be set when a DS28E17 is detected. */
+static char i2c_stretch = 1;
+module_param_named(stretch, i2c_stretch, byte, (S_IRUSR | S_IWUSR));
+MODULE_PARM_DESC(stretch, "Default I2C stretch value to be set when a DS28E17 is detected");
+
+/* DS28E17 device command codes. */
+#define W1_F19_WRITE_DATA_WITH_STOP 0x4B
+#define W1_F19_WRITE_DATA_NO_STOP 0x5A
+#define W1_F19_WRITE_DATA_ONLY 0x69
+#define W1_F19_WRITE_DATA_ONLY_WITH_STOP 0x78
+#define W1_F19_READ_DATA_WITH_STOP 0x87
+#define W1_F19_WRITE_READ_DATA_WITH_STOP 0x2D
+#define W1_F19_WRITE_CONFIGURATION 0xD2
+#define W1_F19_READ_CONFIGURATION 0xE1
+#define W1_F19_ENABLE_SLEEP_MODE 0x1E
+#define W1_F19_READ_DEVICE_REVISION 0xC4
+
+/* DS28E17 status bits */
+#define W1_F19_STATUS_CRC 0x01
+#define W1_F19_STATUS_ADDRESS 0x02
+#define W1_F19_STATUS_START 0x08
+
+/*
+ * Maximum number of I2C bytes to transfer within one CRC16 protected onewire
+ * command.
+ * */
+#define W1_F19_WRITE_DATA_LIMIT 255
+
+/* Maximum number of I2C bytes to read with one onewire command. */
+#define W1_F19_READ_DATA_LIMIT 255
+
+/* Constants for calculating the busy sleep. */
+#define W1_F19_BUSY_TIMEBASES { 90, 23, 10 }
+#define W1_F19_BUSY_GRATUITY 1000
+
+/* Number of checks for the busy flag before timeout. */
+#define W1_F19_BUSY_CHECKS 1000
+
+
+/* Slave specific data. */
+struct w1_f19_data {
+ u8 speed;
+ u8 stretch;
+ struct i2c_adapter adapter;
+};
+
+
+/* Wait a while until the busy flag clears. */
+static int w1_f19_i2c_busy_wait(struct w1_slave *sl, size_t count)
+{
+ const unsigned long timebases[3] = W1_F19_BUSY_TIMEBASES;
+ struct w1_f19_data *data = sl->family_data;
+ unsigned int checks;
+
+ /* Check the busy flag first in any case.*/
+ if (w1_touch_bit(sl->master, 1) == 0)
+ return 0;
+
+ /*
+ * Do a generously long sleep in the beginning,
+ * as we have to wait at least this time for all
+ * the I2C bytes at the given speed to be transferred.
+ */
+ usleep_range(timebases[data->speed] * (data->stretch) * count,
+ timebases[data->speed] * (data->stretch) * count
+ + W1_F19_BUSY_GRATUITY);
+
+ /* Now continusly check the busy flag sent by the DS28E17. */
+ checks = W1_F19_BUSY_CHECKS;
+ while ((checks--) > 0) {
+ /* Return success if the busy flag is cleared. */
+ if (w1_touch_bit(sl->master, 1) == 0)
+ return 0;
+
+ /* Wait one non-streched byte timeslot. */
+ udelay(timebases[data->speed]);
+ }
+
+ /* Timeout. */
+ dev_warn(&sl->dev, "busy timeout\n");
+ return -ETIMEDOUT;
+}
+
+
+/* Utility function: result. */
+static size_t w1_f19_error(struct w1_slave *sl, u8 w1_buf[])
+{
+ /* Warnings. */
+ if (w1_buf[0] & W1_F19_STATUS_CRC)
+ dev_warn(&sl->dev, "crc16 mismatch\n");
+ if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
+ dev_warn(&sl->dev, "i2c device not responding\n");
+ if ((w1_buf[0] & (W1_F19_STATUS_CRC | W1_F19_STATUS_ADDRESS)) == 0
+ && w1_buf[1] != 0) {
+ dev_warn(&sl->dev, "i2c short write, %d bytes not acknowledged\n",
+ w1_buf[1]);
+ }
+
+ /* Check error conditions. */
+ if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
+ return -ENXIO;
+ if (w1_buf[0] & W1_F19_STATUS_START)
+ return -EAGAIN;
+ if (w1_buf[0] != 0 || w1_buf[1] != 0)
+ return -EIO;
+
+ /* All ok. */
+ return 0;
+}
+
+
+/* Utility function: write data to I2C slave, single chunk. */
+static int __w1_f19_i2c_write(struct w1_slave *sl,
+ const u8 *command, size_t command_count,
+ const u8 *buffer, size_t count)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[2];
+
+ /* Send command and I2C data to DS28E17. */
+ crc = crc16(CRC16_INIT, command, command_count);
+ w1_write_block(sl->master, command, command_count);
+
+ w1_buf[0] = count;
+ crc = crc16(crc, w1_buf, 1);
+ w1_write_8(sl->master, w1_buf[0]);
+
+ crc = crc16(crc, buffer, count);
+ w1_write_block(sl->master, buffer, count);
+
+ w1_buf[0] = ~(crc & 0xFF);
+ w1_buf[1] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 2);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_read_block(sl->master, w1_buf, 2);
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Return number of bytes written. */
+ return count;
+}
+
+
+/* Write data to I2C slave. */
+static int w1_f19_i2c_write(struct w1_slave *sl, u16 i2c_address,
+ const u8 *buffer, size_t count, bool stop)
+{
+ int result;
+ int remaining = count;
+ const u8 *p;
+ u8 command[2];
+
+ /* Check input. */
+ if (count == 0)
+ return -EOPNOTSUPP;
+
+ /* Check whether we need multiple commands. */
+ if (count <= W1_F19_WRITE_DATA_LIMIT) {
+ /*
+ * Small data amount. Data can be sent with
+ * a single onewire command.
+ */
+
+ /* Send all data to DS28E17. */
+ command[0] = (stop ? W1_F19_WRITE_DATA_WITH_STOP
+ : W1_F19_WRITE_DATA_NO_STOP);
+ command[1] = i2c_address << 1;
+ result = __w1_f19_i2c_write(sl, command, 2, buffer, count);
+ } else {
+ /* Large data amount. Data has to be sent in multiple chunks. */
+
+ /* Send first chunk to DS28E17. */
+ p = buffer;
+ command[0] = W1_F19_WRITE_DATA_NO_STOP;
+ command[1] = i2c_address << 1;
+ result = __w1_f19_i2c_write(sl, command, 2, p,
+ W1_F19_WRITE_DATA_LIMIT);
+ if (result < 0)
+ return result;
+
+ /* Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master))
+ return -EIO;
+
+ /* Next data chunk. */
+ p += W1_F19_WRITE_DATA_LIMIT;
+ remaining -= W1_F19_WRITE_DATA_LIMIT;
+
+ while (remaining > W1_F19_WRITE_DATA_LIMIT) {
+ /* Send intermediate chunk to DS28E17. */
+ command[0] = W1_F19_WRITE_DATA_ONLY;
+ result = __w1_f19_i2c_write(sl, command, 1, p,
+ W1_F19_WRITE_DATA_LIMIT);
+ if (result < 0)
+ return result;
+
+ /* Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master))
+ return -EIO;
+
+ /* Next data chunk. */
+ p += W1_F19_WRITE_DATA_LIMIT;
+ remaining -= W1_F19_WRITE_DATA_LIMIT;
+ }
+
+ /* Send final chunk to DS28E17. */
+ command[0] = (stop ? W1_F19_WRITE_DATA_ONLY_WITH_STOP
+ : W1_F19_WRITE_DATA_ONLY);
+ result = __w1_f19_i2c_write(sl, command, 1, p, remaining);
+ }
+
+ return result;
+}
+
+
+/* Read data from I2C slave. */
+static int w1_f19_i2c_read(struct w1_slave *sl, u16 i2c_address,
+ u8 *buffer, size_t count)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[5];
+
+ /* Check input. */
+ if (count == 0)
+ return -EOPNOTSUPP;
+
+ /* Send command to DS28E17. */
+ w1_buf[0] = W1_F19_READ_DATA_WITH_STOP;
+ w1_buf[1] = i2c_address << 1 | 0x01;
+ w1_buf[2] = count;
+ crc = crc16(CRC16_INIT, w1_buf, 3);
+ w1_buf[3] = ~(crc & 0xFF);
+ w1_buf[4] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 5);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_buf[0] = w1_read_8(sl->master);
+ w1_buf[1] = 0;
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Read received I2C data from DS28E17. */
+ return w1_read_block(sl->master, buffer, count);
+}
+
+
+/* Write to, then read data from I2C slave. */
+static int w1_f19_i2c_write_read(struct w1_slave *sl, u16 i2c_address,
+ const u8 *wbuffer, size_t wcount, u8 *rbuffer, size_t rcount)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[3];
+
+ /* Check input. */
+ if (wcount == 0 || rcount == 0)
+ return -EOPNOTSUPP;
+
+ /* Send command and I2C data to DS28E17. */
+ w1_buf[0] = W1_F19_WRITE_READ_DATA_WITH_STOP;
+ w1_buf[1] = i2c_address << 1;
+ w1_buf[2] = wcount;
+ crc = crc16(CRC16_INIT, w1_buf, 3);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ crc = crc16(crc, wbuffer, wcount);
+ w1_write_block(sl->master, wbuffer, wcount);
+
+ w1_buf[0] = rcount;
+ crc = crc16(crc, w1_buf, 1);
+ w1_buf[1] = ~(crc & 0xFF);
+ w1_buf[2] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, wcount + rcount + 2) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_read_block(sl->master, w1_buf, 2);
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Read received I2C data from DS28E17. */
+ return w1_read_block(sl->master, rbuffer, rcount);
+}
+
+
+/* Do an I2C master transfer. */
+static int w1_f19_i2c_master_transfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct w1_slave *sl = (struct w1_slave *) adapter->algo_data;
+ int i = 0;
+ int result = 0;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Select DS28E17. */
+ if (w1_reset_select_slave(sl)) {
+ i = -EIO;
+ goto error;
+ }
+
+ /* Loop while there are still messages to transfer. */
+ while (i < num) {
+ /*
+ * Check for special case: Small write followed
+ * by read to same I2C device.
+ */
+ if (i < (num-1)
+ && msgs[i].addr == msgs[i+1].addr
+ && !(msgs[i].flags & I2C_M_RD)
+ && (msgs[i+1].flags & I2C_M_RD)
+ && (msgs[i].len <= W1_F19_WRITE_DATA_LIMIT)) {
+ /*
+ * The DS28E17 has a combined transfer
+ * for small write+read.
+ */
+ result = w1_f19_i2c_write_read(sl, msgs[i].addr,
+ msgs[i].buf, msgs[i].len,
+ msgs[i+1].buf, msgs[i+1].len);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+
+ /*
+ * Check if we should interpret the read data
+ * as a length byte. The DS28E17 unfortunately
+ * has no read without stop, so we can just do
+ * another simple read in that case.
+ */
+ if (msgs[i+1].flags & I2C_M_RECV_LEN) {
+ result = w1_f19_i2c_read(sl, msgs[i+1].addr,
+ &(msgs[i+1].buf[1]), msgs[i+1].buf[0]);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+
+ /* Eat up read message, too. */
+ i++;
+ } else if (msgs[i].flags & I2C_M_RD) {
+ /* Read transfer. */
+ result = w1_f19_i2c_read(sl, msgs[i].addr,
+ msgs[i].buf, msgs[i].len);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+
+ /*
+ * Check if we should interpret the read data
+ * as a length byte. The DS28E17 unfortunately
+ * has no read without stop, so we can just do
+ * another simple read in that case.
+ */
+ if (msgs[i].flags & I2C_M_RECV_LEN) {
+ result = w1_f19_i2c_read(sl,
+ msgs[i].addr,
+ &(msgs[i].buf[1]),
+ msgs[i].buf[0]);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+ } else {
+ /*
+ * Write transfer.
+ * Stop condition only for last
+ * transfer.
+ */
+ result = w1_f19_i2c_write(sl,
+ msgs[i].addr,
+ msgs[i].buf,
+ msgs[i].len,
+ i == (num-1));
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+
+ /* Next message. */
+ i++;
+
+ /* Are there still messages to send/receive? */
+ if (i < num) {
+ /* Yes. Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master)) {
+ i = -EIO;
+ goto error;
+ }
+ }
+ }
+
+error:
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ /* Return number of messages processed or error. */
+ return i;
+}
+
+
+/* Get I2C adapter functionality. */
+static u32 w1_f19_i2c_functionality(struct i2c_adapter *adapter)
+{
+ /*
+ * Plain I2C functions only.
+ * SMBus is emulated by the kernel's I2C layer.
+ * No "I2C_FUNC_SMBUS_QUICK"
+ * No "I2C_FUNC_SMBUS_READ_BLOCK_DATA"
+ * No "I2C_FUNC_SMBUS_BLOCK_PROC_CALL"
+ */
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK |
+ I2C_FUNC_SMBUS_PEC;
+}
+
+
+/* I2C adapter quirks. */
+static const struct i2c_adapter_quirks w1_f19_i2c_adapter_quirks = {
+ .max_read_len = W1_F19_READ_DATA_LIMIT,
+};
+
+/* I2C algorithm. */
+static const struct i2c_algorithm w1_f19_i2c_algorithm = {
+ .master_xfer = w1_f19_i2c_master_transfer,
+ .functionality = w1_f19_i2c_functionality,
+};
+
+
+/* Read I2C speed from DS28E17. */
+static int w1_f19_get_i2c_speed(struct w1_slave *sl)
+{
+ struct w1_f19_data *data = sl->family_data;
+ int result = -EIO;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Select slave. */
+ if (w1_reset_select_slave(sl))
+ goto error;
+
+ /* Read slave configuration byte. */
+ w1_write_8(sl->master, W1_F19_READ_CONFIGURATION);
+ result = w1_read_8(sl->master);
+ if (result < 0 || result > 2) {
+ result = -EIO;
+ goto error;
+ }
+
+ /* Update speed in slave specific data. */
+ data->speed = result;
+
+error:
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return result;
+}
+
+
+/* Set I2C speed on DS28E17. */
+static int __w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
+{
+ struct w1_f19_data *data = sl->family_data;
+ const int i2c_speeds[3] = { 100, 400, 900 };
+ u8 w1_buf[2];
+
+ /* Select slave. */
+ if (w1_reset_select_slave(sl))
+ return -EIO;
+
+ w1_buf[0] = W1_F19_WRITE_CONFIGURATION;
+ w1_buf[1] = speed;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ /* Update speed in slave specific data. */
+ data->speed = speed;
+
+ dev_info(&sl->dev, "i2c speed set to %d kBaud\n", i2c_speeds[speed]);
+
+ return 0;
+}
+
+static int w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
+{
+ int result;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Set I2C speed on DS28E17. */
+ result = __w1_f19_set_i2c_speed(sl, speed);
+
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return result;
+}
+
+
+/* Sysfs attributes. */
+
+/* I2C speed attribute for a single chip. */
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ int result;
+
+ /* Read current speed from slave. Updates data->speed. */
+ result = w1_f19_get_i2c_speed(sl);
+ if (result < 0)
+ return result;
+
+ /* Return current speed value. */
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ int error;
+
+ /* Valid values are: "100", "400", "900" */
+ if (count < 3 || count > 4 || !buf)
+ return -EINVAL;
+ if (count == 4 && buf[3] != '\n')
+ return -EINVAL;
+ if (buf[1] != '0' || buf[2] != '0')
+ return -EINVAL;
+
+ /* Set speed on slave. */
+ switch (buf[0]) {
+ case '1':
+ error = w1_f19_set_i2c_speed(sl, 0);
+ break;
+ case '4':
+ error = w1_f19_set_i2c_speed(sl, 1);
+ break;
+ case '9':
+ error = w1_f19_set_i2c_speed(sl, 2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (error < 0)
+ return error;
+
+ /* Return bytes written. */
+ return count;
+}
+
+static DEVICE_ATTR_RW(speed);
+
+
+/* Busy stretch attribute for a single chip. */
+static ssize_t stretch_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ struct w1_f19_data *data = sl->family_data;
+
+ /* Return current stretch value. */
+ return sprintf(buf, "%d\n", data->stretch);
+}
+
+static ssize_t stretch_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ struct w1_f19_data *data = sl->family_data;
+
+ /* Valid values are '1' to '9' */
+ if (count < 1 || count > 2 || !buf)
+ return -EINVAL;
+ if (count == 2 && buf[1] != '\n')
+ return -EINVAL;
+ if (buf[0] < '1' || buf[0] > '9')
+ return -EINVAL;
+
+ /* Set busy stretch value. */
+ data->stretch = buf[0] & 0x0F;
+
+ /* Return bytes written. */
+ return count;
+}
+
+static DEVICE_ATTR_RW(stretch);
+
+
+/* All attributes. */
+static struct attribute *w1_f19_attrs[] = {
+ &dev_attr_speed.attr,
+ &dev_attr_stretch.attr,
+ NULL,
+};
+
+static const struct attribute_group w1_f19_group = {
+ .attrs = w1_f19_attrs,
+};
+
+static const struct attribute_group *w1_f19_groups[] = {
+ &w1_f19_group,
+ NULL,
+};
+
+
+/* Slave add and remove functions. */
+static int w1_f19_add_slave(struct w1_slave *sl)
+{
+ struct w1_f19_data *data = NULL;
+
+ /* Allocate memory for slave specific data. */
+ data = devm_kzalloc(&sl->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ sl->family_data = data;
+
+ /* Setup default I2C speed on slave. */
+ switch (i2c_speed) {
+ case 100:
+ __w1_f19_set_i2c_speed(sl, 0);
+ break;
+ case 400:
+ __w1_f19_set_i2c_speed(sl, 1);
+ break;
+ case 900:
+ __w1_f19_set_i2c_speed(sl, 2);
+ break;
+ default:
+ /*
+ * A i2c_speed module parameter of anything else
+ * than 100, 400, 900 means not to touch the
+ * speed of the DS28E17.
+ * We assume 400kBaud, the power-on value.
+ */
+ data->speed = 1;
+ }
+
+ /*
+ * Setup default busy stretch
+ * configuration for the DS28E17.
+ */
+ data->stretch = i2c_stretch;
+
+ /* Setup I2C adapter. */
+ data->adapter.owner = THIS_MODULE;
+ data->adapter.algo = &w1_f19_i2c_algorithm;
+ data->adapter.algo_data = sl;
+ strcpy(data->adapter.name, "w1-");
+ strcat(data->adapter.name, sl->name);
+ data->adapter.dev.parent = &sl->dev;
+ data->adapter.quirks = &w1_f19_i2c_adapter_quirks;
+
+ return i2c_add_adapter(&data->adapter);
+}
+
+static void w1_f19_remove_slave(struct w1_slave *sl)
+{
+ struct w1_f19_data *family_data = sl->family_data;
+
+ /* Delete I2C adapter. */
+ i2c_del_adapter(&family_data->adapter);
+
+ /* Free slave specific data. */
+ devm_kfree(&sl->dev, family_data);
+ sl->family_data = NULL;
+}
+
+
+/* Declarations within the w1 subsystem. */
+static struct w1_family_ops w1_f19_fops = {
+ .add_slave = w1_f19_add_slave,
+ .remove_slave = w1_f19_remove_slave,
+ .groups = w1_f19_groups,
+};
+
+static struct w1_family w1_family_19 = {
+ .fid = W1_FAMILY_DS28E17,
+ .fops = &w1_f19_fops,
+};
+
+
+/* Module init and remove functions. */
+static int __init w1_f19_init(void)
+{
+ return w1_register_family(&w1_family_19);
+}
+
+static void __exit w1_f19_fini(void)
+{
+ w1_unregister_family(&w1_family_19);
+}
+
+module_init(w1_f19_init);
+module_exit(w1_f19_fini);
+
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 259525c3382a..3c350dfbcd0b 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -268,17 +268,18 @@ static inline int w1_therm_eeprom(struct device *device)
int ret, max_trying = 10;
u8 *family_data = sl->family_data;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto post_unlock;
-
if (!sl->family_data) {
ret = -ENODEV;
- goto pre_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(rom, 0, sizeof(rom));
while (max_trying--) {
@@ -306,17 +307,17 @@ static inline int w1_therm_eeprom(struct device *device)
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto post_unlock;
+ goto dec_refcnt;
}
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret != 0)
- goto post_unlock;
+ goto dec_refcnt;
} else if (!w1_strong_pullup) {
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto pre_unlock;
+ goto mt_unlock;
}
}
@@ -324,11 +325,11 @@ static inline int w1_therm_eeprom(struct device *device)
}
}
-pre_unlock:
+mt_unlock:
mutex_unlock(&dev->bus_mutex);
-
-post_unlock:
+dec_refcnt:
atomic_dec(THERM_REFCNT(family_data));
+error:
return ret;
}
@@ -350,20 +351,22 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
if (val > 12 || val < 9) {
pr_warn("Unsupported precision\n");
- return -1;
+ ret = -EINVAL;
+ goto error;
}
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto post_unlock;
-
if (!sl->family_data) {
ret = -ENODEV;
- goto pre_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(rom, 0, sizeof(rom));
/* translate precision to bitmask (see datasheet page 9) */
@@ -411,11 +414,10 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
}
}
-pre_unlock:
mutex_unlock(&dev->bus_mutex);
-
-post_unlock:
+dec_refcnt:
atomic_dec(THERM_REFCNT(family_data));
+error:
return ret;
}
@@ -490,17 +492,18 @@ static ssize_t read_therm(struct device *device,
int ret, max_trying = 10;
u8 *family_data = sl->family_data;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto error;
-
if (!family_data) {
ret = -ENODEV;
- goto mt_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(info->rom, 0, sizeof(info->rom));
while (max_trying--) {
@@ -542,7 +545,7 @@ static ssize_t read_therm(struct device *device,
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto dec_refcnt;
+ goto mt_unlock;
}
}
@@ -567,10 +570,10 @@ static ssize_t read_therm(struct device *device,
break;
}
-dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
mt_unlock:
mutex_unlock(&dev->bus_mutex);
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(family_data));
error:
return ret;
}
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index d191e1f80579..075d120e7b88 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -58,7 +58,7 @@ static u8 w1_read_bit(struct w1_master *dev);
* @dev: the master device
* @bit: 0 - write a 0, 1 - write a 0 read the level
*/
-static u8 w1_touch_bit(struct w1_master *dev, int bit)
+u8 w1_touch_bit(struct w1_master *dev, int bit)
{
if (dev->bus_master->touch_bit)
return dev->bus_master->touch_bit(dev->bus_master->data, bit);
@@ -69,6 +69,7 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
return 0;
}
}
+EXPORT_SYMBOL_GPL(w1_touch_bit);
/**
* w1_write_bit() - Generates a write-0 or write-1 cycle.
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 18e896eeca62..12f7ea62dddd 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -70,7 +70,7 @@ module_param(use_gpio, int, 0);
MODULE_PARM_DESC(use_gpio,
"Use the gpio watchdog (required by old cobalt boards).");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(nowayout,
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long unused)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 7e6acaf3ece4..88c05d0448b2 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -120,9 +120,9 @@ static inline void at91_wdt_reset(struct at91wdt *wdt)
/*
* Timer tick
*/
-static void at91_ping(unsigned long data)
+static void at91_ping(struct timer_list *t)
{
- struct at91wdt *wdt = (struct at91wdt *)data;
+ struct at91wdt *wdt = from_timer(wdt, t, timer);
if (time_before(jiffies, wdt->next_heartbeat) ||
!watchdog_active(&wdt->wdd)) {
at91_wdt_reset(wdt);
@@ -222,7 +222,7 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
"watchdog already configured differently (mr = %x expecting %x)\n",
tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
- setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, at91_ping, 0);
/*
* Use min_heartbeat the first time to avoid spurious watchdog reset:
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 236582809336..f41b756d6dd5 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -106,9 +106,9 @@ static const struct watchdog_ops bcm47xx_wdt_hard_ops = {
.restart = bcm47xx_wdt_restart,
};
-static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
+static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
{
- struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+ struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
if (!atomic_dec_and_test(&wdt->soft_ticks)) {
@@ -133,7 +133,7 @@ static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
bcm47xx_wdt_soft_keepalive(wdd);
- bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
+ bcm47xx_wdt_soft_timer_tick(&wdt->soft_timer);
return 0;
}
@@ -190,8 +190,7 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
if (soft) {
wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
- setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
- (long unsigned int)wdt);
+ timer_setup(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick, 0);
} else {
wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
}
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index ab26fd90729e..8555afc70f9b 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -77,7 +77,7 @@ static void bcm63xx_wdt_isr(void *data)
die(PFX " fire", regs);
}
-static void bcm63xx_timer_tick(unsigned long unused)
+static void bcm63xx_timer_tick(struct timer_list *unused)
{
if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
bcm63xx_wdt_hw_start();
@@ -240,7 +240,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
int ret;
struct resource *r;
- setup_timer(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0L);
+ timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 6c3f78e45c26..6cfb102c397c 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -69,7 +69,7 @@ static struct {
/* generic helper functions */
-static void cpu5wdt_trigger(unsigned long unused)
+static void cpu5wdt_trigger(struct timer_list *unused)
{
if (verbose > 2)
pr_debug("trigger at %i ticks\n", ticks);
@@ -224,7 +224,7 @@ static int cpu5wdt_init(void)
init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
- setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
+ timer_setup(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 8a616a57bb90..88d823d87a4b 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -121,7 +121,7 @@ module_param(action, int, 0);
MODULE_PARM_DESC(action, "after watchdog resets, generate: "
"0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
-static void zf_ping(unsigned long data);
+static void zf_ping(struct timer_list *unused);
static int zf_action = GEN_RESET;
static unsigned long zf_is_open;
@@ -237,7 +237,7 @@ static void zf_timer_on(void)
}
-static void zf_ping(unsigned long data)
+static void zf_ping(struct timer_list *unused)
{
unsigned int ctrl_reg = 0;
unsigned long flags;
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index c9e38096ea91..3cc07447c655 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -99,7 +99,7 @@ static struct {
{0x0000, 0},
};
-static void mixcomwd_timerfun(unsigned long d);
+static void mixcomwd_timerfun(struct timer_list *unused);
static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
@@ -120,7 +120,7 @@ static void mixcomwd_ping(void)
return;
}
-static void mixcomwd_timerfun(unsigned long d)
+static void mixcomwd_timerfun(struct timer_list *unused)
{
mixcomwd_ping();
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 366e5c7e650b..6610e9217dbc 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -80,9 +80,9 @@ static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
spin_unlock(&ddata->lock);
}
-static void mpc8xxx_wdt_timer_ping(unsigned long arg)
+static void mpc8xxx_wdt_timer_ping(struct timer_list *t)
{
- struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
+ struct mpc8xxx_wdt_ddata *ddata = from_timer(ddata, t, timer);
mpc8xxx_wdt_keepalive(ddata);
/* We're pinging it twice faster than needed, just to be sure. */
@@ -173,8 +173,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
}
spin_lock_init(&ddata->lock);
- setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
- (unsigned long)ddata);
+ timer_setup(&ddata->timer, mpc8xxx_wdt_timer_ping, 0);
ddata->wdd.info = &mpc8xxx_wdt_info,
ddata->wdd.ops = &mpc8xxx_wdt_ops,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ff27c4ac96e4..ca360d204548 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -68,7 +68,7 @@ static struct {
unsigned int gstate;
} mtx1_wdt_device;
-static void mtx1_wdt_trigger(unsigned long unused)
+static void mtx1_wdt_trigger(struct timer_list *unused)
{
spin_lock(&mtx1_wdt_device.lock);
if (mtx1_wdt_device.running)
@@ -219,7 +219,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
init_completion(&mtx1_wdt_device.stop);
mtx1_wdt_device.queue = 0;
clear_bit(0, &mtx1_wdt_device.inuse);
- setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
+ timer_setup(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0);
mtx1_wdt_device.default_ticks = ticks;
ret = misc_register(&mtx1_wdt_misc);
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index d5bed78c4d9f..830bd04ff911 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -216,7 +216,7 @@ static ssize_t nuc900_wdt_write(struct file *file, const char __user *data,
return len;
}
-static void nuc900_wdt_timer_ping(unsigned long data)
+static void nuc900_wdt_timer_ping(struct timer_list *unused)
{
if (time_before(jiffies, nuc900_wdt->next_heartbeat)) {
nuc900_wdt_keepalive();
@@ -267,7 +267,7 @@ static int nuc900wdt_probe(struct platform_device *pdev)
clk_enable(nuc900_wdt->wdt_clock);
- setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
+ timer_setup(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
ret = misc_register(&nuc900wdt_miscdev);
if (ret) {
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 3ad5206d7935..b72ce68eacd3 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -367,7 +367,7 @@ static void pcwd_show_card_info(void)
pr_info("No previous trip detected - Cold boot or reset\n");
}
-static void pcwd_timer_ping(unsigned long data)
+static void pcwd_timer_ping(struct timer_list *unused)
{
int wdrst_stat;
@@ -893,7 +893,7 @@ static int pcwd_isa_probe(struct device *dev, unsigned int id)
/* clear the "card caused reboot" flag */
pcwd_clear_status();
- setup_timer(&pcwd_private.timer, pcwd_timer_ping, 0);
+ timer_setup(&pcwd_private.timer, pcwd_timer_ping, 0);
/* Disable the board */
pcwd_stop();
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index e35cf5e87907..e0a6f8c0f03c 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -85,7 +85,7 @@ static inline void pikawdt_reset(void)
/*
* Timer tick
*/
-static void pikawdt_ping(unsigned long data)
+static void pikawdt_ping(struct timer_list *unused)
{
if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
(!nowayout && !pikawdt_private.open)) {
@@ -269,7 +269,7 @@ static int __init pikawdt_init(void)
iounmap(fpga);
- setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);
+ timer_setup(&pikawdt_private.timer, pikawdt_ping, 0);
ret = misc_register(&pikawdt_miscdev);
if (ret) {
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 47a8f1b1087d..a281aa84bfb1 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -67,7 +67,7 @@ static struct {
/* generic helper functions */
-static void rdc321x_wdt_trigger(unsigned long unused)
+static void rdc321x_wdt_trigger(struct timer_list *unused)
{
unsigned long flags;
u32 val;
@@ -262,7 +262,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
clear_bit(0, &rdc321x_wdt_device.inuse);
- setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+ timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
rdc321x_wdt_device.default_ticks = ticks;
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 8d589939bc84..87333a41f753 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -112,7 +112,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -122,7 +122,7 @@ static char wdt_expect_close;
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 3e9bbaa37bf4..6aadb56e7faa 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -123,7 +123,7 @@ MODULE_PARM_DESC(nowayout,
static __u16 __iomem *wdtmrctl;
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 517a733175ef..a7d6425db807 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -175,9 +175,9 @@ static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
return 0;
}
-static void sh_wdt_ping(unsigned long data)
+static void sh_wdt_ping(struct timer_list *t)
{
- struct sh_wdt *wdt = (struct sh_wdt *)data;
+ struct sh_wdt *wdt = from_timer(wdt, t, timer);
unsigned long flags;
spin_lock_irqsave(&wdt->lock, flags);
@@ -275,7 +275,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
return rc;
}
- setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, sh_wdt_ping, 0);
wdt->timer.expires = next_ping_period(clock_division_ratio);
dev_info(&pdev->dev, "initialized.\n");
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index ad3c3be13b40..b085ef1084ec 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -67,7 +67,7 @@ static struct watchdog_device wdt_dev;
static struct resource wdt_res;
static void __iomem *wdt_mem;
static unsigned int mmio;
-static void wdt_timer_tick(unsigned long data);
+static void wdt_timer_tick(struct timer_list *unused);
static DEFINE_TIMER(timer, wdt_timer_tick);
/* The timer that pings the watchdog */
static unsigned long next_heartbeat; /* the next_heartbeat for the timer */
@@ -88,7 +88,7 @@ static inline void wdt_reset(void)
* then the external/userspace heartbeat).
* 2) the watchdog timer has been stopped by userspace.
*/
-static void wdt_timer_tick(unsigned long data)
+static void wdt_timer_tick(struct timer_list *unused)
{
if (time_before(jiffies, next_heartbeat) ||
(!watchdog_active(&wdt_dev))) {
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index ba6b680af100..05658ecc0aa4 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
@@ -108,7 +108,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 74265b2f806c..8a8d952f8df9 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -137,25 +137,6 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
}
EXPORT_SYMBOL_GPL(watchdog_init_timeout);
-static int watchdog_reboot_notifier(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
- reboot_nb);
-
- if (code == SYS_DOWN || code == SYS_HALT) {
- if (watchdog_active(wdd)) {
- int ret;
-
- ret = wdd->ops->stop(wdd);
- if (ret)
- return NOTIFY_BAD;
- }
- }
-
- return NOTIFY_DONE;
-}
-
static int watchdog_restart_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -244,19 +225,6 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
}
- if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
- ret = register_reboot_notifier(&wdd->reboot_nb);
- if (ret) {
- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
- wdd->id, ret);
- watchdog_dev_unregister(wdd);
- ida_simple_remove(&watchdog_ida, wdd->id);
- return ret;
- }
- }
-
if (wdd->ops->restart) {
wdd->restart_nb.notifier_call = watchdog_restart_notifier;
@@ -302,9 +270,6 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd)
if (wdd->ops->restart)
unregister_restart_handler(&wdd->restart_nb);
- if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
- unregister_reboot_notifier(&wdd->reboot_nb);
-
watchdog_dev_unregister(wdd);
ida_simple_remove(&watchdog_ida, wdd->id);
}
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 0826e663bd5a..1e971a50d7fb 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -42,6 +42,7 @@
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
#include <linux/mutex.h> /* For mutexes */
+#include <linux/reboot.h> /* For reboot notifier */
#include <linux/slab.h> /* For memory functions */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/watchdog.h> /* For watchdog specific items */
@@ -1016,6 +1017,25 @@ static struct class watchdog_class = {
.dev_groups = wdt_groups,
};
+static int watchdog_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ struct watchdog_device *wdd;
+
+ wdd = container_of(nb, struct watchdog_device, reboot_nb);
+ if (code == SYS_DOWN || code == SYS_HALT) {
+ if (watchdog_active(wdd)) {
+ int ret;
+
+ ret = wdd->ops->stop(wdd);
+ if (ret)
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
/*
* watchdog_dev_register: register a watchdog device
* @wdd: watchdog device
@@ -1049,6 +1069,18 @@ int watchdog_dev_register(struct watchdog_device *wdd)
if (ret) {
device_destroy(&watchdog_class, devno);
watchdog_cdev_unregister(wdd);
+ return ret;
+ }
+
+ if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
+ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+ ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
+ if (ret) {
+ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+ wdd->id, ret);
+ watchdog_dev_unregister(wdd);
+ }
}
return ret;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 4545561954ee..e5d0c28372ea 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -196,6 +196,17 @@ config XEN_PCIDEV_BACKEND
If in doubt, say m.
+config XEN_PVCALLS_FRONTEND
+ tristate "XEN PV Calls frontend driver"
+ depends on INET && XEN
+ default n
+ select XEN_XENBUS_FRONTEND
+ help
+ Experimental frontend for the Xen PV Calls protocol
+ (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
+ sends a small set of POSIX calls to the backend, which
+ implements them.
+
config XEN_PVCALLS_BACKEND
bool "XEN PV Calls backend driver"
depends on INET && XEN && XEN_BACKEND
@@ -258,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d3930ecaf11d..451e833f5931 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_XEN_EFI) += efi.o
obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o
obj-$(CONFIG_XEN_PVCALLS_BACKEND) += pvcalls-back.o
+obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index a8721d718186..f45114fd8e1e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -43,6 +44,7 @@
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
+#include <linux/moduleparam.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -52,6 +54,9 @@
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
+#ifdef CONFIG_X86
+#include <asm/xen/cpuid.h>
+#endif
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -68,15 +73,26 @@ static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
+static unsigned int xen_gnttab_version;
+module_param_named(version, xen_gnttab_version, uint, 0);
static union {
struct grant_entry_v1 *v1;
+ union grant_entry_v2 *v2;
void *addr;
} gnttab_shared;
/*This is a structure of function pointers for grant table*/
struct gnttab_ops {
/*
+ * Version of the grant interface.
+ */
+ unsigned int version;
+ /*
+ * Grant refs per grant frame.
+ */
+ unsigned int grefs_per_grant_frame;
+ /*
* Mapping a list of frames for storing grant entries. Frames parameter
* is used to store grant table address when grant table being setup,
* nr_gframes is the number of frames to map grant table. Returning
@@ -130,14 +146,15 @@ struct unmap_refs_callback_data {
static const struct gnttab_ops *gnttab_interface;
-static int grant_table_version;
-static int grefs_per_grant_frame;
+/* This reflects status of grant entries, so act as a global value. */
+static grant_status_t *grstatus;
static struct gnttab_free_callback *gnttab_free_callback_list;
static int gnttab_expand(unsigned int req_entries);
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+#define SPP (PAGE_SIZE / sizeof(grant_status_t))
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
@@ -210,7 +227,7 @@ static void put_free_entry(grant_ref_t ref)
}
/*
- * Following applies to gnttab_update_entry_v1.
+ * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
* Introducing a valid entry into the grant table:
* 1. Write ent->domid.
* 2. Write ent->frame:
@@ -229,6 +246,15 @@ static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
gnttab_shared.v1[ref].flags = flags;
}
+static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned int flags)
+{
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ gnttab_shared.v2[ref].full_page.frame = frame;
+ wmb(); /* Hypervisor concurrent accesses. */
+ gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
+}
+
/*
* Public grant-issuing interface functions
*/
@@ -260,6 +286,11 @@ static int gnttab_query_foreign_access_v1(grant_ref_t ref)
return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}
+static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+{
+ return grstatus[ref] & (GTF_reading|GTF_writing);
+}
+
int gnttab_query_foreign_access(grant_ref_t ref)
{
return gnttab_interface->query_foreign_access(ref);
@@ -282,6 +313,29 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
return 1;
}
+static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
+{
+ gnttab_shared.v2[ref].hdr.flags = 0;
+ mb(); /* Concurrent access by hypervisor. */
+ if (grstatus[ref] & (GTF_reading|GTF_writing)) {
+ return 0;
+ } else {
+ /*
+ * The read of grstatus needs to have acquire semantics.
+ * On x86, reads already have that, and we just need to
+ * protect against compiler reorderings.
+ * On other architectures we may need a full barrier.
+ */
+#ifdef CONFIG_X86
+ barrier();
+#else
+ mb();
+#endif
+ }
+
+ return 1;
+}
+
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);
@@ -304,10 +358,10 @@ struct deferred_entry {
struct page *page;
};
static LIST_HEAD(deferred_list);
-static void gnttab_handle_deferred(unsigned long);
+static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
-static void gnttab_handle_deferred(unsigned long unused)
+static void gnttab_handle_deferred(struct timer_list *unused)
{
unsigned int nr = 10;
struct deferred_entry *first = NULL;
@@ -442,6 +496,37 @@ static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
return frame;
}
+static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
+{
+ unsigned long frame;
+ u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v2[ref].hdr.flags;
+
+ /*
+ * If a transfer is not even yet started, try to reclaim the grant
+ * reference and return failure (== 0).
+ */
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
+ return 0;
+ cpu_relax();
+ }
+
+ /* If a transfer is in progress then wait until it is completed. */
+ while (!(flags & GTF_transfer_completed)) {
+ flags = *pflags;
+ cpu_relax();
+ }
+
+ rmb(); /* Read the frame number /after/ reading completion status. */
+ frame = gnttab_shared.v2[ref].full_page.frame;
+ BUG_ON(frame == 0);
+
+ return frame;
+}
+
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
return gnttab_interface->end_foreign_transfer_ref(ref);
@@ -563,19 +648,26 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
+static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
+{
+ return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
+ align;
+}
+
static int grow_gnttab_list(unsigned int more_frames)
{
unsigned int new_nr_grant_frames, extra_entries, i;
unsigned int nr_glist_frames, new_nr_glist_frames;
+ unsigned int grefs_per_frame;
- BUG_ON(grefs_per_grant_frame == 0);
+ BUG_ON(gnttab_interface == NULL);
+ grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
new_nr_grant_frames = nr_grant_frames + more_frames;
- extra_entries = more_frames * grefs_per_grant_frame;
+ extra_entries = more_frames * grefs_per_frame;
- nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
- new_nr_glist_frames =
- (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
+ new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
if (!gnttab_list[i])
@@ -583,12 +675,12 @@ static int grow_gnttab_list(unsigned int more_frames)
}
- for (i = grefs_per_grant_frame * nr_grant_frames;
- i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
+ for (i = grefs_per_frame * nr_grant_frames;
+ i < grefs_per_frame * new_nr_grant_frames - 1; i++)
gnttab_entry(i) = i + 1;
gnttab_entry(i) = gnttab_free_head;
- gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
+ gnttab_free_head = grefs_per_frame * nr_grant_frames;
gnttab_free_count += extra_entries;
nr_grant_frames = new_nr_grant_frames;
@@ -938,6 +1030,12 @@ int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
+static unsigned int nr_status_frames(unsigned int nr_grant_frames)
+{
+ BUG_ON(gnttab_interface == NULL);
+ return gnttab_frames(nr_grant_frames, SPP);
+}
+
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;
@@ -955,6 +1053,55 @@ static void gnttab_unmap_frames_v1(void)
arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
+static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
+{
+ uint64_t *sframes;
+ unsigned int nr_sframes;
+ struct gnttab_get_status_frames getframes;
+ int rc;
+
+ nr_sframes = nr_status_frames(nr_gframes);
+
+ /* No need for kzalloc as it is initialized in following hypercall
+ * GNTTABOP_get_status_frames.
+ */
+ sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
+ if (!sframes)
+ return -ENOMEM;
+
+ getframes.dom = DOMID_SELF;
+ getframes.nr_frames = nr_sframes;
+ set_xen_guest_handle(getframes.frame_list, sframes);
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
+ &getframes, 1);
+ if (rc == -ENOSYS) {
+ kfree(sframes);
+ return -ENOSYS;
+ }
+
+ BUG_ON(rc || getframes.status);
+
+ rc = arch_gnttab_map_status(sframes, nr_sframes,
+ nr_status_frames(gnttab_max_grant_frames()),
+ &grstatus);
+ BUG_ON(rc);
+ kfree(sframes);
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v2(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+ arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
+}
+
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
@@ -1014,6 +1161,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
}
static const struct gnttab_ops gnttab_v1_ops = {
+ .version = 1,
+ .grefs_per_grant_frame = XEN_PAGE_SIZE /
+ sizeof(struct grant_entry_v1),
.map_frames = gnttab_map_frames_v1,
.unmap_frames = gnttab_unmap_frames_v1,
.update_entry = gnttab_update_entry_v1,
@@ -1022,14 +1172,56 @@ static const struct gnttab_ops gnttab_v1_ops = {
.query_foreign_access = gnttab_query_foreign_access_v1,
};
-static void gnttab_request_version(void)
+static const struct gnttab_ops gnttab_v2_ops = {
+ .version = 2,
+ .grefs_per_grant_frame = XEN_PAGE_SIZE /
+ sizeof(union grant_entry_v2),
+ .map_frames = gnttab_map_frames_v2,
+ .unmap_frames = gnttab_unmap_frames_v2,
+ .update_entry = gnttab_update_entry_v2,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
+ .query_foreign_access = gnttab_query_foreign_access_v2,
+};
+
+static bool gnttab_need_v2(void)
{
- /* Only version 1 is used, which will always be available. */
- grant_table_version = 1;
- grefs_per_grant_frame = XEN_PAGE_SIZE / sizeof(struct grant_entry_v1);
- gnttab_interface = &gnttab_v1_ops;
+#ifdef CONFIG_X86
+ uint32_t base, width;
+
+ if (xen_pv_domain()) {
+ base = xen_cpuid_base();
+ if (cpuid_eax(base) < 5)
+ return false; /* Information not available, use V1. */
+ width = cpuid_ebx(base + 5) &
+ XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
+ return width > 32 + PAGE_SHIFT;
+ }
+#endif
+ return !!(max_possible_pfn >> 32);
+}
- pr_info("Grant tables using version %d layout\n", grant_table_version);
+static void gnttab_request_version(void)
+{
+ long rc;
+ struct gnttab_set_version gsv;
+
+ if (gnttab_need_v2())
+ gsv.version = 2;
+ else
+ gsv.version = 1;
+
+ /* Boot parameter overrides automatic selection. */
+ if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
+ gsv.version = xen_gnttab_version;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+ if (rc == 0 && gsv.version == 2)
+ gnttab_interface = &gnttab_v2_ops;
+ else
+ gnttab_interface = &gnttab_v1_ops;
+ pr_info("Grant tables using version %d layout\n",
+ gnttab_interface->version);
}
static int gnttab_setup(void)
@@ -1069,10 +1261,10 @@ static int gnttab_expand(unsigned int req_entries)
int rc;
unsigned int cur, extra;
- BUG_ON(grefs_per_grant_frame == 0);
+ BUG_ON(gnttab_interface == NULL);
cur = nr_grant_frames;
- extra = ((req_entries + (grefs_per_grant_frame-1)) /
- grefs_per_grant_frame);
+ extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
+ gnttab_interface->grefs_per_grant_frame);
if (cur + extra > gnttab_max_grant_frames()) {
pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
" cur=%u extra=%u limit=%u"
@@ -1104,16 +1296,16 @@ int gnttab_init(void)
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
- BUG_ON(grefs_per_grant_frame == 0);
+ BUG_ON(gnttab_interface == NULL);
max_nr_glist_frames = (max_nr_grant_frames *
- grefs_per_grant_frame / RPP);
+ gnttab_interface->grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
GFP_KERNEL);
if (gnttab_list == NULL)
return -ENOMEM;
- nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
for (i = 0; i < nr_glist_frames; i++) {
gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
if (gnttab_list[i] == NULL) {
@@ -1122,7 +1314,8 @@ int gnttab_init(void)
}
}
- ret = arch_gnttab_init(max_nr_grant_frames);
+ ret = arch_gnttab_init(max_nr_grant_frames,
+ nr_status_frames(max_nr_grant_frames));
if (ret < 0)
goto ini_nomem;
@@ -1131,7 +1324,8 @@ int gnttab_init(void)
goto ini_nomem;
}
- nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
+ nr_init_grefs = nr_grant_frames *
+ gnttab_interface->grefs_per_grant_frame;
for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
gnttab_entry(i) = i + 1;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c425d03d37d2..8835065029d3 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -72,18 +72,15 @@ static int xen_suspend(void *data)
}
gnttab_suspend();
+ xen_manage_runstate_time(-1);
xen_arch_pre_suspend();
- /*
- * This hypercall returns 1 if suspend was cancelled
- * or the domain was merely checkpointed, and 0 if it
- * is resuming in a new domain.
- */
si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
? virt_to_gfn(xen_start_info)
: 0);
xen_arch_post_suspend(si->cancelled);
+ xen_manage_runstate_time(si->cancelled ? 1 : 0);
gnttab_resume();
if (!si->cancelled) {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index feca75b07fdd..1c909183c42a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -191,13 +191,10 @@ static int traverse_pages_block(unsigned nelem, size_t size,
void *state)
{
void *pagedata;
- unsigned pageidx;
int ret = 0;
BUG_ON(size > PAGE_SIZE);
- pageidx = PAGE_SIZE;
-
while (nelem) {
int nr = (PAGE_SIZE/size);
struct page *page;
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index b209cd44bb8d..c7822d8078b9 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -134,20 +134,16 @@ static void pvcalls_conn_back_read(void *opaque)
masked_cons = pvcalls_mask(cons, array_size);
memset(&msg, 0, sizeof(msg));
- msg.msg_iter.type = ITER_KVEC|WRITE;
- msg.msg_iter.count = wanted;
if (masked_prod < masked_cons) {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = wanted;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 1;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 1, wanted);
} else {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = array_size - masked_prod;
vec[1].iov_base = data->in;
vec[1].iov_len = wanted - vec[0].iov_len;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 2;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 2, wanted);
}
atomic_set(&map->read, 0);
@@ -196,20 +192,16 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
memset(&msg, 0, sizeof(msg));
msg.msg_flags |= MSG_DONTWAIT;
- msg.msg_iter.type = ITER_KVEC|READ;
- msg.msg_iter.count = size;
if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = size;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 1;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 1, size);
} else {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
vec[1].iov_base = data->out;
vec[1].iov_len = size - vec[0].iov_len;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 2;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 2, size);
}
atomic_set(&map->write, 0);
@@ -1238,3 +1230,7 @@ static void __exit pvcalls_back_fin(void)
}
module_exit(pvcalls_back_fin);
+
+MODULE_DESCRIPTION("Xen PV Calls backend driver");
+MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
new file mode 100644
index 000000000000..d1e1d8d2b9d5
--- /dev/null
+++ b/drivers/xen/pvcalls-front.c
@@ -0,0 +1,1280 @@
+/*
+ * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
+
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/pvcalls.h>
+
+#include "pvcalls-front.h"
+
+#define PVCALLS_INVALID_ID UINT_MAX
+#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
+#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
+#define PVCALLS_FRONT_MAX_SPIN 5000
+
+struct pvcalls_bedata {
+ struct xen_pvcalls_front_ring ring;
+ grant_ref_t ref;
+ int irq;
+
+ struct list_head socket_mappings;
+ spinlock_t socket_lock;
+
+ wait_queue_head_t inflight_req;
+ struct xen_pvcalls_response rsp[PVCALLS_NR_RSP_PER_RING];
+};
+/* Only one front/back connection supported. */
+static struct xenbus_device *pvcalls_front_dev;
+static atomic_t pvcalls_refcount;
+
+/* first increment refcount, then proceed */
+#define pvcalls_enter() { \
+ atomic_inc(&pvcalls_refcount); \
+}
+
+/* first complete other operations, then decrement refcount */
+#define pvcalls_exit() { \
+ atomic_dec(&pvcalls_refcount); \
+}
+
+struct sock_mapping {
+ bool active_socket;
+ struct list_head list;
+ struct socket *sock;
+ union {
+ struct {
+ int irq;
+ grant_ref_t ref;
+ struct pvcalls_data_intf *ring;
+ struct pvcalls_data data;
+ struct mutex in_mutex;
+ struct mutex out_mutex;
+
+ wait_queue_head_t inflight_conn_req;
+ } active;
+ struct {
+ /* Socket status */
+#define PVCALLS_STATUS_UNINITALIZED 0
+#define PVCALLS_STATUS_BIND 1
+#define PVCALLS_STATUS_LISTEN 2
+ uint8_t status;
+ /*
+ * Internal state-machine flags.
+ * Only one accept operation can be inflight for a socket.
+ * Only one poll operation can be inflight for a given socket.
+ */
+#define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
+#define PVCALLS_FLAG_POLL_INFLIGHT 1
+#define PVCALLS_FLAG_POLL_RET 2
+ uint8_t flags;
+ uint32_t inflight_req_id;
+ struct sock_mapping *accept_map;
+ wait_queue_head_t inflight_accept_req;
+ } passive;
+ };
+};
+
+static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
+{
+ *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
+ if (RING_FULL(&bedata->ring) ||
+ bedata->rsp[*req_id].req_id != PVCALLS_INVALID_ID)
+ return -EAGAIN;
+ return 0;
+}
+
+static bool pvcalls_front_write_todo(struct sock_mapping *map)
+{
+ struct pvcalls_data_intf *intf = map->active.ring;
+ RING_IDX cons, prod, size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+ int32_t error;
+
+ error = intf->out_error;
+ if (error == -ENOTCONN)
+ return false;
+ if (error != 0)
+ return true;
+
+ cons = intf->out_cons;
+ prod = intf->out_prod;
+ return !!(size - pvcalls_queued(prod, cons, size));
+}
+
+static bool pvcalls_front_read_todo(struct sock_mapping *map)
+{
+ struct pvcalls_data_intf *intf = map->active.ring;
+ RING_IDX cons, prod;
+ int32_t error;
+
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ error = intf->in_error;
+ return (error != 0 ||
+ pvcalls_queued(prod, cons,
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) != 0);
+}
+
+static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
+{
+ struct xenbus_device *dev = dev_id;
+ struct pvcalls_bedata *bedata;
+ struct xen_pvcalls_response *rsp;
+ uint8_t *src, *dst;
+ int req_id = 0, more = 0, done = 0;
+
+ if (dev == NULL)
+ return IRQ_HANDLED;
+
+ pvcalls_enter();
+ bedata = dev_get_drvdata(&dev->dev);
+ if (bedata == NULL) {
+ pvcalls_exit();
+ return IRQ_HANDLED;
+ }
+
+again:
+ while (RING_HAS_UNCONSUMED_RESPONSES(&bedata->ring)) {
+ rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);
+
+ req_id = rsp->req_id;
+ if (rsp->cmd == PVCALLS_POLL) {
+ struct sock_mapping *map = (struct sock_mapping *)(uintptr_t)
+ rsp->u.poll.id;
+
+ clear_bit(PVCALLS_FLAG_POLL_INFLIGHT,
+ (void *)&map->passive.flags);
+ /*
+ * clear INFLIGHT, then set RET. It pairs with
+ * the checks at the beginning of
+ * pvcalls_front_poll_passive.
+ */
+ smp_wmb();
+ set_bit(PVCALLS_FLAG_POLL_RET,
+ (void *)&map->passive.flags);
+ } else {
+ dst = (uint8_t *)&bedata->rsp[req_id] +
+ sizeof(rsp->req_id);
+ src = (uint8_t *)rsp + sizeof(rsp->req_id);
+ memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
+ /*
+ * First copy the rest of the data, then req_id. It is
+ * paired with the barrier when accessing bedata->rsp.
+ */
+ smp_wmb();
+ bedata->rsp[req_id].req_id = req_id;
+ }
+
+ done = 1;
+ bedata->ring.rsp_cons++;
+ }
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&bedata->ring, more);
+ if (more)
+ goto again;
+ if (done)
+ wake_up(&bedata->inflight_req);
+ pvcalls_exit();
+ return IRQ_HANDLED;
+}
+
+static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+ struct sock_mapping *map)
+{
+ int i;
+
+ unbind_from_irqhandler(map->active.irq, map);
+
+ spin_lock(&bedata->socket_lock);
+ if (!list_empty(&map->list))
+ list_del_init(&map->list);
+ spin_unlock(&bedata->socket_lock);
+
+ for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+ gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0);
+ gnttab_end_foreign_access(map->active.ref, 0, 0);
+ free_page((unsigned long)map->active.ring);
+
+ kfree(map);
+}
+
+static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
+{
+ struct sock_mapping *map = sock_map;
+
+ if (map == NULL)
+ return IRQ_HANDLED;
+
+ wake_up_interruptible(&map->active.inflight_conn_req);
+
+ return IRQ_HANDLED;
+}
+
+int pvcalls_front_socket(struct socket *sock)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret;
+
+ /*
+ * PVCalls only supports domain AF_INET,
+ * type SOCK_STREAM and protocol 0 sockets for now.
+ *
+ * Check socket type here, AF_INET and protocol checks are done
+ * by the caller.
+ */
+ if (sock->type != SOCK_STREAM)
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -EACCES;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL) {
+ pvcalls_exit();
+ return -ENOMEM;
+ }
+
+ spin_lock(&bedata->socket_lock);
+
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ kfree(map);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+
+ /*
+ * sock->sk->sk_send_head is not used for ip sockets: reuse the
+ * field to store a pointer to the struct sock_mapping
+ * corresponding to the socket. This way, we can easily get the
+ * struct sock_mapping from the struct socket.
+ */
+ sock->sk->sk_send_head = (void *)map;
+ list_add_tail(&map->list, &bedata->socket_mappings);
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_SOCKET;
+ req->u.socket.id = (uintptr_t) map;
+ req->u.socket.domain = AF_INET;
+ req->u.socket.type = SOCK_STREAM;
+ req->u.socket.protocol = IPPROTO_IP;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+
+ pvcalls_exit();
+ return ret;
+}
+
+static int create_active(struct sock_mapping *map, int *evtchn)
+{
+ void *bytes;
+ int ret = -ENOMEM, irq = -1, i;
+
+ *evtchn = -1;
+ init_waitqueue_head(&map->active.inflight_conn_req);
+
+ map->active.ring = (struct pvcalls_data_intf *)
+ __get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (map->active.ring == NULL)
+ goto out_error;
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PVCALLS_RING_ORDER);
+ if (bytes == NULL)
+ goto out_error;
+ for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+ map->active.ring->ref[i] = gnttab_grant_foreign_access(
+ pvcalls_front_dev->otherend_id,
+ pfn_to_gfn(virt_to_pfn(bytes) + i), 0);
+
+ map->active.ref = gnttab_grant_foreign_access(
+ pvcalls_front_dev->otherend_id,
+ pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
+
+ map->active.data.in = bytes;
+ map->active.data.out = bytes +
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
+ if (ret)
+ goto out_error;
+ irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
+ 0, "pvcalls-frontend", map);
+ if (irq < 0) {
+ ret = irq;
+ goto out_error;
+ }
+
+ map->active.irq = irq;
+ map->active_socket = true;
+ mutex_init(&map->active.in_mutex);
+ mutex_init(&map->active.out_mutex);
+
+ return 0;
+
+out_error:
+ if (*evtchn >= 0)
+ xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
+ kfree(map->active.data.in);
+ kfree(map->active.ring);
+ return ret;
+}
+
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len, int flags)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret, evtchn;
+
+ if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *)sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ ret = create_active(map, &evtchn);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_CONNECT;
+ req->u.connect.id = (uintptr_t)map;
+ req->u.connect.len = addr_len;
+ req->u.connect.flags = flags;
+ req->u.connect.ref = map->active.ref;
+ req->u.connect.evtchn = evtchn;
+ memcpy(req->u.connect.addr, addr, sizeof(*addr));
+
+ map->sock = sock;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+ pvcalls_exit();
+ return ret;
+}
+
+static int __write_ring(struct pvcalls_data_intf *intf,
+ struct pvcalls_data *data,
+ struct iov_iter *msg_iter,
+ int len)
+{
+ RING_IDX cons, prod, size, masked_prod, masked_cons;
+ RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+ int32_t error;
+
+ error = intf->out_error;
+ if (error < 0)
+ return error;
+ cons = intf->out_cons;
+ prod = intf->out_prod;
+ /* read indexes before continuing */
+ virt_mb();
+
+ size = pvcalls_queued(prod, cons, array_size);
+ if (size >= array_size)
+ return -EINVAL;
+ if (len > array_size - size)
+ len = array_size - size;
+
+ masked_prod = pvcalls_mask(prod, array_size);
+ masked_cons = pvcalls_mask(cons, array_size);
+
+ if (masked_prod < masked_cons) {
+ len = copy_from_iter(data->out + masked_prod, len, msg_iter);
+ } else {
+ if (len > array_size - masked_prod) {
+ int ret = copy_from_iter(data->out + masked_prod,
+ array_size - masked_prod, msg_iter);
+ if (ret != array_size - masked_prod) {
+ len = ret;
+ goto out;
+ }
+ len = ret + copy_from_iter(data->out, len - ret, msg_iter);
+ } else {
+ len = copy_from_iter(data->out + masked_prod, len, msg_iter);
+ }
+ }
+out:
+ /* write to ring before updating pointer */
+ virt_wmb();
+ intf->out_prod += len;
+
+ return len;
+}
+
+int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ int sent, tot_sent = 0;
+ int count = 0, flags;
+
+ flags = msg->msg_flags;
+ if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ mutex_lock(&map->active.out_mutex);
+ if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
+ mutex_unlock(&map->active.out_mutex);
+ pvcalls_exit();
+ return -EAGAIN;
+ }
+ if (len > INT_MAX)
+ len = INT_MAX;
+
+again:
+ count++;
+ sent = __write_ring(map->active.ring,
+ &map->active.data, &msg->msg_iter,
+ len);
+ if (sent > 0) {
+ len -= sent;
+ tot_sent += sent;
+ notify_remote_via_irq(map->active.irq);
+ }
+ if (sent >= 0 && len > 0 && count < PVCALLS_FRONT_MAX_SPIN)
+ goto again;
+ if (sent < 0)
+ tot_sent = sent;
+
+ mutex_unlock(&map->active.out_mutex);
+ pvcalls_exit();
+ return tot_sent;
+}
+
+static int __read_ring(struct pvcalls_data_intf *intf,
+ struct pvcalls_data *data,
+ struct iov_iter *msg_iter,
+ size_t len, int flags)
+{
+ RING_IDX cons, prod, size, masked_prod, masked_cons;
+ RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+ int32_t error;
+
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ error = intf->in_error;
+ /* get pointers before reading from the ring */
+ virt_rmb();
+ if (error < 0)
+ return error;
+
+ size = pvcalls_queued(prod, cons, array_size);
+ masked_prod = pvcalls_mask(prod, array_size);
+ masked_cons = pvcalls_mask(cons, array_size);
+
+ if (size == 0)
+ return 0;
+
+ if (len > size)
+ len = size;
+
+ if (masked_prod > masked_cons) {
+ len = copy_to_iter(data->in + masked_cons, len, msg_iter);
+ } else {
+ if (len > (array_size - masked_cons)) {
+ int ret = copy_to_iter(data->in + masked_cons,
+ array_size - masked_cons, msg_iter);
+ if (ret != array_size - masked_cons) {
+ len = ret;
+ goto out;
+ }
+ len = ret + copy_to_iter(data->in, len - ret, msg_iter);
+ } else {
+ len = copy_to_iter(data->in + masked_cons, len, msg_iter);
+ }
+ }
+out:
+ /* read data from the ring before increasing the index */
+ virt_mb();
+ if (!(flags & MSG_PEEK))
+ intf->in_cons += len;
+
+ return len;
+}
+
+int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+ struct pvcalls_bedata *bedata;
+ int ret;
+ struct sock_mapping *map;
+
+ if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ mutex_lock(&map->active.in_mutex);
+ if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
+ len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ while (!(flags & MSG_DONTWAIT) && !pvcalls_front_read_todo(map)) {
+ wait_event_interruptible(map->active.inflight_conn_req,
+ pvcalls_front_read_todo(map));
+ }
+ ret = __read_ring(map->active.ring, &map->active.data,
+ &msg->msg_iter, len, flags);
+
+ if (ret > 0)
+ notify_remote_via_irq(map->active.irq);
+ if (ret == 0)
+ ret = (flags & MSG_DONTWAIT) ? -EAGAIN : 0;
+ if (ret == -ENOTCONN)
+ ret = 0;
+
+ mutex_unlock(&map->active.in_mutex);
+ pvcalls_exit();
+ return ret;
+}
+
+int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret;
+
+ if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (map == NULL) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ map->sock = sock;
+ req->cmd = PVCALLS_BIND;
+ req->u.bind.id = (uintptr_t)map;
+ memcpy(req->u.bind.addr, addr, sizeof(*addr));
+ req->u.bind.len = addr_len;
+
+ init_waitqueue_head(&map->passive.inflight_accept_req);
+
+ map->active_socket = false;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+
+ map->passive.status = PVCALLS_STATUS_BIND;
+ pvcalls_exit();
+ return 0;
+}
+
+int pvcalls_front_listen(struct socket *sock, int backlog)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ if (map->passive.status != PVCALLS_STATUS_BIND) {
+ pvcalls_exit();
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_LISTEN;
+ req->u.listen.id = (uintptr_t) map;
+ req->u.listen.backlog = backlog;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+
+ map->passive.status = PVCALLS_STATUS_LISTEN;
+ pvcalls_exit();
+ return ret;
+}
+
+int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ struct sock_mapping *map2 = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret, evtchn, nonblock;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ if (map->passive.status != PVCALLS_STATUS_LISTEN) {
+ pvcalls_exit();
+ return -EINVAL;
+ }
+
+ nonblock = flags & SOCK_NONBLOCK;
+ /*
+ * Backend only supports 1 inflight accept request, will return
+ * errors for the others
+ */
+ if (test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags)) {
+ req_id = READ_ONCE(map->passive.inflight_req_id);
+ if (req_id != PVCALLS_INVALID_ID &&
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id) {
+ map2 = map->passive.accept_map;
+ goto received;
+ }
+ if (nonblock) {
+ pvcalls_exit();
+ return -EAGAIN;
+ }
+ if (wait_event_interruptible(map->passive.inflight_accept_req,
+ !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags))) {
+ pvcalls_exit();
+ return -EINTR;
+ }
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ if (map2 == NULL) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return -ENOMEM;
+ }
+ ret = create_active(map2, &evtchn);
+ if (ret < 0) {
+ kfree(map2);
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ list_add_tail(&map2->list, &bedata->socket_mappings);
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_ACCEPT;
+ req->u.accept.id = (uintptr_t) map;
+ req->u.accept.ref = map2->active.ref;
+ req->u.accept.id_new = (uintptr_t) map2;
+ req->u.accept.evtchn = evtchn;
+ map->passive.accept_map = map2;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+ /* We could check if we have received a response before returning. */
+ if (nonblock) {
+ WRITE_ONCE(map->passive.inflight_req_id, req_id);
+ pvcalls_exit();
+ return -EAGAIN;
+ }
+
+ if (wait_event_interruptible(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
+ pvcalls_exit();
+ return -EINTR;
+ }
+ /* read req_id, then the content */
+ smp_rmb();
+
+received:
+ map2->sock = newsock;
+ newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+ if (!newsock->sk) {
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+ map->passive.inflight_req_id = PVCALLS_INVALID_ID;
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ pvcalls_front_free_map(bedata, map2);
+ pvcalls_exit();
+ return -ENOMEM;
+ }
+ newsock->sk->sk_send_head = (void *)map2;
+
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+ map->passive.inflight_req_id = PVCALLS_INVALID_ID;
+
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
+ wake_up(&map->passive.inflight_accept_req);
+
+ pvcalls_exit();
+ return ret;
+}
+
+static unsigned int pvcalls_front_poll_passive(struct file *file,
+ struct pvcalls_bedata *bedata,
+ struct sock_mapping *map,
+ poll_table *wait)
+{
+ int notify, req_id, ret;
+ struct xen_pvcalls_request *req;
+
+ if (test_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags)) {
+ uint32_t req_id = READ_ONCE(map->passive.inflight_req_id);
+
+ if (req_id != PVCALLS_INVALID_ID &&
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id)
+ return POLLIN | POLLRDNORM;
+
+ poll_wait(file, &map->passive.inflight_accept_req, wait);
+ return 0;
+ }
+
+ if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET,
+ (void *)&map->passive.flags))
+ return POLLIN | POLLRDNORM;
+
+ /*
+ * First check RET, then INFLIGHT. No barriers necessary to
+ * ensure execution ordering because of the conditional
+ * instructions creating control dependencies.
+ */
+
+ if (test_and_set_bit(PVCALLS_FLAG_POLL_INFLIGHT,
+ (void *)&map->passive.flags)) {
+ poll_wait(file, &bedata->inflight_req, wait);
+ return 0;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ return ret;
+ }
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_POLL;
+ req->u.poll.id = (uintptr_t) map;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ poll_wait(file, &bedata->inflight_req, wait);
+ return 0;
+}
+
+static unsigned int pvcalls_front_poll_active(struct file *file,
+ struct pvcalls_bedata *bedata,
+ struct sock_mapping *map,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ int32_t in_error, out_error;
+ struct pvcalls_data_intf *intf = map->active.ring;
+
+ out_error = intf->out_error;
+ in_error = intf->in_error;
+
+ poll_wait(file, &map->active.inflight_conn_req, wait);
+ if (pvcalls_front_write_todo(map))
+ mask |= POLLOUT | POLLWRNORM;
+ if (pvcalls_front_read_todo(map))
+ mask |= POLLIN | POLLRDNORM;
+ if (in_error != 0 || out_error != 0)
+ mask |= POLLERR;
+
+ return mask;
+}
+
+unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ int ret;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return POLLNVAL;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return POLLNVAL;
+ }
+ if (map->active_socket)
+ ret = pvcalls_front_poll_active(file, bedata, map, wait);
+ else
+ ret = pvcalls_front_poll_passive(file, bedata, map, wait);
+ pvcalls_exit();
+ return ret;
+}
+
+int pvcalls_front_release(struct socket *sock)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ int req_id, notify, ret;
+ struct xen_pvcalls_request *req;
+
+ if (sock->sk == NULL)
+ return 0;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -EIO;
+ }
+
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (map == NULL) {
+ pvcalls_exit();
+ return 0;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ sock->sk->sk_send_head = NULL;
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_RELEASE;
+ req->u.release.id = (uintptr_t)map;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ if (map->active_socket) {
+ /*
+ * Set in_error and wake up inflight_conn_req to force
+ * recvmsg waiters to exit.
+ */
+ map->active.ring->in_error = -EBADF;
+ wake_up_interruptible(&map->active.inflight_conn_req);
+
+ /*
+ * We need to make sure that sendmsg/recvmsg on this socket have
+ * not started before we've cleared sk_send_head here. The
+ * easiest (though not optimal) way to guarantee this is to see
+ * that no pvcall (other than us) is in progress.
+ */
+ while (atomic_read(&pvcalls_refcount) > 1)
+ cpu_relax();
+
+ pvcalls_front_free_map(bedata, map);
+ } else {
+ spin_lock(&bedata->socket_lock);
+ list_del(&map->list);
+ spin_unlock(&bedata->socket_lock);
+ if (READ_ONCE(map->passive.inflight_req_id) !=
+ PVCALLS_INVALID_ID) {
+ pvcalls_front_free_map(bedata,
+ map->passive.accept_map);
+ }
+ kfree(map);
+ }
+ WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+
+ pvcalls_exit();
+ return 0;
+}
+
+static const struct xenbus_device_id pvcalls_front_ids[] = {
+ { "pvcalls" },
+ { "" }
+};
+
+static int pvcalls_front_remove(struct xenbus_device *dev)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL, *n;
+
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+ dev_set_drvdata(&dev->dev, NULL);
+ pvcalls_front_dev = NULL;
+ if (bedata->irq >= 0)
+ unbind_from_irqhandler(bedata->irq, dev);
+
+ list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+ map->sock->sk->sk_send_head = NULL;
+ if (map->active_socket) {
+ map->active.ring->in_error = -EBADF;
+ wake_up_interruptible(&map->active.inflight_conn_req);
+ }
+ }
+
+ smp_mb();
+ while (atomic_read(&pvcalls_refcount) > 0)
+ cpu_relax();
+ list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+ if (map->active_socket) {
+ /* No need to lock, refcount is 0 */
+ pvcalls_front_free_map(bedata, map);
+ } else {
+ list_del(&map->list);
+ kfree(map);
+ }
+ }
+ if (bedata->ref != -1)
+ gnttab_end_foreign_access(bedata->ref, 0, 0);
+ kfree(bedata->ring.sring);
+ kfree(bedata);
+ xenbus_switch_state(dev, XenbusStateClosed);
+ return 0;
+}
+
+static int pvcalls_front_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int ret = -ENOMEM, evtchn, i;
+ unsigned int max_page_order, function_calls, len;
+ char *versions;
+ grant_ref_t gref_head = 0;
+ struct xenbus_transaction xbt;
+ struct pvcalls_bedata *bedata = NULL;
+ struct xen_pvcalls_sring *sring;
+
+ if (pvcalls_front_dev != NULL) {
+ dev_err(&dev->dev, "only one PV Calls connection supported\n");
+ return -EINVAL;
+ }
+
+ versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+ if (IS_ERR(versions))
+ return PTR_ERR(versions);
+ if (!len)
+ return -EINVAL;
+ if (strcmp(versions, "1")) {
+ kfree(versions);
+ return -EINVAL;
+ }
+ kfree(versions);
+ max_page_order = xenbus_read_unsigned(dev->otherend,
+ "max-page-order", 0);
+ if (max_page_order < PVCALLS_RING_ORDER)
+ return -ENODEV;
+ function_calls = xenbus_read_unsigned(dev->otherend,
+ "function-calls", 0);
+ /* See XENBUS_FUNCTIONS_CALLS in pvcalls.h */
+ if (function_calls != 1)
+ return -ENODEV;
+ pr_info("%s max-page-order is %u\n", __func__, max_page_order);
+
+ bedata = kzalloc(sizeof(struct pvcalls_bedata), GFP_KERNEL);
+ if (!bedata)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, bedata);
+ pvcalls_front_dev = dev;
+ init_waitqueue_head(&bedata->inflight_req);
+ INIT_LIST_HEAD(&bedata->socket_mappings);
+ spin_lock_init(&bedata->socket_lock);
+ bedata->irq = -1;
+ bedata->ref = -1;
+
+ for (i = 0; i < PVCALLS_NR_RSP_PER_RING; i++)
+ bedata->rsp[i].req_id = PVCALLS_INVALID_ID;
+
+ sring = (struct xen_pvcalls_sring *) __get_free_page(GFP_KERNEL |
+ __GFP_ZERO);
+ if (!sring)
+ goto error;
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&bedata->ring, sring, XEN_PAGE_SIZE);
+
+ ret = xenbus_alloc_evtchn(dev, &evtchn);
+ if (ret)
+ goto error;
+
+ bedata->irq = bind_evtchn_to_irqhandler(evtchn,
+ pvcalls_front_event_handler,
+ 0, "pvcalls-frontend", dev);
+ if (bedata->irq < 0) {
+ ret = bedata->irq;
+ goto error;
+ }
+
+ ret = gnttab_alloc_grant_references(1, &gref_head);
+ if (ret < 0)
+ goto error;
+ ret = gnttab_claim_grant_reference(&gref_head);
+ if (ret < 0)
+ goto error;
+ bedata->ref = ret;
+ gnttab_grant_foreign_access_ref(bedata->ref, dev->otherend_id,
+ virt_to_gfn((void *)sring), 0);
+
+ again:
+ ret = xenbus_transaction_start(&xbt);
+ if (ret) {
+ xenbus_dev_fatal(dev, ret, "starting transaction");
+ goto error;
+ }
+ ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
+ if (ret)
+ goto error_xenbus;
+ ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", bedata->ref);
+ if (ret)
+ goto error_xenbus;
+ ret = xenbus_printf(xbt, dev->nodename, "port", "%u",
+ evtchn);
+ if (ret)
+ goto error_xenbus;
+ ret = xenbus_transaction_end(xbt, 0);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, ret, "completing transaction");
+ goto error;
+ }
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ error_xenbus:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, ret, "writing xenstore");
+ error:
+ pvcalls_front_remove(dev);
+ return ret;
+}
+
+static void pvcalls_front_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ switch (backend_state) {
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateInitialising:
+ case XenbusStateInitialised:
+ case XenbusStateUnknown:
+ break;
+
+ case XenbusStateInitWait:
+ break;
+
+ case XenbusStateConnected:
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state */
+ /* fall through */
+ case XenbusStateClosing:
+ xenbus_frontend_closed(dev);
+ break;
+ }
+}
+
+static struct xenbus_driver pvcalls_front_driver = {
+ .ids = pvcalls_front_ids,
+ .probe = pvcalls_front_probe,
+ .remove = pvcalls_front_remove,
+ .otherend_changed = pvcalls_front_changed,
+};
+
+static int __init pvcalls_frontend_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ pr_info("Initialising Xen pvcalls frontend driver\n");
+
+ return xenbus_register_frontend(&pvcalls_front_driver);
+}
+
+module_init(pvcalls_frontend_init);
+
+MODULE_DESCRIPTION("Xen PV Calls frontend driver");
+MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
new file mode 100644
index 000000000000..3332978f4fcd
--- /dev/null
+++ b/drivers/xen/pvcalls-front.h
@@ -0,0 +1,28 @@
+#ifndef __PVCALLS_FRONT_H__
+#define __PVCALLS_FRONT_H__
+
+#include <linux/net.h>
+
+int pvcalls_front_socket(struct socket *sock);
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len, int flags);
+int pvcalls_front_bind(struct socket *sock,
+ struct sockaddr *addr,
+ int addr_len);
+int pvcalls_front_listen(struct socket *sock, int backlog);
+int pvcalls_front_accept(struct socket *sock,
+ struct socket *newsock,
+ int flags);
+int pvcalls_front_sendmsg(struct socket *sock,
+ struct msghdr *msg,
+ size_t len);
+int pvcalls_front_recvmsg(struct socket *sock,
+ struct msghdr *msg,
+ size_t len,
+ int flags);
+unsigned int pvcalls_front_poll(struct file *file,
+ struct socket *sock,
+ poll_table *wait);
+int pvcalls_front_release(struct socket *sock);
+
+#endif
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index a63fedbdcbe9..3e741cd1409c 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -6,6 +6,7 @@
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
@@ -20,6 +21,8 @@
/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(u64[4], old_runstate_time);
+
/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
@@ -48,8 +51,8 @@ static u64 get64(const u64 *p)
return ret;
}
-static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
- unsigned int cpu)
+static void xen_get_runstate_snapshot_cpu_delta(
+ struct vcpu_runstate_info *res, unsigned int cpu)
{
u64 state_time;
struct vcpu_runstate_info *state;
@@ -67,6 +70,71 @@ static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
(state_time & XEN_RUNSTATE_UPDATE));
}
+static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
+ unsigned int cpu)
+{
+ int i;
+
+ xen_get_runstate_snapshot_cpu_delta(res, cpu);
+
+ for (i = 0; i < 4; i++)
+ res->time[i] += per_cpu(old_runstate_time, cpu)[i];
+}
+
+void xen_manage_runstate_time(int action)
+{
+ static struct vcpu_runstate_info *runstate_delta;
+ struct vcpu_runstate_info state;
+ int cpu, i;
+
+ switch (action) {
+ case -1: /* backup runstate time before suspend */
+ if (unlikely(runstate_delta))
+ pr_warn_once("%s: memory leak as runstate_delta is not NULL\n",
+ __func__);
+
+ runstate_delta = kmalloc_array(num_possible_cpus(),
+ sizeof(*runstate_delta),
+ GFP_ATOMIC);
+ if (unlikely(!runstate_delta)) {
+ pr_warn("%s: failed to allocate runstate_delta\n",
+ __func__);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ xen_get_runstate_snapshot_cpu_delta(&state, cpu);
+ memcpy(runstate_delta[cpu].time, state.time,
+ sizeof(runstate_delta[cpu].time));
+ }
+
+ break;
+
+ case 0: /* backup runstate time after resume */
+ if (unlikely(!runstate_delta)) {
+ pr_warn("%s: cannot accumulate runstate time as runstate_delta is NULL\n",
+ __func__);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < 4; i++)
+ per_cpu(old_runstate_time, cpu)[i] +=
+ runstate_delta[cpu].time[i];
+ }
+
+ break;
+
+ default: /* do not accumulate runstate time for checkpointing */
+ break;
+ }
+
+ if (action != -1 && runstate_delta) {
+ kfree(runstate_delta);
+ runstate_delta = NULL;
+ }
+}
+
/*
* Runstate accounting
*/
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 19e45ce21f89..07896f4b2736 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -379,10 +379,12 @@ static void xenbus_reset_frontend(char *fe, char *be, int be_state)
case XenbusStateConnected:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
xenbus_reset_wait_for_backend(be, XenbusStateClosing);
+ /* fall through */
case XenbusStateClosing:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
xenbus_reset_wait_for_backend(be, XenbusStateClosed);
+ /* fall through */
case XenbusStateClosed:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
OpenPOWER on IntegriCloud